// Kick off an async connect; the return value only reports immediate,
// local failures (e.g. already connected, no delegate set) — the actual
// connection result is delivered later via the delegate callbacks.
NSError *connectError = nil;
if (![socket connectToHost:@"deusty.com" onPort:80 error:&connectError]) // Asynchronous!
{
    // If there was an error, it's likely something like "already connected" or "no delegate set"
    NSLog(@"I goofed: %@", connectError);
}
After scheduling a stream with a run loop, its client (set with CFReadStreamSetClient) is notified when various events happen with the stream, such as when it finishes opening, when it has bytes available, and when an error occurs. A stream can be scheduled with multiple run loops and run loop modes. Use CFReadStreamUnscheduleFromRunLoop to later remove the stream from the run loop.
(lldb) po $x0 <NSThread: 0x100f1f9a0>{number = 2, name = GCDAsyncSocket-CFStream}
(lldb) po $x2 <_NSThreadPerformInfo: 0x100f058e0>
(lldb) ivars 0x100f1f9a0 <NSThread: 0x100f1f9a0>: in NSThread: _private (id): <_NSThreadData: 0x100f318a0> _bytes (unsigned char[44]): Value not representable, [44C] in NSObject: isa (Class): NSThread (isa, 0x41a21933b471)
(lldb) ivars 0x100f318a0 <_NSThreadData: 0x100f318a0>: in _NSThreadData: dict (id): <__NSDictionaryM: 0x100f30130> name (id): @"GCDAsyncSocket-CFStream" target (id): <ReleadeTrack: 0x100707f58> selector (SEL): cfstreamThread: argument (id): nil seqNum (int): 2 qstate (unsigned char): Value not representable, C qos (char): 0 cancel (unsigned char): Value not representable, C status (unsigned char): Value not representable, C performQ (id): nil performD (NSMutableDictionary*): nil attr (struct _opaque_pthread_attr_t): { __sig (long): 1414022209 __opaque (char[56]): Value not representable, [56c] } tid (struct _opaque_pthread_t*): 0x100f31928 -> 0x16fa93000 pri (double): 0.5 defpri (double): 0.5 in NSObject: isa (Class): _NSThreadData (isa, 0x1a21933b449)
if (variant == RRVariant::FastOrMsgSend) { // These checks are only meaningful for objc_release() // They are here so that we avoid a re-load of the isa. if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) { ClearExclusive(&isa.bits); if (oldisa.getDecodedClass(false)->canCallSwiftRR()) { swiftRelease.load(memory_order_relaxed)((id)this); return true; } ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release)); return true; } }
if (slowpath(!oldisa.nonpointer)) { // a Class is a Class forever, so we can perform this check once // outside of the CAS loop if (oldisa.getDecodedClass(false)->isMetaClass()) { ClearExclusive(&isa.bits); return false; } }
retry: do { newisa = oldisa; if (slowpath(!newisa.nonpointer)) { ClearExclusive(&isa.bits); return sidetable_release(sideTableLocked, performDealloc); } if (slowpath(newisa.isDeallocating())) { ClearExclusive(&isa.bits); if (sideTableLocked) { ASSERT(variant == RRVariant::Full); sidetable_unlock(); } return false; }
// don't check newisa.fast_rr; we already called any RR overrides uintptr_t carry; newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc-- if (slowpath(carry)) { // don't ClearExclusive() goto underflow; } } while (slowpath(!StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits)));
if (slowpath(newisa.isDeallocating())) goto deallocate;
if (variant == RRVariant::Full) { if (slowpath(sideTableLocked)) sidetable_unlock(); } else { ASSERT(!sideTableLocked); } return false;
underflow: // newisa.extra_rc-- underflowed: borrow from side table or deallocate
// abandon newisa to undo the decrement newisa = oldisa;
if (slowpath(newisa.has_sidetable_rc)) { if (variant != RRVariant::Full) { ClearExclusive(&isa.bits); return rootRelease_underflow(performDealloc); }
// Transfer retain count from side table to inline storage.
if (!sideTableLocked) { ClearExclusive(&isa.bits); sidetable_lock(); sideTableLocked = true; // Need to start over to avoid a race against // the nonpointer -> raw pointer transition. oldisa = LoadExclusive(&isa.bits); goto retry; }
// Try to remove some retain counts from the side table. auto borrow = sidetable_subExtraRC_nolock(RC_HALF);
bool emptySideTable = borrow.remaining == 0; // we'll clear the side table if no refcounts remain there
if (borrow.borrowed > 0) { // Side table retain count decreased. // Try to add them to the inline count. bool didTransitionToDeallocating = false; newisa.extra_rc = borrow.borrowed - 1; // redo the original decrement too newisa.has_sidetable_rc = !emptySideTable;
if (!stored && oldisa.nonpointer) { // Inline update failed. // Try it again right now. This prevents livelock on LL/SC // architectures where the side table access itself may have // dropped the reservation. uintptr_t overflow; newisa.bits = addc(oldisa.bits, RC_ONE * (borrow.borrowed-1), 0, &overflow); newisa.has_sidetable_rc = !emptySideTable; if (!overflow) { stored = StoreReleaseExclusive(&isa.bits, &oldisa.bits, newisa.bits); if (stored) { didTransitionToDeallocating = newisa.isDeallocating(); } } }
if (!stored) { // Inline update failed. // Put the retains back in the side table. ClearExclusive(&isa.bits); sidetable_addExtraRC_nolock(borrow.borrowed); oldisa = LoadExclusive(&isa.bits); goto retry; }
// Decrement successful after borrowing from side table. if (emptySideTable) sidetable_clearExtraRC_nolock();
if (!didTransitionToDeallocating) { if (slowpath(sideTableLocked)) sidetable_unlock(); return false; } } else { // Side table is empty after all. Fall-through to the dealloc path. } }
if (variant == RRVariant::FastOrMsgSend) { // These checks are only meaningful for objc_release() // They are here so that we avoid a re-load of the isa. if (slowpath(oldisa.getDecodedClass(false)->hasCustomRR())) { ClearExclusive(&isa.bits); if (oldisa.getDecodedClass(false)->canCallSwiftRR()) { swiftRelease.load(memory_order_relaxed)((id)this); return true; } ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(release)); return true; } } ... newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); ... if (slowpath(newisa.isDeallocating())) goto deallocate; ... deallocate: // Really deallocate.
if (performDealloc) { ((void(*)(objc_object *, SEL))objc_msgSend)(this, @selector(dealloc)); } return true;
# if __has_feature(ptrauth_calls) # if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH // Most callers aren't security critical, so skip the // authentication unless they ask for it. Message sending and // cache filling are protected by the auth code in msgSend. if (authenticated) { // Mask off all bits besides the class pointer and signature. clsbits &= ISA_MASK; if (clsbits == 0) return Nil; clsbits = (uintptr_t)ptrauth_auth_data((void *)clsbits, ISA_SIGNING_KEY, ptrauth_blend_discriminator(this, ISA_SIGNING_DISCRIMINATOR)); } else { // If not authenticating, strip using the precomputed class mask. clsbits &= objc_debug_isa_class_mask; } # else // If not authenticating, strip using the precomputed class mask. clsbits &= objc_debug_isa_class_mask; # endif
# else clsbits &= ISA_MASK; # endif
return (Class)clsbits; #endif }
// a better definition is // (uintptr_t)ptrauth_strip((void *)ISA_MASK, ISA_SIGNING_KEY) // however we know that PAC uses bits outside of MACH_VM_MAX_ADDRESS // so approximate the definition here to be constant template <typename T> static constexpr T coveringMask(T n) { for (T mask = 0; mask != ~T{0}; mask = (mask << 1) | 1) { if ((n & mask) == n) return mask; } return ~T{0}; } const uintptr_t objc_debug_isa_class_mask = ISA_MASK & coveringMask(MACH_VM_MAX_ADDRESS - 1);
// class or superclass has default retain/release/autorelease/retainCount/ // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference #define FAST_HAS_DEFAULT_RR (1UL<<2)
// The root layout of every Objective-C object: a single isa word.
// isa_t packs the class pointer together with additional state
// (the surrounding excerpts read nonpointer / extra_rc bits from it).
// Fix: a C++ struct definition must end with a semicolon; the original
// excerpt was missing it after the closing brace.
struct objc_object {
private:
    isa_t isa;
};
struct objc_class : objc_object { // Class ISA; Class superclass; cache_t cache; // formerly cache pointer and vtable class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags