/***********************************************************************
* _objc_init
* Bootstrap initialization. Registers our image notifier with dyld.
* Called by libSystem BEFORE library initialization time
**********************************************************************/
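// A condensed sketch of _objc_init as it appears in the objc4 sources
// (the earlier environ/tls/runtime/cache init calls are elided here):
void _objc_init(void)
{
    static bool initialized = false;
    if (initialized) return;
    initialized = true;

    // ... environ_init(), tls_init(), runtime_init(), cache_init(), etc. ...

    // Register the map/load/unmap notifiers with dyld. dyld then calls
    // load_images() below for each image that is mapped in, which is how
    // +load methods get run.
    _dyld_objc_notify_register(&map_images, load_images, unmap_image);
}

/***********************************************************************
* load_images
* Process +load in the given images which are being mapped in by dyld.
*
* Locking: write-locks runtimeLock and loadMethodLock
**********************************************************************/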
void
load_images(const char *path __unused, const struct mach_header *mh)
{
    // Return without taking locks if there are no +load methods here.
    if (!hasLoadMethods((const headerType *)mh)) return;
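    // Note: in the objc4 sources the detach-and-dispatch logic below lives
    // in call_class_loads(), reached from here via call_load_methods(); it
    // is shown inline in this excerpt. The list is detached first because a
    // +load body can itself trigger more image loads and grow the list.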
    // Detach current loadable list.
    struct loadable_class *classes = loadable_classes;
    int used = loadable_classes_used;
    loadable_classes = nil;
    loadable_classes_allocated = 0;
    loadable_classes_used = 0;
    // Call all +loads for the detached list.
    for (int i = 0; i < used; i++) {
        Class cls = classes[i].cls;
        load_method_t load_method = (load_method_t)classes[i].method;
        if (!cls) continue;

        // +load is called directly through the function pointer so that
        // every class's own implementation runs, not just the one that
        // objc_msgSend dispatch would select.
        (*load_method)(cls, @selector(load));
    }

    // Destroy the detached list.
    if (classes) free(classes);
}
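// For illustration, a class that ends up on the loadable list above looks
// like this in user code (hypothetical class name):
//
//   @implementation PageTracker
//   + (void)load {
//       // Runs exactly once, while this image is being loaded by dyld,
//       // before main() for statically linked images.
//   }
//   @end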
/***********************************************************************
* prepare_load_methods
* Schedule +load for classes and categories in the given image.
* (Old-ABI form, which walks the image's module list; mhdr is the
* image's mach header.)
**********************************************************************/
void prepare_load_methods(const headerType *mhdr)
{
    Module mods;
    unsigned int midx;

    header_info *hi;
    for (hi = FirstHeader; hi; hi = hi->getNext()) {
        if (mhdr == hi->mhdr()) break;
    }
    if (!hi) return;
    if (hi->info()->isReplacement()) {
        // Ignore any classes in this image
        return;
    }
    // Major loop - process all modules in the image
    mods = hi->mod_ptr;
    for (midx = 0; midx < hi->mod_count; midx += 1) {
        unsigned int index;
        // Skip module containing no classes
        if (mods[midx].symtab == nil) continue;
        // Minor loop - process all the classes in given module
        for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1) {
            // Locate the class description pointer
            Class cls = (Class)mods[midx].symtab->defs[index];
            if (cls->info & CLS_CONNECTED) {
                schedule_class_load(cls);
            }
        }
    }
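    // CLS_CONNECTED (checked in the minor loop above) means the class's
    // superclass chain has been resolved; only connected classes are safe
    // to schedule for +load at this point.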
    // Major loop - process all modules in the header
    mods = hi->mod_ptr;
    // NOTE: The module and category lists are traversed backwards
    // to preserve the pre-10.4 processing order. Changing the order
    // would have a small chance of introducing binary compatibility bugs.
    midx = (unsigned int)hi->mod_count;
    while (midx-- > 0) {
        unsigned int index;
        unsigned int total;
        Symtab symtab = mods[midx].symtab;
        // Nothing to do for a module without a symbol table
        if (mods[midx].symtab == nil) continue;

        // Total entries in symbol table (class entries followed
        // by category entries)
        total = mods[midx].symtab->cls_def_cnt +
            mods[midx].symtab->cat_def_cnt;
        // Minor loop - register all categories from given module
        index = total;
        while (index-- > mods[midx].symtab->cls_def_cnt) {
            old_category *cat = (old_category *)symtab->defs[index];
            add_category_to_loadable_list((Category)cat);
        }
    }
}
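// Layout reminder: symtab->defs[] stores cls_def_cnt class entries followed
// by cat_def_cnt category entries, so the `index-- > cls_def_cnt` loop above
// visits exactly the category tail, last entry first.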
static void schedule_class_load(Class cls)
{
    if (!cls) return;
    ASSERT(cls->isRealized());  // _read_images should realize

    if (cls->data()->flags & RW_LOADED) return;

    // Ensure superclass-first ordering
    schedule_class_load(cls->superclass);

    add_class_to_loadable_list(cls);
    cls->setInfo(RW_LOADED);
}
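/***********************************************************************
* lookUpImpOrForward
* The standard IMP lookup, reached when objc_msgSend misses the method
* cache. May return _objc_msgForward_impcache to trigger forwarding.
**********************************************************************/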
IMP lookUpImpOrForward(id inst, SEL sel, Class cls, int behavior)
{
    const IMP forward_imp = (IMP)_objc_msgForward_impcache;
    IMP imp = nil;
    Class curClass;
    runtimeLock.assertUnlocked();
    // Optimistic cache lookup
    if (fastpath(behavior & LOOKUP_CACHE)) {
        imp = cache_getImp(cls, sel);
        if (imp) goto done_nolock;
    }
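    // The unlocked probe is safe: cache_getImp is the same lock-free cache
    // scan that objc_msgSend performs in assembly, so at worst it misses
    // and we fall through to the locked slow path.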
    // runtimeLock is held during isRealized and isInitialized checking
    // to prevent races against concurrent realization.

    // runtimeLock is held during method search to make
    // method-lookup + cache-fill atomic with respect to method addition.
    // Otherwise, a category could be added but ignored indefinitely because
    // the cache was re-filled with the old value after the cache flush on
    // behalf of the category.
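    // Concretely, the race being prevented is:
    //   thread A: looks up the old IMP without the lock
    //   thread B: attaches a category, flushes the method cache
    //   thread A: fills the cache with the stale IMP it already found
    // after which the category's override stays invisible until the next
    // cache flush.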
    runtimeLock.lock();
    // We don't want people to be able to craft a binary blob that looks like
    // a class but really isn't one and do a CFI attack.
    //
    // To make these harder we want to make sure this is a class that was
    // either built into the binary or legitimately registered through
    // objc_duplicateClass, objc_initializeClassPair or objc_allocateClassPair.
    //
    // TODO: this check is quite costly during process startup.
    checkIsKnownClass(cls);
    if (slowpath(!cls->isRealized())) {
        cls = realizeClassMaybeSwiftAndLeaveLocked(cls, runtimeLock);
        // runtimeLock may have been dropped but is now locked again
    }
    if (slowpath((behavior & LOOKUP_INITIALIZE) && !cls->isInitialized())) {
        cls = initializeAndLeaveLocked(cls, inst, runtimeLock);
        // runtimeLock may have been dropped but is now locked again
        // If sel == initialize, class_initialize will send +initialize and
        // then the messenger will send +initialize again after this
        // procedure finishes. Of course, if this is not being called
        // from the messenger then it won't happen. 2778172
    }
    runtimeLock.assertLocked();
    curClass = cls;
    // The code used to look up the class's cache again right after
    // taking the lock, but for the vast majority of cases evidence
    // shows this is a miss most of the time, hence a time loss.
    //
    // The only codepath calling into this without having performed some
    // kind of cache lookup is class_getInstanceMethod().
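    // The loop header is elided in this excerpt; per the objc4 sources it
    // is a bounded walk up the class hierarchy, checking each class's own
    // method list before consulting its superclass:
    for (unsigned attempts = unreasonableClassCount();;) {
        // curClass method list.
        Method meth = getMethodNoSuper_nolock(curClass, sel);
        if (meth) {
            imp = meth->imp;
            goto done;
        }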
        if (slowpath((curClass = curClass->superclass) == nil)) {
            // No implementation found, and method resolver didn't help.
            // Use forwarding.
            imp = forward_imp;
            break;
        }
        // Halt if there is a cycle in the superclass chain.
        if (slowpath(--attempts == 0)) {
            _objc_fatal("Memory corruption in class list.");
        }
        // Superclass cache.
        imp = cache_getImp(curClass, sel);
        if (slowpath(imp == forward_imp)) {
            // Found a forward:: entry in a superclass.
            // Stop searching, but don't cache yet; call method
            // resolver for this class first.
            break;
        }
        if (fastpath(imp)) {
            // Found the method in a superclass. Cache it in this class.
            goto done;
        }
    }
    // No implementation found. Try method resolver once.
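    // Roughly, per the objc4 sources, the resolver attempt and the exit
    // paths (the `done` / `done_nolock` targets used above) look like:
    if (slowpath(behavior & LOOKUP_RESOLVER)) {
        behavior ^= LOOKUP_RESOLVER;
        return resolveMethod_locked(inst, sel, cls, behavior);
    }

 done:
    log_and_fill_cache(cls, imp, sel, inst, curClass);
    runtimeLock.unlock();
 done_nolock:
    if (slowpath((behavior & LOOKUP_NIL) && imp == forward_imp)) {
        return nil;
    }
    return imp;
}

/***********************************************************************
* initializeNonMetaClass (excerpt)
* Sends +initialize to cls's superclass chain first, then to cls.
**********************************************************************/
void initializeNonMetaClass(Class cls)
{
    ASSERT(!cls->isMetaClass());
    Class supercls;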
    // Make sure super is done initializing BEFORE beginning to initialize cls.
    // See note about deadlock above.
    supercls = cls->superclass;
    if (supercls && !supercls->isInitialized()) {
        initializeNonMetaClass(supercls);
    }
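    // (The rest of initializeNonMetaClass, which sets the "initializing"
    // bit and actually sends +initialize, is elided from this excerpt.)
}

// For illustration (hypothetical classes): if Sub inherits from Base and
// neither is initialized, the first message to a Sub instance runs
// +[Base initialize] before +[Sub initialize], because of the recursion
// above.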