/* dl-inlines.h */
  1. /* Copyright (C) 2003, 2004 Red Hat, Inc.
  2. * Contributed by Alexandre Oliva <aoliva@redhat.com>
  3. * Copyright (C) 2006-2011 Analog Devices, Inc.
  4. *
  5. * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball.
  6. */
  7. /* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete load map. */
  8. static __always_inline void
  9. __dl_init_loadaddr_map(struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer,
  10. struct elf32_fdpic_loadmap *map)
  11. {
  12. if (map->version != 0) {
  13. SEND_EARLY_STDERR("Invalid loadmap version number\n");
  14. _dl_exit(-1);
  15. }
  16. if (map->nsegs == 0) {
  17. SEND_EARLY_STDERR("Invalid segment count in loadmap\n");
  18. _dl_exit(-1);
  19. }
  20. loadaddr->got_value = (void *)dl_boot_got_pointer;
  21. loadaddr->map = map;
  22. }
  23. /*
  24. * Figure out how many LOAD segments there are in the given headers,
  25. * and allocate a block for the load map big enough for them.
  26. * got_value will be properly initialized later on, with INIT_GOT.
  27. */
  28. static __always_inline int
  29. __dl_init_loadaddr(struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt,
  30. int pcnt)
  31. {
  32. int count = 0, i;
  33. size_t size;
  34. for (i = 0; i < pcnt; i++)
  35. if (ppnt[i].p_type == PT_LOAD)
  36. count++;
  37. loadaddr->got_value = 0;
  38. size = sizeof(struct elf32_fdpic_loadmap) +
  39. (sizeof(struct elf32_fdpic_loadseg) * count);
  40. loadaddr->map = _dl_malloc(size);
  41. if (!loadaddr->map)
  42. _dl_exit(-1);
  43. loadaddr->map->version = 0;
  44. loadaddr->map->nsegs = 0;
  45. return count;
  46. }
  47. /* Incrementally initialize a load map. */
  48. static __always_inline void
  49. __dl_init_loadaddr_hdr(struct elf32_fdpic_loadaddr loadaddr, void *addr,
  50. Elf32_Phdr *phdr, int maxsegs)
  51. {
  52. struct elf32_fdpic_loadseg *segdata;
  53. if (loadaddr.map->nsegs == maxsegs)
  54. _dl_exit(-1);
  55. segdata = &loadaddr.map->segs[loadaddr.map->nsegs++];
  56. segdata->addr = (Elf32_Addr)addr;
  57. segdata->p_vaddr = phdr->p_vaddr;
  58. segdata->p_memsz = phdr->p_memsz;
  59. #if defined(__SUPPORT_LD_DEBUG__)
  60. if (_dl_debug)
  61. _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
  62. loadaddr.map->nsegs - 1,
  63. segdata->p_vaddr, segdata->addr, segdata->p_memsz);
  64. #endif
  65. }
  66. /* Replace an existing entry in the load map. */
  67. static __always_inline void
  68. __dl_update_loadaddr_hdr(struct elf32_fdpic_loadaddr loadaddr, void *addr,
  69. Elf32_Phdr *phdr)
  70. {
  71. struct elf32_fdpic_loadseg *segdata;
  72. void *oldaddr;
  73. int i;
  74. for (i = 0; i < loadaddr.map->nsegs; i++)
  75. if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr &&
  76. loadaddr.map->segs[i].p_memsz == phdr->p_memsz)
  77. break;
  78. if (i == loadaddr.map->nsegs)
  79. _dl_exit(-1);
  80. segdata = loadaddr.map->segs + i;
  81. oldaddr = (void *)segdata->addr;
  82. _dl_munmap(oldaddr, segdata->p_memsz);
  83. segdata->addr = (Elf32_Addr)addr;
  84. #if defined (__SUPPORT_LD_DEBUG__)
  85. if (_dl_debug)
  86. _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n",
  87. loadaddr.map->nsegs - 1,
  88. segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz);
  89. #endif
  90. }
  91. #ifndef __dl_loadaddr_unmap
  92. static __always_inline void
  93. __dl_loadaddr_unmap(struct elf32_fdpic_loadaddr loadaddr,
  94. struct funcdesc_ht *funcdesc_ht)
  95. {
  96. int i;
  97. for (i = 0; i < loadaddr.map->nsegs; i++)
  98. _dl_munmap((void *)loadaddr.map->segs[i].addr,
  99. loadaddr.map->segs[i].p_memsz);
  100. /*
  101. * _dl_unmap is only called for dlopen()ed libraries, for which
  102. * calling free() is safe, or before we've completed the initial
  103. * relocation, in which case calling free() is probably pointless,
  104. * but still safe.
  105. */
  106. _dl_free(loadaddr.map);
  107. if (funcdesc_ht)
  108. htab_delete(funcdesc_ht);
  109. }
  110. #endif
  111. /* Figure out whether the given address is in one of the mapped segments. */
  112. static __always_inline int
  113. __dl_addr_in_loadaddr(void *p, struct elf32_fdpic_loadaddr loadaddr)
  114. {
  115. struct elf32_fdpic_loadmap *map = loadaddr.map;
  116. int c;
  117. for (c = 0; c < map->nsegs; c++)
  118. if ((void *)map->segs[c].addr <= p &&
  119. (char *)p < (char *)map->segs[c].addr + map->segs[c].p_memsz)
  120. return 1;
  121. return 0;
  122. }
  123. /*
  124. * The hashcode handling code below is heavily inspired in libiberty's
  125. * hashtab code, but with most adaptation points and support for
  126. * deleting elements removed.
  127. *
  128. * Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
  129. * Contributed by Vladimir Makarov (vmakarov@cygnus.com).
  130. */
  131. static __always_inline unsigned long
  132. higher_prime_number(unsigned long n)
  133. {
  134. /* These are primes that are near, but slightly smaller than, a power of two. */
  135. static const unsigned long primes[] = {
  136. 7,
  137. 13,
  138. 31,
  139. 61,
  140. 127,
  141. 251,
  142. 509,
  143. 1021,
  144. 2039,
  145. 4093,
  146. 8191,
  147. 16381,
  148. 32749,
  149. 65521,
  150. 131071,
  151. 262139,
  152. 524287,
  153. 1048573,
  154. 2097143,
  155. 4194301,
  156. 8388593,
  157. 16777213,
  158. 33554393,
  159. 67108859,
  160. 134217689,
  161. 268435399,
  162. 536870909,
  163. 1073741789,
  164. /* 4294967291 */
  165. ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
  166. };
  167. const unsigned long *low = &primes[0];
  168. const unsigned long *high = &primes[ARRAY_SIZE(primes)];
  169. while (low != high) {
  170. const unsigned long *mid = low + (high - low) / 2;
  171. if (n > *mid)
  172. low = mid + 1;
  173. else
  174. high = mid;
  175. }
  176. #if 0
  177. /* If we've run out of primes, abort. */
  178. if (n > *low) {
  179. fprintf(stderr, "Cannot find prime bigger than %lu\n", n);
  180. abort();
  181. }
  182. #endif
  183. return *low;
  184. }
/* Open-addressed hash table mapping code entry points to canonical
   function descriptors (struct funcdesc_value), one table per loaded
   module.  Collisions are resolved by double hashing; see
   htab_find_slot. */
struct funcdesc_ht
{
	/* Table itself */
	struct funcdesc_value **entries;
	/* Current size (in entries) of the hash table */
	size_t size;
	/* Current number of elements */
	size_t n_elements;
};
  194. static __always_inline int
  195. hash_pointer(const void *p)
  196. {
  197. return (int) ((long)p >> 3);
  198. }
  199. static __always_inline struct funcdesc_ht *
  200. htab_create(void)
  201. {
  202. struct funcdesc_ht *ht = _dl_malloc(sizeof(*ht));
  203. size_t ent_size;
  204. if (!ht)
  205. return NULL;
  206. ht->size = 3;
  207. ent_size = sizeof(struct funcdesc_ht_value *) * ht->size;
  208. ht->entries = _dl_malloc(ent_size);
  209. if (!ht->entries)
  210. return NULL;
  211. ht->n_elements = 0;
  212. _dl_memset(ht->entries, 0, ent_size);
  213. return ht;
  214. }
  215. /*
  216. * This is only called from _dl_loadaddr_unmap, so it's safe to call
  217. * _dl_free(). See the discussion below.
  218. */
  219. static __always_inline void
  220. htab_delete(struct funcdesc_ht *htab)
  221. {
  222. size_t i;
  223. for (i = htab->size - 1; i >= 0; i--)
  224. if (htab->entries[i])
  225. _dl_free(htab->entries[i]);
  226. _dl_free(htab->entries);
  227. _dl_free(htab);
  228. }
  229. /*
  230. * Similar to htab_find_slot, but without several unwanted side effects:
  231. * - Does not call htab->eq_f when it finds an existing entry.
  232. * - Does not change the count of elements/searches/collisions in the
  233. * hash table.
  234. * This function also assumes there are no deleted entries in the table.
  235. * HASH is the hash value for the element to be inserted.
  236. */
  237. static __always_inline struct funcdesc_value **
  238. find_empty_slot_for_expand(struct funcdesc_ht *htab, int hash)
  239. {
  240. size_t size = htab->size;
  241. unsigned int index = hash % size;
  242. struct funcdesc_value **slot = htab->entries + index;
  243. int hash2;
  244. if (!*slot)
  245. return slot;
  246. hash2 = 1 + hash % (size - 2);
  247. for (;;) {
  248. index += hash2;
  249. if (index >= size)
  250. index -= size;
  251. slot = htab->entries + index;
  252. if (!*slot)
  253. return slot;
  254. }
  255. }
  256. /*
  257. * The following function changes size of memory allocated for the
  258. * entries and repeatedly inserts the table elements. The occupancy
  259. * of the table after the call will be about 50%. Naturally the hash
  260. * table must already exist. Remember also that the place of the
  261. * table entries is changed. If memory allocation failures are allowed,
  262. * this function will return zero, indicating that the table could not be
  263. * expanded. If all goes well, it will return a non-zero value.
  264. */
  265. static __always_inline int
  266. htab_expand(struct funcdesc_ht *htab)
  267. {
  268. struct funcdesc_value **oentries;
  269. struct funcdesc_value **olimit;
  270. struct funcdesc_value **p;
  271. struct funcdesc_value **nentries;
  272. size_t nsize;
  273. oentries = htab->entries;
  274. olimit = oentries + htab->size;
  275. /*
  276. * Resize only when table after removal of unused elements is either
  277. * too full or too empty.
  278. */
  279. if (htab->n_elements * 2 > htab->size)
  280. nsize = higher_prime_number(htab->n_elements * 2);
  281. else
  282. nsize = htab->size;
  283. nentries = _dl_malloc(sizeof(*nentries) * nsize);
  284. _dl_memset(nentries, 0, sizeof(*nentries) * nsize);
  285. if (nentries == NULL)
  286. return 0;
  287. htab->entries = nentries;
  288. htab->size = nsize;
  289. p = oentries;
  290. do {
  291. if (*p)
  292. *find_empty_slot_for_expand(htab, hash_pointer((*p)->entry_point)) = *p;
  293. p++;
  294. } while (p < olimit);
  295. #if 0
  296. /*
  297. * We can't tell whether this was allocated by the _dl_malloc()
  298. * built into ld.so or malloc() in the main executable or libc,
  299. * and calling free() for something that wasn't malloc()ed could
  300. * do Very Bad Things (TM). Take the conservative approach
  301. * here, potentially wasting as much memory as actually used by
  302. * the hash table, even if multiple growths occur. That's not
  303. * so bad as to require some overengineered solution that would
  304. * enable us to keep track of how it was allocated.
  305. */
  306. _dl_free(oentries);
  307. #endif
  308. return 1;
  309. }
  310. /*
  311. * This function searches for a hash table slot containing an entry
  312. * equal to the given element. To delete an entry, call this with
  313. * INSERT = 0, then call htab_clear_slot on the slot returned (possibly
  314. * after doing some checks). To insert an entry, call this with
  315. * INSERT = 1, then write the value you want into the returned slot.
  316. * When inserting an entry, NULL may be returned if memory allocation
  317. * fails.
  318. */
/*
 * This function searches for a hash table slot containing an entry
 * equal to the given element. To delete an entry, call this with
 * INSERT = 0, then call htab_clear_slot on the slot returned (possibly
 * after doing some checks). To insert an entry, call this with
 * INSERT = 1, then write the value you want into the returned slot.
 * When inserting an entry, NULL may be returned if memory allocation
 * fails.
 */
static __always_inline struct funcdesc_value **
htab_find_slot(struct funcdesc_ht *htab, void *ptr, int insert)
{
	unsigned int index;
	int hash, hash2;
	size_t size;
	struct funcdesc_value **entry;

	/* Expand once the table is at least 3/4 full; a failed expansion
	   aborts the whole lookup with NULL. */
	if (htab->size * 3 <= htab->n_elements * 4 &&
	    htab_expand(htab) == 0)
		return NULL;

	hash = hash_pointer(ptr);
	size = htab->size;
	index = hash % size;

	/* First probe at the primary hash position. */
	entry = &htab->entries[index];
	if (!*entry)
		goto empty_entry;
	else if ((*entry)->entry_point == ptr)
		return entry;

	/* Double hashing: probe with a secondary stride until we find
	   either PTR's entry or an empty slot. */
	hash2 = 1 + hash % (size - 2);
	for (;;) {
		index += hash2;
		if (index >= size)
			index -= size;

		entry = &htab->entries[index];
		if (!*entry)
			goto empty_entry;
		else if ((*entry)->entry_point == ptr)
			return entry;
	}

empty_entry:
	if (!insert)
		return NULL;

	/* NOTE(review): the element count is bumped as soon as an empty
	   slot is handed out, before the caller has stored into it. */
	htab->n_elements++;
	return entry;
}
  354. void *
  355. _dl_funcdesc_for (void *entry_point, void *got_value)
  356. {
  357. struct elf_resolve *tpnt = ((void**)got_value)[2];
  358. struct funcdesc_ht *ht = tpnt->funcdesc_ht;
  359. struct funcdesc_value **entry;
  360. _dl_assert(got_value == tpnt->loadaddr.got_value);
  361. if (!ht) {
  362. ht = htab_create();
  363. if (!ht)
  364. return (void*)-1;
  365. tpnt->funcdesc_ht = ht;
  366. }
  367. entry = htab_find_slot(ht, entry_point, 1);
  368. if (*entry) {
  369. _dl_assert((*entry)->entry_point == entry_point);
  370. return _dl_stabilize_funcdesc(*entry);
  371. }
  372. *entry = _dl_malloc(sizeof(**entry));
  373. (*entry)->entry_point = entry_point;
  374. (*entry)->got_value = got_value;
  375. return _dl_stabilize_funcdesc(*entry);
  376. }
/*
 * Map a possible function-descriptor address back to its code entry
 * point.  An address with any of its low three bits set cannot be a
 * descriptor and is returned unchanged.  Otherwise each loaded
 * module's descriptor table is searched; if ADDRESS is a canonical
 * descriptor belonging to some module, its entry point is returned,
 * else ADDRESS itself.
 */
static __always_inline void const *
_dl_lookup_address(void const *address)
{
	struct elf_resolve *rpnt;
	struct funcdesc_value const *fd;

	/* Make sure we don't make assumptions about its alignment. */
	__asm__ ("" : "+r" (address));

	if ((Elf32_Addr)address & 7)
		/* It's not a function descriptor. */
		return address;

	fd = address;

	for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next) {
		if (!rpnt->funcdesc_ht)
			continue;

		/* Cheap filter: a descriptor built for this module carries
		   the module's own GOT value. */
		if (fd->got_value != rpnt->loadaddr.got_value)
			continue;

		/* NOTE(review): between here and the dereference below,
		   `address` temporarily holds the hash-table slot (a
		   struct funcdesc_value **), not a code address. */
		address = htab_find_slot(rpnt->funcdesc_ht, (void *)fd->entry_point, 0);

		if (address && *(struct funcdesc_value *const*)address == fd) {
			/* Canonical descriptor confirmed: hand back the
			   code entry point it wraps. */
			address = (*(struct funcdesc_value *const*)address)->entry_point;
			break;
		} else
			/* Not this module's descriptor; restore ADDRESS
			   before trying the next module. */
			address = fd;
	}

	return address;
}