/* dl-inlines.h */
/* Copyright (C) 2003, 2004 Red Hat, Inc.
   Contributed by Alexandre Oliva <aoliva@redhat.com>

   This file is part of uClibc.

   uClibc is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   uClibc is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with uClibc; see the file COPYING.LIB.  If not, write to
   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
   USA.  */
  16. #include <bfin_sram.h>
  17. #ifndef _dl_assert
  18. # define _dl_assert(expr)
  19. #endif
  20. /* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete
  21. load map. */
  22. static __always_inline void
  23. __dl_init_loadaddr_map (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer,
  24. struct elf32_fdpic_loadmap *map)
  25. {
  26. if (map->version != 0)
  27. {
  28. SEND_EARLY_STDERR ("Invalid loadmap version number\n");
  29. _dl_exit(-1);
  30. }
  31. if (map->nsegs == 0)
  32. {
  33. SEND_EARLY_STDERR ("Invalid segment count in loadmap\n");
  34. _dl_exit(-1);
  35. }
  36. loadaddr->got_value = (void *)dl_boot_got_pointer;
  37. loadaddr->map = map;
  38. }
  39. /* Figure out how many LOAD segments there are in the given headers,
  40. and allocate a block for the load map big enough for them.
  41. got_value will be properly initialized later on, with INIT_GOT. */
  42. static __always_inline int
  43. __dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt,
  44. int pcnt)
  45. {
  46. int count = 0, i;
  47. size_t size;
  48. for (i = 0; i < pcnt; i++)
  49. if (ppnt[i].p_type == PT_LOAD)
  50. count++;
  51. loadaddr->got_value = 0;
  52. size = sizeof (struct elf32_fdpic_loadmap)
  53. + sizeof (struct elf32_fdpic_loadseg) * count;
  54. loadaddr->map = _dl_malloc (size);
  55. if (! loadaddr->map)
  56. _dl_exit (-1);
  57. loadaddr->map->version = 0;
  58. loadaddr->map->nsegs = 0;
  59. return count;
  60. }
  61. /* Incrementally initialize a load map. */
  62. static __always_inline void
  63. __dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
  64. Elf32_Phdr *phdr, int maxsegs)
  65. {
  66. struct elf32_fdpic_loadseg *segdata;
  67. if (loadaddr.map->nsegs == maxsegs)
  68. _dl_exit (-1);
  69. segdata = &loadaddr.map->segs[loadaddr.map->nsegs++];
  70. segdata->addr = (Elf32_Addr) addr;
  71. segdata->p_vaddr = phdr->p_vaddr;
  72. segdata->p_memsz = phdr->p_memsz;
  73. #if defined (__SUPPORT_LD_DEBUG__)
  74. if (_dl_debug)
  75. _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
  76. loadaddr.map->nsegs-1,
  77. segdata->p_vaddr, segdata->addr, segdata->p_memsz);
  78. #endif
  79. }
  80. /* Replace an existing entry in the load map. */
  81. static __always_inline void
  82. __dl_update_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
  83. Elf32_Phdr *phdr)
  84. {
  85. struct elf32_fdpic_loadseg *segdata;
  86. void *oldaddr;
  87. int i;
  88. for (i = 0; i < loadaddr.map->nsegs; i++)
  89. if (loadaddr.map->segs[i].p_vaddr == phdr->p_vaddr
  90. && loadaddr.map->segs[i].p_memsz == phdr->p_memsz)
  91. break;
  92. if (i == loadaddr.map->nsegs)
  93. _dl_exit (-1);
  94. segdata = loadaddr.map->segs + i;
  95. oldaddr = (void *)segdata->addr;
  96. _dl_munmap (oldaddr, segdata->p_memsz);
  97. segdata->addr = (Elf32_Addr) addr;
  98. #if defined (__SUPPORT_LD_DEBUG__)
  99. if (_dl_debug)
  100. _dl_dprintf(_dl_debug_file, "%i: changed mapping %x at %x (old %x), size %x\n",
  101. loadaddr.map->nsegs-1,
  102. segdata->p_vaddr, segdata->addr, oldaddr, segdata->p_memsz);
  103. #endif
  104. }
  105. static __always_inline void __dl_loadaddr_unmap
  106. (struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht);
  107. /* Figure out whether the given address is in one of the mapped
  108. segments. */
  109. static __always_inline int
  110. __dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr)
  111. {
  112. struct elf32_fdpic_loadmap *map = loadaddr.map;
  113. int c;
  114. for (c = 0; c < map->nsegs; c++)
  115. if ((void*)map->segs[c].addr <= p
  116. && (char*)p < (char*)map->segs[c].addr + map->segs[c].p_memsz)
  117. return 1;
  118. return 0;
  119. }
  120. static __always_inline void * _dl_funcdesc_for (void *entry_point, void *got_value);
  121. /* The hashcode handling code below is heavily inspired in libiberty's
  122. hashtab code, but with most adaptation points and support for
  123. deleting elements removed.
  124. Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
  125. Contributed by Vladimir Makarov (vmakarov@cygnus.com). */
  126. static __always_inline unsigned long
  127. higher_prime_number (unsigned long n)
  128. {
  129. /* These are primes that are near, but slightly smaller than, a
  130. power of two. */
  131. static const unsigned long primes[] = {
  132. (unsigned long) 7,
  133. (unsigned long) 13,
  134. (unsigned long) 31,
  135. (unsigned long) 61,
  136. (unsigned long) 127,
  137. (unsigned long) 251,
  138. (unsigned long) 509,
  139. (unsigned long) 1021,
  140. (unsigned long) 2039,
  141. (unsigned long) 4093,
  142. (unsigned long) 8191,
  143. (unsigned long) 16381,
  144. (unsigned long) 32749,
  145. (unsigned long) 65521,
  146. (unsigned long) 131071,
  147. (unsigned long) 262139,
  148. (unsigned long) 524287,
  149. (unsigned long) 1048573,
  150. (unsigned long) 2097143,
  151. (unsigned long) 4194301,
  152. (unsigned long) 8388593,
  153. (unsigned long) 16777213,
  154. (unsigned long) 33554393,
  155. (unsigned long) 67108859,
  156. (unsigned long) 134217689,
  157. (unsigned long) 268435399,
  158. (unsigned long) 536870909,
  159. (unsigned long) 1073741789,
  160. (unsigned long) 2147483647,
  161. /* 4294967291L */
  162. ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
  163. };
  164. const unsigned long *low = &primes[0];
  165. const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])];
  166. while (low != high)
  167. {
  168. const unsigned long *mid = low + (high - low) / 2;
  169. if (n > *mid)
  170. low = mid + 1;
  171. else
  172. high = mid;
  173. }
  174. #if 0
  175. /* If we've run out of primes, abort. */
  176. if (n > *low)
  177. {
  178. fprintf (stderr, "Cannot find prime bigger than %lu\n", n);
  179. abort ();
  180. }
  181. #endif
  182. return *low;
  183. }
/* Open-addressed hash table holding the function descriptors created
   by _dl_funcdesc_for, keyed by entry point.  One table per loaded
   module (see elf_resolve::funcdesc_ht users below).  */
struct funcdesc_ht
{
  /* Table itself.  Array of SIZE slots; empty slots are NULL.  */
  struct funcdesc_value **entries;

  /* Current size (in entries) of the hash table */
  size_t size;

  /* Current number of elements.  */
  size_t n_elements;
};
  193. static __always_inline int
  194. hash_pointer (const void *p)
  195. {
  196. return (int) ((long)p >> 3);
  197. }
  198. static __always_inline struct funcdesc_ht *
  199. htab_create (void)
  200. {
  201. struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht));
  202. if (! ht)
  203. return NULL;
  204. ht->size = 3;
  205. ht->entries = _dl_malloc (sizeof (struct funcdesc_ht_value *) * ht->size);
  206. if (! ht->entries)
  207. return NULL;
  208. ht->n_elements = 0;
  209. _dl_memset (ht->entries, 0, sizeof (struct funcdesc_ht_value *) * ht->size);
  210. return ht;
  211. }
  212. /* This is only called from _dl_loadaddr_unmap, so it's safe to call
  213. _dl_free(). See the discussion below. */
  214. static __always_inline void
  215. htab_delete (struct funcdesc_ht *htab)
  216. {
  217. int i;
  218. for (i = htab->size - 1; i >= 0; i--)
  219. if (htab->entries[i])
  220. _dl_free (htab->entries[i]);
  221. _dl_free (htab->entries);
  222. _dl_free (htab);
  223. }
  224. /* Similar to htab_find_slot, but without several unwanted side effects:
  225. - Does not call htab->eq_f when it finds an existing entry.
  226. - Does not change the count of elements/searches/collisions in the
  227. hash table.
  228. This function also assumes there are no deleted entries in the table.
  229. HASH is the hash value for the element to be inserted. */
  230. static __always_inline struct funcdesc_value **
  231. find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash)
  232. {
  233. size_t size = htab->size;
  234. unsigned int index = hash % size;
  235. struct funcdesc_value **slot = htab->entries + index;
  236. int hash2;
  237. if (! *slot)
  238. return slot;
  239. hash2 = 1 + hash % (size - 2);
  240. for (;;)
  241. {
  242. index += hash2;
  243. if (index >= size)
  244. index -= size;
  245. slot = htab->entries + index;
  246. if (! *slot)
  247. return slot;
  248. }
  249. }
  250. /* The following function changes size of memory allocated for the
  251. entries and repeatedly inserts the table elements. The occupancy
  252. of the table after the call will be about 50%. Naturally the hash
  253. table must already exist. Remember also that the place of the
  254. table entries is changed. If memory allocation failures are allowed,
  255. this function will return zero, indicating that the table could not be
  256. expanded. If all goes well, it will return a non-zero value. */
  257. static __always_inline int
  258. htab_expand (struct funcdesc_ht *htab)
  259. {
  260. struct funcdesc_value **oentries;
  261. struct funcdesc_value **olimit;
  262. struct funcdesc_value **p;
  263. struct funcdesc_value **nentries;
  264. size_t nsize;
  265. oentries = htab->entries;
  266. olimit = oentries + htab->size;
  267. /* Resize only when table after removal of unused elements is either
  268. too full or too empty. */
  269. if (htab->n_elements * 2 > htab->size)
  270. nsize = higher_prime_number (htab->n_elements * 2);
  271. else
  272. nsize = htab->size;
  273. nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize);
  274. _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize);
  275. if (nentries == NULL)
  276. return 0;
  277. htab->entries = nentries;
  278. htab->size = nsize;
  279. p = oentries;
  280. do
  281. {
  282. if (*p)
  283. *find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point))
  284. = *p;
  285. p++;
  286. }
  287. while (p < olimit);
  288. #if 0 /* We can't tell whether this was allocated by the _dl_malloc()
  289. built into ld.so or malloc() in the main executable or libc,
  290. and calling free() for something that wasn't malloc()ed could
  291. do Very Bad Things (TM). Take the conservative approach
  292. here, potentially wasting as much memory as actually used by
  293. the hash table, even if multiple growths occur. That's not
  294. so bad as to require some overengineered solution that would
  295. enable us to keep track of how it was allocated. */
  296. _dl_free (oentries);
  297. #endif
  298. return 1;
  299. }
/* This function searches for a hash table slot containing an entry
   equal to the given element.  To delete an entry, call this with
   INSERT = 0, then call htab_clear_slot on the slot returned (possibly
   after doing some checks).  To insert an entry, call this with
   INSERT = 1, then write the value you want into the returned slot.
   When inserting an entry, NULL may be returned if memory allocation
   fails.  */
static __always_inline struct funcdesc_value **
htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert)
{
  unsigned int index;
  int hash, hash2;
  size_t size;
  struct funcdesc_value **entry;

  /* Grow when the table is more than 3/4 full; on expansion
     failure report NULL to the caller.  */
  if (htab->size * 3 <= htab->n_elements * 4
      && htab_expand (htab) == 0)
    return NULL;

  hash = hash_pointer (ptr);

  size = htab->size;
  index = hash % size;

  entry = &htab->entries[index];
  if (!*entry)
    goto empty_entry;
  else if ((*entry)->entry_point == ptr)
    return entry;

  /* Collision: probe with a secondary hash.  SIZE is prime, so the
     probe sequence visits every slot (same scheme as
     find_empty_slot_for_expand).  */
  hash2 = 1 + hash % (size - 2);
  for (;;)
    {
      index += hash2;
      if (index >= size)
	index -= size;

      entry = &htab->entries[index];
      if (!*entry)
	goto empty_entry;
      else if ((*entry)->entry_point == ptr)
	return entry;
    }

 empty_entry:
  if (!insert)
    return NULL;

  /* Reserve the empty slot; the caller stores the new value into it.  */
  htab->n_elements++;
  return entry;
}
  343. void *
  344. _dl_funcdesc_for (void *entry_point, void *got_value)
  345. {
  346. struct elf_resolve *tpnt = ((void**)got_value)[2];
  347. struct funcdesc_ht *ht = tpnt->funcdesc_ht;
  348. struct funcdesc_value **entry;
  349. _dl_assert (got_value == tpnt->loadaddr.got_value);
  350. if (! ht)
  351. {
  352. ht = htab_create ();
  353. if (! ht)
  354. return (void*)-1;
  355. tpnt->funcdesc_ht = ht;
  356. }
  357. entry = htab_find_slot (ht, entry_point, 1);
  358. if (*entry)
  359. {
  360. _dl_assert ((*entry)->entry_point == entry_point);
  361. return _dl_stabilize_funcdesc (*entry);
  362. }
  363. *entry = _dl_malloc (sizeof (struct funcdesc_value));
  364. (*entry)->entry_point = entry_point;
  365. (*entry)->got_value = got_value;
  366. return _dl_stabilize_funcdesc (*entry);
  367. }
/* If ADDRESS points to a function descriptor registered in some
   loaded module's descriptor table, return that descriptor's entry
   point; otherwise return ADDRESS unchanged.  */
static __always_inline void const *
_dl_lookup_address (void const *address)
{
  struct elf_resolve *rpnt;
  struct funcdesc_value const *fd;

  /* Make sure we don't make assumptions about its alignment.  */
  __asm__ ("" : "+r" (address));

  if ((Elf32_Addr)address & 7)
    /* It's not a function descriptor.  */
    return address;

  fd = (struct funcdesc_value const *)address;

  for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next)
    {
      /* A module with no descriptor table cannot own FD.  */
      if (! rpnt->funcdesc_ht)
	continue;

      /* A descriptor belongs to the module whose GOT it names.  */
      if (fd->got_value != rpnt->loadaddr.got_value)
	continue;

      /* ADDRESS temporarily holds the table slot here; it is only a
	 match if the slot holds this exact descriptor.  Otherwise
	 restore ADDRESS to FD and keep scanning.  */
      address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0);

      if (address && *(struct funcdesc_value *const*)address == fd)
	{
	  address = (*(struct funcdesc_value *const*)address)->entry_point;
	  break;
	}
      else
	address = fd;
    }

  return address;
}
/* Release every mapping described by LOADADDR's load map, then the
   map itself and (if present) the module's descriptor hash table.  */
void
__dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr,
		     struct funcdesc_ht *funcdesc_ht)
{
  int i;

  for (i = 0; i < loadaddr.map->nsegs; i++)
    {
      struct elf32_fdpic_loadseg *segdata;
      ssize_t offs;
      segdata = loadaddr.map->segs + i;

      /* FIXME:
	 A more cleaner way is to add type for struct elf32_fdpic_loadseg,
	 and release the memory according to the type.
	 Currently, we hardcode the memory address of L1 SRAM.  */
      /* Segments at 0xff8xxxxx and above were placed in on-chip SRAM
	 by __dl_map_segment (via _dl_sram_alloc), not mmapped.  */
      if ((segdata->addr & 0xff800000) == 0xff800000)
	{
	  _dl_sram_free ((void *)segdata->addr);
	  continue;
	}

      /* Undo the sub-page offset applied when mapping, so the whole
	 aligned region is unmapped (offs = p_vaddr mod ADDR_ALIGN+1).  */
      offs = (segdata->p_vaddr & ADDR_ALIGN);
      _dl_munmap ((void*)segdata->addr - offs,
		  segdata->p_memsz + offs);
    }

  /* _dl_unmap is only called for dlopen()ed libraries, for which
     calling free() is safe, or before we've completed the initial
     relocation, in which case calling free() is probably pointless,
     but still safe.  */
  _dl_free (loadaddr.map);
  if (funcdesc_ht)
    htab_delete (funcdesc_ht);
}
  427. static __always_inline int
  428. __dl_is_special_segment (Elf32_Ehdr *epnt,
  429. Elf32_Phdr *ppnt)
  430. {
  431. if (ppnt->p_type != PT_LOAD)
  432. return 0;
  433. if ((epnt->e_flags & EF_BFIN_CODE_IN_L1)
  434. && !(ppnt->p_flags & PF_W)
  435. && (ppnt->p_flags & PF_X))
  436. return 1;
  437. if ((epnt->e_flags & EF_BFIN_DATA_IN_L1)
  438. && (ppnt->p_flags & PF_W)
  439. && !(ppnt->p_flags & PF_X))
  440. return 1;
  441. /* 0xfeb00000, 0xfec00000, 0xff700000, 0xff800000, 0xff900000,
  442. and 0xffa00000 are also used in GNU ld and linux kernel.
  443. They need to be kept synchronized. */
  444. if (ppnt->p_vaddr == 0xff700000
  445. || ppnt->p_vaddr == 0xff800000
  446. || ppnt->p_vaddr == 0xff900000
  447. || ppnt->p_vaddr == 0xffa00000
  448. || ppnt->p_vaddr == 0xfeb00000
  449. || ppnt->p_vaddr == 0xfec00000)
  450. return 1;
  451. return 0;
  452. }
/* Map a "special" segment (see __dl_is_special_segment) into Blackfin
   on-chip memory.  Returns the SRAM address on success, NULL on
   failure to stage or read the data, and 0 (also NULL) if the segment
   matched none of the special cases.  */
static __always_inline char *
__dl_map_segment (Elf32_Ehdr *epnt,
		  Elf32_Phdr *ppnt,
		  int infile,
		  int flags)
{
  char *status, *tryaddr, *addr;
  size_t size;

  /* Case 1: read-only code destined for L1 instruction SRAM.  The
     file contents are first mmapped anywhere, then DMA-copied into
     L1 (instruction SRAM is not accessible by ordinary stores).  */
  if (((epnt->e_flags & EF_BFIN_CODE_IN_L1) || ppnt->p_vaddr == 0xffa00000)
      && !(ppnt->p_flags & PF_W)
      && (ppnt->p_flags & PF_X)) {
	status = (char *) _dl_mmap
		(tryaddr = 0,
		 size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz,
		 LXFLAGS(ppnt->p_flags),
		 flags | MAP_EXECUTABLE | MAP_DENYWRITE,
		 infile, ppnt->p_offset & OFFS_ALIGN);
	if (_dl_mmap_check_error(status)
	    || (tryaddr && tryaddr != status))
		return NULL;
	addr = (char *) _dl_sram_alloc (ppnt->p_filesz, L1_INST_SRAM);
	if (addr != NULL)
		_dl_dma_memcpy (addr, status + (ppnt->p_vaddr & ADDR_ALIGN), ppnt->p_filesz);
	/* The anonymous staging mapping is no longer needed.  */
	_dl_munmap (status, size);
	if (addr == NULL)
		_dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__);
	return addr;
  }

  /* Case 2: writable data destined for L1 data SRAM.  The bank is
     selected by the segment's link-time address; data can be read
     directly from the file into SRAM.  */
  if (((epnt->e_flags & EF_BFIN_DATA_IN_L1)
       || ppnt->p_vaddr == 0xff700000
       || ppnt->p_vaddr == 0xff800000
       || ppnt->p_vaddr == 0xff900000)
      && (ppnt->p_flags & PF_W)
      && !(ppnt->p_flags & PF_X)) {
	if (ppnt->p_vaddr == 0xff800000)
		addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_A_SRAM);
	else if (ppnt->p_vaddr == 0xff900000)
		addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_B_SRAM);
	else
		addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_SRAM);
	if (addr == NULL) {
		_dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__);
	} else {
		if (_DL_PREAD (infile, addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) {
			_dl_sram_free (addr);
			return NULL;
		}
		/* Zero the BSS tail (memsz beyond filesz).  */
		if (ppnt->p_filesz < ppnt->p_memsz)
			_dl_memset (addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz);
	}
	return addr;
  }

  /* Case 3: segments at the L2 SRAM base addresses.  */
  if (ppnt->p_vaddr == 0xfeb00000
      || ppnt->p_vaddr == 0xfec00000) {
	addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L2_SRAM);
	if (addr == NULL) {
		_dl_dprintf(2, "%s:%i: L2 allocation failed\n", _dl_progname, __LINE__);
	} else {
		if (_DL_PREAD (infile, addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) {
			_dl_sram_free (addr);
			return NULL;
		}
		/* Zero the BSS tail (memsz beyond filesz).  */
		if (ppnt->p_filesz < ppnt->p_memsz)
			_dl_memset (addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz);
	}
	return addr;
  }

  /* Not a special segment after all; caller falls back to mmap.  */
  return 0;
}