/* Copyright (C) 2003, 2004 Red Hat, Inc.
   Contributed by Alexandre Oliva <aoliva@redhat.com>

   This file is part of uClibc.

   uClibc is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   uClibc is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with uClibc; see the file COPYING.LIB.  If not, write to
   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
   USA.  */
#include <bfin_sram.h>

#ifndef _dl_assert
# define _dl_assert(expr)
#endif
/* Initialize a DL_LOADADDR_TYPE given a got pointer and a complete
   load map.  */
inline static void
__dl_init_loadaddr_map (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Addr dl_boot_got_pointer,
                        struct elf32_fdpic_loadmap *map)
{
  if (map->version != 0)
    {
      SEND_EARLY_STDERR ("Invalid loadmap version number\n");
      _dl_exit(-1);
    }
  if (map->nsegs == 0)
    {
      SEND_EARLY_STDERR ("Invalid segment count in loadmap\n");
      _dl_exit(-1);
    }
  loadaddr->got_value = dl_boot_got_pointer;
  loadaddr->map = map;
}
/* Figure out how many LOAD segments there are in the given headers,
   and allocate a block for the load map big enough for them.
   got_value will be properly initialized later on, with INIT_GOT.  */
inline static int
__dl_init_loadaddr (struct elf32_fdpic_loadaddr *loadaddr, Elf32_Phdr *ppnt,
                    int pcnt)
{
  int count = 0, i;
  size_t size;

  for (i = 0; i < pcnt; i++)
    if (ppnt[i].p_type == PT_LOAD)
      count++;

  loadaddr->got_value = 0;

  size = sizeof (struct elf32_fdpic_loadmap)
    + sizeof (struct elf32_fdpic_loadseg) * count;
  loadaddr->map = _dl_malloc (size);
  if (! loadaddr->map)
    _dl_exit (-1);

  loadaddr->map->version = 0;
  loadaddr->map->nsegs = 0;

  return count;
}
/* Incrementally initialize a load map.  */
inline static void
__dl_init_loadaddr_hdr (struct elf32_fdpic_loadaddr loadaddr, void *addr,
                        Elf32_Phdr *phdr, int maxsegs)
{
  struct elf32_fdpic_loadseg *segdata;

  if (loadaddr.map->nsegs == maxsegs)
    _dl_exit (-1);

  segdata = &loadaddr.map->segs[loadaddr.map->nsegs++];
  segdata->addr = (Elf32_Addr) addr;
  segdata->p_vaddr = phdr->p_vaddr;
  segdata->p_memsz = phdr->p_memsz;

#if defined (__SUPPORT_LD_DEBUG__)
  {
    extern char *_dl_debug;
    extern int _dl_debug_file;
    if (_dl_debug)
      _dl_dprintf(_dl_debug_file, "%i: mapped %x at %x, size %x\n",
                  loadaddr.map->nsegs-1,
                  segdata->p_vaddr, segdata->addr, segdata->p_memsz);
  }
#endif
}
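
/* The two helpers above are meant to be used together:
   __dl_init_loadaddr sizes the load map from the program headers, and
   __dl_init_loadaddr_hdr records each PT_LOAD mapping as it is
   established.  The sketch below is purely illustrative and not part of
   the loader; the phdr/ehdr variables and the do_map_segment() step are
   hypothetical stand-ins for what the FDPIC loader proper does.  */
#if 0
{
  struct elf32_fdpic_loadaddr loadaddr;
  int i, maxsegs;

  maxsegs = __dl_init_loadaddr (&loadaddr, ppnt, epnt->e_phnum);
  for (i = 0; i < epnt->e_phnum; i++)
    if (ppnt[i].p_type == PT_LOAD)
      {
        /* do_map_segment() is a hypothetical placeholder for the code
           that actually mmap()s or copies the segment into place.  */
        void *addr = do_map_segment (epnt, &ppnt[i]);
        __dl_init_loadaddr_hdr (loadaddr, addr, &ppnt[i], maxsegs);
      }
}
#endif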
inline static void __dl_loadaddr_unmap
(struct elf32_fdpic_loadaddr loadaddr, struct funcdesc_ht *funcdesc_ht);
/* Figure out whether the given address is in one of the mapped
   segments.  */
inline static int
__dl_addr_in_loadaddr (void *p, struct elf32_fdpic_loadaddr loadaddr)
{
  struct elf32_fdpic_loadmap *map = loadaddr.map;
  int c;

  for (c = 0; c < map->nsegs; c++)
    if ((void*)map->segs[c].addr <= p
        && (char*)p < (char*)map->segs[c].addr + map->segs[c].p_memsz)
      return 1;

  return 0;
}
inline static void * _dl_funcdesc_for (void *entry_point, void *got_value);
/* The hashcode handling code below is heavily inspired by libiberty's
   hashtab code, but with most adaptation points and support for
   deleting elements removed.

   Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov (vmakarov@cygnus.com).  */
inline static unsigned long
higher_prime_number (unsigned long n)
{
  /* These are primes that are near, but slightly smaller than, a
     power of two.  */
  static const unsigned long primes[] = {
    (unsigned long) 7,
    (unsigned long) 13,
    (unsigned long) 31,
    (unsigned long) 61,
    (unsigned long) 127,
    (unsigned long) 251,
    (unsigned long) 509,
    (unsigned long) 1021,
    (unsigned long) 2039,
    (unsigned long) 4093,
    (unsigned long) 8191,
    (unsigned long) 16381,
    (unsigned long) 32749,
    (unsigned long) 65521,
    (unsigned long) 131071,
    (unsigned long) 262139,
    (unsigned long) 524287,
    (unsigned long) 1048573,
    (unsigned long) 2097143,
    (unsigned long) 4194301,
    (unsigned long) 8388593,
    (unsigned long) 16777213,
    (unsigned long) 33554393,
    (unsigned long) 67108859,
    (unsigned long) 134217689,
    (unsigned long) 268435399,
    (unsigned long) 536870909,
    (unsigned long) 1073741789,
    (unsigned long) 2147483647,
    /* 4294967291L */
    ((unsigned long) 2147483647) + ((unsigned long) 2147483644),
  };
  const unsigned long *low = &primes[0];
  const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])];

  while (low != high)
    {
      const unsigned long *mid = low + (high - low) / 2;
      if (n > *mid)
        low = mid + 1;
      else
        high = mid;
    }

#if 0
  /* If we've run out of primes, abort.  */
  if (n > *low)
    {
      fprintf (stderr, "Cannot find prime bigger than %lu\n", n);
      abort ();
    }
#endif

  return *low;
}
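
/* Per-module hash table mapping code entry points to canonical FDPIC
   function descriptors (struct funcdesc_value).  The table uses open
   addressing; collisions are resolved by double hashing in
   htab_find_slot/find_empty_slot_for_expand below.  */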
struct funcdesc_ht
{
  /* Table itself.  */
  struct funcdesc_value **entries;

  /* Current size (in entries) of the hash table.  */
  size_t size;

  /* Current number of elements.  */
  size_t n_elements;
};
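
/* Derive a hash code from a pointer by discarding its low three bits,
   which for aligned pointers carry little or no information.  */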
inline static int
hash_pointer (const void *p)
{
  return (int) ((long)p >> 3);
}
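
/* Allocate and zero-initialize an empty hash table.  The initial size
   is deliberately tiny; htab_expand grows it on demand.  Returns NULL
   if allocation fails.  */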
inline static struct funcdesc_ht *
htab_create (void)
{
  struct funcdesc_ht *ht = _dl_malloc (sizeof (struct funcdesc_ht));

  if (! ht)
    return NULL;
  ht->size = 3;
  ht->entries = _dl_malloc (sizeof (struct funcdesc_value *) * ht->size);
  if (! ht->entries)
    return NULL;

  ht->n_elements = 0;

  _dl_memset (ht->entries, 0, sizeof (struct funcdesc_value *) * ht->size);

  return ht;
}
/* This is only called from _dl_loadaddr_unmap, so it's safe to call
   _dl_free().  See the discussion below.  */
inline static void
htab_delete (struct funcdesc_ht *htab)
{
  int i;

  for (i = htab->size - 1; i >= 0; i--)
    if (htab->entries[i])
      _dl_free (htab->entries[i]);

  _dl_free (htab->entries);
  _dl_free (htab);
}
/* Similar to htab_find_slot, but without several unwanted side effects:
   - Does not call htab->eq_f when it finds an existing entry.
   - Does not change the count of elements/searches/collisions in the
     hash table.
   This function also assumes there are no deleted entries in the table.
   HASH is the hash value for the element to be inserted.  */
inline static struct funcdesc_value **
find_empty_slot_for_expand (struct funcdesc_ht *htab, int hash)
{
  size_t size = htab->size;
  unsigned int index = hash % size;
  struct funcdesc_value **slot = htab->entries + index;
  int hash2;

  if (! *slot)
    return slot;

  hash2 = 1 + hash % (size - 2);
  for (;;)
    {
      index += hash2;
      if (index >= size)
        index -= size;

      slot = htab->entries + index;
      if (! *slot)
        return slot;
    }
}
/* The following function changes size of memory allocated for the
   entries and repeatedly inserts the table elements.  The occupancy
   of the table after the call will be about 50%.  Naturally the hash
   table must already exist.  Remember also that the place of the
   table entries is changed.  If memory allocation failures are allowed,
   this function will return zero, indicating that the table could not be
   expanded.  If all goes well, it will return a non-zero value.  */
inline static int
htab_expand (struct funcdesc_ht *htab)
{
  struct funcdesc_value **oentries;
  struct funcdesc_value **olimit;
  struct funcdesc_value **p;
  struct funcdesc_value **nentries;
  size_t nsize;

  oentries = htab->entries;
  olimit = oentries + htab->size;

  /* Resize only when table after removal of unused elements is either
     too full or too empty.  */
  if (htab->n_elements * 2 > htab->size)
    nsize = higher_prime_number (htab->n_elements * 2);
  else
    nsize = htab->size;

  nentries = _dl_malloc (sizeof (struct funcdesc_value *) * nsize);
  if (nentries == NULL)
    return 0;
  _dl_memset (nentries, 0, sizeof (struct funcdesc_value *) * nsize);
  htab->entries = nentries;
  htab->size = nsize;

  p = oentries;
  do
    {
      if (*p)
        *find_empty_slot_for_expand (htab, hash_pointer ((*p)->entry_point))
          = *p;

      p++;
    }
  while (p < olimit);

#if 0 /* We can't tell whether this was allocated by the _dl_malloc()
         built into ld.so or malloc() in the main executable or libc,
         and calling free() for something that wasn't malloc()ed could
         do Very Bad Things (TM).  Take the conservative approach
         here, potentially wasting as much memory as actually used by
         the hash table, even if multiple growths occur.  That's not
         so bad as to require some overengineered solution that would
         enable us to keep track of how it was allocated.  */
  _dl_free (oentries);
#endif

  return 1;
}
/* This function searches for a hash table slot containing an entry
   equal to the given element.  To look up an entry without inserting,
   call this with INSERT = 0; NULL is returned if the element is not
   present.  To insert an entry, call this with INSERT = 1, then write
   the value you want into the returned slot.  When inserting an entry,
   NULL may also be returned if memory allocation for an expanded table
   fails.  */
inline static struct funcdesc_value **
htab_find_slot (struct funcdesc_ht *htab, void *ptr, int insert)
{
  unsigned int index;
  int hash, hash2;
  size_t size;
  struct funcdesc_value **entry;

  if (htab->size * 3 <= htab->n_elements * 4
      && htab_expand (htab) == 0)
    return NULL;

  hash = hash_pointer (ptr);

  size = htab->size;
  index = hash % size;

  entry = &htab->entries[index];
  if (!*entry)
    goto empty_entry;
  else if ((*entry)->entry_point == ptr)
    return entry;

  hash2 = 1 + hash % (size - 2);
  for (;;)
    {
      index += hash2;
      if (index >= size)
        index -= size;

      entry = &htab->entries[index];
      if (!*entry)
        goto empty_entry;
      else if ((*entry)->entry_point == ptr)
        return entry;
    }

 empty_entry:
  if (!insert)
    return NULL;

  htab->n_elements++;
  return entry;
}
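
/* Return (creating it on first use) the canonical FDPIC function
   descriptor for ENTRY_POINT in the module whose GOT is GOT_VALUE.
   The per-module hash table is allocated lazily; if it cannot be
   created, (void*)-1 is returned.  This is also the insert-side usage
   of htab_find_slot above.  */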
void *
_dl_funcdesc_for (void *entry_point, void *got_value)
{
  struct elf_resolve *tpnt = ((void**)got_value)[2];
  struct funcdesc_ht *ht = tpnt->funcdesc_ht;
  struct funcdesc_value **entry;

  _dl_assert (got_value == tpnt->loadaddr.got_value);

  if (! ht)
    {
      ht = htab_create ();
      if (! ht)
        return (void*)-1;
      tpnt->funcdesc_ht = ht;
    }

  entry = htab_find_slot (ht, entry_point, 1);
  if (*entry)
    {
      _dl_assert ((*entry)->entry_point == entry_point);
      return _dl_stabilize_funcdesc (*entry);
    }

  *entry = _dl_malloc (sizeof (struct funcdesc_value));
  (*entry)->entry_point = entry_point;
  (*entry)->got_value = got_value;

  return _dl_stabilize_funcdesc (*entry);
}
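
/* Given an address that may be either a code address or a pointer to a
   function descriptor created by _dl_funcdesc_for, return the
   underlying code entry point.  Addresses that are not 8-byte aligned,
   or that do not match a descriptor in any loaded module's hash table,
   are returned unchanged.  */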
inline static void const *
_dl_lookup_address (void const *address)
{
  struct elf_resolve *rpnt;
  struct funcdesc_value const *fd;

  /* Make sure we don't make assumptions about its alignment.  */
  __asm__ ("" : "+r" (address));

  if ((Elf32_Addr)address & 7)
    /* It's not a function descriptor.  */
    return address;

  fd = (struct funcdesc_value const *)address;

  for (rpnt = _dl_loaded_modules; rpnt; rpnt = rpnt->next)
    {
      if (! rpnt->funcdesc_ht)
        continue;

      if (fd->got_value != rpnt->loadaddr.got_value)
        continue;

      address = htab_find_slot (rpnt->funcdesc_ht, (void*)fd->entry_point, 0);

      if (address && *(struct funcdesc_value *const*)address == fd)
        {
          address = (*(struct funcdesc_value *const*)address)->entry_point;
          break;
        }
      else
        address = fd;
    }

  return address;
}
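
/* Undo the mappings recorded in LOADADDR's load map, free the map
   itself, and, if the module had accumulated a function-descriptor
   hash table, release that too.  */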
void
__dl_loadaddr_unmap (struct elf32_fdpic_loadaddr loadaddr,
                     struct funcdesc_ht *funcdesc_ht)
{
  int i;

  for (i = 0; i < loadaddr.map->nsegs; i++)
    {
      struct elf32_fdpic_loadseg *segdata;
      ssize_t offs;
      segdata = loadaddr.map->segs + i;

      /* FIXME:
         A cleaner way would be to record a type in each
         struct elf32_fdpic_loadseg and release the memory according to
         that type.  Currently, we hardcode the memory address range of
         L1 SRAM.  */
      if ((segdata->addr & 0xff800000) == 0xff800000)
        {
          _dl_sram_free ((void *)segdata->addr);
          continue;
        }

      offs = (segdata->p_vaddr & ADDR_ALIGN);
      _dl_munmap ((void*)segdata->addr - offs,
                  segdata->p_memsz + offs);
    }

  /* _dl_unmap is only called for dlopen()ed libraries, for which
     calling free() is safe, or before we've completed the initial
     relocation, in which case calling free() is probably pointless,
     but still safe.  */
  _dl_free (loadaddr.map);

  if (funcdesc_ht)
    htab_delete (funcdesc_ht);
}
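
/* Return non-zero if the PT_LOAD segment described by PPNT must be
   handled specially, i.e. copied into on-chip L1 SRAM by
   __dl_map_segment below rather than simply mmap()ed, either because
   the ELF header flags request L1 placement or because the segment's
   virtual address falls in one of the reserved L1 regions.  */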
inline static int
__dl_is_special_segment (Elf32_Ehdr *epnt,
                         Elf32_Phdr *ppnt)
{
  if (ppnt->p_type != PT_LOAD)
    return 0;

  if ((epnt->e_flags & EF_BFIN_CODE_IN_L1)
      && !(ppnt->p_flags & PF_W)
      && (ppnt->p_flags & PF_X))
    return 1;

  if ((epnt->e_flags & EF_BFIN_DATA_IN_L1)
      && (ppnt->p_flags & PF_W)
      && !(ppnt->p_flags & PF_X))
    return 1;

  /* 0xff700000, 0xff800000, 0xff900000 and 0xffa00000 are also used in
     GNU ld and the Linux kernel.  They need to be kept synchronized.  */
  if (ppnt->p_vaddr == 0xff700000
      || ppnt->p_vaddr == 0xff800000
      || ppnt->p_vaddr == 0xff900000
      || ppnt->p_vaddr == 0xffa00000)
    return 1;

  return 0;
}
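
/* Map a special segment into L1 SRAM.  Code segments are first
   mmap()ed from the file and then copied into instruction SRAM with a
   DMA memcpy before the temporary mapping is dropped; data segments
   are read straight into data SRAM (bank A, bank B, or either,
   depending on the segment's virtual address) and zero-filled up to
   p_memsz.  Returns the L1 address, or NULL on failure.  */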
inline static char *
__dl_map_segment (Elf32_Ehdr *epnt,
                  Elf32_Phdr *ppnt,
                  int infile,
                  int flags)
{
  char *status, *tryaddr, *l1addr;
  size_t size;

  if (((epnt->e_flags & EF_BFIN_CODE_IN_L1) || ppnt->p_vaddr == 0xffa00000)
      && !(ppnt->p_flags & PF_W)
      && (ppnt->p_flags & PF_X)) {
    status = (char *) _dl_mmap
      (tryaddr = 0,
       size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz,
       LXFLAGS(ppnt->p_flags),
       flags | MAP_EXECUTABLE | MAP_DENYWRITE,
       infile, ppnt->p_offset & OFFS_ALIGN);
    if (_dl_mmap_check_error(status)
        || (tryaddr && tryaddr != status))
      return NULL;
    l1addr = (char *) _dl_sram_alloc (ppnt->p_filesz, L1_INST_SRAM);
    if (l1addr != NULL)
      _dl_dma_memcpy (l1addr, status + (ppnt->p_vaddr & ADDR_ALIGN), ppnt->p_filesz);
    _dl_munmap (status, size);
    if (l1addr == NULL)
      _dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__);
    return l1addr;
  }

  if (((epnt->e_flags & EF_BFIN_DATA_IN_L1)
       || ppnt->p_vaddr == 0xff700000
       || ppnt->p_vaddr == 0xff800000
       || ppnt->p_vaddr == 0xff900000)
      && (ppnt->p_flags & PF_W)
      && !(ppnt->p_flags & PF_X)) {
    if (ppnt->p_vaddr == 0xff800000)
      l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_A_SRAM);
    else if (ppnt->p_vaddr == 0xff900000)
      l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_B_SRAM);
    else
      l1addr = (char *) _dl_sram_alloc (ppnt->p_memsz, L1_DATA_SRAM);
    if (l1addr == NULL) {
      _dl_dprintf(2, "%s:%i: L1 allocation failed\n", _dl_progname, __LINE__);
    } else {
      if (_DL_PREAD (infile, l1addr, ppnt->p_filesz, ppnt->p_offset) != ppnt->p_filesz) {
        _dl_sram_free (l1addr);
        return NULL;
      }
      if (ppnt->p_filesz < ppnt->p_memsz)
        _dl_memset (l1addr + ppnt->p_filesz, 0, ppnt->p_memsz - ppnt->p_filesz);
    }
    return l1addr;
  }

  return 0;
}