HardenedBSD src tree https://hardenedbsd.org/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

5980 lines
157 KiB

  1. /*-
  2. * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  3. *
  4. * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
  5. * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
  6. * Copyright 2009-2013 Konstantin Belousov <kib@FreeBSD.ORG>.
  7. * Copyright 2012 John Marino <draco@marino.st>.
  8. * Copyright 2014-2017 The FreeBSD Foundation
  9. * All rights reserved.
  10. *
  11. * Portions of this software were developed by Konstantin Belousov
  12. * under sponsorship from the FreeBSD Foundation.
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in the
  21. * documentation and/or other materials provided with the distribution.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  24. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  25. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  26. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  27. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  28. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  29. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  30. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  32. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. /*
  35. * Dynamic linker for ELF.
  36. *
  37. * John Polstra <jdp@polstra.com>.
  38. */
  39. #include <sys/cdefs.h>
  40. __FBSDID("$FreeBSD$");
  41. #include <sys/param.h>
  42. #include <sys/mount.h>
  43. #include <sys/mman.h>
  44. #ifdef HARDENEDBSD
  45. #include <sys/pax.h>
  46. #endif
  47. #include <sys/stat.h>
  48. #include <sys/sysctl.h>
  49. #include <sys/uio.h>
  50. #include <sys/utsname.h>
  51. #include <sys/ktrace.h>
  52. #include <dlfcn.h>
  53. #include <err.h>
  54. #include <errno.h>
  55. #include <fcntl.h>
  56. #include <stdarg.h>
  57. #include <stdio.h>
  58. #include <stdlib.h>
  59. #include <string.h>
  60. #include <unistd.h>
  61. #include "debug.h"
  62. #include "rtld.h"
  63. #include "libmap.h"
  64. #include "paths.h"
  65. #include "rtld_tls.h"
  66. #include "rtld_printf.h"
  67. #include "rtld_malloc.h"
  68. #include "rtld_utrace.h"
  69. #include "notes.h"
  70. /* Types. */
  71. typedef void (*func_ptr_type)(void);
  72. typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
  73. #ifdef HARDENEDBSD
  74. struct integriforce_so_check {
  75. char isc_path[MAXPATHLEN];
  76. int isc_result;
  77. };
  78. #endif
  79. /* Variables that cannot be static: */
  80. extern struct r_debug r_debug; /* For GDB */
  81. extern int _thread_autoinit_dummy_decl;
  82. extern char* __progname;
  83. extern void (*__cleanup)(void);
  84. /*
  85. * Function declarations.
  86. */
  87. static const char *basename(const char *);
  88. static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
  89. const Elf_Dyn **, const Elf_Dyn **);
  90. static bool digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
  91. const Elf_Dyn *);
  92. static bool digest_dynamic(Obj_Entry *, int);
  93. static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
  94. static void distribute_static_tls(Objlist *, RtldLockState *);
  95. static Obj_Entry *dlcheck(void *);
  96. static int dlclose_locked(void *, RtldLockState *);
  97. static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
  98. int lo_flags, int mode, RtldLockState *lockstate);
  99. static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
  100. static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
  101. static bool donelist_check(DoneList *, const Obj_Entry *);
  102. static void errmsg_restore(char *);
  103. static char *errmsg_save(void);
  104. static void *fill_search_info(const char *, size_t, void *);
  105. static char *find_library(const char *, const Obj_Entry *, int *);
  106. static const char *gethints(bool);
  107. static void hold_object(Obj_Entry *);
  108. static void unhold_object(Obj_Entry *);
  109. static void init_dag(Obj_Entry *);
  110. static void init_marker(Obj_Entry *);
  111. static void init_pagesizes(Elf_Auxinfo **aux_info);
  112. static void init_rtld(caddr_t, Elf_Auxinfo **);
  113. static void initlist_add_neededs(Needed_Entry *, Objlist *);
  114. static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
  115. static int initlist_objects_ifunc(Objlist *, bool, int, RtldLockState *);
  116. static void linkmap_add(Obj_Entry *);
  117. static void linkmap_delete(Obj_Entry *);
  118. static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
  119. static void unload_filtees(Obj_Entry *, RtldLockState *);
  120. #if defined(HARDENEDBSD) && defined(SHLIBRANDOM)
  121. static void randomize_neededs(Obj_Entry *obj, int flags);
  122. #endif
  123. static int load_needed_objects(Obj_Entry *, int);
  124. static int load_preload_objects(void);
  125. static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
  126. static void map_stacks_exec(RtldLockState *);
  127. static int obj_disable_relro(Obj_Entry *);
  128. static int obj_enforce_relro(Obj_Entry *);
  129. static Obj_Entry *obj_from_addr(const void *);
  130. static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
  131. static void objlist_call_init(Objlist *, RtldLockState *);
  132. static void objlist_clear(Objlist *);
  133. static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
  134. static void objlist_init(Objlist *);
  135. static void objlist_push_head(Objlist *, Obj_Entry *);
  136. static void objlist_push_tail(Objlist *, Obj_Entry *);
  137. static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
  138. static void objlist_remove(Objlist *, Obj_Entry *);
  139. static int open_binary_fd(const char *argv0, bool search_in_path,
  140. const char **binpath_res);
  141. static int parse_args(char* argv[], int argc, bool *use_pathp, int *fdp,
  142. const char **argv0);
  143. static int parse_integer(const char *);
  144. static void *path_enumerate(const char *, path_enum_proc, const char *, void *);
  145. static void print_usage(const char *argv0);
  146. static void release_object(Obj_Entry *);
  147. static int relocate_object_dag(Obj_Entry *root, bool bind_now,
  148. Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
  149. static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
  150. int flags, RtldLockState *lockstate);
  151. static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
  152. RtldLockState *);
  153. static int resolve_object_ifunc(Obj_Entry *, bool, int, RtldLockState *);
  154. static int rtld_dirname(const char *, char *);
  155. static int rtld_dirname_abs(const char *, char *);
  156. static void *rtld_dlopen(const char *name, int fd, int mode);
  157. static void rtld_exit(void);
  158. static void rtld_nop_exit(void);
  159. static char *search_library_path(const char *, const char *, const char *,
  160. int *);
  161. static char *search_library_pathfds(const char *, const char *, int *);
  162. static const void **get_program_var_addr(const char *, RtldLockState *);
  163. static void set_program_var(const char *, const void *);
  164. static int symlook_default(SymLook *, const Obj_Entry *refobj);
  165. static int symlook_global(SymLook *, DoneList *);
  166. static void symlook_init_from_req(SymLook *, const SymLook *);
  167. static int symlook_list(SymLook *, const Objlist *, DoneList *);
  168. static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
  169. static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
  170. static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
  171. static void trace_loaded_objects(Obj_Entry *);
  172. static void unlink_object(Obj_Entry *);
  173. static void unload_object(Obj_Entry *, RtldLockState *lockstate);
  174. static void unref_dag(Obj_Entry *);
  175. static void ref_dag(Obj_Entry *);
  176. static char *origin_subst_one(Obj_Entry *, char *, const char *,
  177. const char *, bool);
  178. static char *origin_subst(Obj_Entry *, const char *);
  179. static bool obj_resolve_origin(Obj_Entry *obj);
  180. static void preinit_main(void);
  181. static int rtld_verify_versions(const Objlist *);
  182. static int rtld_verify_object_versions(Obj_Entry *);
  183. static void object_add_name(Obj_Entry *, const char *);
  184. static int object_match_name(const Obj_Entry *, const char *);
  185. static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
  186. static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
  187. struct dl_phdr_info *phdr_info);
  188. static uint32_t gnu_hash(const char *);
  189. static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
  190. const unsigned long);
  191. void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
  192. void _r_debug_postinit(struct link_map *) __noinline __exported;
  193. int __sys_openat(int, const char *, int, ...);
  194. /*
  195. * Data declarations.
  196. */
  197. static char *error_message; /* Message for dlerror(), or NULL */
  198. struct r_debug r_debug __exported; /* for GDB; */
  199. static bool libmap_disable; /* Disable libmap */
  200. static bool ld_loadfltr; /* Immediate filters processing */
  201. static char *libmap_override; /* Maps to use in addition to libmap.conf */
  202. static bool trust; /* False for setuid and setgid programs */
  203. static bool dangerous_ld_env; /* True if environment variables have been
  204. used to affect the libraries loaded */
  205. bool ld_bind_not; /* Disable PLT update */
  206. static char *ld_bind_now; /* Environment variable for immediate binding */
  207. static char *ld_debug; /* Environment variable for debugging */
  208. static char *ld_library_path; /* Environment variable for search path */
  209. static char *ld_library_dirs; /* Environment variable for library descriptors */
  210. static char *ld_preload; /* Environment variable for libraries to
  211. load first */
  212. static const char *ld_elf_hints_path; /* Environment variable for alternative hints path */
  213. static const char *ld_tracing; /* Called from ldd to print libs */
  214. static char *ld_utrace; /* Use utrace() to log events. */
  215. static struct obj_entry_q obj_list; /* Queue of all loaded objects */
  216. static Obj_Entry *obj_main; /* The main program shared object */
  217. static Obj_Entry obj_rtld; /* The dynamic linker shared object */
  218. static unsigned int obj_count; /* Number of objects in obj_list */
  219. static unsigned int obj_loads; /* Number of loads of objects (gen count) */
  220. #ifdef HARDENEDBSD
  221. static Elf_Word pax_flags = 0; /* PaX / HardenedBSD flags */
  222. #endif
  223. static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
  224. STAILQ_HEAD_INITIALIZER(list_global);
  225. static Objlist list_main = /* Objects loaded at program startup */
  226. STAILQ_HEAD_INITIALIZER(list_main);
  227. static Objlist list_fini = /* Objects needing fini() calls */
  228. STAILQ_HEAD_INITIALIZER(list_fini);
  229. Elf_Sym sym_zero; /* For resolving undefined weak refs. */
  230. #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
  231. extern Elf_Dyn _DYNAMIC;
  232. #pragma weak _DYNAMIC
  233. int dlclose(void *) __exported;
  234. char *dlerror(void) __exported;
  235. void *dlopen(const char *, int) __exported;
  236. void *fdlopen(int, int) __exported;
  237. void *dlsym(void *, const char *) __exported;
  238. dlfunc_t dlfunc(void *, const char *) __exported;
  239. void *dlvsym(void *, const char *, const char *) __exported;
  240. int dladdr(const void *, Dl_info *) __exported;
  241. void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
  242. void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
  243. int dlinfo(void *, int , void *) __exported;
  244. int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
  245. int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
  246. int _rtld_get_stack_prot(void) __exported;
  247. int _rtld_is_dlopened(void *) __exported;
  248. void _rtld_error(const char *, ...) __exported;
  249. /* Only here to fix -Wmissing-prototypes warnings */
  250. int __getosreldate(void);
  251. void __pthread_cxa_finalize(struct dl_phdr_info *a);
  252. func_ptr_type _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp);
  253. Elf_Addr _rtld_bind(Obj_Entry *obj, Elf_Size reloff);
  254. int npagesizes;
  255. static int osreldate;
  256. size_t *pagesizes;
  257. static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
  258. static int max_stack_flags;
  259. /*
  260. * Global declarations normally provided by crt1. The dynamic linker is
  261. * not built with crt1, so we have to provide them ourselves.
  262. */
  263. char *__progname;
  264. char **environ;
  265. /*
  266. * Used to pass argc, argv to init functions.
  267. */
  268. int main_argc;
  269. char **main_argv;
  270. /*
  271. * Globals to control TLS allocation.
  272. */
  273. size_t tls_last_offset; /* Static TLS offset of last module */
  274. size_t tls_last_size; /* Static TLS size of last module */
  275. size_t tls_static_space; /* Static TLS space allocated */
  276. static size_t tls_static_max_align;
  277. Elf_Addr tls_dtv_generation = 1; /* Used to detect when dtv size changes */
  278. int tls_max_index = 1; /* Largest module index allocated */
  279. static bool ld_library_path_rpath = false;
  280. /*
  281. * Globals for path names, and such
  282. */
  283. const char *ld_elf_hints_default = _PATH_ELF_HINTS;
  284. const char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
  285. const char *ld_path_rtld = _PATH_RTLD;
  286. const char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
  287. const char *ld_env_prefix = LD_;
  288. static void (*rtld_exit_ptr)(void);
  289. /*
  290. * Fill in a DoneList with an allocation large enough to hold all of
  291. * the currently-loaded objects. Keep this as a macro since it calls
  292. * alloca and we want that to occur within the scope of the caller.
  293. */
#define donelist_init(dlp) \
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
    assert((dlp)->objs != NULL), \
    (dlp)->num_alloc = obj_count, \
    (dlp)->num_used = 0)

/*
 * Log an rtld event via ld_utrace_log(), but only when utrace logging
 * was requested through the environment (ld_utrace != NULL).
 */
#define LD_UTRACE(e, h, mb, ms, r, n) do { \
	if (ld_utrace != NULL) \
		ld_utrace_log(e, h, mb, ms, r, n); \
	} while (0)
  303. static void
  304. ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
  305. int refcnt, const char *name)
  306. {
  307. struct utrace_rtld ut;
  308. static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;
  309. memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
  310. ut.event = event;
  311. ut.handle = handle;
  312. ut.mapbase = mapbase;
  313. ut.mapsize = mapsize;
  314. ut.refcnt = refcnt;
  315. bzero(ut.name, sizeof(ut.name));
  316. if (name)
  317. strlcpy(ut.name, name, sizeof(ut.name));
  318. utrace(&ut, sizeof(ut));
  319. }
  320. #ifdef RTLD_VARIANT_ENV_NAMES
  321. /*
  322. * construct the env variable based on the type of binary that's
  323. * running.
  324. */
  325. static inline const char *
  326. _LD(const char *var)
  327. {
  328. static char buffer[128];
  329. strlcpy(buffer, ld_env_prefix, sizeof(buffer));
  330. strlcat(buffer, var, sizeof(buffer));
  331. return (buffer);
  332. }
  333. #else
  334. #define _LD(x) LD_ x
  335. #endif
  336. /*
  337. * Main entry point for dynamic linking. The first argument is the
  338. * stack pointer. The stack is expected to be laid out as described
  339. * in the SVR4 ABI specification, Intel 386 Processor Supplement.
  340. * Specifically, the stack pointer points to a word containing
  341. * ARGC. Following that in the stack is a null-terminated sequence
  342. * of pointers to argument strings. Then comes a null-terminated
  343. * sequence of pointers to environment strings. Finally, there is a
  344. * sequence of "auxiliary vector" entries.
  345. *
  346. * The second argument points to a place to store the dynamic linker's
  347. * exit procedure pointer and the third to a place to store the main
  348. * program's object.
  349. *
  350. * The return value is the main program's entry point.
  351. */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
	Elf_Auxinfo *aux, *auxp, *auxpf, *aux_info[AT_COUNT];
	Objlist_Entry *entry;
	Obj_Entry *last_interposer, *obj, *preload_tail;
	const Elf_Phdr *phdr;
	Objlist initlist;
	RtldLockState lockstate;
	struct stat st;
	Elf_Addr *argcp;
	char **argv, **env, **envp, *kexecpath, *library_path_rpath;
	const char *argv0, *binpath;
	caddr_t imgentry;
	char buf[MAXPATHLEN];
	int argc, fd, i, mib[4], old_osrel, osrel, phnum, rtld_argc;
	size_t sz;
	bool dir_enable, direct_exec, explicit_fd, search_in_path;

	/*
	 * On entry, the dynamic linker itself has not been relocated yet.
	 * Be very careful not to reference any global data until after
	 * init_rtld has returned.  It is OK to reference file-scope statics
	 * and string constants, and to call static and global functions.
	 */

	/* Find the auxiliary vector on the stack. */
	argcp = sp;
	argc = *sp++;
	argv = (char **) sp;
	sp += argc + 1;	/* Skip over arguments and NULL terminator */
	env = (char **) sp;
	while (*sp++ != 0)	/* Skip over environment, and NULL terminator */
		;
	aux = (Elf_Auxinfo *) sp;

	/* Digest the auxiliary vector: index entries by a_type for fast lookup. */
	for (i = 0; i < AT_COUNT; i++)
		aux_info[i] = NULL;
	for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
		if (auxp->a_type < AT_COUNT)
			aux_info[auxp->a_type] = auxp;
	}

	/* Initialize and relocate ourselves. */
	assert(aux_info[AT_BASE] != NULL);
	init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);

	/* Globals are safe to touch from here on. */
	__progname = obj_rtld.path;
	argv0 = argv[0] != NULL ? argv[0] : "(null)";
	environ = env;
	main_argc = argc;
	main_argv = argv;

#ifdef HARDENEDBSD
	/* Load PaX flags, then scrub them from the aux vector. */
	if (aux_info[AT_PAXFLAGS] != NULL) {
		pax_flags = aux_info[AT_PAXFLAGS]->a_un.a_val;
		aux_info[AT_PAXFLAGS]->a_un.a_val = 0;
	}
#endif

	trust = !issetugid();
	direct_exec = false;

	md_abi_variant_hook(aux_info);

	fd = -1;
	if (aux_info[AT_EXECFD] != NULL) {
		fd = aux_info[AT_EXECFD]->a_un.a_val;
	} else {
		assert(aux_info[AT_PHDR] != NULL);
		phdr = (const Elf_Phdr *)aux_info[AT_PHDR]->a_un.a_ptr;
		/*
		 * If the kernel mapped *our* phdrs as the executable's,
		 * rtld itself was exec'ed directly (e.g. "ld-elf.so.1 prog").
		 */
		if (phdr == obj_rtld.phdr) {
			if (!trust) {
				_rtld_error("Tainted process refusing to run binary %s",
				    argv0);
				rtld_die();
			}
			direct_exec = true;

			/*
			 * Set osrel for us, it is later reset to the binary's
			 * value before first instruction of code from the binary
			 * is executed.
			 */
			mib[0] = CTL_KERN;
			mib[1] = KERN_PROC;
			mib[2] = KERN_PROC_OSREL;
			mib[3] = getpid();
			osrel = __FreeBSD_version;
			sz = sizeof(old_osrel);
			(void)sysctl(mib, 4, &old_osrel, &sz, &osrel, sizeof(osrel));

			dbg("opening main program in direct exec mode");
			if (argc >= 2) {
				rtld_argc = parse_args(argv, argc, &search_in_path, &fd, &argv0);
				explicit_fd = (fd != -1);
				binpath = NULL;
				if (!explicit_fd)
					fd = open_binary_fd(argv0, search_in_path, &binpath);
				if (fstat(fd, &st) == -1) {
					_rtld_error("Failed to fstat FD %d (%s): %s", fd,
					    explicit_fd ? "user-provided descriptor" : argv0,
					    rtld_strerror(errno));
					rtld_die();
				}

				/*
				 * Rough emulation of the permission checks done by
				 * execve(2), only Unix DACs are checked, ACLs are
				 * ignored.  Preserve the semantic of disabling owner
				 * to execute if owner x bit is cleared, even if
				 * others x bit is enabled.
				 * mmap(2) does not allow to mmap with PROT_EXEC if
				 * binary's file comes from noexec mount.  We cannot
				 * set a text reference on the binary.
				 */
				dir_enable = false;
				if (st.st_uid == geteuid()) {
					if ((st.st_mode & S_IXUSR) != 0)
						dir_enable = true;
				} else if (st.st_gid == getegid()) {
					if ((st.st_mode & S_IXGRP) != 0)
						dir_enable = true;
				} else if ((st.st_mode & S_IXOTH) != 0) {
					dir_enable = true;
				}
				if (!dir_enable) {
					_rtld_error("No execute permission for binary %s",
					    argv0);
					rtld_die();
				}

				/*
				 * For direct exec mode, argv[0] is the interpreter
				 * name, we must remove it and shift arguments left
				 * before invoking binary main.  Since stack layout
				 * places environment pointers and aux vectors right
				 * after the terminating NULL, we must shift
				 * environment and aux as well.
				 */
				main_argc = argc - rtld_argc;
				for (i = 0; i <= main_argc; i++)
					argv[i] = argv[i + rtld_argc];
				*argcp -= rtld_argc;
				environ = env = envp = argv + main_argc + 1;
				/* Slide the environment block left in place. */
				do {
					*envp = *(envp + rtld_argc);
					envp++;
				} while (*envp != NULL);
				aux = auxp = (Elf_Auxinfo *)envp;
				auxpf = (Elf_Auxinfo *)(envp + rtld_argc);
				/* XXXKIB insert place for AT_EXECPATH if not present */
				for (;; auxp++, auxpf++) {
					*auxp = *auxpf;
					if (auxp->a_type == AT_NULL)
						break;
				}
				/* Point AT_EXECPATH auxv and aux_info to the binary path. */
				if (binpath == NULL) {
					aux_info[AT_EXECPATH] = NULL;
				} else {
					if (aux_info[AT_EXECPATH] == NULL) {
						aux_info[AT_EXECPATH] = xmalloc(sizeof(Elf_Auxinfo));
						aux_info[AT_EXECPATH]->a_type = AT_EXECPATH;
					}
					aux_info[AT_EXECPATH]->a_un.a_ptr = __DECONST(void *,
					    binpath);
				}
			} else {
				_rtld_error("No binary");
				rtld_die();
			}
		}
	}

	ld_bind_now = getenv(_LD("BIND_NOW"));

	/*
	 * If the process is tainted, then we un-set the dangerous environment
	 * variables.  The process will be marked as tainted until setuid(2)
	 * is called.  If any child process calls setuid(2) we do not want any
	 * future processes to honor the potentially un-safe variables.
	 */
	if (!trust) {
		if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
		    unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
		    unsetenv(_LD("LIBMAP_DISABLE")) || unsetenv(_LD("BIND_NOT")) ||
		    unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
		    unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
			_rtld_error("environment corrupt; aborting");
			rtld_die();
		}
	}
	/* Cache the LD_* knobs that steer library search and binding. */
	ld_debug = getenv(_LD("DEBUG"));
	if (ld_bind_now == NULL)
		ld_bind_not = getenv(_LD("BIND_NOT")) != NULL;
	libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
	libmap_override = getenv(_LD("LIBMAP"));
	ld_library_path = getenv(_LD("LIBRARY_PATH"));
	ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
	ld_preload = getenv(_LD("PRELOAD"));
	ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
	ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
	library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
	if (library_path_rpath != NULL) {
		if (library_path_rpath[0] == 'y' ||
		    library_path_rpath[0] == 'Y' ||
		    library_path_rpath[0] == '1')
			ld_library_path_rpath = true;
		else
			ld_library_path_rpath = false;
	}
	dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
	    (ld_library_path != NULL) || (ld_preload != NULL) ||
	    (ld_elf_hints_path != NULL) || ld_loadfltr;
	ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
	ld_utrace = getenv(_LD("UTRACE"));

	if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
		ld_elf_hints_path = ld_elf_hints_default;

	if (ld_debug != NULL && *ld_debug != '\0')
		debug = 1;
	dbg("%s is initialized, base address = %p", __progname,
	    (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
	dbg("RTLD dynamic = %p", obj_rtld.dynamic);
	dbg("RTLD pltgot = %p", obj_rtld.pltgot);

	dbg("initializing thread locks");
	lockdflt_init();

	/* Kernel-supplied stack protection overrides the compiled-in default. */
	if (aux_info[AT_STACKPROT] != NULL &&
	    aux_info[AT_STACKPROT]->a_un.a_val != 0)
		stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;

	/*
	 * Load the main program, or process its program header if it is
	 * already loaded.
	 */
	if (fd != -1) {	/* Load the main program. */
		dbg("loading main program");
		obj_main = map_object(fd, argv0, NULL);
		close(fd);
		if (obj_main == NULL)
			rtld_die();
		max_stack_flags = obj_main->stack_flags;
		/* Drop PF_X if the kernel already denied PROT_EXEC stacks. */
		if ((max_stack_flags & PF_X) == PF_X)
			if ((stack_prot & PROT_EXEC) == 0)
				max_stack_flags &= ~(PF_X);
	} else {	/* Main program already loaded. */
		dbg("processing main program's program header");
		assert(aux_info[AT_PHDR] != NULL);
		phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
		assert(aux_info[AT_PHNUM] != NULL);
		phnum = aux_info[AT_PHNUM]->a_un.a_val;
		assert(aux_info[AT_PHENT] != NULL);
		assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
		assert(aux_info[AT_ENTRY] != NULL);
		imgentry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
		if ((obj_main = digest_phdr(phdr, phnum, imgentry, argv0)) == NULL)
			rtld_die();
	}

	/* Determine the pathname recorded for the main object. */
	if (aux_info[AT_EXECPATH] != NULL && fd == -1) {
		kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
		dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
		if (kexecpath[0] == '/')
			obj_main->path = kexecpath;
		else if (getcwd(buf, sizeof(buf)) == NULL ||
		    strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
		    strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
			obj_main->path = xstrdup(argv0);
		else
			obj_main->path = xstrdup(buf);
	} else {
		dbg("No AT_EXECPATH or direct exec");
		obj_main->path = xstrdup(argv0);
	}
	dbg("obj_main path %s", obj_main->path);
	obj_main->mainprog = true;

#ifndef COMPAT_32BIT
	/*
	 * Get the actual dynamic linker pathname from the executable if
	 * possible.  (It should always be possible.)  That ensures that
	 * gdb will find the right dynamic linker even if a non-standard
	 * one is being used.
	 */
	if (obj_main->interp != NULL &&
	    strcmp(obj_main->interp, obj_rtld.path) != 0) {
		free(obj_rtld.path);
		obj_rtld.path = xstrdup(obj_main->interp);
		__progname = obj_rtld.path;
	}
#endif

	if (!digest_dynamic(obj_main, 0))
		rtld_die();
	dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
	    obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
	    obj_main->dynsymcount);

	linkmap_add(obj_main);
	linkmap_add(&obj_rtld);

	/* Link the main program into the list of objects. */
	TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
	obj_count++;
	obj_loads++;

	/* Initialize a fake symbol for resolving undefined weak references. */
	sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
	sym_zero.st_shndx = SHN_UNDEF;
	sym_zero.st_value = -(uintptr_t)obj_main->relocbase;

	if (!libmap_disable)
		libmap_disable = (bool)lm_init(libmap_override);

	dbg("loading LD_PRELOAD libraries");
	if (load_preload_objects() == -1)
		rtld_die();
	preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));

	dbg("loading needed objects");
	if (load_needed_objects(obj_main, ld_tracing != NULL ? RTLD_LO_TRACE :
	    0) == -1)
		rtld_die();

	/*
	 * Make a list of all objects loaded at startup.  Interposing
	 * objects are placed right after the last interposer seen so far.
	 */
	last_interposer = obj_main;
	TAILQ_FOREACH(obj, &obj_list, next) {
		if (obj->marker)
			continue;
		if (obj->z_interpose && obj != obj_main) {
			objlist_put_after(&list_main, last_interposer, obj);
			last_interposer = obj;
		} else {
			objlist_push_tail(&list_main, obj);
		}
		obj->refcount++;
	}

	dbg("checking for required versions");
	if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
		rtld_die();

	if (ld_tracing) {	/* We're done */
		trace_loaded_objects(obj_main);
		exit(0);
	}

	if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
		dump_relocations(obj_main);
		exit (0);
	}

	/*
	 * Processing tls relocations requires having the tls offsets
	 * initialized.  Prepare offsets before starting initial
	 * relocation processing.
	 */
	dbg("initializing initial thread local storage offsets");
	STAILQ_FOREACH(entry, &list_main, link) {
		/*
		 * Allocate all the initial objects out of the static TLS
		 * block even if they didn't ask for it.
		 */
		allocate_tls_offset(entry->obj);
	}

	if (relocate_objects(obj_main,
	    ld_bind_now != NULL && *ld_bind_now != '\0',
	    &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
		rtld_die();

	dbg("doing copy relocations");
	if (do_copy_relocations(obj_main) == -1)
		rtld_die();

	if (getenv(_LD("DUMP_REL_POST")) != NULL) {
		dump_relocations(obj_main);
		exit (0);
	}

	ifunc_init(aux);

	/*
	 * Setup TLS for main thread.  This must be done after the
	 * relocations are processed, since tls initialization section
	 * might be the subject for relocations.
	 */
	dbg("initializing initial thread local storage");
	allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));

	dbg("initializing key program variables");
	set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
	set_program_var("environ", env);
	set_program_var("__elf_aux_vector", aux);

	/* Make a list of init functions to call. */
	objlist_init(&initlist);
	initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
	    preload_tail, &initlist);

	r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */

	map_stacks_exec(NULL);

	if (!obj_main->crt_no_init) {
		/*
		 * Make sure we don't call the main program's init and fini
		 * functions for binaries linked with old crt1 which calls
		 * _init itself.
		 */
		obj_main->init = obj_main->fini = (Elf_Addr)NULL;
		obj_main->preinit_array = obj_main->init_array =
		    obj_main->fini_array = (Elf_Addr)NULL;
	}

	/*
	 * Execute MD initializers required before we call the objects'
	 * init functions.
	 */
	pre_init();

	if (direct_exec) {
		/* Set osrel for direct-execed binary */
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = KERN_PROC_OSREL;
		mib[3] = getpid();
		osrel = obj_main->osrel;
		sz = sizeof(old_osrel);
		dbg("setting osrel to %d", osrel);
		(void)sysctl(mib, 4, &old_osrel, &sz, &osrel, sizeof(osrel));
	}

	wlock_acquire(rtld_bind_lock, &lockstate);

	dbg("resolving ifuncs");
	if (initlist_objects_ifunc(&initlist, ld_bind_now != NULL &&
	    *ld_bind_now != '\0', SYMLOOK_EARLY, &lockstate) == -1)
		rtld_die();

	rtld_exit_ptr = rtld_exit;
	if (obj_main->crt_no_init)
		preinit_main();
	objlist_call_init(&initlist, &lockstate);
	_r_debug_postinit(&obj_main->linkmap);
	objlist_clear(&initlist);
	dbg("loading filtees");
	TAILQ_FOREACH(obj, &obj_list, next) {
		if (obj->marker)
			continue;
		if (ld_loadfltr || obj->z_loadfltr)
			load_filtees(obj, 0, &lockstate);
	}

	dbg("enforcing main obj relro");
	if (obj_enforce_relro(obj_main) == -1)
		rtld_die();

	lock_release(rtld_bind_lock, &lockstate);

	dbg("transferring control to program entry point = %p", obj_main->entry);

	/* Return the exit procedure and the program entry point. */
	*exit_proc = rtld_exit_ptr;
	*objp = obj_main;
	return (func_ptr_type) obj_main->entry;
}
  772. void *
  773. rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
  774. {
  775. void *ptr;
  776. Elf_Addr target;
  777. ptr = (void *)make_function_pointer(def, obj);
  778. target = call_ifunc_resolver(ptr);
  779. return ((void *)target);
  780. }
/*
 * NB: MIPS uses a private version of this function (_mips_rtld_bind).
 * Changes to this function should be applied there as well.
 */

/*
 * Lazy-binding entry point, reached from the PLT trampoline on the
 * first call through a jump slot.  "reloff" is the byte offset of the
 * PLT relocation within obj's DT_JMPREL table.  Resolves the symbol,
 * patches the jump slot, and returns the value the trampoline should
 * transfer control to.
 */
Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
	const Elf_Rel *rel;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr *where;
	Elf_Addr target;
	RtldLockState lockstate;

	rlock_acquire(rtld_bind_lock, &lockstate);
	/*
	 * NOTE(review): the sigsetjmp target appears to be used to upgrade
	 * to the exclusive lock when a concurrent writer forces a restart
	 * via siglongjmp — confirm against the rtld lock implementation.
	 */
	if (sigsetjmp(lockstate.env, 0) != 0)
		lock_upgrade(rtld_bind_lock, &lockstate);
	/* DT_JMPREL holds either Rel or Rela entries; pick the populated one. */
	if (obj->pltrel)
		rel = (const Elf_Rel *)((const char *)obj->pltrel + reloff);
	else
		rel = (const Elf_Rel *)((const char *)obj->pltrela + reloff);

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
	    NULL, &lockstate);
	if (def == NULL)
		rtld_die();
	/* IFUNC symbols bind to whatever their resolver returns. */
	if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
		target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
	else
		target = (Elf_Addr)(defobj->relocbase + def->st_value);

	dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
	    defobj->strtab + def->st_name, basename(obj->path),
	    (void *)target, basename(defobj->path));

	/*
	 * Write the new contents for the jmpslot.  Note that depending on
	 * architecture, the value which we need to return back to the
	 * lazy binding trampoline may or may not be the target
	 * address.  The value returned from reloc_jmpslot() is the value
	 * that the trampoline needs.
	 */
	target = reloc_jmpslot(where, target, defobj, obj, rel);
	lock_release(rtld_bind_lock, &lockstate);
	return target;
}
  824. /*
  825. * Error reporting function. Use it like printf. If formats the message
  826. * into a buffer, and sets things up so that the next call to dlerror()
  827. * will return the message.
  828. */
  829. void
  830. _rtld_error(const char *fmt, ...)
  831. {
  832. static char buf[512];
  833. va_list ap;
  834. va_start(ap, fmt);
  835. rtld_vsnprintf(buf, sizeof buf, fmt, ap);
  836. error_message = buf;
  837. va_end(ap);
  838. LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
  839. }
  840. /*
  841. * Return a dynamically-allocated copy of the current error message, if any.
  842. */
  843. static char *
  844. errmsg_save(void)
  845. {
  846. return error_message == NULL ? NULL : xstrdup(error_message);
  847. }
  848. /*
  849. * Restore the current error message from a copy which was previously saved
  850. * by errmsg_save(). The copy is freed.
  851. */
  852. static void
  853. errmsg_restore(char *saved_msg)
  854. {
  855. if (saved_msg == NULL)
  856. error_message = NULL;
  857. else {
  858. _rtld_error("%s", saved_msg);
  859. free(saved_msg);
  860. }
  861. }
  862. static const char *
  863. basename(const char *name)
  864. {
  865. const char *p = strrchr(name, '/');
  866. return p != NULL ? p + 1 : name;
  867. }
  868. static struct utsname uts;
/*
 * Substitute every occurrence of the keyword "kw" (e.g. "$OSNAME") in
 * the path string "real" with "subst", returning a newly allocated
 * string.  If obj is non-NULL, the replacement text is obj's resolved
 * $ORIGIN directory and the subst argument is ignored.  When may_free
 * is true, "real" is consumed: it is either freed or handed back
 * directly on the no-substitution path.
 */
static char *
origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
    const char *subst, bool may_free)
{
	char *p, *p1, *res, *resp;
	int subst_len, kw_len, subst_count, old_len, new_len;

	kw_len = strlen(kw);

	/*
	 * First, count the number of the keyword occurrences, to
	 * preallocate the final string.
	 */
	for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
		p1 = strstr(p, kw);
		if (p1 == NULL)
			break;
	}

	/*
	 * If the keyword is not found, just return.
	 *
	 * Return non-substituted string if resolution failed.  We
	 * cannot do anything more reasonable, the failure mode of the
	 * caller is unresolved library anyway.
	 */
	if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
		return (may_free ? real : xstrdup(real));

	/* $ORIGIN substitution uses the object's resolved directory. */
	if (obj != NULL)
		subst = obj->origin_path;

	/*
	 * There is indeed something to substitute.  Calculate the
	 * length of the resulting string, and allocate it.
	 */
	subst_len = strlen(subst);
	old_len = strlen(real);
	new_len = old_len + (subst_len - kw_len) * subst_count;
	res = xmalloc(new_len + 1);

	/*
	 * Now, execute the substitution loop.  resp tracks the write
	 * position in res and is kept NUL-terminated after every copy.
	 */
	for (p = real, resp = res, *resp = '\0';;) {
		p1 = strstr(p, kw);
		if (p1 != NULL) {
			/* Copy the prefix before keyword. */
			memcpy(resp, p, p1 - p);
			resp += p1 - p;
			/* Keyword replacement. */
			memcpy(resp, subst, subst_len);
			resp += subst_len;
			*resp = '\0';
			p = p1 + kw_len;
		} else
			break;
	}

	/* Copy to the end of string and finish. */
	strcat(resp, p);
	if (may_free)
		free(real);
	return (res);
}
  927. static char *
  928. origin_subst(Obj_Entry *obj, const char *real)
  929. {
  930. char *res1, *res2, *res3, *res4;
  931. if (obj == NULL || !trust)
  932. return (xstrdup(real));
  933. if (uts.sysname[0] == '\0') {
  934. if (uname(&uts) != 0) {
  935. _rtld_error("utsname failed: %d", errno);
  936. return (NULL);
  937. }
  938. }
  939. /* __DECONST is safe here since without may_free real is unchanged */
  940. res1 = origin_subst_one(obj, __DECONST(char *, real), "$ORIGIN", NULL,
  941. false);
  942. res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
  943. res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
  944. res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
  945. return (res4);
  946. }
  947. void
  948. rtld_die(void)
  949. {
  950. const char *msg = dlerror();
  951. if (msg == NULL)
  952. msg = "Fatal error";
  953. rtld_fdputstr(STDERR_FILENO, _BASENAME_RTLD ": ");
  954. rtld_fdputstr(STDERR_FILENO, msg);
  955. rtld_fdputchar(STDERR_FILENO, '\n');
  956. _exit(1);
  957. }
/*
 * Process a shared object's DYNAMIC section, and save the important
 * information in its Obj_Entry structure.
 *
 * This is the first of two passes: entries whose interpretation needs
 * the string table (DT_RPATH, DT_SONAME, DT_RUNPATH) are only recorded
 * through the output pointers and resolved later in digest_dynamic2().
 */
static void
digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
    const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
{
	const Elf_Dyn *dynp;
	Needed_Entry **needed_tail = &obj->needed;
	Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
	Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
	const Elf_Hashelt *hashtab;
	const Elf32_Word *hashval;
	Elf32_Word bkt, nmaskwords;
	unsigned int bloom_size32;
	int plttype = DT_REL;	/* PLT reloc format; DT_PLTREL may override. */

	*dyn_rpath = NULL;
	*dyn_soname = NULL;
	*dyn_runpath = NULL;

	obj->bind_now = false;
	for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			obj->rel = (const Elf_Rel *)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_RELSZ:
			obj->relsize = dynp->d_un.d_val;
			break;

		case DT_RELENT:
			assert(dynp->d_un.d_val == sizeof(Elf_Rel));
			break;

		case DT_JMPREL:
			obj->pltrel = (const Elf_Rel *)
			    (obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_PLTRELSZ:
			obj->pltrelsize = dynp->d_un.d_val;
			break;

		case DT_RELA:
			obj->rela = (const Elf_Rela *)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_RELASZ:
			obj->relasize = dynp->d_un.d_val;
			break;

		case DT_RELAENT:
			assert(dynp->d_un.d_val == sizeof(Elf_Rela));
			break;

		case DT_PLTREL:
			plttype = dynp->d_un.d_val;
			assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
			break;

		case DT_SYMTAB:
			obj->symtab = (const Elf_Sym *)
			    (obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_SYMENT:
			assert(dynp->d_un.d_val == sizeof(Elf_Sym));
			break;

		case DT_STRTAB:
			obj->strtab = (const char *)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_STRSZ:
			obj->strsize = dynp->d_un.d_val;
			break;

		case DT_VERNEED:
			obj->verneed = (const Elf_Verneed *)(obj->relocbase +
			    dynp->d_un.d_val);
			break;

		case DT_VERNEEDNUM:
			obj->verneednum = dynp->d_un.d_val;
			break;

		case DT_VERDEF:
			obj->verdef = (const Elf_Verdef *)(obj->relocbase +
			    dynp->d_un.d_val);
			break;

		case DT_VERDEFNUM:
			obj->verdefnum = dynp->d_un.d_val;
			break;

		case DT_VERSYM:
			obj->versyms = (const Elf_Versym *)(obj->relocbase +
			    dynp->d_un.d_val);
			break;

		case DT_HASH:
			{
			/*
			 * SysV hash layout: nbuckets, nchains, then the
			 * bucket and chain arrays back to back.
			 */
			hashtab = (const Elf_Hashelt *)(obj->relocbase +
			    dynp->d_un.d_ptr);
			obj->nbuckets = hashtab[0];
			obj->nchains = hashtab[1];
			obj->buckets = hashtab + 2;
			obj->chains = obj->buckets + obj->nbuckets;
			obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
			  obj->buckets != NULL;
			}
			break;

		case DT_GNU_HASH:
			{
			/*
			 * GNU hash layout: nbuckets, symndx, maskwords,
			 * shift2, Bloom filter, buckets, chains.
			 */
			hashtab = (const Elf_Hashelt *)(obj->relocbase +
			    dynp->d_un.d_ptr);
			obj->nbuckets_gnu = hashtab[0];
			obj->symndx_gnu = hashtab[1];
			nmaskwords = hashtab[2];
			bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
			obj->maskwords_bm_gnu = nmaskwords - 1;
			obj->shift2_gnu = hashtab[3];
			obj->bloom_gnu = (const Elf_Addr *)(hashtab + 4);
			obj->buckets_gnu = hashtab + 4 + bloom_size32;
			obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
			  obj->symndx_gnu;
			/* Number of bitmask words is required to be power of 2 */
			obj->valid_hash_gnu = powerof2(nmaskwords) &&
			    obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
			}
			break;

		case DT_NEEDED:
			/* rtld itself never loads dependencies of its own. */
			if (!obj->rtld) {
				Needed_Entry *nep = NEW(Needed_Entry);
				nep->name = dynp->d_un.d_val;
				nep->obj = NULL;
				nep->next = NULL;

				*needed_tail = nep;
				needed_tail = &nep->next;
			}
			break;

		case DT_FILTER:
			if (!obj->rtld) {
				Needed_Entry *nep = NEW(Needed_Entry);
				nep->name = dynp->d_un.d_val;
				nep->obj = NULL;
				nep->next = NULL;

				*needed_filtees_tail = nep;
				needed_filtees_tail = &nep->next;

				/*
				 * Temporarily stash the string-table
				 * *offset* in l_refname; it is converted
				 * to a real pointer after the loop, once
				 * strtab is known.
				 */
				if (obj->linkmap.l_refname == NULL)
					obj->linkmap.l_refname = (char *)dynp->d_un.d_val;
			}
			break;

		case DT_AUXILIARY:
			if (!obj->rtld) {
				Needed_Entry *nep = NEW(Needed_Entry);
				nep->name = dynp->d_un.d_val;
				nep->obj = NULL;
				nep->next = NULL;

				*needed_aux_filtees_tail = nep;
				needed_aux_filtees_tail = &nep->next;
			}
			break;

		case DT_PLTGOT:
			obj->pltgot = (Elf_Addr *)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_TEXTREL:
			obj->textrel = true;
			break;

		case DT_SYMBOLIC:
			obj->symbolic = true;
			break;

		case DT_RPATH:
			/*
			 * We have to wait until later to process this, because we
			 * might not have gotten the address of the string table yet.
			 */
			*dyn_rpath = dynp;
			break;

		case DT_SONAME:
			*dyn_soname = dynp;
			break;

		case DT_RUNPATH:
			*dyn_runpath = dynp;
			break;

		case DT_INIT:
			obj->init = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_PREINIT_ARRAY:
			obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_PREINIT_ARRAYSZ:
			obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
			break;

		case DT_INIT_ARRAY:
			obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_INIT_ARRAYSZ:
			obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
			break;

		case DT_FINI:
			obj->fini = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_FINI_ARRAY:
			obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
			break;

		case DT_FINI_ARRAYSZ:
			obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
			break;

		/*
		 * Don't process DT_DEBUG on MIPS as the dynamic section
		 * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
		 */

#ifndef __mips__
		case DT_DEBUG:
			if (!early)
				dbg("Filling in DT_DEBUG entry");
			(__DECONST(Elf_Dyn *, dynp))->d_un.d_ptr = (Elf_Addr)&r_debug;
			break;
#endif

		case DT_FLAGS:
			if (dynp->d_un.d_val & DF_ORIGIN)
				obj->z_origin = true;
			if (dynp->d_un.d_val & DF_SYMBOLIC)
				obj->symbolic = true;
			if (dynp->d_un.d_val & DF_TEXTREL)
				obj->textrel = true;
			if (dynp->d_un.d_val & DF_BIND_NOW)
				obj->bind_now = true;
			if (dynp->d_un.d_val & DF_STATIC_TLS)
				obj->static_tls = true;
			break;

#ifdef __mips__
		case DT_MIPS_LOCAL_GOTNO:
			obj->local_gotno = dynp->d_un.d_val;
			break;

		case DT_MIPS_SYMTABNO:
			obj->symtabno = dynp->d_un.d_val;
			break;

		case DT_MIPS_GOTSYM:
			obj->gotsym = dynp->d_un.d_val;
			break;

		case DT_MIPS_RLD_MAP:
			*((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
			break;

		case DT_MIPS_PLTGOT:
			obj->mips_pltgot = (Elf_Addr *)(obj->relocbase +
			    dynp->d_un.d_ptr);
			break;

#endif

#ifdef __powerpc64__
		case DT_PPC64_GLINK:
			obj->glink = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
			break;
#endif

		case DT_FLAGS_1:
			if (dynp->d_un.d_val & DF_1_NOOPEN)
				obj->z_noopen = true;
			if (dynp->d_un.d_val & DF_1_ORIGIN)
				obj->z_origin = true;
			if (dynp->d_un.d_val & DF_1_GLOBAL)
				obj->z_global = true;
			if (dynp->d_un.d_val & DF_1_BIND_NOW)
				obj->bind_now = true;
			if (dynp->d_un.d_val & DF_1_NODELETE)
				obj->z_nodelete = true;
			if (dynp->d_un.d_val & DF_1_LOADFLTR)
				obj->z_loadfltr = true;
			if (dynp->d_un.d_val & DF_1_INTERPOSE)
				obj->z_interpose = true;
			if (dynp->d_un.d_val & DF_1_NODEFLIB)
				obj->z_nodeflib = true;
			if (dynp->d_un.d_val & DF_1_PIE)
				obj->z_pie = true;
			break;

		default:
			if (!early) {
				dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
				    (long)dynp->d_tag);
			}
			break;
		}
	}

	obj->traced = false;

	/* DT_PLTREL said the PLT relocs are Rela; move them to the *rela fields. */
	if (plttype == DT_RELA) {
		obj->pltrela = (const Elf_Rela *) obj->pltrel;
		obj->pltrel = NULL;
		obj->pltrelasize = obj->pltrelsize;
		obj->pltrelsize = 0;
	}

	/* Determine size of dynsym table (equal to nchains of sysv hash) */
	if (obj->valid_hash_sysv)
		obj->dynsymcount = obj->nchains;
	else if (obj->valid_hash_gnu) {
		/*
		 * No SysV hash: count symbols by walking every GNU hash
		 * chain to its stop bit (low bit set on the last entry).
		 */
		obj->dynsymcount = 0;
		for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
			if (obj->buckets_gnu[bkt] == 0)
				continue;
			hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
			do
				obj->dynsymcount++;
			while ((*hashval++ & 1u) == 0);
		}
		obj->dynsymcount += obj->symndx_gnu;
	}

	/* Convert the DT_FILTER offset stashed above into a string pointer. */
	if (obj->linkmap.l_refname != NULL)
		obj->linkmap.l_refname = obj->strtab + (unsigned long)obj->
		    linkmap.l_refname;
}
  1250. static bool
  1251. obj_resolve_origin(Obj_Entry *obj)
  1252. {
  1253. if (obj->origin_path != NULL)
  1254. return (true);
  1255. obj->origin_path = xmalloc(PATH_MAX);
  1256. return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
  1257. }
/*
 * Second pass of dynamic-section digestion.  Now that the string table
 * address is known, resolve the entries that digest_dynamic1() could
 * only record: the library search path (DT_RUNPATH takes precedence
 * over DT_RPATH — only one of the two is consulted) and DT_SONAME.
 * $ORIGIN and related tokens are expanded in the chosen path.
 * Returns false if $ORIGIN resolution fails for an object flagged
 * with DF_ORIGIN.
 */
static bool
digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
    const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
{
	if (obj->z_origin && !obj_resolve_origin(obj))
		return (false);

	if (dyn_runpath != NULL) {
		obj->runpath = (const char *)obj->strtab + dyn_runpath->d_un.d_val;
		obj->runpath = origin_subst(obj, obj->runpath);
	} else if (dyn_rpath != NULL) {
		obj->rpath = (const char *)obj->strtab + dyn_rpath->d_un.d_val;
		obj->rpath = origin_subst(obj, obj->rpath);
	}
	if (dyn_soname != NULL)
		object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
	return (true);
}
  1275. static bool
  1276. digest_dynamic(Obj_Entry *obj, int early)
  1277. {
  1278. const Elf_Dyn *dyn_rpath;
  1279. const Elf_Dyn *dyn_soname;
  1280. const Elf_Dyn *dyn_runpath;
  1281. digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
  1282. return (digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath));
  1283. }
/*
 * Process a shared object's program header.  This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker.  It creates and
 * returns an Obj_Entry structure.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
	Obj_Entry *obj;
	const Elf_Phdr *phlimit = phdr + phnum;
	const Elf_Phdr *ph;
	Elf_Addr note_start, note_end;
	int nsegs = 0;

	obj = obj_new();
	/*
	 * Locate PT_PHDR first: the difference between the actual address
	 * of the header table and its p_vaddr gives the load bias
	 * (relocbase) used to translate every other virtual address.
	 */
	for (ph = phdr; ph < phlimit; ph++) {
		if (ph->p_type != PT_PHDR)
			continue;

		obj->phdr = phdr;
		obj->phsize = ph->p_memsz;
		obj->relocbase = __DECONST(char *, phdr) - ph->p_vaddr;
		break;
	}

	/* Default stack permissions, overridden by PT_GNU_STACK below. */
	obj->stack_flags = PF_R | PF_W;

	for (ph = phdr; ph < phlimit; ph++) {
		switch (ph->p_type) {

		case PT_INTERP:
			obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
			break;

		case PT_LOAD:
			if (nsegs == 0) {	/* First load segment */
				obj->vaddrbase = trunc_page(ph->p_vaddr);
				obj->mapbase = obj->vaddrbase + obj->relocbase;
				obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
				  obj->vaddrbase;
			} else {		/* Last load segment */
				obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
				  obj->vaddrbase;
			}
			nsegs++;
			break;

		case PT_DYNAMIC:
			obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
			break;

		case PT_TLS:
			/* The main object's static TLS block gets index 1. */
			obj->tlsindex = 1;
			obj->tlssize = ph->p_memsz;
			obj->tlsalign = ph->p_align;
			obj->tlsinitsize = ph->p_filesz;
			obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
			obj->tlspoffset = ph->p_offset;
			break;

		case PT_GNU_STACK:
			obj->stack_flags = ph->p_flags;
			break;

		case PT_GNU_RELRO:
			obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
			obj->relro_size = round_page(ph->p_memsz);
			break;

		case PT_NOTE:
			note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
			note_end = note_start + ph->p_filesz;
			digest_notes(obj, note_start, note_end);
			break;
		}
	}
	if (nsegs < 1) {
		_rtld_error("%s: too few PT_LOAD segments", path);
		return NULL;
	}

	obj->entry = entry;
	return obj;
}
/*
 * Walk the ELF notes in [note_start, note_end) and record the
 * FreeBSD-vendor notes of interest: the osrel ABI tag, the ABI
 * feature-control word (fctl0), and the "crt does not call _init"
 * marker.  Notes with any other vendor, size, or type are skipped.
 */
void
digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
{
	const Elf_Note *note;
	const char *note_name;
	uintptr_t p;

	/* Name and descriptor are each padded to 4-byte alignment. */
	for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
	    note = (const Elf_Note *)((const char *)(note + 1) +
	      roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
	      roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
		if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
		    note->n_descsz != sizeof(int32_t))
			continue;
		if (note->n_type != NT_FREEBSD_ABI_TAG &&
		    note->n_type != NT_FREEBSD_FEATURE_CTL &&
		    note->n_type != NT_FREEBSD_NOINIT_TAG)
			continue;
		note_name = (const char *)(note + 1);
		if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
		    sizeof(NOTE_FREEBSD_VENDOR)) != 0)
			continue;
		switch (note->n_type) {
		case NT_FREEBSD_ABI_TAG:
			/* FreeBSD osrel note */
			p = (uintptr_t)(note + 1);
			p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
			obj->osrel = *(const int32_t *)(p);
			dbg("note osrel %d", obj->osrel);
			break;
		case NT_FREEBSD_FEATURE_CTL:
			/* FreeBSD ABI feature control note */
			p = (uintptr_t)(note + 1);
			p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
			obj->fctl0 = *(const uint32_t *)(p);
			dbg("note fctl0 %#x", obj->fctl0);
			break;
		case NT_FREEBSD_NOINIT_TAG:
			/* FreeBSD 'crt does not call init' note */
			obj->crt_no_init = true;
			dbg("note crt_no_init");
			break;
		}
	}
}
/*
 * Validate a dlopen()-style handle: it must be an object currently on
 * the global object list with a live reference count and a live
 * dlopen reference count.  Returns its Obj_Entry, or NULL (with an
 * error message recorded for dlerror()) if the handle is invalid.
 */
static Obj_Entry *
dlcheck(void *handle)
{
	Obj_Entry *obj;

	/* obj is NULL after the loop if the handle is not on the list. */
	TAILQ_FOREACH(obj, &obj_list, next) {
		if (obj == (Obj_Entry *) handle)
			break;
	}

	if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
		_rtld_error("Invalid shared object handle %p", handle);
		return NULL;
	}
	return obj;
}
  1415. /*
  1416. * If the given object is already in the donelist, return true. Otherwise
  1417. * add the object to the list and return false.
  1418. */
  1419. static bool
  1420. donelist_check(DoneList *dlp, const Obj_Entry *obj)
  1421. {
  1422. unsigned int i;
  1423. for (i = 0; i < dlp->num_used; i++)
  1424. if (dlp->objs[i] == obj)
  1425. return true;
  1426. /*
  1427. * Our donelist allocation should always be sufficient. But if
  1428. * our threads locking isn't working properly, more shared objects
  1429. * could have been loaded since we allocated the list. That should
  1430. * never happen, but we'll handle it properly just in case it does.
  1431. */
  1432. if (dlp->num_used < dlp->num_alloc)
  1433. dlp->objs[dlp->num_used++] = obj;
  1434. return false;
  1435. }
  1436. /*
  1437. * Hash function for symbol table lookup. Don't even think about changing
  1438. * this. It is specified by the System V ABI.
  1439. */
  1440. unsigned long
  1441. elf_hash(const char *name)
  1442. {
  1443. const unsigned char *p = (const unsigned char *) name;
  1444. unsigned long h = 0;
  1445. unsigned long g;
  1446. while (*p != '\0') {
  1447. h = (h << 4) + *p++;
  1448. if ((g = h & 0xf0000000) != 0)
  1449. h ^= g >> 24;
  1450. h &= ~g;
  1451. }
  1452. return h;
  1453. }
  1454. /*
  1455. * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
  1456. * unsigned in case it's implemented with a wider type.
  1457. */
  1458. static uint32_t
  1459. gnu_hash(const char *s)
  1460. {
  1461. uint32_t h;
  1462. unsigned char c;
  1463. h = 5381;
  1464. for (c = *s; c != '\0'; c = *++s)
  1465. h = h * 33 + c;
  1466. return (h & 0xffffffff);
  1467. }
/*
 * Find the library with the given name, and return its full pathname.
 * The returned string is dynamically allocated.  Generates an error
 * message and returns NULL if the library cannot be found.
 *
 * If the second argument is non-NULL, then it refers to an already-
 * loaded shared object, whose library search path will be searched.
 *
 * If a library is successfully located via LD_LIBRARY_PATH_FDS, its
 * descriptor (which is close-on-exec) will be passed out via the third
 * argument.
 *
 * The search order is:
 *   DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
 *   DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
 *   LD_LIBRARY_PATH
 *   DT_RUNPATH in the referencing file
 *   ldconfig hints (if -z nodefaultlib, filter out default library directories
 *	 from list)
 *   /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
 *
 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
 */
static char *
find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
{
	char *pathname, *refobj_path;
	const char *name;
	bool nodeflib, objgiven;

	objgiven = refobj != NULL;

	/* Apply any libmap substitution for the referencing object first. */
	if (libmap_disable || !objgiven ||
	    (name = lm_find(refobj->path, xname)) == NULL)
		name = xname;

	if (strchr(name, '/') != NULL) {	/* Hard coded pathname */
		if (name[0] != '/' && !trust) {
			_rtld_error("Absolute pathname required "
			    "for shared object \"%s\"", name);
			return (NULL);
		}
		/* Expand $ORIGIN etc. and use the path as given. */
		return (origin_subst(__DECONST(Obj_Entry *, refobj),
		    __DECONST(char *, name)));
	}

	dbg(" Searching for \"%s\"", name);
	refobj_path = objgiven ? refobj->path : NULL;

	/*
	 * If refobj->rpath != NULL, then refobj->runpath is NULL.  Fall
	 * back to pre-conforming behaviour if user requested so with
	 * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
	 * nodeflib.
	 */
	if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
		/* Pre-conforming order: LD_LIBRARY_PATH before DT_RPATH. */
		pathname = search_library_path(name, ld_library_path,
		    refobj_path, fdp);
		if (pathname != NULL)
			return (pathname);
		if (refobj != NULL) {
			pathname = search_library_path(name, refobj->rpath,
			    refobj_path, fdp);
			if (pathname != NULL)
				return (pathname);
		}
		pathname = search_library_pathfds(name, ld_library_dirs, fdp);
		if (pathname != NULL)
			return (pathname);
		pathname = search_library_path(name, gethints(false),
		    refobj_path, fdp);
		if (pathname != NULL)
			return (pathname);
		pathname = search_library_path(name, ld_standard_library_path,
		    refobj_path, fdp);
		if (pathname != NULL)
			return (pathname);
	} else {
		/* Conforming order, as documented in the block comment above. */
		nodeflib = objgiven ? refobj->z_nodeflib : false;
		if (objgiven) {
			pathname = search_library_path(name, refobj->rpath,
			    refobj->path, fdp);
			if (pathname != NULL)
				return (pathname);
		}
		/* DSO without its own DT_RUNPATH inherits the main object's rpath. */
		if (objgiven && refobj->runpath == NULL && refobj != obj_main) {
			pathname = search_library_path(name, obj_main->rpath,
			    refobj_path, fdp);
			if (pathname != NULL)
				return (pathname);
		}
		pathname = search_library_path(name, ld_library_path,
		    refobj_path, fdp);
		if (pathname != NULL)
			return (pathname);
		if (objgiven) {
			pathname = search_library_path(name, refobj->runpath,
			    refobj_path, fdp);
			if (pathname != NULL)
				return (pathname);
		}
		pathname = search_library_pathfds(name, ld_library_dirs, fdp);
		if (pathname != NULL)
			return (pathname);
		pathname = search_library_path(name, gethints(nodeflib),
		    refobj_path, fdp);
		if (pathname != NULL)
			return (pathname);
		if (objgiven && !nodeflib) {
			pathname = search_library_path(name,
			    ld_standard_library_path, refobj_path, fdp);
			if (pathname != NULL)
				return (pathname);
		}
	}

	if (objgiven && refobj->path != NULL) {
		_rtld_error("Shared object \"%s\" not found, "
		    "required by \"%s\"", name, basename(refobj->path));
	} else {
		_rtld_error("Shared object \"%s\" not found", name);
	}
	return (NULL);
}
/*
 * Given a symbol number in a referencing object, find the corresponding
 * definition of the symbol.  Returns a pointer to the symbol, or NULL if
 * no definition was found.  Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
 */
const Elf_Sym *
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
    const Obj_Entry **defobj_out, int flags, SymCache *cache,
    RtldLockState *lockstate)
{
	const Elf_Sym *ref;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	const Ver_Entry *ve;
	SymLook req;
	const char *name;
	int res;

	/*
	 * If we have already found this symbol, get the information from
	 * the cache.
	 */
	if (symnum >= refobj->dynsymcount)
		return NULL;	/* Bad object */
	if (cache != NULL && cache[symnum].sym != NULL) {
		*defobj_out = cache[symnum].obj;
		return cache[symnum].sym;
	}

	ref = refobj->symtab + symnum;
	name = refobj->strtab + ref->st_name;
	def = NULL;
	defobj = NULL;
	ve = NULL;

	/*
	 * We don't have to do a full scale lookup if the symbol is local.
	 * We know it will bind to the instance in this load module; to
	 * which we already have a pointer (ie ref). By not doing a lookup,
	 * we not only improve performance, but it also avoids unresolvable
	 * symbols when local symbols are not in the hash table. This has
	 * been seen with the ia64 toolchain.
	 */
	if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
		/* STT_SECTION references should never reach the relocator. */
		if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
			_rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
			    symnum);
		}
		/* Version-aware global lookup across the search order. */
		symlook_init(&req, name);
		req.flags = flags;
		ve = req.ventry = fetch_ventry(refobj, symnum);
		req.lockstate = lockstate;
		res = symlook_default(&req, refobj);
		if (res == 0) {
			def = req.sym_out;
			defobj = req.defobj_out;
		}
	} else {
		def = ref;
		defobj = refobj;
	}

	/*
	 * If we found no definition and the reference is weak, treat the
	 * symbol as having the value zero.
	 */
	if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
		def = &sym_zero;
		defobj = obj_main;
	}

	if (def != NULL) {
		*defobj_out = defobj;
		/* Record the information in the cache to avoid subsequent lookups. */
		if (cache != NULL) {
			cache[symnum].sym = def;
			cache[symnum].obj = defobj;
		}
	} else {
		/* rtld resolving its own symbols reports errors elsewhere. */
		if (refobj != &obj_rtld)
			_rtld_error("%s: Undefined symbol \"%s%s%s\"", refobj->path, name,
			    ve != NULL ? "@" : "", ve != NULL ? ve->name : "");
	}
	return def;
}
  1667. /*
  1668. * Return the search path from the ldconfig hints file, reading it if
  1669. * necessary. If nostdlib is true, then the default search paths are
  1670. * not added to result.
  1671. *
  1672. * Returns NULL if there are problems with the hints file,
  1673. * or if the search path there is empty.
  1674. */
static const char *
gethints(bool nostdlib)
{
    static char *filtered_path;		/* Cached hints minus standard paths. */
    static const char *hints;		/* Cached raw dirlist; "" marks a bad file. */
    static struct elfhints_hdr hdr;
    struct fill_search_info_args sargs, hargs;
    struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
    struct dl_serpath *SLPpath, *hintpath;
    char *p;
    struct stat hint_stat;
    unsigned int SLPndx, hintndx, fndx, fcount;
    int fd;
    size_t flen;
    uint32_t dl;
    bool skip;
    /* First call, read the hints file */
    if (hints == NULL) {
	/* Keep from trying again in case the hints file is bad. */
	hints = "";
	if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
	    return (NULL);
	/*
	 * Check of hdr.dirlistlen value against type limit
	 * intends to pacify static analyzers. Further
	 * paranoia leads to checks that dirlist is fully
	 * contained in the file range.
	 */
	if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
	    hdr.magic != ELFHINTS_MAGIC ||
	    hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
	    fstat(fd, &hint_stat) == -1) {
cleanup1:
	    close(fd);
	    hdr.dirlistlen = 0;
	    return (NULL);
	}
	/* Overflow-checked accumulation of strtab + dirlist + dirlistlen. */
	dl = hdr.strtab;
	if (dl + hdr.dirlist < dl)
	    goto cleanup1;
	dl += hdr.dirlist;
	if (dl + hdr.dirlistlen < dl)
	    goto cleanup1;
	dl += hdr.dirlistlen;
	if (dl > hint_stat.st_size)
	    goto cleanup1;
	/* Read the dirlist plus its NUL terminator and verify it. */
	p = xmalloc(hdr.dirlistlen + 1);
	if (pread(fd, p, hdr.dirlistlen + 1,
	    hdr.strtab + hdr.dirlist) != (ssize_t)hdr.dirlistlen + 1 ||
	    p[hdr.dirlistlen] != '\0') {
	    free(p);
	    goto cleanup1;
	}
	hints = p;
	close(fd);
    }
    /*
     * If caller agreed to receive list which includes the default
     * paths, we are done. Otherwise, if we still did not
     * calculated filtered result, do it now.
     */
    if (!nostdlib)
	return (hints[0] != '\0' ? hints : NULL);
    if (filtered_path != NULL)
	goto filt_ret;
    /*
     * Obtain the list of all configured search paths, and the
     * list of the default paths.
     *
     * First estimate the size of the results.
     */
    smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    smeta.dls_cnt = 0;
    hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
    hmeta.dls_cnt = 0;
    sargs.request = RTLD_DI_SERINFOSIZE;
    sargs.serinfo = &smeta;
    hargs.request = RTLD_DI_SERINFOSIZE;
    hargs.serinfo = &hmeta;
    path_enumerate(ld_standard_library_path, fill_search_info, NULL,
	&sargs);
    path_enumerate(hints, fill_search_info, NULL, &hargs);
    SLPinfo = xmalloc(smeta.dls_size);
    hintinfo = xmalloc(hmeta.dls_size);
    /*
     * Next fetch both sets of paths.
     */
    sargs.request = RTLD_DI_SERINFO;
    sargs.serinfo = SLPinfo;
    sargs.serpath = &SLPinfo->dls_serpath[0];
    sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
    hargs.request = RTLD_DI_SERINFO;
    hargs.serinfo = hintinfo;
    hargs.serpath = &hintinfo->dls_serpath[0];
    hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
    path_enumerate(ld_standard_library_path, fill_search_info, NULL,
	&sargs);
    path_enumerate(hints, fill_search_info, NULL, &hargs);
    /*
     * Now calculate the difference between two sets, by excluding
     * standard paths from the full set.  The result cannot be longer
     * than the raw dirlist, so hdr.dirlistlen + 1 bytes suffice.
     */
    fndx = 0;
    fcount = 0;
    filtered_path = xmalloc(hdr.dirlistlen + 1);
    hintpath = &hintinfo->dls_serpath[0];
    for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
	skip = false;
	SLPpath = &SLPinfo->dls_serpath[0];
	/*
	 * Check each standard path against current.
	 */
	for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
	    /* matched, skip the path */
	    if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
		skip = true;
		break;
	    }
	}
	if (skip)
	    continue;
	/*
	 * Not matched against any standard path, add the path
	 * to result. Separate consequtive paths with ':'.
	 */
	if (fcount > 0) {
	    filtered_path[fndx] = ':';
	    fndx++;
	}
	fcount++;
	/* Exact-length copy; the buffer is NUL-terminated after the loop. */
	flen = strlen(hintpath->dls_name);
	strncpy((filtered_path + fndx), hintpath->dls_name, flen);
	fndx += flen;
    }
    filtered_path[fndx] = '\0';
    free(SLPinfo);
    free(hintinfo);
filt_ret:
    return (filtered_path[0] != '\0' ? filtered_path : NULL);
}
  1815. static void
  1816. init_dag(Obj_Entry *root)
  1817. {
  1818. const Needed_Entry *needed;
  1819. const Objlist_Entry *elm;
  1820. DoneList donelist;
  1821. if (root->dag_inited)
  1822. return;
  1823. donelist_init(&donelist);
  1824. /* Root object belongs to own DAG. */
  1825. objlist_push_tail(&root->dldags, root);
  1826. objlist_push_tail(&root->dagmembers, root);
  1827. donelist_check(&donelist, root);
  1828. /*
  1829. * Add dependencies of root object to DAG in breadth order
  1830. * by exploiting the fact that each new object get added
  1831. * to the tail of the dagmembers list.
  1832. */
  1833. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  1834. for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
  1835. if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
  1836. continue;
  1837. objlist_push_tail(&needed->obj->dldags, root);
  1838. objlist_push_tail(&root->dagmembers, needed->obj);
  1839. }
  1840. }
  1841. root->dag_inited = true;
  1842. }
  1843. static void
  1844. init_marker(Obj_Entry *marker)
  1845. {
  1846. bzero(marker, sizeof(*marker));
  1847. marker->marker = true;
  1848. }
  1849. Obj_Entry *
  1850. globallist_curr(const Obj_Entry *obj)
  1851. {
  1852. for (;;) {
  1853. if (obj == NULL)
  1854. return (NULL);
  1855. if (!obj->marker)
  1856. return (__DECONST(Obj_Entry *, obj));
  1857. obj = TAILQ_PREV(obj, obj_entry_q, next);
  1858. }
  1859. }
  1860. Obj_Entry *
  1861. globallist_next(const Obj_Entry *obj)
  1862. {
  1863. for (;;) {
  1864. obj = TAILQ_NEXT(obj, next);
  1865. if (obj == NULL)
  1866. return (NULL);
  1867. if (!obj->marker)
  1868. return (__DECONST(Obj_Entry *, obj));
  1869. }
  1870. }
  1871. /* Prevent the object from being unmapped while the bind lock is dropped. */
  1872. static void
  1873. hold_object(Obj_Entry *obj)
  1874. {
  1875. obj->holdcount++;
  1876. }
  1877. static void
  1878. unhold_object(Obj_Entry *obj)
  1879. {
  1880. assert(obj->holdcount > 0);
  1881. if (--obj->holdcount == 0 && obj->unholdfree)
  1882. release_object(obj);
  1883. }
  1884. static void
  1885. process_z(Obj_Entry *root)
  1886. {
  1887. const Objlist_Entry *elm;
  1888. Obj_Entry *obj;
  1889. /*
  1890. * Walk over object DAG and process every dependent object
  1891. * that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need
  1892. * to grow their own DAG.
  1893. *
  1894. * For DF_1_GLOBAL, DAG is required for symbol lookups in
  1895. * symlook_global() to work.
  1896. *
  1897. * For DF_1_NODELETE, the DAG should have its reference upped.
  1898. */
  1899. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  1900. obj = elm->obj;
  1901. if (obj == NULL)
  1902. continue;
  1903. if (obj->z_nodelete && !obj->ref_nodel) {
  1904. dbg("obj %s -z nodelete", obj->path);
  1905. init_dag(obj);
  1906. ref_dag(obj);
  1907. obj->ref_nodel = true;
  1908. }
  1909. if (obj->z_global && objlist_find(&list_global, obj) == NULL) {
  1910. dbg("obj %s -z global", obj->path);
  1911. objlist_push_tail(&list_global, obj);
  1912. init_dag(obj);
  1913. }
  1914. }
  1915. }
/*
 * Initialize the dynamic linker. The argument is the address at which
 * the dynamic linker has been mapped into memory. The primary task of
 * this function is to relocate the dynamic linker.
 */
static void
init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
{
    Obj_Entry objtmp;	/* Temporary rtld object */
    const Elf_Ehdr *ehdr;
    const Elf_Dyn *dyn_rpath;
    const Elf_Dyn *dyn_soname;
    const Elf_Dyn *dyn_runpath;
#ifdef RTLD_INIT_PAGESIZES_EARLY
    /* The page size is required by the dynamic memory allocator. */
    init_pagesizes(aux_info);
#endif
    /*
     * Conjure up an Obj_Entry structure for the dynamic linker.
     *
     * The "path" member can't be initialized yet because string constants
     * cannot yet be accessed. Below we will set it correctly.
     */
    memset(&objtmp, 0, sizeof(objtmp));
    objtmp.path = NULL;
    objtmp.rtld = true;
    objtmp.mapbase = mapbase;
#ifdef PIC
    objtmp.relocbase = mapbase;
#endif
    objtmp.dynamic = rtld_dynamic(&objtmp);
    digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
    assert(objtmp.needed == NULL);
#if !defined(__mips__)
    /* MIPS has a bogus DT_TEXTREL. */
    assert(!objtmp.textrel);
#endif
    /*
     * Temporarily put the dynamic linker entry into the object list, so
     * that symbols can be found.
     */
    relocate_objects(&objtmp, true, &objtmp, 0, NULL);
    ehdr = (Elf_Ehdr *)mapbase;
    objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff);
    objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]);
    /* Initialize the object list. */
    TAILQ_INIT(&obj_list);
    /* Now that non-local variables can be accesses, copy out obj_rtld. */
    memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
#ifndef RTLD_INIT_PAGESIZES_EARLY
    /* The page size is required by the dynamic memory allocator. */
    init_pagesizes(aux_info);
#endif
    if (aux_info[AT_OSRELDATE] != NULL)
	osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
    digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
    /* Replace the path with a dynamically allocated copy. */
    obj_rtld.path = xstrdup(ld_path_rtld);
    /* Make the debugger rendezvous structure consistent for attach. */
    r_debug.r_brk = r_debug_state;
    r_debug.r_state = RT_CONSISTENT;
}
/*
 * Retrieve the array of supported page sizes. The kernel provides the page
 * sizes in increasing order.  Preferred source is the AT_PAGESIZES aux
 * vector; fallbacks are the hw.pagesizes sysctl, AT_PAGESZ, and finally
 * the hw.pagesize sysctl.  Sets the globals "pagesizes"/"npagesizes".
 */
static void
init_pagesizes(Elf_Auxinfo **aux_info)
{
    static size_t psa[MAXPAGESIZES];
    int mib[2];
    size_t len, size;
    if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] !=
	NULL) {
	/* Kernel supplied the full array directly. */
	size = aux_info[AT_PAGESIZESLEN]->a_un.a_val;
	pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr;
    } else {
	len = 2;
	if (sysctlnametomib("hw.pagesizes", mib, &len) == 0)
	    size = sizeof(psa);
	else {
	    /* As a fallback, retrieve the base page size. */
	    size = sizeof(psa[0]);
	    if (aux_info[AT_PAGESZ] != NULL) {
		psa[0] = aux_info[AT_PAGESZ]->a_un.a_val;
		/* psa[0] already holds the value; skip the sysctl. */
		goto psa_filled;
	    } else {
		mib[0] = CTL_HW;
		mib[1] = HW_PAGESIZE;
		len = 2;
	    }
	}
	if (sysctl(mib, len, psa, &size, NULL, 0) == -1) {
	    _rtld_error("sysctl for hw.pagesize(s) failed");
	    rtld_die();
	}
psa_filled:
	pagesizes = psa;
    }
    /* "size" is in bytes; convert to an element count. */
    npagesizes = size / sizeof(pagesizes[0]);
    /* Discard any invalid entries at the end of the array. */
    while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0)
	npagesizes--;
}
  2019. /*
  2020. * Add the init functions from a needed object list (and its recursive
  2021. * needed objects) to "list". This is not used directly; it is a helper
  2022. * function for initlist_add_objects(). The write lock must be held
  2023. * when this function is called.
  2024. */
  2025. static void
  2026. initlist_add_neededs(Needed_Entry *needed, Objlist *list)
  2027. {
  2028. /* Recursively process the successor needed objects. */
  2029. if (needed->next != NULL)
  2030. initlist_add_neededs(needed->next, list);
  2031. /* Process the current needed object. */
  2032. if (needed->obj != NULL)
  2033. initlist_add_objects(needed->obj, needed->obj, list);
  2034. }
/*
 * Scan all of the DAGs rooted in the range of objects from "obj" to
 * "tail" and add their init functions to "list". This recurses over
 * the DAGs and ensure the proper init ordering such that each object's
 * needed libraries are initialized before the object itself. At the
 * same time, this function adds the objects to the global finalization
 * list "list_fini" in the opposite order. The write lock must be
 * held when this function is called.
 */
static void
initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list)
{
    Obj_Entry *nobj;
    /* Already visited in this scan, or already initialized earlier. */
    if (obj->init_scanned || obj->init_done)
	return;
    obj->init_scanned = true;
    /* Recursively process the successor objects. */
    nobj = globallist_next(obj);
    if (nobj != NULL && obj != tail)
	initlist_add_objects(nobj, tail, list);
    /* Recursively process the needed objects. */
    if (obj->needed != NULL)
	initlist_add_neededs(obj->needed, list);
    if (obj->needed_filtees != NULL)
	initlist_add_neededs(obj->needed_filtees, list);
    if (obj->needed_aux_filtees != NULL)
	initlist_add_neededs(obj->needed_aux_filtees, list);
    /* Add the object to the init list. */
    objlist_push_tail(list, obj);
    /* Add the object to the global fini list in the reverse order. */
    if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
	&& !obj->on_fini_list) {
	objlist_push_head(&list_fini, obj);
	obj->on_fini_list = true;
    }
}
  2071. #ifndef FPTR_TARGET
  2072. #define FPTR_TARGET(f) ((Elf_Addr) (f))
  2073. #endif
  2074. static void
  2075. free_needed_filtees(Needed_Entry *n, RtldLockState *lockstate)
  2076. {
  2077. Needed_Entry *needed, *needed1;
  2078. for (needed = n; needed != NULL; needed = needed->next) {
  2079. if (needed->obj != NULL) {
  2080. dlclose_locked(needed->obj, lockstate);
  2081. needed->obj = NULL;
  2082. }
  2083. }
  2084. for (needed = n; needed != NULL; needed = needed1) {
  2085. needed1 = needed->next;
  2086. free(needed);
  2087. }
  2088. }
/*
 * Release both filtee lists (DT_FILTER and DT_AUXILIARY) of "obj" and
 * mark the object so that a later lookup may reload them lazily.
 */
static void
unload_filtees(Obj_Entry *obj, RtldLockState *lockstate)
{
    free_needed_filtees(obj->needed_filtees, lockstate);
    obj->needed_filtees = NULL;
    free_needed_filtees(obj->needed_aux_filtees, lockstate);
    obj->needed_aux_filtees = NULL;
    obj->filtees_loaded = false;
}
  2098. static void
  2099. load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags,
  2100. RtldLockState *lockstate)
  2101. {
  2102. for (; needed != NULL; needed = needed->next) {
  2103. needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
  2104. flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
  2105. RTLD_LOCAL, lockstate);
  2106. }
  2107. }
  2108. static void
  2109. load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
  2110. {
  2111. lock_restart_for_upgrade(lockstate);
  2112. if (!obj->filtees_loaded) {
  2113. load_filtee1(obj, obj->needed_filtees, flags, lockstate);
  2114. load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate);
  2115. obj->filtees_loaded = true;
  2116. }
  2117. }
  2118. static int
  2119. process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
  2120. {
  2121. Obj_Entry *obj1;
  2122. for (; needed != NULL; needed = needed->next) {
  2123. obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
  2124. flags & ~RTLD_LO_NOLOAD);
  2125. if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
  2126. return (-1);
  2127. }
  2128. return (0);
  2129. }
  2130. #if defined(HARDENEDBSD) && defined(SHLIBRANDOM)
  2131. static void
  2132. randomize_neededs(Obj_Entry *obj, int flags)
  2133. {
  2134. Needed_Entry **needs=NULL, *need=NULL;
  2135. unsigned int i, j, nneed;
  2136. size_t sz = sizeof(unsigned int);
  2137. int mib[2];
  2138. if (!(obj->needed) || (flags & RTLD_LO_FILTEES))
  2139. return;
  2140. mib[0] = CTL_KERN;
  2141. mib[1] = KERN_ARND;
  2142. for (nneed = 0, need = obj->needed; need != NULL; need = need->next)
  2143. nneed++;
  2144. if (nneed > 1) {
  2145. needs = xcalloc(nneed, sizeof(Needed_Entry **));
  2146. for (i = 0, need = obj->needed; i < nneed; i++, need = need->next)
  2147. needs[i] = need;
  2148. for (i=0; i < nneed; i++) {
  2149. do {
  2150. if (sysctl(mib, 2, &j, &sz, NULL, 0))
  2151. goto err;
  2152. j %= nneed;
  2153. } while (j == i);
  2154. need = needs[i];
  2155. needs[i] = needs[j];
  2156. needs[j] = need;
  2157. }
  2158. for (i=0; i < nneed; i++)
  2159. needs[i]->next = i + 1 < nneed ? needs[i + 1] : NULL;
  2160. obj->needed = needs[0];
  2161. }
  2162. err:
  2163. if (needs != NULL)
  2164. free(needs);
  2165. return;
  2166. }
  2167. #endif
/*
 * Given a shared object, traverse its list of needed objects, and load
 * each of them. Returns 0 on success. Generates an error message and
 * returns -1 on failure.
 */
static int
load_needed_objects(Obj_Entry *first, int flags)
{
    Obj_Entry *obj;
    /* Newly loaded objects are appended to obj_list, so this walk
     * also covers transitive dependencies. */
    for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
	if (obj->marker)
	    continue;
#if defined(HARDENEDBSD) && defined(SHLIBRANDOM)
	/* Shuffle the DT_NEEDED order unless explicitly disabled. */
	if ((pax_flags & (PAX_HARDENING_NOSHLIBRANDOM | PAX_HARDENING_SHLIBRANDOM)) !=
	    PAX_HARDENING_NOSHLIBRANDOM)
	    randomize_neededs(obj, flags);
#endif
	if (process_needed(obj, obj->needed, flags) == -1)
	    return (-1);
    }
    return (0);
}
  2190. static int
  2191. load_preload_objects(void)
  2192. {
  2193. char *p = ld_preload;
  2194. Obj_Entry *obj;
  2195. static const char delim[] = " \t:;";
  2196. if (p == NULL)
  2197. return 0;
  2198. p += strspn(p, delim);
  2199. while (*p != '\0') {
  2200. size_t len = strcspn(p, delim);
  2201. char savech;
  2202. savech = p[len];
  2203. p[len] = '\0';
  2204. obj = load_object(p, -1, NULL, 0);
  2205. if (obj == NULL)
  2206. return -1; /* XXX - cleanup */
  2207. obj->z_interpose = true;
  2208. p[len] = savech;
  2209. p += len;
  2210. p += strspn(p, delim);
  2211. }
  2212. LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
  2213. return 0;
  2214. }
  2215. static const char *
  2216. printable_path(const char *path)
  2217. {
  2218. return (path == NULL ? "<unknown>" : path);
  2219. }
/*
 * Load a shared object into memory, if it is not already loaded. The
 * object may be specified by name or by user-supplied file descriptor
 * fd_u. In the later case, the fd_u descriptor is not closed, but its
 * duplicate is.
 *
 * Returns a pointer to the Obj_Entry for the object. Returns NULL
 * on failure.
 */
static Obj_Entry *
load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
{
    Obj_Entry *obj;
    int fd;
    struct stat sb;
    char *path;
    fd = -1;
    if (name != NULL) {
	/* Fast path: already loaded under this name. */
	TAILQ_FOREACH(obj, &obj_list, next) {
	    if (obj->marker || obj->doomed)
		continue;
	    if (object_match_name(obj, name))
		return (obj);
	}
	/* find_library may also hand back an already-open fd. */
	path = find_library(name, refobj, &fd);
	if (path == NULL)
	    return (NULL);
    } else
	path = NULL;
    if (fd >= 0) {
	/*
	 * search_library_pathfds() opens a fresh file descriptor for the
	 * library, so there is no need to dup().
	 */
    } else if (fd_u == -1) {
	/*
	 * If we didn't find a match by pathname, or the name is not
	 * supplied, open the file and check again by device and inode.
	 * This avoids false mismatches caused by multiple links or ".."
	 * in pathnames.
	 *
	 * To avoid a race, we open the file and use fstat() rather than
	 * using stat().
	 */
	if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) {
	    _rtld_error("Cannot open \"%s\"", path);
	    free(path);
	    return (NULL);
	}
    } else {
	/* Caller-supplied descriptor: work on a private duplicate. */
	fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0);
	if (fd == -1) {
	    _rtld_error("Cannot dup fd");
	    free(path);
	    return (NULL);
	}
    }
    if (fstat(fd, &sb) == -1) {
	_rtld_error("Cannot fstat \"%s\"", printable_path(path));
	close(fd);
	free(path);
	return NULL;
    }
    /* Second dedup pass: match by device and inode. */
    TAILQ_FOREACH(obj, &obj_list, next) {
	if (obj->marker || obj->doomed)
	    continue;
	if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
	    break;
    }
    if (obj != NULL && name != NULL) {
	/* Same file under a new name: record the alias. */
	object_add_name(obj, name);
	free(path);
	close(fd);
	return obj;
    }
    if (flags & RTLD_LO_NOLOAD) {
	free(path);
	close(fd);
	return (NULL);
    }
    /* First use of this object, so we must map it in.
     * On success, ownership of "path" passes to the new object. */
    obj = do_load_object(fd, name, path, &sb, flags);
    if (obj == NULL)
	free(path);
    close(fd);
    return obj;
}
/*
 * Map the object from "fd" and digest its dynamic section.  "path" is
 * stored into the new object on success (ownership transfers to the
 * object); on failure the caller frees it.
 *
 * NOTE(review): on the errp path obj_free() runs after obj->path has
 * been set, while the caller also frees "path" when NULL is returned —
 * verify obj_free()'s handling of obj->path to rule out a double free.
 */
static Obj_Entry *
do_load_object(int fd, const char *name, char *path, struct stat *sbp,
    int flags)
{
    Obj_Entry *obj;
    struct statfs fs;
#ifdef HARDENEDBSD
    struct integriforce_so_check check;
    int res, err;
    size_t sz;
#endif
    /*
     * but first, make sure that environment variables haven't been
     * used to circumvent the noexec flag on a filesystem.
     */
    if (dangerous_ld_env) {
	if (fstatfs(fd, &fs) != 0) {
	    _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
	    return NULL;
	}
	if (fs.f_flags & MNT_NOEXEC) {
	    _rtld_error("Cannot execute objects on %s", fs.f_mntonname);
	    return NULL;
	}
    }
#ifdef HARDENEDBSD
    /* Ask secadm (if the integriforce feature exists) to validate the
     * shared object before mapping it. */
    if (path != NULL) {
	sz = sizeof(int);
	err = sysctlbyname("kern.features.integriforce",
	    &res, &sz, NULL, 0);
	if (err == 0 && res == 1) {
	    strlcpy(check.isc_path, path, MAXPATHLEN);
	    check.isc_result = 0;
	    sz = sizeof(struct integriforce_so_check);
	    err = sysctlbyname("hardening.secadm.integriforce_so",
		&check, &sz, &check, sizeof(struct integriforce_so_check));
	    if (err == 0 && check.isc_result != 0) {
		_rtld_error("Integriforce validation failed on %s. Aborting.\n", path);
		return (NULL);
	    }
	}
    }
#endif
    dbg("loading \"%s\"", printable_path(path));
    obj = map_object(fd, printable_path(path), sbp);
    if (obj == NULL)
	return NULL;
    /*
     * If DT_SONAME is present in the object, digest_dynamic2 already
     * added it to the object names.
     */
    if (name != NULL)
	object_add_name(obj, name);
    obj->path = path;
    if (!digest_dynamic(obj, 0))
	goto errp;
    dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path,
	obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount);
    /* A PIE executable may only be loaded when tracing (ldd). */
    if (obj->z_pie && (flags & RTLD_LO_TRACE) == 0) {
	dbg("refusing to load PIE executable \"%s\"", obj->path);
	_rtld_error("Cannot load PIE binary %s as DSO", obj->path);
	goto errp;
    }
    /* DF_1_NOOPEN objects may not be dlopen()ed (tracing excepted). */
    if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
	RTLD_LO_DLOPEN) {
	dbg("refusing to load non-loadable \"%s\"", obj->path);
	_rtld_error("Cannot dlopen non-loadable %s", obj->path);
	goto errp;
    }
    obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0;
    TAILQ_INSERT_TAIL(&obj_list, obj, next);
    obj_count++;
    obj_loads++;
    linkmap_add(obj);	/* for GDB & dlinfo() */
    max_stack_flags |= obj->stack_flags;
    /* Do not request an executable stack if the stack protection
     * currently in force excludes PROT_EXEC. */
    if ((max_stack_flags & PF_X) == PF_X)
	if ((stack_prot & PROT_EXEC) == 0)
	    max_stack_flags &= ~(PF_X);
    dbg("  %p .. %p: %s", obj->mapbase,
	obj->mapbase + obj->mapsize - 1, obj->path);
    if (obj->textrel)
	dbg("  WARNING: %s has impure text", obj->path);
    LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
	obj->path);
    return (obj);
errp:
    munmap(obj->mapbase, obj->mapsize);
    obj_free(obj);
    return (NULL);
}
  2397. static Obj_Entry *
  2398. obj_from_addr(const void *addr)
  2399. {
  2400. Obj_Entry *obj;
  2401. TAILQ_FOREACH(obj, &obj_list, next) {
  2402. if (obj->marker)
  2403. continue;
  2404. if (addr < (void *) obj->mapbase)
  2405. continue;
  2406. if (addr < (void *)(obj->mapbase + obj->mapsize))
  2407. return obj;
  2408. }
  2409. return NULL;
  2410. }
  2411. static void
  2412. preinit_main(void)
  2413. {
  2414. Elf_Addr *preinit_addr;
  2415. int index;
  2416. preinit_addr = (Elf_Addr *)obj_main->preinit_array;
  2417. if (preinit_addr == NULL)
  2418. return;
  2419. for (index = 0; index < obj_main->preinit_array_num; index++) {
  2420. if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
  2421. dbg("calling preinit function for %s at %p", obj_main->path,
  2422. (void *)preinit_addr[index]);
  2423. LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
  2424. 0, 0, obj_main->path);
  2425. call_init_pointer(obj_main, preinit_addr[index]);
  2426. }
  2427. }
  2428. }
/*
 * Call the finalization functions for each of the objects in "list"
 * belonging to the DAG of "root" and referenced once. If NULL "root"
 * is specified, every finalization function will be called regardless
 * of the reference count and the list elements won't be freed. All of
 * the objects are expected to have non-NULL fini functions.
 */
static void
objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
{
    Objlist_Entry *elm;
    char *saved_msg;
    Elf_Addr *fini_addr;
    int index;
    assert(root == NULL || root->refcount == 1);
    if (root != NULL)
	root->doomed = true;
    /*
     * Preserve the current error message since a fini function might
     * call into the dynamic linker and overwrite it.
     */
    saved_msg = errmsg_save();
    do {
	STAILQ_FOREACH(elm, list, link) {
	    /* When finalizing a DAG, only objects that belong to it and
	     * carry their last reference are eligible. */
	    if (root != NULL && (elm->obj->refcount != 1 ||
		objlist_find(&root->dagmembers, elm->obj) == NULL))
		continue;
	    /* Remove object from fini list to prevent recursive invocation. */
	    STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
	    /* Ensure that new references cannot be acquired. */
	    elm->obj->doomed = true;
	    /* Pin the object: the bind lock is dropped across the
	     * user-supplied fini code below. */
	    hold_object(elm->obj);
	    lock_release(rtld_bind_lock, lockstate);
	    /*
	     * It is legal to have both DT_FINI and DT_FINI_ARRAY defined.
	     * When this happens, DT_FINI_ARRAY is processed first.
	     */
	    fini_addr = (Elf_Addr *)elm->obj->fini_array;
	    if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
		/* DT_FINI_ARRAY runs in reverse order; 0/1 mark unused slots. */
		for (index = elm->obj->fini_array_num - 1; index >= 0;
		    index--) {
		    if (fini_addr[index] != 0 && fini_addr[index] != 1) {
			dbg("calling fini function for %s at %p",
			    elm->obj->path, (void *)fini_addr[index]);
			LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
			    (void *)fini_addr[index], 0, 0, elm->obj->path);
			call_initfini_pointer(elm->obj, fini_addr[index]);
		    }
		}
	    }
	    if (elm->obj->fini != (Elf_Addr)NULL) {
		dbg("calling fini function for %s at %p", elm->obj->path,
		    (void *)elm->obj->fini);
		LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
		    0, 0, elm->obj->path);
		call_initfini_pointer(elm->obj, elm->obj->fini);
	    }
	    wlock_acquire(rtld_bind_lock, lockstate);
	    unhold_object(elm->obj);
	    /* No need to free anything if process is going down. */
	    if (root != NULL)
		free(elm);
	    /*
	     * We must restart the list traversal after every fini call
	     * because a dlclose() call from the fini function or from
	     * another thread might have modified the reference counts.
	     */
	    break;
	}
    } while (elm != NULL);
    errmsg_restore(saved_msg);
}
/*
 * Call the initialization functions for each of the objects in
 * "list". All of the objects are expected to have non-NULL init
 * functions.
 */
static void
objlist_call_init(Objlist *list, RtldLockState *lockstate)
{
    Objlist_Entry *elm;
    Obj_Entry *obj;
    char *saved_msg;
    Elf_Addr *init_addr;
    void (*reg)(void (*)(void));
    int index;
    /*
     * Clean init_scanned flag so that objects can be rechecked and
     * possibly initialized earlier if any of vectors called below
     * cause the change by using dlopen.
     */
    TAILQ_FOREACH(obj, &obj_list, next) {
	if (obj->marker)
	    continue;
	obj->init_scanned = false;
    }
    /*
     * Preserve the current error message since an init function might
     * call into the dynamic linker and overwrite it.
     */
    saved_msg = errmsg_save();
    STAILQ_FOREACH(elm, list, link) {
	if (elm->obj->init_done) /* Initialized early. */
	    continue;
	/*
	 * Race: other thread might try to use this object before current
	 * one completes the initialization. Not much can be done here
	 * without better locking.
	 */
	elm->obj->init_done = true;
	/* Pin the object; the bind lock is dropped across user code. */
	hold_object(elm->obj);
	reg = NULL;
	/* When the main object's crt did not register the atexit
	 * handler, rtld registers its own exit hook via __libc_atexit. */
	if (elm->obj == obj_main && obj_main->crt_no_init) {
	    reg = (void (*)(void (*)(void)))get_program_var_addr(
		"__libc_atexit", lockstate);
	}
	lock_release(rtld_bind_lock, lockstate);
	if (reg != NULL) {
	    reg(rtld_exit);
	    rtld_exit_ptr = rtld_nop_exit;
	}
	/*
	 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
	 * When this happens, DT_INIT is processed first.
	 */
	if (elm->obj->init != (Elf_Addr)NULL) {
	    dbg("calling init function for %s at %p", elm->obj->path,
		(void *)elm->obj->init);
	    LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
		0, 0, elm->obj->path);
	    call_initfini_pointer(elm->obj, elm->obj->init);
	}
	init_addr = (Elf_Addr *)elm->obj->init_array;
	if (init_addr != NULL) {
	    /* Entries of 0 and 1 mark unused slots. */
	    for (index = 0; index < elm->obj->init_array_num; index++) {
		if (init_addr[index] != 0 && init_addr[index] != 1) {
		    dbg("calling init function for %s at %p", elm->obj->path,
			(void *)init_addr[index]);
		    LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
			(void *)init_addr[index], 0, 0, elm->obj->path);
		    call_init_pointer(elm->obj, init_addr[index]);
		}
	    }
	}
	wlock_acquire(rtld_bind_lock, lockstate);
	unhold_object(elm->obj);
    }
    errmsg_restore(saved_msg);
}
  2578. static void
  2579. objlist_clear(Objlist *list)
  2580. {
  2581. Objlist_Entry *elm;
  2582. while (!STAILQ_EMPTY(list)) {
  2583. elm = STAILQ_FIRST(list);
  2584. STAILQ_REMOVE_HEAD(list, link);
  2585. free(elm);
  2586. }
  2587. }
  2588. static Objlist_Entry *
  2589. objlist_find(Objlist *list, const Obj_Entry *obj)
  2590. {
  2591. Objlist_Entry *elm;
  2592. STAILQ_FOREACH(elm, list, link)
  2593. if (elm->obj == obj)
  2594. return elm;
  2595. return NULL;
  2596. }
  2597. static void
  2598. objlist_init(Objlist *list)
  2599. {
  2600. STAILQ_INIT(list);
  2601. }
  2602. static void
  2603. objlist_push_head(Objlist *list, Obj_Entry *obj)
  2604. {
  2605. Objlist_Entry *elm;
  2606. elm = NEW(Objlist_Entry);
  2607. elm->obj = obj;
  2608. STAILQ_INSERT_HEAD(list, elm, link);
  2609. }
  2610. static void
  2611. objlist_push_tail(Objlist *list, Obj_Entry *obj)
  2612. {
  2613. Objlist_Entry *elm;
  2614. elm = NEW(Objlist_Entry);
  2615. elm->obj = obj;
  2616. STAILQ_INSERT_TAIL(list, elm, link);
  2617. }
  2618. static void
  2619. objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj)
  2620. {
  2621. Objlist_Entry *elm, *listelm;
  2622. STAILQ_FOREACH(listelm, list, link) {
  2623. if (listelm->obj == listobj)
  2624. break;
  2625. }
  2626. elm = NEW(Objlist_Entry);
  2627. elm->obj = obj;
  2628. if (listelm != NULL)
  2629. STAILQ_INSERT_AFTER(list, listelm, elm, link);
  2630. else
  2631. STAILQ_INSERT_TAIL(list, elm, link);
  2632. }
  2633. static void
  2634. objlist_remove(Objlist *list, Obj_Entry *obj)
  2635. {
  2636. Objlist_Entry *elm;
  2637. if ((elm = objlist_find(list, obj)) != NULL) {
  2638. STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
  2639. free(elm);
  2640. }
  2641. }
  2642. /*
  2643. * Relocate dag rooted in the specified object.
  2644. * Returns 0 on success, or -1 on failure.
  2645. */
  2646. static int
  2647. relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj,
  2648. int flags, RtldLockState *lockstate)
  2649. {
  2650. Objlist_Entry *elm;
  2651. int error;
  2652. error = 0;
  2653. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  2654. error = relocate_object(elm->obj, bind_now, rtldobj, flags,
  2655. lockstate);
  2656. if (error == -1)
  2657. break;
  2658. }
  2659. return (error);
  2660. }
  2661. /*
  2662. * Prepare for, or clean after, relocating an object marked with
  2663. * DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only
  2664. * segments are remapped read-write. After relocations are done, the
  2665. * segment's permissions are returned back to the modes specified in
  2666. * the phdrs. If any relocation happened, or always for wired
  2667. * program, COW is triggered.
  2668. */
  2669. static int
  2670. reloc_textrel_prot(Obj_Entry *obj, bool before)
  2671. {
  2672. const Elf_Phdr *ph;
  2673. void *base;
  2674. size_t l, sz;
  2675. int prot;
  2676. for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0;
  2677. l--, ph++) {
  2678. if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0)
  2679. continue;
  2680. base = obj->relocbase + trunc_page(ph->p_vaddr);
  2681. sz = round_page(ph->p_vaddr + ph->p_filesz) -
  2682. trunc_page(ph->p_vaddr);
  2683. prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0);
  2684. if (mprotect(base, sz, prot) == -1) {
  2685. _rtld_error("%s: Cannot write-%sable text segment: %s",
  2686. obj->path, before ? "en" : "dis",
  2687. rtld_strerror(errno));
  2688. return (-1);
  2689. }
  2690. }
  2691. return (0);
  2692. }
  2693. /*
  2694. * Relocate single object.
  2695. * Returns 0 on success, or -1 on failure.
  2696. */
  2697. static int
  2698. relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
  2699. int flags, RtldLockState *lockstate)
  2700. {
  2701. if (obj->relocated)
  2702. return (0);
  2703. obj->relocated = true;
  2704. if (obj != rtldobj)
  2705. dbg("relocating \"%s\"", obj->path);
  2706. if (obj->symtab == NULL || obj->strtab == NULL ||
  2707. !(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
  2708. _rtld_error("%s: Shared object has no run-time symbol table",
  2709. obj->path);
  2710. return (-1);
  2711. }
  2712. /* There are relocations to the write-protected text segment. */
  2713. if (obj->textrel && reloc_textrel_prot(obj, true) != 0)
  2714. return (-1);
  2715. /* Process the non-PLT non-IFUNC relocations. */
  2716. if (reloc_non_plt(obj, rtldobj, flags, lockstate))
  2717. return (-1);
  2718. /* Re-protected the text segment. */
  2719. if (obj->textrel && reloc_textrel_prot(obj, false) != 0)
  2720. return (-1);
  2721. /* Set the special PLT or GOT entries. */
  2722. init_pltgot(obj);
  2723. /* Process the PLT relocations. */
  2724. if (reloc_plt(obj, flags, lockstate) == -1)
  2725. return (-1);
  2726. /* Relocate the jump slots if we are doing immediate binding. */
  2727. if ((obj->bind_now || bind_now) && reloc_jmpslots(obj, flags,
  2728. lockstate) == -1)
  2729. return (-1);
  2730. if (!obj->mainprog && obj_enforce_relro(obj) == -1)
  2731. return (-1);
  2732. /*
  2733. * Set up the magic number and version in the Obj_Entry. These
  2734. * were checked in the crt1.o from the original ElfKit, so we
  2735. * set them for backward compatibility.
  2736. */
  2737. obj->magic = RTLD_MAGIC;
  2738. obj->version = RTLD_VERSION;
  2739. return (0);
  2740. }
  2741. /*
  2742. * Relocate newly-loaded shared objects. The argument is a pointer to
  2743. * the Obj_Entry for the first such object. All objects from the first
  2744. * to the end of the list of objects are relocated. Returns 0 on success,
  2745. * or -1 on failure.
  2746. */
  2747. static int
  2748. relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
  2749. int flags, RtldLockState *lockstate)
  2750. {
  2751. Obj_Entry *obj;
  2752. int error;
  2753. for (error = 0, obj = first; obj != NULL;
  2754. obj = TAILQ_NEXT(obj, next)) {
  2755. if (obj->marker)
  2756. continue;
  2757. error = relocate_object(obj, bind_now, rtldobj, flags,
  2758. lockstate);
  2759. if (error == -1)
  2760. break;
  2761. }
  2762. return (error);
  2763. }
  2764. /*
  2765. * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
  2766. * referencing STT_GNU_IFUNC symbols is postponed till the other
  2767. * relocations are done. The indirect functions specified as
  2768. * ifunc are allowed to call other symbols, so we need to have
  2769. * objects relocated before asking for resolution from indirects.
  2770. *
  2771. * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
  2772. * instead of the usual lazy handling of PLT slots. It is
  2773. * consistent with how GNU does it.
  2774. */
  2775. static int
  2776. resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
  2777. RtldLockState *lockstate)
  2778. {
  2779. if (obj->ifuncs_resolved)
  2780. return (0);
  2781. obj->ifuncs_resolved = true;
  2782. if (!obj->irelative && !obj->irelative_nonplt &&
  2783. !((obj->bind_now || bind_now) && obj->gnu_ifunc))
  2784. return (0);
  2785. if (obj_disable_relro(obj) == -1 ||
  2786. (obj->irelative && reloc_iresolve(obj, lockstate) == -1) ||
  2787. (obj->irelative_nonplt && reloc_iresolve_nonplt(obj,
  2788. lockstate) == -1) ||
  2789. ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
  2790. reloc_gnu_ifunc(obj, flags, lockstate) == -1) ||
  2791. obj_enforce_relro(obj) == -1)
  2792. return (-1);
  2793. return (0);
  2794. }
  2795. static int
  2796. initlist_objects_ifunc(Objlist *list, bool bind_now, int flags,
  2797. RtldLockState *lockstate)
  2798. {
  2799. Objlist_Entry *elm;
  2800. Obj_Entry *obj;
  2801. STAILQ_FOREACH(elm, list, link) {
  2802. obj = elm->obj;
  2803. if (obj->marker)
  2804. continue;
  2805. if (resolve_object_ifunc(obj, bind_now, flags,
  2806. lockstate) == -1)
  2807. return (-1);
  2808. }
  2809. return (0);
  2810. }
  2811. /*
  2812. * Cleanup procedure. It will be called (by the atexit mechanism) just
  2813. * before the process exits.
  2814. */
  2815. static void
  2816. rtld_exit(void)
  2817. {
  2818. RtldLockState lockstate;
  2819. wlock_acquire(rtld_bind_lock, &lockstate);
  2820. dbg("rtld_exit()");
  2821. objlist_call_fini(&list_fini, NULL, &lockstate);
  2822. /* No need to remove the items from the list, since we are exiting. */
  2823. if (!libmap_disable)
  2824. lm_fini();
  2825. lock_release(rtld_bind_lock, &lockstate);
  2826. }
  2827. static void
  2828. rtld_nop_exit(void)
  2829. {
  2830. }
  2831. /*
  2832. * Iterate over a search path, translate each element, and invoke the
  2833. * callback on the result.
  2834. */
  2835. static void *
  2836. path_enumerate(const char *path, path_enum_proc callback,
  2837. const char *refobj_path, void *arg)
  2838. {
  2839. const char *trans;
  2840. if (path == NULL)
  2841. return (NULL);
  2842. path += strspn(path, ":;");
  2843. while (*path != '\0') {
  2844. size_t len;
  2845. char *res;
  2846. len = strcspn(path, ":;");
  2847. trans = lm_findn(refobj_path, path, len);
  2848. if (trans)
  2849. res = callback(trans, strlen(trans), arg);
  2850. else
  2851. res = callback(path, len, arg);
  2852. if (res != NULL)
  2853. return (res);
  2854. path += len;
  2855. path += strspn(path, ":;");
  2856. }
  2857. return (NULL);
  2858. }
  2859. struct try_library_args {
  2860. const char *name;
  2861. size_t namelen;
  2862. char *buffer;
  2863. size_t buflen;
  2864. int fd;
  2865. };
  2866. static void *
  2867. try_library_path(const char *dir, size_t dirlen, void *param)
  2868. {
  2869. struct try_library_args *arg;
  2870. int fd;
  2871. arg = param;
  2872. if (*dir == '/' || trust) {
  2873. char *pathname;
  2874. if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
  2875. return (NULL);
  2876. pathname = arg->buffer;
  2877. strncpy(pathname, dir, dirlen);
  2878. pathname[dirlen] = '/';
  2879. strcpy(pathname + dirlen + 1, arg->name);
  2880. dbg(" Trying \"%s\"", pathname);
  2881. fd = open(pathname, O_RDONLY | O_CLOEXEC | O_VERIFY);
  2882. if (fd >= 0) {
  2883. dbg(" Opened \"%s\", fd %d", pathname, fd);
  2884. pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
  2885. strcpy(pathname, arg->buffer);
  2886. arg->fd = fd;
  2887. return (pathname);
  2888. } else {
  2889. dbg(" Failed to open \"%s\": %s",
  2890. pathname, rtld_strerror(errno));
  2891. }
  2892. }
  2893. return (NULL);
  2894. }
  2895. static char *
  2896. search_library_path(const char *name, const char *path,
  2897. const char *refobj_path, int *fdp)
  2898. {
  2899. char *p;
  2900. struct try_library_args arg;
  2901. if (path == NULL)
  2902. return NULL;
  2903. arg.name = name;
  2904. arg.namelen = strlen(name);
  2905. arg.buffer = xmalloc(PATH_MAX);
  2906. arg.buflen = PATH_MAX;
  2907. arg.fd = -1;
  2908. p = path_enumerate(path, try_library_path, refobj_path, &arg);
  2909. *fdp = arg.fd;
  2910. free(arg.buffer);
  2911. return (p);
  2912. }
  2913. /*
  2914. * Finds the library with the given name using the directory descriptors
  2915. * listed in the LD_LIBRARY_PATH_FDS environment variable.
  2916. *
  2917. * Returns a freshly-opened close-on-exec file descriptor for the library,
  2918. * or -1 if the library cannot be found.
  2919. */
  2920. static char *
  2921. search_library_pathfds(const char *name, const char *path, int *fdp)
  2922. {
  2923. char *envcopy, *fdstr, *found, *last_token;
  2924. size_t len;
  2925. int dirfd, fd;
  2926. dbg("%s('%s', '%s', fdp)", __func__, name, path);
  2927. /* Don't load from user-specified libdirs into setuid binaries. */
  2928. if (!trust)
  2929. return (NULL);
  2930. /* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */
  2931. if (path == NULL)
  2932. return (NULL);
  2933. /* LD_LIBRARY_PATH_FDS only works with relative paths. */
  2934. if (name[0] == '/') {
  2935. dbg("Absolute path (%s) passed to %s", name, __func__);
  2936. return (NULL);
  2937. }
  2938. /*
  2939. * Use strtok_r() to walk the FD:FD:FD list. This requires a local
  2940. * copy of the path, as strtok_r rewrites separator tokens
  2941. * with '\0'.
  2942. */
  2943. found = NULL;
  2944. envcopy = xstrdup(path);
  2945. for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL;
  2946. fdstr = strtok_r(NULL, ":", &last_token)) {
  2947. dirfd = parse_integer(fdstr);
  2948. if (dirfd < 0) {
  2949. _rtld_error("failed to parse directory FD: '%s'",
  2950. fdstr);
  2951. break;
  2952. }
  2953. fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY);
  2954. if (fd >= 0) {
  2955. *fdp = fd;
  2956. len = strlen(fdstr) + strlen(name) + 3;
  2957. found = xmalloc(len);
  2958. if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) {
  2959. _rtld_error("error generating '%d/%s'",
  2960. dirfd, name);
  2961. rtld_die();
  2962. }
  2963. dbg("open('%s') => %d", found, fd);
  2964. break;
  2965. }
  2966. }
  2967. free(envcopy);
  2968. return (found);
  2969. }
  2970. int
  2971. dlclose(void *handle)
  2972. {
  2973. RtldLockState lockstate;
  2974. int error;
  2975. wlock_acquire(rtld_bind_lock, &lockstate);
  2976. error = dlclose_locked(handle, &lockstate);
  2977. lock_release(rtld_bind_lock, &lockstate);
  2978. return (error);
  2979. }
  2980. static int
  2981. dlclose_locked(void *handle, RtldLockState *lockstate)
  2982. {
  2983. Obj_Entry *root;
  2984. root = dlcheck(handle);
  2985. if (root == NULL)
  2986. return -1;
  2987. LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
  2988. root->path);
  2989. /* Unreference the object and its dependencies. */
  2990. root->dl_refcount--;
  2991. if (root->refcount == 1) {
  2992. /*
  2993. * The object will be no longer referenced, so we must unload it.
  2994. * First, call the fini functions.
  2995. */
  2996. objlist_call_fini(&list_fini, root, lockstate);
  2997. unref_dag(root);
  2998. /* Finish cleaning up the newly-unreferenced objects. */
  2999. GDB_STATE(RT_DELETE,&root->linkmap);
  3000. unload_object(root, lockstate);
  3001. GDB_STATE(RT_CONSISTENT,NULL);
  3002. } else
  3003. unref_dag(root);
  3004. LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
  3005. return 0;
  3006. }
  3007. char *
  3008. dlerror(void)
  3009. {
  3010. char *msg = error_message;
  3011. error_message = NULL;
  3012. return msg;
  3013. }
  3014. /*
  3015. * This function is deprecated and has no effect.
  3016. */
  3017. void
  3018. dllockinit(void *context,
  3019. void *(*_lock_create)(void *context) __unused,
  3020. void (*_rlock_acquire)(void *lock) __unused,
  3021. void (*_wlock_acquire)(void *lock) __unused,
  3022. void (*_lock_release)(void *lock) __unused,
  3023. void (*_lock_destroy)(void *lock) __unused,
  3024. void (*context_destroy)(void *context))
  3025. {
  3026. static void *cur_context;
  3027. static void (*cur_context_destroy)(void *);
  3028. /* Just destroy the context from the previous call, if necessary. */
  3029. if (cur_context_destroy != NULL)
  3030. cur_context_destroy(cur_context);
  3031. cur_context = context;
  3032. cur_context_destroy = context_destroy;
  3033. }
  3034. void *
  3035. dlopen(const char *name, int mode)
  3036. {
  3037. return (rtld_dlopen(name, -1, mode));
  3038. }
  3039. void *
  3040. fdlopen(int fd, int mode)
  3041. {
  3042. return (rtld_dlopen(NULL, fd, mode));
  3043. }
  3044. static void *
  3045. rtld_dlopen(const char *name, int fd, int mode)
  3046. {
  3047. RtldLockState lockstate;
  3048. int lo_flags;
  3049. LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
  3050. ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
  3051. if (ld_tracing != NULL) {
  3052. rlock_acquire(rtld_bind_lock, &lockstate);
  3053. if (sigsetjmp(lockstate.env, 0) != 0)
  3054. lock_upgrade(rtld_bind_lock, &lockstate);
  3055. environ = __DECONST(char **, *get_program_var_addr("environ", &lockstate));
  3056. lock_release(rtld_bind_lock, &lockstate);
  3057. }
  3058. lo_flags = RTLD_LO_DLOPEN;
  3059. if (mode & RTLD_NODELETE)
  3060. lo_flags |= RTLD_LO_NODELETE;
  3061. if (mode & RTLD_NOLOAD)
  3062. lo_flags |= RTLD_LO_NOLOAD;
  3063. if (mode & RTLD_DEEPBIND)
  3064. lo_flags |= RTLD_LO_DEEPBIND;
  3065. if (ld_tracing != NULL)
  3066. lo_flags |= RTLD_LO_TRACE | RTLD_LO_IGNSTLS;
  3067. return (dlopen_object(name, fd, obj_main, lo_flags,
  3068. mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL));
  3069. }
  3070. static void
  3071. dlopen_cleanup(Obj_Entry *obj, RtldLockState *lockstate)
  3072. {
  3073. obj->dl_refcount--;
  3074. unref_dag(obj);
  3075. if (obj->refcount == 0)
  3076. unload_object(obj, lockstate);
  3077. }
  3078. static Obj_Entry *
  3079. dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
  3080. int mode, RtldLockState *lockstate)
  3081. {
  3082. Obj_Entry *old_obj_tail;
  3083. Obj_Entry *obj;
  3084. Objlist initlist;
  3085. RtldLockState mlockstate;
  3086. int result;
  3087. dbg("dlopen_object name \"%s\" fd %d refobj \"%s\" lo_flags %#x mode %#x",
  3088. name != NULL ? name : "<null>", fd, refobj == NULL ? "<null>" :
  3089. refobj->path, lo_flags, mode);
  3090. objlist_init(&initlist);
  3091. if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) {
  3092. wlock_acquire(rtld_bind_lock, &mlockstate);
  3093. lockstate = &mlockstate;
  3094. }
  3095. GDB_STATE(RT_ADD,NULL);
  3096. old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
  3097. obj = NULL;
  3098. if (name == NULL && fd == -1) {
  3099. obj = obj_main;
  3100. obj->refcount++;
  3101. } else {
  3102. obj = load_object(name, fd, refobj, lo_flags);
  3103. }
  3104. if (obj) {
  3105. obj->dl_refcount++;
  3106. if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
  3107. objlist_push_tail(&list_global, obj);
  3108. if (globallist_next(old_obj_tail) != NULL) {
  3109. /* We loaded something new. */
  3110. assert(globallist_next(old_obj_tail) == obj);
  3111. if ((lo_flags & RTLD_LO_DEEPBIND) != 0)
  3112. obj->symbolic = true;
  3113. result = 0;
  3114. if ((lo_flags & (RTLD_LO_EARLY | RTLD_LO_IGNSTLS)) == 0 &&
  3115. obj->static_tls && !allocate_tls_offset(obj)) {
  3116. _rtld_error("%s: No space available "
  3117. "for static Thread Local Storage", obj->path);
  3118. result = -1;
  3119. }
  3120. if (result != -1)
  3121. result = load_needed_objects(obj, lo_flags & (RTLD_LO_DLOPEN |
  3122. RTLD_LO_EARLY | RTLD_LO_IGNSTLS | RTLD_LO_TRACE));
  3123. init_dag(obj);
  3124. ref_dag(obj);
  3125. if (result != -1)
  3126. result = rtld_verify_versions(&obj->dagmembers);
  3127. if (result != -1 && ld_tracing)
  3128. goto trace;
  3129. if (result == -1 || relocate_object_dag(obj,
  3130. (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld,
  3131. (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
  3132. lockstate) == -1) {
  3133. dlopen_cleanup(obj, lockstate);
  3134. obj = NULL;
  3135. } else if (lo_flags & RTLD_LO_EARLY) {
  3136. /*
  3137. * Do not call the init functions for early loaded
  3138. * filtees. The image is still not initialized enough
  3139. * for them to work.
  3140. *
  3141. * Our object is found by the global object list and
  3142. * will be ordered among all init calls done right
  3143. * before transferring control to main.
  3144. */
  3145. } else {
  3146. /* Make list of init functions to call. */
  3147. initlist_add_objects(obj, obj, &initlist);
  3148. }
  3149. /*
  3150. * Process all no_delete or global objects here, given
  3151. * them own DAGs to prevent their dependencies from being
  3152. * unloaded. This has to be done after we have loaded all
  3153. * of the dependencies, so that we do not miss any.
  3154. */
  3155. if (obj != NULL)
  3156. process_z(obj);
  3157. } else {
  3158. /*
  3159. * Bump the reference counts for objects on this DAG. If
  3160. * this is the first dlopen() call for the object that was
  3161. * already loaded as a dependency, initialize the dag
  3162. * starting at it.
  3163. */
  3164. init_dag(obj);
  3165. ref_dag(obj);
  3166. if ((lo_flags & RTLD_LO_TRACE) != 0)
  3167. goto trace;
  3168. }
  3169. if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
  3170. obj->z_nodelete) && !obj->ref_nodel) {
  3171. dbg("obj %s nodelete", obj->path);
  3172. ref_dag(obj);
  3173. obj->z_nodelete = obj->ref_nodel = true;
  3174. }
  3175. }
  3176. LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
  3177. name);
  3178. GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
  3179. if ((lo_flags & RTLD_LO_EARLY) == 0) {
  3180. map_stacks_exec(lockstate);
  3181. if (obj != NULL)
  3182. distribute_static_tls(&initlist, lockstate);
  3183. }
  3184. if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
  3185. (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
  3186. lockstate) == -1) {
  3187. objlist_clear(&initlist);
  3188. dlopen_cleanup(obj, lockstate);
  3189. if (lockstate == &mlockstate)
  3190. lock_release(rtld_bind_lock, lockstate);
  3191. return (NULL);
  3192. }
  3193. if (!(lo_flags & RTLD_LO_EARLY)) {
  3194. /* Call the init functions. */
  3195. objlist_call_init(&initlist, lockstate);
  3196. }
  3197. objlist_clear(&initlist);
  3198. if (lockstate == &mlockstate)
  3199. lock_release(rtld_bind_lock, lockstate);
  3200. return obj;
  3201. trace:
  3202. trace_loaded_objects(obj);
  3203. if (lockstate == &mlockstate)
  3204. lock_release(rtld_bind_lock, lockstate);
  3205. exit(0);
  3206. }
  3207. static void *
  3208. do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
  3209. int flags)
  3210. {
  3211. DoneList donelist;
  3212. const Obj_Entry *obj, *defobj;
  3213. const Elf_Sym *def;
  3214. SymLook req;
  3215. RtldLockState lockstate;
  3216. tls_index ti;
  3217. void *sym;
  3218. int res;
  3219. def = NULL;
  3220. defobj = NULL;
  3221. symlook_init(&req, name);
  3222. req.ventry = ve;
  3223. req.flags = flags | SYMLOOK_IN_PLT;
  3224. req.lockstate = &lockstate;
  3225. LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name);
  3226. rlock_acquire(rtld_bind_lock, &lockstate);
  3227. if (sigsetjmp(lockstate.env, 0) != 0)
  3228. lock_upgrade(rtld_bind_lock, &lockstate);
  3229. if (handle == NULL || handle == RTLD_NEXT ||
  3230. handle == RTLD_DEFAULT || handle == RTLD_SELF) {
  3231. if ((obj = obj_from_addr(retaddr)) == NULL) {
  3232. _rtld_error("Cannot determine caller's shared object");
  3233. lock_release(rtld_bind_lock, &lockstate);
  3234. LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
  3235. return NULL;
  3236. }
  3237. if (handle == NULL) { /* Just the caller's shared object. */
  3238. res = symlook_obj(&req, obj);
  3239. if (res == 0) {
  3240. def = req.sym_out;
  3241. defobj = req.defobj_out;
  3242. }
  3243. } else if (handle == RTLD_NEXT || /* Objects after caller's */
  3244. handle == RTLD_SELF) { /* ... caller included */
  3245. if (handle == RTLD_NEXT)
  3246. obj = globallist_next(obj);
  3247. for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
  3248. if (obj->marker)
  3249. continue;
  3250. res = symlook_obj(&req, obj);
  3251. if (res == 0) {
  3252. if (def == NULL ||
  3253. ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
  3254. def = req.sym_out;
  3255. defobj = req.defobj_out;
  3256. if (ELF_ST_BIND(def->st_info) != STB_WEAK)
  3257. break;
  3258. }
  3259. }
  3260. }
  3261. /*
  3262. * Search the dynamic linker itself, and possibly resolve the
  3263. * symbol from there. This is how the application links to
  3264. * dynamic linker services such as dlopen.
  3265. */
  3266. if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
  3267. res = symlook_obj(&req, &obj_rtld);
  3268. if (res == 0) {
  3269. def = req.sym_out;
  3270. defobj = req.defobj_out;
  3271. }
  3272. }
  3273. } else {
  3274. assert(handle == RTLD_DEFAULT);
  3275. res = symlook_default(&req, obj);
  3276. if (res == 0) {
  3277. defobj = req.defobj_out;
  3278. def = req.sym_out;
  3279. }
  3280. }
  3281. } else {
  3282. if ((obj = dlcheck(handle)) == NULL) {
  3283. lock_release(rtld_bind_lock, &lockstate);
  3284. LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
  3285. return NULL;
  3286. }
  3287. donelist_init(&donelist);
  3288. if (obj->mainprog) {
  3289. /* Handle obtained by dlopen(NULL, ...) implies global scope. */
  3290. res = symlook_global(&req, &donelist);
  3291. if (res == 0) {
  3292. def = req.sym_out;
  3293. defobj = req.defobj_out;
  3294. }
  3295. /*
  3296. * Search the dynamic linker itself, and possibly resolve the
  3297. * symbol from there. This is how the application links to
  3298. * dynamic linker services such as dlopen.
  3299. */
  3300. if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
  3301. res = symlook_obj(&req, &obj_rtld);
  3302. if (res == 0) {
  3303. def = req.sym_out;
  3304. defobj = req.defobj_out;
  3305. }
  3306. }
  3307. }
  3308. else {
  3309. /* Search the whole DAG rooted at the given object. */
  3310. res = symlook_list(&req, &obj->dagmembers, &donelist);
  3311. if (res == 0) {
  3312. def = req.sym_out;
  3313. defobj = req.defobj_out;
  3314. }
  3315. }
  3316. }
  3317. if (def != NULL) {
  3318. lock_release(rtld_bind_lock, &lockstate);
  3319. /*
  3320. * The value required by the caller is derived from the value
  3321. * of the symbol. this is simply the relocated value of the
  3322. * symbol.
  3323. */
  3324. if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
  3325. sym = make_function_pointer(def, defobj);
  3326. else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
  3327. sym = rtld_resolve_ifunc(defobj, def);
  3328. else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
  3329. ti.ti_module = defobj->tlsindex;
  3330. ti.ti_offset = def->st_value;
  3331. sym = __tls_get_addr(&ti);
  3332. } else
  3333. sym = defobj->relocbase + def->st_value;
  3334. LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name);
  3335. return (sym);
  3336. }
  3337. _rtld_error("Undefined symbol \"%s%s%s\"", name, ve != NULL ? "@" : "",
  3338. ve != NULL ? ve->name : "");
  3339. lock_release(rtld_bind_lock, &lockstate);
  3340. LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
  3341. return NULL;
  3342. }
  3343. void *
  3344. dlsym(void *handle, const char *name)
  3345. {
  3346. return do_dlsym(handle, name, __builtin_return_address(0), NULL,
  3347. SYMLOOK_DLSYM);
  3348. }
  3349. dlfunc_t
  3350. dlfunc(void *handle, const char *name)
  3351. {
  3352. union {
  3353. void *d;
  3354. dlfunc_t f;
  3355. } rv;
  3356. rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
  3357. SYMLOOK_DLSYM);
  3358. return (rv.f);
  3359. }
  3360. void *
  3361. dlvsym(void *handle, const char *name, const char *version)
  3362. {
  3363. Ver_Entry ventry;
  3364. ventry.name = version;
  3365. ventry.file = NULL;
  3366. ventry.hash = elf_hash(version);
  3367. ventry.flags= 0;
  3368. return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
  3369. SYMLOOK_DLSYM);
  3370. }
  3371. int
  3372. _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
  3373. {
  3374. const Obj_Entry *obj;
  3375. RtldLockState lockstate;
  3376. rlock_acquire(rtld_bind_lock, &lockstate);
  3377. obj = obj_from_addr(addr);
  3378. if (obj == NULL) {
  3379. _rtld_error("No shared object contains address");
  3380. lock_release(rtld_bind_lock, &lockstate);
  3381. return (0);
  3382. }
  3383. rtld_fill_dl_phdr_info(obj, phdr_info);
  3384. lock_release(rtld_bind_lock, &lockstate);
  3385. return (1);
  3386. }
  3387. int
  3388. dladdr(const void *addr, Dl_info *info)
  3389. {
  3390. const Obj_Entry *obj;
  3391. const Elf_Sym *def;
  3392. void *symbol_addr;
  3393. unsigned long symoffset;
  3394. RtldLockState lockstate;
  3395. rlock_acquire(rtld_bind_lock, &lockstate);
  3396. obj = obj_from_addr(addr);
  3397. if (obj == NULL) {
  3398. _rtld_error("No shared object contains address");
  3399. lock_release(rtld_bind_lock, &lockstate);
  3400. return 0;
  3401. }
  3402. info->dli_fname = obj->path;
  3403. info->dli_fbase = obj->mapbase;
  3404. info->dli_saddr = (void *)0;
  3405. info->dli_sname = NULL;
  3406. /*
  3407. * Walk the symbol list looking for the symbol whose address is
  3408. * closest to the address sent in.
  3409. */
  3410. for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
  3411. def = obj->symtab + symoffset;
  3412. /*
  3413. * For skip the symbol if st_shndx is either SHN_UNDEF or
  3414. * SHN_COMMON.
  3415. */
  3416. if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
  3417. continue;
  3418. /*
  3419. * If the symbol is greater than the specified address, or if it
  3420. * is further away from addr than the current nearest symbol,
  3421. * then reject it.
  3422. */
  3423. symbol_addr = obj->relocbase + def->st_value;
  3424. if (symbol_addr > addr || symbol_addr < info->dli_saddr)
  3425. continue;
  3426. /* Update our idea of the nearest symbol. */
  3427. info->dli_sname = obj->strtab + def->st_name;
  3428. info->dli_saddr = symbol_addr;
  3429. /* Exact match? */
  3430. if (info->dli_saddr == addr)
  3431. break;
  3432. }
  3433. lock_release(rtld_bind_lock, &lockstate);
  3434. return 1;
  3435. }
  3436. int
  3437. dlinfo(void *handle, int request, void *p)
  3438. {
  3439. const Obj_Entry *obj;
  3440. RtldLockState lockstate;
  3441. int error;
  3442. rlock_acquire(rtld_bind_lock, &lockstate);
  3443. if (handle == NULL || handle == RTLD_SELF) {
  3444. void *retaddr;
  3445. retaddr = __builtin_return_address(0); /* __GNUC__ only */
  3446. if ((obj = obj_from_addr(retaddr)) == NULL)
  3447. _rtld_error("Cannot determine caller's shared object");
  3448. } else
  3449. obj = dlcheck(handle);
  3450. if (obj == NULL) {
  3451. lock_release(rtld_bind_lock, &lockstate);
  3452. return (-1);
  3453. }
  3454. error = 0;
  3455. switch (request) {
  3456. case RTLD_DI_LINKMAP:
  3457. *((struct link_map const **)p) = &obj->linkmap;
  3458. break;
  3459. case RTLD_DI_ORIGIN:
  3460. error = rtld_dirname(obj->path, p);
  3461. break;
  3462. case RTLD_DI_SERINFOSIZE:
  3463. case RTLD_DI_SERINFO:
  3464. error = do_search_info(obj, request, (struct dl_serinfo *)p);
  3465. break;
  3466. default:
  3467. _rtld_error("Invalid request %d passed to dlinfo()", request);
  3468. error = -1;
  3469. }
  3470. lock_release(rtld_bind_lock, &lockstate);
  3471. return (error);
  3472. }
/*
 * Fill a dl_phdr_info record describing "obj" for a dl_iterate_phdr()
 * callback.
 */
static void
rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
{
	phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
	phdr_info->dlpi_name = obj->path;
	phdr_info->dlpi_phdr = obj->phdr;
	phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
	phdr_info->dlpi_tls_modid = obj->tlsindex;
	phdr_info->dlpi_tls_data = obj->tlsinit;
	/*
	 * Running totals: dlpi_adds counts every load ever done and
	 * dlpi_subs the unloads, letting callers detect list changes
	 * between iterations.
	 */
	phdr_info->dlpi_adds = obj_loads;
	phdr_info->dlpi_subs = obj_loads - obj_count;
}
/*
 * dl_iterate_phdr(3): invoke "callback" once per loaded object (and
 * finally for rtld itself), stopping early if the callback returns
 * non-zero.  The bind lock is dropped around each callback so the
 * callback may itself use rtld services.
 */
int
dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
{
	struct dl_phdr_info phdr_info;
	Obj_Entry *obj, marker;
	RtldLockState bind_lockstate, phdr_lockstate;
	int error;

	init_marker(&marker);
	error = 0;

	wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
	wlock_acquire(rtld_bind_lock, &bind_lockstate);
	for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) {
		/*
		 * The marker keeps our position in obj_list while the
		 * bind lock is dropped; hold_object() keeps the current
		 * object alive across the callback.
		 */
		TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next);
		rtld_fill_dl_phdr_info(obj, &phdr_info);
		hold_object(obj);
		lock_release(rtld_bind_lock, &bind_lockstate);

		error = callback(&phdr_info, sizeof phdr_info, param);

		wlock_acquire(rtld_bind_lock, &bind_lockstate);
		unhold_object(obj);
		obj = globallist_next(&marker);
		TAILQ_REMOVE(&obj_list, &marker, next);
		if (error != 0) {
			lock_release(rtld_bind_lock, &bind_lockstate);
			lock_release(rtld_phdr_lock, &phdr_lockstate);
			return (error);
		}
	}

	/* Finally, report rtld itself, which is not on obj_list. */
	if (error == 0) {
		rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info);
		lock_release(rtld_bind_lock, &bind_lockstate);
		error = callback(&phdr_info, sizeof(phdr_info), param);
	}
	lock_release(rtld_phdr_lock, &phdr_lockstate);
	return (error);
}
  3520. static void *
  3521. fill_search_info(const char *dir, size_t dirlen, void *param)
  3522. {
  3523. struct fill_search_info_args *arg;
  3524. arg = param;
  3525. if (arg->request == RTLD_DI_SERINFOSIZE) {
  3526. arg->serinfo->dls_cnt ++;
  3527. arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
  3528. } else {
  3529. struct dl_serpath *s_entry;
  3530. s_entry = arg->serpath;
  3531. s_entry->dls_name = arg->strspace;
  3532. s_entry->dls_flags = arg->flags;
  3533. strncpy(arg->strspace, dir, dirlen);
  3534. arg->strspace[dirlen] = '\0';
  3535. arg->strspace += dirlen + 1;
  3536. arg->serpath++;
  3537. }
  3538. return (NULL);
  3539. }
/*
 * Backend for dlinfo(RTLD_DI_SERINFOSIZE / RTLD_DI_SERINFO): enumerate
 * the library search path that applies to "obj" (rpath, LD_LIBRARY_PATH,
 * runpath, hints, default path).  A sizing pass is always performed
 * first so the caller-supplied buffer can be validated.
 */
static int
do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
{
	struct dl_serinfo _info;
	struct fill_search_info_args args;

	args.request = RTLD_DI_SERINFOSIZE;
	args.serinfo = &_info;

	_info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
	_info.dls_cnt = 0;

	path_enumerate(obj->rpath, fill_search_info, NULL, &args);
	path_enumerate(ld_library_path, fill_search_info, NULL, &args);
	path_enumerate(obj->runpath, fill_search_info, NULL, &args);
	path_enumerate(gethints(obj->z_nodeflib), fill_search_info, NULL,
	    &args);
	if (!obj->z_nodeflib)
		path_enumerate(ld_standard_library_path, fill_search_info,
		    NULL, &args);

	if (request == RTLD_DI_SERINFOSIZE) {
		info->dls_size = _info.dls_size;
		info->dls_cnt = _info.dls_cnt;
		return (0);
	}

	/*
	 * The caller must have sized the buffer with a prior
	 * RTLD_DI_SERINFOSIZE request.
	 */
	if (info->dls_cnt != _info.dls_cnt ||
	    info->dls_size != _info.dls_size) {
		_rtld_error(
		    "Uninitialized Dl_serinfo struct passed to dlinfo()");
		return (-1);
	}

	args.request = RTLD_DI_SERINFO;
	args.serinfo = info;
	args.serpath = &info->dls_serpath[0];
	/* Path strings are packed after the array of dl_serpath entries. */
	args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];

	args.flags = LA_SER_RUNPATH;
	if (path_enumerate(obj->rpath, fill_search_info, NULL, &args) != NULL)
		return (-1);

	args.flags = LA_SER_LIBPATH;
	if (path_enumerate(ld_library_path, fill_search_info, NULL, &args) !=
	    NULL)
		return (-1);

	args.flags = LA_SER_RUNPATH;
	if (path_enumerate(obj->runpath, fill_search_info, NULL, &args) !=
	    NULL)
		return (-1);

	args.flags = LA_SER_CONFIG;
	if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, NULL,
	    &args) != NULL)
		return (-1);

	args.flags = LA_SER_DEFAULT;
	if (!obj->z_nodeflib && path_enumerate(ld_standard_library_path,
	    fill_search_info, NULL, &args) != NULL)
		return (-1);
	return (0);
}
  3587. static int
  3588. rtld_dirname(const char *path, char *bname)
  3589. {
  3590. const char *endp;
  3591. /* Empty or NULL string gets treated as "." */
  3592. if (path == NULL || *path == '\0') {
  3593. bname[0] = '.';
  3594. bname[1] = '\0';
  3595. return (0);
  3596. }
  3597. /* Strip trailing slashes */
  3598. endp = path + strlen(path) - 1;
  3599. while (endp > path && *endp == '/')
  3600. endp--;
  3601. /* Find the start of the dir */
  3602. while (endp > path && *endp != '/')
  3603. endp--;
  3604. /* Either the dir is "/" or there are no slashes */
  3605. if (endp == path) {
  3606. bname[0] = *endp == '/' ? '/' : '.';
  3607. bname[1] = '\0';
  3608. return (0);
  3609. } else {
  3610. do {
  3611. endp--;
  3612. } while (endp > path && *endp == '/');
  3613. }
  3614. if (endp - path + 2 > PATH_MAX)
  3615. {
  3616. _rtld_error("Filename is too long: %s", path);
  3617. return(-1);
  3618. }
  3619. strncpy(bname, path, endp - path + 1);
  3620. bname[endp - path + 1] = '\0';
  3621. return (0);
  3622. }
  3623. static int
  3624. rtld_dirname_abs(const char *path, char *base)
  3625. {
  3626. char *last;
  3627. if (realpath(path, base) == NULL) {
  3628. _rtld_error("realpath \"%s\" failed (%s)", path,
  3629. rtld_strerror(errno));
  3630. return (-1);
  3631. }
  3632. dbg("%s -> %s", path, base);
  3633. last = strrchr(base, '/');
  3634. if (last == NULL) {
  3635. _rtld_error("non-abs result from realpath \"%s\"", path);
  3636. return (-1);
  3637. }
  3638. if (last != base)
  3639. *last = '\0';
  3640. return (0);
  3641. }
/*
 * Publish "obj" on the r_debug link-map list that debuggers walk.  The
 * entry for rtld itself is kept at the tail of the list.
 */
static void
linkmap_add(Obj_Entry *obj)
{
	struct link_map *l, *prev;

	l = &obj->linkmap;
	l->l_name = obj->path;
	l->l_base = obj->mapbase;
	l->l_ld = obj->dynamic;
	l->l_addr = obj->relocbase;

	if (r_debug.r_map == NULL) {
		r_debug.r_map = l;
		return;
	}

	/*
	 * Scan to the end of the list, but not past the entry for the
	 * dynamic linker, which we want to keep at the very end.
	 */
	for (prev = r_debug.r_map;
	    prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
	    prev = prev->l_next)
		;

	/* Link in the new entry. */
	l->l_prev = prev;
	l->l_next = prev->l_next;
	if (l->l_next != NULL)
		l->l_next->l_prev = l;
	prev->l_next = l;
}
  3670. static void
  3671. linkmap_delete(Obj_Entry *obj)
  3672. {
  3673. struct link_map *l;
  3674. l = &obj->linkmap;
  3675. if (l->l_prev == NULL) {
  3676. if ((r_debug.r_map = l->l_next) != NULL)
  3677. l->l_next->l_prev = NULL;
  3678. return;
  3679. }
  3680. if ((l->l_prev->l_next = l->l_next) != NULL)
  3681. l->l_next->l_prev = l->l_prev;
  3682. }
  3683. /*
  3684. * Function for the debugger to set a breakpoint on to gain control.
  3685. *
  3686. * The two parameters allow the debugger to easily find and determine
  3687. * what the runtime loader is doing and to whom it is doing it.
  3688. *
  3689. * When the loadhook trap is hit (r_debug_state, set at program
  3690. * initialization), the arguments can be found on the stack:
  3691. *
  3692. * +8 struct link_map *m
  3693. * +4 struct r_debug *rd
  3694. * +0 RetAddr
  3695. */
void
r_debug_state(struct r_debug* rd __unused, struct link_map *m __unused)
{
	/*
	 * The following is a hack to force the compiler to emit calls to
	 * this function, even when optimizing. If the function is empty,
	 * the compiler is not obliged to emit any code for calls to it,
	 * even when marked __noinline. However, gdb depends on those
	 * calls being made.
	 */
	__compiler_membar();
}
  3708. /*
  3709. * A function called after init routines have completed. This can be used to
  3710. * break before a program's entry routine is called, and can be used when
  3711. * main is not available in the symbol table.
  3712. */
void
_r_debug_postinit(struct link_map *m __unused)
{
	/* See r_debug_state(): forces the call to survive optimization. */
	__compiler_membar();
}
/*
 * Unmap and free "obj", unless it is still held (a dl_iterate_phdr()
 * callback may be running with the bind lock dropped); in that case
 * mark it so the final unhold_object() frees it instead.
 */
static void
release_object(Obj_Entry *obj)
{
	if (obj->holdcount > 0) {
		obj->unholdfree = true;
		return;
	}
	munmap(obj->mapbase, obj->mapsize);
	linkmap_delete(obj);	/* drop from the debugger's link map */
	obj_free(obj);
}
  3730. /*
  3731. * Get address of the pointer variable in the main program.
  3732. * Prefer non-weak symbol over the weak one.
  3733. */
  3734. static const void **
  3735. get_program_var_addr(const char *name, RtldLockState *lockstate)
  3736. {
  3737. SymLook req;
  3738. DoneList donelist;
  3739. symlook_init(&req, name);
  3740. req.lockstate = lockstate;
  3741. donelist_init(&donelist);
  3742. if (symlook_global(&req, &donelist) != 0)
  3743. return (NULL);
  3744. if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
  3745. return ((const void **)make_function_pointer(req.sym_out,
  3746. req.defobj_out));
  3747. else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
  3748. return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
  3749. else
  3750. return ((const void **)(req.defobj_out->relocbase +
  3751. req.sym_out->st_value));
  3752. }
  3753. /*
  3754. * Set a pointer variable in the main program to the given value. This
  3755. * is used to set key variables such as "environ" before any of the
  3756. * init functions are called.
  3757. */
  3758. static void
  3759. set_program_var(const char *name, const void *value)
  3760. {
  3761. const void **addr;
  3762. if ((addr = get_program_var_addr(name, NULL)) != NULL) {
  3763. dbg("\"%s\": *%p <-- %p", name, addr, value);
  3764. *addr = value;
  3765. }
  3766. }
  3767. /*
  3768. * Search the global objects, including dependencies and main object,
  3769. * for the given symbol.
  3770. */
/*
 * Search the global objects (startup objects plus RTLD_GLOBAL DAGs)
 * for req->name.  A weak definition already in *req may be superseded
 * by a non-weak one found here.  Returns 0 on success, ESRCH if no
 * definition was found.
 */
static int
symlook_global(SymLook *req, DoneList *donelist)
{
	SymLook req1;
	const Objlist_Entry *elm;
	int res;

	symlook_init_from_req(&req1, req);

	/* Search all objects loaded at program start up. */
	if (req->defobj_out == NULL ||
	    ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
		res = symlook_list(&req1, &list_main, donelist);
		/* Prefer a non-weak result over a previous weak one. */
		if (res == 0 && (req->defobj_out == NULL ||
		    ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
			req->sym_out = req1.sym_out;
			req->defobj_out = req1.defobj_out;
			assert(req->defobj_out != NULL);
		}
	}

	/* Search all DAGs whose roots are RTLD_GLOBAL objects. */
	STAILQ_FOREACH(elm, &list_global, link) {
		/* A non-weak match ends the search. */
		if (req->defobj_out != NULL &&
		    ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
			break;
		res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
		if (res == 0 && (req->defobj_out == NULL ||
		    ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
			req->sym_out = req1.sym_out;
			req->defobj_out = req1.defobj_out;
			assert(req->defobj_out != NULL);
		}
	}

	return (req->sym_out != NULL ? 0 : ESRCH);
}
  3804. /*
  3805. * Given a symbol name in a referencing object, find the corresponding
  3806. * definition of the symbol. Returns a pointer to the symbol, or NULL if
  3807. * no definition was found. Returns a pointer to the Obj_Entry of the
  3808. * defining object via the reference parameter DEFOBJ_OUT.
  3809. */
/*
 * Default symbol resolution order for a reference made by "refobj":
 * the referencing object itself (if -Bsymbolic or the symbol is
 * protected), then the global objects, then the dlopened DAGs that
 * contain refobj, and finally rtld itself.  Weak definitions may be
 * superseded by later non-weak ones at every stage.
 */
static int
symlook_default(SymLook *req, const Obj_Entry *refobj)
{
	DoneList donelist;
	const Objlist_Entry *elm;
	SymLook req1;
	int res;

	donelist_init(&donelist);
	symlook_init_from_req(&req1, req);

	/*
	 * Look first in the referencing object if linked symbolically,
	 * and similarly handle protected symbols.
	 */
	res = symlook_obj(&req1, refobj);
	if (res == 0 && (refobj->symbolic ||
	    ELF_ST_VISIBILITY(req1.sym_out->st_other) == STV_PROTECTED)) {
		req->sym_out = req1.sym_out;
		req->defobj_out = req1.defobj_out;
		assert(req->defobj_out != NULL);
	}
	/* Mark refobj as visited so it is not searched again below. */
	if (refobj->symbolic || req->defobj_out != NULL)
		donelist_check(&donelist, refobj);

	symlook_global(req, &donelist);

	/* Search all dlopened DAGs containing the referencing object. */
	STAILQ_FOREACH(elm, &refobj->dldags, link) {
		if (req->sym_out != NULL &&
		    ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
			break;
		res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
		if (res == 0 && (req->sym_out == NULL ||
		    ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
			req->sym_out = req1.sym_out;
			req->defobj_out = req1.defobj_out;
			assert(req->defobj_out != NULL);
		}
	}

	/*
	 * Search the dynamic linker itself, and possibly resolve the
	 * symbol from there. This is how the application links to
	 * dynamic linker services such as dlopen.
	 */
	if (req->sym_out == NULL ||
	    ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
		res = symlook_obj(&req1, &obj_rtld);
		if (res == 0) {
			req->sym_out = req1.sym_out;
			req->defobj_out = req1.defobj_out;
			assert(req->defobj_out != NULL);
		}
	}

	return (req->sym_out != NULL ? 0 : ESRCH);
}
/*
 * Search every object on "objlist" (skipping ones already visited per
 * "dlp") for the requested symbol.  The first strong definition wins;
 * otherwise the first weak one found is returned.  Returns 0 on
 * success, ESRCH if nothing matched.
 */
static int
symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
{
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	const Objlist_Entry *elm;
	SymLook req1;
	int res;

	def = NULL;
	defobj = NULL;
	STAILQ_FOREACH(elm, objlist, link) {
		if (donelist_check(dlp, elm->obj))
			continue;
		symlook_init_from_req(&req1, req);
		if ((res = symlook_obj(&req1, elm->obj)) == 0) {
			/* Keep the first weak hit; replace it with strong. */
			if (def == NULL ||
			    ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
				def = req1.sym_out;
				defobj = req1.defobj_out;
				/* A strong definition ends the scan. */
				if (ELF_ST_BIND(def->st_info) != STB_WEAK)
					break;
			}
		}
	}
	if (def != NULL) {
		req->sym_out = def;
		req->defobj_out = defobj;
		return (0);
	}
	return (ESRCH);
}
  3892. /*
  3893. * Search the chain of DAGS cointed to by the given Needed_Entry
  3894. * for a symbol of the given name. Each DAG is scanned completely
  3895. * before advancing to the next one. Returns a pointer to the symbol,
  3896. * or NULL if no definition was found.
  3897. */
/*
 * Walk the chain of Needed_Entry records, searching each entry's whole
 * DAG in turn for the requested symbol.  Strong definitions terminate
 * the walk; a weak one is kept as a fallback.  Returns 0 on success,
 * ESRCH otherwise.
 */
static int
symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
{
	const Elf_Sym *def;
	const Needed_Entry *n;
	const Obj_Entry *defobj;
	SymLook req1;
	int res;

	def = NULL;
	defobj = NULL;
	symlook_init_from_req(&req1, req);
	for (n = needed; n != NULL; n = n->next) {
		/* Skip unloaded entries and DAGs without a match. */
		if (n->obj == NULL ||
		    (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
			continue;
		if (def == NULL ||
		    ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
			def = req1.sym_out;
			defobj = req1.defobj_out;
			if (ELF_ST_BIND(def->st_info) != STB_WEAK)
				break;
		}
	}
	if (def != NULL) {
		req->sym_out = def;
		req->defobj_out = defobj;
		return (0);
	}
	return (ESRCH);
}
  3927. /*
  3928. * Search the symbol table of a single shared object for a symbol of
  3929. * the given name and version, if requested. Returns a pointer to the
  3930. * symbol, or NULL if no definition was found. If the object is
  3931. * filter, return filtered symbol from filtee.
  3932. *
  3933. * The symbol's hash value is passed in for efficiency reasons; that
  3934. * eliminates many recomputations of the hash value.
  3935. */
int
symlook_obj(SymLook *req, const Obj_Entry *obj)
{
	DoneList donelist;
	SymLook req1;
	int flags, res, mres;

	/*
	 * If there is at least one valid hash at this point, we prefer to
	 * use the faster GNU version if available.
	 */
	if (obj->valid_hash_gnu)
		mres = symlook_obj1_gnu(req, obj);
	else if (obj->valid_hash_sysv)
		mres = symlook_obj1_sysv(req, obj);
	else
		return (EINVAL);	/* no usable hash table */

	if (mres == 0) {
		if (obj->needed_filtees != NULL) {
			/*
			 * Standard filter object: the definition must
			 * always come from the filtees, so return the
			 * filtee lookup result unconditionally.
			 */
			flags = (req->flags & SYMLOOK_EARLY) ?
			    RTLD_LO_EARLY : 0;
			load_filtees(__DECONST(Obj_Entry *, obj), flags,
			    req->lockstate);
			donelist_init(&donelist);
			symlook_init_from_req(&req1, req);
			res = symlook_needed(&req1, obj->needed_filtees,
			    &donelist);
			if (res == 0) {
				req->sym_out = req1.sym_out;
				req->defobj_out = req1.defobj_out;
			}
			return (res);
		}
		if (obj->needed_aux_filtees != NULL) {
			/*
			 * Auxiliary filter: prefer the filtee's
			 * definition, but fall back to this object's own
			 * symbol (mres) when the filtees lack it.
			 */
			flags = (req->flags & SYMLOOK_EARLY) ?
			    RTLD_LO_EARLY : 0;
			load_filtees(__DECONST(Obj_Entry *, obj), flags,
			    req->lockstate);
			donelist_init(&donelist);
			symlook_init_from_req(&req1, req);
			res = symlook_needed(&req1, obj->needed_aux_filtees,
			    &donelist);
			if (res == 0) {
				req->sym_out = req1.sym_out;
				req->defobj_out = req1.defobj_out;
				return (res);
			}
		}
	}
	return (mres);
}
  3980. /* Symbol match routine common to both hash functions */
  3981. static bool
  3982. matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
  3983. const unsigned long symnum)
  3984. {
  3985. Elf_Versym verndx;
  3986. const Elf_Sym *symp;
  3987. const char *strp;
  3988. symp = obj->symtab + symnum;
  3989. strp = obj->strtab + symp->st_name;
  3990. switch (ELF_ST_TYPE(symp->st_info)) {
  3991. case STT_FUNC:
  3992. case STT_NOTYPE:
  3993. case STT_OBJECT:
  3994. case STT_COMMON:
  3995. case STT_GNU_IFUNC:
  3996. if (symp->st_value == 0)
  3997. return (false);
  3998. /* fallthrough */
  3999. case STT_TLS:
  4000. if (symp->st_shndx != SHN_UNDEF)
  4001. break;
  4002. #ifndef __mips__
  4003. else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
  4004. (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
  4005. break;
  4006. #endif
  4007. /* fallthrough */
  4008. default:
  4009. return (false);
  4010. }
  4011. if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
  4012. return (false);
  4013. if (req->ventry == NULL) {
  4014. if (obj->versyms != NULL) {
  4015. verndx = VER_NDX(obj->versyms[symnum]);
  4016. if (verndx > obj->vernum) {
  4017. _rtld_error(
  4018. "%s: symbol %s references wrong version %d",
  4019. obj->path, obj->strtab + symnum, verndx);
  4020. return (false);
  4021. }
  4022. /*
  4023. * If we are not called from dlsym (i.e. this
  4024. * is a normal relocation from unversioned
  4025. * binary), accept the symbol immediately if
  4026. * it happens to have first version after this
  4027. * shared object became versioned. Otherwise,
  4028. * if symbol is versioned and not hidden,
  4029. * remember it. If it is the only symbol with
  4030. * this name exported by the shared object, it
  4031. * will be returned as a match by the calling
  4032. * function. If symbol is global (verndx < 2)
  4033. * accept it unconditionally.
  4034. */
  4035. if ((req->flags & SYMLOOK_DLSYM) == 0 &&
  4036. verndx == VER_NDX_GIVEN) {
  4037. result->sym_out = symp;
  4038. return (true);
  4039. }
  4040. else if (verndx >= VER_NDX_GIVEN) {
  4041. if ((obj->versyms[symnum] & VER_NDX_HIDDEN)
  4042. == 0) {
  4043. if (result->vsymp == NULL)
  4044. result->vsymp = symp;
  4045. result->vcount++;
  4046. }
  4047. return (false);
  4048. }
  4049. }
  4050. result->sym_out = symp;
  4051. return (true);
  4052. }
  4053. if (obj->versyms == NULL) {
  4054. if (object_match_name(obj, req->ventry->name)) {
  4055. _rtld_error("%s: object %s should provide version %s "
  4056. "for symbol %s", obj_rtld.path, obj->path,
  4057. req->ventry->name, obj->strtab + symnum);
  4058. return (false);
  4059. }
  4060. } else {
  4061. verndx = VER_NDX(obj->versyms[symnum]);
  4062. if (verndx > obj->vernum) {
  4063. _rtld_error("%s: symbol %s references wrong version %d",
  4064. obj->path, obj->strtab + symnum, verndx);
  4065. return (false);
  4066. }
  4067. if (obj->vertab[verndx].hash != req->ventry->hash ||
  4068. strcmp(obj->vertab[verndx].name, req->ventry->name)) {
  4069. /*
  4070. * Version does not match. Look if this is a
  4071. * global symbol and if it is not hidden. If
  4072. * global symbol (verndx < 2) is available,
  4073. * use it. Do not return symbol if we are
  4074. * called by dlvsym, because dlvsym looks for
  4075. * a specific version and default one is not
  4076. * what dlvsym wants.
  4077. */
  4078. if ((req->flags & SYMLOOK_DLSYM) ||
  4079. (verndx >= VER_NDX_GIVEN) ||
  4080. (obj->versyms[symnum] & VER_NDX_HIDDEN))
  4081. return (false);
  4082. }
  4083. }
  4084. result->sym_out = symp;
  4085. return (true);
  4086. }
  4087. /*
  4088. * Search for symbol using SysV hash function.
  4089. * obj->buckets is known not to be NULL at this point; the test for this was
  4090. * performed with the obj->valid_hash_sysv assignment.
  4091. */
static int
symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
{
	unsigned long symnum;
	Sym_Match_Result matchres;

	matchres.sym_out = NULL;
	matchres.vsymp = NULL;
	matchres.vcount = 0;

	/* Walk the hash chain for the bucket selected by req->hash. */
	for (symnum = obj->buckets[req->hash % obj->nbuckets];
	    symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
		if (symnum >= obj->nchains)
			return (ESRCH);	/* Bad object */

		if (matched_symbol(req, obj, &matchres, symnum)) {
			req->sym_out = matchres.sym_out;
			req->defobj_out = obj;
			return (0);
		}
	}
	/*
	 * No exact match, but matched_symbol() recorded exactly one
	 * non-hidden versioned candidate: accept it as the default.
	 */
	if (matchres.vcount == 1) {
		req->sym_out = matchres.vsymp;
		req->defobj_out = obj;
		return (0);
	}
	return (ESRCH);
}
/* Search for symbol using GNU hash function */
static int
symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
{
	Elf_Addr bloom_word;
	const Elf32_Word *hashval;
	Elf32_Word bucket;
	Sym_Match_Result matchres;
	unsigned int h1, h2;
	unsigned long symnum;

	matchres.sym_out = NULL;
	matchres.vsymp = NULL;
	matchres.vcount = 0;

	/* Pick right bitmask word from Bloom filter array */
	bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
	    obj->maskwords_bm_gnu];

	/* Calculate modulus word size of gnu hash and its derivative */
	h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
	h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));

	/* Filter out the "definitely not in set" queries */
	if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
		return (ESRCH);

	/* Locate hash chain and corresponding value element */
	bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
	if (bucket == 0)
		return (ESRCH);
	hashval = &obj->chain_zero_gnu[bucket];
	/*
	 * Chain entries store the hash with the low bit repurposed as an
	 * end-of-chain flag, hence the >> 1 comparison and the & 1 test.
	 */
	do {
		if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
			symnum = hashval - obj->chain_zero_gnu;
			if (matched_symbol(req, obj, &matchres, symnum)) {
				req->sym_out = matchres.sym_out;
				req->defobj_out = obj;
				return (0);
			}
		}
	} while ((*hashval++ & 1) == 0);

	/* Accept a uniquely-matching versioned candidate as default. */
	if (matchres.vcount == 1) {
		req->sym_out = matchres.vsymp;
		req->defobj_out = obj;
		return (0);
	}
	return (ESRCH);
}
/*
 * Implement "ldd"-style output (LD_TRACE_LOADED_OBJECTS).  The line
 * format for each needed object is taken from the
 * LD_TRACE_LOADED_OBJECTS_FMT1/FMT2 environment variables and supports
 * a small set of backslash and % escapes interpreted below.
 */
static void
trace_loaded_objects(Obj_Entry *obj)
{
	const char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
	int c;

	if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) ==
	    NULL)
		main_local = "";

	if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL)
		fmt1 = "\t%o => %p (%x)\n";

	if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL)
		fmt2 = "\t%o (%x)\n";

	list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));

	for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
		Needed_Entry *needed;
		const char *name, *path;
		bool is_lib;

		if (obj->marker)
			continue;
		if (list_containers && obj->needed != NULL)
			rtld_printf("%s:\n", obj->path);
		for (needed = obj->needed; needed; needed = needed->next) {
			if (needed->obj != NULL) {
				/* Print each object only once by default. */
				if (needed->obj->traced && !list_containers)
					continue;
				needed->obj->traced = true;
				path = needed->obj->path;
			} else
				path = "not found";

			name = obj->strtab + needed->name;
			is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */

			/* Interpret the chosen format string. */
			fmt = is_lib ? fmt1 : fmt2;
			while ((c = *fmt++) != '\0') {
				switch (c) {
				default:
					rtld_putchar(c);
					continue;
				case '\\':
					switch (c = *fmt) {
					case '\0':
						continue;
					case 'n':
						rtld_putchar('\n');
						break;
					case 't':
						rtld_putchar('\t');
						break;
					}
					break;
				case '%':
					switch (c = *fmt) {
					case '\0':
						continue;
					case '%':
					default:
						rtld_putchar(c);
						break;
					case 'A':
						rtld_putstr(main_local);
						break;
					case 'a':
						rtld_putstr(obj_main->path);
						break;
					case 'o':
						rtld_putstr(name);
						break;
#if 0
					case 'm':
						rtld_printf("%d",
						    sodp->sod_major);
						break;
					case 'n':
						rtld_printf("%d",
						    sodp->sod_minor);
						break;
#endif
					case 'p':
						rtld_putstr(path);
						break;
					case 'x':
						rtld_printf("%p",
						    needed->obj ?
						    needed->obj->mapbase : 0);
						break;
					}
					break;
				}
				++fmt;
			}
		}
	}
}
  4249. /*
  4250. * Unload a dlopened object and its dependencies from memory and from
  4251. * our data structures. It is assumed that the DAG rooted in the
  4252. * object has already been unreferenced, and that the object has a
  4253. * reference count of 0.
  4254. */
static void
unload_object(Obj_Entry *root, RtldLockState *lockstate)
{
	Obj_Entry marker, *obj, *next;

	assert(root->refcount == 0);

	/*
	 * Pass over the DAG removing unreferenced objects from
	 * appropriate lists.
	 */
	unlink_object(root);

	/* Unmap all objects that are no longer referenced. */
	for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) {
		next = TAILQ_NEXT(obj, next);
		if (obj->marker || obj->refcount != 0)
			continue;
		LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase,
		    obj->mapsize, 0, obj->path);
		dbg("unloading \"%s\"", obj->path);
		/*
		 * Unlink the object now to prevent new references from
		 * being acquired while the bind lock is dropped in
		 * recursive dlclose() invocations.
		 */
		TAILQ_REMOVE(&obj_list, obj, next);
		obj_count--;

		if (obj->filtees_loaded) {
			/*
			 * Use a marker to keep the iteration position
			 * stable across unload_filtees(), which may
			 * recurse and mutate obj_list.
			 */
			if (next != NULL) {
				init_marker(&marker);
				TAILQ_INSERT_BEFORE(next, &marker, next);
				unload_filtees(obj, lockstate);
				next = TAILQ_NEXT(&marker, next);
				TAILQ_REMOVE(&obj_list, &marker, next);
			} else
				unload_filtees(obj, lockstate);
		}
		release_object(obj);
	}
}
/*
 * Remove a no-longer-referenced object from the global list and from
 * every DAG it belongs to, recursing into DAG members whose reference
 * counts have also dropped to zero.
 */
static void
unlink_object(Obj_Entry *root)
{
	Objlist_Entry *elm;

	if (root->refcount == 0) {
		/* Remove the object from the RTLD_GLOBAL list. */
		objlist_remove(&list_global, root);

		/* Remove the object from all objects' DAG lists. */
		STAILQ_FOREACH(elm, &root->dagmembers, link) {
			objlist_remove(&elm->obj->dldags, root);
			if (elm->obj != root)
				unlink_object(elm->obj);
		}
	}
}
  4308. static void
  4309. ref_dag(Obj_Entry *root)
  4310. {
  4311. Objlist_Entry *elm;
  4312. assert(root->dag_inited);
  4313. STAILQ_FOREACH(elm, &root->dagmembers, link)
  4314. elm->obj->refcount++;
  4315. }
  4316. static void
  4317. unref_dag(Obj_Entry *root)
  4318. {
  4319. Objlist_Entry *elm;
  4320. assert(root->dag_inited);
  4321. STAILQ_FOREACH(elm, &root->dagmembers, link)
  4322. elm->obj->refcount--;
  4323. }
  4324. /*
  4325. * Common code for MD __tls_get_addr().
  4326. */
  4327. static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
static void *
tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
{
	Elf_Addr *newdtv, *dtv;
	RtldLockState lockstate;
	int to_copy;

	dtv = *dtvp;
	/* Check dtv generation in case new modules have arrived */
	if (dtv[0] != tls_dtv_generation) {
		wlock_acquire(rtld_bind_lock, &lockstate);
		/* dtv[0] holds the generation, dtv[1] the capacity. */
		newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
		to_copy = dtv[1];
		if (to_copy > tls_max_index)
			to_copy = tls_max_index;
		memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
		newdtv[0] = tls_dtv_generation;
		newdtv[1] = tls_max_index;
		free(dtv);
		lock_release(rtld_bind_lock, &lockstate);
		dtv = *dtvp = newdtv;
	}

	/* Dynamically allocate module TLS if necessary */
	if (dtv[index + 1] == 0) {
		/* Signal safe, wlock will block out signals. */
		wlock_acquire(rtld_bind_lock, &lockstate);
		/* Re-check under the lock to avoid a double allocation. */
		if (!dtv[index + 1])
			dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
		lock_release(rtld_bind_lock, &lockstate);
	}
	return ((void *)(dtv[index + 1] + offset));
}
/*
 * Common code for MD __tls_get_addr(): fast path inline, everything
 * else (stale DTV, unallocated module block) handled out of line.
 */
void *
tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
{
	Elf_Addr *dtv;

	dtv = *dtvp;
	/* Check dtv generation in case new modules have arrived */
	if (__predict_true(dtv[0] == tls_dtv_generation &&
	    dtv[index + 1] != 0))
		return ((void *)(dtv[index + 1] + offset));
	return (tls_get_addr_slow(dtvp, index, offset));
}
  4370. #if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
  4371. defined(__powerpc__) || defined(__riscv)
  4372. /*
  4373. * Return pointer to allocated TLS block
  4374. */
/*
 * Recover the start of the allocation that backs "tcb" (Variant I
 * layout).  The arithmetic must mirror allocate_tls(): the block is
 * [pre padding][extra tcb space][TCB][post padding][static TLS].
 */
static void *
get_tls_block_ptr(void *tcb, size_t tcbsize)
{
	size_t extra_size, post_size, pre_size, tls_block_size;
	size_t tls_init_align;

	tls_init_align = MAX(obj_main->tlsalign, 1);

	/* Compute fragments sizes. */
	extra_size = tcbsize - TLS_TCB_SIZE;
	post_size = calculate_tls_post_size(tls_init_align);
	tls_block_size = tcbsize + post_size;
	pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;

	return ((char *)tcb - pre_size - extra_size);
}
/*
 * Allocate Static TLS using the Variant I method.
 *
 * For details on the layout, see lib/libc/gen/tls.c.
 *
 * NB: rtld's tls_static_space variable includes TLS_TCB_SIZE and post_size as
 * it is based on tls_last_offset, and TLS offsets here are really TCB
 * offsets, whereas libc's tls_static_space is just the executable's static
 * TLS segment.
 *
 * 'objs' is the head of the object list whose static TLS images are
 * copied in; 'oldtcb', when non-NULL, is an existing TCB being
 * migrated into the new block.  Returns the TCB pointer.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    char *tls_block;
    Elf_Addr *dtv, **tcb;
    Elf_Addr addr;
    Elf_Addr i;
    size_t extra_size, maxalign, post_size, pre_size, tls_block_size;
    size_t tls_init_align, tls_init_offset;

    /* The old TCB can be reused as-is when it has the standard size. */
    if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
        return (oldtcb);

    assert(tcbsize >= TLS_TCB_SIZE);
    maxalign = MAX(tcbalign, tls_static_max_align);
    tls_init_align = MAX(obj_main->tlsalign, 1);

    /* Compute fragment sizes. */
    extra_size = tcbsize - TLS_TCB_SIZE;
    post_size = calculate_tls_post_size(tls_init_align);
    tls_block_size = tcbsize + post_size;
    pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
    tls_block_size += pre_size + tls_static_space - TLS_TCB_SIZE - post_size;

    /* Allocate whole TLS block */
    tls_block = malloc_aligned(tls_block_size, maxalign, 0);
    tcb = (Elf_Addr **)(tls_block + pre_size + extra_size);

    if (oldtcb != NULL) {
        /* Migrate the old static block into the new allocation. */
        memcpy(tls_block, get_tls_block_ptr(oldtcb, tcbsize),
            tls_static_space);
        free_aligned(get_tls_block_ptr(oldtcb, tcbsize));

        /* Adjust the DTV. */
        dtv = tcb[0];
        for (i = 0; i < dtv[1]; i++) {
            /* Rebase pointers that pointed into the old static block. */
            if (dtv[i+2] >= (Elf_Addr)oldtcb &&
                dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
                dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tcb;
            }
        }
    } else {
        dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
        tcb[0] = dtv;
        dtv[0] = tls_dtv_generation;
        dtv[1] = tls_max_index;
        /* Copy each object's TLS init image into the static block. */
        for (obj = globallist_curr(objs); obj != NULL;
            obj = globallist_next(obj)) {
            if (obj->tlsoffset == 0)
                continue;
            tls_init_offset = obj->tlspoffset & (obj->tlsalign - 1);
            addr = (Elf_Addr)tcb + obj->tlsoffset;
            if (tls_init_offset > 0)
                memset((void *)addr, 0, tls_init_offset);
            if (obj->tlsinitsize > 0) {
                memcpy((void *)(addr + tls_init_offset), obj->tlsinit,
                    obj->tlsinitsize);
            }
            if (obj->tlssize > obj->tlsinitsize) {
                memset((void *)(addr + tls_init_offset + obj->tlsinitsize),
                    0, obj->tlssize - obj->tlsinitsize - tls_init_offset);
            }
            dtv[obj->tlsindex + 1] = addr;
        }
    }

    return (tcb);
}
  4460. void
  4461. free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
  4462. {
  4463. Elf_Addr *dtv;
  4464. Elf_Addr tlsstart, tlsend;
  4465. size_t post_size;
  4466. size_t dtvsize, i, tls_init_align;
  4467. assert(tcbsize >= TLS_TCB_SIZE);
  4468. tls_init_align = MAX(obj_main->tlsalign, 1);
  4469. /* Compute fragments sizes. */
  4470. post_size = calculate_tls_post_size(tls_init_align);
  4471. tlsstart = (Elf_Addr)tcb + TLS_TCB_SIZE + post_size;
  4472. tlsend = (Elf_Addr)tcb + tls_static_space;
  4473. dtv = *(Elf_Addr **)tcb;
  4474. dtvsize = dtv[1];
  4475. for (i = 0; i < dtvsize; i++) {
  4476. if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
  4477. free((void*)dtv[i+2]);
  4478. }
  4479. }
  4480. free(dtv);
  4481. free_aligned(get_tls_block_ptr(tcb, tcbsize));
  4482. }
  4483. #endif
  4484. #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
/*
 * Allocate Static TLS using the Variant II method (static block grows
 * down from the TCB).  'objs' is the object list whose TLS images are
 * installed; 'oldtls', when non-NULL, is an existing segment base to
 * migrate from.  Returns the new segment base (TCB) pointer.
 */
void *
allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
{
    Obj_Entry *obj;
    size_t size, ralign;
    char *tls;
    Elf_Addr *dtv, *olddtv;
    Elf_Addr segbase, oldsegbase, addr;
    size_t i;

    ralign = tcbalign;
    if (tls_static_max_align > ralign)
        ralign = tls_static_max_align;
    size = roundup(tls_static_space, ralign) + roundup(tcbsize, ralign);

    assert(tcbsize >= 2*sizeof(Elf_Addr));
    tls = malloc_aligned(size, ralign, 0 /* XXX */);
    dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));

    /* TCB words: self pointer, then the DTV pointer. */
    segbase = (Elf_Addr)(tls + roundup(tls_static_space, ralign));
    ((Elf_Addr*)segbase)[0] = segbase;
    ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;

    dtv[0] = tls_dtv_generation;
    dtv[1] = tls_max_index;

    if (oldtls) {
        /*
         * Copy the static TLS block over whole.
         */
        oldsegbase = (Elf_Addr) oldtls;
        memcpy((void *)(segbase - tls_static_space),
            (const void *)(oldsegbase - tls_static_space),
            tls_static_space);

        /*
         * If any dynamic TLS blocks have been created by tls_get_addr(),
         * move them over.
         */
        olddtv = ((Elf_Addr**)oldsegbase)[1];
        for (i = 0; i < olddtv[1]; i++) {
            if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
                dtv[i+2] = olddtv[i+2];
                olddtv[i+2] = 0;
            }
        }

        /*
         * We assume that this block was the one we created with
         * allocate_initial_tls().
         */
        free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
    } else {
        /* Fresh thread: install each object's TLS init image. */
        for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
            if (obj->marker || obj->tlsoffset == 0)
                continue;
            addr = segbase - obj->tlsoffset;
            memset((void*)(addr + obj->tlsinitsize),
                0, obj->tlssize - obj->tlsinitsize);
            if (obj->tlsinit) {
                memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
                obj->static_tls_copied = true;
            }
            dtv[obj->tlsindex + 1] = addr;
        }
    }

    return (void*) segbase;
}
/*
 * Free a Variant II TLS block.  'tls' is the segment base returned by
 * allocate_tls(); dynamically created per-module blocks are released
 * first, then the static block and the DTV.
 */
void
free_tls(void *tls, size_t tcbsize __unused, size_t tcbalign)
{
    Elf_Addr* dtv;
    size_t size, ralign;
    int dtvsize, i;
    Elf_Addr tlsstart, tlsend;

    /*
     * Figure out the size of the initial TLS block so that we can
     * find stuff which ___tls_get_addr() allocated dynamically.
     */
    ralign = tcbalign;
    if (tls_static_max_align > ralign)
        ralign = tls_static_max_align;
    size = roundup(tls_static_space, ralign);

    dtv = ((Elf_Addr**)tls)[1];
    dtvsize = dtv[1];
    tlsend = (Elf_Addr) tls;
    tlsstart = tlsend - size;
    for (i = 0; i < dtvsize; i++) {
        /* Entries outside the static block were allocated dynamically. */
        if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) {
            free_aligned((void *)dtv[i + 2]);
        }
    }

    free_aligned((void *)tlsstart);
    free((void*) dtv);
}
  4576. #endif
  4577. /*
  4578. * Allocate TLS block for module with given index.
  4579. */
  4580. void *
  4581. allocate_module_tls(int index)
  4582. {
  4583. Obj_Entry *obj;
  4584. char *p;
  4585. TAILQ_FOREACH(obj, &obj_list, next) {
  4586. if (obj->marker)
  4587. continue;
  4588. if (obj->tlsindex == index)
  4589. break;
  4590. }
  4591. if (obj == NULL) {
  4592. _rtld_error("Can't find module with TLS index %d", index);
  4593. rtld_die();
  4594. }
  4595. p = malloc_aligned(obj->tlssize, obj->tlsalign, obj->tlspoffset);
  4596. memcpy(p, obj->tlsinit, obj->tlsinitsize);
  4597. memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
  4598. return (p);
  4599. }
/*
 * Reserve a static TLS offset for 'obj'.  Returns true on success.
 * Once the static TLS block size has been fixed, a newly loaded
 * object only fits within the spare space left at startup; if it does
 * not fit, false is returned and the caller must fall back to
 * dynamic TLS for this object.
 */
bool
allocate_tls_offset(Obj_Entry *obj)
{
    size_t off;

    if (obj->tls_done)
        return true;

    if (obj->tlssize == 0) {
        obj->tls_done = true;
        return true;
    }

    if (tls_last_offset == 0)
        off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign,
            obj->tlspoffset);
    else
        off = calculate_tls_offset(tls_last_offset, tls_last_size,
            obj->tlssize, obj->tlsalign, obj->tlspoffset);

    /*
     * If we have already fixed the size of the static TLS block, we
     * must stay within that size. When allocating the static TLS, we
     * leave a small amount of space spare to be used for dynamically
     * loading modules which use static TLS.
     */
    if (tls_static_space != 0) {
        if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
            return false;
    } else if (obj->tlsalign > tls_static_max_align) {
        /* Block size not fixed yet; track the largest alignment seen. */
        tls_static_max_align = obj->tlsalign;
    }

    tls_last_offset = obj->tlsoffset = off;
    tls_last_size = obj->tlssize;
    obj->tls_done = true;

    return true;
}
/*
 * Release 'obj's static TLS reservation, but only when it was the
 * most recent allocation — the "allocator" is just a high-water mark.
 */
void
free_tls_offset(Obj_Entry *obj)
{

    /*
     * If we were the last thing to allocate out of the static TLS
     * block, we give our space back to the 'allocator'. This is a
     * simplistic workaround to allow libGL.so.1 to be loaded and
     * unloaded multiple times.
     */
    if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
        == calculate_tls_end(tls_last_offset, tls_last_size)) {
        tls_last_offset -= obj->tlssize;
        tls_last_size = 0;
    }
}
  4648. void *
  4649. _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
  4650. {
  4651. void *ret;
  4652. RtldLockState lockstate;
  4653. wlock_acquire(rtld_bind_lock, &lockstate);
  4654. ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls,
  4655. tcbsize, tcbalign);
  4656. lock_release(rtld_bind_lock, &lockstate);
  4657. return (ret);
  4658. }
  4659. void
  4660. _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
  4661. {
  4662. RtldLockState lockstate;
  4663. wlock_acquire(rtld_bind_lock, &lockstate);
  4664. free_tls(tcb, tcbsize, tcbalign);
  4665. lock_release(rtld_bind_lock, &lockstate);
  4666. }
  4667. static void
  4668. object_add_name(Obj_Entry *obj, const char *name)
  4669. {
  4670. Name_Entry *entry;
  4671. size_t len;
  4672. len = strlen(name);
  4673. entry = malloc(sizeof(Name_Entry) + len);
  4674. if (entry != NULL) {
  4675. strcpy(entry->name, name);
  4676. STAILQ_INSERT_TAIL(&obj->names, entry, link);
  4677. }
  4678. }
  4679. static int
  4680. object_match_name(const Obj_Entry *obj, const char *name)
  4681. {
  4682. Name_Entry *entry;
  4683. STAILQ_FOREACH(entry, &obj->names, link) {
  4684. if (strcmp(name, entry->name) == 0)
  4685. return (1);
  4686. }
  4687. return (0);
  4688. }
/*
 * Locate the dependency of 'obj' known by 'name'.  Objects on the
 * main list are matched by recorded name first, then obj's DT_NEEDED
 * entries are searched.  May return NULL for a DT_NEEDED entry whose
 * object has not been loaded yet (callers handle that); dies when the
 * name is not a dependency at all.
 */
static Obj_Entry *
locate_dependency(const Obj_Entry *obj, const char *name)
{
    const Objlist_Entry *entry;
    const Needed_Entry *needed;

    STAILQ_FOREACH(entry, &list_main, link) {
        if (object_match_name(entry->obj, name))
            return entry->obj;
    }

    for (needed = obj->needed; needed != NULL; needed = needed->next) {
        if (strcmp(obj->strtab + needed->name, name) == 0 ||
            (needed->obj != NULL && object_match_name(needed->obj, name))) {
            /*
             * If there is DT_NEEDED for the name we are looking for,
             * we are all set. Note that object might not be found if
             * dependency was not loaded yet, so the function can
             * return NULL here. This is expected and handled
             * properly by the caller.
             */
            return (needed->obj);
        }
    }
    _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
        obj->path, name);
    rtld_die();
}
/*
 * Check whether 'depobj' defines the symbol version that 'refobj'
 * requires through the Elf_Vernaux record 'vna'.  Returns 0 when the
 * version is found (or the requirement is weak), -1 with an rtld
 * error set otherwise.
 */
static int
check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
    const Elf_Vernaux *vna)
{
    const Elf_Verdef *vd;
    const char *vername;

    vername = refobj->strtab + vna->vna_name;
    vd = depobj->verdef;
    if (vd == NULL) {
        _rtld_error("%s: version %s required by %s not defined",
            depobj->path, vername, refobj->path);
        return (-1);
    }
    /* Walk depobj's chained list of version definitions. */
    for (;;) {
        if (vd->vd_version != VER_DEF_CURRENT) {
            _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
                depobj->path, vd->vd_version);
            return (-1);
        }
        if (vna->vna_hash == vd->vd_hash) {
            /* Hashes match; confirm with a full string compare. */
            const Elf_Verdaux *aux = (const Elf_Verdaux *)
                ((const char *)vd + vd->vd_aux);
            if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
                return (0);
        }
        if (vd->vd_next == 0)
            break;
        vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
    }
    /* A weak version requirement is satisfied even when absent. */
    if (vna->vna_flags & VER_FLG_WEAK)
        return (0);
    _rtld_error("%s: version %s required by %s not found",
        depobj->path, vername, refobj->path);
    return (-1);
}
/*
 * Verify that all symbol versions required by 'obj' are provided by
 * its dependencies, and build obj->vertab (indexed by version index)
 * for use during symbol lookup.  Returns 0 on success, -1 with an
 * rtld error set on failure.  Idempotent via obj->ver_checked.
 */
static int
rtld_verify_object_versions(Obj_Entry *obj)
{
    const Elf_Verneed *vn;
    const Elf_Verdef  *vd;
    const Elf_Verdaux *vda;
    const Elf_Vernaux *vna;
    const Obj_Entry *depobj;
    int maxvernum, vernum;

    if (obj->ver_checked)
        return (0);
    obj->ver_checked = true;

    maxvernum = 0;
    /*
     * Walk over defined and required version records and figure out
     * max index used by any of them. Do very basic sanity checking
     * while there.
     */
    vn = obj->verneed;
    while (vn != NULL) {
        if (vn->vn_version != VER_NEED_CURRENT) {
            _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
                obj->path, vn->vn_version);
            return (-1);
        }
        vna = (const Elf_Vernaux *)((const char *)vn + vn->vn_aux);
        for (;;) {
            vernum = VER_NEED_IDX(vna->vna_other);
            if (vernum > maxvernum)
                maxvernum = vernum;
            if (vna->vna_next == 0)
                break;
            vna = (const Elf_Vernaux *)((const char *)vna + vna->vna_next);
        }
        if (vn->vn_next == 0)
            break;
        vn = (const Elf_Verneed *)((const char *)vn + vn->vn_next);
    }

    vd = obj->verdef;
    while (vd != NULL) {
        if (vd->vd_version != VER_DEF_CURRENT) {
            _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
                obj->path, vd->vd_version);
            return (-1);
        }
        vernum = VER_DEF_IDX(vd->vd_ndx);
        if (vernum > maxvernum)
            maxvernum = vernum;
        if (vd->vd_next == 0)
            break;
        vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
    }

    if (maxvernum == 0)
        return (0);

    /*
     * Store version information in array indexable by version index.
     * Verify that object version requirements are satisfied along the
     * way.
     */
    obj->vernum = maxvernum + 1;
    obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));

    /* First, fill in versions this object itself defines. */
    vd = obj->verdef;
    while (vd != NULL) {
        if ((vd->vd_flags & VER_FLG_BASE) == 0) {
            vernum = VER_DEF_IDX(vd->vd_ndx);
            assert(vernum <= maxvernum);
            vda = (const Elf_Verdaux *)((const char *)vd + vd->vd_aux);
            obj->vertab[vernum].hash = vd->vd_hash;
            obj->vertab[vernum].name = obj->strtab + vda->vda_name;
            obj->vertab[vernum].file = NULL;
            obj->vertab[vernum].flags = 0;
        }
        if (vd->vd_next == 0)
            break;
        vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
    }

    /* Then versions required from dependencies, verifying each. */
    vn = obj->verneed;
    while (vn != NULL) {
        depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
        if (depobj == NULL)
            return (-1);
        vna = (const Elf_Vernaux *)((const char *)vn + vn->vn_aux);
        for (;;) {
            if (check_object_provided_version(obj, depobj, vna))
                return (-1);
            vernum = VER_NEED_IDX(vna->vna_other);
            assert(vernum <= maxvernum);
            obj->vertab[vernum].hash = vna->vna_hash;
            obj->vertab[vernum].name = obj->strtab + vna->vna_name;
            obj->vertab[vernum].file = obj->strtab + vn->vn_file;
            obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
                VER_INFO_HIDDEN : 0;
            if (vna->vna_next == 0)
                break;
            vna = (const Elf_Vernaux *)((const char *)vna + vna->vna_next);
        }
        if (vn->vn_next == 0)
            break;
        vn = (const Elf_Verneed *)((const char *)vn + vn->vn_next);
    }
    return 0;
}
/*
 * Verify version requirements for every object on 'objlist' and for
 * rtld itself.  When ld_tracing is active, keep checking after a
 * failure so the trace output is complete.  Returns 0 on success,
 * -1 otherwise.
 */
static int
rtld_verify_versions(const Objlist *objlist)
{
    Objlist_Entry *entry;
    int rc;

    rc = 0;
    STAILQ_FOREACH(entry, objlist, link) {
        /*
         * Skip dummy objects or objects that have their version requirements
         * already checked.
         */
        if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
            continue;
        if (rtld_verify_object_versions(entry->obj) == -1) {
            rc = -1;
            if (ld_tracing == NULL)
                break;
        }
    }
    if (rc == 0 || ld_tracing != NULL)
        rc = rtld_verify_object_versions(&obj_rtld);
    return rc;
}
/*
 * Return the version entry for symbol number 'symnum' in 'obj', or
 * NULL when the object has no version table or the symbol's version
 * slot is empty (unversioned/base).
 */
const Ver_Entry *
fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
{
    Elf_Versym vernum;

    if (obj->vertab) {
        vernum = VER_NDX(obj->versyms[symnum]);
        if (vernum >= obj->vernum) {
            /*
             * NOTE(review): the message passes obj->strtab + symnum,
             * but symnum is a symbol index, not a strtab offset — the
             * printed "name" looks suspect; confirm against callers.
             */
            _rtld_error("%s: symbol %s has wrong verneed value %d",
                obj->path, obj->strtab + symnum, vernum);
        } else if (obj->vertab[vernum].hash != 0) {
            return &obj->vertab[vernum];
        }
    }
    return NULL;
}
/* Return the protection flags rtld applies to thread stacks. */
int
_rtld_get_stack_prot(void)
{

    return (stack_prot);
}
/*
 * Report whether the object identified by 'arg' (a dlopen handle or
 * an address inside the object) was loaded via dlopen().  Returns 1
 * if dlopened, 0 if loaded at startup, -1 with an rtld error set when
 * no object matches.
 */
int
_rtld_is_dlopened(void *arg)
{
    Obj_Entry *obj;
    RtldLockState lockstate;
    int res;

    rlock_acquire(rtld_bind_lock, &lockstate);
    obj = dlcheck(arg);          /* try 'arg' as a dlopen handle first */
    if (obj == NULL)
        obj = obj_from_addr(arg); /* fall back to address containment */
    if (obj == NULL) {
        _rtld_error("No shared object contains address");
        lock_release(rtld_bind_lock, &lockstate);
        return (-1);
    }
    res = obj->dlopened ? 1 : 0;
    lock_release(rtld_bind_lock, &lockstate);
    return (res);
}
/*
 * Set the protection of obj's PT_GNU_RELRO region to 'prot'.
 * Returns 0 on success, -1 with an rtld error when mprotect fails.
 * A zero-sized region is a successful no-op.
 */
static int
obj_remap_relro(Obj_Entry *obj, int prot)
{

    if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size,
        prot) == -1) {
        _rtld_error("%s: Cannot set relro protection to %#x: %s",
            obj->path, prot, rtld_strerror(errno));
        return (-1);
    }
    return (0);
}
/* Make obj's RELRO region writable (e.g. while applying relocations). */
static int
obj_disable_relro(Obj_Entry *obj)
{

    return (obj_remap_relro(obj, PROT_READ | PROT_WRITE));
}
/* Make obj's RELRO region read-only again after relocation. */
static int
obj_enforce_relro(Obj_Entry *obj)
{

    return (obj_remap_relro(obj, PROT_READ));
}
/*
 * If some loaded object requires an executable stack (PF_X set in
 * max_stack_flags) and stacks are not yet executable, ask the
 * threading library — when present — to remap all thread stacks with
 * PROT_EXEC via its __pthread_map_stacks_exec hook.
 */
static void
map_stacks_exec(RtldLockState *lockstate)
{
    void (*thr_map_stacks_exec)(void);

    if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
        return;
    thr_map_stacks_exec = (void (*)(void))(uintptr_t)
        get_program_var_addr("__pthread_map_stacks_exec", lockstate);
    if (thr_map_stacks_exec != NULL) {
        stack_prot |= PROT_EXEC;
        thr_map_stacks_exec();
    }
}
/*
 * For each object on 'list' that has a static TLS reservation whose
 * init image has not been copied yet, ask the threading library (via
 * its __pthread_distribute_static_tls hook, if present) to install
 * the image into every existing thread's static TLS block.
 */
static void
distribute_static_tls(Objlist *list, RtldLockState *lockstate)
{
    Objlist_Entry *elm;
    Obj_Entry *obj;
    void (*distrib)(size_t, void *, size_t, size_t);

    distrib = (void (*)(size_t, void *, size_t, size_t))(uintptr_t)
        get_program_var_addr("__pthread_distribute_static_tls", lockstate);
    if (distrib == NULL)
        return;
    STAILQ_FOREACH(elm, list, link) {
        obj = elm->obj;
        if (obj->marker || !obj->tls_done || obj->static_tls_copied)
            continue;
        distrib(obj->tlsoffset, obj->tlsinit, obj->tlsinitsize,
            obj->tlssize);
        obj->static_tls_copied = true;
    }
}
  4967. void
  4968. symlook_init(SymLook *dst, const char *name)
  4969. {
  4970. bzero(dst, sizeof(*dst));
  4971. dst->name = name;
  4972. dst->hash = elf_hash(name);
  4973. dst->hash_gnu = gnu_hash(name);
  4974. }
  4975. static void
  4976. symlook_init_from_req(SymLook *dst, const SymLook *src)
  4977. {
  4978. dst->name = src->name;
  4979. dst->hash = src->hash;
  4980. dst->hash_gnu = src->hash_gnu;
  4981. dst->ventry = src->ventry;
  4982. dst->flags = src->flags;
  4983. dst->defobj_out = NULL;
  4984. dst->sym_out = NULL;
  4985. dst->lockstate = src->lockstate;
  4986. }
  4987. static int
  4988. open_binary_fd(const char *argv0, bool search_in_path,
  4989. const char **binpath_res)
  4990. {
  4991. char *binpath, *pathenv, *pe, *res1;
  4992. const char *res;
  4993. int fd;
  4994. binpath = NULL;
  4995. res = NULL;
  4996. if (search_in_path && strchr(argv0, '/') == NULL) {
  4997. binpath = xmalloc(PATH_MAX);
  4998. pathenv = getenv("PATH");
  4999. if (pathenv == NULL) {
  5000. _rtld_error("-p and no PATH environment variable");
  5001. rtld_die();
  5002. }
  5003. pathenv = strdup(pathenv);
  5004. if (pathenv == NULL) {
  5005. _rtld_error("Cannot allocate memory");
  5006. rtld_die();
  5007. }
  5008. fd = -1;
  5009. errno = ENOENT;
  5010. while ((pe = strsep(&pathenv, ":")) != NULL) {
  5011. if (strlcpy(binpath, pe, PATH_MAX) >= PATH_MAX)
  5012. continue;
  5013. if (binpath[0] != '\0' &&
  5014. strlcat(binpath, "/", PATH_MAX) >= PATH_MAX)
  5015. continue;
  5016. if (strlcat(binpath, argv0, PATH_MAX) >= PATH_MAX)
  5017. continue;
  5018. fd = open(binpath, O_RDONLY | O_CLOEXEC | O_VERIFY);
  5019. if (fd != -1 || errno != ENOENT) {
  5020. res = binpath;
  5021. break;
  5022. }
  5023. }
  5024. free(pathenv);
  5025. } else {
  5026. fd = open(argv0, O_RDONLY | O_CLOEXEC | O_VERIFY);
  5027. res = argv0;
  5028. }
  5029. if (fd == -1) {
  5030. _rtld_error("Cannot open %s: %s", argv0, rtld_strerror(errno));
  5031. rtld_die();
  5032. }
  5033. if (res != NULL && res[0] != '/') {
  5034. res1 = xmalloc(PATH_MAX);
  5035. if (realpath(res, res1) != NULL) {
  5036. if (res != argv0)
  5037. free(__DECONST(char *, res));
  5038. res = res1;
  5039. } else {
  5040. free(res1);
  5041. }
  5042. }
  5043. *binpath_res = res;
  5044. return (fd);
  5045. }
  5046. /*
  5047. * Parse a set of command-line arguments.
  5048. */
  5049. static int
  5050. parse_args(char* argv[], int argc, bool *use_pathp, int *fdp,
  5051. const char **argv0)
  5052. {
  5053. const char *arg;
  5054. char machine[64];
  5055. size_t sz;
  5056. int arglen, fd, i, j, mib[2];
  5057. char opt;
  5058. bool seen_b, seen_f;
  5059. dbg("Parsing command-line arguments");
  5060. *use_pathp = false;
  5061. *fdp = -1;
  5062. seen_b = seen_f = false;
  5063. for (i = 1; i < argc; i++ ) {
  5064. arg = argv[i];
  5065. dbg("argv[%d]: '%s'", i, arg);
  5066. /*
  5067. * rtld arguments end with an explicit "--" or with the first
  5068. * non-prefixed argument.
  5069. */
  5070. if (strcmp(arg, "--") == 0) {
  5071. i++;
  5072. break;
  5073. }
  5074. if (arg[0] != '-')
  5075. break;
  5076. /*
  5077. * All other arguments are single-character options that can
  5078. * be combined, so we need to search through `arg` for them.
  5079. */
  5080. arglen = strlen(arg);
  5081. for (j = 1; j < arglen; j++) {
  5082. opt = arg[j];
  5083. if (opt == 'h') {
  5084. print_usage(argv[0]);
  5085. _exit(0);
  5086. } else if (opt == 'b') {
  5087. if (seen_f) {
  5088. _rtld_error("Both -b and -f specified");
  5089. rtld_die();
  5090. }
  5091. i++;
  5092. *argv0 = argv[i];
  5093. seen_b = true;
  5094. break;
  5095. } else if (opt == 'f') {
  5096. if (seen_b) {
  5097. _rtld_error("Both -b and -f specified");
  5098. rtld_die();
  5099. }
  5100. /*
  5101. * -f XX can be used to specify a
  5102. * descriptor for the binary named at
  5103. * the command line (i.e., the later
  5104. * argument will specify the process
  5105. * name but the descriptor is what
  5106. * will actually be executed).
  5107. *
  5108. * -f must be the last option in, e.g., -abcf.
  5109. */
  5110. if (j != arglen - 1) {
  5111. _rtld_error("Invalid options: %s", arg);
  5112. rtld_die();
  5113. }
  5114. i++;
  5115. fd = parse_integer(argv[i]);
  5116. if (fd == -1) {
  5117. _rtld_error(
  5118. "Invalid file descriptor: '%s'",
  5119. argv[i]);
  5120. rtld_die();
  5121. }
  5122. *fdp = fd;
  5123. seen_f = true;
  5124. break;
  5125. } else if (opt == 'p') {
  5126. *use_pathp = true;
  5127. } else if (opt == 'v') {
  5128. machine[0] = '\0';
  5129. mib[0] = CTL_HW;
  5130. mib[1] = HW_MACHINE;
  5131. sz = sizeof(machine);
  5132. sysctl(mib, nitems(mib), machine, &sz, NULL, 0);
  5133. rtld_printf(
  5134. "FreeBSD ld-elf.so.1 %s\n"
  5135. "FreeBSD_version %d\n"
  5136. "Default lib path %s\n"
  5137. "Env prefix %s\n"
  5138. "Hint file %s\n"
  5139. "libmap file %s\n",
  5140. machine,
  5141. __FreeBSD_version, ld_standard_library_path,
  5142. ld_env_prefix, ld_elf_hints_default,
  5143. ld_path_libmap_conf);
  5144. _exit(0);
  5145. } else {
  5146. _rtld_error("Invalid argument: '%s'", arg);
  5147. print_usage(argv[0]);
  5148. rtld_die();
  5149. }
  5150. }
  5151. }
  5152. if (!seen_b)
  5153. *argv0 = argv[i];
  5154. return (i);
  5155. }
  5156. /*
  5157. * Parse a file descriptor number without pulling in more of libc (e.g. atoi).
  5158. */
  5159. static int
  5160. parse_integer(const char *str)
  5161. {
  5162. static const int RADIX = 10; /* XXXJA: possibly support hex? */
  5163. const char *orig;
  5164. int n;
  5165. char c;
  5166. orig = str;
  5167. n = 0;
  5168. for (c = *str; c != '\0'; c = *++str) {
  5169. if (c < '0' || c > '9')
  5170. return (-1);
  5171. n *= RADIX;
  5172. n += c - '0';
  5173. }
  5174. /* Make sure we actually parsed something. */
  5175. if (str == orig)
  5176. return (-1);
  5177. return (n);
  5178. }
/* Print the command-line usage summary, with 'argv0' as the program name. */
static void
print_usage(const char *argv0)
{

    rtld_printf(
        "Usage: %s [-h] [-b <exe>] [-f <FD>] [-p] [--] <binary> [<args>]\n"
        "\n"
        "Options:\n"
        "  -h        Display this help message\n"
        "  -b <exe>  Execute <exe> instead of <binary>, arg0 is <binary>\n"
        "  -f <FD>   Execute <FD> instead of searching for <binary>\n"
        "  -p        Search in PATH for named binary\n"
        "  -v        Display identification information\n"
        "  --        End of RTLD options\n"
        "  <binary>  Name of process to execute\n"
        "  <args>    Arguments to the executed process\n", argv0);
}
  5195. /*
  5196. * Overrides for libc_pic-provided functions.
  5197. */
/*
 * Replacement for the libc __getosreldate(): fetch and cache the
 * KERN_OSRELDATE sysctl.  Returns the cached value, which stays 0
 * until a successful lookup.
 */
int
__getosreldate(void)
{
    size_t len;
    int oid[2];
    int error, osrel;

    if (osreldate != 0)
        return (osreldate);

    oid[0] = CTL_KERN;
    oid[1] = KERN_OSRELDATE;
    osrel = 0;
    len = sizeof(osrel);
    error = sysctl(oid, 2, &osrel, &len, NULL, 0);
    if (error == 0 && osrel > 0 && len == sizeof(osrel))
        osreldate = osrel;
    return (osreldate);
}
/* Override of libc exit(): terminate immediately via _exit(2). */
void
exit(int status)
{

    _exit(status);
}
/* Definitions expected by libc code linked into rtld. */
void (*__cleanup)(void);
int __isthreaded = 0;               /* rtld never becomes multi-threaded itself */
int _thread_autoinit_dummy_decl = 1;
/*
 * No unresolved symbols for rtld.
 */
/* Empty stub; rtld itself has nothing to finalize. */
void
__pthread_cxa_finalize(struct dl_phdr_info *a __unused)
{
}
  5230. const char *
  5231. rtld_strerror(int errnum)
  5232. {
  5233. if (errnum < 0 || errnum >= sys_nerr)
  5234. return ("Unknown error");
  5235. return (sys_errlist[errnum]);
  5236. }
  5237. /*
  5238. * No ifunc relocations.
  5239. */
  5240. void *
  5241. memset(void *dest, int c, size_t len)
  5242. {
  5243. size_t i;
  5244. for (i = 0; i < len; i++)
  5245. ((char *)dest)[i] = c;
  5246. return (dest);
  5247. }
  5248. void
  5249. bzero(void *dest, size_t len)
  5250. {
  5251. size_t i;
  5252. for (i = 0; i < len; i++)
  5253. ((char *)dest)[i] = 0;
  5254. }
/* malloc */
/*
 * Route the standard allocator entry points to rtld's private
 * __crt_* implementation so rtld is self-contained.
 */
void *
malloc(size_t nbytes)
{

    return (__crt_malloc(nbytes));
}

void *
calloc(size_t num, size_t size)
{

    return (__crt_calloc(num, size));
}

void
free(void *cp)
{

    __crt_free(cp);
}

void *
realloc(void *cp, size_t nbytes)
{

    return (__crt_realloc(cp, nbytes));
}
/*
 * Exported version stamps.  NOTE(review): presumably consumed by
 * external tools/debuggers to identify the rtld build — confirm.
 */
extern int _rtld_version__FreeBSD_version __exported;
int _rtld_version__FreeBSD_version = __FreeBSD_version;
extern char _rtld_version_laddr_offset __exported;
char _rtld_version_laddr_offset;