HardenedBSD src tree https://hardenedbsd.org/
  1. /*-
  2. * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
  3. *
  4. * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
  5. * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
  6. * Copyright 2009-2013 Konstantin Belousov <kib@FreeBSD.ORG>.
  7. * Copyright 2012 John Marino <draco@marino.st>.
  8. * Copyright 2014-2017 The FreeBSD Foundation
  9. * All rights reserved.
  10. *
  11. * Portions of this software were developed by Konstantin Belousov
  12. * under sponsorship from the FreeBSD Foundation.
  13. *
  14. * Redistribution and use in source and binary forms, with or without
  15. * modification, are permitted provided that the following conditions
  16. * are met:
  17. * 1. Redistributions of source code must retain the above copyright
  18. * notice, this list of conditions and the following disclaimer.
  19. * 2. Redistributions in binary form must reproduce the above copyright
  20. * notice, this list of conditions and the following disclaimer in the
  21. * documentation and/or other materials provided with the distribution.
  22. *
  23. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  24. * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  25. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  26. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  27. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  28. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  29. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  30. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  32. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33. */
  34. /*
  35. * Dynamic linker for ELF.
  36. *
  37. * John Polstra <jdp@polstra.com>.
  38. */
  39. #include <sys/cdefs.h>
  40. __FBSDID("$FreeBSD$");
  41. #include <sys/param.h>
  42. #include <sys/mount.h>
  43. #include <sys/mman.h>
  44. #ifdef HARDENEDBSD
  45. #include <sys/pax.h>
  46. #endif
  47. #include <sys/stat.h>
  48. #include <sys/sysctl.h>
  49. #include <sys/uio.h>
  50. #include <sys/utsname.h>
  51. #include <sys/ktrace.h>
  52. #include <dlfcn.h>
  53. #include <err.h>
  54. #include <errno.h>
  55. #include <fcntl.h>
  56. #include <stdarg.h>
  57. #include <stdio.h>
  58. #include <stdlib.h>
  59. #include <string.h>
  60. #include <unistd.h>
  61. #include "debug.h"
  62. #include "rtld.h"
  63. #include "libmap.h"
  64. #include "paths.h"
  65. #include "rtld_tls.h"
  66. #include "rtld_printf.h"
  67. #include "rtld_malloc.h"
  68. #include "rtld_utrace.h"
  69. #include "notes.h"
  70. /* Types. */
  71. typedef void (*func_ptr_type)(void);
  72. typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
  73. #ifdef HARDENEDBSD
  74. struct integriforce_so_check {
  75. char isc_path[MAXPATHLEN];
  76. int isc_result;
  77. };
  78. #endif
  79. /* Variables that cannot be static: */
  80. extern struct r_debug r_debug; /* For GDB */
  81. extern int _thread_autoinit_dummy_decl;
  82. extern char* __progname;
  83. extern void (*__cleanup)(void);
  84. /*
  85. * Function declarations.
  86. */
  87. static const char *basename(const char *);
  88. static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
  89. const Elf_Dyn **, const Elf_Dyn **);
  90. static bool digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
  91. const Elf_Dyn *);
  92. static bool digest_dynamic(Obj_Entry *, int);
  93. static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
  94. static void distribute_static_tls(Objlist *, RtldLockState *);
  95. static Obj_Entry *dlcheck(void *);
  96. static int dlclose_locked(void *, RtldLockState *);
  97. static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
  98. int lo_flags, int mode, RtldLockState *lockstate);
  99. static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
  100. static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
  101. static bool donelist_check(DoneList *, const Obj_Entry *);
  102. static void errmsg_restore(char *);
  103. static char *errmsg_save(void);
  104. static void *fill_search_info(const char *, size_t, void *);
  105. static char *find_library(const char *, const Obj_Entry *, int *);
  106. static const char *gethints(bool);
  107. static void hold_object(Obj_Entry *);
  108. static void unhold_object(Obj_Entry *);
  109. static void init_dag(Obj_Entry *);
  110. static void init_marker(Obj_Entry *);
  111. static void init_pagesizes(Elf_Auxinfo **aux_info);
  112. static void init_rtld(caddr_t, Elf_Auxinfo **);
  113. static void initlist_add_neededs(Needed_Entry *, Objlist *);
  114. static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
  115. static int initlist_objects_ifunc(Objlist *, bool, int, RtldLockState *);
  116. static void linkmap_add(Obj_Entry *);
  117. static void linkmap_delete(Obj_Entry *);
  118. static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
  119. static void unload_filtees(Obj_Entry *, RtldLockState *);
  120. #if defined(HARDENEDBSD) && defined(SHLIBRANDOM)
  121. static void randomize_neededs(Obj_Entry *obj, int flags);
  122. #endif
  123. static int load_needed_objects(Obj_Entry *, int);
  124. static int load_preload_objects(void);
  125. static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
  126. static void map_stacks_exec(RtldLockState *);
  127. static int obj_disable_relro(Obj_Entry *);
  128. static int obj_enforce_relro(Obj_Entry *);
  129. static Obj_Entry *obj_from_addr(const void *);
  130. static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
  131. static void objlist_call_init(Objlist *, RtldLockState *);
  132. static void objlist_clear(Objlist *);
  133. static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
  134. static void objlist_init(Objlist *);
  135. static void objlist_push_head(Objlist *, Obj_Entry *);
  136. static void objlist_push_tail(Objlist *, Obj_Entry *);
  137. static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
  138. static void objlist_remove(Objlist *, Obj_Entry *);
  139. static int open_binary_fd(const char *argv0, bool search_in_path,
  140. const char **binpath_res);
  141. static int parse_args(char* argv[], int argc, bool *use_pathp, int *fdp,
  142. const char **argv0);
  143. static int parse_integer(const char *);
  144. static void *path_enumerate(const char *, path_enum_proc, const char *, void *);
  145. static void print_usage(const char *argv0);
  146. static void release_object(Obj_Entry *);
  147. static int relocate_object_dag(Obj_Entry *root, bool bind_now,
  148. Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
  149. static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
  150. int flags, RtldLockState *lockstate);
  151. static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
  152. RtldLockState *);
  153. static int resolve_object_ifunc(Obj_Entry *, bool, int, RtldLockState *);
  154. static int rtld_dirname(const char *, char *);
  155. static int rtld_dirname_abs(const char *, char *);
  156. static void *rtld_dlopen(const char *name, int fd, int mode);
  157. static void rtld_exit(void);
  158. static void rtld_nop_exit(void);
  159. static char *search_library_path(const char *, const char *, const char *,
  160. int *);
  161. static char *search_library_pathfds(const char *, const char *, int *);
  162. static const void **get_program_var_addr(const char *, RtldLockState *);
  163. static void set_program_var(const char *, const void *);
  164. static int symlook_default(SymLook *, const Obj_Entry *refobj);
  165. static int symlook_global(SymLook *, DoneList *);
  166. static void symlook_init_from_req(SymLook *, const SymLook *);
  167. static int symlook_list(SymLook *, const Objlist *, DoneList *);
  168. static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
  169. static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
  170. static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
  171. static void trace_loaded_objects(Obj_Entry *);
  172. static void unlink_object(Obj_Entry *);
  173. static void unload_object(Obj_Entry *, RtldLockState *lockstate);
  174. static void unref_dag(Obj_Entry *);
  175. static void ref_dag(Obj_Entry *);
  176. static char *origin_subst_one(Obj_Entry *, char *, const char *,
  177. const char *, bool);
  178. static char *origin_subst(Obj_Entry *, const char *);
  179. static bool obj_resolve_origin(Obj_Entry *obj);
  180. static void preinit_main(void);
  181. static int rtld_verify_versions(const Objlist *);
  182. static int rtld_verify_object_versions(Obj_Entry *);
  183. static void object_add_name(Obj_Entry *, const char *);
  184. static int object_match_name(const Obj_Entry *, const char *);
  185. static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
  186. static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
  187. struct dl_phdr_info *phdr_info);
  188. static uint32_t gnu_hash(const char *);
  189. static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
  190. const unsigned long);
  191. void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
  192. void _r_debug_postinit(struct link_map *) __noinline __exported;
  193. int __sys_openat(int, const char *, int, ...);
  194. /*
  195. * Data declarations.
  196. */
  197. static char *error_message; /* Message for dlerror(), or NULL */
  198. struct r_debug r_debug __exported; /* for GDB; */
  199. static bool libmap_disable; /* Disable libmap */
  200. static bool ld_loadfltr; /* Immediate filters processing */
  201. static char *libmap_override; /* Maps to use in addition to libmap.conf */
  202. static bool trust; /* False for setuid and setgid programs */
  203. static bool dangerous_ld_env; /* True if environment variables have been
  204. used to affect the libraries loaded */
  205. bool ld_bind_not; /* Disable PLT update */
  206. static char *ld_bind_now; /* Environment variable for immediate binding */
  207. static char *ld_debug; /* Environment variable for debugging */
  208. static char *ld_library_path; /* Environment variable for search path */
  209. static char *ld_library_dirs; /* Environment variable for library descriptors */
  210. static char *ld_preload; /* Environment variable for libraries to
  211. load first */
  212. static const char *ld_elf_hints_path; /* Environment variable for alternative hints path */
  213. static const char *ld_tracing; /* Called from ldd to print libs */
  214. static char *ld_utrace; /* Use utrace() to log events. */
  215. static struct obj_entry_q obj_list; /* Queue of all loaded objects */
  216. static Obj_Entry *obj_main; /* The main program shared object */
  217. static Obj_Entry obj_rtld; /* The dynamic linker shared object */
  218. static unsigned int obj_count; /* Number of objects in obj_list */
  219. static unsigned int obj_loads; /* Number of loads of objects (gen count) */
  220. #ifdef HARDENEDBSD
  221. static Elf_Word pax_flags = 0; /* PaX / HardenedBSD flags */
  222. #endif
  223. static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
  224. STAILQ_HEAD_INITIALIZER(list_global);
  225. static Objlist list_main = /* Objects loaded at program startup */
  226. STAILQ_HEAD_INITIALIZER(list_main);
  227. static Objlist list_fini = /* Objects needing fini() calls */
  228. STAILQ_HEAD_INITIALIZER(list_fini);
  229. Elf_Sym sym_zero; /* For resolving undefined weak refs. */
  230. #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
  231. extern Elf_Dyn _DYNAMIC;
  232. #pragma weak _DYNAMIC
  233. int dlclose(void *) __exported;
  234. char *dlerror(void) __exported;
  235. void *dlopen(const char *, int) __exported;
  236. void *fdlopen(int, int) __exported;
  237. void *dlsym(void *, const char *) __exported;
  238. dlfunc_t dlfunc(void *, const char *) __exported;
  239. void *dlvsym(void *, const char *, const char *) __exported;
  240. int dladdr(const void *, Dl_info *) __exported;
  241. void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
  242. void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
  243. int dlinfo(void *, int , void *) __exported;
  244. int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
  245. int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
  246. int _rtld_get_stack_prot(void) __exported;
  247. int _rtld_is_dlopened(void *) __exported;
  248. void _rtld_error(const char *, ...) __exported;
  249. /* Only here to fix -Wmissing-prototypes warnings */
  250. int __getosreldate(void);
  251. void __pthread_cxa_finalize(struct dl_phdr_info *a);
  252. func_ptr_type _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp);
  253. Elf_Addr _rtld_bind(Obj_Entry *obj, Elf_Size reloff);
  254. int npagesizes;
  255. static int osreldate;
  256. size_t *pagesizes;
  257. static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
  258. static int max_stack_flags;
  259. /*
  260. * Global declarations normally provided by crt1. The dynamic linker is
  261. * not built with crt1, so we have to provide them ourselves.
  262. */
  263. char *__progname;
  264. char **environ;
  265. /*
  266. * Used to pass argc, argv to init functions.
  267. */
  268. int main_argc;
  269. char **main_argv;
  270. /*
  271. * Globals to control TLS allocation.
  272. */
  273. size_t tls_last_offset; /* Static TLS offset of last module */
  274. size_t tls_last_size; /* Static TLS size of last module */
  275. size_t tls_static_space; /* Static TLS space allocated */
  276. static size_t tls_static_max_align;
  277. Elf_Addr tls_dtv_generation = 1; /* Used to detect when dtv size changes */
  278. int tls_max_index = 1; /* Largest module index allocated */
  279. static bool ld_library_path_rpath = false;
  280. /*
  281. * Globals for path names, and such
  282. */
  283. const char *ld_elf_hints_default = _PATH_ELF_HINTS;
  284. const char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
  285. const char *ld_path_rtld = _PATH_RTLD;
  286. const char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
  287. const char *ld_env_prefix = LD_;
  288. static void (*rtld_exit_ptr)(void);
  289. /*
  290. * Fill in a DoneList with an allocation large enough to hold all of
  291. * the currently-loaded objects. Keep this as a macro since it calls
  292. * alloca and we want that to occur within the scope of the caller.
  293. */
  294. #define donelist_init(dlp) \
  295. ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
  296. assert((dlp)->objs != NULL), \
  297. (dlp)->num_alloc = obj_count, \
  298. (dlp)->num_used = 0)
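/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *	DoneList donelist;
 *
 *	donelist_init(&donelist);
 *	...
 *	if (donelist_check(&donelist, obj))
 *		continue;	(skip an already-visited object)
 *
 * The alloca() backing donelist.objs is released together with the
 * caller's stack frame, which is why donelist_init() must remain a macro.
 */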
  299. #define LD_UTRACE(e, h, mb, ms, r, n) do { \
  300. if (ld_utrace != NULL) \
  301. ld_utrace_log(e, h, mb, ms, r, n); \
  302. } while (0)
  303. static void
  304. ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
  305. int refcnt, const char *name)
  306. {
  307. struct utrace_rtld ut;
  308. static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;
  309. memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
  310. ut.event = event;
  311. ut.handle = handle;
  312. ut.mapbase = mapbase;
  313. ut.mapsize = mapsize;
  314. ut.refcnt = refcnt;
  315. bzero(ut.name, sizeof(ut.name));
  316. if (name)
  317. strlcpy(ut.name, name, sizeof(ut.name));
  318. utrace(&ut, sizeof(ut));
  319. }
  320. #ifdef RTLD_VARIANT_ENV_NAMES
  321. /*
  322. * Construct the environment variable name based on the type of binary that's
  323. * running.
  324. */
  325. static inline const char *
  326. _LD(const char *var)
  327. {
  328. static char buffer[128];
  329. strlcpy(buffer, ld_env_prefix, sizeof(buffer));
  330. strlcat(buffer, var, sizeof(buffer));
  331. return (buffer);
  332. }
  333. #else
  334. #define _LD(x) LD_ x
  335. #endif
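/*
 * Example (illustrative): with the prefix "LD_", both forms of
 * _LD("PRELOAD") yield the string "LD_PRELOAD".  The
 * RTLD_VARIANT_ENV_NAMES variant builds the name at run time from
 * ld_env_prefix; the plain macro relies on compile-time string
 * literal concatenation.
 */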
  336. /*
  337. * Main entry point for dynamic linking. The first argument is the
  338. * stack pointer. The stack is expected to be laid out as described
  339. * in the SVR4 ABI specification, Intel 386 Processor Supplement.
  340. * Specifically, the stack pointer points to a word containing
  341. * ARGC. Following that in the stack is a null-terminated sequence
  342. * of pointers to argument strings. Then comes a null-terminated
  343. * sequence of pointers to environment strings. Finally, there is a
  344. * sequence of "auxiliary vector" entries.
  345. *
  346. * The second argument points to a place to store the dynamic linker's
  347. * exit procedure pointer and the third to a place to store the main
  348. * program's object.
  349. *
  350. * The return value is the main program's entry point.
  351. */
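/*
 * Illustrative sketch of the entry stack described above (entries at
 * increasing addresses):
 *
 *	sp -> argc
 *	      argv[0] ... argv[argc-1]  NULL
 *	      envp[0] ... envp[n-1]     NULL
 *	      auxv[0] ... auxv[m-1]     AT_NULL entry
 *
 * The code below walks the vector in exactly this order.
 */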
  352. func_ptr_type
  353. _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
  354. {
  355. Elf_Auxinfo *aux, *auxp, *auxpf, *aux_info[AT_COUNT];
  356. Objlist_Entry *entry;
  357. Obj_Entry *last_interposer, *obj, *preload_tail;
  358. const Elf_Phdr *phdr;
  359. Objlist initlist;
  360. RtldLockState lockstate;
  361. struct stat st;
  362. Elf_Addr *argcp;
  363. char **argv, **env, **envp, *kexecpath, *library_path_rpath;
  364. const char *argv0, *binpath;
  365. caddr_t imgentry;
  366. char buf[MAXPATHLEN];
  367. int argc, fd, i, mib[4], old_osrel, osrel, phnum, rtld_argc;
  368. size_t sz;
  369. bool dir_enable, direct_exec, explicit_fd, search_in_path;
  370. /*
  371. * On entry, the dynamic linker itself has not been relocated yet.
  372. * Be very careful not to reference any global data until after
  373. * init_rtld has returned. It is OK to reference file-scope statics
  374. * and string constants, and to call static and global functions.
  375. */
  376. /* Find the auxiliary vector on the stack. */
  377. argcp = sp;
  378. argc = *sp++;
  379. argv = (char **) sp;
  380. sp += argc + 1; /* Skip over arguments and NULL terminator */
  381. env = (char **) sp;
  382. while (*sp++ != 0) /* Skip over environment, and NULL terminator */
  383. ;
  384. aux = (Elf_Auxinfo *) sp;
  385. /* Digest the auxiliary vector. */
  386. for (i = 0; i < AT_COUNT; i++)
  387. aux_info[i] = NULL;
  388. for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
  389. if (auxp->a_type < AT_COUNT)
  390. aux_info[auxp->a_type] = auxp;
  391. }
  392. /* Initialize and relocate ourselves. */
  393. assert(aux_info[AT_BASE] != NULL);
  394. init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);
  395. __progname = obj_rtld.path;
  396. argv0 = argv[0] != NULL ? argv[0] : "(null)";
  397. environ = env;
  398. main_argc = argc;
  399. main_argv = argv;
  400. #ifdef HARDENEDBSD
  401. /* Load PaX flags */
  402. if (aux_info[AT_PAXFLAGS] != NULL) {
  403. pax_flags = aux_info[AT_PAXFLAGS]->a_un.a_val;
  404. aux_info[AT_PAXFLAGS]->a_un.a_val = 0;
  405. }
  406. #endif
  407. trust = !issetugid();
  408. direct_exec = false;
  409. md_abi_variant_hook(aux_info);
  410. fd = -1;
  411. if (aux_info[AT_EXECFD] != NULL) {
  412. fd = aux_info[AT_EXECFD]->a_un.a_val;
  413. } else {
  414. assert(aux_info[AT_PHDR] != NULL);
  415. phdr = (const Elf_Phdr *)aux_info[AT_PHDR]->a_un.a_ptr;
  416. if (phdr == obj_rtld.phdr) {
  417. if (!trust) {
  418. _rtld_error("Tainted process refusing to run binary %s",
  419. argv0);
  420. rtld_die();
  421. }
  422. direct_exec = true;
  423. /*
  424. * Set osrel for us; it is later reset to the binary's
  425. * value before the first instruction of code from the binary
  426. * is executed.
  427. */
  428. mib[0] = CTL_KERN;
  429. mib[1] = KERN_PROC;
  430. mib[2] = KERN_PROC_OSREL;
  431. mib[3] = getpid();
  432. osrel = __FreeBSD_version;
  433. sz = sizeof(old_osrel);
  434. (void)sysctl(mib, 4, &old_osrel, &sz, &osrel, sizeof(osrel));
  435. dbg("opening main program in direct exec mode");
  436. if (argc >= 2) {
  437. rtld_argc = parse_args(argv, argc, &search_in_path, &fd, &argv0);
  438. explicit_fd = (fd != -1);
  439. binpath = NULL;
  440. if (!explicit_fd)
  441. fd = open_binary_fd(argv0, search_in_path, &binpath);
  442. if (fstat(fd, &st) == -1) {
  443. _rtld_error("Failed to fstat FD %d (%s): %s", fd,
  444. explicit_fd ? "user-provided descriptor" : argv0,
  445. rtld_strerror(errno));
  446. rtld_die();
  447. }
  448. /*
  449. * Rough emulation of the permission checks done by
  450. * execve(2), only Unix DACs are checked, ACLs are
  451. * ignored. Preserve the semantics of denying the owner
  452. * execute permission if the owner's x bit is cleared, even
  453. * if the others' x bit is set.
  454. * mmap(2) does not allow mapping with PROT_EXEC if the
  455. * binary's file comes from a noexec mount. We cannot
  456. * set a text reference on the binary.
  457. */
  458. dir_enable = false;
  459. if (st.st_uid == geteuid()) {
  460. if ((st.st_mode & S_IXUSR) != 0)
  461. dir_enable = true;
  462. } else if (st.st_gid == getegid()) {
  463. if ((st.st_mode & S_IXGRP) != 0)
  464. dir_enable = true;
  465. } else if ((st.st_mode & S_IXOTH) != 0) {
  466. dir_enable = true;
  467. }
  468. if (!dir_enable) {
  469. _rtld_error("No execute permission for binary %s",
  470. argv0);
  471. rtld_die();
  472. }
  473. /*
  474. * For direct exec mode, argv[0] is the interpreter
  475. * name; we must remove it and shift arguments left
  476. * before invoking the binary's main. Since the stack layout
  477. * places environment pointers and aux vectors right
  478. * after the terminating NULL, we must shift
  479. * environment and aux as well.
  480. */
  481. main_argc = argc - rtld_argc;
  482. for (i = 0; i <= main_argc; i++)
  483. argv[i] = argv[i + rtld_argc];
  484. *argcp -= rtld_argc;
  485. environ = env = envp = argv + main_argc + 1;
  486. do {
  487. *envp = *(envp + rtld_argc);
  488. envp++;
  489. } while (*envp != NULL);
  490. aux = auxp = (Elf_Auxinfo *)envp;
  491. auxpf = (Elf_Auxinfo *)(envp + rtld_argc);
  492. /* XXXKIB insert place for AT_EXECPATH if not present */
  493. for (;; auxp++, auxpf++) {
  494. *auxp = *auxpf;
  495. if (auxp->a_type == AT_NULL)
  496. break;
  497. }
  498. /* Point AT_EXECPATH auxv and aux_info to the binary path. */
  499. if (binpath == NULL) {
  500. aux_info[AT_EXECPATH] = NULL;
  501. } else {
  502. if (aux_info[AT_EXECPATH] == NULL) {
  503. aux_info[AT_EXECPATH] = xmalloc(sizeof(Elf_Auxinfo));
  504. aux_info[AT_EXECPATH]->a_type = AT_EXECPATH;
  505. }
  506. aux_info[AT_EXECPATH]->a_un.a_ptr = __DECONST(void *,
  507. binpath);
  508. }
  509. } else {
  510. _rtld_error("No binary");
  511. rtld_die();
  512. }
  513. }
  514. }
  515. ld_bind_now = getenv(_LD("BIND_NOW"));
  516. /*
  517. * If the process is tainted, then we unset the dangerous environment
  518. * variables. The process will be marked as tainted until setuid(2)
  519. * is called. If any child process calls setuid(2), we do not want any
  520. * future processes to honor the potentially unsafe variables.
  521. */
  522. if (!trust) {
  523. if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
  524. unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
  525. unsetenv(_LD("LIBMAP_DISABLE")) || unsetenv(_LD("BIND_NOT")) ||
  526. unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
  527. unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
  528. _rtld_error("environment corrupt; aborting");
  529. rtld_die();
  530. }
  531. }
  532. ld_debug = getenv(_LD("DEBUG"));
  533. if (ld_bind_now == NULL)
  534. ld_bind_not = getenv(_LD("BIND_NOT")) != NULL;
  535. libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
  536. libmap_override = getenv(_LD("LIBMAP"));
  537. ld_library_path = getenv(_LD("LIBRARY_PATH"));
  538. ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
  539. ld_preload = getenv(_LD("PRELOAD"));
  540. ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
  541. ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
  542. library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
  543. if (library_path_rpath != NULL) {
  544. if (library_path_rpath[0] == 'y' ||
  545. library_path_rpath[0] == 'Y' ||
  546. library_path_rpath[0] == '1')
  547. ld_library_path_rpath = true;
  548. else
  549. ld_library_path_rpath = false;
  550. }
  551. dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
  552. (ld_library_path != NULL) || (ld_preload != NULL) ||
  553. (ld_elf_hints_path != NULL) || ld_loadfltr;
  554. ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
  555. ld_utrace = getenv(_LD("UTRACE"));
  556. if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
  557. ld_elf_hints_path = ld_elf_hints_default;
  558. if (ld_debug != NULL && *ld_debug != '\0')
  559. debug = 1;
  560. dbg("%s is initialized, base address = %p", __progname,
  561. (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
  562. dbg("RTLD dynamic = %p", obj_rtld.dynamic);
  563. dbg("RTLD pltgot = %p", obj_rtld.pltgot);
  564. dbg("initializing thread locks");
  565. lockdflt_init();
  566. if (aux_info[AT_STACKPROT] != NULL &&
  567. aux_info[AT_STACKPROT]->a_un.a_val != 0)
  568. stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;
  569. /*
  570. * Load the main program, or process its program header if it is
  571. * already loaded.
  572. */
  573. if (fd != -1) { /* Load the main program. */
  574. dbg("loading main program");
  575. obj_main = map_object(fd, argv0, NULL);
  576. close(fd);
  577. if (obj_main == NULL)
  578. rtld_die();
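/*
 * Record the main object's requested stack flags, but clear PF_X when
 * the stack protection reported via AT_STACKPROT does not include
 * PROT_EXEC.
 */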
  579. max_stack_flags = obj_main->stack_flags;
  580. if ((max_stack_flags & PF_X) == PF_X)
  581. if ((stack_prot & PROT_EXEC) == 0)
  582. max_stack_flags &= ~(PF_X);
  583. } else { /* Main program already loaded. */
  584. dbg("processing main program's program header");
  585. assert(aux_info[AT_PHDR] != NULL);
  586. phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
  587. assert(aux_info[AT_PHNUM] != NULL);
  588. phnum = aux_info[AT_PHNUM]->a_un.a_val;
  589. assert(aux_info[AT_PHENT] != NULL);
  590. assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
  591. assert(aux_info[AT_ENTRY] != NULL);
  592. imgentry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
  593. if ((obj_main = digest_phdr(phdr, phnum, imgentry, argv0)) == NULL)
  594. rtld_die();
  595. }
  596. if (aux_info[AT_EXECPATH] != NULL && fd == -1) {
  597. kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
  598. dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
  599. if (kexecpath[0] == '/')
  600. obj_main->path = kexecpath;
  601. else if (getcwd(buf, sizeof(buf)) == NULL ||
  602. strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
  603. strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
  604. obj_main->path = xstrdup(argv0);
  605. else
  606. obj_main->path = xstrdup(buf);
  607. } else {
  608. dbg("No AT_EXECPATH or direct exec");
  609. obj_main->path = xstrdup(argv0);
  610. }
  611. dbg("obj_main path %s", obj_main->path);
  612. obj_main->mainprog = true;
  613. #ifndef COMPAT_32BIT
  614. /*
  615. * Get the actual dynamic linker pathname from the executable if
  616. * possible. (It should always be possible.) That ensures that
  617. * gdb will find the right dynamic linker even if a non-standard
  618. * one is being used.
  619. */
  620. if (obj_main->interp != NULL &&
  621. strcmp(obj_main->interp, obj_rtld.path) != 0) {
  622. free(obj_rtld.path);
  623. obj_rtld.path = xstrdup(obj_main->interp);
  624. __progname = obj_rtld.path;
  625. }
  626. #endif
  627. if (!digest_dynamic(obj_main, 0))
  628. rtld_die();
  629. dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
  630. obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
  631. obj_main->dynsymcount);
  632. linkmap_add(obj_main);
  633. linkmap_add(&obj_rtld);
  634. /* Link the main program into the list of objects. */
  635. TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
  636. obj_count++;
  637. obj_loads++;
  638. /* Initialize a fake symbol for resolving undefined weak references. */
  639. sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
  640. sym_zero.st_shndx = SHN_UNDEF;
  641. sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
  642. if (!libmap_disable)
  643. libmap_disable = (bool)lm_init(libmap_override);
  644. dbg("loading LD_PRELOAD libraries");
  645. if (load_preload_objects() == -1)
  646. rtld_die();
  647. preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
  648. dbg("loading needed objects");
  649. if (load_needed_objects(obj_main, ld_tracing != NULL ? RTLD_LO_TRACE :
  650. 0) == -1)
  651. rtld_die();
  652. /* Make a list of all objects loaded at startup. */
  653. last_interposer = obj_main;
  654. TAILQ_FOREACH(obj, &obj_list, next) {
  655. if (obj->marker)
  656. continue;
  657. if (obj->z_interpose && obj != obj_main) {
  658. objlist_put_after(&list_main, last_interposer, obj);
  659. last_interposer = obj;
  660. } else {
  661. objlist_push_tail(&list_main, obj);
  662. }
  663. obj->refcount++;
  664. }
  665. dbg("checking for required versions");
  666. if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
  667. rtld_die();
  668. if (ld_tracing) { /* We're done */
  669. trace_loaded_objects(obj_main);
  670. exit(0);
  671. }
  672. if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
  673. dump_relocations(obj_main);
  674. exit (0);
  675. }
  676. /*
  677. * Processing tls relocations requires having the tls offsets
  678. * initialized. Prepare offsets before starting initial
  679. * relocation processing.
  680. */
  681. dbg("initializing initial thread local storage offsets");
  682. STAILQ_FOREACH(entry, &list_main, link) {
  683. /*
  684. * Allocate all the initial objects out of the static TLS
  685. * block even if they didn't ask for it.
  686. */
  687. allocate_tls_offset(entry->obj);
  688. }
  689. if (relocate_objects(obj_main,
  690. ld_bind_now != NULL && *ld_bind_now != '\0',
  691. &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
  692. rtld_die();
  693. dbg("doing copy relocations");
  694. if (do_copy_relocations(obj_main) == -1)
  695. rtld_die();
  696. if (getenv(_LD("DUMP_REL_POST")) != NULL) {
  697. dump_relocations(obj_main);
  698. exit (0);
  699. }
  700. ifunc_init(aux);
  701. /*
  702. * Set up TLS for the main thread. This must be done after the
  703. * relocations are processed, since the TLS initialization section
  704. * might be the subject of relocations.
  705. */
  706. dbg("initializing initial thread local storage");
  707. allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));
  708. dbg("initializing key program variables");
  709. set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
  710. set_program_var("environ", env);
  711. set_program_var("__elf_aux_vector", aux);
  712. /* Make a list of init functions to call. */
  713. objlist_init(&initlist);
  714. initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
  715. preload_tail, &initlist);
  716. r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */
  717. map_stacks_exec(NULL);
  718. if (!obj_main->crt_no_init) {
  719. /*
  720. * Make sure we don't call the main program's init and fini
  721. * functions for binaries linked with old crt1 which calls
  722. * _init itself.
  723. */
  724. obj_main->init = obj_main->fini = (Elf_Addr)NULL;
  725. obj_main->preinit_array = obj_main->init_array =
  726. obj_main->fini_array = (Elf_Addr)NULL;
  727. }
  728. /*
  729. * Execute MD initializers required before we call the objects'
  730. * init functions.
  731. */
  732. pre_init();
  733. if (direct_exec) {
  734. /* Set osrel for direct-execed binary */
  735. mib[0] = CTL_KERN;
  736. mib[1] = KERN_PROC;
  737. mib[2] = KERN_PROC_OSREL;
  738. mib[3] = getpid();
  739. osrel = obj_main->osrel;
  740. sz = sizeof(old_osrel);
  741. dbg("setting osrel to %d", osrel);
  742. (void)sysctl(mib, 4, &old_osrel, &sz, &osrel, sizeof(osrel));
  743. }
  744. wlock_acquire(rtld_bind_lock, &lockstate);
  745. dbg("resolving ifuncs");
  746. if (initlist_objects_ifunc(&initlist, ld_bind_now != NULL &&
  747. *ld_bind_now != '\0', SYMLOOK_EARLY, &lockstate) == -1)
  748. rtld_die();
  749. rtld_exit_ptr = rtld_exit;
  750. if (obj_main->crt_no_init)
  751. preinit_main();
  752. objlist_call_init(&initlist, &lockstate);
  753. _r_debug_postinit(&obj_main->linkmap);
  754. objlist_clear(&initlist);
  755. dbg("loading filtees");
  756. TAILQ_FOREACH(obj, &obj_list, next) {
  757. if (obj->marker)
  758. continue;
  759. if (ld_loadfltr || obj->z_loadfltr)
  760. load_filtees(obj, 0, &lockstate);
  761. }
  762. dbg("enforcing main obj relro");
  763. if (obj_enforce_relro(obj_main) == -1)
  764. rtld_die();
  765. lock_release(rtld_bind_lock, &lockstate);
  766. dbg("transferring control to program entry point = %p", obj_main->entry);
  767. /* Return the exit procedure and the program entry point. */
  768. *exit_proc = rtld_exit_ptr;
  769. *objp = obj_main;
  770. return (func_ptr_type) obj_main->entry;
  771. }
  772. void *
  773. rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
  774. {
  775. void *ptr;
  776. Elf_Addr target;
  777. ptr = (void *)make_function_pointer(def, obj);
  778. target = call_ifunc_resolver(ptr);
  779. return ((void *)target);
  780. }
  781. /*
  782. * NB: MIPS uses a private version of this function (_mips_rtld_bind).
  783. * Changes to this function should be applied there as well.
  784. */
  785. Elf_Addr
  786. _rtld_bind(Obj_Entry *obj, Elf_Size reloff)
  787. {
  788. const Elf_Rel *rel;
  789. const Elf_Sym *def;
  790. const Obj_Entry *defobj;
  791. Elf_Addr *where;
  792. Elf_Addr target;
  793. RtldLockState lockstate;
  794. rlock_acquire(rtld_bind_lock, &lockstate);
  795. if (sigsetjmp(lockstate.env, 0) != 0)
  796. lock_upgrade(rtld_bind_lock, &lockstate);
  797. if (obj->pltrel)
  798. rel = (const Elf_Rel *)((const char *)obj->pltrel + reloff);
  799. else
  800. rel = (const Elf_Rel *)((const char *)obj->pltrela + reloff);
  801. where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
  802. def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
  803. NULL, &lockstate);
  804. if (def == NULL)
  805. rtld_die();
  806. if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
  807. target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
  808. else
  809. target = (Elf_Addr)(defobj->relocbase + def->st_value);
  810. dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
  811. defobj->strtab + def->st_name, basename(obj->path),
  812. (void *)target, basename(defobj->path));
  813. /*
  814. * Write the new contents for the jmpslot. Note that depending on
  815. * architecture, the value which we need to return to the
  816. * lazy binding trampoline may or may not be the target
  817. * address. The value returned from reloc_jmpslot() is the value
  818. * that the trampoline needs.
  819. */
  820. target = reloc_jmpslot(where, target, defobj, obj, rel);
  821. lock_release(rtld_bind_lock, &lockstate);
  822. return target;
  823. }
  824. /*
  825. * Error reporting function. Use it like printf. It formats the message
  826. * into a buffer, and sets things up so that the next call to dlerror()
  827. * will return the message.
  828. */
  829. void
  830. _rtld_error(const char *fmt, ...)
  831. {
  832. static char buf[512];
  833. va_list ap;
  834. va_start(ap, fmt);
  835. rtld_vsnprintf(buf, sizeof buf, fmt, ap);
  836. error_message = buf;
  837. va_end(ap);
  838. LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
  839. }
  840. /*
  841. * Return a dynamically-allocated copy of the current error message, if any.
  842. */
  843. static char *
  844. errmsg_save(void)
  845. {
  846. return error_message == NULL ? NULL : xstrdup(error_message);
  847. }
  848. /*
  849. * Restore the current error message from a copy which was previously saved
  850. * by errmsg_save(). The copy is freed.
  851. */
  852. static void
  853. errmsg_restore(char *saved_msg)
  854. {
  855. if (saved_msg == NULL)
  856. error_message = NULL;
  857. else {
  858. _rtld_error("%s", saved_msg);
  859. free(saved_msg);
  860. }
  861. }
  862. static const char *
  863. basename(const char *name)
  864. {
  865. const char *p = strrchr(name, '/');
  866. return p != NULL ? p + 1 : name;
  867. }
  868. static struct utsname uts;
  869. static char *
  870. origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
  871. const char *subst, bool may_free)
  872. {
  873. char *p, *p1, *res, *resp;
  874. int subst_len, kw_len, subst_count, old_len, new_len;
  875. kw_len = strlen(kw);
  876. /*
  877. * First, count the number of keyword occurrences, to
  878. * preallocate the final string.
  879. */
  880. for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
  881. p1 = strstr(p, kw);
  882. if (p1 == NULL)
  883. break;
  884. }
  885. /*
  886. * If the keyword is not found, just return.
  887. *
  888. * Also return the non-substituted string if resolution failed;
  889. * we cannot do anything more reasonable, since the caller's
  890. * failure mode is an unresolved library anyway.
  891. */
  892. if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
  893. return (may_free ? real : xstrdup(real));
  894. if (obj != NULL)
  895. subst = obj->origin_path;
  896. /*
  897. * There is indeed something to substitute. Calculate the
  898. * length of the resulting string, and allocate it.
  899. */
  900. subst_len = strlen(subst);
  901. old_len = strlen(real);
  902. new_len = old_len + (subst_len - kw_len) * subst_count;
  903. res = xmalloc(new_len + 1);
  904. /*
  905. * Now, execute the substitution loop.
  906. */
  907. for (p = real, resp = res, *resp = '\0';;) {
  908. p1 = strstr(p, kw);
  909. if (p1 != NULL) {
  910. /* Copy the prefix before keyword. */
  911. memcpy(resp, p, p1 - p);
  912. resp += p1 - p;
  913. /* Keyword replacement. */
  914. memcpy(resp, subst, subst_len);
  915. resp += subst_len;
  916. *resp = '\0';
  917. p = p1 + kw_len;
  918. } else
  919. break;
  920. }
  921. /* Copy to the end of string and finish. */
  922. strcat(resp, p);
  923. if (may_free)
  924. free(real);
  925. return (res);
  926. }
  927. static char *
  928. origin_subst(Obj_Entry *obj, const char *real)
  929. {
  930. char *res1, *res2, *res3, *res4;
  931. if (obj == NULL || !trust)
  932. return (xstrdup(real));
  933. if (uts.sysname[0] == '\0') {
  934. if (uname(&uts) != 0) {
  935. _rtld_error("utsname failed: %d", errno);
  936. return (NULL);
  937. }
  938. }
  939. /* __DECONST is safe here since without may_free real is unchanged */
  940. res1 = origin_subst_one(obj, __DECONST(char *, real), "$ORIGIN", NULL,
  941. false);
  942. res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
  943. res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
  944. res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
  945. return (res4);
  946. }
  947. void
  948. rtld_die(void)
  949. {
  950. const char *msg = dlerror();
  951. if (msg == NULL)
  952. msg = "Fatal error";
  953. rtld_fdputstr(STDERR_FILENO, _BASENAME_RTLD ": ");
  954. rtld_fdputstr(STDERR_FILENO, msg);
  955. rtld_fdputchar(STDERR_FILENO, '\n');
  956. _exit(1);
  957. }
  958. /*
  959. * Process a shared object's DYNAMIC section, and save the important
  960. * information in its Obj_Entry structure.
  961. */
  962. static void
  963. digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
  964. const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
  965. {
  966. const Elf_Dyn *dynp;
  967. Needed_Entry **needed_tail = &obj->needed;
  968. Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
  969. Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
  970. const Elf_Hashelt *hashtab;
  971. const Elf32_Word *hashval;
  972. Elf32_Word bkt, nmaskwords;
  973. unsigned int bloom_size32;
  974. int plttype = DT_REL;
  975. *dyn_rpath = NULL;
  976. *dyn_soname = NULL;
  977. *dyn_runpath = NULL;
  978. obj->bind_now = false;
  979. for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
  980. switch (dynp->d_tag) {
  981. case DT_REL:
  982. obj->rel = (const Elf_Rel *)(obj->relocbase + dynp->d_un.d_ptr);
  983. break;
  984. case DT_RELSZ:
  985. obj->relsize = dynp->d_un.d_val;
  986. break;
  987. case DT_RELENT:
  988. assert(dynp->d_un.d_val == sizeof(Elf_Rel));
  989. break;
  990. case DT_JMPREL:
  991. obj->pltrel = (const Elf_Rel *)
  992. (obj->relocbase + dynp->d_un.d_ptr);
  993. break;
  994. case DT_PLTRELSZ:
  995. obj->pltrelsize = dynp->d_un.d_val;
  996. break;
  997. case DT_RELA:
  998. obj->rela = (const Elf_Rela *)(obj->relocbase + dynp->d_un.d_ptr);
  999. break;
  1000. case DT_RELASZ:
  1001. obj->relasize = dynp->d_un.d_val;
  1002. break;
  1003. case DT_RELAENT:
  1004. assert(dynp->d_un.d_val == sizeof(Elf_Rela));
  1005. break;
  1006. case DT_PLTREL:
  1007. plttype = dynp->d_un.d_val;
  1008. assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
  1009. break;
  1010. case DT_SYMTAB:
  1011. obj->symtab = (const Elf_Sym *)
  1012. (obj->relocbase + dynp->d_un.d_ptr);
  1013. break;
  1014. case DT_SYMENT:
  1015. assert(dynp->d_un.d_val == sizeof(Elf_Sym));
  1016. break;
  1017. case DT_STRTAB:
  1018. obj->strtab = (const char *)(obj->relocbase + dynp->d_un.d_ptr);
  1019. break;
  1020. case DT_STRSZ:
  1021. obj->strsize = dynp->d_un.d_val;
  1022. break;
  1023. case DT_VERNEED:
  1024. obj->verneed = (const Elf_Verneed *)(obj->relocbase +
  1025. dynp->d_un.d_val);
  1026. break;
  1027. case DT_VERNEEDNUM:
  1028. obj->verneednum = dynp->d_un.d_val;
  1029. break;
  1030. case DT_VERDEF:
  1031. obj->verdef = (const Elf_Verdef *)(obj->relocbase +
  1032. dynp->d_un.d_val);
  1033. break;
  1034. case DT_VERDEFNUM:
  1035. obj->verdefnum = dynp->d_un.d_val;
  1036. break;
  1037. case DT_VERSYM:
  1038. obj->versyms = (const Elf_Versym *)(obj->relocbase +
  1039. dynp->d_un.d_val);
  1040. break;
  1041. case DT_HASH:
  1042. {
  1043. hashtab = (const Elf_Hashelt *)(obj->relocbase +
  1044. dynp->d_un.d_ptr);
  1045. obj->nbuckets = hashtab[0];
  1046. obj->nchains = hashtab[1];
  1047. obj->buckets = hashtab + 2;
  1048. obj->chains = obj->buckets + obj->nbuckets;
  1049. obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
  1050. obj->buckets != NULL;
  1051. }
  1052. break;
  1053. case DT_GNU_HASH:
  1054. {
  1055. hashtab = (const Elf_Hashelt *)(obj->relocbase +
  1056. dynp->d_un.d_ptr);
  1057. obj->nbuckets_gnu = hashtab[0];
  1058. obj->symndx_gnu = hashtab[1];
  1059. nmaskwords = hashtab[2];
  1060. bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
  1061. obj->maskwords_bm_gnu = nmaskwords - 1;
  1062. obj->shift2_gnu = hashtab[3];
  1063. obj->bloom_gnu = (const Elf_Addr *)(hashtab + 4);
  1064. obj->buckets_gnu = hashtab + 4 + bloom_size32;
  1065. obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
  1066. obj->symndx_gnu;
  1067. /* Number of bitmask words is required to be power of 2 */
  1068. obj->valid_hash_gnu = powerof2(nmaskwords) &&
  1069. obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
  1070. }
  1071. break;
  1072. case DT_NEEDED:
  1073. if (!obj->rtld) {
  1074. Needed_Entry *nep = NEW(Needed_Entry);
  1075. nep->name = dynp->d_un.d_val;
  1076. nep->obj = NULL;
  1077. nep->next = NULL;
  1078. *needed_tail = nep;
  1079. needed_tail = &nep->next;
  1080. }
  1081. break;
  1082. case DT_FILTER:
  1083. if (!obj->rtld) {
  1084. Needed_Entry *nep = NEW(Needed_Entry);
  1085. nep->name = dynp->d_un.d_val;
  1086. nep->obj = NULL;
  1087. nep->next = NULL;
  1088. *needed_filtees_tail = nep;
  1089. needed_filtees_tail = &nep->next;
  1090. if (obj->linkmap.l_refname == NULL)
  1091. obj->linkmap.l_refname = (char *)dynp->d_un.d_val;
  1092. }
  1093. break;
  1094. case DT_AUXILIARY:
  1095. if (!obj->rtld) {
  1096. Needed_Entry *nep = NEW(Needed_Entry);
  1097. nep->name = dynp->d_un.d_val;
  1098. nep->obj = NULL;
  1099. nep->next = NULL;
  1100. *needed_aux_filtees_tail = nep;
  1101. needed_aux_filtees_tail = &nep->next;
  1102. }
  1103. break;
  1104. case DT_PLTGOT:
  1105. obj->pltgot = (Elf_Addr *)(obj->relocbase + dynp->d_un.d_ptr);
  1106. break;
  1107. case DT_TEXTREL:
  1108. obj->textrel = true;
  1109. break;
  1110. case DT_SYMBOLIC:
  1111. obj->symbolic = true;
  1112. break;
  1113. case DT_RPATH:
  1114. /*
  1115. * We have to wait until later to process this, because we
  1116. * might not have gotten the address of the string table yet.
  1117. */
  1118. *dyn_rpath = dynp;
  1119. break;
  1120. case DT_SONAME:
  1121. *dyn_soname = dynp;
  1122. break;
  1123. case DT_RUNPATH:
  1124. *dyn_runpath = dynp;
  1125. break;
  1126. case DT_INIT:
  1127. obj->init = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
  1128. break;
  1129. case DT_PREINIT_ARRAY:
  1130. obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
  1131. break;
  1132. case DT_PREINIT_ARRAYSZ:
  1133. obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
  1134. break;
  1135. case DT_INIT_ARRAY:
  1136. obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
  1137. break;
  1138. case DT_INIT_ARRAYSZ:
  1139. obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
  1140. break;
  1141. case DT_FINI:
  1142. obj->fini = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
  1143. break;
  1144. case DT_FINI_ARRAY:
  1145. obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
  1146. break;
  1147. case DT_FINI_ARRAYSZ:
  1148. obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
  1149. break;
  1150. /*
  1151. * Don't process DT_DEBUG on MIPS as the dynamic section
  1152. * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
  1153. */
  1154. #ifndef __mips__
  1155. case DT_DEBUG:
  1156. if (!early)
  1157. dbg("Filling in DT_DEBUG entry");
  1158. (__DECONST(Elf_Dyn *, dynp))->d_un.d_ptr = (Elf_Addr)&r_debug;
  1159. break;
  1160. #endif
  1161. case DT_FLAGS:
  1162. if (dynp->d_un.d_val & DF_ORIGIN)
  1163. obj->z_origin = true;
  1164. if (dynp->d_un.d_val & DF_SYMBOLIC)
  1165. obj->symbolic = true;
  1166. if (dynp->d_un.d_val & DF_TEXTREL)
  1167. obj->textrel = true;
  1168. if (dynp->d_un.d_val & DF_BIND_NOW)
  1169. obj->bind_now = true;
  1170. if (dynp->d_un.d_val & DF_STATIC_TLS)
  1171. obj->static_tls = true;
  1172. break;
  1173. #ifdef __mips__
  1174. case DT_MIPS_LOCAL_GOTNO:
  1175. obj->local_gotno = dynp->d_un.d_val;
  1176. break;
  1177. case DT_MIPS_SYMTABNO:
  1178. obj->symtabno = dynp->d_un.d_val;
  1179. break;
  1180. case DT_MIPS_GOTSYM:
  1181. obj->gotsym = dynp->d_un.d_val;
  1182. break;
  1183. case DT_MIPS_RLD_MAP:
  1184. *((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
  1185. break;
  1186. case DT_MIPS_PLTGOT:
  1187. obj->mips_pltgot = (Elf_Addr *)(obj->relocbase +
  1188. dynp->d_un.d_ptr);
  1189. break;
  1190. #endif
  1191. #ifdef __powerpc64__
  1192. case DT_PPC64_GLINK:
  1193. obj->glink = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
  1194. break;
  1195. #endif
  1196. case DT_FLAGS_1:
  1197. if (dynp->d_un.d_val & DF_1_NOOPEN)
  1198. obj->z_noopen = true;
  1199. if (dynp->d_un.d_val & DF_1_ORIGIN)
  1200. obj->z_origin = true;
  1201. if (dynp->d_un.d_val & DF_1_GLOBAL)
  1202. obj->z_global = true;
  1203. if (dynp->d_un.d_val & DF_1_BIND_NOW)
  1204. obj->bind_now = true;
  1205. if (dynp->d_un.d_val & DF_1_NODELETE)
  1206. obj->z_nodelete = true;
  1207. if (dynp->d_un.d_val & DF_1_LOADFLTR)
  1208. obj->z_loadfltr = true;
  1209. if (dynp->d_un.d_val & DF_1_INTERPOSE)
  1210. obj->z_interpose = true;
  1211. if (dynp->d_un.d_val & DF_1_NODEFLIB)
  1212. obj->z_nodeflib = true;
  1213. if (dynp->d_un.d_val & DF_1_PIE)
  1214. obj->z_pie = true;
  1215. break;
  1216. default:
  1217. if (!early) {
  1218. dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
  1219. (long)dynp->d_tag);
  1220. }
  1221. break;
  1222. }
  1223. }
  1224. obj->traced = false;
  1225. if (plttype == DT_RELA) {
  1226. obj->pltrela = (const Elf_Rela *) obj->pltrel;
  1227. obj->pltrel = NULL;
  1228. obj->pltrelasize = obj->pltrelsize;
  1229. obj->pltrelsize = 0;
  1230. }
  1231. /* Determine size of dynsym table (equal to nchains of sysv hash) */
  1232. if (obj->valid_hash_sysv)
  1233. obj->dynsymcount = obj->nchains;
  1234. else if (obj->valid_hash_gnu) {
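/*
 * The GNU hash table stores no explicit symbol count: walk each
 * non-empty bucket's chain until an entry with the stop bit (bit 0)
 * set, then add symndx_gnu, the index of the first hashed symbol.
 */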
  1235. obj->dynsymcount = 0;
  1236. for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
  1237. if (obj->buckets_gnu[bkt] == 0)
  1238. continue;
  1239. hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
  1240. do
  1241. obj->dynsymcount++;
  1242. while ((*hashval++ & 1u) == 0);
  1243. }
  1244. obj->dynsymcount += obj->symndx_gnu;
  1245. }
  1246. if (obj->linkmap.l_refname != NULL)
  1247. obj->linkmap.l_refname = obj->strtab + (unsigned long)obj->
  1248. linkmap.l_refname;
  1249. }
  1250. static bool
  1251. obj_resolve_origin(Obj_Entry *obj)
  1252. {
  1253. if (obj->origin_path != NULL)
  1254. return (true);
  1255. obj->origin_path = xmalloc(PATH_MAX);
  1256. return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
  1257. }
  1258. static bool
  1259. digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
  1260. const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
  1261. {
  1262. if (obj->z_origin && !obj_resolve_origin(obj))
  1263. return (false);
  1264. if (dyn_runpath != NULL) {
  1265. obj->runpath = (const char *)obj->strtab + dyn_runpath->d_un.d_val;
  1266. obj->runpath = origin_subst(obj, obj->runpath);
  1267. } else if (dyn_rpath != NULL) {
  1268. obj->rpath = (const char *)obj->strtab + dyn_rpath->d_un.d_val;
  1269. obj->rpath = origin_subst(obj, obj->rpath);
  1270. }
  1271. if (dyn_soname != NULL)
  1272. object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
  1273. return (true);
  1274. }
  1275. static bool
  1276. digest_dynamic(Obj_Entry *obj, int early)
  1277. {
  1278. const Elf_Dyn *dyn_rpath;
  1279. const Elf_Dyn *dyn_soname;
  1280. const Elf_Dyn *dyn_runpath;
  1281. digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
  1282. return (digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath));
  1283. }
  1284. /*
  1285. * Process a shared object's program header. This is used only for the
  1286. * main program, when the kernel has already loaded the main program
  1287. * into memory before calling the dynamic linker. It creates and
  1288. * returns an Obj_Entry structure.
  1289. */
  1290. static Obj_Entry *
  1291. digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
  1292. {
  1293. Obj_Entry *obj;
  1294. const Elf_Phdr *phlimit = phdr + phnum;
  1295. const Elf_Phdr *ph;
  1296. Elf_Addr note_start, note_end;
  1297. int nsegs = 0;
  1298. obj = obj_new();
  1299. for (ph = phdr; ph < phlimit; ph++) {
  1300. if (ph->p_type != PT_PHDR)
  1301. continue;
  1302. obj->phdr = phdr;
  1303. obj->phsize = ph->p_memsz;
  1304. obj->relocbase = __DECONST(char *, phdr) - ph->p_vaddr;
  1305. break;
  1306. }
  1307. obj->stack_flags = PF_R | PF_W;
  1308. for (ph = phdr; ph < phlimit; ph++) {
  1309. switch (ph->p_type) {
  1310. case PT_INTERP:
  1311. obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
  1312. break;
  1313. case PT_LOAD:
  1314. if (nsegs == 0) { /* First load segment */
  1315. obj->vaddrbase = trunc_page(ph->p_vaddr);
  1316. obj->mapbase = obj->vaddrbase + obj->relocbase;
  1317. obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
  1318. obj->vaddrbase;
  1319. } else { /* Last load segment */
  1320. obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
  1321. obj->vaddrbase;
  1322. }
  1323. nsegs++;
  1324. break;
  1325. case PT_DYNAMIC:
  1326. obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
  1327. break;
  1328. case PT_TLS:
  1329. obj->tlsindex = 1;
  1330. obj->tlssize = ph->p_memsz;
  1331. obj->tlsalign = ph->p_align;
  1332. obj->tlsinitsize = ph->p_filesz;
  1333. obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
  1334. obj->tlspoffset = ph->p_offset;
  1335. break;
  1336. case PT_GNU_STACK:
  1337. obj->stack_flags = ph->p_flags;
  1338. break;
  1339. case PT_GNU_RELRO:
  1340. obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
  1341. obj->relro_size = round_page(ph->p_memsz);
  1342. break;
  1343. case PT_NOTE:
  1344. note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
  1345. note_end = note_start + ph->p_filesz;
  1346. digest_notes(obj, note_start, note_end);
  1347. break;
  1348. }
  1349. }
  1350. if (nsegs < 1) {
  1351. _rtld_error("%s: too few PT_LOAD segments", path);
  1352. return NULL;
  1353. }
  1354. obj->entry = entry;
  1355. return obj;
  1356. }
  1357. void
  1358. digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
  1359. {
  1360. const Elf_Note *note;
  1361. const char *note_name;
  1362. uintptr_t p;
  1363. for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
  1364. note = (const Elf_Note *)((const char *)(note + 1) +
  1365. roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
  1366. roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
  1367. if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
  1368. note->n_descsz != sizeof(int32_t))
  1369. continue;
  1370. if (note->n_type != NT_FREEBSD_ABI_TAG &&
  1371. note->n_type != NT_FREEBSD_FEATURE_CTL &&
  1372. note->n_type != NT_FREEBSD_NOINIT_TAG)
  1373. continue;
  1374. note_name = (const char *)(note + 1);
  1375. if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
  1376. sizeof(NOTE_FREEBSD_VENDOR)) != 0)
  1377. continue;
  1378. switch (note->n_type) {
  1379. case NT_FREEBSD_ABI_TAG:
  1380. /* FreeBSD osrel note */
  1381. p = (uintptr_t)(note + 1);
  1382. p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
  1383. obj->osrel = *(const int32_t *)(p);
  1384. dbg("note osrel %d", obj->osrel);
  1385. break;
  1386. case NT_FREEBSD_FEATURE_CTL:
  1387. /* FreeBSD ABI feature control note */
  1388. p = (uintptr_t)(note + 1);
  1389. p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
  1390. obj->fctl0 = *(const uint32_t *)(p);
  1391. dbg("note fctl0 %#x", obj->fctl0);
  1392. break;
  1393. case NT_FREEBSD_NOINIT_TAG:
  1394. /* FreeBSD 'crt does not call init' note */
  1395. obj->crt_no_init = true;
  1396. dbg("note crt_no_init");
  1397. break;
  1398. }
  1399. }
  1400. }
  1401. static Obj_Entry *
  1402. dlcheck(void *handle)
  1403. {
  1404. Obj_Entry *obj;
  1405. TAILQ_FOREACH(obj, &obj_list, next) {
  1406. if (obj == (Obj_Entry *) handle)
  1407. break;
  1408. }
  1409. if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
  1410. _rtld_error("Invalid shared object handle %p", handle);
  1411. return NULL;
  1412. }
  1413. return obj;
  1414. }
  1415. /*
  1416. * If the given object is already in the donelist, return true. Otherwise
  1417. * add the object to the list and return false.
  1418. */
  1419. static bool
  1420. donelist_check(DoneList *dlp, const Obj_Entry *obj)
  1421. {
  1422. unsigned int i;
  1423. for (i = 0; i < dlp->num_used; i++)
  1424. if (dlp->objs[i] == obj)
  1425. return true;
  1426. /*
  1427. * Our donelist allocation should always be sufficient. But if
  1428. * our thread locking isn't working properly, more shared objects
  1429. * could have been loaded since we allocated the list. That should
  1430. * never happen, but we'll handle it properly just in case it does.
  1431. */
  1432. if (dlp->num_used < dlp->num_alloc)
  1433. dlp->objs[dlp->num_used++] = obj;
  1434. return false;
  1435. }
  1436. /*
  1437. * Hash function for symbol table lookup. Don't even think about changing
  1438. * this. It is specified by the System V ABI.
  1439. */
  1440. unsigned long
  1441. elf_hash(const char *name)
  1442. {
  1443. const unsigned char *p = (const unsigned char *) name;
  1444. unsigned long h = 0;
  1445. unsigned long g;
  1446. while (*p != '\0') {
  1447. h = (h << 4) + *p++;
  1448. if ((g = h & 0xf0000000) != 0)
  1449. h ^= g >> 24;
  1450. h &= ~g;
  1451. }
  1452. return h;
  1453. }
  1454. /*
  1455. * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
  1456. * unsigned in case it's implemented with a wider type.
  1457. */
  1458. static uint32_t
  1459. gnu_hash(const char *s)
  1460. {
  1461. uint32_t h;
  1462. unsigned char c;
  1463. h = 5381;
  1464. for (c = *s; c != '\0'; c = *++s)
  1465. h = h * 33 + c;
  1466. return (h & 0xffffffff);
  1467. }
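/*
 * Two fixed points of the function above, handy for sanity checking:
 * gnu_hash("") == 5381 (the DJB initial value), and each character folds
 * in as h = h * 33 + c, so gnu_hash("a") == 5381 * 33 + 'a' == 177670.
 */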
  1468. /*
  1469. * Find the library with the given name, and return its full pathname.
  1470. * The returned string is dynamically allocated. Generates an error
  1471. * message and returns NULL if the library cannot be found.
  1472. *
  1473. * If the second argument is non-NULL, then it refers to an already-
  1474. * loaded shared object, whose library search path will be searched.
  1475. *
  1476. * If a library is successfully located via LD_LIBRARY_PATH_FDS, its
  1477. * descriptor (which is close-on-exec) will be passed out via the third
  1478. * argument.
  1479. *
  1480. * The search order is:
  1481. * DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
  1482. * DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
  1483. * LD_LIBRARY_PATH
  1484. * DT_RUNPATH in the referencing file
  1485. * ldconfig hints (if -z nodefaultlib, filter out default library directories
  1486. * from list)
  1487. * /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
  1488. *
  1489. * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
  1490. */
  1491. static char *
  1492. find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
  1493. {
  1494. char *pathname, *refobj_path;
  1495. const char *name;
  1496. bool nodeflib, objgiven;
  1497. objgiven = refobj != NULL;
  1498. if (libmap_disable || !objgiven ||
  1499. (name = lm_find(refobj->path, xname)) == NULL)
  1500. name = xname;
  1501. if (strchr(name, '/') != NULL) { /* Hard coded pathname */
  1502. if (name[0] != '/' && !trust) {
  1503. _rtld_error("Absolute pathname required "
  1504. "for shared object \"%s\"", name);
  1505. return (NULL);
  1506. }
  1507. return (origin_subst(__DECONST(Obj_Entry *, refobj),
  1508. __DECONST(char *, name)));
  1509. }
  1510. dbg(" Searching for \"%s\"", name);
  1511. refobj_path = objgiven ? refobj->path : NULL;
  1512. /*
  1513. * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
1514. * back to pre-conforming behaviour if the user requested it with the
1515. * LD_LIBRARY_PATH_RPATH environment variable, and ignore -z
  1516. * nodeflib.
  1517. */
  1518. if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
  1519. pathname = search_library_path(name, ld_library_path,
  1520. refobj_path, fdp);
  1521. if (pathname != NULL)
  1522. return (pathname);
  1523. if (refobj != NULL) {
  1524. pathname = search_library_path(name, refobj->rpath,
  1525. refobj_path, fdp);
  1526. if (pathname != NULL)
  1527. return (pathname);
  1528. }
  1529. pathname = search_library_pathfds(name, ld_library_dirs, fdp);
  1530. if (pathname != NULL)
  1531. return (pathname);
  1532. pathname = search_library_path(name, gethints(false),
  1533. refobj_path, fdp);
  1534. if (pathname != NULL)
  1535. return (pathname);
  1536. pathname = search_library_path(name, ld_standard_library_path,
  1537. refobj_path, fdp);
  1538. if (pathname != NULL)
  1539. return (pathname);
  1540. } else {
  1541. nodeflib = objgiven ? refobj->z_nodeflib : false;
  1542. if (objgiven) {
  1543. pathname = search_library_path(name, refobj->rpath,
  1544. refobj->path, fdp);
  1545. if (pathname != NULL)
  1546. return (pathname);
  1547. }
  1548. if (objgiven && refobj->runpath == NULL && refobj != obj_main) {
  1549. pathname = search_library_path(name, obj_main->rpath,
  1550. refobj_path, fdp);
  1551. if (pathname != NULL)
  1552. return (pathname);
  1553. }
  1554. pathname = search_library_path(name, ld_library_path,
  1555. refobj_path, fdp);
  1556. if (pathname != NULL)
  1557. return (pathname);
  1558. if (objgiven) {
  1559. pathname = search_library_path(name, refobj->runpath,
  1560. refobj_path, fdp);
  1561. if (pathname != NULL)
  1562. return (pathname);
  1563. }
  1564. pathname = search_library_pathfds(name, ld_library_dirs, fdp);
  1565. if (pathname != NULL)
  1566. return (pathname);
  1567. pathname = search_library_path(name, gethints(nodeflib),
  1568. refobj_path, fdp);
  1569. if (pathname != NULL)
  1570. return (pathname);
  1571. if (objgiven && !nodeflib) {
  1572. pathname = search_library_path(name,
  1573. ld_standard_library_path, refobj_path, fdp);
  1574. if (pathname != NULL)
  1575. return (pathname);
  1576. }
  1577. }
  1578. if (objgiven && refobj->path != NULL) {
  1579. _rtld_error("Shared object \"%s\" not found, "
  1580. "required by \"%s\"", name, basename(refobj->path));
  1581. } else {
  1582. _rtld_error("Shared object \"%s\" not found", name);
  1583. }
  1584. return (NULL);
  1585. }
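/*
 * Worked example of the search order above (hypothetical paths): for a
 * referencing object built with DT_RUNPATH=$ORIGIN/../lib and with
 * LD_LIBRARY_PATH=/opt/lib in the environment, a request for
 * "libfoo.so.1" is tried roughly as
 *
 *	/opt/lib/libfoo.so.1			(LD_LIBRARY_PATH)
 *	<origin>/../lib/libfoo.so.1		(DT_RUNPATH)
 *	LD_LIBRARY_PATH_FDS descriptors, if any
 *	directories from the ldconfig hints file
 *	/lib/libfoo.so.1, /usr/lib/libfoo.so.1	(unless -z nodefaultlib)
 *
 * DT_RPATH does not participate here because digest_dynamic2 leaves
 * rpath NULL whenever DT_RUNPATH is defined.
 */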
  1586. /*
  1587. * Given a symbol number in a referencing object, find the corresponding
  1588. * definition of the symbol. Returns a pointer to the symbol, or NULL if
  1589. * no definition was found. Returns a pointer to the Obj_Entry of the
  1590. * defining object via the reference parameter DEFOBJ_OUT.
  1591. */
  1592. const Elf_Sym *
  1593. find_symdef(unsigned long symnum, const Obj_Entry *refobj,
  1594. const Obj_Entry **defobj_out, int flags, SymCache *cache,
  1595. RtldLockState *lockstate)
  1596. {
  1597. const Elf_Sym *ref;
  1598. const Elf_Sym *def;
  1599. const Obj_Entry *defobj;
  1600. const Ver_Entry *ve;
  1601. SymLook req;
  1602. const char *name;
  1603. int res;
  1604. /*
  1605. * If we have already found this symbol, get the information from
  1606. * the cache.
  1607. */
  1608. if (symnum >= refobj->dynsymcount)
  1609. return NULL; /* Bad object */
  1610. if (cache != NULL && cache[symnum].sym != NULL) {
  1611. *defobj_out = cache[symnum].obj;
  1612. return cache[symnum].sym;
  1613. }
  1614. ref = refobj->symtab + symnum;
  1615. name = refobj->strtab + ref->st_name;
  1616. def = NULL;
  1617. defobj = NULL;
  1618. ve = NULL;
  1619. /*
1620. * We don't have to do a full-scale lookup if the symbol is local.
1621. * We know it will bind to the instance in this load module, to
1622. * which we already have a pointer (i.e., ref). By not doing a lookup,
1623. * we not only improve performance but also avoid unresolvable
  1624. * symbols when local symbols are not in the hash table. This has
  1625. * been seen with the ia64 toolchain.
  1626. */
  1627. if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
  1628. if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
  1629. _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
  1630. symnum);
  1631. }
  1632. symlook_init(&req, name);
  1633. req.flags = flags;
  1634. ve = req.ventry = fetch_ventry(refobj, symnum);
  1635. req.lockstate = lockstate;
  1636. res = symlook_default(&req, refobj);
  1637. if (res == 0) {
  1638. def = req.sym_out;
  1639. defobj = req.defobj_out;
  1640. }
  1641. } else {
  1642. def = ref;
  1643. defobj = refobj;
  1644. }
  1645. /*
  1646. * If we found no definition and the reference is weak, treat the
  1647. * symbol as having the value zero.
  1648. */
  1649. if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
  1650. def = &sym_zero;
  1651. defobj = obj_main;
  1652. }
  1653. if (def != NULL) {
  1654. *defobj_out = defobj;
  1655. /* Record the information in the cache to avoid subsequent lookups. */
  1656. if (cache != NULL) {
  1657. cache[symnum].sym = def;
  1658. cache[symnum].obj = defobj;
  1659. }
  1660. } else {
  1661. if (refobj != &obj_rtld)
  1662. _rtld_error("%s: Undefined symbol \"%s%s%s\"", refobj->path, name,
  1663. ve != NULL ? "@" : "", ve != NULL ? ve->name : "");
  1664. }
  1665. return def;
  1666. }
  1667. /*
  1668. * Return the search path from the ldconfig hints file, reading it if
  1669. * necessary. If nostdlib is true, then the default search paths are
  1670. * not added to result.
  1671. *
  1672. * Returns NULL if there are problems with the hints file,
  1673. * or if the search path there is empty.
  1674. */
  1675. static const char *
  1676. gethints(bool nostdlib)
  1677. {
  1678. static char *filtered_path;
  1679. static const char *hints;
  1680. static struct elfhints_hdr hdr;
  1681. struct fill_search_info_args sargs, hargs;
  1682. struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
  1683. struct dl_serpath *SLPpath, *hintpath;
  1684. char *p;
  1685. struct stat hint_stat;
  1686. unsigned int SLPndx, hintndx, fndx, fcount;
  1687. int fd;
  1688. size_t flen;
  1689. uint32_t dl;
  1690. bool skip;
  1691. /* First call, read the hints file */
  1692. if (hints == NULL) {
  1693. /* Keep from trying again in case the hints file is bad. */
  1694. hints = "";
  1695. if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
  1696. return (NULL);
  1697. /*
1698. * The check of the hdr.dirlistlen value against the type limit
1699. * is intended to pacify static analyzers. Further
  1700. * paranoia leads to checks that dirlist is fully
  1701. * contained in the file range.
  1702. */
  1703. if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
  1704. hdr.magic != ELFHINTS_MAGIC ||
  1705. hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
  1706. fstat(fd, &hint_stat) == -1) {
  1707. cleanup1:
  1708. close(fd);
  1709. hdr.dirlistlen = 0;
  1710. return (NULL);
  1711. }
  1712. dl = hdr.strtab;
  1713. if (dl + hdr.dirlist < dl)
  1714. goto cleanup1;
  1715. dl += hdr.dirlist;
  1716. if (dl + hdr.dirlistlen < dl)
  1717. goto cleanup1;
  1718. dl += hdr.dirlistlen;
  1719. if (dl > hint_stat.st_size)
  1720. goto cleanup1;
  1721. p = xmalloc(hdr.dirlistlen + 1);
  1722. if (pread(fd, p, hdr.dirlistlen + 1,
  1723. hdr.strtab + hdr.dirlist) != (ssize_t)hdr.dirlistlen + 1 ||
  1724. p[hdr.dirlistlen] != '\0') {
  1725. free(p);
  1726. goto cleanup1;
  1727. }
  1728. hints = p;
  1729. close(fd);
  1730. }
  1731. /*
1732. * If the caller agreed to receive a list which includes the default
1733. * paths, we are done. Otherwise, if we have not yet
1734. * calculated the filtered result, do it now.
  1735. */
  1736. if (!nostdlib)
  1737. return (hints[0] != '\0' ? hints : NULL);
  1738. if (filtered_path != NULL)
  1739. goto filt_ret;
  1740. /*
  1741. * Obtain the list of all configured search paths, and the
  1742. * list of the default paths.
  1743. *
  1744. * First estimate the size of the results.
  1745. */
  1746. smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
  1747. smeta.dls_cnt = 0;
  1748. hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
  1749. hmeta.dls_cnt = 0;
  1750. sargs.request = RTLD_DI_SERINFOSIZE;
  1751. sargs.serinfo = &smeta;
  1752. hargs.request = RTLD_DI_SERINFOSIZE;
  1753. hargs.serinfo = &hmeta;
  1754. path_enumerate(ld_standard_library_path, fill_search_info, NULL,
  1755. &sargs);
  1756. path_enumerate(hints, fill_search_info, NULL, &hargs);
  1757. SLPinfo = xmalloc(smeta.dls_size);
  1758. hintinfo = xmalloc(hmeta.dls_size);
  1759. /*
  1760. * Next fetch both sets of paths.
  1761. */
  1762. sargs.request = RTLD_DI_SERINFO;
  1763. sargs.serinfo = SLPinfo;
  1764. sargs.serpath = &SLPinfo->dls_serpath[0];
  1765. sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
  1766. hargs.request = RTLD_DI_SERINFO;
  1767. hargs.serinfo = hintinfo;
  1768. hargs.serpath = &hintinfo->dls_serpath[0];
  1769. hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
  1770. path_enumerate(ld_standard_library_path, fill_search_info, NULL,
  1771. &sargs);
  1772. path_enumerate(hints, fill_search_info, NULL, &hargs);
  1773. /*
1774. * Now calculate the difference between the two sets, by excluding
  1775. * standard paths from the full set.
  1776. */
  1777. fndx = 0;
  1778. fcount = 0;
  1779. filtered_path = xmalloc(hdr.dirlistlen + 1);
  1780. hintpath = &hintinfo->dls_serpath[0];
  1781. for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
  1782. skip = false;
  1783. SLPpath = &SLPinfo->dls_serpath[0];
  1784. /*
  1785. * Check each standard path against current.
  1786. */
  1787. for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
  1788. /* matched, skip the path */
  1789. if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
  1790. skip = true;
  1791. break;
  1792. }
  1793. }
  1794. if (skip)
  1795. continue;
  1796. /*
  1797. * Not matched against any standard path, add the path
1798. * to the result. Separate consecutive paths with ':'.
  1799. */
  1800. if (fcount > 0) {
  1801. filtered_path[fndx] = ':';
  1802. fndx++;
  1803. }
  1804. fcount++;
  1805. flen = strlen(hintpath->dls_name);
  1806. strncpy((filtered_path + fndx), hintpath->dls_name, flen);
  1807. fndx += flen;
  1808. }
  1809. filtered_path[fndx] = '\0';
  1810. free(SLPinfo);
  1811. free(hintinfo);
  1812. filt_ret:
  1813. return (filtered_path[0] != '\0' ? filtered_path : NULL);
  1814. }
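/*
 * Shape of the hints file as consumed above (a sketch; the elfhints
 * header definition is authoritative): a struct elfhints_hdr at offset 0
 * carries magic, version, strtab, dirlist and dirlistlen; the
 * NUL-terminated, ':'-separated directory list starts at file offset
 * hdr.strtab + hdr.dirlist and is hdr.dirlistlen bytes long.
 */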
  1815. static void
  1816. init_dag(Obj_Entry *root)
  1817. {
  1818. const Needed_Entry *needed;
  1819. const Objlist_Entry *elm;
  1820. DoneList donelist;
  1821. if (root->dag_inited)
  1822. return;
  1823. donelist_init(&donelist);
  1824. /* Root object belongs to own DAG. */
  1825. objlist_push_tail(&root->dldags, root);
  1826. objlist_push_tail(&root->dagmembers, root);
  1827. donelist_check(&donelist, root);
  1828. /*
1829. * Add the dependencies of the root object to the DAG in breadth-first
1830. * order by exploiting the fact that each new object gets added
  1831. * to the tail of the dagmembers list.
  1832. */
  1833. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  1834. for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
  1835. if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
  1836. continue;
  1837. objlist_push_tail(&needed->obj->dldags, root);
  1838. objlist_push_tail(&root->dagmembers, needed->obj);
  1839. }
  1840. }
  1841. root->dag_inited = true;
  1842. }
  1843. static void
  1844. init_marker(Obj_Entry *marker)
  1845. {
  1846. bzero(marker, sizeof(*marker));
  1847. marker->marker = true;
  1848. }
  1849. Obj_Entry *
  1850. globallist_curr(const Obj_Entry *obj)
  1851. {
  1852. for (;;) {
  1853. if (obj == NULL)
  1854. return (NULL);
  1855. if (!obj->marker)
  1856. return (__DECONST(Obj_Entry *, obj));
  1857. obj = TAILQ_PREV(obj, obj_entry_q, next);
  1858. }
  1859. }
  1860. Obj_Entry *
  1861. globallist_next(const Obj_Entry *obj)
  1862. {
  1863. for (;;) {
  1864. obj = TAILQ_NEXT(obj, next);
  1865. if (obj == NULL)
  1866. return (NULL);
  1867. if (!obj->marker)
  1868. return (__DECONST(Obj_Entry *, obj));
  1869. }
  1870. }
  1871. /* Prevent the object from being unmapped while the bind lock is dropped. */
  1872. static void
  1873. hold_object(Obj_Entry *obj)
  1874. {
  1875. obj->holdcount++;
  1876. }
  1877. static void
  1878. unhold_object(Obj_Entry *obj)
  1879. {
  1880. assert(obj->holdcount > 0);
  1881. if (--obj->holdcount == 0 && obj->unholdfree)
  1882. release_object(obj);
  1883. }
  1884. static void
  1885. process_z(Obj_Entry *root)
  1886. {
  1887. const Objlist_Entry *elm;
  1888. Obj_Entry *obj;
  1889. /*
1890. * Walk over the object DAG and process every dependent object
1891. * that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need
1892. * to grow their own DAG.
1893. *
1894. * For DF_1_GLOBAL, the DAG is required for symbol lookups in
  1895. * symlook_global() to work.
  1896. *
  1897. * For DF_1_NODELETE, the DAG should have its reference upped.
  1898. */
  1899. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  1900. obj = elm->obj;
  1901. if (obj == NULL)
  1902. continue;
  1903. if (obj->z_nodelete && !obj->ref_nodel) {
  1904. dbg("obj %s -z nodelete", obj->path);
  1905. init_dag(obj);
  1906. ref_dag(obj);
  1907. obj->ref_nodel = true;
  1908. }
  1909. if (obj->z_global && objlist_find(&list_global, obj) == NULL) {
  1910. dbg("obj %s -z global", obj->path);
  1911. objlist_push_tail(&list_global, obj);
  1912. init_dag(obj);
  1913. }
  1914. }
  1915. }
  1916. static void
  1917. parse_rtld_phdr(Obj_Entry *obj)
  1918. {
  1919. const Elf_Phdr *ph;
  1920. Elf_Addr note_start, note_end;
  1921. #ifdef HARDENEDBSD
  1922. obj->stack_flags = PF_R | PF_W;
  1923. #else
  1924. obj->stack_flags = PF_X | PF_R | PF_W;
  1925. #endif
  1926. for (ph = obj->phdr; (const char *)ph < (const char *)obj->phdr +
  1927. obj->phsize; ph++) {
  1928. switch (ph->p_type) {
  1929. case PT_GNU_STACK:
  1930. obj->stack_flags = ph->p_flags;
  1931. #ifdef HARDENEDBSD
  1932. /*
  1933. * XXX Shared objects that set RWX stack can
  1934. * die in a fire
  1935. */
  1936. obj->stack_flags &= ~(PF_X);
  1937. #endif
  1938. break;
  1939. case PT_GNU_RELRO:
  1940. obj->relro_page = obj->relocbase +
  1941. trunc_page(ph->p_vaddr);
  1942. obj->relro_size = round_page(ph->p_memsz);
  1943. break;
  1944. case PT_NOTE:
  1945. note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
  1946. note_end = note_start + ph->p_filesz;
  1947. digest_notes(obj, note_start, note_end);
  1948. break;
  1949. }
  1950. }
  1951. }
  1952. /*
  1953. * Initialize the dynamic linker. The argument is the address at which
  1954. * the dynamic linker has been mapped into memory. The primary task of
  1955. * this function is to relocate the dynamic linker.
  1956. */
  1957. static void
  1958. init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
  1959. {
  1960. Obj_Entry objtmp; /* Temporary rtld object */
  1961. const Elf_Ehdr *ehdr;
  1962. const Elf_Dyn *dyn_rpath;
  1963. const Elf_Dyn *dyn_soname;
  1964. const Elf_Dyn *dyn_runpath;
  1965. #ifdef RTLD_INIT_PAGESIZES_EARLY
  1966. /* The page size is required by the dynamic memory allocator. */
  1967. init_pagesizes(aux_info);
  1968. #endif
  1969. /*
  1970. * Conjure up an Obj_Entry structure for the dynamic linker.
  1971. *
  1972. * The "path" member can't be initialized yet because string constants
  1973. * cannot yet be accessed. Below we will set it correctly.
  1974. */
  1975. memset(&objtmp, 0, sizeof(objtmp));
  1976. objtmp.path = NULL;
  1977. objtmp.rtld = true;
  1978. objtmp.mapbase = mapbase;
  1979. #ifdef PIC
  1980. objtmp.relocbase = mapbase;
  1981. #endif
  1982. objtmp.dynamic = rtld_dynamic(&objtmp);
  1983. digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
  1984. assert(objtmp.needed == NULL);
  1985. #if !defined(__mips__)
  1986. /* MIPS has a bogus DT_TEXTREL. */
  1987. assert(!objtmp.textrel);
  1988. #endif
  1989. /*
  1990. * Temporarily put the dynamic linker entry into the object list, so
  1991. * that symbols can be found.
  1992. */
  1993. relocate_objects(&objtmp, true, &objtmp, 0, NULL);
  1994. ehdr = (Elf_Ehdr *)mapbase;
  1995. objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff);
  1996. objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]);
  1997. /* Initialize the object list. */
  1998. TAILQ_INIT(&obj_list);
1999. /* Now that non-local variables can be accessed, copy out obj_rtld. */
  2000. memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
  2001. #ifndef RTLD_INIT_PAGESIZES_EARLY
  2002. /* The page size is required by the dynamic memory allocator. */
  2003. init_pagesizes(aux_info);
  2004. #endif
  2005. if (aux_info[AT_OSRELDATE] != NULL)
  2006. osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
  2007. digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
  2008. /* Replace the path with a dynamically allocated copy. */
  2009. obj_rtld.path = xstrdup(ld_path_rtld);
  2010. parse_rtld_phdr(&obj_rtld);
  2011. obj_enforce_relro(&obj_rtld);
  2012. r_debug.r_brk = r_debug_state;
  2013. r_debug.r_state = RT_CONSISTENT;
  2014. }
  2015. /*
  2016. * Retrieve the array of supported page sizes. The kernel provides the page
  2017. * sizes in increasing order.
  2018. */
  2019. static void
  2020. init_pagesizes(Elf_Auxinfo **aux_info)
  2021. {
  2022. static size_t psa[MAXPAGESIZES];
  2023. int mib[2];
  2024. size_t len, size;
  2025. if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] !=
  2026. NULL) {
  2027. size = aux_info[AT_PAGESIZESLEN]->a_un.a_val;
  2028. pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr;
  2029. } else {
  2030. len = 2;
  2031. if (sysctlnametomib("hw.pagesizes", mib, &len) == 0)
  2032. size = sizeof(psa);
  2033. else {
  2034. /* As a fallback, retrieve the base page size. */
  2035. size = sizeof(psa[0]);
  2036. if (aux_info[AT_PAGESZ] != NULL) {
  2037. psa[0] = aux_info[AT_PAGESZ]->a_un.a_val;
  2038. goto psa_filled;
  2039. } else {
  2040. mib[0] = CTL_HW;
  2041. mib[1] = HW_PAGESIZE;
  2042. len = 2;
  2043. }
  2044. }
  2045. if (sysctl(mib, len, psa, &size, NULL, 0) == -1) {
  2046. _rtld_error("sysctl for hw.pagesize(s) failed");
  2047. rtld_die();
  2048. }
  2049. psa_filled:
  2050. pagesizes = psa;
  2051. }
  2052. npagesizes = size / sizeof(pagesizes[0]);
  2053. /* Discard any invalid entries at the end of the array. */
  2054. while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0)
  2055. npagesizes--;
  2056. }
  2057. /*
  2058. * Add the init functions from a needed object list (and its recursive
  2059. * needed objects) to "list". This is not used directly; it is a helper
  2060. * function for initlist_add_objects(). The write lock must be held
  2061. * when this function is called.
  2062. */
  2063. static void
  2064. initlist_add_neededs(Needed_Entry *needed, Objlist *list)
  2065. {
  2066. /* Recursively process the successor needed objects. */
  2067. if (needed->next != NULL)
  2068. initlist_add_neededs(needed->next, list);
  2069. /* Process the current needed object. */
  2070. if (needed->obj != NULL)
  2071. initlist_add_objects(needed->obj, needed->obj, list);
  2072. }
  2073. /*
  2074. * Scan all of the DAGs rooted in the range of objects from "obj" to
  2075. * "tail" and add their init functions to "list". This recurses over
2076. * the DAGs and ensures the proper init ordering such that each object's
  2077. * needed libraries are initialized before the object itself. At the
  2078. * same time, this function adds the objects to the global finalization
  2079. * list "list_fini" in the opposite order. The write lock must be
  2080. * held when this function is called.
  2081. */
  2082. static void
  2083. initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list)
  2084. {
  2085. Obj_Entry *nobj;
  2086. if (obj->init_scanned || obj->init_done)
  2087. return;
  2088. obj->init_scanned = true;
  2089. /* Recursively process the successor objects. */
  2090. nobj = globallist_next(obj);
  2091. if (nobj != NULL && obj != tail)
  2092. initlist_add_objects(nobj, tail, list);
  2093. /* Recursively process the needed objects. */
  2094. if (obj->needed != NULL)
  2095. initlist_add_neededs(obj->needed, list);
  2096. if (obj->needed_filtees != NULL)
  2097. initlist_add_neededs(obj->needed_filtees, list);
  2098. if (obj->needed_aux_filtees != NULL)
  2099. initlist_add_neededs(obj->needed_aux_filtees, list);
  2100. /* Add the object to the init list. */
  2101. objlist_push_tail(list, obj);
  2102. /* Add the object to the global fini list in the reverse order. */
  2103. if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
  2104. && !obj->on_fini_list) {
  2105. objlist_push_head(&list_fini, obj);
  2106. obj->on_fini_list = true;
  2107. }
  2108. }
  2109. #ifndef FPTR_TARGET
  2110. #define FPTR_TARGET(f) ((Elf_Addr) (f))
  2111. #endif
  2112. static void
  2113. free_needed_filtees(Needed_Entry *n, RtldLockState *lockstate)
  2114. {
  2115. Needed_Entry *needed, *needed1;
  2116. for (needed = n; needed != NULL; needed = needed->next) {
  2117. if (needed->obj != NULL) {
  2118. dlclose_locked(needed->obj, lockstate);
  2119. needed->obj = NULL;
  2120. }
  2121. }
  2122. for (needed = n; needed != NULL; needed = needed1) {
  2123. needed1 = needed->next;
  2124. free(needed);
  2125. }
  2126. }
  2127. static void
  2128. unload_filtees(Obj_Entry *obj, RtldLockState *lockstate)
  2129. {
  2130. free_needed_filtees(obj->needed_filtees, lockstate);
  2131. obj->needed_filtees = NULL;
  2132. free_needed_filtees(obj->needed_aux_filtees, lockstate);
  2133. obj->needed_aux_filtees = NULL;
  2134. obj->filtees_loaded = false;
  2135. }
  2136. static void
  2137. load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags,
  2138. RtldLockState *lockstate)
  2139. {
  2140. for (; needed != NULL; needed = needed->next) {
  2141. needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
  2142. flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
  2143. RTLD_LOCAL, lockstate);
  2144. }
  2145. }
  2146. static void
  2147. load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
  2148. {
  2149. lock_restart_for_upgrade(lockstate);
  2150. if (!obj->filtees_loaded) {
  2151. load_filtee1(obj, obj->needed_filtees, flags, lockstate);
  2152. load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate);
  2153. obj->filtees_loaded = true;
  2154. }
  2155. }
  2156. static int
  2157. process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
  2158. {
  2159. Obj_Entry *obj1;
  2160. for (; needed != NULL; needed = needed->next) {
  2161. obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
  2162. flags & ~RTLD_LO_NOLOAD);
  2163. if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
  2164. return (-1);
  2165. }
  2166. return (0);
  2167. }
  2168. #if defined(HARDENEDBSD) && defined(SHLIBRANDOM)
  2169. static void
  2170. randomize_neededs(Obj_Entry *obj, int flags)
  2171. {
  2172. Needed_Entry **needs=NULL, *need=NULL;
  2173. unsigned int i, j, nneed;
  2174. size_t sz = sizeof(unsigned int);
  2175. int mib[2];
  2176. if (!(obj->needed) || (flags & RTLD_LO_FILTEES))
  2177. return;
  2178. mib[0] = CTL_KERN;
  2179. mib[1] = KERN_ARND;
  2180. for (nneed = 0, need = obj->needed; need != NULL; need = need->next)
  2181. nneed++;
  2182. if (nneed > 1) {
  2183. needs = xcalloc(nneed, sizeof(Needed_Entry **));
  2184. for (i = 0, need = obj->needed; i < nneed; i++, need = need->next)
  2185. needs[i] = need;
  2186. for (i=0; i < nneed; i++) {
  2187. do {
  2188. if (sysctl(mib, 2, &j, &sz, NULL, 0))
  2189. goto err;
  2190. j %= nneed;
  2191. } while (j == i);
  2192. need = needs[i];
  2193. needs[i] = needs[j];
  2194. needs[j] = need;
  2195. }
  2196. for (i=0; i < nneed; i++)
  2197. needs[i]->next = i + 1 < nneed ? needs[i + 1] : NULL;
  2198. obj->needed = needs[0];
  2199. }
  2200. err:
  2201. if (needs != NULL)
  2202. free(needs);
  2203. return;
  2204. }
  2205. #endif
  2206. /*
  2207. * Given a shared object, traverse its list of needed objects, and load
  2208. * each of them. Returns 0 on success. Generates an error message and
  2209. * returns -1 on failure.
  2210. */
  2211. static int
  2212. load_needed_objects(Obj_Entry *first, int flags)
  2213. {
  2214. Obj_Entry *obj;
  2215. for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
  2216. if (obj->marker)
  2217. continue;
  2218. #if defined(HARDENEDBSD) && defined(SHLIBRANDOM)
  2219. if ((pax_flags & (PAX_HARDENING_NOSHLIBRANDOM | PAX_HARDENING_SHLIBRANDOM)) !=
  2220. PAX_HARDENING_NOSHLIBRANDOM)
  2221. randomize_neededs(obj, flags);
  2222. #endif
  2223. if (process_needed(obj, obj->needed, flags) == -1)
  2224. return (-1);
  2225. }
  2226. return (0);
  2227. }
  2228. static int
  2229. load_preload_objects(void)
  2230. {
  2231. char *p = ld_preload;
  2232. Obj_Entry *obj;
  2233. static const char delim[] = " \t:;";
  2234. if (p == NULL)
  2235. return 0;
  2236. p += strspn(p, delim);
  2237. while (*p != '\0') {
  2238. size_t len = strcspn(p, delim);
  2239. char savech;
  2240. savech = p[len];
  2241. p[len] = '\0';
  2242. obj = load_object(p, -1, NULL, 0);
  2243. if (obj == NULL)
  2244. return -1; /* XXX - cleanup */
  2245. obj->z_interpose = true;
  2246. p[len] = savech;
  2247. p += len;
  2248. p += strspn(p, delim);
  2249. }
  2250. LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
  2251. return 0;
  2252. }
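/*
 * Example (sketch): with LD_PRELOAD="libfoo.so.1 libbar.so.1" (entries
 * may also be separated by tabs, ':' or ';'), the loop above loads both
 * objects ahead of the regular needed objects and marks each one as an
 * interposer via z_interpose; the library names are made up here.
 */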
  2253. static const char *
  2254. printable_path(const char *path)
  2255. {
  2256. return (path == NULL ? "<unknown>" : path);
  2257. }
  2258. /*
  2259. * Load a shared object into memory, if it is not already loaded. The
  2260. * object may be specified by name or by user-supplied file descriptor
2261. * fd_u. In the latter case, the fd_u descriptor is not closed, but its
  2262. * duplicate is.
  2263. *
  2264. * Returns a pointer to the Obj_Entry for the object. Returns NULL
  2265. * on failure.
  2266. */
  2267. static Obj_Entry *
  2268. load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
  2269. {
  2270. Obj_Entry *obj;
  2271. int fd;
  2272. struct stat sb;
  2273. char *path;
  2274. fd = -1;
  2275. if (name != NULL) {
  2276. TAILQ_FOREACH(obj, &obj_list, next) {
  2277. if (obj->marker || obj->doomed)
  2278. continue;
  2279. if (object_match_name(obj, name))
  2280. return (obj);
  2281. }
  2282. path = find_library(name, refobj, &fd);
  2283. if (path == NULL)
  2284. return (NULL);
  2285. } else
  2286. path = NULL;
  2287. if (fd >= 0) {
  2288. /*
  2289. * search_library_pathfds() opens a fresh file descriptor for the
  2290. * library, so there is no need to dup().
  2291. */
  2292. } else if (fd_u == -1) {
  2293. /*
  2294. * If we didn't find a match by pathname, or the name is not
  2295. * supplied, open the file and check again by device and inode.
  2296. * This avoids false mismatches caused by multiple links or ".."
  2297. * in pathnames.
  2298. *
  2299. * To avoid a race, we open the file and use fstat() rather than
  2300. * using stat().
  2301. */
  2302. if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) {
  2303. _rtld_error("Cannot open \"%s\"", path);
  2304. free(path);
  2305. return (NULL);
  2306. }
  2307. } else {
  2308. fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0);
  2309. if (fd == -1) {
  2310. _rtld_error("Cannot dup fd");
  2311. free(path);
  2312. return (NULL);
  2313. }
  2314. }
  2315. if (fstat(fd, &sb) == -1) {
  2316. _rtld_error("Cannot fstat \"%s\"", printable_path(path));
  2317. close(fd);
  2318. free(path);
  2319. return NULL;
  2320. }
  2321. TAILQ_FOREACH(obj, &obj_list, next) {
  2322. if (obj->marker || obj->doomed)
  2323. continue;
  2324. if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
  2325. break;
  2326. }
  2327. if (obj != NULL && name != NULL) {
  2328. object_add_name(obj, name);
  2329. free(path);
  2330. close(fd);
  2331. return obj;
  2332. }
  2333. if (flags & RTLD_LO_NOLOAD) {
  2334. free(path);
  2335. close(fd);
  2336. return (NULL);
  2337. }
  2338. /* First use of this object, so we must map it in */
  2339. obj = do_load_object(fd, name, path, &sb, flags);
  2340. if (obj == NULL)
  2341. free(path);
  2342. close(fd);
  2343. return obj;
  2344. }
  2345. static Obj_Entry *
  2346. do_load_object(int fd, const char *name, char *path, struct stat *sbp,
  2347. int flags)
  2348. {
  2349. Obj_Entry *obj;
  2350. struct statfs fs;
  2351. #ifdef HARDENEDBSD
  2352. struct integriforce_so_check check;
  2353. int res, err;
  2354. size_t sz;
  2355. #endif
  2356. /*
  2357. * but first, make sure that environment variables haven't been
  2358. * used to circumvent the noexec flag on a filesystem.
  2359. */
  2360. if (dangerous_ld_env) {
  2361. if (fstatfs(fd, &fs) != 0) {
  2362. _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
  2363. return NULL;
  2364. }
  2365. if (fs.f_flags & MNT_NOEXEC) {
  2366. _rtld_error("Cannot execute objects on %s", fs.f_mntonname);
  2367. return NULL;
  2368. }
  2369. }
  2370. #ifdef HARDENEDBSD
  2371. if (path != NULL) {
  2372. sz = sizeof(int);
  2373. err = sysctlbyname("kern.features.integriforce",
  2374. &res, &sz, NULL, 0);
  2375. if (err == 0 && res == 1) {
  2376. strlcpy(check.isc_path, path, MAXPATHLEN);
  2377. check.isc_result = 0;
  2378. sz = sizeof(struct integriforce_so_check);
  2379. err = sysctlbyname("hardening.secadm.integriforce_so",
  2380. &check, &sz, &check, sizeof(struct integriforce_so_check));
  2381. if (err == 0 && check.isc_result != 0) {
  2382. _rtld_error("Integriforce validation failed on %s. Aborting.\n", path);
  2383. return (NULL);
  2384. }
  2385. }
  2386. }
  2387. #endif
  2388. dbg("loading \"%s\"", printable_path(path));
  2389. obj = map_object(fd, printable_path(path), sbp);
  2390. if (obj == NULL)
  2391. return NULL;
  2392. /*
  2393. * If DT_SONAME is present in the object, digest_dynamic2 already
  2394. * added it to the object names.
  2395. */
  2396. if (name != NULL)
  2397. object_add_name(obj, name);
  2398. obj->path = path;
  2399. if (!digest_dynamic(obj, 0))
  2400. goto errp;
  2401. dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path,
  2402. obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount);
  2403. if (obj->z_pie && (flags & RTLD_LO_TRACE) == 0) {
  2404. dbg("refusing to load PIE executable \"%s\"", obj->path);
  2405. _rtld_error("Cannot load PIE binary %s as DSO", obj->path);
  2406. goto errp;
  2407. }
  2408. if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
  2409. RTLD_LO_DLOPEN) {
  2410. dbg("refusing to load non-loadable \"%s\"", obj->path);
  2411. _rtld_error("Cannot dlopen non-loadable %s", obj->path);
  2412. goto errp;
  2413. }
  2414. obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0;
  2415. TAILQ_INSERT_TAIL(&obj_list, obj, next);
  2416. obj_count++;
  2417. obj_loads++;
  2418. linkmap_add(obj); /* for GDB & dlinfo() */
  2419. max_stack_flags |= obj->stack_flags;
  2420. if ((max_stack_flags & PF_X) == PF_X)
  2421. if ((stack_prot & PROT_EXEC) == 0)
  2422. max_stack_flags &= ~(PF_X);
  2423. dbg(" %p .. %p: %s", obj->mapbase,
  2424. obj->mapbase + obj->mapsize - 1, obj->path);
  2425. if (obj->textrel)
  2426. dbg(" WARNING: %s has impure text", obj->path);
  2427. LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
  2428. obj->path);
  2429. return (obj);
  2430. errp:
  2431. munmap(obj->mapbase, obj->mapsize);
  2432. obj_free(obj);
  2433. return (NULL);
  2434. }
  2435. static Obj_Entry *
  2436. obj_from_addr(const void *addr)
  2437. {
  2438. Obj_Entry *obj;
  2439. TAILQ_FOREACH(obj, &obj_list, next) {
  2440. if (obj->marker)
  2441. continue;
  2442. if (addr < (void *) obj->mapbase)
  2443. continue;
  2444. if (addr < (void *)(obj->mapbase + obj->mapsize))
  2445. return obj;
  2446. }
  2447. return NULL;
  2448. }
  2449. static void
  2450. preinit_main(void)
  2451. {
  2452. Elf_Addr *preinit_addr;
  2453. int index;
  2454. preinit_addr = (Elf_Addr *)obj_main->preinit_array;
  2455. if (preinit_addr == NULL)
  2456. return;
  2457. for (index = 0; index < obj_main->preinit_array_num; index++) {
  2458. if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
  2459. dbg("calling preinit function for %s at %p", obj_main->path,
  2460. (void *)preinit_addr[index]);
  2461. LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
  2462. 0, 0, obj_main->path);
  2463. call_init_pointer(obj_main, preinit_addr[index]);
  2464. }
  2465. }
  2466. }
  2467. /*
  2468. * Call the finalization functions for each of the objects in "list"
  2469. * belonging to the DAG of "root" and referenced once. If NULL "root"
  2470. * is specified, every finalization function will be called regardless
  2471. * of the reference count and the list elements won't be freed. All of
  2472. * the objects are expected to have non-NULL fini functions.
  2473. */
  2474. static void
  2475. objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
  2476. {
  2477. Objlist_Entry *elm;
  2478. char *saved_msg;
  2479. Elf_Addr *fini_addr;
  2480. int index;
  2481. assert(root == NULL || root->refcount == 1);
  2482. if (root != NULL)
  2483. root->doomed = true;
  2484. /*
  2485. * Preserve the current error message since a fini function might
  2486. * call into the dynamic linker and overwrite it.
  2487. */
  2488. saved_msg = errmsg_save();
  2489. do {
  2490. STAILQ_FOREACH(elm, list, link) {
  2491. if (root != NULL && (elm->obj->refcount != 1 ||
  2492. objlist_find(&root->dagmembers, elm->obj) == NULL))
  2493. continue;
  2494. /* Remove object from fini list to prevent recursive invocation. */
  2495. STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
  2496. /* Ensure that new references cannot be acquired. */
  2497. elm->obj->doomed = true;
  2498. hold_object(elm->obj);
  2499. lock_release(rtld_bind_lock, lockstate);
  2500. /*
  2501. * It is legal to have both DT_FINI and DT_FINI_ARRAY defined.
  2502. * When this happens, DT_FINI_ARRAY is processed first.
  2503. */
  2504. fini_addr = (Elf_Addr *)elm->obj->fini_array;
  2505. if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
  2506. for (index = elm->obj->fini_array_num - 1; index >= 0;
  2507. index--) {
  2508. if (fini_addr[index] != 0 && fini_addr[index] != 1) {
  2509. dbg("calling fini function for %s at %p",
  2510. elm->obj->path, (void *)fini_addr[index]);
  2511. LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
  2512. (void *)fini_addr[index], 0, 0, elm->obj->path);
  2513. call_initfini_pointer(elm->obj, fini_addr[index]);
  2514. }
  2515. }
  2516. }
  2517. if (elm->obj->fini != (Elf_Addr)NULL) {
  2518. dbg("calling fini function for %s at %p", elm->obj->path,
  2519. (void *)elm->obj->fini);
  2520. LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
  2521. 0, 0, elm->obj->path);
  2522. call_initfini_pointer(elm->obj, elm->obj->fini);
  2523. }
  2524. wlock_acquire(rtld_bind_lock, lockstate);
  2525. unhold_object(elm->obj);
  2526. /* No need to free anything if process is going down. */
  2527. if (root != NULL)
  2528. free(elm);
  2529. /*
  2530. * We must restart the list traversal after every fini call
  2531. * because a dlclose() call from the fini function or from
  2532. * another thread might have modified the reference counts.
  2533. */
  2534. break;
  2535. }
  2536. } while (elm != NULL);
  2537. errmsg_restore(saved_msg);
  2538. }
  2539. /*
  2540. * Call the initialization functions for each of the objects in
  2541. * "list". All of the objects are expected to have non-NULL init
  2542. * functions.
  2543. */
  2544. static void
  2545. objlist_call_init(Objlist *list, RtldLockState *lockstate)
  2546. {
  2547. Objlist_Entry *elm;
  2548. Obj_Entry *obj;
  2549. char *saved_msg;
  2550. Elf_Addr *init_addr;
  2551. void (*reg)(void (*)(void));
  2552. int index;
  2553. /*
  2554. * Clean init_scanned flag so that objects can be rechecked and
2555. * possibly initialized earlier if any of the vectors called below
2556. * causes a change by using dlopen.
  2557. */
  2558. TAILQ_FOREACH(obj, &obj_list, next) {
  2559. if (obj->marker)
  2560. continue;
  2561. obj->init_scanned = false;
  2562. }
  2563. /*
  2564. * Preserve the current error message since an init function might
  2565. * call into the dynamic linker and overwrite it.
  2566. */
  2567. saved_msg = errmsg_save();
  2568. STAILQ_FOREACH(elm, list, link) {
  2569. if (elm->obj->init_done) /* Initialized early. */
  2570. continue;
  2571. /*
2572. * Race: another thread might try to use this object before the current
2573. * one completes its initialization. Not much can be done here
  2574. * without better locking.
  2575. */
  2576. elm->obj->init_done = true;
  2577. hold_object(elm->obj);
  2578. reg = NULL;
  2579. if (elm->obj == obj_main && obj_main->crt_no_init) {
  2580. reg = (void (*)(void (*)(void)))get_program_var_addr(
  2581. "__libc_atexit", lockstate);
  2582. }
  2583. lock_release(rtld_bind_lock, lockstate);
  2584. if (reg != NULL) {
  2585. reg(rtld_exit);
  2586. rtld_exit_ptr = rtld_nop_exit;
  2587. }
  2588. /*
  2589. * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
  2590. * When this happens, DT_INIT is processed first.
  2591. */
  2592. if (elm->obj->init != (Elf_Addr)NULL) {
  2593. dbg("calling init function for %s at %p", elm->obj->path,
  2594. (void *)elm->obj->init);
  2595. LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
  2596. 0, 0, elm->obj->path);
  2597. call_initfini_pointer(elm->obj, elm->obj->init);
  2598. }
  2599. init_addr = (Elf_Addr *)elm->obj->init_array;
  2600. if (init_addr != NULL) {
  2601. for (index = 0; index < elm->obj->init_array_num; index++) {
  2602. if (init_addr[index] != 0 && init_addr[index] != 1) {
  2603. dbg("calling init function for %s at %p", elm->obj->path,
  2604. (void *)init_addr[index]);
  2605. LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
  2606. (void *)init_addr[index], 0, 0, elm->obj->path);
  2607. call_init_pointer(elm->obj, init_addr[index]);
  2608. }
  2609. }
  2610. }
  2611. wlock_acquire(rtld_bind_lock, lockstate);
  2612. unhold_object(elm->obj);
  2613. }
  2614. errmsg_restore(saved_msg);
  2615. }
  2616. static void
  2617. objlist_clear(Objlist *list)
  2618. {
  2619. Objlist_Entry *elm;
  2620. while (!STAILQ_EMPTY(list)) {
  2621. elm = STAILQ_FIRST(list);
  2622. STAILQ_REMOVE_HEAD(list, link);
  2623. free(elm);
  2624. }
  2625. }
  2626. static Objlist_Entry *
  2627. objlist_find(Objlist *list, const Obj_Entry *obj)
  2628. {
  2629. Objlist_Entry *elm;
  2630. STAILQ_FOREACH(elm, list, link)
  2631. if (elm->obj == obj)
  2632. return elm;
  2633. return NULL;
  2634. }
  2635. static void
  2636. objlist_init(Objlist *list)
  2637. {
  2638. STAILQ_INIT(list);
  2639. }
  2640. static void
  2641. objlist_push_head(Objlist *list, Obj_Entry *obj)
  2642. {
  2643. Objlist_Entry *elm;
  2644. elm = NEW(Objlist_Entry);
  2645. elm->obj = obj;
  2646. STAILQ_INSERT_HEAD(list, elm, link);
  2647. }
  2648. static void
  2649. objlist_push_tail(Objlist *list, Obj_Entry *obj)
  2650. {
  2651. Objlist_Entry *elm;
  2652. elm = NEW(Objlist_Entry);
  2653. elm->obj = obj;
  2654. STAILQ_INSERT_TAIL(list, elm, link);
  2655. }
  2656. static void
  2657. objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj)
  2658. {
  2659. Objlist_Entry *elm, *listelm;
  2660. STAILQ_FOREACH(listelm, list, link) {
  2661. if (listelm->obj == listobj)
  2662. break;
  2663. }
  2664. elm = NEW(Objlist_Entry);
  2665. elm->obj = obj;
  2666. if (listelm != NULL)
  2667. STAILQ_INSERT_AFTER(list, listelm, elm, link);
  2668. else
  2669. STAILQ_INSERT_TAIL(list, elm, link);
  2670. }
  2671. static void
  2672. objlist_remove(Objlist *list, Obj_Entry *obj)
  2673. {
  2674. Objlist_Entry *elm;
  2675. if ((elm = objlist_find(list, obj)) != NULL) {
  2676. STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
  2677. free(elm);
  2678. }
  2679. }
  2680. /*
  2681. * Relocate dag rooted in the specified object.
  2682. * Returns 0 on success, or -1 on failure.
  2683. */
  2684. static int
  2685. relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj,
  2686. int flags, RtldLockState *lockstate)
  2687. {
  2688. Objlist_Entry *elm;
  2689. int error;
  2690. error = 0;
  2691. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  2692. error = relocate_object(elm->obj, bind_now, rtldobj, flags,
  2693. lockstate);
  2694. if (error == -1)
  2695. break;
  2696. }
  2697. return (error);
  2698. }
  2699. /*
2700. * Prepare for, or clean up after, relocating an object marked with
2701. * DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only
2702. * segments are remapped read-write. After relocations are done, the
2703. * segments' permissions are restored to the modes specified in
2704. * the phdrs. If any relocation happened (or always, for a wired
2705. * program), COW is triggered.
  2706. */
  2707. static int
  2708. reloc_textrel_prot(Obj_Entry *obj, bool before)
  2709. {
  2710. const Elf_Phdr *ph;
  2711. void *base;
  2712. size_t l, sz;
  2713. int prot;
  2714. for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0;
  2715. l--, ph++) {
  2716. if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0)
  2717. continue;
  2718. base = obj->relocbase + trunc_page(ph->p_vaddr);
  2719. sz = round_page(ph->p_vaddr + ph->p_filesz) -
  2720. trunc_page(ph->p_vaddr);
  2721. prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0);
  2722. if (mprotect(base, sz, prot) == -1) {
  2723. _rtld_error("%s: Cannot write-%sable text segment: %s",
  2724. obj->path, before ? "en" : "dis",
  2725. rtld_strerror(errno));
  2726. return (-1);
  2727. }
  2728. }
  2729. return (0);
  2730. }
  2731. /*
  2732. * Relocate single object.
  2733. * Returns 0 on success, or -1 on failure.
  2734. */
  2735. static int
  2736. relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
  2737. int flags, RtldLockState *lockstate)
  2738. {
  2739. if (obj->relocated)
  2740. return (0);
  2741. obj->relocated = true;
  2742. if (obj != rtldobj)
  2743. dbg("relocating \"%s\"", obj->path);
  2744. if (obj->symtab == NULL || obj->strtab == NULL ||
  2745. !(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
  2746. _rtld_error("%s: Shared object has no run-time symbol table",
  2747. obj->path);
  2748. return (-1);
  2749. }
  2750. /* There are relocations to the write-protected text segment. */
  2751. if (obj->textrel && reloc_textrel_prot(obj, true) != 0)
  2752. return (-1);
  2753. /* Process the non-PLT non-IFUNC relocations. */
  2754. if (reloc_non_plt(obj, rtldobj, flags, lockstate))
  2755. return (-1);
2756. /* Re-protect the text segment. */
  2757. if (obj->textrel && reloc_textrel_prot(obj, false) != 0)
  2758. return (-1);
  2759. /* Set the special PLT or GOT entries. */
  2760. init_pltgot(obj);
  2761. /* Process the PLT relocations. */
  2762. if (reloc_plt(obj, flags, lockstate) == -1)
  2763. return (-1);
  2764. /* Relocate the jump slots if we are doing immediate binding. */
  2765. if ((obj->bind_now || bind_now) && reloc_jmpslots(obj, flags,
  2766. lockstate) == -1)
  2767. return (-1);
  2768. if (!obj->mainprog && obj_enforce_relro(obj) == -1)
  2769. return (-1);
  2770. /*
  2771. * Set up the magic number and version in the Obj_Entry. These
  2772. * were checked in the crt1.o from the original ElfKit, so we
  2773. * set them for backward compatibility.
  2774. */
  2775. obj->magic = RTLD_MAGIC;
  2776. obj->version = RTLD_VERSION;
  2777. return (0);
  2778. }
  2779. /*
  2780. * Relocate newly-loaded shared objects. The argument is a pointer to
  2781. * the Obj_Entry for the first such object. All objects from the first
  2782. * to the end of the list of objects are relocated. Returns 0 on success,
  2783. * or -1 on failure.
  2784. */
  2785. static int
  2786. relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
  2787. int flags, RtldLockState *lockstate)
  2788. {
  2789. Obj_Entry *obj;
  2790. int error;
  2791. for (error = 0, obj = first; obj != NULL;
  2792. obj = TAILQ_NEXT(obj, next)) {
  2793. if (obj->marker)
  2794. continue;
  2795. error = relocate_object(obj, bind_now, rtldobj, flags,
  2796. lockstate);
  2797. if (error == -1)
  2798. break;
  2799. }
  2800. return (error);
  2801. }
  2802. /*
  2803. * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
  2804. * referencing STT_GNU_IFUNC symbols is postponed till the other
  2805. * relocations are done. The indirect functions specified as
  2806. * ifunc are allowed to call other symbols, so we need to have
  2807. * objects relocated before asking for resolution from indirects.
  2808. *
  2809. * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
  2810. * instead of the usual lazy handling of PLT slots. It is
  2811. * consistent with how GNU does it.
  2812. */
  2813. static int
  2814. resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
  2815. RtldLockState *lockstate)
  2816. {
  2817. if (obj->ifuncs_resolved)
  2818. return (0);
  2819. obj->ifuncs_resolved = true;
  2820. if (!obj->irelative && !obj->irelative_nonplt &&
  2821. !((obj->bind_now || bind_now) && obj->gnu_ifunc))
  2822. return (0);
  2823. if (obj_disable_relro(obj) == -1 ||
  2824. (obj->irelative && reloc_iresolve(obj, lockstate) == -1) ||
  2825. (obj->irelative_nonplt && reloc_iresolve_nonplt(obj,
  2826. lockstate) == -1) ||
  2827. ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
  2828. reloc_gnu_ifunc(obj, flags, lockstate) == -1) ||
  2829. obj_enforce_relro(obj) == -1)
  2830. return (-1);
  2831. return (0);
  2832. }
  2833. static int
  2834. initlist_objects_ifunc(Objlist *list, bool bind_now, int flags,
  2835. RtldLockState *lockstate)
  2836. {
  2837. Objlist_Entry *elm;
  2838. Obj_Entry *obj;
  2839. STAILQ_FOREACH(elm, list, link) {
  2840. obj = elm->obj;
  2841. if (obj->marker)
  2842. continue;
  2843. if (resolve_object_ifunc(obj, bind_now, flags,
  2844. lockstate) == -1)
  2845. return (-1);
  2846. }
  2847. return (0);
  2848. }
  2849. /*
  2850. * Cleanup procedure. It will be called (by the atexit mechanism) just
  2851. * before the process exits.
  2852. */
  2853. static void
  2854. rtld_exit(void)
  2855. {
  2856. RtldLockState lockstate;
  2857. wlock_acquire(rtld_bind_lock, &lockstate);
  2858. dbg("rtld_exit()");
  2859. objlist_call_fini(&list_fini, NULL, &lockstate);
  2860. /* No need to remove the items from the list, since we are exiting. */
  2861. if (!libmap_disable)
  2862. lm_fini();
  2863. lock_release(rtld_bind_lock, &lockstate);
  2864. }
  2865. static void
  2866. rtld_nop_exit(void)
  2867. {
  2868. }
  2869. /*
  2870. * Iterate over a search path, translate each element, and invoke the
  2871. * callback on the result.
  2872. */
  2873. static void *
  2874. path_enumerate(const char *path, path_enum_proc callback,
  2875. const char *refobj_path, void *arg)
  2876. {
  2877. const char *trans;
  2878. if (path == NULL)
  2879. return (NULL);
  2880. path += strspn(path, ":;");
  2881. while (*path != '\0') {
  2882. size_t len;
  2883. char *res;
  2884. len = strcspn(path, ":;");
  2885. trans = lm_findn(refobj_path, path, len);
  2886. if (trans)
  2887. res = callback(trans, strlen(trans), arg);
  2888. else
  2889. res = callback(path, len, arg);
  2890. if (res != NULL)
  2891. return (res);
  2892. path += len;
  2893. path += strspn(path, ":;");
  2894. }
  2895. return (NULL);
  2896. }
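/*
 * A minimal path_enumerate() callback, purely for illustration (the name
 * count_dirs is hypothetical).  Returning non-NULL from a callback stops
 * the walk early; this one never does and simply counts path elements:
 *
 *	static void *
 *	count_dirs(const char *dir __unused, size_t dirlen __unused, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return (NULL);
 *	}
 *
 *	unsigned int n = 0;
 *	path_enumerate("/lib:/usr/lib", count_dirs, NULL, &n);
 */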
  2897. struct try_library_args {
  2898. const char *name;
  2899. size_t namelen;
  2900. char *buffer;
  2901. size_t buflen;
  2902. int fd;
  2903. };
  2904. static void *
  2905. try_library_path(const char *dir, size_t dirlen, void *param)
  2906. {
  2907. struct try_library_args *arg;
  2908. int fd;
  2909. arg = param;
  2910. if (*dir == '/' || trust) {
  2911. char *pathname;
  2912. if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
  2913. return (NULL);
  2914. pathname = arg->buffer;
  2915. strncpy(pathname, dir, dirlen);
  2916. pathname[dirlen] = '/';
  2917. strcpy(pathname + dirlen + 1, arg->name);
  2918. dbg(" Trying \"%s\"", pathname);
  2919. fd = open(pathname, O_RDONLY | O_CLOEXEC | O_VERIFY);
  2920. if (fd >= 0) {
  2921. dbg(" Opened \"%s\", fd %d", pathname, fd);
  2922. pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
  2923. strcpy(pathname, arg->buffer);
  2924. arg->fd = fd;
  2925. return (pathname);
  2926. } else {
  2927. dbg(" Failed to open \"%s\": %s",
  2928. pathname, rtld_strerror(errno));
  2929. }
  2930. }
  2931. return (NULL);
  2932. }
  2933. static char *
  2934. search_library_path(const char *name, const char *path,
  2935. const char *refobj_path, int *fdp)
  2936. {
  2937. char *p;
  2938. struct try_library_args arg;
  2939. if (path == NULL)
  2940. return NULL;
  2941. arg.name = name;
  2942. arg.namelen = strlen(name);
  2943. arg.buffer = xmalloc(PATH_MAX);
  2944. arg.buflen = PATH_MAX;
  2945. arg.fd = -1;
  2946. p = path_enumerate(path, try_library_path, refobj_path, &arg);
  2947. *fdp = arg.fd;
  2948. free(arg.buffer);
  2949. return (p);
  2950. }
  2951. /*
  2952. * Finds the library with the given name using the directory descriptors
  2953. * listed in the LD_LIBRARY_PATH_FDS environment variable.
  2954. *
2955. * Passes a freshly-opened close-on-exec descriptor for the library out via
2956. * *fdp and returns a synthesized path for it, or NULL if it cannot be found.
  2957. */
  2958. static char *
  2959. search_library_pathfds(const char *name, const char *path, int *fdp)
  2960. {
  2961. char *envcopy, *fdstr, *found, *last_token;
  2962. size_t len;
  2963. int dirfd, fd;
  2964. dbg("%s('%s', '%s', fdp)", __func__, name, path);
  2965. /* Don't load from user-specified libdirs into setuid binaries. */
  2966. if (!trust)
  2967. return (NULL);
  2968. /* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */
  2969. if (path == NULL)
  2970. return (NULL);
  2971. /* LD_LIBRARY_PATH_FDS only works with relative paths. */
  2972. if (name[0] == '/') {
  2973. dbg("Absolute path (%s) passed to %s", name, __func__);
  2974. return (NULL);
  2975. }
  2976. /*
  2977. * Use strtok_r() to walk the FD:FD:FD list. This requires a local
  2978. * copy of the path, as strtok_r rewrites separator tokens
  2979. * with '\0'.
  2980. */
  2981. found = NULL;
  2982. envcopy = xstrdup(path);
  2983. for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL;
  2984. fdstr = strtok_r(NULL, ":", &last_token)) {
  2985. dirfd = parse_integer(fdstr);
  2986. if (dirfd < 0) {
  2987. _rtld_error("failed to parse directory FD: '%s'",
  2988. fdstr);
  2989. break;
  2990. }
  2991. fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY);
  2992. if (fd >= 0) {
  2993. *fdp = fd;
  2994. len = strlen(fdstr) + strlen(name) + 3;
  2995. found = xmalloc(len);
  2996. if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) {
  2997. _rtld_error("error generating '%d/%s'",
  2998. dirfd, name);
  2999. rtld_die();
  3000. }
  3001. dbg("open('%s') => %d", found, fd);
  3002. break;
  3003. }
  3004. }
  3005. free(envcopy);
  3006. return (found);
  3007. }
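/*
 * Sketch of how a (non-setuid) caller might populate LD_LIBRARY_PATH_FDS
 * before exec'ing a program; the variable is a ':'-separated list of
 * directory descriptor numbers, and the descriptors must survive exec,
 * so they are opened without O_CLOEXEC:
 *
 *	char buf[16];
 *	int dfd = open("/usr/local/lib", O_RDONLY | O_DIRECTORY);
 *	snprintf(buf, sizeof(buf), "%d", dfd);
 *	setenv("LD_LIBRARY_PATH_FDS", buf, 1);
 *
 * The "#%d/%s" string built above is only a printable label; the object
 * itself is opened relative to the directory descriptor with openat(2).
 */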
  3008. int
  3009. dlclose(void *handle)
  3010. {
  3011. RtldLockState lockstate;
  3012. int error;
  3013. wlock_acquire(rtld_bind_lock, &lockstate);
  3014. error = dlclose_locked(handle, &lockstate);
  3015. lock_release(rtld_bind_lock, &lockstate);
  3016. return (error);
  3017. }
  3018. static int
  3019. dlclose_locked(void *handle, RtldLockState *lockstate)
  3020. {
  3021. Obj_Entry *root;
  3022. root = dlcheck(handle);
  3023. if (root == NULL)
  3024. return -1;
  3025. LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
  3026. root->path);
  3027. /* Unreference the object and its dependencies. */
  3028. root->dl_refcount--;
  3029. if (root->refcount == 1) {
  3030. /*
  3031. * The object will be no longer referenced, so we must unload it.
  3032. * First, call the fini functions.
  3033. */
  3034. objlist_call_fini(&list_fini, root, lockstate);
  3035. unref_dag(root);
  3036. /* Finish cleaning up the newly-unreferenced objects. */
  3037. GDB_STATE(RT_DELETE,&root->linkmap);
  3038. unload_object(root, lockstate);
  3039. GDB_STATE(RT_CONSISTENT,NULL);
  3040. } else
  3041. unref_dag(root);
  3042. LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
  3043. return 0;
  3044. }
  3045. char *
  3046. dlerror(void)
  3047. {
  3048. char *msg = error_message;
  3049. error_message = NULL;
  3050. return msg;
  3051. }
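/*
 * Typical client usage of the interfaces above, as a hedged sketch
 * (standard <dlfcn.h> API; libfoo.so.1 and foo_init are made-up names):
 *
 *	void *h = dlopen("libfoo.so.1", RTLD_NOW | RTLD_LOCAL);
 *	if (h == NULL)
 *		errx(1, "%s", dlerror());
 *	void (*init_fn)(void) = (void (*)(void))dlsym(h, "foo_init");
 *	if (init_fn != NULL)
 *		init_fn();
 *	dlclose(h);
 */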
  3052. /*
  3053. * This function is deprecated and has no effect.
  3054. */
  3055. void
  3056. dllockinit(void *context,
  3057. void *(*_lock_create)(void *context) __unused,
  3058. void (*_rlock_acquire)(void *lock) __unused,
  3059. void (*_wlock_acquire)(void *lock) __unused,
  3060. void (*_lock_release)(void *lock) __unused,
  3061. void (*_lock_destroy)(void *lock) __unused,
  3062. void (*context_destroy)(void *context))
  3063. {
  3064. static void *cur_context;
  3065. static void (*cur_context_destroy)(void *);
  3066. /* Just destroy the context from the previous call, if necessary. */
  3067. if (cur_context_destroy != NULL)
  3068. cur_context_destroy(cur_context);
  3069. cur_context = context;
  3070. cur_context_destroy = context_destroy;
  3071. }
  3072. void *
  3073. dlopen(const char *name, int mode)
  3074. {
  3075. return (rtld_dlopen(name, -1, mode));
  3076. }
  3077. void *
  3078. fdlopen(int fd, int mode)
  3079. {
  3080. return (rtld_dlopen(NULL, fd, mode));
  3081. }
  3082. static void *
  3083. rtld_dlopen(const char *name, int fd, int mode)
  3084. {
  3085. RtldLockState lockstate;
  3086. int lo_flags;
  3087. LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
  3088. ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
  3089. if (ld_tracing != NULL) {
  3090. rlock_acquire(rtld_bind_lock, &lockstate);
  3091. if (sigsetjmp(lockstate.env, 0) != 0)
  3092. lock_upgrade(rtld_bind_lock, &lockstate);
  3093. environ = __DECONST(char **, *get_program_var_addr("environ", &lockstate));
  3094. lock_release(rtld_bind_lock, &lockstate);
  3095. }
  3096. lo_flags = RTLD_LO_DLOPEN;
  3097. if (mode & RTLD_NODELETE)
  3098. lo_flags |= RTLD_LO_NODELETE;
  3099. if (mode & RTLD_NOLOAD)
  3100. lo_flags |= RTLD_LO_NOLOAD;
  3101. if (mode & RTLD_DEEPBIND)
  3102. lo_flags |= RTLD_LO_DEEPBIND;
  3103. if (ld_tracing != NULL)
  3104. lo_flags |= RTLD_LO_TRACE | RTLD_LO_IGNSTLS;
  3105. return (dlopen_object(name, fd, obj_main, lo_flags,
  3106. mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL));
  3107. }
  3108. static void
  3109. dlopen_cleanup(Obj_Entry *obj, RtldLockState *lockstate)
  3110. {
  3111. obj->dl_refcount--;
  3112. unref_dag(obj);
  3113. if (obj->refcount == 0)
  3114. unload_object(obj, lockstate);
  3115. }
  3116. static Obj_Entry *
  3117. dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
  3118. int mode, RtldLockState *lockstate)
  3119. {
  3120. Obj_Entry *old_obj_tail;
  3121. Obj_Entry *obj;
  3122. Objlist initlist;
  3123. RtldLockState mlockstate;
  3124. int result;
  3125. dbg("dlopen_object name \"%s\" fd %d refobj \"%s\" lo_flags %#x mode %#x",
  3126. name != NULL ? name : "<null>", fd, refobj == NULL ? "<null>" :
  3127. refobj->path, lo_flags, mode);
  3128. objlist_init(&initlist);
  3129. if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) {
  3130. wlock_acquire(rtld_bind_lock, &mlockstate);
  3131. lockstate = &mlockstate;
  3132. }
  3133. GDB_STATE(RT_ADD,NULL);
  3134. old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
  3135. obj = NULL;
  3136. if (name == NULL && fd == -1) {
  3137. obj = obj_main;
  3138. obj->refcount++;
  3139. } else {
  3140. obj = load_object(name, fd, refobj, lo_flags);
  3141. }
  3142. if (obj) {
  3143. obj->dl_refcount++;
  3144. if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
  3145. objlist_push_tail(&list_global, obj);
  3146. if (globallist_next(old_obj_tail) != NULL) {
  3147. /* We loaded something new. */
  3148. assert(globallist_next(old_obj_tail) == obj);
  3149. if ((lo_flags & RTLD_LO_DEEPBIND) != 0)
  3150. obj->symbolic = true;
  3151. result = 0;
  3152. if ((lo_flags & (RTLD_LO_EARLY | RTLD_LO_IGNSTLS)) == 0 &&
  3153. obj->static_tls && !allocate_tls_offset(obj)) {
  3154. _rtld_error("%s: No space available "
  3155. "for static Thread Local Storage", obj->path);
  3156. result = -1;
  3157. }
  3158. if (result != -1)
  3159. result = load_needed_objects(obj, lo_flags & (RTLD_LO_DLOPEN |
  3160. RTLD_LO_EARLY | RTLD_LO_IGNSTLS | RTLD_LO_TRACE));
  3161. init_dag(obj);
  3162. ref_dag(obj);
  3163. if (result != -1)
  3164. result = rtld_verify_versions(&obj->dagmembers);
  3165. if (result != -1 && ld_tracing)
  3166. goto trace;
  3167. if (result == -1 || relocate_object_dag(obj,
  3168. (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld,
  3169. (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
  3170. lockstate) == -1) {
  3171. dlopen_cleanup(obj, lockstate);
  3172. obj = NULL;
  3173. } else if (lo_flags & RTLD_LO_EARLY) {
  3174. /*
  3175. * Do not call the init functions for early loaded
  3176. * filtees. The image is still not initialized enough
  3177. * for them to work.
  3178. *
  3179. * Our object is found by the global object list and
  3180. * will be ordered among all init calls done right
  3181. * before transferring control to main.
  3182. */
  3183. } else {
  3184. /* Make list of init functions to call. */
  3185. initlist_add_objects(obj, obj, &initlist);
  3186. }
  3187. /*
3188. * Process all no_delete or global objects here, giving
3189. * them their own DAGs to prevent their dependencies from being
  3190. * unloaded. This has to be done after we have loaded all
  3191. * of the dependencies, so that we do not miss any.
  3192. */
  3193. if (obj != NULL)
  3194. process_z(obj);
  3195. } else {
  3196. /*
  3197. * Bump the reference counts for objects on this DAG. If
  3198. * this is the first dlopen() call for the object that was
  3199. * already loaded as a dependency, initialize the dag
  3200. * starting at it.
  3201. */
  3202. init_dag(obj);
  3203. ref_dag(obj);
  3204. if ((lo_flags & RTLD_LO_TRACE) != 0)
  3205. goto trace;
  3206. }
  3207. if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
  3208. obj->z_nodelete) && !obj->ref_nodel) {
  3209. dbg("obj %s nodelete", obj->path);
  3210. ref_dag(obj);
  3211. obj->z_nodelete = obj->ref_nodel = true;
  3212. }
  3213. }
  3214. LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
  3215. name);
  3216. GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
  3217. if ((lo_flags & RTLD_LO_EARLY) == 0) {
  3218. map_stacks_exec(lockstate);
  3219. if (obj != NULL)
  3220. distribute_static_tls(&initlist, lockstate);
  3221. }
  3222. if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
  3223. (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
  3224. lockstate) == -1) {
  3225. objlist_clear(&initlist);
  3226. dlopen_cleanup(obj, lockstate);
  3227. if (lockstate == &mlockstate)
  3228. lock_release(rtld_bind_lock, lockstate);
  3229. return (NULL);
  3230. }
  3231. if (!(lo_flags & RTLD_LO_EARLY)) {
  3232. /* Call the init functions. */
  3233. objlist_call_init(&initlist, lockstate);
  3234. }
  3235. objlist_clear(&initlist);
  3236. if (lockstate == &mlockstate)
  3237. lock_release(rtld_bind_lock, lockstate);
  3238. return obj;
  3239. trace:
  3240. trace_loaded_objects(obj);
  3241. if (lockstate == &mlockstate)
  3242. lock_release(rtld_bind_lock, lockstate);
  3243. exit(0);
  3244. }
  3245. static void *
  3246. do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
  3247. int flags)
  3248. {
  3249. DoneList donelist;
  3250. const Obj_Entry *obj, *defobj;
  3251. const Elf_Sym *def;
  3252. SymLook req;
  3253. RtldLockState lockstate;
  3254. tls_index ti;
  3255. void *sym;
  3256. int res;
  3257. def = NULL;
  3258. defobj = NULL;
  3259. symlook_init(&req, name);
  3260. req.ventry = ve;
  3261. req.flags = flags | SYMLOOK_IN_PLT;
  3262. req.lockstate = &lockstate;
  3263. LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name);
  3264. rlock_acquire(rtld_bind_lock, &lockstate);
  3265. if (sigsetjmp(lockstate.env, 0) != 0)
  3266. lock_upgrade(rtld_bind_lock, &lockstate);
  3267. if (handle == NULL || handle == RTLD_NEXT ||
  3268. handle == RTLD_DEFAULT || handle == RTLD_SELF) {
  3269. if ((obj = obj_from_addr(retaddr)) == NULL) {
  3270. _rtld_error("Cannot determine caller's shared object");
  3271. lock_release(rtld_bind_lock, &lockstate);
  3272. LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
  3273. return NULL;
  3274. }
  3275. if (handle == NULL) { /* Just the caller's shared object. */
  3276. res = symlook_obj(&req, obj);
  3277. if (res == 0) {
  3278. def = req.sym_out;
  3279. defobj = req.defobj_out;
  3280. }
  3281. } else if (handle == RTLD_NEXT || /* Objects after caller's */
  3282. handle == RTLD_SELF) { /* ... caller included */
  3283. if (handle == RTLD_NEXT)
  3284. obj = globallist_next(obj);
  3285. for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
  3286. if (obj->marker)
  3287. continue;
  3288. res = symlook_obj(&req, obj);
  3289. if (res == 0) {
  3290. if (def == NULL ||
  3291. ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
  3292. def = req.sym_out;
  3293. defobj = req.defobj_out;
  3294. if (ELF_ST_BIND(def->st_info) != STB_WEAK)
  3295. break;
  3296. }
  3297. }
  3298. }
  3299. /*
  3300. * Search the dynamic linker itself, and possibly resolve the
  3301. * symbol from there. This is how the application links to
  3302. * dynamic linker services such as dlopen.
  3303. */
  3304. if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
  3305. res = symlook_obj(&req, &obj_rtld);
  3306. if (res == 0) {
  3307. def = req.sym_out;
  3308. defobj = req.defobj_out;
  3309. }
  3310. }
  3311. } else {
  3312. assert(handle == RTLD_DEFAULT);
  3313. res = symlook_default(&req, obj);
  3314. if (res == 0) {
  3315. defobj = req.defobj_out;
  3316. def = req.sym_out;
  3317. }
  3318. }
  3319. } else {
  3320. if ((obj = dlcheck(handle)) == NULL) {
  3321. lock_release(rtld_bind_lock, &lockstate);
  3322. LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
  3323. return NULL;
  3324. }
  3325. donelist_init(&donelist);
  3326. if (obj->mainprog) {
  3327. /* Handle obtained by dlopen(NULL, ...) implies global scope. */
  3328. res = symlook_global(&req, &donelist);
  3329. if (res == 0) {
  3330. def = req.sym_out;
  3331. defobj = req.defobj_out;
  3332. }
  3333. /*
  3334. * Search the dynamic linker itself, and possibly resolve the
  3335. * symbol from there. This is how the application links to
  3336. * dynamic linker services such as dlopen.
  3337. */
  3338. if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
  3339. res = symlook_obj(&req, &obj_rtld);
  3340. if (res == 0) {
  3341. def = req.sym_out;
  3342. defobj = req.defobj_out;
  3343. }
  3344. }
  3345. }
  3346. else {
  3347. /* Search the whole DAG rooted at the given object. */
  3348. res = symlook_list(&req, &obj->dagmembers, &donelist);
  3349. if (res == 0) {
  3350. def = req.sym_out;
  3351. defobj = req.defobj_out;
  3352. }
  3353. }
  3354. }
  3355. if (def != NULL) {
  3356. lock_release(rtld_bind_lock, &lockstate);
  3357. /*
  3358. * The value required by the caller is derived from the value
3359. * of the symbol. This is simply the relocated value of the
  3360. * symbol.
  3361. */
  3362. if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
  3363. sym = make_function_pointer(def, defobj);
  3364. else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
  3365. sym = rtld_resolve_ifunc(defobj, def);
  3366. else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
  3367. ti.ti_module = defobj->tlsindex;
  3368. ti.ti_offset = def->st_value;
  3369. sym = __tls_get_addr(&ti);
  3370. } else
  3371. sym = defobj->relocbase + def->st_value;
  3372. LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name);
  3373. return (sym);
  3374. }
  3375. _rtld_error("Undefined symbol \"%s%s%s\"", name, ve != NULL ? "@" : "",
  3376. ve != NULL ? ve->name : "");
  3377. lock_release(rtld_bind_lock, &lockstate);
  3378. LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
  3379. return NULL;
  3380. }
  3381. void *
  3382. dlsym(void *handle, const char *name)
  3383. {
  3384. return do_dlsym(handle, name, __builtin_return_address(0), NULL,
  3385. SYMLOOK_DLSYM);
  3386. }
  3387. dlfunc_t
  3388. dlfunc(void *handle, const char *name)
  3389. {
  3390. union {
  3391. void *d;
  3392. dlfunc_t f;
  3393. } rv;
  3394. rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
  3395. SYMLOOK_DLSYM);
  3396. return (rv.f);
  3397. }
  3398. void *
  3399. dlvsym(void *handle, const char *name, const char *version)
  3400. {
  3401. Ver_Entry ventry;
  3402. ventry.name = version;
  3403. ventry.file = NULL;
  3404. ventry.hash = elf_hash(version);
3405. ventry.flags = 0;
  3406. return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
  3407. SYMLOOK_DLSYM);
  3408. }
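/*
 * Usage sketch for dlvsym() (application side): look up a specific
 * symbol version rather than the default chosen by dlsym().  The
 * version string below is illustrative only.
 *
 *	void *p = dlvsym(handle, "pthread_create", "FBSD_1.0");
 *	if (p == NULL)
 *		errx(1, "%s", dlerror());
 */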
  3409. int
  3410. _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
  3411. {
  3412. const Obj_Entry *obj;
  3413. RtldLockState lockstate;
  3414. rlock_acquire(rtld_bind_lock, &lockstate);
  3415. obj = obj_from_addr(addr);
  3416. if (obj == NULL) {
  3417. _rtld_error("No shared object contains address");
  3418. lock_release(rtld_bind_lock, &lockstate);
  3419. return (0);
  3420. }
  3421. rtld_fill_dl_phdr_info(obj, phdr_info);
  3422. lock_release(rtld_bind_lock, &lockstate);
  3423. return (1);
  3424. }
  3425. int
  3426. dladdr(const void *addr, Dl_info *info)
  3427. {
  3428. const Obj_Entry *obj;
  3429. const Elf_Sym *def;
  3430. void *symbol_addr;
  3431. unsigned long symoffset;
  3432. RtldLockState lockstate;
  3433. rlock_acquire(rtld_bind_lock, &lockstate);
  3434. obj = obj_from_addr(addr);
  3435. if (obj == NULL) {
  3436. _rtld_error("No shared object contains address");
  3437. lock_release(rtld_bind_lock, &lockstate);
  3438. return 0;
  3439. }
  3440. info->dli_fname = obj->path;
  3441. info->dli_fbase = obj->mapbase;
  3442. info->dli_saddr = (void *)0;
  3443. info->dli_sname = NULL;
  3444. /*
  3445. * Walk the symbol list looking for the symbol whose address is
  3446. * closest to the address sent in.
  3447. */
  3448. for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
  3449. def = obj->symtab + symoffset;
  3450. /*
3451. * Skip the symbol if st_shndx is either SHN_UNDEF or
  3452. * SHN_COMMON.
  3453. */
  3454. if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
  3455. continue;
  3456. /*
3457. * If the symbol's address is greater than the specified address,
3458. * or if it is further away from addr than the current nearest
3459. * symbol, then reject it.
  3460. */
  3461. symbol_addr = obj->relocbase + def->st_value;
  3462. if (symbol_addr > addr || symbol_addr < info->dli_saddr)
  3463. continue;
  3464. /* Update our idea of the nearest symbol. */
  3465. info->dli_sname = obj->strtab + def->st_name;
  3466. info->dli_saddr = symbol_addr;
  3467. /* Exact match? */
  3468. if (info->dli_saddr == addr)
  3469. break;
  3470. }
  3471. lock_release(rtld_bind_lock, &lockstate);
  3472. return 1;
  3473. }
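/*
 * Usage sketch for dladdr() (application side): map a code or data
 * address back to its containing object and the nearest preceding
 * exported symbol; "some_function" is a placeholder.
 *
 *	Dl_info info;
 *	if (dladdr((const void *)&some_function, &info) != 0)
 *		printf("%s defined in %s near %p\n", info.dli_sname,
 *		    info.dli_fname, info.dli_saddr);
 *
 * Note the unusual convention above: non-zero means success.
 */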
  3474. int
  3475. dlinfo(void *handle, int request, void *p)
  3476. {
  3477. const Obj_Entry *obj;
  3478. RtldLockState lockstate;
  3479. int error;
  3480. rlock_acquire(rtld_bind_lock, &lockstate);
  3481. if (handle == NULL || handle == RTLD_SELF) {
  3482. void *retaddr;
  3483. retaddr = __builtin_return_address(0); /* __GNUC__ only */
  3484. if ((obj = obj_from_addr(retaddr)) == NULL)
  3485. _rtld_error("Cannot determine caller's shared object");
  3486. } else
  3487. obj = dlcheck(handle);
  3488. if (obj == NULL) {
  3489. lock_release(rtld_bind_lock, &lockstate);
  3490. return (-1);
  3491. }
  3492. error = 0;
  3493. switch (request) {
  3494. case RTLD_DI_LINKMAP:
  3495. *((struct link_map const **)p) = &obj->linkmap;
  3496. break;
  3497. case RTLD_DI_ORIGIN:
  3498. error = rtld_dirname(obj->path, p);
  3499. break;
  3500. case RTLD_DI_SERINFOSIZE:
  3501. case RTLD_DI_SERINFO:
  3502. error = do_search_info(obj, request, (struct dl_serinfo *)p);
  3503. break;
  3504. default:
  3505. _rtld_error("Invalid request %d passed to dlinfo()", request);
  3506. error = -1;
  3507. }
  3508. lock_release(rtld_bind_lock, &lockstate);
  3509. return (error);
  3510. }
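/*
 * Usage sketch for the search-path requests handled above: the caller
 * asks for the size first, allocates the buffer, copies the returned
 * counts into it, and only then asks for the contents;
 * do_search_info() below rejects a buffer whose counts do not match.
 * Error checking is omitted for brevity.
 *
 *	struct dl_serinfo hdr, *sip;
 *
 *	dlinfo(handle, RTLD_DI_SERINFOSIZE, &hdr);
 *	sip = malloc(hdr.dls_size);
 *	sip->dls_size = hdr.dls_size;
 *	sip->dls_cnt = hdr.dls_cnt;
 *	dlinfo(handle, RTLD_DI_SERINFO, sip);
 *	for (unsigned int i = 0; i < sip->dls_cnt; i++)
 *		printf("%s\n", sip->dls_serpath[i].dls_name);
 *	free(sip);
 */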
  3511. static void
  3512. rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
  3513. {
  3514. phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
  3515. phdr_info->dlpi_name = obj->path;
  3516. phdr_info->dlpi_phdr = obj->phdr;
  3517. phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
  3518. phdr_info->dlpi_tls_modid = obj->tlsindex;
  3519. phdr_info->dlpi_tls_data = obj->tlsinit;
  3520. phdr_info->dlpi_adds = obj_loads;
  3521. phdr_info->dlpi_subs = obj_loads - obj_count;
  3522. }
  3523. int
  3524. dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
  3525. {
  3526. struct dl_phdr_info phdr_info;
  3527. Obj_Entry *obj, marker;
  3528. RtldLockState bind_lockstate, phdr_lockstate;
  3529. int error;
  3530. init_marker(&marker);
  3531. error = 0;
  3532. wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
  3533. wlock_acquire(rtld_bind_lock, &bind_lockstate);
  3534. for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) {
  3535. TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next);
  3536. rtld_fill_dl_phdr_info(obj, &phdr_info);
  3537. hold_object(obj);
  3538. lock_release(rtld_bind_lock, &bind_lockstate);
  3539. error = callback(&phdr_info, sizeof phdr_info, param);
  3540. wlock_acquire(rtld_bind_lock, &bind_lockstate);
  3541. unhold_object(obj);
  3542. obj = globallist_next(&marker);
  3543. TAILQ_REMOVE(&obj_list, &marker, next);
  3544. if (error != 0) {
  3545. lock_release(rtld_bind_lock, &bind_lockstate);
  3546. lock_release(rtld_phdr_lock, &phdr_lockstate);
  3547. return (error);
  3548. }
  3549. }
  3550. if (error == 0) {
  3551. rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info);
  3552. lock_release(rtld_bind_lock, &bind_lockstate);
  3553. error = callback(&phdr_info, sizeof(phdr_info), param);
  3554. }
  3555. lock_release(rtld_phdr_lock, &phdr_lockstate);
  3556. return (error);
  3557. }
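/*
 * Usage sketch for dl_iterate_phdr() (application side): the callback
 * runs once per loaded object plus once for rtld itself, with the bind
 * lock dropped around each invocation as implemented above.  Returning
 * non-zero from the callback stops the iteration.
 *
 *	static int
 *	print_obj(struct dl_phdr_info *info, size_t size __unused,
 *	    void *arg __unused)
 *	{
 *		printf("%s mapped at %p (%d phdrs)\n", info->dlpi_name,
 *		    (void *)info->dlpi_addr, (int)info->dlpi_phnum);
 *		return (0);
 *	}
 *
 *	dl_iterate_phdr(print_obj, NULL);
 */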
  3558. static void *
  3559. fill_search_info(const char *dir, size_t dirlen, void *param)
  3560. {
  3561. struct fill_search_info_args *arg;
  3562. arg = param;
  3563. if (arg->request == RTLD_DI_SERINFOSIZE) {
  3564. arg->serinfo->dls_cnt ++;
  3565. arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
  3566. } else {
  3567. struct dl_serpath *s_entry;
  3568. s_entry = arg->serpath;
  3569. s_entry->dls_name = arg->strspace;
  3570. s_entry->dls_flags = arg->flags;
  3571. strncpy(arg->strspace, dir, dirlen);
  3572. arg->strspace[dirlen] = '\0';
  3573. arg->strspace += dirlen + 1;
  3574. arg->serpath++;
  3575. }
  3576. return (NULL);
  3577. }
  3578. static int
  3579. do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
  3580. {
  3581. struct dl_serinfo _info;
  3582. struct fill_search_info_args args;
  3583. args.request = RTLD_DI_SERINFOSIZE;
  3584. args.serinfo = &_info;
  3585. _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
  3586. _info.dls_cnt = 0;
  3587. path_enumerate(obj->rpath, fill_search_info, NULL, &args);
  3588. path_enumerate(ld_library_path, fill_search_info, NULL, &args);
  3589. path_enumerate(obj->runpath, fill_search_info, NULL, &args);
  3590. path_enumerate(gethints(obj->z_nodeflib), fill_search_info, NULL, &args);
  3591. if (!obj->z_nodeflib)
  3592. path_enumerate(ld_standard_library_path, fill_search_info, NULL, &args);
  3593. if (request == RTLD_DI_SERINFOSIZE) {
  3594. info->dls_size = _info.dls_size;
  3595. info->dls_cnt = _info.dls_cnt;
  3596. return (0);
  3597. }
  3598. if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
  3599. _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
  3600. return (-1);
  3601. }
  3602. args.request = RTLD_DI_SERINFO;
  3603. args.serinfo = info;
  3604. args.serpath = &info->dls_serpath[0];
  3605. args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];
  3606. args.flags = LA_SER_RUNPATH;
  3607. if (path_enumerate(obj->rpath, fill_search_info, NULL, &args) != NULL)
  3608. return (-1);
  3609. args.flags = LA_SER_LIBPATH;
  3610. if (path_enumerate(ld_library_path, fill_search_info, NULL, &args) != NULL)
  3611. return (-1);
  3612. args.flags = LA_SER_RUNPATH;
  3613. if (path_enumerate(obj->runpath, fill_search_info, NULL, &args) != NULL)
  3614. return (-1);
  3615. args.flags = LA_SER_CONFIG;
  3616. if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, NULL, &args)
  3617. != NULL)
  3618. return (-1);
  3619. args.flags = LA_SER_DEFAULT;
  3620. if (!obj->z_nodeflib && path_enumerate(ld_standard_library_path,
  3621. fill_search_info, NULL, &args) != NULL)
  3622. return (-1);
  3623. return (0);
  3624. }
  3625. static int
  3626. rtld_dirname(const char *path, char *bname)
  3627. {
  3628. const char *endp;
  3629. /* Empty or NULL string gets treated as "." */
  3630. if (path == NULL || *path == '\0') {
  3631. bname[0] = '.';
  3632. bname[1] = '\0';
  3633. return (0);
  3634. }
  3635. /* Strip trailing slashes */
  3636. endp = path + strlen(path) - 1;
  3637. while (endp > path && *endp == '/')
  3638. endp--;
  3639. /* Find the start of the dir */
  3640. while (endp > path && *endp != '/')
  3641. endp--;
  3642. /* Either the dir is "/" or there are no slashes */
  3643. if (endp == path) {
  3644. bname[0] = *endp == '/' ? '/' : '.';
  3645. bname[1] = '\0';
  3646. return (0);
  3647. } else {
  3648. do {
  3649. endp--;
  3650. } while (endp > path && *endp == '/');
  3651. }
  3652. if (endp - path + 2 > PATH_MAX)
  3653. {
  3654. _rtld_error("Filename is too long: %s", path);
  3655. return(-1);
  3656. }
  3657. strncpy(bname, path, endp - path + 1);
  3658. bname[endp - path + 1] = '\0';
  3659. return (0);
  3660. }
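/*
 * For example, "/usr/lib/libfoo.so.1" yields "/usr/lib", "libfoo.so"
 * yields "." and "/" stays "/"; the output buffer is assumed to hold
 * up to PATH_MAX bytes, as enforced by the length check above.
 */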
  3661. static int
  3662. rtld_dirname_abs(const char *path, char *base)
  3663. {
  3664. char *last;
  3665. if (realpath(path, base) == NULL) {
  3666. _rtld_error("realpath \"%s\" failed (%s)", path,
  3667. rtld_strerror(errno));
  3668. return (-1);
  3669. }
  3670. dbg("%s -> %s", path, base);
  3671. last = strrchr(base, '/');
  3672. if (last == NULL) {
  3673. _rtld_error("non-abs result from realpath \"%s\"", path);
  3674. return (-1);
  3675. }
  3676. if (last != base)
  3677. *last = '\0';
  3678. return (0);
  3679. }
  3680. static void
  3681. linkmap_add(Obj_Entry *obj)
  3682. {
  3683. struct link_map *l, *prev;
  3684. l = &obj->linkmap;
  3685. l->l_name = obj->path;
  3686. l->l_base = obj->mapbase;
  3687. l->l_ld = obj->dynamic;
  3688. l->l_addr = obj->relocbase;
  3689. if (r_debug.r_map == NULL) {
  3690. r_debug.r_map = l;
  3691. return;
  3692. }
  3693. /*
  3694. * Scan to the end of the list, but not past the entry for the
  3695. * dynamic linker, which we want to keep at the very end.
  3696. */
  3697. for (prev = r_debug.r_map;
  3698. prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
  3699. prev = prev->l_next)
  3700. ;
  3701. /* Link in the new entry. */
  3702. l->l_prev = prev;
  3703. l->l_next = prev->l_next;
  3704. if (l->l_next != NULL)
  3705. l->l_next->l_prev = l;
  3706. prev->l_next = l;
  3707. }
  3708. static void
  3709. linkmap_delete(Obj_Entry *obj)
  3710. {
  3711. struct link_map *l;
  3712. l = &obj->linkmap;
  3713. if (l->l_prev == NULL) {
  3714. if ((r_debug.r_map = l->l_next) != NULL)
  3715. l->l_next->l_prev = NULL;
  3716. return;
  3717. }
  3718. if ((l->l_prev->l_next = l->l_next) != NULL)
  3719. l->l_next->l_prev = l->l_prev;
  3720. }
  3721. /*
  3722. * Function for the debugger to set a breakpoint on to gain control.
  3723. *
  3724. * The two parameters allow the debugger to easily find and determine
  3725. * what the runtime loader is doing and to whom it is doing it.
  3726. *
  3727. * When the loadhook trap is hit (r_debug_state, set at program
  3728. * initialization), the arguments can be found on the stack:
  3729. *
  3730. * +8 struct link_map *m
  3731. * +4 struct r_debug *rd
  3732. * +0 RetAddr
  3733. */
  3734. void
  3735. r_debug_state(struct r_debug* rd __unused, struct link_map *m __unused)
  3736. {
  3737. /*
  3738. * The following is a hack to force the compiler to emit calls to
  3739. * this function, even when optimizing. If the function is empty,
  3740. * the compiler is not obliged to emit any code for calls to it,
  3741. * even when marked __noinline. However, gdb depends on those
  3742. * calls being made.
  3743. */
  3744. __compiler_membar();
  3745. }
  3746. /*
  3747. * A function called after init routines have completed. This can be used to
  3748. * break before a program's entry routine is called, and can be used when
  3749. * main is not available in the symbol table.
  3750. */
  3751. void
  3752. _r_debug_postinit(struct link_map *m __unused)
  3753. {
  3754. /* See r_debug_state(). */
  3755. __compiler_membar();
  3756. }
  3757. static void
  3758. release_object(Obj_Entry *obj)
  3759. {
  3760. if (obj->holdcount > 0) {
  3761. obj->unholdfree = true;
  3762. return;
  3763. }
  3764. munmap(obj->mapbase, obj->mapsize);
  3765. linkmap_delete(obj);
  3766. obj_free(obj);
  3767. }
  3768. /*
  3769. * Get address of the pointer variable in the main program.
3770. * Prefer a non-weak symbol over a weak one.
  3771. */
  3772. static const void **
  3773. get_program_var_addr(const char *name, RtldLockState *lockstate)
  3774. {
  3775. SymLook req;
  3776. DoneList donelist;
  3777. symlook_init(&req, name);
  3778. req.lockstate = lockstate;
  3779. donelist_init(&donelist);
  3780. if (symlook_global(&req, &donelist) != 0)
  3781. return (NULL);
  3782. if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
  3783. return ((const void **)make_function_pointer(req.sym_out,
  3784. req.defobj_out));
  3785. else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
  3786. return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
  3787. else
  3788. return ((const void **)(req.defobj_out->relocbase +
  3789. req.sym_out->st_value));
  3790. }
  3791. /*
  3792. * Set a pointer variable in the main program to the given value. This
  3793. * is used to set key variables such as "environ" before any of the
  3794. * init functions are called.
  3795. */
  3796. static void
  3797. set_program_var(const char *name, const void *value)
  3798. {
  3799. const void **addr;
  3800. if ((addr = get_program_var_addr(name, NULL)) != NULL) {
  3801. dbg("\"%s\": *%p <-- %p", name, addr, value);
  3802. *addr = value;
  3803. }
  3804. }
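/*
 * For example, startup code in this file is expected to do roughly
 * set_program_var("environ", env) before running any initializers, so
 * that the main program's copy of the variable observes rtld's view of
 * the environment; the actual call sites are elsewhere in this file.
 */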
  3805. /*
  3806. * Search the global objects, including dependencies and main object,
  3807. * for the given symbol.
  3808. */
  3809. static int
  3810. symlook_global(SymLook *req, DoneList *donelist)
  3811. {
  3812. SymLook req1;
  3813. const Objlist_Entry *elm;
  3814. int res;
  3815. symlook_init_from_req(&req1, req);
  3816. /* Search all objects loaded at program start up. */
  3817. if (req->defobj_out == NULL ||
  3818. ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
  3819. res = symlook_list(&req1, &list_main, donelist);
  3820. if (res == 0 && (req->defobj_out == NULL ||
  3821. ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
  3822. req->sym_out = req1.sym_out;
  3823. req->defobj_out = req1.defobj_out;
  3824. assert(req->defobj_out != NULL);
  3825. }
  3826. }
  3827. /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
  3828. STAILQ_FOREACH(elm, &list_global, link) {
  3829. if (req->defobj_out != NULL &&
  3830. ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
  3831. break;
  3832. res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
  3833. if (res == 0 && (req->defobj_out == NULL ||
  3834. ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
  3835. req->sym_out = req1.sym_out;
  3836. req->defobj_out = req1.defobj_out;
  3837. assert(req->defobj_out != NULL);
  3838. }
  3839. }
  3840. return (req->sym_out != NULL ? 0 : ESRCH);
  3841. }
  3842. /*
  3843. * Given a symbol name in a referencing object, find the corresponding
3844. * definition of the symbol. Returns 0 and fills in req->sym_out and
3845. * req->defobj_out (the defining object) if a definition was found,
3846. * or ESRCH otherwise.
  3847. */
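/*
 * In outline, the search below proceeds in this order: the referencing
 * object itself (only when it was linked -Bsymbolic, was opened with
 * RTLD_DEEPBIND, or the symbol has protected visibility), then the
 * global scope via symlook_global() (objects loaded at startup and
 * RTLD_GLOBAL DAGs), then every dlopened DAG that contains the
 * referencing object, and finally rtld itself.  A non-weak definition
 * found earlier is preferred over weak definitions found later.
 */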
  3848. static int
  3849. symlook_default(SymLook *req, const Obj_Entry *refobj)
  3850. {
  3851. DoneList donelist;
  3852. const Objlist_Entry *elm;
  3853. SymLook req1;
  3854. int res;
  3855. donelist_init(&donelist);
  3856. symlook_init_from_req(&req1, req);
  3857. /*
  3858. * Look first in the referencing object if linked symbolically,
  3859. * and similarly handle protected symbols.
  3860. */
  3861. res = symlook_obj(&req1, refobj);
  3862. if (res == 0 && (refobj->symbolic ||
  3863. ELF_ST_VISIBILITY(req1.sym_out->st_other) == STV_PROTECTED)) {
  3864. req->sym_out = req1.sym_out;
  3865. req->defobj_out = req1.defobj_out;
  3866. assert(req->defobj_out != NULL);
  3867. }
  3868. if (refobj->symbolic || req->defobj_out != NULL)
  3869. donelist_check(&donelist, refobj);
  3870. symlook_global(req, &donelist);
  3871. /* Search all dlopened DAGs containing the referencing object. */
  3872. STAILQ_FOREACH(elm, &refobj->dldags, link) {
  3873. if (req->sym_out != NULL &&
  3874. ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
  3875. break;
  3876. res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
  3877. if (res == 0 && (req->sym_out == NULL ||
  3878. ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
  3879. req->sym_out = req1.sym_out;
  3880. req->defobj_out = req1.defobj_out;
  3881. assert(req->defobj_out != NULL);
  3882. }
  3883. }
  3884. /*
  3885. * Search the dynamic linker itself, and possibly resolve the
  3886. * symbol from there. This is how the application links to
  3887. * dynamic linker services such as dlopen.
  3888. */
  3889. if (req->sym_out == NULL ||
  3890. ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
  3891. res = symlook_obj(&req1, &obj_rtld);
  3892. if (res == 0) {
  3893. req->sym_out = req1.sym_out;
  3894. req->defobj_out = req1.defobj_out;
  3895. assert(req->defobj_out != NULL);
  3896. }
  3897. }
  3898. return (req->sym_out != NULL ? 0 : ESRCH);
  3899. }
  3900. static int
  3901. symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
  3902. {
  3903. const Elf_Sym *def;
  3904. const Obj_Entry *defobj;
  3905. const Objlist_Entry *elm;
  3906. SymLook req1;
  3907. int res;
  3908. def = NULL;
  3909. defobj = NULL;
  3910. STAILQ_FOREACH(elm, objlist, link) {
  3911. if (donelist_check(dlp, elm->obj))
  3912. continue;
  3913. symlook_init_from_req(&req1, req);
  3914. if ((res = symlook_obj(&req1, elm->obj)) == 0) {
  3915. if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
  3916. def = req1.sym_out;
  3917. defobj = req1.defobj_out;
  3918. if (ELF_ST_BIND(def->st_info) != STB_WEAK)
  3919. break;
  3920. }
  3921. }
  3922. }
  3923. if (def != NULL) {
  3924. req->sym_out = def;
  3925. req->defobj_out = defobj;
  3926. return (0);
  3927. }
  3928. return (ESRCH);
  3929. }
  3930. /*
3931. * Search the chain of DAGs pointed to by the given Needed_Entry
3932. * for a symbol of the given name. Each DAG is scanned completely
3933. * before advancing to the next one. Returns 0 and fills in the
3934. * request if a definition was found, or ESRCH otherwise.
  3935. */
  3936. static int
  3937. symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
  3938. {
  3939. const Elf_Sym *def;
  3940. const Needed_Entry *n;
  3941. const Obj_Entry *defobj;
  3942. SymLook req1;
  3943. int res;
  3944. def = NULL;
  3945. defobj = NULL;
  3946. symlook_init_from_req(&req1, req);
  3947. for (n = needed; n != NULL; n = n->next) {
  3948. if (n->obj == NULL ||
  3949. (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
  3950. continue;
  3951. if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
  3952. def = req1.sym_out;
  3953. defobj = req1.defobj_out;
  3954. if (ELF_ST_BIND(def->st_info) != STB_WEAK)
  3955. break;
  3956. }
  3957. }
  3958. if (def != NULL) {
  3959. req->sym_out = def;
  3960. req->defobj_out = defobj;
  3961. return (0);
  3962. }
  3963. return (ESRCH);
  3964. }
  3965. /*
3966. * Search the symbol table of a single shared object for a symbol of
3967. * the given name and version, if requested. Returns 0 and fills in
3968. * the request if a definition was found, or an error otherwise. If
3969. * the object is a filter, the symbol is resolved from the filtee.
  3970. *
  3971. * The symbol's hash value is passed in for efficiency reasons; that
  3972. * eliminates many recomputations of the hash value.
  3973. */
  3974. int
  3975. symlook_obj(SymLook *req, const Obj_Entry *obj)
  3976. {
  3977. DoneList donelist;
  3978. SymLook req1;
  3979. int flags, res, mres;
  3980. /*
  3981. * If there is at least one valid hash at this point, we prefer to
  3982. * use the faster GNU version if available.
  3983. */
  3984. if (obj->valid_hash_gnu)
  3985. mres = symlook_obj1_gnu(req, obj);
  3986. else if (obj->valid_hash_sysv)
  3987. mres = symlook_obj1_sysv(req, obj);
  3988. else
  3989. return (EINVAL);
  3990. if (mres == 0) {
  3991. if (obj->needed_filtees != NULL) {
  3992. flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
  3993. load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
  3994. donelist_init(&donelist);
  3995. symlook_init_from_req(&req1, req);
  3996. res = symlook_needed(&req1, obj->needed_filtees, &donelist);
  3997. if (res == 0) {
  3998. req->sym_out = req1.sym_out;
  3999. req->defobj_out = req1.defobj_out;
  4000. }
  4001. return (res);
  4002. }
  4003. if (obj->needed_aux_filtees != NULL) {
  4004. flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
  4005. load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
  4006. donelist_init(&donelist);
  4007. symlook_init_from_req(&req1, req);
  4008. res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
  4009. if (res == 0) {
  4010. req->sym_out = req1.sym_out;
  4011. req->defobj_out = req1.defobj_out;
  4012. return (res);
  4013. }
  4014. }
  4015. }
  4016. return (mres);
  4017. }
  4018. /* Symbol match routine common to both hash functions */
  4019. static bool
  4020. matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
  4021. const unsigned long symnum)
  4022. {
  4023. Elf_Versym verndx;
  4024. const Elf_Sym *symp;
  4025. const char *strp;
  4026. symp = obj->symtab + symnum;
  4027. strp = obj->strtab + symp->st_name;
  4028. switch (ELF_ST_TYPE(symp->st_info)) {
  4029. case STT_FUNC:
  4030. case STT_NOTYPE:
  4031. case STT_OBJECT:
  4032. case STT_COMMON:
  4033. case STT_GNU_IFUNC:
  4034. if (symp->st_value == 0)
  4035. return (false);
  4036. /* fallthrough */
  4037. case STT_TLS:
  4038. if (symp->st_shndx != SHN_UNDEF)
  4039. break;
  4040. #ifndef __mips__
  4041. else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
  4042. (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
  4043. break;
  4044. #endif
  4045. /* fallthrough */
  4046. default:
  4047. return (false);
  4048. }
  4049. if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
  4050. return (false);
  4051. if (req->ventry == NULL) {
  4052. if (obj->versyms != NULL) {
  4053. verndx = VER_NDX(obj->versyms[symnum]);
  4054. if (verndx > obj->vernum) {
  4055. _rtld_error(
  4056. "%s: symbol %s references wrong version %d",
4057. obj->path, strp, verndx);
  4058. return (false);
  4059. }
  4060. /*
  4061. * If we are not called from dlsym (i.e. this
  4062. * is a normal relocation from unversioned
  4063. * binary), accept the symbol immediately if
4064. * it happens to have the first version after this
  4065. * shared object became versioned. Otherwise,
  4066. * if symbol is versioned and not hidden,
  4067. * remember it. If it is the only symbol with
  4068. * this name exported by the shared object, it
  4069. * will be returned as a match by the calling
  4070. * function. If symbol is global (verndx < 2)
  4071. * accept it unconditionally.
  4072. */
  4073. if ((req->flags & SYMLOOK_DLSYM) == 0 &&
  4074. verndx == VER_NDX_GIVEN) {
  4075. result->sym_out = symp;
  4076. return (true);
  4077. }
  4078. else if (verndx >= VER_NDX_GIVEN) {
  4079. if ((obj->versyms[symnum] & VER_NDX_HIDDEN)
  4080. == 0) {
  4081. if (result->vsymp == NULL)
  4082. result->vsymp = symp;
  4083. result->vcount++;
  4084. }
  4085. return (false);
  4086. }
  4087. }
  4088. result->sym_out = symp;
  4089. return (true);
  4090. }
  4091. if (obj->versyms == NULL) {
  4092. if (object_match_name(obj, req->ventry->name)) {
  4093. _rtld_error("%s: object %s should provide version %s "
  4094. "for symbol %s", obj_rtld.path, obj->path,
4095. req->ventry->name, strp);
  4096. return (false);
  4097. }
  4098. } else {
  4099. verndx = VER_NDX(obj->versyms[symnum]);
  4100. if (verndx > obj->vernum) {
  4101. _rtld_error("%s: symbol %s references wrong version %d",
4102. obj->path, strp, verndx);
  4103. return (false);
  4104. }
  4105. if (obj->vertab[verndx].hash != req->ventry->hash ||
  4106. strcmp(obj->vertab[verndx].name, req->ventry->name)) {
  4107. /*
4108. * Version does not match. Check whether this is
4109. * a global symbol (verndx < 2) and whether it is
4110. * not hidden; if such a symbol is available,
4111. * use it. Do not return the symbol if we are
4112. * called by dlvsym, because dlvsym looks for
4113. * a specific version and the default one is not
4114. * what dlvsym wants.
  4115. */
  4116. if ((req->flags & SYMLOOK_DLSYM) ||
  4117. (verndx >= VER_NDX_GIVEN) ||
  4118. (obj->versyms[symnum] & VER_NDX_HIDDEN))
  4119. return (false);
  4120. }
  4121. }
  4122. result->sym_out = symp;
  4123. return (true);
  4124. }
  4125. /*
  4126. * Search for symbol using SysV hash function.
  4127. * obj->buckets is known not to be NULL at this point; the test for this was
  4128. * performed with the obj->valid_hash_sysv assignment.
  4129. */
  4130. static int
  4131. symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
  4132. {
  4133. unsigned long symnum;
  4134. Sym_Match_Result matchres;
  4135. matchres.sym_out = NULL;
  4136. matchres.vsymp = NULL;
  4137. matchres.vcount = 0;
  4138. for (symnum = obj->buckets[req->hash % obj->nbuckets];
  4139. symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
  4140. if (symnum >= obj->nchains)
  4141. return (ESRCH); /* Bad object */
  4142. if (matched_symbol(req, obj, &matchres, symnum)) {
  4143. req->sym_out = matchres.sym_out;
  4144. req->defobj_out = obj;
  4145. return (0);
  4146. }
  4147. }
  4148. if (matchres.vcount == 1) {
  4149. req->sym_out = matchres.vsymp;
  4150. req->defobj_out = obj;
  4151. return (0);
  4152. }
  4153. return (ESRCH);
  4154. }
  4155. /* Search for symbol using GNU hash function */
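/*
 * Notes on the fast path below: each defined symbol sets two bits in
 * one word of the Bloom filter, selected by the GNU hash and by the
 * hash shifted right by shift2_gnu; if either bit is clear, the symbol
 * is definitely absent and the chain walk is skipped.  Within a chain
 * all entries share a bucket, the stored hash values have their low
 * bit reused as an end-of-chain marker, and that is why the comparison
 * ignores bit 0.
 */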
  4156. static int
  4157. symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
  4158. {
  4159. Elf_Addr bloom_word;
  4160. const Elf32_Word *hashval;
  4161. Elf32_Word bucket;
  4162. Sym_Match_Result matchres;
  4163. unsigned int h1, h2;
  4164. unsigned long symnum;
  4165. matchres.sym_out = NULL;
  4166. matchres.vsymp = NULL;
  4167. matchres.vcount = 0;
  4168. /* Pick right bitmask word from Bloom filter array */
  4169. bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
  4170. obj->maskwords_bm_gnu];
4171. /* Reduce the GNU hash and its shifted variant modulo the word size */
  4172. h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
  4173. h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));
  4174. /* Filter out the "definitely not in set" queries */
  4175. if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
  4176. return (ESRCH);
4177. /* Locate hash chain and corresponding value element */
  4178. bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
  4179. if (bucket == 0)
  4180. return (ESRCH);
  4181. hashval = &obj->chain_zero_gnu[bucket];
  4182. do {
  4183. if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
  4184. symnum = hashval - obj->chain_zero_gnu;
  4185. if (matched_symbol(req, obj, &matchres, symnum)) {
  4186. req->sym_out = matchres.sym_out;
  4187. req->defobj_out = obj;
  4188. return (0);
  4189. }
  4190. }
  4191. } while ((*hashval++ & 1) == 0);
  4192. if (matchres.vcount == 1) {
  4193. req->sym_out = matchres.vsymp;
  4194. req->defobj_out = obj;
  4195. return (0);
  4196. }
  4197. return (ESRCH);
  4198. }
  4199. static void
  4200. trace_loaded_objects(Obj_Entry *obj)
  4201. {
  4202. const char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
  4203. int c;
  4204. if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) == NULL)
  4205. main_local = "";
  4206. if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL)
  4207. fmt1 = "\t%o => %p (%x)\n";
  4208. if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL)
  4209. fmt2 = "\t%o (%x)\n";
  4210. list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));
  4211. for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
  4212. Needed_Entry *needed;
  4213. const char *name, *path;
  4214. bool is_lib;
  4215. if (obj->marker)
  4216. continue;
  4217. if (list_containers && obj->needed != NULL)
  4218. rtld_printf("%s:\n", obj->path);
  4219. for (needed = obj->needed; needed; needed = needed->next) {
  4220. if (needed->obj != NULL) {
  4221. if (needed->obj->traced && !list_containers)
  4222. continue;
  4223. needed->obj->traced = true;
  4224. path = needed->obj->path;
  4225. } else
  4226. path = "not found";
  4227. name = obj->strtab + needed->name;
  4228. is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
  4229. fmt = is_lib ? fmt1 : fmt2;
  4230. while ((c = *fmt++) != '\0') {
  4231. switch (c) {
  4232. default:
  4233. rtld_putchar(c);
  4234. continue;
  4235. case '\\':
  4236. switch (c = *fmt) {
  4237. case '\0':
  4238. continue;
  4239. case 'n':
  4240. rtld_putchar('\n');
  4241. break;
  4242. case 't':
  4243. rtld_putchar('\t');
  4244. break;
  4245. }
  4246. break;
  4247. case '%':
  4248. switch (c = *fmt) {
  4249. case '\0':
  4250. continue;
  4251. case '%':
  4252. default:
  4253. rtld_putchar(c);
  4254. break;
  4255. case 'A':
  4256. rtld_putstr(main_local);
  4257. break;
  4258. case 'a':
  4259. rtld_putstr(obj_main->path);
  4260. break;
  4261. case 'o':
  4262. rtld_putstr(name);
  4263. break;
  4264. #if 0
  4265. case 'm':
  4266. rtld_printf("%d", sodp->sod_major);
  4267. break;
  4268. case 'n':
  4269. rtld_printf("%d", sodp->sod_minor);
  4270. break;
  4271. #endif
  4272. case 'p':
  4273. rtld_putstr(path);
  4274. break;
  4275. case 'x':
  4276. rtld_printf("%p", needed->obj ? needed->obj->mapbase :
  4277. 0);
  4278. break;
  4279. }
  4280. break;
  4281. }
  4282. ++fmt;
  4283. }
  4284. }
  4285. }
  4286. }
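/*
 * A sketch of how the format strings above are driven in practice
 * (this is the mechanism ldd(1) relies on); the variables are the
 * _LD()-prefixed names read at the top of trace_loaded_objects():
 *
 *	env LD_TRACE_LOADED_OBJECTS=1 \
 *	    LD_TRACE_LOADED_OBJECTS_FMT1='%o => %p (%x)\n' ./a.out
 *
 * %o expands to the needed object's name, %p to its resolved path (or
 * "not found"), %x to its map base, %a to the main program's path and
 * %A to the TRACE_LOADED_OBJECTS_PROGNAME value.
 */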
  4287. /*
  4288. * Unload a dlopened object and its dependencies from memory and from
  4289. * our data structures. It is assumed that the DAG rooted in the
  4290. * object has already been unreferenced, and that the object has a
  4291. * reference count of 0.
  4292. */
  4293. static void
  4294. unload_object(Obj_Entry *root, RtldLockState *lockstate)
  4295. {
  4296. Obj_Entry marker, *obj, *next;
  4297. assert(root->refcount == 0);
  4298. /*
  4299. * Pass over the DAG removing unreferenced objects from
  4300. * appropriate lists.
  4301. */
  4302. unlink_object(root);
  4303. /* Unmap all objects that are no longer referenced. */
  4304. for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) {
  4305. next = TAILQ_NEXT(obj, next);
  4306. if (obj->marker || obj->refcount != 0)
  4307. continue;
  4308. LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase,
  4309. obj->mapsize, 0, obj->path);
  4310. dbg("unloading \"%s\"", obj->path);
  4311. /*
  4312. * Unlink the object now to prevent new references from
  4313. * being acquired while the bind lock is dropped in
  4314. * recursive dlclose() invocations.
  4315. */
  4316. TAILQ_REMOVE(&obj_list, obj, next);
  4317. obj_count--;
  4318. if (obj->filtees_loaded) {
  4319. if (next != NULL) {
  4320. init_marker(&marker);
  4321. TAILQ_INSERT_BEFORE(next, &marker, next);
  4322. unload_filtees(obj, lockstate);
  4323. next = TAILQ_NEXT(&marker, next);
  4324. TAILQ_REMOVE(&obj_list, &marker, next);
  4325. } else
  4326. unload_filtees(obj, lockstate);
  4327. }
  4328. release_object(obj);
  4329. }
  4330. }
  4331. static void
  4332. unlink_object(Obj_Entry *root)
  4333. {
  4334. Objlist_Entry *elm;
  4335. if (root->refcount == 0) {
  4336. /* Remove the object from the RTLD_GLOBAL list. */
  4337. objlist_remove(&list_global, root);
  4338. /* Remove the object from all objects' DAG lists. */
  4339. STAILQ_FOREACH(elm, &root->dagmembers, link) {
  4340. objlist_remove(&elm->obj->dldags, root);
  4341. if (elm->obj != root)
  4342. unlink_object(elm->obj);
  4343. }
  4344. }
  4345. }
  4346. static void
  4347. ref_dag(Obj_Entry *root)
  4348. {
  4349. Objlist_Entry *elm;
  4350. assert(root->dag_inited);
  4351. STAILQ_FOREACH(elm, &root->dagmembers, link)
  4352. elm->obj->refcount++;
  4353. }
  4354. static void
  4355. unref_dag(Obj_Entry *root)
  4356. {
  4357. Objlist_Entry *elm;
  4358. assert(root->dag_inited);
  4359. STAILQ_FOREACH(elm, &root->dagmembers, link)
  4360. elm->obj->refcount--;
  4361. }
  4362. /*
  4363. * Common code for MD __tls_get_addr().
  4364. */
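/*
 * Layout of the DTV manipulated below: dtv[0] holds the generation
 * count, dtv[1] the number of module index slots, and dtv[index + 1]
 * the address of the TLS block for the module with the given index;
 * a zero slot means the block is allocated lazily on first access via
 * allocate_module_tls().
 */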
  4365. static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
  4366. static void *
  4367. tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
  4368. {
  4369. Elf_Addr *newdtv, *dtv;
  4370. RtldLockState lockstate;
  4371. int to_copy;
  4372. dtv = *dtvp;
  4373. /* Check dtv generation in case new modules have arrived */
  4374. if (dtv[0] != tls_dtv_generation) {
  4375. wlock_acquire(rtld_bind_lock, &lockstate);
  4376. newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
  4377. to_copy = dtv[1];
  4378. if (to_copy > tls_max_index)
  4379. to_copy = tls_max_index;
  4380. memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
  4381. newdtv[0] = tls_dtv_generation;
  4382. newdtv[1] = tls_max_index;
  4383. free(dtv);
  4384. lock_release(rtld_bind_lock, &lockstate);
  4385. dtv = *dtvp = newdtv;
  4386. }
  4387. /* Dynamically allocate module TLS if necessary */
  4388. if (dtv[index + 1] == 0) {
  4389. /* Signal safe, wlock will block out signals. */
  4390. wlock_acquire(rtld_bind_lock, &lockstate);
  4391. if (!dtv[index + 1])
  4392. dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
  4393. lock_release(rtld_bind_lock, &lockstate);
  4394. }
  4395. return ((void *)(dtv[index + 1] + offset));
  4396. }
  4397. void *
  4398. tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
  4399. {
  4400. Elf_Addr *dtv;
  4401. dtv = *dtvp;
  4402. /* Check dtv generation in case new modules have arrived */
  4403. if (__predict_true(dtv[0] == tls_dtv_generation &&
  4404. dtv[index + 1] != 0))
  4405. return ((void *)(dtv[index + 1] + offset));
  4406. return (tls_get_addr_slow(dtvp, index, offset));
  4407. }
  4408. #if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
  4409. defined(__powerpc__) || defined(__riscv)
  4410. /*
  4411. * Return pointer to allocated TLS block
  4412. */
  4413. static void *
  4414. get_tls_block_ptr(void *tcb, size_t tcbsize)
  4415. {
  4416. size_t extra_size, post_size, pre_size, tls_block_size;
  4417. size_t tls_init_align;
  4418. tls_init_align = MAX(obj_main->tlsalign, 1);
4419. /* Compute fragment sizes. */
  4420. extra_size = tcbsize - TLS_TCB_SIZE;
  4421. post_size = calculate_tls_post_size(tls_init_align);
  4422. tls_block_size = tcbsize + post_size;
  4423. pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
  4424. return ((char *)tcb - pre_size - extra_size);
  4425. }
  4426. /*
  4427. * Allocate Static TLS using the Variant I method.
  4428. *
  4429. * For details on the layout, see lib/libc/gen/tls.c.
  4430. *
  4431. * NB: rtld's tls_static_space variable includes TLS_TCB_SIZE and post_size as
  4432. * it is based on tls_last_offset, and TLS offsets here are really TCB
  4433. * offsets, whereas libc's tls_static_space is just the executable's static
  4434. * TLS segment.
  4435. */
  4436. void *
  4437. allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
  4438. {
  4439. Obj_Entry *obj;
  4440. char *tls_block;
  4441. Elf_Addr *dtv, **tcb;
  4442. Elf_Addr addr;
  4443. Elf_Addr i;
  4444. size_t extra_size, maxalign, post_size, pre_size, tls_block_size;
  4445. size_t tls_init_align, tls_init_offset;
  4446. if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
  4447. return (oldtcb);
  4448. assert(tcbsize >= TLS_TCB_SIZE);
  4449. maxalign = MAX(tcbalign, tls_static_max_align);
  4450. tls_init_align = MAX(obj_main->tlsalign, 1);
4451. /* Compute fragment sizes. */
  4452. extra_size = tcbsize - TLS_TCB_SIZE;
  4453. post_size = calculate_tls_post_size(tls_init_align);
  4454. tls_block_size = tcbsize + post_size;
  4455. pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
  4456. tls_block_size += pre_size + tls_static_space - TLS_TCB_SIZE - post_size;
  4457. /* Allocate whole TLS block */
  4458. tls_block = malloc_aligned(tls_block_size, maxalign, 0);
  4459. tcb = (Elf_Addr **)(tls_block + pre_size + extra_size);
  4460. if (oldtcb != NULL) {
  4461. memcpy(tls_block, get_tls_block_ptr(oldtcb, tcbsize),
  4462. tls_static_space);
  4463. free_aligned(get_tls_block_ptr(oldtcb, tcbsize));
  4464. /* Adjust the DTV. */
  4465. dtv = tcb[0];
  4466. for (i = 0; i < dtv[1]; i++) {
  4467. if (dtv[i+2] >= (Elf_Addr)oldtcb &&
  4468. dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
  4469. dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tcb;
  4470. }
  4471. }
  4472. } else {
  4473. dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
  4474. tcb[0] = dtv;
  4475. dtv[0] = tls_dtv_generation;
  4476. dtv[1] = tls_max_index;
  4477. for (obj = globallist_curr(objs); obj != NULL;
  4478. obj = globallist_next(obj)) {
  4479. if (obj->tlsoffset == 0)
  4480. continue;
  4481. tls_init_offset = obj->tlspoffset & (obj->tlsalign - 1);
  4482. addr = (Elf_Addr)tcb + obj->tlsoffset;
  4483. if (tls_init_offset > 0)
  4484. memset((void *)addr, 0, tls_init_offset);
  4485. if (obj->tlsinitsize > 0) {
  4486. memcpy((void *)(addr + tls_init_offset), obj->tlsinit,
  4487. obj->tlsinitsize);
  4488. }
  4489. if (obj->tlssize > obj->tlsinitsize) {
  4490. memset((void *)(addr + tls_init_offset + obj->tlsinitsize),
  4491. 0, obj->tlssize - obj->tlsinitsize - tls_init_offset);
  4492. }
  4493. dtv[obj->tlsindex + 1] = addr;
  4494. }
  4495. }
  4496. return (tcb);
  4497. }
  4498. void
  4499. free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
  4500. {
  4501. Elf_Addr *dtv;
  4502. Elf_Addr tlsstart, tlsend;
  4503. size_t post_size;
  4504. size_t dtvsize, i, tls_init_align;
  4505. assert(tcbsize >= TLS_TCB_SIZE);
  4506. tls_init_align = MAX(obj_main->tlsalign, 1);
4507. /* Compute fragment sizes. */
  4508. post_size = calculate_tls_post_size(tls_init_align);
  4509. tlsstart = (Elf_Addr)tcb + TLS_TCB_SIZE + post_size;
  4510. tlsend = (Elf_Addr)tcb + tls_static_space;
  4511. dtv = *(Elf_Addr **)tcb;
  4512. dtvsize = dtv[1];
  4513. for (i = 0; i < dtvsize; i++) {
  4514. if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
  4515. free((void*)dtv[i+2]);
  4516. }
  4517. }
  4518. free(dtv);
  4519. free_aligned(get_tls_block_ptr(tcb, tcbsize));
  4520. }
  4521. #endif
  4522. #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
  4523. /*
  4524. * Allocate Static TLS using the Variant II method.
  4525. */
  4526. void *
  4527. allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
  4528. {
  4529. Obj_Entry *obj;
  4530. size_t size, ralign;
  4531. char *tls;
  4532. Elf_Addr *dtv, *olddtv;
  4533. Elf_Addr segbase, oldsegbase, addr;
  4534. size_t i;
  4535. ralign = tcbalign;
  4536. if (tls_static_max_align > ralign)
  4537. ralign = tls_static_max_align;
  4538. size = roundup(tls_static_space, ralign) + roundup(tcbsize, ralign);
  4539. assert(tcbsize >= 2*sizeof(Elf_Addr));
  4540. tls = malloc_aligned(size, ralign, 0 /* XXX */);
  4541. dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
  4542. segbase = (Elf_Addr)(tls + roundup(tls_static_space, ralign));
  4543. ((Elf_Addr*)segbase)[0] = segbase;
  4544. ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
  4545. dtv[0] = tls_dtv_generation;
  4546. dtv[1] = tls_max_index;
  4547. if (oldtls) {
  4548. /*
4549. * Copy the static TLS block over as a whole.
  4550. */
  4551. oldsegbase = (Elf_Addr) oldtls;
  4552. memcpy((void *)(segbase - tls_static_space),
  4553. (const void *)(oldsegbase - tls_static_space),
  4554. tls_static_space);
  4555. /*
4556. * If any dynamic TLS blocks have been created by tls_get_addr(),
  4557. * move them over.
  4558. */
  4559. olddtv = ((Elf_Addr**)oldsegbase)[1];
  4560. for (i = 0; i < olddtv[1]; i++) {
  4561. if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
  4562. dtv[i+2] = olddtv[i+2];
  4563. olddtv[i+2] = 0;
  4564. }
  4565. }
  4566. /*
  4567. * We assume that this block was the one we created with
  4568. * allocate_initial_tls().
  4569. */
  4570. free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
  4571. } else {
  4572. for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
  4573. if (obj->marker || obj->tlsoffset == 0)
  4574. continue;
  4575. addr = segbase - obj->tlsoffset;
  4576. memset((void*)(addr + obj->tlsinitsize),
  4577. 0, obj->tlssize - obj->tlsinitsize);
  4578. if (obj->tlsinit) {
  4579. memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
  4580. obj->static_tls_copied = true;
  4581. }
  4582. dtv[obj->tlsindex + 1] = addr;
  4583. }
  4584. }
  4585. return (void*) segbase;
  4586. }
  4587. void
  4588. free_tls(void *tls, size_t tcbsize __unused, size_t tcbalign)
  4589. {
  4590. Elf_Addr* dtv;
  4591. size_t size, ralign;
  4592. int dtvsize, i;
  4593. Elf_Addr tlsstart, tlsend;
  4594. /*
  4595. * Figure out the size of the initial TLS block so that we can
4596. * find the blocks that ___tls_get_addr() allocated dynamically.
  4597. */
  4598. ralign = tcbalign;
  4599. if (tls_static_max_align > ralign)
  4600. ralign = tls_static_max_align;
  4601. size = roundup(tls_static_space, ralign);
  4602. dtv = ((Elf_Addr**)tls)[1];
  4603. dtvsize = dtv[1];
  4604. tlsend = (Elf_Addr) tls;
  4605. tlsstart = tlsend - size;
  4606. for (i = 0; i < dtvsize; i++) {
  4607. if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) {
  4608. free_aligned((void *)dtv[i + 2]);
  4609. }
  4610. }
  4611. free_aligned((void *)tlsstart);
  4612. free((void*) dtv);
  4613. }
  4614. #endif
  4615. /*
  4616. * Allocate TLS block for module with given index.
  4617. */
  4618. void *
  4619. allocate_module_tls(int index)
  4620. {
  4621. Obj_Entry *obj;
  4622. char *p;
  4623. TAILQ_FOREACH(obj, &obj_list, next) {
  4624. if (obj->marker)
  4625. continue;
  4626. if (obj->tlsindex == index)
  4627. break;
  4628. }
  4629. if (obj == NULL) {
  4630. _rtld_error("Can't find module with TLS index %d", index);
  4631. rtld_die();
  4632. }
  4633. p = malloc_aligned(obj->tlssize, obj->tlsalign, obj->tlspoffset);
  4634. memcpy(p, obj->tlsinit, obj->tlsinitsize);
  4635. memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
  4636. return (p);
  4637. }
  4638. bool
  4639. allocate_tls_offset(Obj_Entry *obj)
  4640. {
  4641. size_t off;
  4642. if (obj->tls_done)
  4643. return true;
  4644. if (obj->tlssize == 0) {
  4645. obj->tls_done = true;
  4646. return true;
  4647. }
  4648. if (tls_last_offset == 0)
  4649. off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign,
  4650. obj->tlspoffset);
  4651. else
  4652. off = calculate_tls_offset(tls_last_offset, tls_last_size,
  4653. obj->tlssize, obj->tlsalign, obj->tlspoffset);
  4654. /*
  4655. * If we have already fixed the size of the static TLS block, we
  4656. * must stay within that size. When allocating the static TLS, we
4657. * leave a small amount of spare space to be used for dynamically
  4658. * loading modules which use static TLS.
  4659. */
  4660. if (tls_static_space != 0) {
  4661. if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
  4662. return false;
  4663. } else if (obj->tlsalign > tls_static_max_align) {
  4664. tls_static_max_align = obj->tlsalign;
  4665. }
  4666. tls_last_offset = obj->tlsoffset = off;
  4667. tls_last_size = obj->tlssize;
  4668. obj->tls_done = true;
  4669. return true;
  4670. }
  4671. void
  4672. free_tls_offset(Obj_Entry *obj)
  4673. {
  4674. /*
  4675. * If we were the last thing to allocate out of the static TLS
  4676. * block, we give our space back to the 'allocator'. This is a
  4677. * simplistic workaround to allow libGL.so.1 to be loaded and
  4678. * unloaded multiple times.
  4679. */
  4680. if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
  4681. == calculate_tls_end(tls_last_offset, tls_last_size)) {
  4682. tls_last_offset -= obj->tlssize;
  4683. tls_last_size = 0;
  4684. }
  4685. }
  4686. void *
  4687. _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
  4688. {
  4689. void *ret;
  4690. RtldLockState lockstate;
  4691. wlock_acquire(rtld_bind_lock, &lockstate);
  4692. ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls,
  4693. tcbsize, tcbalign);
  4694. lock_release(rtld_bind_lock, &lockstate);
  4695. return (ret);
  4696. }
  4697. void
  4698. _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
  4699. {
  4700. RtldLockState lockstate;
  4701. wlock_acquire(rtld_bind_lock, &lockstate);
  4702. free_tls(tcb, tcbsize, tcbalign);
  4703. lock_release(rtld_bind_lock, &lockstate);
  4704. }
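/*
 * These two wrappers are the entry points a threading library uses when
 * creating and destroying threads.  A minimal sketch of the expected usage
 * (the size and alignment values are illustrative only):
 *
 *	void *tcb = _rtld_allocate_tls(NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN);
 *	...thread runs with tcb installed as its TLS/TCB pointer...
 *	_rtld_free_tls(tcb, TLS_TCB_SIZE, TLS_TCB_ALIGN);
 */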
  4705. static void
  4706. object_add_name(Obj_Entry *obj, const char *name)
  4707. {
  4708. Name_Entry *entry;
  4709. size_t len;
  4710. len = strlen(name);
  4711. entry = malloc(sizeof(Name_Entry) + len);
  4712. if (entry != NULL) {
  4713. strcpy(entry->name, name);
  4714. STAILQ_INSERT_TAIL(&obj->names, entry, link);
  4715. }
  4716. }
  4717. static int
  4718. object_match_name(const Obj_Entry *obj, const char *name)
  4719. {
  4720. Name_Entry *entry;
  4721. STAILQ_FOREACH(entry, &obj->names, link) {
  4722. if (strcmp(name, entry->name) == 0)
  4723. return (1);
  4724. }
  4725. return (0);
  4726. }
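/*
 * Find the dependency of "obj" known under "name", either among the objects
 * loaded at program startup or among obj's DT_NEEDED entries.
 */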
  4727. static Obj_Entry *
  4728. locate_dependency(const Obj_Entry *obj, const char *name)
  4729. {
  4730. const Objlist_Entry *entry;
  4731. const Needed_Entry *needed;
  4732. STAILQ_FOREACH(entry, &list_main, link) {
  4733. if (object_match_name(entry->obj, name))
  4734. return entry->obj;
  4735. }
  4736. for (needed = obj->needed; needed != NULL; needed = needed->next) {
  4737. if (strcmp(obj->strtab + needed->name, name) == 0 ||
  4738. (needed->obj != NULL && object_match_name(needed->obj, name))) {
  4739. /*
4740. * If there is a DT_NEEDED entry for the name we are looking for,
4741. * we are all set. Note that the object might not be found if
4742. * the dependency has not been loaded yet, so the function can
4743. * return NULL here. This is expected and handled
4744. * properly by the caller.
  4745. */
  4746. return (needed->obj);
  4747. }
  4748. }
  4749. _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
  4750. obj->path, name);
  4751. rtld_die();
  4752. }
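/*
 * Check whether "depobj" defines the version that the Elf_Vernaux record
 * "vna" of "refobj" requires.  Returns 0 on success and -1 (with an rtld
 * error set) when the version is missing and not marked weak.
 */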
  4753. static int
  4754. check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
  4755. const Elf_Vernaux *vna)
  4756. {
  4757. const Elf_Verdef *vd;
  4758. const char *vername;
  4759. vername = refobj->strtab + vna->vna_name;
  4760. vd = depobj->verdef;
  4761. if (vd == NULL) {
  4762. _rtld_error("%s: version %s required by %s not defined",
  4763. depobj->path, vername, refobj->path);
  4764. return (-1);
  4765. }
  4766. for (;;) {
  4767. if (vd->vd_version != VER_DEF_CURRENT) {
  4768. _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
  4769. depobj->path, vd->vd_version);
  4770. return (-1);
  4771. }
  4772. if (vna->vna_hash == vd->vd_hash) {
  4773. const Elf_Verdaux *aux = (const Elf_Verdaux *)
  4774. ((const char *)vd + vd->vd_aux);
  4775. if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
  4776. return (0);
  4777. }
  4778. if (vd->vd_next == 0)
  4779. break;
  4780. vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
  4781. }
  4782. if (vna->vna_flags & VER_FLG_WEAK)
  4783. return (0);
  4784. _rtld_error("%s: version %s required by %s not found",
  4785. depobj->path, vername, refobj->path);
  4786. return (-1);
  4787. }
  4788. static int
  4789. rtld_verify_object_versions(Obj_Entry *obj)
  4790. {
  4791. const Elf_Verneed *vn;
  4792. const Elf_Verdef *vd;
  4793. const Elf_Verdaux *vda;
  4794. const Elf_Vernaux *vna;
  4795. const Obj_Entry *depobj;
  4796. int maxvernum, vernum;
  4797. if (obj->ver_checked)
  4798. return (0);
  4799. obj->ver_checked = true;
  4800. maxvernum = 0;
  4801. /*
  4802. * Walk over defined and required version records and figure out
  4803. * max index used by any of them. Do very basic sanity checking
  4804. * while there.
  4805. */
  4806. vn = obj->verneed;
  4807. while (vn != NULL) {
  4808. if (vn->vn_version != VER_NEED_CURRENT) {
  4809. _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
  4810. obj->path, vn->vn_version);
  4811. return (-1);
  4812. }
  4813. vna = (const Elf_Vernaux *)((const char *)vn + vn->vn_aux);
  4814. for (;;) {
  4815. vernum = VER_NEED_IDX(vna->vna_other);
  4816. if (vernum > maxvernum)
  4817. maxvernum = vernum;
  4818. if (vna->vna_next == 0)
  4819. break;
  4820. vna = (const Elf_Vernaux *)((const char *)vna + vna->vna_next);
  4821. }
  4822. if (vn->vn_next == 0)
  4823. break;
  4824. vn = (const Elf_Verneed *)((const char *)vn + vn->vn_next);
  4825. }
  4826. vd = obj->verdef;
  4827. while (vd != NULL) {
  4828. if (vd->vd_version != VER_DEF_CURRENT) {
  4829. _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
  4830. obj->path, vd->vd_version);
  4831. return (-1);
  4832. }
  4833. vernum = VER_DEF_IDX(vd->vd_ndx);
  4834. if (vernum > maxvernum)
  4835. maxvernum = vernum;
  4836. if (vd->vd_next == 0)
  4837. break;
  4838. vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
  4839. }
  4840. if (maxvernum == 0)
  4841. return (0);
  4842. /*
  4843. * Store version information in array indexable by version index.
  4844. * Verify that object version requirements are satisfied along the
  4845. * way.
  4846. */
  4847. obj->vernum = maxvernum + 1;
  4848. obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));
  4849. vd = obj->verdef;
  4850. while (vd != NULL) {
  4851. if ((vd->vd_flags & VER_FLG_BASE) == 0) {
  4852. vernum = VER_DEF_IDX(vd->vd_ndx);
  4853. assert(vernum <= maxvernum);
  4854. vda = (const Elf_Verdaux *)((const char *)vd + vd->vd_aux);
  4855. obj->vertab[vernum].hash = vd->vd_hash;
  4856. obj->vertab[vernum].name = obj->strtab + vda->vda_name;
  4857. obj->vertab[vernum].file = NULL;
  4858. obj->vertab[vernum].flags = 0;
  4859. }
  4860. if (vd->vd_next == 0)
  4861. break;
  4862. vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
  4863. }
  4864. vn = obj->verneed;
  4865. while (vn != NULL) {
  4866. depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
  4867. if (depobj == NULL)
  4868. return (-1);
  4869. vna = (const Elf_Vernaux *)((const char *)vn + vn->vn_aux);
  4870. for (;;) {
  4871. if (check_object_provided_version(obj, depobj, vna))
  4872. return (-1);
  4873. vernum = VER_NEED_IDX(vna->vna_other);
  4874. assert(vernum <= maxvernum);
  4875. obj->vertab[vernum].hash = vna->vna_hash;
  4876. obj->vertab[vernum].name = obj->strtab + vna->vna_name;
  4877. obj->vertab[vernum].file = obj->strtab + vn->vn_file;
  4878. obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
  4879. VER_INFO_HIDDEN : 0;
  4880. if (vna->vna_next == 0)
  4881. break;
  4882. vna = (const Elf_Vernaux *)((const char *)vna + vna->vna_next);
  4883. }
  4884. if (vn->vn_next == 0)
  4885. break;
  4886. vn = (const Elf_Verneed *)((const char *)vn + vn->vn_next);
  4887. }
  4888. return 0;
  4889. }
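/*
 * For example (illustrative names): an object linked against a library that
 * provides version FBSD_1.5 carries an Elf_Verneed entry naming that library
 * and an Elf_Vernaux entry naming FBSD_1.5; the second pass above records
 * the version hash, the name "FBSD_1.5" and the providing file (say,
 * "libc.so.7") at index VER_NEED_IDX(vna->vna_other) of obj->vertab.
 */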
  4890. static int
  4891. rtld_verify_versions(const Objlist *objlist)
  4892. {
  4893. Objlist_Entry *entry;
  4894. int rc;
  4895. rc = 0;
  4896. STAILQ_FOREACH(entry, objlist, link) {
  4897. /*
  4898. * Skip dummy objects or objects that have their version requirements
  4899. * already checked.
  4900. */
  4901. if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
  4902. continue;
  4903. if (rtld_verify_object_versions(entry->obj) == -1) {
  4904. rc = -1;
  4905. if (ld_tracing == NULL)
  4906. break;
  4907. }
  4908. }
  4909. if (rc == 0 || ld_tracing != NULL)
  4910. rc = rtld_verify_object_versions(&obj_rtld);
  4911. return rc;
  4912. }
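/*
 * Return the version entry selected by the versym table for symbol number
 * "symnum" in "obj", or NULL if the object has no version table or the
 * symbol carries no version requirement.
 */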
  4913. const Ver_Entry *
  4914. fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
  4915. {
  4916. Elf_Versym vernum;
  4917. if (obj->vertab) {
  4918. vernum = VER_NDX(obj->versyms[symnum]);
  4919. if (vernum >= obj->vernum) {
  4920. _rtld_error("%s: symbol %s has wrong verneed value %d",
4921. obj->path, obj->strtab + obj->symtab[symnum].st_name, vernum);
  4922. } else if (obj->vertab[vernum].hash != 0) {
  4923. return &obj->vertab[vernum];
  4924. }
  4925. }
  4926. return NULL;
  4927. }
  4928. int
  4929. _rtld_get_stack_prot(void)
  4930. {
  4931. return (stack_prot);
  4932. }
  4933. int
  4934. _rtld_is_dlopened(void *arg)
  4935. {
  4936. Obj_Entry *obj;
  4937. RtldLockState lockstate;
  4938. int res;
  4939. rlock_acquire(rtld_bind_lock, &lockstate);
  4940. obj = dlcheck(arg);
  4941. if (obj == NULL)
  4942. obj = obj_from_addr(arg);
  4943. if (obj == NULL) {
  4944. _rtld_error("No shared object contains address");
  4945. lock_release(rtld_bind_lock, &lockstate);
  4946. return (-1);
  4947. }
  4948. res = obj->dlopened ? 1 : 0;
  4949. lock_release(rtld_bind_lock, &lockstate);
  4950. return (res);
  4951. }
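/*
 * Helpers for the PT_GNU_RELRO region of an object: it is made writable
 * while rtld needs to modify it and remapped read-only afterwards.
 */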
  4952. static int
  4953. obj_remap_relro(Obj_Entry *obj, int prot)
  4954. {
  4955. if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size,
  4956. prot) == -1) {
  4957. _rtld_error("%s: Cannot set relro protection to %#x: %s",
  4958. obj->path, prot, rtld_strerror(errno));
  4959. return (-1);
  4960. }
  4961. return (0);
  4962. }
  4963. static int
  4964. obj_disable_relro(Obj_Entry *obj)
  4965. {
  4966. return (obj_remap_relro(obj, PROT_READ | PROT_WRITE));
  4967. }
  4968. static int
  4969. obj_enforce_relro(Obj_Entry *obj)
  4970. {
  4971. return (obj_remap_relro(obj, PROT_READ));
  4972. }
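/*
 * If a loaded object requires an executable stack (PF_X set in
 * max_stack_flags) and the current stack protection lacks PROT_EXEC, ask the
 * threading library to remap the existing thread stacks executable through
 * its __pthread_map_stacks_exec hook.
 */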
  4973. static void
  4974. map_stacks_exec(RtldLockState *lockstate)
  4975. {
  4976. void (*thr_map_stacks_exec)(void);
  4977. if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
  4978. return;
  4979. thr_map_stacks_exec = (void (*)(void))(uintptr_t)
  4980. get_program_var_addr("__pthread_map_stacks_exec", lockstate);
  4981. if (thr_map_stacks_exec != NULL) {
  4982. stack_prot |= PROT_EXEC;
  4983. thr_map_stacks_exec();
  4984. }
  4985. }
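/*
 * Copy the TLS initialization images of objects that were just given static
 * TLS offsets into the static TLS areas of threads that already exist, using
 * the threading library's __pthread_distribute_static_tls hook; each object
 * is marked static_tls_copied so this is done only once.
 */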
  4986. static void
  4987. distribute_static_tls(Objlist *list, RtldLockState *lockstate)
  4988. {
  4989. Objlist_Entry *elm;
  4990. Obj_Entry *obj;
  4991. void (*distrib)(size_t, void *, size_t, size_t);
  4992. distrib = (void (*)(size_t, void *, size_t, size_t))(uintptr_t)
  4993. get_program_var_addr("__pthread_distribute_static_tls", lockstate);
  4994. if (distrib == NULL)
  4995. return;
  4996. STAILQ_FOREACH(elm, list, link) {
  4997. obj = elm->obj;
  4998. if (obj->marker || !obj->tls_done || obj->static_tls_copied)
  4999. continue;
  5000. distrib(obj->tlsoffset, obj->tlsinit, obj->tlsinitsize,
  5001. obj->tlssize);
  5002. obj->static_tls_copied = true;
  5003. }
  5004. }
  5005. void
  5006. symlook_init(SymLook *dst, const char *name)
  5007. {
  5008. bzero(dst, sizeof(*dst));
  5009. dst->name = name;
  5010. dst->hash = elf_hash(name);
  5011. dst->hash_gnu = gnu_hash(name);
  5012. }
  5013. static void
  5014. symlook_init_from_req(SymLook *dst, const SymLook *src)
  5015. {
  5016. dst->name = src->name;
  5017. dst->hash = src->hash;
  5018. dst->hash_gnu = src->hash_gnu;
  5019. dst->ventry = src->ventry;
  5020. dst->flags = src->flags;
  5021. dst->defobj_out = NULL;
  5022. dst->sym_out = NULL;
  5023. dst->lockstate = src->lockstate;
  5024. }
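/*
 * Open the binary to be executed in direct-execution mode.  If
 * search_in_path is true and argv0 contains no '/', each component of $PATH
 * is tried in turn; otherwise argv0 is opened as given.  The resolved path
 * is returned through binpath_res, the open descriptor is the return value,
 * and failure to open anything is fatal.
 */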
  5025. static int
  5026. open_binary_fd(const char *argv0, bool search_in_path,
  5027. const char **binpath_res)
  5028. {
  5029. char *binpath, *pathenv, *pe, *res1;
  5030. const char *res;
  5031. int fd;
  5032. binpath = NULL;
  5033. res = NULL;
  5034. if (search_in_path && strchr(argv0, '/') == NULL) {
  5035. binpath = xmalloc(PATH_MAX);
  5036. pathenv = getenv("PATH");
  5037. if (pathenv == NULL) {
  5038. _rtld_error("-p and no PATH environment variable");
  5039. rtld_die();
  5040. }
  5041. pathenv = strdup(pathenv);
  5042. if (pathenv == NULL) {
  5043. _rtld_error("Cannot allocate memory");
  5044. rtld_die();
  5045. }
  5046. fd = -1;
  5047. errno = ENOENT;
  5048. while ((pe = strsep(&pathenv, ":")) != NULL) {
  5049. if (strlcpy(binpath, pe, PATH_MAX) >= PATH_MAX)
  5050. continue;
  5051. if (binpath[0] != '\0' &&
  5052. strlcat(binpath, "/", PATH_MAX) >= PATH_MAX)
  5053. continue;
  5054. if (strlcat(binpath, argv0, PATH_MAX) >= PATH_MAX)
  5055. continue;
  5056. fd = open(binpath, O_RDONLY | O_CLOEXEC | O_VERIFY);
  5057. if (fd != -1 || errno != ENOENT) {
  5058. res = binpath;
  5059. break;
  5060. }
  5061. }
  5062. free(pathenv);
  5063. } else {
  5064. fd = open(argv0, O_RDONLY | O_CLOEXEC | O_VERIFY);
  5065. res = argv0;
  5066. }
  5067. if (fd == -1) {
  5068. _rtld_error("Cannot open %s: %s", argv0, rtld_strerror(errno));
  5069. rtld_die();
  5070. }
  5071. if (res != NULL && res[0] != '/') {
  5072. res1 = xmalloc(PATH_MAX);
  5073. if (realpath(res, res1) != NULL) {
  5074. if (res != argv0)
  5075. free(__DECONST(char *, res));
  5076. res = res1;
  5077. } else {
  5078. free(res1);
  5079. }
  5080. }
  5081. *binpath_res = res;
  5082. return (fd);
  5083. }
  5084. /*
  5085. * Parse a set of command-line arguments.
  5086. */
  5087. static int
  5088. parse_args(char* argv[], int argc, bool *use_pathp, int *fdp,
  5089. const char **argv0)
  5090. {
  5091. const char *arg;
  5092. char machine[64];
  5093. size_t sz;
  5094. int arglen, fd, i, j, mib[2];
  5095. char opt;
  5096. bool seen_b, seen_f;
  5097. dbg("Parsing command-line arguments");
  5098. *use_pathp = false;
  5099. *fdp = -1;
  5100. seen_b = seen_f = false;
5101. for (i = 1; i < argc; i++) {
  5102. arg = argv[i];
  5103. dbg("argv[%d]: '%s'", i, arg);
  5104. /*
  5105. * rtld arguments end with an explicit "--" or with the first
  5106. * non-prefixed argument.
  5107. */
  5108. if (strcmp(arg, "--") == 0) {
  5109. i++;
  5110. break;
  5111. }
  5112. if (arg[0] != '-')
  5113. break;
  5114. /*
  5115. * All other arguments are single-character options that can
  5116. * be combined, so we need to search through `arg` for them.
  5117. */
  5118. arglen = strlen(arg);
  5119. for (j = 1; j < arglen; j++) {
  5120. opt = arg[j];
  5121. if (opt == 'h') {
  5122. print_usage(argv[0]);
  5123. _exit(0);
  5124. } else if (opt == 'b') {
  5125. if (seen_f) {
  5126. _rtld_error("Both -b and -f specified");
  5127. rtld_die();
  5128. }
  5129. i++;
  5130. *argv0 = argv[i];
  5131. seen_b = true;
  5132. break;
  5133. } else if (opt == 'f') {
  5134. if (seen_b) {
  5135. _rtld_error("Both -b and -f specified");
  5136. rtld_die();
  5137. }
  5138. /*
  5139. * -f XX can be used to specify a
  5140. * descriptor for the binary named at
  5141. * the command line (i.e., the later
  5142. * argument will specify the process
  5143. * name but the descriptor is what
  5144. * will actually be executed).
  5145. *
  5146. * -f must be the last option in, e.g., -abcf.
  5147. */
  5148. if (j != arglen - 1) {
  5149. _rtld_error("Invalid options: %s", arg);
  5150. rtld_die();
  5151. }
  5152. i++;
  5153. fd = parse_integer(argv[i]);
  5154. if (fd == -1) {
  5155. _rtld_error(
  5156. "Invalid file descriptor: '%s'",
  5157. argv[i]);
  5158. rtld_die();
  5159. }
  5160. *fdp = fd;
  5161. seen_f = true;
  5162. break;
  5163. } else if (opt == 'p') {
  5164. *use_pathp = true;
  5165. } else if (opt == 'v') {
  5166. machine[0] = '\0';
  5167. mib[0] = CTL_HW;
  5168. mib[1] = HW_MACHINE;
  5169. sz = sizeof(machine);
  5170. sysctl(mib, nitems(mib), machine, &sz, NULL, 0);
  5171. rtld_printf(
  5172. "FreeBSD ld-elf.so.1 %s\n"
  5173. "FreeBSD_version %d\n"
  5174. "Default lib path %s\n"
  5175. "Env prefix %s\n"
  5176. "Hint file %s\n"
  5177. "libmap file %s\n",
  5178. machine,
  5179. __FreeBSD_version, ld_standard_library_path,
  5180. ld_env_prefix, ld_elf_hints_default,
  5181. ld_path_libmap_conf);
  5182. _exit(0);
  5183. } else {
  5184. _rtld_error("Invalid argument: '%s'", arg);
  5185. print_usage(argv[0]);
  5186. rtld_die();
  5187. }
  5188. }
  5189. }
  5190. if (!seen_b)
  5191. *argv0 = argv[i];
  5192. return (i);
  5193. }
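/*
 * Example invocations of the direct-execution mode parsed above
 * (illustrative):
 *
 *	ld-elf.so.1 /bin/ls -l          # run an explicit path
 *	ld-elf.so.1 -p ls -l            # search $PATH for "ls"
 *	ld-elf.so.1 -f 3 /bin/ls -l     # execute already-open descriptor 3;
 *	                                # "/bin/ls" only names the process
 */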
  5194. /*
  5195. * Parse a file descriptor number without pulling in more of libc (e.g. atoi).
  5196. */
  5197. static int
  5198. parse_integer(const char *str)
  5199. {
  5200. static const int RADIX = 10; /* XXXJA: possibly support hex? */
  5201. const char *orig;
  5202. int n;
  5203. char c;
  5204. orig = str;
  5205. n = 0;
  5206. for (c = *str; c != '\0'; c = *++str) {
  5207. if (c < '0' || c > '9')
  5208. return (-1);
  5209. n *= RADIX;
  5210. n += c - '0';
  5211. }
  5212. /* Make sure we actually parsed something. */
  5213. if (str == orig)
  5214. return (-1);
  5215. return (n);
  5216. }
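/*
 * For example: parse_integer("3") returns 3 and parse_integer("42") returns
 * 42, while parse_integer("") and parse_integer("4x") both return -1.
 */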
  5217. static void
  5218. print_usage(const char *argv0)
  5219. {
  5220. rtld_printf(
  5221. "Usage: %s [-h] [-b <exe>] [-f <FD>] [-p] [--] <binary> [<args>]\n"
  5222. "\n"
  5223. "Options:\n"
  5224. " -h Display this help message\n"
  5225. " -b <exe> Execute <exe> instead of <binary>, arg0 is <binary>\n"
  5226. " -f <FD> Execute <FD> instead of searching for <binary>\n"
  5227. " -p Search in PATH for named binary\n"
  5228. " -v Display identification information\n"
  5229. " -- End of RTLD options\n"
  5230. " <binary> Name of process to execute\n"
  5231. " <args> Arguments to the executed process\n", argv0);
  5232. }
  5233. /*
  5234. * Overrides for libc_pic-provided functions.
  5235. */
  5236. int
  5237. __getosreldate(void)
  5238. {
  5239. size_t len;
  5240. int oid[2];
  5241. int error, osrel;
  5242. if (osreldate != 0)
  5243. return (osreldate);
  5244. oid[0] = CTL_KERN;
  5245. oid[1] = KERN_OSRELDATE;
  5246. osrel = 0;
  5247. len = sizeof(osrel);
  5248. error = sysctl(oid, 2, &osrel, &len, NULL, 0);
  5249. if (error == 0 && osrel > 0 && len == sizeof(osrel))
  5250. osreldate = osrel;
  5251. return (osreldate);
  5252. }
  5253. void
  5254. exit(int status)
  5255. {
  5256. _exit(status);
  5257. }
  5258. void (*__cleanup)(void);
  5259. int __isthreaded = 0;
  5260. int _thread_autoinit_dummy_decl = 1;
  5261. /*
  5262. * No unresolved symbols for rtld.
  5263. */
  5264. void
  5265. __pthread_cxa_finalize(struct dl_phdr_info *a __unused)
  5266. {
  5267. }
  5268. const char *
  5269. rtld_strerror(int errnum)
  5270. {
  5271. if (errnum < 0 || errnum >= sys_nerr)
  5272. return ("Unknown error");
  5273. return (sys_errlist[errnum]);
  5274. }
  5275. /*
  5276. * No ifunc relocations.
  5277. */
  5278. void *
  5279. memset(void *dest, int c, size_t len)
  5280. {
  5281. size_t i;
  5282. for (i = 0; i < len; i++)
  5283. ((char *)dest)[i] = c;
  5284. return (dest);
  5285. }
  5286. void
  5287. bzero(void *dest, size_t len)
  5288. {
  5289. size_t i;
  5290. for (i = 0; i < len; i++)
  5291. ((char *)dest)[i] = 0;
  5292. }
  5293. /* malloc */
  5294. void *
  5295. malloc(size_t nbytes)
  5296. {
  5297. return (__crt_malloc(nbytes));
  5298. }
  5299. void *
  5300. calloc(size_t num, size_t size)
  5301. {
  5302. return (__crt_calloc(num, size));
  5303. }
  5304. void
  5305. free(void *cp)
  5306. {
  5307. __crt_free(cp);
  5308. }
  5309. void *
  5310. realloc(void *cp, size_t nbytes)
  5311. {
  5312. return (__crt_realloc(cp, nbytes));
  5313. }
  5314. extern int _rtld_version__FreeBSD_version __exported;
  5315. int _rtld_version__FreeBSD_version = __FreeBSD_version;
  5316. extern char _rtld_version_laddr_offset __exported;
  5317. char _rtld_version_laddr_offset;