
Merge branch 'freebsd/current/master' into hardened/current/master

* freebsd/current/master:
  Unbreak build. It seems that mips and amd64 still pull in link_elf.c, so we need to have elf_cpu_parse_dynamic() everywhere after all to avoid an undefined symbol.
  sleep(9), sleepqueue(9): const'ify wchan pointers
  [PowerPC] powerpc32 rtld IFUNC handling code
  [PowerPC] powerpc64 rtld IFUNC handling code
  [PowerPC64] Use ld.bfd to build LIB32 and STAND - when using llvm
  [PowerPC] Implement Secure-PLT jump table processing for ppc32.
  Convert the mpr driver to use busdma templates.
  Bump __FreeBSD_version for the addition of busdma templates.
  Introduce the concept of busdma tag templates. A template can be allocated off the stack, initialized to default values, and then filled in with driver-specific values, all without having to worry about the numerous other fields in the tag. The resulting template is then passed into busdma and the normal opaque tag object created.  See the man page for details on how to initialize a template.
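A minimal sketch of the intended driver-side flow, modeled on the mpr(4) conversion included in this merge; the softc fields and size below are illustrative stand-ins, not any particular driver:

	bus_dma_tag_template_t t;

	/* Start from the defaults, inheriting restrictions from the parent tag. */
	bus_dma_template_init(&t, sc->parent_dmat);

	/* Override only the fields this allocation actually cares about. */
	t.alignment = 4;
	t.lowaddr   = BUS_SPACE_MAXADDR_32BIT;
	t.maxsize   = t.maxsegsize = rsize;
	t.nsegments = 1;

	/* Unpack the template into a normal opaque tag (wraps bus_dma_tag_create()). */
	if (bus_dma_template_tag(&t, &sc->reply_dmat) != 0)
		return (ENOMEM);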
hardened/current/master
HardenedBSD Sync Service · 5 months ago · commit 973644f16d
45 changed files with 1249 additions and 364 deletions
  1. +227 -95   libexec/rtld-elf/powerpc/reloc.c
  2. +8 -2      libexec/rtld-elf/powerpc/rtld_machdep.h
  3. +229 -80   libexec/rtld-elf/powerpc64/reloc.c
  4. +8 -2      libexec/rtld-elf/powerpc64/rtld_machdep.h
  5. +65 -0     share/man/man9/bus_dma.9
  6. +9 -9      share/man/man9/sleep.9
  7. +15 -15    share/man/man9/sleepqueue.9
  8. +10 -1     share/mk/bsd.compat.mk
  9. +14 -0     share/mk/bsd.cpu.mk
  10. +4 -0     stand/defs.mk
  11. +7 -0     sys/amd64/amd64/elf_machdep.c
  12. +51 -0    sys/arm/arm/busdma_machdep-v4.c
  13. +51 -0    sys/arm/arm/busdma_machdep-v6.c
  14. +7 -0     sys/arm/arm/elf_machdep.c
  15. +54 -0    sys/arm64/arm64/busdma_machdep.c
  16. +7 -0     sys/arm64/arm64/elf_machdep.c
  17. +1 -1     sys/ddb/db_ps.c
  18. +43 -79   sys/dev/mpr/mpr.c
  19. +3 -11    sys/dev/mpr/mpr_pci.c
  20. +6 -11    sys/dev/mpr/mpr_user.c
  21. +7 -0     sys/i386/i386/elf_machdep.c
  22. +1 -1     sys/kern/kern_clock.c
  23. +1 -1     sys/kern/kern_lock.c
  24. +1 -1     sys/kern/kern_proc.c
  25. +1 -1     sys/kern/kern_sx.c
  26. +6 -6     sys/kern/kern_synch.c
  27. +1 -1     sys/kern/link_elf.c
  28. +23 -23   sys/kern/subr_sleepqueue.c
  29. +51 -0    sys/mips/mips/busdma_machdep.c
  30. +7 -0     sys/mips/mips/elf_machdep.c
  31. +51 -0    sys/powerpc/powerpc/busdma_machdep.c
  32. +57 -0    sys/powerpc/powerpc/elf32_machdep.c
  33. +7 -0     sys/powerpc/powerpc/elf64_machdep.c
  34. +54 -0    sys/riscv/riscv/busdma_machdep.c
  35. +7 -0     sys/riscv/riscv/elf_machdep.c
  36. +51 -0    sys/sparc64/sparc64/bus_machdep.c
  37. +7 -0     sys/sparc64/sparc64/elf_machdep.c
  38. +18 -0    sys/sys/bus_dma.h
  39. +1 -0     sys/sys/linker.h
  40. +1 -1     sys/sys/param.h
  41. +1 -1     sys/sys/proc.h
  42. +16 -16   sys/sys/sleepqueue.h
  43. +5 -5     sys/sys/systm.h
  44. +1 -1     sys/sys/user.h
  45. +54 -0    sys/x86/x86/busdma_machdep.c

+227 -95  libexec/rtld-elf/powerpc/reloc.c

@@ -166,82 +166,84 @@ static int
reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
const Elf_Rela *rela, SymCache *cache, int flags, RtldLockState *lockstate)
{
Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr tmp;
const Elf_Sym *def = NULL;
const Obj_Entry *defobj;
Elf_Addr *where, symval = 0;

/*
* First, resolve symbol for relocations which
* reference symbols.
*/
switch (ELF_R_TYPE(rela->r_info)) {

case R_PPC_NONE:
break;

case R_PPC_ADDR32: /* word32 S + A */
case R_PPC_GLOB_DAT: /* word32 S + A */
case R_PPC_UADDR32: /* word32 S + A */
case R_PPC_ADDR32:
case R_PPC_GLOB_DAT: /* word32 S + A */
case R_PPC_DTPMOD32:
case R_PPC_TPREL32:
case R_PPC_DTPREL32:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL) {
return (-1);
}

tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
rela->r_addend);

/* Don't issue write if unnecessary; avoid COW page fault */
if (*where != tmp) {
*where = tmp;
}
break;

case R_PPC_RELATIVE: /* word32 B + A */
tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);

/* As above, don't issue write unnecessarily */
if (*where != tmp) {
*where = tmp;
}
break;

case R_PPC_COPY:
/*
* These are deferred until all other relocations
* have been done. All we do here is make sure
* that the COPY relocation is not in a shared
* library. They are allowed only in executable
* files.
* If symbol is IFUNC, only perform relocation
* when caller allowed it by passing
* SYMLOOK_IFUNC flag. Skip the relocations
* otherwise.
*
* Also error out in case IFUNC relocations
* are specified for TLS, which cannot be
* usefully interpreted.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_COPY "
" relocation in shared library",
obj->path);
return (-1);
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
switch (ELF_R_TYPE(rela->r_info)) {
case R_PPC_UADDR32:
case R_PPC_ADDR32:
case R_PPC_GLOB_DAT:
if ((flags & SYMLOOK_IFUNC) == 0) {
dbg("Non-PLT reference to IFUNC found!");
obj->non_plt_gnu_ifunc = true;
return (0);
}
symval = (Elf_Addr)rtld_resolve_ifunc(
defobj, def);
break;
default:
_rtld_error("%s: IFUNC for TLS reloc",
obj->path);
return (-1);
}
} else {
if ((flags & SYMLOOK_IFUNC) != 0)
return (0);
symval = (Elf_Addr)defobj->relocbase +
def->st_value;
}
break;
default:
if ((flags & SYMLOOK_IFUNC) != 0)
return (0);
}
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

case R_PPC_JMP_SLOT:
/*
* These will be handled by the plt/jmpslot routines
*/
switch (ELF_R_TYPE(rela->r_info)) {
case R_PPC_NONE:
break;
case R_PPC_UADDR32:
case R_PPC_ADDR32:
case R_PPC_GLOB_DAT:
/* Don't issue write if unnecessary; avoid COW page fault */
if (*where != symval + rela->r_addend) {
*where = symval + rela->r_addend;
}
break;

case R_PPC_DTPMOD32:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);

if (def == NULL)
return (-1);

*where = (Elf_Addr) defobj->tlsindex;

break;

case R_PPC_TPREL32:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);

if (def == NULL)
return (-1);

/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
@@ -262,27 +264,52 @@ reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
+ (Elf_Addr *)(def->st_value + rela->r_addend
+ defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);
break;
case R_PPC_DTPREL32:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);

if (def == NULL)
return (-1);

*where += (Elf_Addr)(def->st_value + rela->r_addend
- TLS_DTV_OFFSET);
break;
case R_PPC_RELATIVE: /* word32 B + A */
symval = (Elf_Addr)(obj->relocbase + rela->r_addend);

/* As above, don't issue write unnecessarily */
if (*where != symval) {
*where = symval;
}
break;
case R_PPC_COPY:
/*
* These are deferred until all other relocations
* have been done. All we do here is make sure
* that the COPY relocation is not in a shared
* library. They are allowed only in executable
* files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_COPY "
" relocation in shared library",
obj->path);
return (-1);
}
break;
case R_PPC_IRELATIVE:
/*
* These will be handled by reloc_iresolve().
*/
obj->irelative = true;
break;
case R_PPC_JMP_SLOT:
/*
* These will be handled by the plt/jmpslot routines
*/
break;

default:
_rtld_error("%s: Unsupported relocation type %d"
" in non-PLT relocations\n", obj->path,
ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}

@@ -300,10 +327,6 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
SymCache *cache;
int r = -1;

if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);

/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
@@ -404,7 +427,6 @@ reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
return (0);
}


/*
* Process the PLT relocations.
*/
@@ -420,6 +442,17 @@ reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
dbg("ABI violation - found IRELATIVE in the PLT.");
obj->irelative = true;
continue;
}

/*
* PowerPC(64) .rela.plt is composed of an array of
* R_PPC_JMP_SLOT relocations. Unlike other platforms,
* this is the ONLY relocation type that is valid here.
*/
assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);

if (reloc_plt_object(obj, rela) < 0) {
@@ -438,7 +471,6 @@ reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
return (0);
}


/*
* LD_BIND_NOW was set - force relocation for all jump slots
*/
@@ -455,6 +487,9 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
/* This isn't actually a jump slot, ignore it. */
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE)
continue;
assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
@@ -466,15 +501,18 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)

target = (Elf_Addr)(defobj->relocbase + def->st_value);

#if 0
/* PG XXX */
dbg("\"%s\" in \"%s\" --> %p in \"%s\"",
defobj->strtab + def->st_name, basename(obj->path),
(void *)target, basename(defobj->path));
#endif

reloc_jmpslot(where, target, defobj, obj,
(const Elf_Rel *) rela);
if (def == &sym_zero) {
/* Zero undefined weak symbols */
*where = 0;
} else {
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
/* LD_BIND_NOW, ifunc in shared lib.*/
obj->gnu_ifunc = true;
continue;
}
reloc_jmpslot(where, target, defobj, obj,
(const Elf_Rel *) rela);
}
}

obj->jmpslots_done = true;
@@ -484,9 +522,7 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)


/*
* Update the value of a PLT jump slot. Branch directly to the target if
* it is within +/- 32Mb, otherwise go indirectly via the pltcall
* trampoline call and jump table.
* Update the value of a PLT jump slot.
*/
Elf_Addr
reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target,
@@ -501,22 +537,32 @@ reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target,
if (ld_bind_not)
goto out;


/*
* At the PLT entry pointed at by `wherep', construct
* a direct transfer to the now fully resolved function
* address.
* Process Secure-PLT.
*/
offset = target - (Elf_Addr)wherep;

if (obj->gotptr != NULL) {
assert(wherep >= (Elf_Word *)obj->pltgot);
assert(wherep <
(Elf_Word *)obj->pltgot + obj->pltrelasize);
*wherep = target;
if (*wherep != target)
*wherep = target;
goto out;
}

/*
* BSS-PLT optimization:
* Branch directly to the target if it is within +/- 32Mb,
* otherwise go indirectly via the pltcall trampoline call and
* jump table.
*/
offset = target - (Elf_Addr)wherep;
if (abs((int)offset) < 32*1024*1024) { /* inside 32MB? */
/*
* At the PLT entry pointed at by `wherep', construct
* a direct transfer to the now fully resolved function
* address.
*/
/* b value # branch directly */
*wherep = 0x48000000 | (offset & 0x03fffffc);
__syncicache(wherep, 4);
@@ -557,11 +603,52 @@ out:
}

int
reloc_iresolve(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
reloc_iresolve(Obj_Entry *obj,
struct Struct_RtldLockState *lockstate)
{
/*
* Since PLT slots on PowerPC are always R_PPC_JMP_SLOT,
* R_PPC_IRELATIVE is in RELA.
*/
const Elf_Rela *relalim;
const Elf_Rela *rela;
Elf_Addr *where, target, *ptr;

if (!obj->irelative)
return (0);

relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);

*where = target;
}
}
/*
* XXX Remove me when lld is fixed!
* LLD currently makes illegal relocations in the PLT.
*/
relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);

*where = target;
}
}

/* XXX not implemented */
obj->irelative = false;
return (0);
}

@@ -569,8 +656,32 @@ int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
Elf_Addr *where, target;
const Elf_Sym *def;
const Obj_Entry *defobj;

/* XXX not implemented */
if (!obj->gnu_ifunc)
return (0);
relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT) {
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
SYMLOOK_IN_PLT | flags, NULL, lockstate);
if (def == NULL)
return (-1);
if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
continue;
lock_release(rtld_bind_lock, lockstate);
target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
wlock_acquire(rtld_bind_lock, lockstate);
reloc_jmpslot(where, target, defobj, obj,
(const Elf_Rel *)rela);
}
}
obj->gnu_ifunc = false;
return (0);
}

@@ -664,6 +775,27 @@ init_pltgot(Obj_Entry *obj)
*/
}

/*
* 32 bit cpu feature flag fields.
*/
u_long cpu_features;
u_long cpu_features2;

void
powerpc_abi_variant_hook(Elf_Auxinfo** aux_info)
{
/*
* Since aux_info[] is easier to work with than aux, go ahead and
* initialize cpu_features / cpu_features2.
*/
cpu_features = -1UL;
cpu_features2 = -1UL;
if (aux_info[AT_HWCAP] != NULL)
cpu_features = aux_info[AT_HWCAP]->a_un.a_val;
if (aux_info[AT_HWCAP2] != NULL)
cpu_features2 = aux_info[AT_HWCAP2]->a_un.a_val;
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

+8 -2  libexec/rtld-elf/powerpc/rtld_machdep.h

@@ -53,8 +53,13 @@ void reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase);
#define call_init_pointer(obj, target) \
(((InitArrFunc)(target))(main_argc, main_argv, environ))

extern u_long cpu_features; /* r3 */
extern u_long cpu_features2; /* r4 */
/* r5-10: ifunc resolver parameters reserved for future assignment. */
#define call_ifunc_resolver(ptr) \
(((Elf_Addr (*)(void))ptr)())
(((Elf_Addr (*)(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, \
uint32_t, uint32_t, uint32_t))ptr)((uint32_t)cpu_features, \
(uint32_t)cpu_features2, 0, 0, 0, 0, 0, 0))

/*
* PLT functions. Not really correct prototypes, but the
@@ -91,6 +96,7 @@ extern void *__tls_get_addr(tls_index* ti);
#define RTLD_DEFAULT_STACK_PF_EXEC PF_X
#define RTLD_DEFAULT_STACK_EXEC PROT_EXEC

#define md_abi_variant_hook(x)
extern void powerpc_abi_variant_hook(Elf_Auxinfo **);
#define md_abi_variant_hook(x) powerpc_abi_variant_hook(x)

#endif

+229 -80  libexec/rtld-elf/powerpc64/reloc.c

@@ -33,6 +33,7 @@

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <errno.h>
#include <stdio.h>
@@ -160,83 +161,84 @@ static int
reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
const Elf_Rela *rela, SymCache *cache, int flags, RtldLockState *lockstate)
{
Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
const Elf_Sym *def;
const Obj_Entry *defobj;
Elf_Addr tmp;
const Elf_Sym *def = NULL;
const Obj_Entry *defobj;
Elf_Addr *where, symval = 0;

/*
* First, resolve symbol for relocations which
* reference symbols.
*/
switch (ELF_R_TYPE(rela->r_info)) {

case R_PPC_NONE:
break;
case R_PPC64_UADDR64: /* doubleword64 S + A */
case R_PPC64_ADDR64:
case R_PPC_GLOB_DAT:
case R_PPC64_UADDR64: /* doubleword64 S + A */
case R_PPC64_ADDR64:
case R_PPC_GLOB_DAT:
case R_PPC64_DTPMOD64:
case R_PPC64_TPREL64:
case R_PPC64_DTPREL64:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);
if (def == NULL) {
return (-1);
}

tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
rela->r_addend);

/* Don't issue write if unnecessary; avoid COW page fault */
if (*where != tmp) {
*where = tmp;
}
break;

case R_PPC_RELATIVE: /* doubleword64 B + A */
tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);

/* As above, don't issue write unnecessarily */
if (*where != tmp) {
*where = tmp;
}
break;

case R_PPC_COPY:
/*
* These are deferred until all other relocations
* have been done. All we do here is make sure
* that the COPY relocation is not in a shared
* library. They are allowed only in executable
* files.
* If symbol is IFUNC, only perform relocation
* when caller allowed it by passing
* SYMLOOK_IFUNC flag. Skip the relocations
* otherwise.
*
* Also error out in case IFUNC relocations
* are specified for TLS, which cannot be
* usefully interpreted.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_COPY "
" relocation in shared library",
obj->path);
return (-1);
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
switch (ELF_R_TYPE(rela->r_info)) {
case R_PPC64_UADDR64:
case R_PPC64_ADDR64:
case R_PPC_GLOB_DAT:
if ((flags & SYMLOOK_IFUNC) == 0) {
dbg("Non-PLT reference to IFUNC found!");
obj->non_plt_gnu_ifunc = true;
return (0);
}
symval = (Elf_Addr)rtld_resolve_ifunc(
defobj, def);
break;
default:
_rtld_error("%s: IFUNC for TLS reloc",
obj->path);
return (-1);
}
} else {
if ((flags & SYMLOOK_IFUNC) != 0)
return (0);
symval = (Elf_Addr)defobj->relocbase +
def->st_value;
}
break;
default:
if ((flags & SYMLOOK_IFUNC) != 0)
return (0);
}

case R_PPC_JMP_SLOT:
/*
* These will be handled by the plt/jmpslot routines
*/
break;
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

switch (ELF_R_TYPE(rela->r_info)) {
case R_PPC_NONE:
break;
case R_PPC64_UADDR64:
case R_PPC64_ADDR64:
case R_PPC_GLOB_DAT:
/* Don't issue write if unnecessary; avoid COW page fault */
if (*where != symval + rela->r_addend) {
*where = symval + rela->r_addend;
}
break;
case R_PPC64_DTPMOD64:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);

if (def == NULL)
return (-1);

*where = (Elf_Addr) defobj->tlsindex;

break;

case R_PPC64_TPREL64:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);

if (def == NULL)
return (-1);

/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
@@ -257,19 +259,44 @@ reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
+ (Elf_Addr *)(def->st_value + rela->r_addend
+ defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);

break;

case R_PPC64_DTPREL64:
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
flags, cache, lockstate);

if (def == NULL)
return (-1);

*where += (Elf_Addr)(def->st_value + rela->r_addend
- TLS_DTV_OFFSET);
break;
case R_PPC_RELATIVE: /* doubleword64 B + A */
symval = (Elf_Addr)(obj->relocbase + rela->r_addend);

/* As above, don't issue write unnecessarily */
if (*where != symval) {
*where = symval;
}
break;
case R_PPC_COPY:
/*
* These are deferred until all other relocations
* have been done. All we do here is make sure
* that the COPY relocation is not in a shared
* library. They are allowed only in executable
* files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_COPY "
" relocation in shared library",
obj->path);
return (-1);
}
break;
case R_PPC_IRELATIVE:
/*
* These will be handled by reloc_iresolve().
*/
obj->irelative = true;
break;
case R_PPC_JMP_SLOT:
/*
* These will be handled by the plt/jmpslot routines
*/
break;

default:
@@ -277,7 +304,7 @@ reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
" in non-PLT relocations\n", obj->path,
ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}

@@ -296,10 +323,6 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
int bytes = obj->dynsymcount * sizeof(SymCache);
int r = -1;

if ((flags & SYMLOOK_IFUNC) != 0)
/* XXX not implemented */
return (0);

/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
@@ -365,13 +388,13 @@ reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
8*((reloff < 0x8000) ? reloff : 0x8000) +
12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
#else
/* 64-Bit ELF V2 ABI Specification, sec. 4.2.5.3. */
*where = (Elf_Addr)obj->glink + 4*reloff + 32;
#endif

return (0);
}


/*
* Process the PLT relocations.
*/
@@ -385,6 +408,19 @@ reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {

#if defined(_CALL_ELF) && _CALL_ELF == 2
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
dbg("ABI violation - found IRELATIVE in the PLT.");
obj->irelative = true;
continue;
}
#endif
/*
* PowerPC(64) .rela.plt is composed of an array of
* R_PPC_JMP_SLOT relocations. Unlike other platforms,
* this is the ONLY relocation type that is valid here.
*/
assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);

if (reloc_plt_object(obj, rela) < 0) {
@@ -396,7 +432,6 @@ reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
return (0);
}


/*
* LD_BIND_NOW was set - force relocation for all jump slots
*/
@@ -413,6 +448,9 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
/* This isn't actually a jump slot, ignore it. */
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE)
continue;
assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
@@ -432,6 +470,11 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
*where = 0;
#endif
} else {
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
/* LD_BIND_NOW, ifunc in shared lib.*/
obj->gnu_ifunc = true;
continue;
}
reloc_jmpslot(where, target, defobj, obj,
(const Elf_Rel *) rela);
}
@@ -494,34 +537,119 @@ reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj __unuse
((struct funcdesc *)(wherep))->toc +=
(Elf_Addr)defobj->relocbase;
}
out:
#else
dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
(void *)target);

if (!ld_bind_not)
assert(target >= (Elf_Addr)defobj->relocbase);

if (ld_bind_not)
goto out;

if (*wherep != target)
*wherep = target;

#endif
out:

return (target);
}

int
reloc_iresolve(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
reloc_iresolve(Obj_Entry *obj,
struct Struct_RtldLockState *lockstate)
{

/*
* Since PLT slots on PowerPC64 are always R_PPC_JMP_SLOT,
* R_PPC_IRELATIVE is in RELA.
*/
#if !defined(_CALL_ELF) || _CALL_ELF == 1
(void)(obj);
(void)(lockstate);
/* XXX not implemented */
return (0);
#else
const Elf_Rela *relalim;
const Elf_Rela *rela;
Elf_Addr *where, target, *ptr;

if (!obj->irelative)
return (0);

relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);

*where = target;
}
}
/*
* XXX Remove me when lld is fixed!
* LLD currently makes illegal relocations in the PLT.
*/
relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);

lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);

*where = target;
}
}

obj->irelative = false;
return (0);
#endif
}

int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)
{

#if !defined(_CALL_ELF) || _CALL_ELF == 1
_rtld_error("reloc_gnu_ifunc(): Not implemented!");
/* XXX not implemented */
return (-1);
#else

const Elf_Rela *relalim;
const Elf_Rela *rela;
Elf_Addr *where, target;
const Elf_Sym *def;
const Obj_Entry *defobj;

if (!obj->gnu_ifunc)
return (0);
relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT) {
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
SYMLOOK_IN_PLT | flags, NULL, lockstate);
if (def == NULL)
return (-1);
if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
continue;
lock_release(rtld_bind_lock, lockstate);
target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
wlock_acquire(rtld_bind_lock, lockstate);
reloc_jmpslot(where, target, defobj, obj,
(const Elf_Rel *)rela);
}
}
obj->gnu_ifunc = false;
return (0);
#endif
}

void
@@ -544,6 +672,27 @@ init_pltgot(Obj_Entry *obj)
#endif
}

/*
* Actual values are 32 bit.
*/
u_long cpu_features;
u_long cpu_features2;

void
powerpc64_abi_variant_hook(Elf_Auxinfo** aux_info)
{
/*
* Since aux_info[] is easier to work with than aux, go ahead and
* initialize cpu_features / cpu_features2.
*/
cpu_features = -1UL;
cpu_features2 = -1UL;
if (aux_info[AT_HWCAP] != NULL)
cpu_features = (uint32_t)aux_info[AT_HWCAP]->a_un.a_val;
if (aux_info[AT_HWCAP2] != NULL)
cpu_features2 = (uint32_t)aux_info[AT_HWCAP2]->a_un.a_val;
}

void
ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
{

+8 -2  libexec/rtld-elf/powerpc64/rtld_machdep.h

@@ -53,8 +53,13 @@ void reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase);
#define call_init_pointer(obj, target) \
(((InitArrFunc)(target))(main_argc, main_argv, environ))

extern u_long cpu_features; /* r3 */
extern u_long cpu_features2; /* r4 */
/* r5-r10: ifunc resolver parameters reserved for future assignment. */
#define call_ifunc_resolver(ptr) \
(((Elf_Addr (*)(void))ptr)())
(((Elf_Addr (*)(uint32_t, uint32_t, uint64_t, uint64_t, uint64_t, \
uint64_t, uint64_t, uint64_t))ptr)((uint32_t)cpu_features, \
(uint32_t)cpu_features2, 0, 0, 0, 0, 0, 0))

/*
* TLS
@@ -83,6 +88,7 @@ extern void *__tls_get_addr(tls_index* ti);
#define RTLD_DEFAULT_STACK_PF_EXEC PF_X
#define RTLD_DEFAULT_STACK_EXEC PROT_EXEC

#define md_abi_variant_hook(x)
extern void powerpc64_abi_variant_hook(Elf_Auxinfo **);
#define md_abi_variant_hook(x) powerpc64_abi_variant_hook(x)

#endif
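To illustrate the calling convention introduced above (this is not code from the merge), here is a hypothetical STT_GNU_IFUNC resolver that keys off the AT_HWCAP word rtld now forwards in r3. The function names are invented, and PPC_FEATURE_HAS_VSX is assumed to be provided by <machine/cpu.h>:

	#include <machine/cpu.h>	/* assumed to define PPC_FEATURE_HAS_VSX */
	#include <stddef.h>
	#include <stdint.h>

	static size_t memfill_scalar(void *buf, int c, size_t len);
	static size_t memfill_vsx(void *buf, int c, size_t len);

	/* rtld passes cpu_features (AT_HWCAP) and cpu_features2 (AT_HWCAP2). */
	static size_t (*memfill_resolver(uint32_t hwcap, uint32_t hwcap2))
	    (void *, int, size_t)
	{
		/* Only the first word is consulted in this sketch. */
		(void)hwcap2;
		return ((hwcap & PPC_FEATURE_HAS_VSX) != 0 ?
		    memfill_vsx : memfill_scalar);
	}

	/* Bind the public symbol to whichever implementation the resolver picks. */
	size_t memfill(void *buf, int c, size_t len)
	    __attribute__((ifunc("memfill_resolver")));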

+65 -0  share/man/man9/bus_dma.9

@@ -60,6 +60,9 @@
.Nm bus_dma ,
.Nm bus_dma_tag_create ,
.Nm bus_dma_tag_destroy ,
.Nm bus_dma_template_init ,
.Nm bus_dma_template_tag ,
.Nm bus_dma_template_clone ,
.Nm bus_dmamap_create ,
.Nm bus_dmamap_destroy ,
.Nm bus_dmamap_load ,
@@ -83,6 +86,21 @@
"void *lockfuncarg" "bus_dma_tag_t *dmat"
.Ft int
.Fn bus_dma_tag_destroy "bus_dma_tag_t dmat"
.Ft void
.Fo bus_dma_template_init
.Fa "bus_dma_template_t template"
.Fa "bus_dma_tag_t parent"
.Fc
.Ft int
.Fo bus_dma_template_tag
.Fa "bus_dma_template_t template"
.Fa "bus_dma_tag_t *dmat"
.Fc
.Ft void
.Fo bus_dma_template_clone
.Fa "bus_dma_template_t template"
.Fa "bus_dma_tag_t dmat"
.Fc
.Ft int
.Fn bus_dmamap_create "bus_dma_tag_t dmat" "int flags" "bus_dmamap_t *mapp"
.Ft int
@@ -282,6 +300,34 @@ DMA tags are organized into a hierarchy, with each child
tag inheriting the restrictions of its parent.
This allows all devices along the path of DMA transactions
to contribute to the constraints of those transactions.
.It Vt bus_dma_template_t
A template structure for creating a
.Fa bus_dma_tag_t
from a set of defaults.
Once initialized with
.Fn bus_dma_template_init ,
a driver can over-ride individual fields to suit its needs.
The following fields have the indicated values:
.Bd -literal
alignment 1
boundary 0
lowaddr BUS_SPACE_MAXADDR
highaddr BUS_SPACE_MAXADDR
maxsize BUS_SPACE_MAXSIZE
nsegments BUS_SPACE_UNRESTRICTED
maxsegsize BUS_SPACE_MAXSIZE
flags 0
lockfunc NULL
lockfuncarg NULL
.Ed
.Pp
Descriptions of each field are documented with
.Fn bus_dma_tag_create .
Note that the
.Fa filtfunc
and
.Fa filtfuncarg
attributes of the DMA tag are not supported with templates.
.It Vt bus_dma_filter_t
Client specified address filter having the format:
.Bl -tag -width indent
@@ -633,6 +679,25 @@ if any DMA maps remain associated with
or
.Ql 0
on success.
.It Fn bus_dma_template_init "*template" "parent"
Initializes a
.Fa bus_dma_template_t
structure and associates it with an optional
.Fa parent .
The
.Fa parent
argument may be NULL.
.It Fn bus_dma_template_tag "*template" "*dmat"
Unpacks a template into a tag, and returns the tag via the
.Fa dmat .
All return values are identical to
.Fn bus_dma_tag_create .
.It Fn bus_dma_template_clone "*template" "dmat"
Clones the fields from a tag to a template.
This is useful for cloning tags when paired with
.Fn bus_dma_template_tag .
A template that is filled in as a clone does not need to be initialized
first.
.It Fn bus_dmamap_create "dmat" "flags" "*mapp"
Allocates and initializes a DMA map.
Arguments are as follows:
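A brief sketch of the clone path documented above, mirroring the mpr(4) sense-buffer change later in this diff (the type name follows the C code in this merge, and the softc fields and size are stand-ins): copy an existing tag into a template, adjust only what differs, and create the derived tag.

	bus_dma_tag_template_t t;

	/* No bus_dma_template_init() needed: clone copies every field from req_dmat. */
	bus_dma_template_clone(&t, sc->req_dmat);
	t.maxsize = t.maxsegsize = rsize;	/* only the size differs */
	if (bus_dma_template_tag(&t, &sc->sense_dmat) != 0)
		return (ENOMEM);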

+9 -9  share/man/man9/sleep.9

@@ -47,14 +47,14 @@
.In sys/systm.h
.In sys/proc.h
.Ft int
.Fn msleep "void *chan" "struct mtx *mtx" "int priority" "const char *wmesg" "int timo"
.Fn msleep "const void *chan" "struct mtx *mtx" "int priority" "const char *wmesg" "int timo"
.Ft int
.Fn msleep_sbt "void *chan" "struct mtx *mtx" "int priority" \
.Fn msleep_sbt "const void *chan" "struct mtx *mtx" "int priority" \
"const char *wmesg" "sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft int
.Fn msleep_spin "void *chan" "struct mtx *mtx" "const char *wmesg" "int timo"
.Fn msleep_spin "const void *chan" "struct mtx *mtx" "const char *wmesg" "int timo"
.Ft int
.Fn msleep_spin_sbt "void *chan" "struct mtx *mtx" "const char *wmesg" \
.Fn msleep_spin_sbt "const void *chan" "struct mtx *mtx" "const char *wmesg" \
"sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft int
.Fn pause "const char *wmesg" "int timo"
@@ -64,16 +64,16 @@
.Fn pause_sbt "const char *wmesg" "sbintime_t sbt" "sbintime_t pr" \
"int flags"
.Ft int
.Fn tsleep "void *chan" "int priority" "const char *wmesg" "int timo"
.Fn tsleep "const void *chan" "int priority" "const char *wmesg" "int timo"
.Ft int
.Fn tsleep_sbt "void *chan" "int priority" "const char *wmesg" \
.Fn tsleep_sbt "const void *chan" "int priority" "const char *wmesg" \
"sbintime_t sbt" "sbintime_t pr" "int flags"
.Ft void
.Fn wakeup "void *chan"
.Fn wakeup "const void *chan"
.Ft void
.Fn wakeup_one "void *chan"
.Fn wakeup_one "const void *chan"
.Ft void
.Fn wakeup_any "void *chan"
.Fn wakeup_any "const void *chan"
.Sh DESCRIPTION
The functions
.Fn tsleep ,

+15 -15  share/man/man9/sleepqueue.9

@@ -54,40 +54,40 @@
.Ft int
.Fn sleepq_abort "struct thread *td"
.Ft void
.Fn sleepq_add "void *wchan" "struct lock_object *lock" "const char *wmesg" "int flags" "int queue"
.Fn sleepq_add "const void *wchan" "struct lock_object *lock" "const char *wmesg" "int flags" "int queue"
.Ft struct sleepqueue *
.Fn sleepq_alloc "void"
.Ft int
.Fn sleepq_broadcast "void *wchan" "int flags" "int pri" "int queue"
.Fn sleepq_broadcast "const void *wchan" "int flags" "int pri" "int queue"
.Ft void
.Fn sleepq_free "struct sleepqueue *sq"
.Ft struct sleepqueue *
.Fn sleepq_lookup "void *wchan"
.Fn sleepq_lookup "const void *wchan"
.Ft void
.Fn sleepq_lock "void *wchan"
.Fn sleepq_lock "const void *wchan"
.Ft void
.Fn sleepq_release "void *wchan"
.Fn sleepq_release "const void *wchan"
.Ft void
.Fn sleepq_remove "struct thread *td" "void *wchan"
.Fn sleepq_remove "struct thread *td" "const void *wchan"
.Ft int
.Fn sleepq_signal "void *wchan" "int flags" "int pri" "int queue"
.Fn sleepq_signal "const void *wchan" "int flags" "int pri" "int queue"
.Ft void
.Fn sleepq_set_timeout "void *wchan" "int timo"
.Fn sleepq_set_timeout "const void *wchan" "int timo"
.Ft void
.Fn sleepq_set_timeout_sbt "void *wchan" "sbintime_t sbt" \
.Fn sleepq_set_timeout_sbt "const void *wchan" "sbintime_t sbt" \
"sbintime_t pr" "int flags"
.Ft u_int
.Fn sleepq_sleepcnt "void *wchan" "int queue"
.Fn sleepq_sleepcnt "const void *wchan" "int queue"
.Ft int
.Fn sleepq_timedwait "void *wchan" "int pri"
.Fn sleepq_timedwait "const void *wchan" "int pri"
.Ft int
.Fn sleepq_timedwait_sig "void *wchan" "int pri"
.Fn sleepq_timedwait_sig "const void *wchan" "int pri"
.Ft int
.Fn sleepq_type "void *wchan"
.Fn sleepq_type "const void *wchan"
.Ft void
.Fn sleepq_wait "void *wchan" "int pri"
.Fn sleepq_wait "const void *wchan" "int pri"
.Ft int
.Fn sleepq_wait_sig "void *wchan" "int pri"
.Fn sleepq_wait_sig "const void *wchan" "int pri"
.Sh DESCRIPTION
Sleep queues provide a mechanism for suspending execution of a thread until
some condition is met.

+10 -1  share/mk/bsd.compat.mk

@@ -47,11 +47,20 @@ LIB32CPUFLAGS= -mcpu=powerpc
.else
LIB32CPUFLAGS= -mcpu=${COMPAT_CPUTYPE}
.endif

.if ${COMPAT_COMPILER_TYPE} == "gcc"
LIB32CPUFLAGS+= -m32
.else
LIB32CPUFLAGS+= -target powerpc-unknown-freebsd13.0

# Use BFD to workaround ld.lld issues on PowerPC 32 bit
LIB32CPUFLAGS+= -fuse-ld=${LD_BFD}
.endif

LIB32_MACHINE= powerpc
LIB32_MACHINE_ARCH= powerpc
LIB32WMAKEFLAGS= \
LD="${XLD} -m elf32ppc_fbsd"
LD="${LD_BFD} -m elf32ppc_fbsd"

.elif ${COMPAT_ARCH:Mmips64*} != ""
HAS_COMPAT=32

+14 -0  share/mk/bsd.cpu.mk

@@ -412,3 +412,17 @@ CFLAGS_NO_SIMD += ${CFLAGS_NO_SIMD.${COMPILER_TYPE}}
# These come from make.conf or the command line or the environment.
CFLAGS += ${CFLAGS.${MACHINE_ARCH}}
CXXFLAGS += ${CXXFLAGS.${MACHINE_ARCH}}


# Defines a variable for Binutils linker, to be used to workaround some
# issue with LLVM LLD (i.e. support for PowerPC32 bit on PowerPC64)
#
# This is an unavoidable cross coupling with Makefile.inc1 and
# normal builds works when CROSS_BINUTILS_PREFIX and could be removed
# when LLD PowerPC 32 bit support is completed
.if defined(CROSS_BINUTILS_PREFIX)
LD_BFD=${LOCALBASE}/bin/${CROSS_BINUTILS_PREFIX}-ld.bfd
.else
LD_BFD=${OBJTOP}/tmp/usr/bin/ld.bfd
.endif


+4 -0  stand/defs.mk

@@ -103,6 +103,10 @@ CFLAGS+= -DLOADER_DISK_SUPPORT
# or powerpc64.
.if ${MACHINE_ARCH} == "powerpc64"
CFLAGS+= -m32 -mcpu=powerpc
# Use ld.bfd to workaround ld.lld issues on PowerPC 32 bit
.if "${COMPILER_TYPE}" == "clang" && "${LINKER_TYPE}" == "lld"
CFLAGS+= -fuse-ld=${LD_BFD}
.endif
.endif

# For amd64, there's a bit of mixed bag. Some of the tree (i386, lib*32) is

+7 -0  sys/amd64/amd64/elf_machdep.c

@@ -335,3 +335,10 @@ elf_cpu_unload_file(linker_file_t lf __unused)

return (0);
}

int
elf_cpu_parse_dynamic(linker_file_t lf __unused, Elf_Dyn *dynamic __unused)
{

return (0);
}

+51 -0  sys/arm/arm/busdma_machdep-v4.c

@@ -501,6 +501,57 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}

void
bus_dma_template_init(bus_dma_tag_template_t *t, bus_dma_tag_t parent)
{

if (t == NULL)
return;

t->parent = parent;
t->alignment = 1;
t->boundary = 0;
t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
t->nsegments = BUS_SPACE_UNRESTRICTED;
t->lockfunc = NULL;
t->lockfuncarg = NULL;
t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_tag_template_t *t, bus_dma_tag_t *dmat)
{

if (t == NULL || dmat == NULL)
return (EINVAL);

return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
dmat));
}

void
bus_dma_template_clone(bus_dma_tag_template_t *t, bus_dma_tag_t dmat)
{

if (t == NULL || dmat == NULL)
return;

t->parent = dmat->parent;
t->alignment = dmat->alignment;
t->boundary = dmat->boundary;
t->lowaddr = dmat->lowaddr;
t->highaddr = dmat->highaddr;
t->maxsize = dmat->maxsize;
t->nsegments = dmat->nsegments;
t->maxsegsize = dmat->maxsegsz;
t->flags = dmat->flags;
t->lockfunc = dmat->lockfunc;
t->lockfuncarg = dmat->lockfuncarg;
}

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

+51 -0  sys/arm/arm/busdma_machdep-v6.c

@@ -575,6 +575,57 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}

void
bus_dma_template_init(bus_dma_tag_template_t *t, bus_dma_tag_t parent)
{

if (t == NULL)
return;

t->parent = parent;
t->alignment = 1;
t->boundary = 0;
t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
t->nsegments = BUS_SPACE_UNRESTRICTED;
t->lockfunc = NULL;
t->lockfuncarg = NULL;
t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_tag_template_t *t, bus_dma_tag_t *dmat)
{

if (t == NULL || dmat == NULL)
return (EINVAL);

return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
dmat));
}

void
bus_dma_template_clone(bus_dma_tag_template_t *t, bus_dma_tag_t dmat)
{

if (t == NULL || dmat == NULL)
return;

t->parent = dmat->parent;
t->alignment = dmat->alignment;
t->boundary = dmat->boundary;
t->lowaddr = dmat->lowaddr;
t->highaddr = dmat->highaddr;
t->maxsize = dmat->maxsize;
t->nsegments = dmat->nsegments;
t->maxsegsize = dmat->maxsegsz;
t->flags = dmat->flags;
t->lockfunc = dmat->lockfunc;
t->lockfuncarg = dmat->lockfuncarg;
}

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

+7 -0  sys/arm/arm/elf_machdep.c

@@ -324,6 +324,13 @@ elf_cpu_load_file(linker_file_t lf)
return (0);
}

int
elf_cpu_parse_dynamic(linker_file_t lf __unused, Elf_Dyn *dynamic __unused)
{

return (0);
}

int
elf_cpu_unload_file(linker_file_t lf)
{

+54 -0  sys/arm64/arm64/busdma_machdep.c

@@ -214,6 +214,60 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}

void
bus_dma_template_init(bus_dma_tag_template_t *t, bus_dma_tag_t parent)
{

if (t == NULL)
return;

t->parent = parent;
t->alignment = 1;
t->boundary = 0;
t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
t->nsegments = BUS_SPACE_UNRESTRICTED;
t->lockfunc = NULL;
t->lockfuncarg = NULL;
t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_tag_template_t *t, bus_dma_tag_t *dmat)
{

if (t == NULL || dmat == NULL)
return (EINVAL);

return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
dmat));
}

void
bus_dma_template_clone(bus_dma_tag_template_t *t, bus_dma_tag_t dmat)
{
struct bus_dma_tag_common *common;

if (t == NULL || dmat == NULL)
return;

common = (struct bus_dma_tag_common *)dmat;

t->parent = (bus_dma_tag_t)common->parent;
t->alignment = common->alignment;
t->boundary = common->boundary;
t->lowaddr = common->lowaddr;
t->highaddr = common->highaddr;
t->maxsize = common->maxsize;
t->nsegments = common->nsegments;
t->maxsegsize = common->maxsegsz;
t->flags = common->flags;
t->lockfunc = common->lockfunc;
t->lockfuncarg = common->lockfuncarg;
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{

+7 -0  sys/arm64/arm64/elf_machdep.c

@@ -219,3 +219,10 @@ elf_cpu_unload_file(linker_file_t lf __unused)

return (0);
}

int
elf_cpu_parse_dynamic(linker_file_t lf __unused, Elf_Dyn *dynamic __unused)
{

return (0);
}

+1 -1  sys/ddb/db_ps.c

@@ -267,7 +267,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td, int all)
{
char state[9], wprefix;
const char *wmesg;
void *wchan;
const void *wchan;
if (all) {
db_printf("%6d ", td->td_tid);

+43 -79  sys/dev/mpr/mpr.c

@@ -1311,6 +1311,7 @@ mpr_alloc_queues(struct mpr_softc *sc)
static int
mpr_alloc_hw_queues(struct mpr_softc *sc)
{
bus_dma_tag_template_t t;
bus_addr_t queues_busaddr;
uint8_t *queues;
int qsize, fqsize, pqsize;
@@ -1332,17 +1333,12 @@ mpr_alloc_hw_queues(struct mpr_softc *sc)
pqsize = sc->pqdepth * 8;
qsize = fqsize + pqsize;

if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
qsize, /* maxsize */
1, /* nsegments */
qsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->queues_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.alignment = 16;
t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
t.maxsize = t.maxsegsize = qsize;
t.nsegments = 1;
if (bus_dma_template_tag(&t, &sc->queues_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n");
return (ENOMEM);
}
@@ -1370,6 +1366,7 @@ mpr_alloc_hw_queues(struct mpr_softc *sc)
static int
mpr_alloc_replies(struct mpr_softc *sc)
{
bus_dma_tag_template_t t;
int rsize, num_replies;

/* Store the reply frame size in bytes rather than as 32bit words */
@@ -1383,17 +1380,12 @@ mpr_alloc_replies(struct mpr_softc *sc)
num_replies = max(sc->fqdepth, sc->num_replies);

rsize = sc->replyframesz * num_replies;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
4, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->reply_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.alignment = 4;
t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
t.maxsize = t.maxsegsize = rsize;
t.nsegments = 1;
if (bus_dma_template_tag(&t, &sc->reply_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
return (ENOMEM);
}
@@ -1440,21 +1432,17 @@ mpr_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
static int
mpr_alloc_requests(struct mpr_softc *sc)
{
bus_dma_tag_template_t t;
struct mpr_command *cm;
int i, rsize, nsegs;

rsize = sc->reqframesz * sc->num_reqs;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->req_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.alignment = 16;
t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
t.maxsize = t.maxsegsize = rsize;
t.nsegments = 1;
if (bus_dma_template_tag(&t, &sc->req_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
return (ENOMEM);
}
@@ -1476,17 +1464,11 @@ mpr_alloc_requests(struct mpr_softc *sc)
return (ENOMEM);
}
rsize = sc->chain_frame_size * sc->num_chains;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
16, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
howmany(rsize, PAGE_SIZE), /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->chain_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.alignment = 16;
t.maxsize = t.maxsegsize = rsize;
t.nsegments = howmany(rsize, PAGE_SIZE);
if (bus_dma_template_tag(&t, &sc->chain_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n");
return (ENOMEM);
}
@@ -1504,17 +1486,9 @@ mpr_alloc_requests(struct mpr_softc *sc)
}

rsize = MPR_SENSE_LEN * sc->num_reqs;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sense_dmat)) {
bus_dma_template_clone(&t, sc->req_dmat);
t.maxsize = t.maxsegsize = rsize;
if (bus_dma_template_tag(&t, &sc->sense_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n");
return (ENOMEM);
}
@@ -1540,18 +1514,12 @@ mpr_alloc_requests(struct mpr_softc *sc)
}

nsegs = (sc->maxio / PAGE_SIZE) + 1;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
nsegs, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
busdma_lock_mutex, /* lockfunc */
&sc->mpr_mtx, /* lockarg */
&sc->buffer_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.nsegments = nsegs;
t.flags = BUS_DMA_ALLOCNOW;
t.lockfunc = busdma_lock_mutex;
t.lockfuncarg = &sc->mpr_mtx;
if (bus_dma_template_tag(&t, &sc->buffer_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n");
return (ENOMEM);
}
@@ -1608,9 +1576,10 @@ mpr_alloc_requests(struct mpr_softc *sc)
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
bus_dma_tag_template_t t;
struct mpr_prp_page *prp_page;
int PRPs_per_page, PRPs_required, pages_required;
int rsize, i;
struct mpr_prp_page *prp_page;

/*
* Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
@@ -1637,17 +1606,12 @@ mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)

sc->prp_buffer_size = PAGE_SIZE * pages_required;
rsize = sc->prp_buffer_size * NVME_QDEPTH;
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
4, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
rsize, /* maxsize */
1, /* nsegments */
rsize, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->prp_page_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.alignment = 4;
t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
t.maxsize = t.maxsegsize = rsize;
t.nsegments = 1;
if (bus_dma_template_tag(&t, &sc->prp_page_dmat)) {
mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA "
"tag\n");
return (ENOMEM);

+3 -11  sys/dev/mpr/mpr_pci.c

@@ -220,6 +220,7 @@ mpr_pci_probe(device_t dev)
static int
mpr_pci_attach(device_t dev)
{
bus_dma_tag_template_t t;
struct mpr_softc *sc;
struct mpr_ident *m;
int error, i;
@@ -267,17 +268,8 @@ mpr_pci_attach(device_t dev)
sc->mpr_bhandle = rman_get_bushandle(sc->mpr_regs_resource);

/* Allocate the parent DMA tag */
if (bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->mpr_parent_dmat)) {
bus_dma_template_init(&t, bus_get_dma_tag(dev));
if (bus_dma_template_tag(&t, &sc->mpr_parent_dmat)) {
mpr_printf(sc, "Cannot allocate parent DMA tag\n");
mpr_pci_free(sc);
return (ENOMEM);

+6 -11  sys/dev/mpr/mpr_user.c

@@ -1452,6 +1452,7 @@ static int
mpr_diag_register(struct mpr_softc *sc, mpr_fw_diag_register_t *diag_register,
uint32_t *return_code)
{
bus_dma_tag_template_t t;
mpr_fw_diagnostic_buffer_t *pBuffer;
struct mpr_busdma_context *ctx;
uint8_t extended_type, buffer_type, i;
@@ -1514,17 +1515,11 @@ mpr_diag_register(struct mpr_softc *sc, mpr_fw_diag_register_t *diag_register,
*return_code = MPR_FW_DIAG_ERROR_NO_BUFFER;
return (MPR_DIAG_FAILURE);
}
if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
buffer_size, /* maxsize */
1, /* nsegments */
buffer_size, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->fw_diag_dmat)) {
bus_dma_template_init(&t, sc->mpr_parent_dmat);
t.lowaddr = BUS_SPACE_MAXADDR_32BIT;
t.maxsize = t.maxsegsize = buffer_size;
t.nsegments = 1;
if (bus_dma_template_tag(&t, &sc->fw_diag_dmat)) {
mpr_dprint(sc, MPR_ERROR,
"Cannot allocate FW diag buffer DMA tag\n");
*return_code = MPR_FW_DIAG_ERROR_NO_BUFFER;

+7 -0  sys/i386/i386/elf_machdep.c

@@ -298,3 +298,10 @@ elf_cpu_unload_file(linker_file_t lf __unused)

return (0);
}

int
elf_cpu_parse_dynamic(linker_file_t lf __unused, Elf_Dyn *dynamic __unused)
{

return (0);
}

+1 -1  sys/kern/kern_clock.c

@@ -212,7 +212,7 @@ deadlres_td_on_lock(struct proc *p, struct thread *td, int blkticks)
static void
deadlres_td_sleep_q(struct proc *p, struct thread *td, int slpticks)
{
void *wchan;
const void *wchan;
int i, slptype, tticks;

sx_assert(&allproc_lock, SX_LOCKED);

+1 -1  sys/kern/kern_lock.c

@@ -1733,7 +1733,7 @@ _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
struct lock *lk;
const struct lock *lk;

lk = td->td_wchan;


+1 -1  sys/kern/kern_proc.c

@@ -1297,7 +1297,7 @@ pstats_free(struct pstats *ps)
* it can be replaced by assignment of zero.
*/
static inline uint32_t
ptr32_trim(void *ptr)
ptr32_trim(const void *ptr)
{
uintptr_t uptr;


+1 -1  sys/kern/kern_sx.c

@@ -1526,7 +1526,7 @@ db_show_sx(const struct lock_object *lock)
int
sx_chain(struct thread *td, struct thread **ownerp)
{
struct sx *sx;
const struct sx *sx;

/*
* Check to see if this thread is blocked on an sx lock.

+6 -6  sys/kern/kern_synch.c

@@ -77,7 +77,7 @@ SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
NULL);

int hogticks;
static char pause_wchan[MAXCPU];
static const char pause_wchan[MAXCPU];

static struct callout loadav_callout;

@@ -131,7 +131,7 @@ SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, NULL);
* flag the lock is not re-locked before returning.
*/
int
_sleep(void *ident, struct lock_object *lock, int priority,
_sleep(const void *ident, struct lock_object *lock, int priority,
const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
struct thread *td;
@@ -233,7 +233,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
}

int
msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
msleep_spin_sbt(const void *ident, struct mtx *mtx, const char *wmesg,
sbintime_t sbt, sbintime_t pr, int flags)
{
struct thread *td;
@@ -409,7 +409,7 @@ refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
* Make all threads sleeping on the specified identifier runnable.
*/
void
wakeup(void *ident)
wakeup(const void *ident)
{
int wakeup_swapper;

@@ -429,7 +429,7 @@ wakeup(void *ident)
* swapped out.
*/
void
wakeup_one(void *ident)
wakeup_one(const void *ident)
{
int wakeup_swapper;

@@ -441,7 +441,7 @@ wakeup_one(void *ident)
}

void
wakeup_any(void *ident)
wakeup_any(const void *ident)
{
int wakeup_swapper;


+1 -1  sys/kern/link_elf.c

@@ -611,7 +611,7 @@ parse_dynamic(elf_file_t ef)
ef->ddbstrtab = ef->strtab;
ef->ddbstrcnt = ef->strsz;

return (0);
return elf_cpu_parse_dynamic(&ef->lf, ef->dynamic);
}

#define LS_PADDING 0x90909090

+23 -23  sys/kern/subr_sleepqueue.c

@@ -130,7 +130,7 @@ struct sleepqueue {
u_int sq_blockedcnt[NR_SLEEPQS]; /* (c) N. of blocked threads. */
LIST_ENTRY(sleepqueue) sq_hash; /* (c) Chain and free list. */
LIST_HEAD(, sleepqueue) sq_free; /* (c) Free queues. */
void *sq_wchan; /* (c) Wait channel. */
const void *sq_wchan; /* (c) Wait channel. */
int sq_type; /* (c) Queue type. */
#ifdef INVARIANTS
struct lock_object *sq_lock; /* (c) Associated lock. */
@@ -163,7 +163,7 @@ static uma_zone_t sleepq_zone;
/*
* Prototypes for non-exported routines.
*/
static int sleepq_catch_signals(void *wchan, int pri);
static int sleepq_catch_signals(const void *wchan, int pri);
static inline int sleepq_check_signals(void);
static inline int sleepq_check_timeout(void);
#ifdef INVARIANTS
@@ -173,7 +173,7 @@ static int sleepq_init(void *mem, int size, int flags);
static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
int pri, int srqflags);
static void sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void sleepq_switch(void *wchan, int pri);
static void sleepq_switch(const void *wchan, int pri);
static void sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
@@ -257,7 +257,7 @@ sleepq_free(struct sleepqueue *sq)
* Lock the sleep queue chain associated with the specified wait channel.
*/
void
sleepq_lock(void *wchan)
sleepq_lock(const void *wchan)
{
struct sleepqueue_chain *sc;

@@ -271,7 +271,7 @@ sleepq_lock(void *wchan)
* the table, NULL is returned.
*/
struct sleepqueue *
sleepq_lookup(void *wchan)
sleepq_lookup(const void *wchan)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -289,7 +289,7 @@ sleepq_lookup(void *wchan)
* Unlock the sleep queue chain associated with a given wait channel.
*/
void
sleepq_release(void *wchan)
sleepq_release(const void *wchan)
{
struct sleepqueue_chain *sc;

@@ -304,8 +304,8 @@ sleepq_release(void *wchan)
* woken up.
*/
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
int queue)
sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
int flags, int queue)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -390,7 +390,7 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
* sleep queue after timo ticks if the thread has not already been awakened.
*/
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
int flags)
{
struct sleepqueue_chain *sc __unused;
@@ -419,7 +419,7 @@ sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
* Return the number of actual sleepers for the specified queue.
*/
u_int
sleepq_sleepcnt(void *wchan, int queue)
sleepq_sleepcnt(const void *wchan, int queue)
{
struct sleepqueue *sq;

@@ -438,7 +438,7 @@ sleepq_sleepcnt(void *wchan, int queue)
* may have transitioned from the sleepq lock to a run lock.
*/
static int
sleepq_catch_signals(void *wchan, int pri)
sleepq_catch_signals(const void *wchan, int pri)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -558,7 +558,7 @@ out:
* Returns with thread lock.
*/
static void
sleepq_switch(void *wchan, int pri)
sleepq_switch(const void *wchan, int pri)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -664,7 +664,7 @@ sleepq_check_signals(void)
* Block the current thread until it is awakened from its sleep queue.
*/
void
sleepq_wait(void *wchan, int pri)
sleepq_wait(const void *wchan, int pri)
{
struct thread *td;

@@ -679,7 +679,7 @@ sleepq_wait(void *wchan, int pri)
* or it is interrupted by a signal.
*/
int
sleepq_wait_sig(void *wchan, int pri)
sleepq_wait_sig(const void *wchan, int pri)
{
int rcatch;

@@ -694,7 +694,7 @@ sleepq_wait_sig(void *wchan, int pri)
* or it times out while waiting.
*/
int
sleepq_timedwait(void *wchan, int pri)
sleepq_timedwait(const void *wchan, int pri)
{
struct thread *td;

@@ -712,7 +712,7 @@ sleepq_timedwait(void *wchan, int pri)
* it is interrupted by a signal, or it times out waiting to be awakened.
*/
int
sleepq_timedwait_sig(void *wchan, int pri)
sleepq_timedwait_sig(const void *wchan, int pri)
{
int rcatch, rvalt, rvals;

@@ -731,7 +731,7 @@ sleepq_timedwait_sig(void *wchan, int pri)
* Returns the type of sleepqueue given a waitchannel.
*/
int
sleepq_type(void *wchan)
sleepq_type(const void *wchan)
{
struct sleepqueue *sq;
int type;
@@ -910,7 +910,7 @@ sleepq_init(void *mem, int size, int flags)
* Find thread sleeping on a wait channel and resume it.
*/
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
sleepq_signal(const void *wchan, int flags, int pri, int queue)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -971,7 +971,7 @@ match_any(struct thread *td __unused)
* Resume all threads sleeping on a specified wait channel.
*/
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
{
struct sleepqueue *sq;

@@ -1023,7 +1023,7 @@ sleepq_timeout(void *arg)
struct sleepqueue_chain *sc __unused;
struct sleepqueue *sq;
struct thread *td;
void *wchan;
const void *wchan;
int wakeup_swapper;

td = arg;
@@ -1067,7 +1067,7 @@ sleepq_timeout(void *arg)
* wait channel if it is on that queue.
*/
void
sleepq_remove(struct thread *td, void *wchan)
sleepq_remove(struct thread *td, const void *wchan)
{
struct sleepqueue_chain *sc;
struct sleepqueue *sq;
@@ -1111,7 +1111,7 @@ int
sleepq_abort(struct thread *td, int intrval)
{
struct sleepqueue *sq;
void *wchan;
const void *wchan;

THREAD_LOCK_ASSERT(td, MA_OWNED);
MPASS(TD_ON_SLEEPQ(td));
@@ -1183,7 +1183,7 @@ sleepq_chains_remove_matching(bool (*matches)(struct thread *))
*/
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
int *count_stacks_printed)
{
struct thread *td, *td_next;

+51 -0  sys/mips/mips/busdma_machdep.c

@@ -476,6 +476,57 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}

void
bus_dma_template_init(bus_dma_tag_template_t *t, bus_dma_tag_t parent)
{

if (t == NULL)
return;

t->parent = parent;
t->alignment = 1;
t->boundary = 0;
t->lowaddr = t->highaddr = BUS_SPACE_MAXADDR;
t->maxsize = t->maxsegsize = BUS_SPACE_MAXSIZE;
t->nsegments = BUS_SPACE_UNRESTRICTED;
t->lockfunc = NULL;
t->lockfuncarg = NULL;
t->flags = 0;
}

int
bus_dma_template_tag(bus_dma_tag_template_t *t, bus_dma_tag_t *dmat)
{

if (t == NULL || dmat == NULL)
return (EINVAL);

return (bus_dma_tag_create(t->parent, t->alignment, t->boundary,
t->lowaddr, t->highaddr, NULL, NULL, t->maxsize,
t->nsegments, t->maxsegsize, t->flags, t->lockfunc, t->lockfuncarg,
dmat));
}

void
bus_dma_template_clone(bus_dma_tag_template_t *t, bus_dma_tag_t dmat)
{

if (t == NULL || dmat == NULL)
return;

t->parent = dmat->parent;
t->alignment = dmat->alignment;
t->boundary = dmat->boundary;
t->lowaddr = dmat->lowaddr;
t->highaddr = dmat->highaddr;
t->maxsize = dmat->maxsize;
t->nsegments = dmat->nsegments;
t->maxsegsize = dmat->maxsegsz;
t->flags = dmat->flags;
t->lockfunc = dmat->lockfunc;
t->lockfuncarg = dmat->lockfuncarg;
}

int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{

+7 -0  sys/mips/mips/elf_machdep.c

@@ -505,3 +505,10 @@ elf_cpu_unload_file(linker_file_t lf __unused)

return (0);
}

int
elf_cpu_parse_dynamic(linker_file_t lf __unused, Elf_Dyn *dynamic __unused)
{

return (0);
}

+51 -0  sys/powerpc/powerpc/busdma_machdep.c

@@ -340,6 +340,57 @@ bus_dma_tag_create(bus_dma_tag