
Merge branch 'freebsd/current/master' into hardened/current/master

* freebsd/current/master:
  Remove VMware MSI-X from the PCI blacklist.
  Remove useless code from in6_rmx.c
  vnic: Relax PHY node matching after r336281.
  cxgbe(4): check if the firmware supports FW_RI_FR_NSMR_TPTE_WR work request.
  oce: Tighten input validation in the SIOCGI2C handler.
  Add missing "ereport." prefixes of ZFS events.
  When pmap_enter_{l2,pde}() are called to create a kernel mapping, they are incrementing (and decrementing) the ref_count on kernel page table pages. They should not do this.  Kernel page table pages are expected to have a fixed ref_count.  Address this problem by refactoring pmap_alloc{_l2,pde}() and their callers.  This also eliminates some duplicated code from the callers.
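
To make the refactored calling convention easier to follow before reading the per-file diffs, here is a small stand-alone C sketch of the pattern (a toy user-space model; names such as toy_alloc_pde(), toy_abort_ptp(), and struct toy_page are illustrative only, not kernel APIs): the allocator returns a pointer to the page-directory entry and reports a referenced page table page through an out parameter only for user addresses, so a failed mapping attempt never touches the ref_count of a kernel page table page.

/*
 * Toy user-space model of the new pmap_alloc_pde()/pmap_abort_ptp()
 * pattern; every name here is illustrative, not a kernel API.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define	TOY_MAXUSER_ADDRESS	0x1000UL	/* pretend user/kernel boundary */

struct toy_page {			/* stands in for a page table page */
	int		ref_count;
	unsigned long	pde;		/* the one "PDE" this toy page holds */
};

static unsigned long toy_kernel_pde;	/* preexisting kernel PDE slot */

/*
 * Return a pointer to the PDE for "va".  For user addresses, allocate a
 * page-directory page, take a reference, and report it via *pdpgp; for
 * kernel addresses, return the preexisting PDE and leave *pdpgp NULL so
 * callers never adjust a kernel page table page's ref_count.
 */
static unsigned long *
toy_alloc_pde(unsigned long va, struct toy_page **pdpgp)
{
	struct toy_page *pdpg;

	*pdpgp = NULL;
	if (va >= TOY_MAXUSER_ADDRESS)
		return (&toy_kernel_pde);
	if ((pdpg = calloc(1, sizeof(*pdpg))) == NULL)
		return (NULL);			/* resource shortage */
	pdpg->ref_count = 1;
	*pdpgp = pdpg;
	return (&pdpg->pde);
}

/* Drop the reference taken by toy_alloc_pde() after a failed mapping. */
static void
toy_abort_ptp(struct toy_page *pdpg)
{
	if (--pdpg->ref_count == 0)
		free(pdpg);
}

static int
toy_enter_pde(unsigned long va, unsigned long newpde, bool fail)
{
	struct toy_page *pdpg;
	unsigned long *pde;

	if ((pde = toy_alloc_pde(va, &pdpg)) == NULL)
		return (1);			/* cf. KERN_RESOURCE_SHORTAGE */
	if (fail) {
		/* e.g. PV entry creation failed: undo user references only. */
		if (pdpg != NULL)
			toy_abort_ptp(pdpg);
		return (1);
	}
	*pde = newpde;
	return (0);
}

int
main(void)
{
	printf("user enter, forced failure: %d\n",
	    toy_enter_pde(0x0800, 0x1, true));
	printf("kernel enter:               %d\n",
	    toy_enter_pde(0x2000, 0x2, false));
	printf("kernel PDE afterwards:      %#lx\n", toy_kernel_pde);
	return (0);
}

The pmap_enter_pde() and pmap_enter_l2() hunks below follow the same shape: their cleanup paths call pmap_abort_ptp() or drop a reference only when the returned page pointer is non-NULL.
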
hardened/current/master
HardenedBSD Sync Service, 6 months ago (commit 4a3d8cb3cd)

9 changed files with 238 additions and 259 deletions
  1. sbin/devd/zfs.conf              (+9, -9)
  2. sys/amd64/amd64/pmap.c          (+106, -89)
  3. sys/arm64/arm64/pmap.c          (+100, -76)
  4. sys/dev/cxgbe/t4_main.c         (+8, -0)
  5. sys/dev/oce/oce_if.c            (+8, -9)
  6. sys/dev/pci/pci.c               (+0, -7)
  7. sys/dev/vnic/thunder_bgx_fdt.c  (+7, -4)
  8. sys/netinet/icmp6.h             (+0, -2)
  9. sys/netinet6/in6_rmx.c          (+0, -63)

sbin/devd/zfs.conf  (+9, -9)

@@ -4,55 +4,55 @@

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.checksum";
match "type" "ereport.fs.zfs.checksum";
action "logger -p local7.warn -t ZFS 'checksum mismatch, zpool=$pool path=$vdev_path offset=$zio_offset size=$zio_size'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.io";
match "type" "ereport.fs.zfs.io";
action "logger -p local7.warn -t ZFS 'vdev I/O failure, zpool=$pool path=$vdev_path offset=$zio_offset size=$zio_size error=$zio_err'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.data";
match "type" "ereport.fs.zfs.data";
action "logger -p local7.warn -t ZFS 'pool I/O failure, zpool=$pool error=$zio_err'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.zpool";
match "type" "ereport.fs.zfs.zpool";
action "logger -p local7.err -t ZFS 'failed to load zpool $pool'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.vdev\..*";
match "type" "ereport.fs.zfs.vdev\..*";
action "logger -p local7.err -t ZFS 'vdev problem, zpool=$pool path=$vdev_path type=$type'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.io_failure";
match "type" "ereport.fs.zfs.io_failure";
action "logger -p local7.alert -t ZFS 'catastrophic pool I/O failure, zpool=$pool'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.probe_failure";
match "type" "ereport.fs.zfs.probe_failure";
action "logger -p local7.err -t ZFS 'vdev probe failure, zpool=$pool path=$vdev_path'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.log_replay";
match "type" "ereport.fs.zfs.log_replay";
action "logger -p local7.err -t ZFS 'pool log replay failure, zpool=$pool'";
};

notify 10 {
match "system" "ZFS";
match "type" "fs.zfs.config_cache_write";
match "type" "ereport.fs.zfs.config_cache_write";
action "logger -p local7.warn -t ZFS 'failed to write zpool.cache, zpool=$pool'";
};


sys/amd64/amd64/pmap.c  (+106, -89)

@@ -1208,6 +1208,7 @@ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
vm_offset_t va);

static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
vm_prot_t prot, int mode, int flags);
static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
@@ -1262,7 +1263,7 @@ static void pmap_update_pde_invalidate(pmap_t, vm_offset_t va, pd_entry_t pde);

static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
struct rwlock **lockp);
static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
static pd_entry_t *pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
struct rwlock **lockp);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
struct rwlock **lockp);
@@ -3609,6 +3610,27 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
return (pmap_unwire_ptp(pmap, va, mpte, free));
}

/*
* Release a page table page reference after a failed attempt to create a
* mapping.
*/
static void
pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{
struct spglist free;

SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
/*
* Although "va" was never mapped, paging-structure caches
* could nonetheless have entries that refer to the freed
* page table pages. Invalidate those entries.
*/
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
}

void
pmap_pinit0(pmap_t pmap)
{
@@ -3914,30 +3936,44 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
return (m);
}

static vm_page_t
pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
static pd_entry_t *
pmap_alloc_pde(pmap_t pmap, vm_offset_t va, vm_page_t *pdpgp,
struct rwlock **lockp)
{
vm_pindex_t pdpindex, ptepindex;
pdp_entry_t *pdpe, PG_V;
pd_entry_t *pde;
vm_page_t pdpg;
vm_pindex_t pdpindex;

PG_V = pmap_valid_bit(pmap);

retry:
pdpe = pmap_pdpe(pmap, va);
if (pdpe != NULL && (*pdpe & PG_V) != 0) {
/* Add a reference to the pd page. */
pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
pdpg->ref_count++;
} else {
pde = pmap_pdpe_to_pde(pdpe, va);
if (va < VM_MAXUSER_ADDRESS) {
/* Add a reference to the pd page. */
pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
pdpg->ref_count++;
} else
pdpg = NULL;
} else if (va < VM_MAXUSER_ADDRESS) {
/* Allocate a pd page. */
ptepindex = pmap_pde_pindex(va);
pdpindex = ptepindex >> NPDPEPGSHIFT;
pdpindex = pmap_pde_pindex(va) >> NPDPEPGSHIFT;
pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
if (pdpg == NULL && lockp != NULL)
goto retry;
}
return (pdpg);
if (pdpg == NULL) {
if (lockp != NULL)
goto retry;
else
return (NULL);
}
pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
pde = &pde[pmap_pde_index(va)];
} else
panic("pmap_alloc_pde: missing page table page for va %#lx",
va);
*pdpgp = pdpg;
return (pde);
}

static vm_page_t
@@ -6225,6 +6261,24 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
KERN_SUCCESS);
}

/*
* Returns true if every page table entry in the specified page table page is
* zero.
*/
static bool
pmap_every_pte_zero(vm_paddr_t pa)
{
pt_entry_t *pt_end, *pte;

KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
for (pt_end = pte + NPTEPG; pte < pt_end; pte++) {
if (*pte != 0)
return (false);
}
return (true);
}

/*
* Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
* the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
@@ -6260,8 +6314,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
" in pmap %p", va, pmap);
return (KERN_FAILURE);
}
if ((pdpg = pmap_allocpde(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
NULL : lockp)) == NULL) {
if ((pde = pmap_alloc_pde(pmap, va, &pdpg, (flags &
PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (KERN_RESOURCE_SHORTAGE);
@@ -6273,11 +6327,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
* it could sleep.
*/
if (!pmap_pkru_same(pmap, va, va + NBPDR)) {
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
pmap_abort_ptp(pmap, va, pdpg);
return (KERN_FAILURE);
}
if (va < VM_MAXUSER_ADDRESS && pmap->pm_type == PT_X86) {
@@ -6285,14 +6335,18 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
newpde |= pmap_pkru_get(pmap, va);
}

pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
pde = &pde[pmap_pde_index(va)];
/*
* If there are existing mappings, either abort or remove them.
*/
oldpde = *pde;
if ((oldpde & PG_V) != 0) {
KASSERT(pdpg->ref_count > 1,
KASSERT(pdpg == NULL || pdpg->ref_count > 1,
("pmap_enter_pde: pdpg's reference count is too low"));
if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
pdpg->ref_count--;
if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
VM_MAXUSER_ADDRESS || (oldpde & PG_PS) != 0 ||
!pmap_every_pte_zero(oldpde & PG_FRAME))) {
if (pdpg != NULL)
pdpg->ref_count--;
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (KERN_FAILURE);
@@ -6302,7 +6356,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
if ((oldpde & PG_PS) != 0) {
/*
* The reference to the PD page that was acquired by
* pmap_allocpde() ensures that it won't be freed.
* pmap_alloc_pde() ensures that it won't be freed.
* However, if the PDE resulted from a promotion, then
* a reserved PT page could be freed.
*/
@@ -6316,8 +6370,14 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
pmap_invalidate_all(pmap);
pmap_delayed_invl_finish();
}
vm_page_free_pages_toq(&free, true);
if (va >= VM_MAXUSER_ADDRESS) {
if (va < VM_MAXUSER_ADDRESS) {
vm_page_free_pages_toq(&free, true);
KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
pde));
} else {
KASSERT(SLIST_EMPTY(&free),
("pmap_enter_pde: freed kernel page table page"));

/*
* Both pmap_remove_pde() and pmap_remove_ptes() will
* leave the kernel page table page zero filled.
@@ -6325,26 +6385,16 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
mt = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
if (pmap_insert_pt_page(pmap, mt, false))
panic("pmap_enter_pde: trie insert failed");
} else
KASSERT(*pde == 0, ("pmap_enter_pde: non-zero pde %p",
pde));
}
}

if ((newpde & PG_MANAGED) != 0) {
/*
* Abort this mapping if its PV entry could not be created.
*/
if (!pmap_pv_insert_pde(pmap, va, newpde, flags, lockp)) {
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, pdpg, &free)) {
/*
* Although "va" is not mapped, paging-
* structure caches could nonetheless have
* entries that refer to the freed page table
* pages. Invalidate those entries.
*/
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
if (pdpg != NULL)
pmap_abort_ptp(pmap, va, pdpg);
CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
" in pmap %p", va, pmap);
return (KERN_RESOURCE_SHORTAGE);
@@ -6369,8 +6419,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, u_int flags,
pde_store(pde, newpde);

atomic_add_long(&pmap_pde_mappings, 1);
CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
" in pmap %p", va, pmap);
CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx in pmap %p",
va, pmap);
return (KERN_SUCCESS);
}

@@ -6445,7 +6495,6 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
struct spglist free;
pt_entry_t newpte, *pte, PG_V;

KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
@@ -6502,11 +6551,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pte = vtopte(va);
}
if (*pte) {
if (mpte != NULL) {
if (mpte != NULL)
mpte->ref_count--;
mpte = NULL;
}
return (mpte);
return (NULL);
}

/*
@@ -6514,21 +6561,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
SLIST_INIT(&free);
if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
/*
* Although "va" is not mapped, paging-
* structure caches could nonetheless have
* entries that refer to the freed page table
* pages. Invalidate those entries.
*/
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
mpte = NULL;
}
return (mpte);
if (mpte != NULL)
pmap_abort_ptp(pmap, va, mpte);
return (NULL);
}

/*
@@ -6628,8 +6663,8 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
PMAP_LOCK(pmap);
for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
pa < ptepa + size; pa += NBPDR) {
pdpg = pmap_allocpde(pmap, addr, NULL);
if (pdpg == NULL) {
pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
if (pde == NULL) {
/*
* The creation of mappings below is only an
* optimization. If a page directory page
@@ -6640,8 +6675,6 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
addr += NBPDR;
continue;
}
pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
pde = &pde[pmap_pde_index(addr)];
if ((*pde & PG_V) == 0) {
pde_store(pde, pa | PG_PS | PG_M | PG_A |
PG_U | PG_RW | PG_V);
@@ -6755,7 +6788,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t src_addr)
{
struct rwlock *lock;
struct spglist free;
pml4_entry_t *pml4e;
pdp_entry_t *pdpe;
pd_entry_t *pde, srcptepaddr;
@@ -6826,12 +6858,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (srcptepaddr & PG_PS) {
if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
continue;
dst_pdpg = pmap_allocpde(dst_pmap, addr, NULL);
if (dst_pdpg == NULL)
pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
if (pde == NULL)
break;
pde = (pd_entry_t *)
PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_pdpg));
pde = &pde[pmap_pde_index(addr)];
if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
PMAP_ENTER_NORECLAIM, &lock))) {
@@ -6840,7 +6869,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
PAGE_SIZE);
atomic_add_long(&pmap_pde_mappings, 1);
} else
dst_pdpg->ref_count--;
pmap_abort_ptp(dst_pmap, addr, dst_pdpg);
continue;
}

@@ -6885,19 +6914,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*dst_pte = ptetemp & ~(PG_W | PG_M | PG_A);
pmap_resident_count_inc(dst_pmap, 1);
} else {
SLIST_INIT(&free);
if (pmap_unwire_ptp(dst_pmap, addr, dstmpte,
&free)) {
/*
* Although "addr" is not mapped,
* paging-structure caches could
* nonetheless have entries that refer
* to the freed page table pages.
* Invalidate those entries.
*/
pmap_invalidate_page(dst_pmap, addr);
vm_page_free_pages_toq(&free, true);
}
pmap_abort_ptp(dst_pmap, addr, dstmpte);
goto out;
}
/* Have we copied all of the valid mappings? */

sys/arm64/arm64/pmap.c  (+100, -76)

@@ -332,6 +332,7 @@ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
vm_offset_t va);

static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
static bool pmap_activate_int(pmap_t pmap);
static void pmap_alloc_asid(pmap_t pmap);
static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
@@ -1501,6 +1502,29 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
return (pmap_unwire_l3(pmap, va, mpte, free));
}

/*
* Release a page table page reference after a failed attempt to create a
* mapping.
*/
static void
pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{
struct spglist free;

SLIST_INIT(&free);
if (pmap_unwire_l3(pmap, va, mpte, &free)) {
/*
* Although "va" was never mapped, the TLB could nonetheless
* have intermediate entries that refer to the freed page
* table pages. Invalidate those entries.
*
* XXX redundant invalidation (See _pmap_unwire_l3().)
*/
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
}

void
pmap_pinit0(pmap_t pmap)
{
@@ -1678,27 +1702,41 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
return (m);
}

static vm_page_t
pmap_alloc_l2(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
static pd_entry_t *
pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
struct rwlock **lockp)
{
pd_entry_t *l1;
pd_entry_t *l1, *l2;
vm_page_t l2pg;
vm_pindex_t l2pindex;

retry:
l1 = pmap_l1(pmap, va);
if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
/* Add a reference to the L2 page. */
l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
l2pg->ref_count++;
} else {
l2 = pmap_l1_to_l2(l1, va);
if (va < VM_MAXUSER_ADDRESS) {
/* Add a reference to the L2 page. */
l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
l2pg->ref_count++;
} else
l2pg = NULL;
} else if (va < VM_MAXUSER_ADDRESS) {
/* Allocate a L2 page. */
l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
if (l2pg == NULL && lockp != NULL)
goto retry;
}
return (l2pg);
if (l2pg == NULL) {
if (lockp != NULL)
goto retry;
else
return (NULL);
}
l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
l2 = &l2[pmap_l2_index(va)];
} else
panic("pmap_alloc_l2: missing page table page for va %#lx",
va);
*l2pgp = l2pg;
return (l2);
}

static vm_page_t
@@ -3553,6 +3591,24 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
KERN_SUCCESS);
}

/*
* Returns true if every page table entry in the specified page table is
* zero.
*/
static bool
pmap_every_pte_zero(vm_paddr_t pa)
{
pt_entry_t *pt_end, *pte;

KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));
pte = (pt_entry_t *)PHYS_TO_DMAP(pa);
for (pt_end = pte + Ln_ENTRIES; pte < pt_end; pte++) {
if (*pte != 0)
return (false);
}
return (true);
}

/*
* Tries to create the specified 2MB page mapping. Returns KERN_SUCCESS if
* the mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
@@ -3574,23 +3630,26 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,

PMAP_LOCK_ASSERT(pmap, MA_OWNED);

if ((l2pg = pmap_alloc_l2(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
NULL : lockp)) == NULL) {
if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx in pmap %p",
va, pmap);
return (KERN_RESOURCE_SHORTAGE);
}

l2 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(l2pg));
l2 = &l2[pmap_l2_index(va)];
/*
* If there are existing mappings, either abort or remove them.
*/
if ((old_l2 = pmap_load(l2)) != 0) {
KASSERT(l2pg->ref_count > 1,
KASSERT(l2pg == NULL || l2pg->ref_count > 1,
("pmap_enter_l2: l2pg's ref count is too low"));
if ((flags & PMAP_ENTER_NOREPLACE) != 0) {
l2pg->ref_count--;
CTR2(KTR_PMAP,
"pmap_enter_l2: failure for va %#lx in pmap %p",
va, pmap);
if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
VM_MAXUSER_ADDRESS || (old_l2 & ATTR_DESCR_MASK) ==
L2_BLOCK || !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
if (l2pg != NULL)
l2pg->ref_count--;
CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
" in pmap %p", va, pmap);
return (KERN_FAILURE);
}
SLIST_INIT(&free);
@@ -3600,8 +3659,14 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
else
pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
&free, lockp);
vm_page_free_pages_toq(&free, true);
if (va >= VM_MAXUSER_ADDRESS) {
if (va < VM_MAXUSER_ADDRESS) {
vm_page_free_pages_toq(&free, true);
KASSERT(pmap_load(l2) == 0,
("pmap_enter_l2: non-zero L2 entry %p", l2));
} else {
KASSERT(SLIST_EMPTY(&free),
("pmap_enter_l2: freed kernel page table page"));

/*
* Both pmap_remove_l2() and pmap_remove_l3_range()
* will leave the kernel page table page zero filled.
@@ -3613,9 +3678,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
panic("pmap_enter_l2: trie insert failed");
pmap_clear(l2);
pmap_invalidate_page(pmap, va);
} else
KASSERT(pmap_load(l2) == 0,
("pmap_enter_l2: non-zero L2 entry %p", l2));
}
}

if ((new_l2 & ATTR_SW_MANAGED) != 0) {
@@ -3623,20 +3686,8 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
* Abort this mapping if its PV entry could not be created.
*/
if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
SLIST_INIT(&free);
if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
/*
* Although "va" is not mapped, the TLB could
* nonetheless have intermediate entries that
* refer to the freed page table pages.
* Invalidate those entries.
*
* XXX redundant invalidation (See
* _pmap_unwire_l3().)
*/
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
if (l2pg != NULL)
pmap_abort_ptp(pmap, va, l2pg);
CTR2(KTR_PMAP,
"pmap_enter_l2: failure for va %#lx in pmap %p",
va, pmap);
@@ -3737,7 +3788,6 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
{
struct spglist free;
pd_entry_t *pde;
pt_entry_t *l2, *l3, l3_val;
vm_paddr_t pa;
@@ -3811,11 +3861,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* Abort if a mapping already exists.
*/
if (pmap_load(l3) != 0) {
if (mpte != NULL) {
if (mpte != NULL)
mpte->ref_count--;
mpte = NULL;
}
return (mpte);
return (NULL);
}

/*
@@ -3823,15 +3871,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
if ((m->oflags & VPO_UNMANAGED) == 0 &&
!pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
if (mpte != NULL) {
SLIST_INIT(&free);
if (pmap_unwire_l3(pmap, va, mpte, &free)) {
pmap_invalidate_page(pmap, va);
vm_page_free_pages_toq(&free, true);
}
mpte = NULL;
}
return (mpte);
if (mpte != NULL)
pmap_abort_ptp(pmap, va, mpte);
return (NULL);
}

/*
@@ -3985,7 +4027,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t src_addr)
{
struct rwlock *lock;
struct spglist free;
pd_entry_t *l0, *l1, *l2, srcptepaddr;
pt_entry_t *dst_pte, mask, nbits, ptetemp, *src_pte;
vm_offset_t addr, end_addr, va_next;
@@ -4028,12 +4069,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if ((addr & L2_OFFSET) != 0 ||
addr + L2_SIZE > end_addr)
continue;
dst_l2pg = pmap_alloc_l2(dst_pmap, addr, NULL);
if (dst_l2pg == NULL)
l2 = pmap_alloc_l2(dst_pmap, addr, &dst_l2pg, NULL);
if (l2 == NULL)
break;
l2 = (pd_entry_t *)
PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dst_l2pg));
l2 = &l2[pmap_l2_index(addr)];
if (pmap_load(l2) == 0 &&
((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
@@ -4047,7 +4085,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
PAGE_SIZE);
atomic_add_long(&pmap_l2_mappings, 1);
} else
dst_l2pg->ref_count--;
pmap_abort_ptp(dst_pmap, addr, dst_l2pg);
continue;
}
KASSERT((srcptepaddr & ATTR_DESCR_MASK) == L2_TABLE,
@@ -4094,21 +4132,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
pmap_resident_count_inc(dst_pmap, 1);
} else {
SLIST_INIT(&free);
if (pmap_unwire_l3(dst_pmap, addr, dstmpte,
&free)) {
/*
* Although "addr" is not mapped,
* the TLB could nonetheless have
* intermediate entries that refer
* to the freed page table pages.
* Invalidate those entries.
*
* XXX redundant invalidation
*/
pmap_invalidate_page(dst_pmap, addr);
vm_page_free_pages_toq(&free, true);
}
pmap_abort_ptp(dst_pmap, addr, dstmpte);
goto out;
}
/* Have we copied all of the valid mappings? */

sys/dev/cxgbe/t4_main.c  (+8, -0)

@@ -4379,6 +4379,14 @@ get_params__post_init(struct adapter *sc)
else
sc->params.ulptx_memwrite_dsgl = false;

/* FW_RI_FR_NSMR_TPTE_WR support */
param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
if (rc == 0)
sc->params.fr_nsmr_tpte_wr_support = val[0] != 0;
else
sc->params.fr_nsmr_tpte_wr_support = false;

/* get capabilites */
bzero(&caps, sizeof(caps));
caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |

sys/dev/oce/oce_if.c  (+8, -9)

@@ -593,28 +593,27 @@ oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
if (rc)
break;

if (i2c.dev_addr != PAGE_NUM_A0 &&
i2c.dev_addr != PAGE_NUM_A2) {
if (i2c.dev_addr == PAGE_NUM_A0) {
offset = i2c.offset;
} else if (i2c.dev_addr == PAGE_NUM_A2) {
offset = TRANSCEIVER_A0_SIZE + i2c.offset;
} else {
rc = EINVAL;
break;
}

if (i2c.len > sizeof(i2c.data)) {
if (i2c.len > sizeof(i2c.data) ||
i2c.len + offset > sizeof(sfp_vpd_dump_buffer)) {
rc = EINVAL;
break;
}

rc = oce_mbox_read_transrecv_data(sc, i2c.dev_addr);
if(rc) {
if (rc) {
rc = -rc;
break;
}

if (i2c.dev_addr == PAGE_NUM_A0)
offset = i2c.offset;
else
offset = TRANSCEIVER_A0_SIZE + i2c.offset;

memcpy(&i2c.data[0], &sfp_vpd_dump_buffer[offset], i2c.len);

rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));

sys/dev/pci/pci.c  (+0, -7)

@@ -274,13 +274,6 @@ static const struct pci_quirk pci_quirks[] = {
*/
{ 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },

/*
* MSI-X allocation doesn't work properly for devices passed through
* by VMware up to at least ESXi 5.1.
*/
{ 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */
{ 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */

/*
* Some virtualization environments emulate an older chipset
* but support MSI just fine. QEMU uses the Intel 82440.

sys/dev/vnic/thunder_bgx_fdt.c  (+7, -4)

@@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/ctype.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@@ -151,6 +152,7 @@ bgx_fdt_phy_name_match(struct bgx *bgx, char *phy_name, ssize_t size)
{
const char *type;
ssize_t sz;
char last;

switch (bgx->qlm_mode) {
case QLM_MODE_SGMII:
@@ -193,10 +195,11 @@ bgx_fdt_phy_name_match(struct bgx *bgx, char *phy_name, ssize_t size)

if (sz > size)
return (FALSE);
if (strncmp(phy_name, type, sz - 1) == 0 &&
(phy_name[sz - 1] == '\0' || phy_name[sz - 1] == '@'))
return (TRUE);

if (strncmp(phy_name, type, sz - 1) == 0) {
last = phy_name[sz - 1];
if (last == '\0' || last == '@' || isdigit(last))
return (TRUE);
}
return (FALSE);
}


sys/netinet/icmp6.h  (+0, -2)

@@ -687,8 +687,6 @@ void kmod_icmp6stat_inc(int statnum);
#define ICMPV6CTL_NODEINFO_OLDMCPREFIX 25
#define ICMPV6CTL_MAXID 26

#define RTF_PROBEMTU RTF_PROTO1

#ifdef _KERNEL
# ifdef __STDC__
struct rtentry;

sys/netinet6/in6_rmx.c  (+0, -63)

@@ -152,65 +152,9 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_head *head,
return (rn_addroute(v_arg, n_arg, head, treenodes));
}

/*
* Age old PMTUs.
*/
struct mtuex_arg {
struct rib_head *rnh;
time_t nextstop;
};
VNET_DEFINE_STATIC(struct callout, rtq_mtutimer);
#define V_rtq_mtutimer VNET(rtq_mtutimer)

static int
in6_mtuexpire(struct rtentry *rt, void *rock)
{
struct mtuex_arg *ap = rock;

if (rt->rt_expire && !(rt->rt_flags & RTF_PROBEMTU)) {
if (rt->rt_expire <= time_uptime) {
rt->rt_flags |= RTF_PROBEMTU;
} else {
ap->nextstop = lmin(ap->nextstop, rt->rt_expire);
}
}

return (0);
}

#define MTUTIMO_DEFAULT (60*1)

static void
in6_mtutimo_setwa(struct rib_head *rnh, uint32_t fibum, int af,
void *_arg)
{
struct mtuex_arg *arg;

arg = (struct mtuex_arg *)_arg;

arg->rnh = rnh;
}

static void
in6_mtutimo(void *rock)
{
CURVNET_SET_QUIET((struct vnet *) rock);
struct timeval atv;
struct mtuex_arg arg;

rt_foreach_fib_walk(AF_INET6, in6_mtutimo_setwa, in6_mtuexpire, &arg);

atv.tv_sec = MTUTIMO_DEFAULT;
atv.tv_usec = 0;
callout_reset(&V_rtq_mtutimer, tvtohz(&atv), in6_mtutimo, rock);
CURVNET_RESTORE();
}

/*
* Initialize our routing tree.
*/
VNET_DEFINE_STATIC(int, _in6_rt_was_here);
#define V__in6_rt_was_here VNET(_in6_rt_was_here)

int
in6_inithead(void **head, int off)
@@ -224,12 +168,6 @@ in6_inithead(void **head, int off)
rh->rnh_addaddr = in6_addroute;
*head = (void *)rh;

if (V__in6_rt_was_here == 0) {
callout_init(&V_rtq_mtutimer, 1);
in6_mtutimo(curvnet); /* kick off timeout first time */
V__in6_rt_was_here = 1;
}

return (1);
}

@@ -238,7 +176,6 @@ int
in6_detachhead(void **head, int off)
{

callout_drain(&V_rtq_mtutimer);
rt_table_destroy((struct rib_head *)(*head));

return (1);
