Unverified Commit 9ec646d6 authored by Shawn Webb

Merge remote-tracking branch 'upstream/master' into hardened/features/relro

parents 1048661f 41e76c86
......@@ -34,7 +34,7 @@ COMPILER_RT_ABI fp_t __floatditf(di_int a) {
}
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - __builtin_clzll(a);
rep_t result;
// Shift a into the significand field and clear the implicit bit. Extra
......
......@@ -25,7 +25,7 @@ COMPILER_RT_ABI fp_t __floatunditf(du_int a) {
if (a == 0) return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - __builtin_clzll(a);
rep_t result;
// Shift a into the significand field and clear the implicit bit.
......
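Both hunks above are the same class of fix: the argument `a` is a 64-bit `di_int`/`du_int`, but `__builtin_clz` takes an `unsigned int`, so the leading-zero count was taken over a truncated value. `__builtin_clzll` counts over the full 64 bits. A standalone sketch of the corrected exponent computation (not compiler-rt code, just an illustration with a made-up input):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical input: bit 32 set, so the width of abs(a) is 33 */
	uint64_t a = 0x0000000100000000ULL;
	const int aWidth = 64;

	/* __builtin_clzll sees all 64 bits: 31 leading zeros, exponent 32 */
	const int exponent = (aWidth - 1) - __builtin_clzll(a);

	/*
	 * __builtin_clz() takes an unsigned int, so only the low 32 bits
	 * (all zero here) would be examined: undefined behaviour for this
	 * value and, at best, a wrong exponent for anything above
	 * UINT32_MAX.
	 */
	printf("exponent = %d\n", exponent);	/* prints 32 */
	return (0);
}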
......@@ -43,7 +43,8 @@
#define M_USBDEV 0
#define USB_PROC_MAX 3
#define USB_BUS_GIANT_PROC(bus) (usb_process + 2)
#define USB_BUS_NON_GIANT_PROC(bus) (usb_process + 2)
#define USB_BUS_NON_GIANT_BULK_PROC(bus) (usb_process + 2)
#define USB_BUS_NON_GIANT_ISOC_PROC(bus) (usb_process + 2)
#define USB_BUS_EXPLORE_PROC(bus) (usb_process + 0)
#define USB_BUS_CONTROL_XFER_PROC(bus) (usb_process + 1)
#define SYSCTL_DECL(...)
......
......@@ -82,9 +82,9 @@
* types of locks: 1) the hash table lock array, and 2) the
* arc list locks.
*
* Buffers do not have their own mutexs, rather they rely on the
* hash table mutexs for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexs).
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
......@@ -1027,21 +1027,21 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
&l2arc_norw, 0, "no reads during writes");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
&ARC_anon.arcs_size, 0, "size of anonymous state");
&ARC_anon.arcs_size.rc_count, 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
&ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
&ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
&ARC_mru.arcs_size, 0, "size of mru state");
&ARC_mru.arcs_size.rc_count, 0, "size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
&ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
&ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
&ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
&ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
&ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
"size of metadata in mru ghost state");
......@@ -1050,14 +1050,14 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
"size of data in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
&ARC_mfu.arcs_size, 0, "size of mfu state");
&ARC_mfu.arcs_size.rc_count, 0, "size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
&ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
&ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
&ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
&ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
"size of metadata in mfu ghost state");
......@@ -1066,7 +1066,7 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
"size of data in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
&ARC_l2c_only.arcs_size, 0, "size of mru state");
&ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state");
/*
* L2ARC Internals
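The sysctl hunks above switch from `&...arcs_size` to `&...arcs_size.rc_count`, which matches the upstream ARC change that turned `arcs_size` from a plain 64-bit counter into a `refcount_t`; `SYSCTL_UQUAD` still wants the address of a 64-bit integer, so the raw counter field inside the refcount is what gets exported. A minimal user-space stand-in for that pattern (struct layouts trimmed to the fields the diff touches, values invented):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel refcount_t; tracking fields elided */
typedef struct refcount {
	uint64_t rc_count;
} refcount_t;

/* stand-in for arc_state_t; arcs_size was a plain uint64_t before */
typedef struct arc_state {
	refcount_t arcs_size;
} arc_state_t;

static arc_state_t ARC_anon;

int
main(void)
{
	ARC_anon.arcs_size.rc_count = 12345;

	/*
	 * The sysctl macro needs a pointer to a 64-bit integer, so it now
	 * points at the counter inside the refcount, not at the struct.
	 */
	uint64_t *exported = &ARC_anon.arcs_size.rc_count;
	printf("anon_size = %llu\n", (unsigned long long)*exported);
	return (0);
}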
......@@ -2413,6 +2413,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
if (!BUF_EMPTY(hdr))
buf_discard_identity(hdr);
if (hdr->b_freeze_cksum != NULL) {
kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_freeze_cksum = NULL;
......@@ -4655,8 +4656,6 @@ arc_release(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
......@@ -4664,6 +4663,9 @@ arc_release(arc_buf_t *buf, void *tag)
*/
mutex_enter(&buf->b_evict_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
......@@ -5894,6 +5896,7 @@ l2arc_write_done(zio_t *zio)
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
trim_map_free(hdr->b_l2hdr.b_dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, hdr->b_l2hdr.b_asize, 0);
hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
......@@ -6413,14 +6416,6 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
buf_data = hdr->b_l1hdr.b_tmp_cdata;
buf_sz = hdr->b_l2hdr.b_asize;
/*
* If the data has not been compressed, then clear b_tmp_cdata
* to make sure that it points only to a temporary compression
* buffer.
*/
if (!L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr)))
hdr->b_l1hdr.b_tmp_cdata = NULL;
/*
* We need to do this regardless if buf_sz is zero or
* not, otherwise, when this l2hdr is evicted we'll
......@@ -6514,6 +6509,12 @@ l2arc_compress_buf(arc_buf_hdr_t *hdr)
csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata,
cdata, l2hdr->b_asize);
rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
if (rounded > csize) {
bzero((char *)cdata + csize, rounded - csize);
csize = rounded;
}
if (csize == 0) {
/* zero block, indicate that there's nothing to write */
zio_data_buf_free(cdata, len);
......@@ -6522,19 +6523,11 @@ l2arc_compress_buf(arc_buf_hdr_t *hdr)
hdr->b_l1hdr.b_tmp_cdata = NULL;
ARCSTAT_BUMP(arcstat_l2_compress_zeros);
return (B_TRUE);
}
rounded = P2ROUNDUP(csize,
(size_t)1 << l2hdr->b_dev->l2ad_vdev->vdev_ashift);
if (rounded < len) {
} else if (csize > 0 && csize < len) {
/*
* Compression succeeded, we'll keep the cdata around for
* writing and release it afterwards.
*/
if (rounded > csize) {
bzero((char *)cdata + csize, rounded - csize);
csize = rounded;
}
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
l2hdr->b_asize = csize;
hdr->b_l1hdr.b_tmp_cdata = cdata;
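The restructured l2arc_compress_buf() now rounds csize up to a multiple of SPA_MINBLOCKSIZE and zero-fills the padding before testing for the all-zeros and compression-won cases, instead of rounding to the vdev ashift inside the success branch. A standalone sketch of that rounding step, assuming the usual P2ROUNDUP shape from sysmacros.h and a made-up 700-byte compressed size:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* same shape as the illumos/FreeBSD macro; align must be a power of two */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#define	SPA_MINBLOCKSIZE	512	/* 1 << SPA_MINBLOCKSHIFT */

int
main(void)
{
	char cdata[2 * SPA_MINBLOCKSIZE];
	size_t csize = 700;	/* hypothetical compressed size */

	size_t rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
	if (rounded > csize) {
		/* zero the pad so the device never sees stale buffer bytes */
		memset(cdata + csize, 0, rounded - csize);
		csize = rounded;
	}
	printf("csize rounded from 700 to %zu\n", csize);	/* 1024 */
	return (0);
}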
......@@ -6651,6 +6644,7 @@ l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
hdr->b_size);
hdr->b_l1hdr.b_tmp_cdata = NULL;
}
}
/*
......
......@@ -1060,34 +1060,36 @@ dwc_otg_host_rate_check_interrupt(struct dwc_otg_softc *sc, struct dwc_otg_td *t
static uint8_t
dwc_otg_host_rate_check(struct dwc_otg_softc *sc, struct dwc_otg_td *td)
{
uint8_t frame_num = (uint8_t)sc->sc_last_frame_num;
if (td->ep_type == UE_ISOCHRONOUS) {
/* non TT isochronous traffic */
if ((td->tmr_val != 0) ||
(sc->sc_last_frame_num & (td->tmr_res - 1))) {
(frame_num & (td->tmr_res - 1))) {
goto busy;
}
td->tmr_val = 1; /* executed */
td->toggle = 0;
return (0);
} else if (td->ep_type == UE_INTERRUPT) {
if (!td->tt_scheduled)
goto busy;
td->tt_scheduled = 0;
return (0);
} else if (td->did_nak != 0) {
uint8_t frame_num = (uint8_t)sc->sc_last_frame_num;
/* check if we should pause sending queries for 125us */
if (td->tmr_res == frame_num) {
/* wait a bit */
dwc_otg_enable_sof_irq(sc);
goto busy;
}
/* query for data one more time */
td->tmr_res = frame_num;
td->did_nak = 0;
} else if (td->set_toggle) {
td->set_toggle = 0;
td->toggle = 1;
}
/* query for data one more time */
td->tmr_res = frame_num;
td->did_nak = 0;
return (0);
busy:
return (1);
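In the reworked dwc_otg_host_rate_check() the frame number is narrowed to uint8_t once at the top, and the isochronous branch gates traffic on `frame_num & (td->tmr_res - 1)`, a cheap modulo of the frame counter that only works when tmr_res is a power of two. A small self-contained illustration of that masking, assuming a hypothetical interval of 8 frames:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t tmr_res = 8;	/* hypothetical interval, a power of two */

	for (unsigned frame = 0; frame < 32; frame++) {
		uint8_t frame_num = (uint8_t)frame;

		/*
		 * (frame_num & (tmr_res - 1)) == 0 on every tmr_res-th
		 * frame, i.e. the mask acts as "frame_num % tmr_res".
		 */
		if ((frame_num & (tmr_res - 1)) == 0)
			printf("frame %u: slot open\n", frame);
	}
	return (0);
}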
......@@ -1658,7 +1660,11 @@ dwc_otg_host_data_tx(struct dwc_otg_softc *sc, struct dwc_otg_td *td)
td->offset += td->tx_bytes;
td->remainder -= td->tx_bytes;
td->toggle ^= 1;
td->did_nak = 0;
/* check if next response will be a NAK */
if (hcint & HCINT_NYET)
td->did_nak = 1;
else
td->did_nak = 0;
td->tt_scheduled = 0;
/* check remainder */
......@@ -2551,11 +2557,19 @@ static void
dwc_otg_interrupt_poll_locked(struct dwc_otg_softc *sc)
{
struct usb_xfer *xfer;
uint32_t count = 0;
uint32_t count;
uint32_t temp;
uint8_t got_rx_status;
uint8_t x;
if (sc->sc_flags.status_device_mode == 0) {
/*
* Update host transfer schedule, so that new
* transfers can be issued:
*/
dwc_otg_update_host_transfer_schedule_locked(sc);
}
count = 0;
repeat:
if (++count == 16) {
/* give other interrupts a chance */
......@@ -2659,12 +2673,6 @@ dwc_otg_interrupt_poll_locked(struct dwc_otg_softc *sc)
sc->sc_irq_mask &= ~GINTMSK_RXFLVLMSK;
DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
}
if (sc->sc_flags.status_device_mode == 0 && sc->sc_xfer_complete == 0) {
/* update host transfer schedule, so that new transfers can be issued */
if (dwc_otg_update_host_transfer_schedule_locked(sc))
goto repeat;
}
}
static void
......@@ -2944,12 +2952,6 @@ dwc_otg_interrupt(void *arg)
/* complete FIFOs, if any */
dwc_otg_interrupt_complete_locked(sc);
if (sc->sc_flags.status_device_mode == 0) {
/* update host transfer schedule, so that new transfers can be issued */
if (dwc_otg_update_host_transfer_schedule_locked(sc))
dwc_otg_interrupt_poll_locked(sc);
}
}
USB_BUS_SPIN_UNLOCK(&sc->sc_bus);
USB_BUS_UNLOCK(&sc->sc_bus);
......@@ -3950,11 +3952,6 @@ dwc_otg_do_poll(struct usb_bus *bus)
USB_BUS_SPIN_LOCK(&sc->sc_bus);
dwc_otg_interrupt_poll_locked(sc);
dwc_otg_interrupt_complete_locked(sc);
if (sc->sc_flags.status_device_mode == 0) {
/* update host transfer schedule, so that new transfers can be issued */
if (dwc_otg_update_host_transfer_schedule_locked(sc))
dwc_otg_interrupt_poll_locked(sc);
}
USB_BUS_SPIN_UNLOCK(&sc->sc_bus);
USB_BUS_UNLOCK(&sc->sc_bus);
}
......
......@@ -231,7 +231,8 @@ usb_detach(device_t dev)
/* Get rid of USB callback processes */
usb_proc_free(USB_BUS_GIANT_PROC(bus));
usb_proc_free(USB_BUS_NON_GIANT_PROC(bus));
usb_proc_free(USB_BUS_NON_GIANT_ISOC_PROC(bus));
usb_proc_free(USB_BUS_NON_GIANT_BULK_PROC(bus));
/* Get rid of USB explore process */
......@@ -395,7 +396,8 @@ usb_bus_explore(struct usb_proc_msg *pm)
*/
usb_proc_rewakeup(USB_BUS_CONTROL_XFER_PROC(bus));
usb_proc_rewakeup(USB_BUS_GIANT_PROC(bus));
usb_proc_rewakeup(USB_BUS_NON_GIANT_PROC(bus));
usb_proc_rewakeup(USB_BUS_NON_GIANT_ISOC_PROC(bus));
usb_proc_rewakeup(USB_BUS_NON_GIANT_BULK_PROC(bus));
#endif
USB_BUS_UNLOCK(bus);
......@@ -860,9 +862,13 @@ usb_attach_sub(device_t dev, struct usb_bus *bus)
&bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) {
device_printf(dev, "WARNING: Creation of USB Giant "
"callback process failed.\n");
} else if (usb_proc_create(USB_BUS_NON_GIANT_PROC(bus),
} else if (usb_proc_create(USB_BUS_NON_GIANT_ISOC_PROC(bus),
&bus->bus_mtx, device_get_nameunit(dev), USB_PRI_HIGHEST)) {
device_printf(dev, "WARNING: Creation of USB non-Giant ISOC "
"callback process failed.\n");
} else if (usb_proc_create(USB_BUS_NON_GIANT_BULK_PROC(bus),
&bus->bus_mtx, device_get_nameunit(dev), USB_PRI_HIGH)) {
device_printf(dev, "WARNING: Creation of USB non-Giant "
device_printf(dev, "WARNING: Creation of USB non-Giant BULK "
"callback process failed.\n");
} else if (usb_proc_create(USB_BUS_EXPLORE_PROC(bus),
&bus->bus_mtx, device_get_nameunit(dev), USB_PRI_MED)) {
......
......@@ -57,19 +57,26 @@ struct usb_bus {
struct root_hold_token *bus_roothold;
#endif
/* convenience macros */
#define USB_BUS_TT_PROC(bus) USB_BUS_NON_GIANT_ISOC_PROC(bus)
#define USB_BUS_CS_PROC(bus) USB_BUS_NON_GIANT_ISOC_PROC(bus)
#if USB_HAVE_PER_BUS_PROCESS
#define USB_BUS_GIANT_PROC(bus) (&(bus)->giant_callback_proc)
#define USB_BUS_NON_GIANT_PROC(bus) (&(bus)->non_giant_callback_proc)
#define USB_BUS_NON_GIANT_ISOC_PROC(bus) (&(bus)->non_giant_isoc_callback_proc)
#define USB_BUS_NON_GIANT_BULK_PROC(bus) (&(bus)->non_giant_bulk_callback_proc)
#define USB_BUS_EXPLORE_PROC(bus) (&(bus)->explore_proc)
#define USB_BUS_CONTROL_XFER_PROC(bus) (&(bus)->control_xfer_proc)
/*
* There are two callback processes. One for Giant locked
* callbacks. One for non-Giant locked callbacks. This should
* avoid congestion and reduce response time in most cases.
* There are three callback processes. One for Giant locked
* callbacks. One for non-Giant locked non-periodic callbacks
* and one for non-Giant locked periodic callbacks. This
* should avoid congestion and reduce response time in most
* cases.
*/
struct usb_process giant_callback_proc;
struct usb_process non_giant_callback_proc;
struct usb_process non_giant_isoc_callback_proc;
struct usb_process non_giant_bulk_callback_proc;
/* Explore process */
struct usb_process explore_proc;
......
......@@ -2181,7 +2181,7 @@ usb_free_device(struct usb_device *udev, uint8_t flag)
* anywhere:
*/
USB_BUS_LOCK(udev->bus);
usb_proc_mwait(USB_BUS_NON_GIANT_PROC(udev->bus),
usb_proc_mwait(USB_BUS_CS_PROC(udev->bus),
&udev->cs_msg[0], &udev->cs_msg[1]);
USB_BUS_UNLOCK(udev->bus);
......
......@@ -346,7 +346,7 @@ uhub_tt_buffer_reset_async_locked(struct usb_device *child, struct usb_endpoint
}
up->req_reset_tt = req;
/* get reset transfer started */
usb_proc_msignal(USB_BUS_NON_GIANT_PROC(udev->bus),
usb_proc_msignal(USB_BUS_TT_PROC(udev->bus),
&hub->tt_msg[0], &hub->tt_msg[1]);
}
#endif
......@@ -1579,7 +1579,7 @@ uhub_detach(device_t dev)
#if USB_HAVE_TT_SUPPORT
/* Make sure our TT messages are not queued anywhere */
USB_BUS_LOCK(bus);
usb_proc_mwait(USB_BUS_NON_GIANT_PROC(bus),
usb_proc_mwait(USB_BUS_TT_PROC(bus),
&hub->tt_msg[0], &hub->tt_msg[1]);
USB_BUS_UNLOCK(bus);
#endif
......
......@@ -34,6 +34,7 @@
#endif
/* defines */
#define USB_PRI_HIGHEST PI_SWI(SWI_TTY)
#define USB_PRI_HIGH PI_SWI(SWI_NET)
#define USB_PRI_MED PI_SWI(SWI_CAMBIO)
......
......@@ -872,6 +872,19 @@ usbd_transfer_setup_sub(struct usb_setup_params *parm)
}
}
static uint8_t
usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
uint16_t n_setup)
{
while (n_setup--) {
uint8_t type = setup_start[n_setup].type;
if (type == UE_BULK || type == UE_BULK_INTR ||
type == UE_TYPE_ANY)
return (1);
}
return (0);
}
/*------------------------------------------------------------------------*
* usbd_transfer_setup - setup an array of USB transfers
*
......@@ -1013,9 +1026,12 @@ usbd_transfer_setup(struct usb_device *udev,
else if (xfer_mtx == &Giant)
info->done_p =
USB_BUS_GIANT_PROC(udev->bus);
else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
info->done_p =
USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
else
info->done_p =
USB_BUS_NON_GIANT_PROC(udev->bus);
USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
}
/* reset sizes */
......@@ -2280,10 +2296,8 @@ usbd_callback_ss_done_defer(struct usb_xfer *xfer)
* will have a Lock Order Reversal, LOR, if we try to
* proceed !
*/
if (usb_proc_msignal(info->done_p,
&info->done_m[0], &info->done_m[1])) {
/* ignore */
}
(void) usb_proc_msignal(info->done_p,
&info->done_m[0], &info->done_m[1]);
} else {
/* clear second recurse flag */
pq->recurse_2 = 0;
......@@ -2307,23 +2321,26 @@ usbd_callback_wrapper(struct usb_xfer_queue *pq)
struct usb_xfer_root *info = xfer->xroot;
USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
if ((pq->recurse_3 != 0 || mtx_owned(info->xfer_mtx) == 0) &&
SCHEDULER_STOPPED() == 0) {
/*
* Cases that end up here:
*
* 5) HW interrupt done callback or other source.
* 6) HW completed transfer during callback
*/
DPRINTFN(3, "case 5\n");
DPRINTFN(3, "case 5 and 6\n");
/*
* We have to postpone the callback due to the fact we
* will have a Lock Order Reversal, LOR, if we try to
* proceed !
* proceed!
*
* Postponing the callback also ensures that other USB
* transfer queues get a chance.
*/
if (usb_proc_msignal(info->done_p,
&info->done_m[0], &info->done_m[1])) {
/* ignore */
}
(void) usb_proc_msignal(info->done_p,
&info->done_m[0], &info->done_m[1]);
return;
}
/*
......@@ -2694,7 +2711,7 @@ usbd_pipe_start(struct usb_xfer_queue *pq)
} else if (udev->ctrl_xfer[1]) {
info = udev->ctrl_xfer[1]->xroot;
usb_proc_msignal(
USB_BUS_NON_GIANT_PROC(info->bus),
USB_BUS_CS_PROC(info->bus),
&udev->cs_msg[0], &udev->cs_msg[1]);
} else {
/* should not happen */
......@@ -3019,9 +3036,11 @@ usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
if (!pq->recurse_1) {
do {
/* clear third recurse flag */
pq->recurse_3 = 0;
/* set both recurse flags */
do {
/* set two first recurse flags */
pq->recurse_1 = 1;
pq->recurse_2 = 1;
......@@ -3040,6 +3059,12 @@ usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
(pq->command) (pq);
DPRINTFN(6, "cb %p (leave)\n", pq->curr);
/*
* Set third recurse flag to indicate
* recursion happened:
*/
pq->recurse_3 = 1;
} while (!pq->recurse_2);
/* clear first recurse flag */
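The new recurse_3 bit (declared in the usb_xfer_queue hunk further down) records that at least one command ran during the current activation of usb_command_wrapper(), which is what lets usbd_callback_wrapper() treat "HW completed transfer during callback" (case 6) like case 5 and defer to the done process. A simplified user-space model of the three-flag handshake; the queue, the fake command and the work counter are invented for the sketch, only the flag handling follows the patched wrapper:

#include <stdbool.h>
#include <stdio.h>

struct model_queue {
	int	pending;	/* fake work items */
	bool	recurse_1;	/* wrapper already active on this queue */
	bool	recurse_2;	/* cleared by a nested call: "run another pass" */
	bool	recurse_3;	/* set once a command ran in this activation */
};

static void model_command_wrapper(struct model_queue *pq);

static void
model_command(struct model_queue *pq)
{
	printf("command runs, %d item(s) left\n", pq->pending);
	if (--pq->pending > 0) {
		/*
		 * A callback that queues more work re-enters the wrapper;
		 * recurse_1 is set, so the call only clears recurse_2.
		 */
		model_command_wrapper(pq);
	}
}

static void
model_command_wrapper(struct model_queue *pq)
{
	if (!pq->recurse_1) {
		/* clear third recurse flag */
		pq->recurse_3 = false;
		do {
			/* set two first recurse flags */
			pq->recurse_1 = true;
			pq->recurse_2 = true;
			model_command(pq);
			/* third flag: a command ran, recursion happened */
			pq->recurse_3 = true;
		} while (!pq->recurse_2);
		/* clear first recurse flag */
		pq->recurse_1 = false;
	} else {
		/* clear second recurse flag */
		pq->recurse_2 = false;
	}
}

int
main(void)
{
	struct model_queue q = { .pending = 3 };

	model_command_wrapper(&q);
	return (0);
}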
......@@ -3315,7 +3340,8 @@ usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
USB_BUS_NON_GIANT_PROC(udev->bus)->up_msleep = 0;
USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0;
USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0;
/* poll USB hardware */
(udev->bus->methods->xfer_poll) (udev->bus);
......
......@@ -128,6 +128,8 @@ struct usb_xfer_queue {
void (*command) (struct usb_xfer_queue *pq);
uint8_t recurse_1:1;
uint8_t recurse_2:1;
uint8_t recurse_3:1;
uint8_t reserved:5;
};
/*
......
......@@ -1492,6 +1492,8 @@ sctp_pathmtu_timer(struct sctp_inpcb *inp,
#endif
if (mtu > next_mtu) {
net->mtu = next_mtu;
} else {
net->mtu = mtu;
}
}
}
......
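The added else branch in sctp_pathmtu_timer() means net->mtu is now refreshed on both sides of the comparison, so the stored value ends up as the smaller of the probed/interface mtu and the next table step, effectively min(mtu, next_mtu). A tiny sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical values: interface/route MTU and next probe step */
	uint32_t mtu = 1500, next_mtu = 1280, net_mtu;

	if (mtu > next_mtu)
		net_mtu = next_mtu;
	else
		net_mtu = mtu;	/* new in this change: also update here */
	printf("net->mtu = %u\n", net_mtu);	/* 1280 */
	return (0);
}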
......@@ -374,8 +374,9 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
KASSERT(lvl > 0,
("lost root page table page %p", domain));
/*
* Page table page does not exists, allocate
* it and create pte in the up level.
* Page table page does not exist, allocate
* it and create a pte in the preceding page level
* to reference the allocated page table page.
*/
m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
DMAR_PGF_ZERO);
......
......@@ -1645,6 +1645,8 @@ pw_user_mod(int argc, char **argv, char *arg1)
if (lc == NULL || login_setcryptfmt(lc, "sha512", NULL) == NULL)
warn("setting crypt(3) format");
login_close(lc);
cnf->default_password = boolean_val(passwd,
cnf->default_password);
pwd->pw_passwd = pw_password(cnf, pwd->pw_name, dryrun);
edited = true;
}
......
......@@ -314,6 +314,19 @@ user_add_already_exists_body() {
${PW} useradd foo
}
atf_test_case user_add_w_yes
user_add_w_yes_body() {
populate_etc_skel
atf_check -s exit:0 ${PW} useradd foo -w yes
atf_check -s exit:0 \
-o match:'^foo:\$.*' \
grep "^foo" ${HOME}/master.passwd
atf_check -s exit:0 ${PW} usermod foo -w yes
atf_check -s exit:0 \
-o match:'^foo:\$.*' \
grep "^foo" ${HOME}/master.passwd
}
atf_init_test_cases() {
atf_add_test_case user_add
atf_add_test_case user_add_noupdate
......@@ -341,4 +354,5 @@ atf_init_test_cases() {
atf_add_test_case user_add_uid_too_large
atf_add_test_case user_add_bad_shell
atf_add_test_case user_add_already_exists
atf_add_test_case user_add_w_yes
}