Compare commits
22 Commits
| Author | SHA1 | Date |
|---|---|---|
| | c9a59a8236 | |
| | 4cb2667a5f | |
| | b0f0ddf732 | |
| | 0522179927 | |
| | b66df96b78 | |
| | bcf36285df | |
| | 27c4c40c8f | |
| | e5581ce3a6 | |
| | 2177f6383e | |
| | 357e8b344c | |
| | 9695dcbc88 | |
| | 6a2503b7ae | |
| | 951328b933 | |
| | a2c430eb2b | |
| | 0f584be123 | |
| | 2053770ee6 | |
| | 8e18aa20f4 | |
| | e0426ee9ad | |
| | a4ffb9801c | |
| | 80691afb66 | |
| | b7f8705103 | |
| | 9f8dd40c68 | |
@@ -302,7 +302,7 @@ beneath or above the path of another overlay lower layer path.
 
 Using an upper layer path and/or a workdir path that are already used by
 another overlay mount is not allowed and may fail with EBUSY. Using
-partially overlapping paths is not allowed but will not fail with EBUSY.
+partially overlapping paths is not allowed and may fail with EBUSY.
 If files are accessed from two overlayfs mounts which share or overlap the
 upper layer and/or workdir path the behavior of the overlay is undefined,
 though it will not result in a crash or deadlock.
@@ -25,7 +25,7 @@ RE_function = re.compile(r'([\w_][\w\d_]+\(\))')
 # to the creation of incorrect and confusing cross references. So
 # just don't even try with these names.
 #
-Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap'
+Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
               'select', 'poll', 'fork', 'execve', 'clone', 'ioctl']
 
 #
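Reviewer note: the bug fixed here is a missing comma, which Python handles by silently concatenating the adjacent string literals, so 'mmap' and 'select' fused into one bogus name instead of raising an error. C has the same pitfall; a minimal standalone illustration (not from the patch):

```c
#include <stdio.h>

int main(void)
{
	/* missing comma after "read": adjacent literals concatenate */
	const char *funcs[] = { "open", "close", "read" "write" };
	size_t n = sizeof(funcs) / sizeof(funcs[0]);

	printf("%zu entries, last = \"%s\"\n", n, funcs[n - 1]);
	/* prints: 3 entries, last = "readwrite" */
	return 0;
}
```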
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 3
-SUBLEVEL = 0
+SUBLEVEL = 1
 EXTRAVERSION =
 NAME = Bobtail Squid
 
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
-	if (pte_valid_not_user(pte))
+	if (pte_valid_not_user(pte)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -484,8 +486,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 
 	WRITE_ONCE(*pmdp, pmd);
 
-	if (pmd_valid(pmd))
+	if (pmd_valid(pmd)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -543,8 +547,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 
 	WRITE_ONCE(*pudp, pud);
 
-	if (pud_valid(pud))
+	if (pud_valid(pud)) {
 		dsb(ishst);
+		isb();
+	}
 }
 
 static inline void pud_clear(pud_t *pudp)
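Reviewer note: the braces arrive together with the new isb() because a braceless if guards only its first statement; without them the added barrier would run unconditionally. A standalone sketch of the hazard (illustrative stand-ins, not the arm64 code):

```c
#include <stdio.h>

static void dsb_stub(void) { puts("dsb"); }
static void isb_stub(void) { puts("isb"); }

int main(void)
{
	int valid = 0;

	if (valid)
		dsb_stub();
		isb_stub();	/* BUG: indented, but always executed */

	if (valid) {
		dsb_stub();
		isb_stub();	/* correct: both statements guarded */
	}
	return 0;
}
```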
@@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive,
 	v.native_format = UDP->native_format;
 	mutex_unlock(&floppy_mutex);
 
-	if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+	if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
 		return -EFAULT;
 	return 0;
 }
@@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll,
 	v.bufblocks = UDRS->bufblocks;
 	mutex_unlock(&floppy_mutex);
 
-	if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+	if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
 		return -EFAULT;
 	return 0;
 Eintr:
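Reviewer note: both hunks fix an inverted usercopy direction. These are "get" ioctls, so the kernel-filled struct must be copied to the user pointer; the old copy_from_user() call went the wrong way and never filled the caller's buffer. A hedged userspace sketch of the corrected pattern, with the usercopy helper modeled by a stand-in:

```c
#include <stdio.h>
#include <string.h>

struct drive_params { int cmos; int native_format; };

/* stand-in for copy_to_user(); returns 0 on success like the real one */
static int copy_to_user_sim(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static int getdrvprm(struct drive_params *user_arg)
{
	struct drive_params v = { .cmos = 4, .native_format = 2 };

	/* direction matters: kernel snapshot -> user buffer */
	if (copy_to_user_sim(user_arg, &v, sizeof(v)))
		return -14;	/* -EFAULT */
	return 0;
}

int main(void)
{
	struct drive_params p = { 0, 0 };

	getdrvprm(&p);
	printf("cmos=%d native_format=%d\n", p.cmos, p.native_format);
	return 0;
}
```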
@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
 	return VPD_OK;
 }
 
-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
-				  const u8 *value, s32 value_len,
+static int vpd_section_attrib_add(const u8 *key, u32 key_len,
+				  const u8 *value, u32 value_len,
 				  void *arg)
 {
 	int ret;
 
@@ -9,8 +9,8 @@
 
 #include "vpd_decode.h"
 
-static int vpd_decode_len(const s32 max_len, const u8 *in,
-			  s32 *length, s32 *decoded_len)
+static int vpd_decode_len(const u32 max_len, const u8 *in,
+			  u32 *length, u32 *decoded_len)
 {
 	u8 more;
 	int i = 0;
@@ -30,18 +30,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
 	} while (more);
 
 	*decoded_len = i;
 
 	return VPD_OK;
 }
 
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
+			    u32 *_consumed, const u8 **entry, u32 *entry_len)
+{
+	u32 decoded_len;
+	u32 consumed = *_consumed;
+
+	if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
+			   entry_len, &decoded_len) != VPD_OK)
+		return VPD_FAIL;
+	if (max_len - consumed < decoded_len)
+		return VPD_FAIL;
+
+	consumed += decoded_len;
+	*entry = input_buf + consumed;
+
+	/* entry_len is untrusted data and must be checked again. */
+	if (max_len - consumed < *entry_len)
+		return VPD_FAIL;
+
+	consumed += *entry_len;
+	*_consumed = consumed;
+	return VPD_OK;
+}
+
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
 		      vpd_decode_callback callback, void *callback_arg)
 {
 	int type;
-	int res;
-	s32 key_len;
-	s32 value_len;
-	s32 decoded_len;
+	u32 key_len;
+	u32 value_len;
 	const u8 *key;
 	const u8 *value;
 
@@ -56,26 +77,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
 	case VPD_TYPE_STRING:
 		(*consumed)++;
 
 		/* key */
-		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-				     &key_len, &decoded_len);
-		if (res != VPD_OK || *consumed + decoded_len >= max_len)
+		if (vpd_decode_entry(max_len, input_buf, consumed, &key,
+				     &key_len) != VPD_OK)
 			return VPD_FAIL;
 
-		*consumed += decoded_len;
-		key = &input_buf[*consumed];
-		*consumed += key_len;
-
 		/* value */
-		res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
-				     &value_len, &decoded_len);
-		if (res != VPD_OK || *consumed + decoded_len > max_len)
+		if (vpd_decode_entry(max_len, input_buf, consumed, &value,
+				     &value_len) != VPD_OK)
 			return VPD_FAIL;
 
-		*consumed += decoded_len;
-		value = &input_buf[*consumed];
-		*consumed += value_len;
-
 		if (type == VPD_TYPE_STRING)
 			return callback(key, key_len, value, value_len,
 					callback_arg);
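Reviewer note: the extracted text showed `consumed += decoded_len;` twice inside vpd_decode_entry(); the second advance must be by `*entry_len` (skip the entry body, not the length field again), and the hunk above is corrected accordingly. The broader pattern is that every decoded length is untrusted and is checked against the remaining buffer both when read and again before the entry body is consumed. A self-contained sketch of that pattern (hypothetical names, a single length byte instead of the VPD varint encoding):

```c
#include <stdint.h>
#include <stdio.h>

enum { OK = 0, FAIL = 1 };

static int decode_entry(uint32_t max_len, const uint8_t *buf,
			uint32_t *consumed, const uint8_t **entry,
			uint32_t *entry_len)
{
	uint32_t off = *consumed;

	if (max_len - off < 1)
		return FAIL;
	*entry_len = buf[off++];	/* untrusted length byte */

	/* re-check: the advertised length must fit in what remains */
	if (max_len - off < *entry_len)
		return FAIL;

	*entry = buf + off;
	*consumed = off + *entry_len;
	return OK;
}

int main(void)
{
	const uint8_t blob[] = { 3, 'k', 'e', 'y', 250, 'x' };
	uint32_t consumed = 0, len;
	const uint8_t *e;

	if (decode_entry(sizeof(blob), blob, &consumed, &e, &len) == OK)
		printf("entry: %.*s\n", (int)len, (const char *)e);
	if (decode_entry(sizeof(blob), blob, &consumed, &e, &len) == FAIL)
		puts("oversized second entry rejected");
	return 0;
}
```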
@@ -25,8 +25,8 @@ enum {
 };
 
 /* Callback for vpd_decode_string to invoke. */
-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
-				const u8 *value, s32 value_len,
+typedef int vpd_decode_callback(const u8 *key, u32 key_len,
+				const u8 *value, u32 value_len,
 				void *arg);
 
 /*
@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
  * If one entry is successfully decoded, sends it to callback and returns the
  * result.
  */
-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
 		      vpd_decode_callback callback, void *callback_arg);
 
 #endif /* __VPD_DECODE_H */
@@ -608,10 +608,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
 {
 	struct technisat_usb2_state *state = d->priv;
-	u8 *buf = state->buf;
-	u8 *b;
-	int ret;
 	struct ir_raw_event ev;
+	u8 *buf = state->buf;
+	int i, ret;
 
 	buf[0] = GET_IR_DATA_VENDOR_REQUEST;
 	buf[1] = 0x08;
@@ -647,26 +646,25 @@ unlock:
 		return 0; /* no key pressed */
 
 	/* decoding */
-	b = buf+1;
 
 #if 0
 	deb_rc("RC: %d ", ret);
-	debug_dump(b, ret, deb_rc);
+	debug_dump(buf + 1, ret, deb_rc);
 #endif
 
 	ev.pulse = 0;
-	while (1) {
-		ev.pulse = !ev.pulse;
-		ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
-		ir_raw_event_store(d->rc_dev, &ev);
-
-		b++;
-		if (*b == 0xff) {
+	for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
+		if (buf[i] == 0xff) {
 			ev.pulse = 0;
 			ev.duration = 888888*2;
 			ir_raw_event_store(d->rc_dev, &ev);
 			break;
 		}
+
+		ev.pulse = !ev.pulse;
+		ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
+			       FIRMWARE_CLOCK_TICK) / 1000;
+		ir_raw_event_store(d->rc_dev, &ev);
 	}
 
 	ir_raw_event_handle(d->rc_dev);
@@ -97,6 +97,7 @@ static void tm6000_urb_received(struct urb *urb)
 			printk(KERN_ERR "tm6000: error %s\n", __func__);
 			kfree(urb->transfer_buffer);
 			usb_free_urb(urb);
+			dev->dvb->bulk_urb = NULL;
 		}
 	}
 }
@@ -127,6 +128,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
 	dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
 	if (!dvb->bulk_urb->transfer_buffer) {
 		usb_free_urb(dvb->bulk_urb);
+		dvb->bulk_urb = NULL;
 		return -ENOMEM;
 	}
 
@@ -153,6 +155,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
 
 		kfree(dvb->bulk_urb->transfer_buffer);
 		usb_free_urb(dvb->bulk_urb);
+		dvb->bulk_urb = NULL;
 		return ret;
 	}
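Reviewer note: each hunk clears the URB pointer right after freeing it, so a later disconnect or stop path sees NULL instead of a stale pointer and cannot free it a second time. Minimal standalone illustration of the convention:

```c
#include <stdio.h>
#include <stdlib.h>

struct stream { void *bulk_urb; };

static void stop_stream(struct stream *s)
{
	free(s->bulk_urb);
	s->bulk_urb = NULL;	/* without this, a second call double-frees */
}

int main(void)
{
	struct stream s = { .bulk_urb = malloc(64) };

	stop_stream(&s);
	stop_stream(&s);	/* safe: free(NULL) is a no-op */
	puts("no double free");
	return 0;
}
```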
@@ -4451,10 +4451,12 @@ int stmmac_suspend(struct device *dev)
 	if (!ndev || !netif_running(ndev))
 		return 0;
 
-	phylink_stop(priv->phylink);
-
 	mutex_lock(&priv->lock);
 
+	rtnl_lock();
+	phylink_stop(priv->phylink);
+	rtnl_unlock();
+
 	netif_device_detach(ndev);
 	stmmac_stop_all_queues(priv);
 
@@ -4558,9 +4560,11 @@ int stmmac_resume(struct device *dev)
 
 	stmmac_start_all_queues(priv);
 
-	mutex_unlock(&priv->lock);
-
+	rtnl_lock();
 	phylink_start(priv->phylink);
+	rtnl_unlock();
+
+	mutex_unlock(&priv->lock);
 
 	return 0;
 }
@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-			queue->rx.rsp_cons = ++cons;
+			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
 			kfree_skb(nskb);
 			return ~0U;
 		}
 
@@ -35,7 +35,7 @@
 #define PLL_READY_GATE_EN		BIT(3)
 /* QPHY_PCS_STATUS bit */
 #define PHYSTATUS			BIT(6)
-/* QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
 #define PCS_READY			BIT(0)
 
 /* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
@@ -115,6 +115,7 @@ enum qphy_reg_layout {
 	QPHY_SW_RESET,
 	QPHY_START_CTRL,
 	QPHY_PCS_READY_STATUS,
+	QPHY_PCS_STATUS,
 	QPHY_PCS_AUTONOMOUS_MODE_CTRL,
 	QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
 	QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
@@ -133,7 +134,7 @@ static const unsigned int pciephy_regs_layout[] = {
 	[QPHY_FLL_MAN_CODE]		= 0xd4,
 	[QPHY_SW_RESET]			= 0x00,
 	[QPHY_START_CTRL]		= 0x08,
-	[QPHY_PCS_READY_STATUS]		= 0x174,
+	[QPHY_PCS_STATUS]		= 0x174,
 };
 
 static const unsigned int usb3phy_regs_layout[] = {
@@ -144,7 +145,7 @@ static const unsigned int usb3phy_regs_layout[] = {
 	[QPHY_FLL_MAN_CODE]		= 0xd0,
 	[QPHY_SW_RESET]			= 0x00,
 	[QPHY_START_CTRL]		= 0x08,
-	[QPHY_PCS_READY_STATUS]		= 0x17c,
+	[QPHY_PCS_STATUS]		= 0x17c,
 	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= 0x0d4,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
@@ -153,7 +154,7 @@ static const unsigned int usb3phy_regs_layout[] = {
 static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
 	[QPHY_SW_RESET]			= 0x00,
 	[QPHY_START_CTRL]		= 0x08,
-	[QPHY_PCS_READY_STATUS]		= 0x174,
+	[QPHY_PCS_STATUS]		= 0x174,
 	[QPHY_PCS_AUTONOMOUS_MODE_CTRL]	= 0x0d8,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
 	[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
@@ -911,7 +912,6 @@ struct qmp_phy_cfg {
 
 	unsigned int start_ctrl;
 	unsigned int pwrdn_ctrl;
-	unsigned int mask_pcs_ready;
 	unsigned int mask_com_pcs_ready;
 
 	/* true, if PHY has a separate PHY_COM control block */
@@ -1074,7 +1074,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
 
 	.start_ctrl		= PCS_START | PLL_READY_GATE_EN,
 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.mask_pcs_ready		= PHYSTATUS,
 	.mask_com_pcs_ready	= PCS_READY,
 
 	.has_phy_com_ctrl	= true,
@@ -1106,7 +1105,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 };
 
 /* list of resets */
@@ -1136,7 +1134,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.has_phy_com_ctrl	= false,
 	.has_lane_rst		= false,
@@ -1167,7 +1164,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.has_pwrdn_delay	= true,
 	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
@@ -1199,7 +1195,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.has_pwrdn_delay	= true,
 	.pwrdn_delay_min	= POWER_DOWN_DELAY_US_MIN,
@@ -1226,7 +1221,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
 
 	.start_ctrl		= SERDES_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PCS_READY,
 
 	.is_dual_lane_phy	= true,
 	.no_pcs_sw_reset	= true,
@@ -1254,7 +1248,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN | REFCLK_DRV_DSBL,
-	.mask_pcs_ready		= PHYSTATUS,
 };
 
 static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
@@ -1279,7 +1272,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
 
 	.start_ctrl		= SERDES_START | PCS_START,
 	.pwrdn_ctrl		= SW_PWRDN,
-	.mask_pcs_ready		= PHYSTATUS,
 
 	.is_dual_lane_phy	= true,
 };
@@ -1457,7 +1449,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
 	void __iomem *pcs = qphy->pcs;
 	void __iomem *dp_com = qmp->dp_com;
 	void __iomem *status;
-	unsigned int mask, val;
+	unsigned int mask, val, ready;
 	int ret;
 
 	dev_vdbg(qmp->dev, "Initializing QMP phy\n");
@@ -1545,10 +1537,17 @@ static int qcom_qmp_phy_enable(struct phy *phy)
 	/* start SerDes and Phy-Coding-Sublayer */
 	qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
 
-	status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
-	mask = cfg->mask_pcs_ready;
+	if (cfg->type == PHY_TYPE_UFS) {
+		status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+		mask = PCS_READY;
+		ready = PCS_READY;
+	} else {
+		status = pcs + cfg->regs[QPHY_PCS_STATUS];
+		mask = PHYSTATUS;
+		ready = 0;
+	}
 
-	ret = readl_poll_timeout(status, val, val & mask, 10,
+	ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
 				 PHY_INIT_COMPLETE_TIMEOUT);
 	if (ret) {
 		dev_err(qmp->dev, "phy initialization timed-out\n");
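Reviewer note: the core of this change is that "ready" no longer means "mask bit set". For the PHYSTATUS bit, ready is the bit reading as zero, so the poll condition becomes (val & mask) == ready. A userspace sketch of that polling shape, with made-up register values:

```c
#include <stdint.h>
#include <stdio.h>

#define PHYSTATUS (1u << 6)	/* busy while set, ready when clear */
#define PCS_READY (1u << 0)	/* ready while set */

static int poll_ready(const volatile uint32_t *reg,
		      uint32_t mask, uint32_t ready)
{
	for (int tries = 0; tries < 1000; tries++) {
		if ((*reg & mask) == ready)
			return 0;	/* condition met */
	}
	return -1;			/* timed out */
}

int main(void)
{
	uint32_t status = 0;	/* PHYSTATUS clear: PHY is ready */

	printf("pcie/usb ready: %d\n", poll_ready(&status, PHYSTATUS, 0));
	status = PCS_READY;	/* UFS style: ready bit set */
	printf("ufs ready: %d\n", poll_ready(&status, PCS_READY, PCS_READY));
	return 0;
}
```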
@@ -61,6 +61,7 @@
 					 USB2_OBINT_IDDIGCHG)
 
 /* VBCTRL */
+#define USB2_VBCTRL_OCCLREN		BIT(16)
 #define USB2_VBCTRL_DRVVBUSSEL		BIT(8)
 
 /* LINECTRL1 */
@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
 	writel(val, usb2_base + USB2_LINECTRL1);
 
 	val = readl(usb2_base + USB2_VBCTRL);
+	val &= ~USB2_VBCTRL_OCCLREN;
 	writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
 	val = readl(usb2_base + USB2_ADPCTRL);
 	writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
@@ -1400,7 +1400,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
 
 			atmel_port->hd_start_rx = false;
 			atmel_start_rx(port);
-			return;
 		}
 
 		atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
@@ -609,7 +609,7 @@ static inline void sprd_rx(struct uart_port *port)
 
 		if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
 			   SPRD_LSR_FE | SPRD_LSR_OE))
-			if (handle_lsr_errors(port, &lsr, &flag))
+			if (handle_lsr_errors(port, &flag, &lsr))
 				continue;
 		if (uart_handle_sysrq_char(port, ch))
 			continue;
@@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 	struct usb_bos_descriptor *bos;
 	struct usb_dev_cap_header *cap;
 	struct usb_ssp_cap_descriptor *ssp_cap;
-	unsigned char *buffer;
+	unsigned char *buffer, *buffer0;
 	int length, total_len, num, i, ssac;
 	__u8 cap_type;
 	int ret;
@@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 		ret = -ENOMSG;
 		goto err;
 	}
+
+	buffer0 = buffer;
 	total_len -= length;
+	buffer += length;
 
 	for (i = 0; i < num; i++) {
-		buffer += length;
 		cap = (struct usb_dev_cap_header *)buffer;
 
 		if (total_len < sizeof(*cap) || total_len < cap->bLength) {
@@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 			break;
 		}
 
-		total_len -= length;
-
 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
 			dev_warn(ddev, "descriptor type invalid, skip\n");
 			continue;
@@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 		default:
 			break;
 		}
+
+		total_len -= length;
+		buffer += length;
 	}
+	dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
 
 	return 0;
 
@@ -66,6 +66,7 @@ struct ovl_fs {
 	bool workdir_locked;
 	/* Traps in ovl inode cache */
 	struct inode *upperdir_trap;
+	struct inode *workbasedir_trap;
 	struct inode *workdir_trap;
 	struct inode *indexdir_trap;
 	/* Inode numbers in all layers do not use the high xino_bits */
@@ -212,6 +212,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
 {
 	unsigned i;
 
+	iput(ofs->workbasedir_trap);
 	iput(ofs->indexdir_trap);
 	iput(ofs->workdir_trap);
 	iput(ofs->upperdir_trap);
@@ -1003,6 +1004,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
 	return 0;
 }
 
+/*
+ * Determine how we treat concurrent use of upperdir/workdir based on the
+ * index feature. This is papering over mount leaks of container runtimes,
+ * for example, an old overlay mount is leaked and now its upperdir is
+ * attempted to be used as a lower layer in a new overlay mount.
+ */
+static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
+{
+	if (ofs->config.index) {
+		pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+		       name);
+		return -EBUSY;
+	} else {
+		pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+			name);
+		return 0;
+	}
+}
+
 static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 			 struct path *upperpath)
 {
@@ -1040,14 +1060,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
 	upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
 	ofs->upper_mnt = upper_mnt;
 
-	err = -EBUSY;
 	if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
 		ofs->upperdir_locked = true;
-	} else if (ofs->config.index) {
-		pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
-		goto out;
 	} else {
-		pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+		err = ovl_report_in_use(ofs, "upperdir");
+		if (err)
+			goto out;
 	}
 
 	err = 0;
@@ -1157,16 +1175,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
 
 	ofs->workbasedir = dget(workpath.dentry);
 
-	err = -EBUSY;
 	if (ovl_inuse_trylock(ofs->workbasedir)) {
 		ofs->workdir_locked = true;
-	} else if (ofs->config.index) {
-		pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
-		goto out;
 	} else {
-		pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+		err = ovl_report_in_use(ofs, "workdir");
+		if (err)
+			goto out;
 	}
 
+	err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
+			     "workdir");
+	if (err)
+		goto out;
+
 	err = ovl_make_workdir(sb, ofs, &workpath);
 
 out:
@@ -1313,16 +1334,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
 		if (err < 0)
 			goto out;
 
-		err = -EBUSY;
-		if (ovl_is_inuse(stack[i].dentry)) {
-			pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
-			goto out;
-		}
-
 		err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
 		if (err)
 			goto out;
 
+		if (ovl_is_inuse(stack[i].dentry)) {
+			err = ovl_report_in_use(ofs, "lowerdir");
+			if (err)
+				goto out;
+		}
+
 		mnt = clone_private_mount(&stack[i]);
 		err = PTR_ERR(mnt);
 		if (IS_ERR(mnt)) {
@@ -1469,8 +1490,8 @@ out_err:
  * - another layer of this overlayfs instance
  * - upper/work dir of any overlayfs instance
  */
-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
-			   const char *name)
+static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
+			   struct dentry *dentry, const char *name)
 {
 	struct dentry *next = dentry, *parent;
 	int err = 0;
@@ -1482,13 +1503,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
 
 	/* Walk back ancestors to root (inclusive) looking for traps */
 	while (!err && parent != next) {
-		if (ovl_is_inuse(parent)) {
-			err = -EBUSY;
-			pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
-			       name);
-		} else if (ovl_lookup_trap_inode(sb, parent)) {
+		if (ovl_lookup_trap_inode(sb, parent)) {
 			err = -ELOOP;
 			pr_err("overlayfs: overlapping %s path\n", name);
+		} else if (ovl_is_inuse(parent)) {
+			err = ovl_report_in_use(ofs, name);
 		}
 		next = parent;
 		parent = dget_parent(next);
@@ -1509,7 +1528,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
 	int i, err;
 
 	if (ofs->upper_mnt) {
-		err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
+		err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
+				      "upperdir");
 		if (err)
 			return err;
 
@@ -1520,13 +1540,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
 	 * workbasedir. In that case, we already have their traps in
 	 * inode cache and we will catch that case on lookup.
 	 */
-		err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
+		err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
 		if (err)
 			return err;
 	}
 
 	for (i = 0; i < ofs->numlower; i++) {
-		err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
+		err = ovl_check_layer(sb, ofs,
+				      ofs->lower_layers[i].mnt->mnt_root,
 				      "lowerdir");
 		if (err)
 			return err;
@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
 static inline void qdisc_run(struct Qdisc *q)
 {
 	if (qdisc_run_begin(q)) {
-		__qdisc_run(q);
+		/* NOLOCK qdisc must check 'state' under the qdisc seqlock
+		 * to avoid racing with dev_qdisc_reset()
+		 */
+		if (!(q->flags & TCQ_F_NOLOCK) ||
+		    likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			__qdisc_run(q);
 		qdisc_run_end(q);
 	}
 }
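Reviewer note: the deactivated check moves inside the qdisc_run_begin()/qdisc_run_end() critical section so it cannot race with dev_qdisc_reset(). The generic shape of check-under-lock, sketched with a pthread mutex standing in for the qdisc seqlock:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool deactivated;

static void run_queue(void)
{
	pthread_mutex_lock(&lock);	/* qdisc_run_begin() analogue */
	if (!deactivated) {		/* state observed under the lock */
		puts("ran queue");	/* __qdisc_run() work goes here */
	}
	pthread_mutex_unlock(&lock);	/* qdisc_run_end() analogue */
}

int main(void)
{
	run_queue();		/* runs */
	deactivated = true;
	run_queue();		/* safely skipped */
	return 0;
}
```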
@@ -21,7 +21,8 @@ struct sock_reuseport {
 	unsigned int		synq_overflow_ts;
 	/* ID stays the same even after the size of socks[] grows. */
 	unsigned int		reuseport_id;
-	bool			bind_inany;
+	unsigned int		bind_inany:1;
+	unsigned int		has_conns:1;
 	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
 	struct sock		*socks[0];	/* array of sock pointers */
 };
@@ -37,6 +38,23 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
 extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
 extern int reuseport_detach_prog(struct sock *sk);
 
+static inline bool reuseport_has_conns(struct sock *sk, bool set)
+{
+	struct sock_reuseport *reuse;
+	bool ret = false;
+
+	rcu_read_lock();
+	reuse = rcu_dereference(sk->sk_reuseport_cb);
+	if (reuse) {
+		if (set)
+			reuse->has_conns = 1;
+		ret = reuse->has_conns;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 int reuseport_get_id(struct sock_reuseport *reuse);
 
 #endif /* _SOCK_REUSEPORT_H */
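Reviewer note: reuseport_has_conns() doubles as setter and test: connect() paths call it with set=true to mark the group as containing connected sockets, and the UDP lookup calls it with set=false to decide whether the early "any group member" return is still safe. A simplified single-threaded sketch of that protocol:

```c
#include <stdbool.h>
#include <stdio.h>

struct group { bool has_conns; };

static bool group_has_conns(struct group *g, bool set)
{
	if (set)
		g->has_conns = true;
	return g->has_conns;
}

int main(void)
{
	struct group g = { false };

	/* lookup before any connect(): fast path allowed */
	printf("fast path ok: %d\n", !group_has_conns(&g, false));

	group_has_conns(&g, true);	/* a socket connect()ed */

	/* later lookups must fall back to full per-socket scoring */
	printf("fast path ok: %d\n", !group_has_conns(&g, false));
	return 0;
}
```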
@@ -3467,18 +3467,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	qdisc_calculate_pkt_len(skb, q);
 
 	if (q->flags & TCQ_F_NOLOCK) {
-		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-			__qdisc_drop(skb, &to_free);
-			rc = NET_XMIT_DROP;
-		} else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
-			   qdisc_run_begin(q)) {
+		if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+		    qdisc_run_begin(q)) {
+			if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state))) {
+				__qdisc_drop(skb, &to_free);
+				rc = NET_XMIT_DROP;
+				goto end_run;
+			}
 			qdisc_bstats_cpu_update(q, skb);
 
+			rc = NET_XMIT_SUCCESS;
 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
 				__qdisc_run(q);
 
+end_run:
 			qdisc_run_end(q);
-			rc = NET_XMIT_SUCCESS;
 		} else {
 			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
 			qdisc_run(q);
@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 select_by_hash:
 		/* no bpf or invalid bpf result: fall back to hash usage */
-		if (!sk2)
-			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
+		if (!sk2) {
+			int i, j;
+
+			i = j = reciprocal_scale(hash, socks);
+			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+				i++;
+				if (i >= reuse->num_socks)
+					i = 0;
+				if (i == j)
+					goto out;
+			}
+			sk2 = reuse->socks[i];
+		}
 	}
 
 out:
@@ -623,6 +623,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
 	tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
 	tag_ops = dsa_tag_driver_get(tag_protocol);
 	if (IS_ERR(tag_ops)) {
+		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+			return -EPROBE_DEFER;
 		dev_warn(ds->dev, "No tagger for this switch\n");
 		return PTR_ERR(tag_ops);
 	}
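Reviewer note: the added branch translates "dependency not available yet" (-ENOPROTOOPT from the tagger lookup) into -EPROBE_DEFER so the driver core retries the probe once the tagger module shows up, instead of failing the switch for good. Minimal stand-in with the errno values spelled out:

```c
#include <stdio.h>

#define ENOPROTOOPT	92
#define EPROBE_DEFER	517	/* kernel-internal: retry probe later */

static int parse_cpu_port(int tagger_err)
{
	if (tagger_err == -ENOPROTOOPT)
		return -EPROBE_DEFER;	/* tagger may load later */
	return tagger_err;		/* any other error is final */
}

int main(void)
{
	printf("%d\n", parse_cpu_port(-ENOPROTOOPT));	/* -517 */
	printf("%d\n", parse_cpu_port(-22));		/* -22 stays */
	return 0;
}
```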
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <net/route.h>
 #include <net/tcp_states.h>
+#include <net/sock_reuseport.h>
 
 int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	}
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
@@ -423,12 +423,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			badness = score;
@@ -27,6 +27,7 @@
 #include <net/ip6_route.h>
 #include <net/tcp_states.h>
 #include <net/dsfield.h>
+#include <net/sock_reuseport.h>
 
 #include <linux/errqueue.h>
 #include <linux/uaccess.h>
@@ -254,6 +255,7 @@ ipv4_connected:
 		goto out;
 	}
 
+	reuseport_has_conns(sk, true);
 	sk->sk_state = TCP_ESTABLISHED;
 	sk_set_txhash(sk);
 out:
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	if (unlikely(!tun_info ||
 		     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
 		     ip_tunnel_info_af(tun_info) != AF_INET6))
-		return -EINVAL;
+		goto tx_err;
 
 	key = &tun_info->key;
 	memset(&fl6, 0, sizeof(fl6));
@@ -158,13 +158,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif);
 		if (score > badness) {
-			if (sk->sk_reuseport) {
+			if (sk->sk_reuseport &&
+			    sk->sk_state != TCP_ESTABLISHED) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
-				if (result)
+				if (result && !reuseport_has_conns(sk, false))
 					return result;
 			}
 			result = sk;
@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
 
 void qdisc_put(struct Qdisc *qdisc)
 {
+	if (!qdisc)
+		return;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
 		return;
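Reviewer note: qdisc_put() now follows the kfree(NULL) convention: a teardown helper that tolerates NULL lets error and cleanup paths run unconditionally without guarding every call site. Userspace sketch:

```c
#include <stdio.h>
#include <stdlib.h>

struct qdisc { int refcnt; };

static void qdisc_put_sim(struct qdisc *q)
{
	if (!q)
		return;		/* the added early exit */
	if (--q->refcnt == 0) {
		puts("destroyed");
		free(q);
	}
}

int main(void)
{
	struct qdisc *q = malloc(sizeof(*q));

	q->refcnt = 1;
	qdisc_put_sim(q);	/* destroys */
	qdisc_put_sim(NULL);	/* harmless no-op */
	return 0;
}
```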
@@ -10659,9 +10659,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
 	hyst = wdev->cqm_config->rssi_hyst;
 	n = wdev->cqm_config->n_rssi_thresholds;
 
-	for (i = 0; i < n; i++)
+	for (i = 0; i < n; i++) {
+		i = array_index_nospec(i, n);
 		if (last < wdev->cqm_config->rssi_thresholds[i])
 			break;
+	}
 
 	low_index = i - 1;
 	if (low_index >= 0) {
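Reviewer note: array_index_nospec() clamps the index with a branchless mask so that even a mispredicted bounds check cannot form an out-of-bounds speculative load address. The sketch below mirrors the kernel's generic C fallback for the mask (architectures may override it), relying as the kernel does on arithmetic right shift of negative values:

```c
#include <stdio.h>

/* ~0UL when index < size, 0UL otherwise, computed without a branch */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (8 * sizeof(long) - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask(index, size);
}

int main(void)
{
	int thresholds[4] = { -90, -80, -70, -60 };
	unsigned long n = 4;

	for (unsigned long i = 0; i < n; i++) {
		i = index_nospec(i, n);	/* as in the patched loop */
		printf("%d\n", thresholds[i]);
	}
	return 0;
}
```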
@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
 	return 1;
 }
 
-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
 {
 	struct kvm_coalesced_mmio_ring *ring;
 	unsigned avail;
@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
 	 * there is always one unused entry in the buffer
 	 */
 	ring = dev->kvm->coalesced_mmio_ring;
-	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
 	if (avail == 0) {
 		/* full */
 		return 0;
@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+	__u32 insert;
 
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
 	spin_lock(&dev->kvm->ring_lock);
 
-	if (!coalesced_mmio_has_room(dev)) {
+	insert = READ_ONCE(ring->last);
+	if (!coalesced_mmio_has_room(dev, insert) ||
+	    insert >= KVM_COALESCED_MMIO_MAX) {
 		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
 	/* copy data in first free entry of the ring */
 
-	ring->coalesced_mmio[ring->last].phys_addr = addr;
-	ring->coalesced_mmio[ring->last].len = len;
-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
-	ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
+	ring->coalesced_mmio[insert].phys_addr = addr;
+	ring->coalesced_mmio[insert].len = len;
+	memcpy(ring->coalesced_mmio[insert].data, val, len);
+	ring->coalesced_mmio[insert].pio = dev->zone.pio;
 	smp_wmb();
-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
 	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
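Reviewer note: the hardening pattern here is to read the guest-visible ring->last exactly once into a local, validate both the derived free-space computation and the raw index, and index the ring only through that snapshot, publishing the data before the index update. A userspace sketch with READ_ONCE and smp_wmb() reduced to comments:

```c
#include <stdint.h>
#include <stdio.h>

#define RING_MAX 8

struct ring {
	volatile uint32_t first, last;	/* "last" is writable by the guest */
	uint32_t data[RING_MAX];
};

static int ring_insert(struct ring *r, uint32_t val)
{
	uint32_t insert = r->last;	/* single snapshot (READ_ONCE) */
	uint32_t avail = (r->first - insert - 1) % RING_MAX;

	/* a hostile index can make avail look fine, so check it too */
	if (avail == 0 || insert >= RING_MAX)
		return -1;

	r->data[insert] = val;		/* index only via the snapshot */
	/* smp_wmb() here: make data visible before the index update */
	r->last = (insert + 1) % RING_MAX;
	return 0;
}

int main(void)
{
	struct ring r = { .first = 0, .last = 0 };

	ring_insert(&r, 42);
	r.last = 1000;	/* hostile guest update */
	printf("rejected: %d\n", ring_insert(&r, 7));	/* -1, no OOB write */
	return 0;
}
```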