Compare commits
39 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | ab35d16f66 |  |
|  | 2607611714 |  |
|  | 20417e9a54 |  |
|  | f85a3c8f00 |  |
|  | d56e029fc2 |  |
|  | 03cbf4a306 |  |
|  | 09d05eb32d |  |
|  | 0155f201e2 |  |
|  | dd236b3c9a |  |
|  | 6399cc16dd |  |
|  | 34bfc89473 |  |
|  | cb66218588 |  |
|  | ead4cb80db |  |
|  | 618ebd550b |  |
|  | 1b4ba31bb8 |  |
|  | dc427c08fd |  |
|  | ac74b66369 |  |
|  | 5a51f2febc |  |
|  | 6cd951eefd |  |
|  | 7084a918af |  |
|  | c329c44099 |  |
|  | c6508a3964 |  |
|  | 1272418996 |  |
|  | e1ebb00c1b |  |
|  | e9d8f71bda |  |
|  | e520cb6c01 |  |
|  | 23f2134fb9 |  |
|  | 132a4d817b |  |
|  | 3cd2a58902 |  |
|  | bf1befcaa5 |  |
|  | b555cf8458 |  |
|  | 1e35d149be |  |
|  | c3a90ecddf |  |
|  | 1ec27490ee |  |
|  | 9bef7d690e |  |
|  | 1338f79220 |  |
|  | 9a11d1f9c4 |  |
|  | d4ca0cfa3f |  |
|  | d0ccfd55b9 |  |
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 12
-SUBLEVEL = 0
+SUBLEVEL = 2
 EXTRAVERSION =
 NAME = Fearless Coyote

@@ -261,6 +261,7 @@ static u64 pnv_deepest_stop_psscr_val;
 static u64 pnv_deepest_stop_psscr_mask;
 static bool deepest_stop_found;
 
+#ifdef CONFIG_HOTPLUG_CPU
 /*
  * pnv_cpu_offline: A function that puts the CPU into the deepest
  * available platform idle state on a CPU-Offline.
@@ -293,6 +294,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
 
 	return srr1;
 }
+#endif
 
 /*
  * Power ISA 3.0 idle initialization.

@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 
 extern int reserve_memtype(u64 start, u64 end,
 		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);

@@ -1075,6 +1075,13 @@ void __init setup_arch(char **cmdline_p)
 
 	max_possible_pfn = max_pfn;
 
+	/*
+	 * This call is required when the CPU does not support PAT. If
+	 * mtrr_bp_init() invoked it already via pat_init() the call has no
+	 * effect.
+	 */
+	init_cache_modes();
+
 	/*
 	 * Define random base addresses for memory sections after max_pfn is
 	 * defined and before each memory section base is used.

@@ -37,7 +37,7 @@ ENTRY(copy_user_generic_unrolled)
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_copy_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -58,7 +58,8 @@ ENTRY(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-17:	movl %edx,%ecx
+.L_copy_short_string:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
 	jz 20f
@@ -174,6 +175,8 @@ EXPORT_SYMBOL(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	ASM_STAC
+	cmpl $64,%edx
+	jb .L_copy_short_string	/* less then 64 bytes, avoid the costly 'rep' */
 	movl %edx,%ecx
 1:	rep
 	movsb

@@ -37,14 +37,14 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 
 void pat_disable(const char *reason)
 {
-	if (!__pat_enabled)
+	if (pat_disabled)
 		return;
 
 	if (boot_cpu_done) {
@@ -52,10 +52,8 @@ void pat_disable(const char *reason)
 		return;
 	}
 
-	__pat_enabled = 0;
+	pat_disabled = true;
 	pr_info("x86/PAT: %s\n", reason);
-
-	init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -67,7 +65,7 @@ early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -205,6 +203,8 @@ static void __init_cache_modes(u64 pat)
 		update_cache_mode_entry(i, cache);
 	}
 	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+	init_cm_done = true;
 }
 
 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -225,6 +225,7 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
@@ -242,10 +243,9 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
 	u64 pat = 0;
-	static int init_cm_done;
 
 	if (init_cm_done)
 		return;
@@ -287,8 +287,6 @@ static void init_cache_modes(void)
 	}
 
 	__init_cache_modes(pat);
-
-	init_cm_done = 1;
 }
 
 /**
@@ -306,10 +304,8 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!pat_enabled()) {
-		init_cache_modes();
+	if (pat_disabled)
 		return;
-	}
 
 	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
 	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||

@@ -1691,6 +1691,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 		return PTR_ERR(sk_tfm);
 	}
 	drbg->ctr_handle = sk_tfm;
+	init_completion(&drbg->ctr_completion);
 
 	req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
 	if (!req) {

@@ -496,7 +496,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
 		goto done;
 	pos++;
 
-	if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
+	if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
 		goto done;
 
 	pos += digest_info->size;

@@ -2667,7 +2667,11 @@ void device_shutdown(void)
 		pm_runtime_get_noresume(dev);
 		pm_runtime_barrier(dev);
 
-		if (dev->bus && dev->bus->shutdown) {
+		if (dev->class && dev->class->shutdown) {
+			if (initcall_debug)
+				dev_info(dev, "shutdown\n");
+			dev->class->shutdown(dev);
+		} else if (dev->bus && dev->bus->shutdown) {
 			if (initcall_debug)
 				dev_info(dev, "shutdown\n");
 			dev->bus->shutdown(dev);

@@ -866,7 +866,7 @@ static ssize_t driver_override_store(struct device *dev,
 					    const char *buf, size_t count)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	char *driver_override, *old = pdev->driver_override, *cp;
+	char *driver_override, *old, *cp;
 
 	if (count > PATH_MAX)
 		return -EINVAL;
@@ -879,12 +879,15 @@ static ssize_t driver_override_store(struct device *dev,
 	if (cp)
 		*cp = '\0';
 
+	device_lock(dev);
+	old = pdev->driver_override;
 	if (strlen(driver_override)) {
 		pdev->driver_override = driver_override;
 	} else {
 		kfree(driver_override);
 		pdev->driver_override = NULL;
 	}
+	device_unlock(dev);
 
 	kfree(old);
 
@@ -895,8 +898,12 @@ static ssize_t driver_override_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct platform_device *pdev = to_platform_device(dev);
+	ssize_t len;
 
-	return sprintf(buf, "%s\n", pdev->driver_override);
+	device_lock(dev);
+	len = sprintf(buf, "%s\n", pdev->driver_override);
+	device_unlock(dev);
+	return len;
 }
 static DEVICE_ATTR_RW(driver_override);

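The driver_override fix above closes a read/write race on a heap-allocated string: both sysfs handlers now take the device lock, and the old buffer is freed only after it has been unpublished. A minimal userspace analogue of the same pattern, with a pthread mutex standing in for `device_lock()` (names here are hypothetical, not kernel API):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *override;			/* plays the role of pdev->driver_override */

/* store side: swap in the new string under the lock, free the old one after */
void override_store(const char *buf)
{
	char *new = (buf && *buf) ? strdup(buf) : NULL;
	char *old;

	pthread_mutex_lock(&lock);	/* device_lock(dev) in the patch */
	old = override;
	override = new;
	pthread_mutex_unlock(&lock);

	free(old);			/* safe: no reader can reach it anymore */
}

/* show side: read under the same lock so the string cannot be freed mid-read */
int override_show(char *out, size_t len)
{
	int n;

	pthread_mutex_lock(&lock);
	n = snprintf(out, len, "%s\n", override ? override : "");
	pthread_mutex_unlock(&lock);
	return n;
}
```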
@@ -142,6 +142,39 @@ static void tpm_devs_release(struct device *dev)
 	put_device(&chip->dev);
 }
 
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+	struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+		down_write(&chip->ops_sem);
+		tpm2_shutdown(chip, TPM2_SU_CLEAR);
+		chip->ops = NULL;
+		up_write(&chip->ops_sem);
+	}
+	/* Allow bus- and device-specific code to run. Note: since chip->ops
+	 * is NULL, more-specific shutdown code will not be able to issue TPM
+	 * commands.
+	 */
+	if (dev->bus && dev->bus->shutdown)
+		dev->bus->shutdown(dev);
+	else if (dev->driver && dev->driver->shutdown)
+		dev->driver->shutdown(dev);
+	return 0;
+}
+
 /**
  * tpm_chip_alloc() - allocate a new struct tpm_chip instance
  * @pdev: device to which the chip is associated
@@ -181,6 +214,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
 	device_initialize(&chip->devs);
 
 	chip->dev.class = tpm_class;
+	chip->dev.class->shutdown = tpm_class_shutdown;
 	chip->dev.release = tpm_dev_release;
 	chip->dev.parent = pdev;
 	chip->dev.groups = chip->groups;

@@ -36,9 +36,10 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 	ssize_t err;
 	int i, rc;
 	char *str = buf;
-
 	struct tpm_chip *chip = to_tpm_chip(dev);
 
+	memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+
 	tpm_cmd.header.in = tpm_readpubek_header;
 	err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
 			       READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
@@ -294,6 +295,9 @@ static const struct attribute_group tpm_dev_group = {
 
 void tpm_sysfs_add_device(struct tpm_chip *chip)
 {
+	/* XXX: If you wish to remove this restriction, you must first update
+	 * tpm_sysfs to explicitly lock chip->ops.
+	 */
 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
 		return;

@@ -1475,8 +1475,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct ablkcipher_edesc *edesc;

@@ -1931,6 +1931,11 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto out;
 	}
 
+	if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+		ret = -EINVAL;
+		goto release_qp;
+	}
+
 	attr->qp_state = cmd->base.qp_state;
 	attr->cur_qp_state = cmd->base.cur_qp_state;
 	attr->path_mtu = cmd->base.path_mtu;
@@ -2541,6 +2546,9 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
+		return -EINVAL;
+
 	INIT_UDATA(&udata, buf + sizeof(cmd),
 		   (unsigned long)cmd.response + sizeof(resp),
 		   in_len - sizeof(cmd), out_len - sizeof(resp));

@@ -2915,6 +2915,7 @@ static int __init comedi_init(void)
 	dev = comedi_alloc_board_minor(NULL);
 	if (IS_ERR(dev)) {
 		comedi_cleanup_board_minors();
+		class_destroy(comedi_class);
 		cdev_del(&comedi_cdev);
 		unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
 					 COMEDI_NUM_MINORS);

@@ -513,6 +513,9 @@ static int vnt_start(struct ieee80211_hw *hw)
 		goto free_all;
 	}
 
+	if (vnt_key_init_table(priv))
+		goto free_all;
+
 	priv->int_interval = 1;  /* bInterval is set to 1 */
 
 	vnt_int_start_interrupt(priv);

@@ -1340,29 +1340,13 @@ static int imx_startup(struct uart_port *port)
 	imx_enable_ms(&sport->port);
 
 	/*
-	 * If the serial port is opened for reading start RX DMA immediately
-	 * instead of waiting for RX FIFO interrupts. In our iMX53 the average
-	 * delay for the first reception dropped from approximately 35000
-	 * microseconds to 1000 microseconds.
+	 * Start RX DMA immediately instead of waiting for RX FIFO interrupts.
+	 * In our iMX53 the average delay for the first reception dropped from
+	 * approximately 35000 microseconds to 1000 microseconds.
 	 */
 	if (sport->dma_is_enabled) {
-		struct tty_struct *tty = sport->port.state->port.tty;
-		struct tty_file_private *file_priv;
-		int readcnt = 0;
-
-		spin_lock(&tty->files_lock);
-
-		if (!list_empty(&tty->tty_files))
-			list_for_each_entry(file_priv, &tty->tty_files, list)
-				if (!(file_priv->file->f_flags & O_WRONLY))
-					readcnt++;
-
-		spin_unlock(&tty->files_lock);
-
-		if (readcnt > 0) {
-			imx_disable_rx_int(sport);
-			start_rx_dma(sport);
-		}
+		imx_disable_rx_int(sport);
+		start_rx_dma(sport);
 	}
 
 	spin_unlock_irqrestore(&sport->port.lock, flags);

@@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Blackmagic Design UltraStudio SDI */
 	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
 
+	/* Hauppauge HVR-950q */
+	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
 	/* INTEL VALUE SSD */
 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },

@@ -416,6 +416,8 @@ static void usb_release_dev(struct device *dev)
 
 	usb_destroy_configuration(udev);
 	usb_release_bos_descriptor(udev);
+	if (udev->parent)
+		of_node_put(dev->of_node);
 	usb_put_hcd(hcd);
 	kfree(udev->product);
 	kfree(udev->manufacturer);

@@ -230,7 +230,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
 
 	dwc3_data->syscfg_reg_off = res->start;
 
-	dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
+	dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
 		 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
 
 	dwc3_data->rstc_pwrdn =

@@ -1215,12 +1215,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 		return -ESHUTDOWN;
 	}
 
-	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
-				&req->request, req->dep->name)) {
-		dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
-				dep->name, &req->request, req->dep->name);
+	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
+				&req->request, req->dep->name))
 		return -EINVAL;
-	}
 
 	pm_runtime_get(dwc->dev);
@@ -1396,7 +1393,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
 		}
 		goto out1;
 	}
-	dev_err(dwc->dev, "request %p was not queued to %s\n",
+	dev_err(dwc->dev, "request %pK was not queued to %s\n",
 			request, ep->name);
 	ret = -EINVAL;
 	goto out0;

@@ -1461,6 +1461,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 			t2 |= PORT_WKOC_E | PORT_WKCONN_E;
 			t2 &= ~PORT_WKDISC_E;
 		}
+		if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+		    (hcd->speed < HCD_USB3))
+			t2 &= ~PORT_WAKE_BITS;
 	} else
 		t2 &= ~PORT_WAKE_BITS;

@@ -54,6 +54,11 @@
 #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
 #define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
 
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4			0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3			0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
+
 static const char hcd_name[] = "xhci_hcd";
 
 static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -135,6 +140,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
+	if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+	    ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+	     (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+	     (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+	     (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+		xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
 		xhci->quirks |= XHCI_LPM_SUPPORT;
 		xhci->quirks |= XHCI_INTEL_HOST;

@@ -1819,6 +1819,7 @@ struct xhci_hcd {
 /* For controller with a broken Port Disable implementation */
 #define XHCI_BROKEN_PORT_PED	(1 << 25)
 #define XHCI_LIMIT_ENDPOINT_INTERVAL_7	(1 << 26)
+#define XHCI_U2_DISABLE_WAKE	(1 << 27)
 
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;

@@ -141,6 +141,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */

@@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
 	},
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },

@@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x9056)},	/* Sierra Wireless Modem */
 	{DEVICE_SWI(0x1199, 0x9060)},	/* Sierra Wireless Modem */
 	{DEVICE_SWI(0x1199, 0x9061)},	/* Sierra Wireless Modem */
+	{DEVICE_SWI(0x1199, 0x9063)},	/* Sierra Wireless EM7305 */
 	{DEVICE_SWI(0x1199, 0x9070)},	/* Sierra Wireless MC74xx */
 	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
 	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */

@@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
 		kmem_cache_free(stub_priv_cache, priv);
 
 		kfree(urb->transfer_buffer);
+		urb->transfer_buffer = NULL;
+
 		kfree(urb->setup_packet);
+		urb->setup_packet = NULL;
+
 		usb_free_urb(urb);
 	}
 }

@@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv)
 	struct urb *urb = priv->urb;
 
 	kfree(urb->setup_packet);
+	urb->setup_packet = NULL;
+
 	kfree(urb->transfer_buffer);
+	urb->transfer_buffer = NULL;
+
 	list_del(&priv->list);
 	kmem_cache_free(stub_priv_cache, priv);
 	usb_free_urb(urb);

@@ -299,17 +299,7 @@ static int process_msg(void)
 	mutex_lock(&xb_write_mutex);
 	list_for_each_entry(req, &xs_reply_list, list) {
 		if (req->msg.req_id == state.msg.req_id) {
-			if (req->state == xb_req_state_wait_reply) {
-				req->msg.type = state.msg.type;
-				req->msg.len = state.msg.len;
-				req->body = state.body;
-				req->state = xb_req_state_got_reply;
-				list_del(&req->list);
-				req->cb(req);
-			} else {
-				list_del(&req->list);
-				kfree(req);
-			}
+			list_del(&req->list);
 			err = 0;
 			break;
 		}
@@ -317,6 +307,15 @@ static int process_msg(void)
 	mutex_unlock(&xb_write_mutex);
 	if (err)
 		goto out;
 
+	if (req->state == xb_req_state_wait_reply) {
+		req->msg.type = state.msg.type;
+		req->msg.len = state.msg.len;
+		req->body = state.body;
+		req->state = xb_req_state_got_reply;
+		req->cb(req);
+	} else
+		kfree(req);
 	}
 
 	mutex_unlock(&xs_response_mutex);

@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
 	int ret;
 
 	ret = kstrtoull(skip_spaces(buf), 0, &val);
-	if (!ret || val >= clusters)
+	if (ret || val >= clusters)
 		return -EINVAL;
 
 	atomic64_set(&sbi->s_resv_clusters, val);

@@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = {
 
 static struct rhashtable gl_hash_table;
 
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
-	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
 	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl)
 		kfree(gl->gl_lksb.sb_lvbptr);
 		kmem_cache_free(gfs2_glock_cachep, gl);
 	}
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_glock_wait);
 }

@@ -374,6 +374,7 @@ struct gfs2_glock {
 			loff_t end;
 		} gl_vm;
 	};
+	struct rcu_head gl_rcu;
 	struct rhash_head gl_node;
 };

@@ -67,7 +67,7 @@ struct proc_inode {
 	struct proc_dir_entry *pde;
 	struct ctl_table_header *sysctl;
 	struct ctl_table *sysctl_entry;
-	struct list_head sysctl_inodes;
+	struct hlist_node sysctl_inodes;
 	const struct proc_ns_operations *ns_ops;
 	struct inode vfs_inode;
 };

@@ -191,7 +191,7 @@ static void init_header(struct ctl_table_header *head,
 	head->set = set;
 	head->parent = NULL;
 	head->node = node;
-	INIT_LIST_HEAD(&head->inodes);
+	INIT_HLIST_HEAD(&head->inodes);
 	if (node) {
 		struct ctl_table *entry;
 		for (entry = table; entry->procname; entry++, node++)
@@ -261,25 +261,42 @@ static void unuse_table(struct ctl_table_header *p)
 		complete(p->unregistering);
 }
 
-/* called under sysctl_lock */
 static void proc_sys_prune_dcache(struct ctl_table_header *head)
 {
-	struct inode *inode, *prev = NULL;
+	struct inode *inode;
 	struct proc_inode *ei;
+	struct hlist_node *node;
+	struct super_block *sb;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ei, &head->inodes, sysctl_inodes) {
-		inode = igrab(&ei->vfs_inode);
-		if (inode) {
-			rcu_read_unlock();
-			iput(prev);
-			prev = inode;
-			d_prune_aliases(inode);
+	for (;;) {
+		node = hlist_first_rcu(&head->inodes);
+		if (!node)
+			break;
+		ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
+		spin_lock(&sysctl_lock);
+		hlist_del_init_rcu(&ei->sysctl_inodes);
+		spin_unlock(&sysctl_lock);
+
+		inode = &ei->vfs_inode;
+		sb = inode->i_sb;
+		if (!atomic_inc_not_zero(&sb->s_active))
+			continue;
+		inode = igrab(inode);
+		rcu_read_unlock();
+		if (unlikely(!inode)) {
+			deactivate_super(sb);
 			rcu_read_lock();
+			continue;
 		}
+
+		d_prune_aliases(inode);
+		iput(inode);
+		deactivate_super(sb);
+
+		rcu_read_lock();
 	}
 	rcu_read_unlock();
-	iput(prev);
 }
 
 /* called under sysctl_lock, will reacquire if has to wait */
@@ -461,7 +478,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
 	}
 	ei->sysctl = head;
 	ei->sysctl_entry = table;
-	list_add_rcu(&ei->sysctl_inodes, &head->inodes);
+	hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
 	head->count++;
 	spin_unlock(&sysctl_lock);
 
@@ -489,7 +506,7 @@ out:
 void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
 {
 	spin_lock(&sysctl_lock);
-	list_del_rcu(&PROC_I(inode)->sysctl_inodes);
+	hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
 	if (!--head->count)
 		kfree_rcu(head, rcu);
 	spin_unlock(&sysctl_lock);

@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask poiter
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)					\
+	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
+	     (cpu) < nr_cpumask_bits;						\
+	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator

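The new `for_each_cpu_wrap()` starts at `@start`, scans to the end of the mask, then wraps to bit 0 and stops before crossing `@start` again. A minimal userspace sketch of that wrap logic, using a plain `unsigned long` bitmask instead of the kernel's `struct cpumask` (sizes and names here are illustrative, not kernel API):

```c
#include <stdio.h>

#define NBITS 8				/* stand-in for nr_cpumask_bits */

/* Mirror of cpumask_next_wrap(): return the next set bit after n, or NBITS
 * once the scan has wrapped back past start. */
static int next_wrap(int n, unsigned long mask, int start, int wrap)
{
	int next;
again:
	next = NBITS;
	for (int i = n + 1; i < NBITS; i++)
		if (mask & (1UL << i)) {
			next = i;
			break;
		}

	if (wrap && n < start && next >= start)
		return NBITS;		/* came back around to start: done */
	if (next >= NBITS) {		/* hit the end: wrap to bit 0 once */
		wrap = 1;
		n = -1;
		goto again;
	}
	return next;
}

int main(void)
{
	unsigned long mask = 0x56;	/* bits 1, 2, 4, 6 set */
	int start = 4;

	/* Equivalent of for_each_cpu_wrap(cpu, mask, start): prints 4 6 1 2 */
	for (int cpu = next_wrap(start - 1, mask, start, 0); cpu < NBITS;
	     cpu = next_wrap(cpu, mask, start, 1))
		printf("%d ", cpu);
	printf("\n");
	return 0;
}
```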
@@ -378,6 +378,7 @@ int subsys_virtual_register(struct bus_type *subsys,
 * @suspend:	Used to put the device to sleep mode, usually to a low power
 *		state.
 * @resume:	Used to bring the device from the sleep mode.
+ * @shutdown:	Called at shut-down time to quiesce the device.
 * @ns_type:	Callbacks so sysfs can detemine namespaces.
 * @namespace:	Namespace of the device belongs to this class.
 * @pm:		The default device power management operations of this class.
@@ -407,6 +408,7 @@ struct class {
 
 	int (*suspend)(struct device *dev, pm_message_t state);
 	int (*resume)(struct device *dev);
+	int (*shutdown)(struct device *dev);
 
 	const struct kobj_ns_type_operations *ns_type;
 	const void *(*namespace)(struct device *dev);

@@ -143,7 +143,7 @@ struct ctl_table_header
 	struct ctl_table_set *set;
 	struct ctl_dir *parent;
 	struct ctl_node *node;
-	struct list_head inodes; /* head for proc_inode->sysctl_inodes */
+	struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
 };
 
 struct ctl_dir {

@@ -565,9 +565,9 @@ extern void usb_ep0_reinit(struct usb_device *);
 	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
 
 #define EndpointRequest \
-	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
 #define EndpointOutRequest \
-	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
 
 /* class requests from the USB 2.0 hub spec, table 11-15 */
 #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request))

@@ -1253,8 +1253,10 @@ retry:
 
 		timeo = MAX_SCHEDULE_TIMEOUT;
 		ret = netlink_attachskb(sock, nc, &timeo, NULL);
-		if (ret == 1)
+		if (ret == 1) {
+			sock = NULL;
 			goto retry;
+		}
 		if (ret) {
 			sock = NULL;
 			nc = NULL;

@@ -231,8 +231,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
 
  out_nolock:
 	list_del(&waiter.list);
-	if (!list_empty(&sem->wait_list))
-		__rwsem_do_wake(sem, 1);
+	if (!list_empty(&sem->wait_list) && sem->count >= 0)
+		__rwsem_do_wake(sem, 0);
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return -EINTR;

@@ -1381,7 +1381,6 @@ static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
 static unsigned long capacity_of(int cpu);
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -2469,7 +2468,8 @@ void task_numa_work(struct callback_head *work)
 		return;
 
 
-	down_read(&mm->mmap_sem);
+	if (!down_read_trylock(&mm->mmap_sem))
+		return;
 	vma = find_vma(mm, start);
 	if (!vma) {
 		reset_ptenuma_scan(p);

@@ -2584,6 +2584,60 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
 		}
 	}
 }
+
+/*
+ * Can a task be moved from prev_cpu to this_cpu without causing a load
+ * imbalance that would trigger the load balancer?
+ */
+static inline bool numa_wake_affine(struct sched_domain *sd,
+				    struct task_struct *p, int this_cpu,
+				    int prev_cpu, int sync)
+{
+	struct numa_stats prev_load, this_load;
+	s64 this_eff_load, prev_eff_load;
+
+	update_numa_stats(&prev_load, cpu_to_node(prev_cpu));
+	update_numa_stats(&this_load, cpu_to_node(this_cpu));
+
+	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync) {
+		unsigned long current_load = task_h_load(current);
+
+		if (this_load.load > current_load)
+			this_load.load -= current_load;
+		else
+			this_load.load = 0;
+	}
+
+	/*
+	 * In low-load situations, where this_cpu's node is idle due to the
+	 * sync cause above having dropped this_load.load to 0, move the task.
+	 * Moving to an idle socket will not create a bad imbalance.
+	 *
+	 * Otherwise check if the nodes are near enough in load to allow this
+	 * task to be woken on this_cpu's node.
+	 */
+	if (this_load.load > 0) {
+		unsigned long task_load = task_h_load(p);
+
+		this_eff_load = 100;
+		this_eff_load *= prev_load.compute_capacity;
+
+		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+		prev_eff_load *= this_load.compute_capacity;
+
+		this_eff_load *= this_load.load + task_load;
+		prev_eff_load *= prev_load.load - task_load;
+
+		return this_eff_load <= prev_eff_load;
+	}
+
+	return true;
+}
 #else
 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
@@ -2596,6 +2650,15 @@ static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 {
 }
+
+#ifdef CONFIG_SMP
+static inline bool numa_wake_affine(struct sched_domain *sd,
+				    struct task_struct *p, int this_cpu,
+				    int prev_cpu, int sync)
+{
+	return true;
+}
+#endif /* !SMP */
 #endif /* CONFIG_NUMA_BALANCING */
 
 static void

@@ -2982,8 +3045,7 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
 * differential update where we store the last value we propagated. This in
 * turn allows skipping updates if the differential is 'small'.
 *
- * Updating tg's load_avg is necessary before update_cfs_share() (which is
- * done) and effective_load() (which is not done because it is too costly).
+ * Updating tg's load_avg is necessary before update_cfs_share().
 */
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 {

@@ -5215,126 +5277,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return 0;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-/*
- * effective_load() calculates the load change as seen from the root_task_group
- *
- * Adding load to a group doesn't make a group heavier, but can cause movement
- * of group shares between cpus. Assuming the shares were perfectly aligned one
- * can calculate the shift in shares.
- *
- * Calculate the effective load difference if @wl is added (subtracted) to @tg
- * on this @cpu and results in a total addition (subtraction) of @wg to the
- * total group weight.
- *
- * Given a runqueue weight distribution (rw_i) we can compute a shares
- * distribution (s_i) using:
- *
- *   s_i = rw_i / \Sum rw_j						(1)
- *
- * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
- * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
- * shares distribution (s_i):
- *
- *   rw_i = {   2,   4,   1,   0 }
- *   s_i  = { 2/7, 4/7, 1/7,   0 }
- *
- * As per wake_affine() we're interested in the load of two CPUs (the CPU the
- * task used to run on and the CPU the waker is running on), we need to
- * compute the effect of waking a task on either CPU and, in case of a sync
- * wakeup, compute the effect of the current task going to sleep.
- *
- * So for a change of @wl to the local @cpu with an overall group weight change
- * of @wl we can compute the new shares distribution (s'_i) using:
- *
- *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
- *
- * Suppose we're interested in CPUs 0 and 1, and want to compute the load
- * differences in waking a task to CPU 0. The additional task changes the
- * weight and shares distributions like:
- *
- *   rw'_i = {   3,   4,   1,   0 }
- *   s'_i  = { 3/8, 4/8, 1/8,   0 }
- *
- * We can then compute the difference in effective weight by using:
- *
- *   dw_i = S * (s'_i - s_i)						(3)
- *
- * Where 'S' is the group weight as seen by its parent.
- *
- * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
- * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
- * 4/7) times the weight of the group.
- */
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	struct sched_entity *se = tg->se[cpu];
-
-	if (!tg->parent)	/* the trivial, non-cgroup case */
-		return wl;
-
-	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = se->my_q;
-		long W, w = cfs_rq_load_avg(cfs_rq);
-
-		tg = cfs_rq->tg;
-
-		/*
-		 * W = @wg + \Sum rw_j
-		 */
-		W = wg + atomic_long_read(&tg->load_avg);
-
-		/* Ensure \Sum rw_j >= rw_i */
-		W -= cfs_rq->tg_load_avg_contrib;
-		W += w;
-
-		/*
-		 * w = rw_i + @wl
-		 */
-		w += wl;
-
-		/*
-		 * wl = S * s'_i; see (2)
-		 */
-		if (W > 0 && w < W)
-			wl = (w * (long)scale_load_down(tg->shares)) / W;
-		else
-			wl = scale_load_down(tg->shares);
-
-		/*
-		 * Per the above, wl is the new se->load.weight value; since
-		 * those are clipped to [MIN_SHARES, ...) do so now. See
-		 * calc_cfs_shares().
-		 */
-		if (wl < MIN_SHARES)
-			wl = MIN_SHARES;
-
-		/*
-		 * wl = dw_i = S * (s'_i - s_i); see (3)
-		 */
-		wl -= se->avg.load_avg;
-
-		/*
-		 * Recursively apply this logic to all parent groups to compute
-		 * the final effective load change on the root group. Since
-		 * only the @tg group gets extra weight, all parent groups can
-		 * only redistribute existing shares. @wl is the shift in shares
-		 * resulting from this level per the above.
-		 */
-		wg = 0;
-	}
-
-	return wl;
-}
-#else
-
-static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
-{
-	return wl;
-}
-
-#endif
-
 static void record_wakee(struct task_struct *p)
 {
 	/*

@@ -5385,67 +5327,25 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
-	s64 this_load, load;
-	s64 this_eff_load, prev_eff_load;
-	int idx, this_cpu;
-	struct task_group *tg;
-	unsigned long weight;
-	int balanced;
-
-	idx	  = sd->wake_idx;
-	this_cpu  = smp_processor_id();
-	load	  = source_load(prev_cpu, idx);
-	this_load = target_load(this_cpu, idx);
+	int this_cpu = smp_processor_id();
+	bool affine = false;
 
 	/*
-	 * If sync wakeup then subtract the (maximum possible)
-	 * effect of the currently running task from the load
-	 * of the current CPU:
+	 * Common case: CPUs are in the same socket, and select_idle_sibling()
+	 * will do its thing regardless of what we return:
 	 */
-	if (sync) {
-		tg = task_group(current);
-		weight = current->se.avg.load_avg;
-
-		this_load += effective_load(tg, this_cpu, -weight, -weight);
-		load += effective_load(tg, prev_cpu, 0, -weight);
-	}
-
-	tg = task_group(p);
-	weight = p->se.avg.load_avg;
-
-	/*
-	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
-	 * due to the sync cause above having dropped this_load to 0, we'll
-	 * always have an imbalance, but there's really nothing you can do
-	 * about that, so that's good too.
-	 *
-	 * Otherwise check if either cpus are near enough in load to allow this
-	 * task to be woken on this_cpu.
-	 */
-	this_eff_load = 100;
-	this_eff_load *= capacity_of(prev_cpu);
-
-	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-	prev_eff_load *= capacity_of(this_cpu);
-
-	if (this_load > 0) {
-		this_eff_load *= this_load +
-			effective_load(tg, this_cpu, weight, weight);
-
-		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
-	}
-
-	balanced = this_eff_load <= prev_eff_load;
+	if (cpus_share_cache(prev_cpu, this_cpu))
+		affine = true;
+	else
+		affine = numa_wake_affine(sd, p, this_cpu, prev_cpu, sync);
 
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
+	if (affine) {
+		schedstat_inc(sd->ttwu_move_affine);
+		schedstat_inc(p->se.statistics.nr_wakeups_affine);
+	}
 
-	if (!balanced)
-		return 0;
-
-	schedstat_inc(sd->ttwu_move_affine);
-	schedstat_inc(p->se.statistics.nr_wakeups_affine);
-
-	return 1;
+	return affine;
 }
 
 static inline int task_util(struct task_struct *p);

@@ -5640,43 +5540,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-	int next;
-
-again:
-	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-	if (*wrapped) {
-		if (next >= start)
-			return nr_cpumask_bits;
-	} else {
-		if (next >= nr_cpumask_bits) {
-			*wrapped = 1;
-			n = -1;
-			goto again;
-		}
-	}
-
-	return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)				\
-	for ((wrap) = 0, (cpu) = (start)-1;					\
-	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
-	     (cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)

@@ -5736,7 +5599,7 @@ unlock:
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-	int core, cpu, wrap;
+	int core, cpu;
 
 	if (!static_branch_likely(&sched_smt_present))
 		return -1;
@@ -5746,7 +5609,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 
 	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
-	for_each_cpu_wrap(core, cpus, target, wrap) {
+	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {

@@ -5809,27 +5672,38 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct sched_domain *this_sd;
-	u64 avg_cost, avg_idle = this_rq()->avg_idle;
+	u64 avg_cost, avg_idle;
 	u64 time, cost;
 	s64 delta;
-	int cpu, wrap;
+	int cpu, nr = INT_MAX;
 
 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 	if (!this_sd)
 		return -1;
 
-	avg_cost = this_sd->avg_scan_cost;
-
 	/*
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particularly is sensitive here.
 	 */
-	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
+	avg_idle = this_rq()->avg_idle / 512;
+	avg_cost = this_sd->avg_scan_cost + 1;
+
+	if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
 		return -1;
 
+	if (sched_feat(SIS_PROP)) {
+		u64 span_avg = sd->span_weight * avg_idle;
+		if (span_avg > 4*avg_cost)
+			nr = div_u64(span_avg, avg_cost);
+		else
+			nr = 4;
+	}
+
 	time = local_clock();
 
-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+		if (!--nr)
+			return -1;
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))

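The SIS_PROP branch above bounds the idle scan in proportion to recent idleness: roughly nr = span_weight * (avg_idle / 512) / (avg_scan_cost + 1), floored at 4 probes. A small standalone sketch of that arithmetic with made-up input numbers (not values from the patch):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative inputs: a 32-CPU LLC domain, 200 us average idle
	 * time, 120 ns average cost per scanned CPU. */
	uint64_t span_weight = 32;
	uint64_t avg_idle = 200000 / 512;	/* pre-scaled, as in the patch */
	uint64_t avg_cost = 120 + 1;		/* +1 guards against div-by-zero */
	int nr;

	uint64_t span_avg = span_weight * avg_idle;
	if (span_avg > 4 * avg_cost)
		nr = span_avg / avg_cost;	/* here: 32 * 390 / 121 = 103 */
	else
		nr = 4;				/* never scan fewer than 4 CPUs */

	printf("scan at most %d CPUs\n", nr);
	return 0;
}
```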
@@ -6011,11 +5885,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 	if (affine_sd) {
 		sd = NULL; /* Prefer wake_affine over balance flags */
-		if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
+		if (cpu == prev_cpu)
+			goto pick_cpu;
+
+		if (wake_affine(affine_sd, p, prev_cpu, sync))
 			new_cpu = cpu;
 	}
 
 	if (!sd) {
+ pick_cpu:
 		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
 			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 

@@ -6686,6 +6564,10 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 	if (dst_nid == p->numa_preferred_nid)
 		return 0;
 
+	/* Leaving a core idle is often worse than degrading locality. */
+	if (env->idle != CPU_NOT_IDLE)
+		return -1;
+
 	if (numa_group) {
 		src_faults = group_faults(p, src_nid);
 		dst_faults = group_faults(p, dst_nid);

@@ -55,6 +55,7 @@ SCHED_FEAT(TTWU_QUEUE, true)
 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
 */
 SCHED_FEAT(SIS_AVG_CPU, false)
+SCHED_FEAT(SIS_PROP, true)
 
 /*
 * Issue a WARN when we do multiple update_rq_clock() calls

@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**