Compare commits
20 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6beb1be0ea | |
| | 60e102ac73 | |
| | beb9294315 | |
| | 0fe6a2bc2f | |
| | a80d0c3c09 | |
| | 7e6d47e22d | |
| | eef0a12514 | |
| | 65c27e2766 | |
| | f5fa9283cf | |
| | f3e0e136d4 | |
| | f22ede939f | |
| | 6f21a69396 | |
| | f5bc40e890 | |
| | d5f8887fc2 | |
| | 28803de321 | |
| | b6842aabed | |
| | 5a2851884a | |
| | cec64fecff | |
| | 2cb79d15c7 | |
| | 65879edf74 | |
Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 12
-SUBLEVEL = 0
+SUBLEVEL = 1
 EXTRAVERSION =
 NAME = One Giant Leap for Frogkind
 
@@ -90,8 +90,10 @@ int pwm_channel_alloc(int index, struct pwm_channel *ch)
 	unsigned long flags;
 	int status = 0;
 
-	/* insist on PWM init, with this signal pinned out */
-	if (!pwm || !(pwm->mask & 1 << index))
+	if (!pwm)
+		return -EPROBE_DEFER;
+
+	if (!(pwm->mask & 1 << index))
 		return -ENODEV;
 
 	if (index < 0 || index >= PWM_NCHAN || !ch)
@@ -1599,7 +1599,8 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 	flits = skb_transport_offset(skb) / 8;
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
 	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
-			     skb->tail - skb->transport_header,
+			     skb_tail_pointer(skb) -
+			     skb_transport_header(skb),
 			     adap->pdev);
 	if (need_skb_unmap()) {
 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
@@ -1691,7 +1691,7 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
 		vp_oper->vlan_idx = NO_INDX;
 	}
 	if (NO_INDX != vp_oper->mac_idx) {
-		__mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
+		__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
 		vp_oper->mac_idx = NO_INDX;
 	}
 }
@@ -1118,11 +1118,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
 {
 	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
 
-	mutex_lock(&vi->config_lock);
-
-	if (!vi->config_enable)
-		goto done;
-
 	switch(action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
@@ -1136,8 +1131,6 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
 		break;
 	}
 
-done:
-	mutex_unlock(&vi->config_lock);
 	return NOTIFY_OK;
 }
 
@@ -1699,6 +1692,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
 	struct virtnet_info *vi = vdev->priv;
 	int i;
 
+	unregister_hotcpu_notifier(&vi->nb);
+
 	/* Prevent config work handler from accessing the device */
 	mutex_lock(&vi->config_lock);
 	vi->config_enable = false;
@@ -1747,6 +1742,10 @@ static int virtnet_restore(struct virtio_device *vdev)
 	virtnet_set_queues(vi, vi->curr_queue_pairs);
 	rtnl_unlock();
 
+	err = register_hotcpu_notifier(&vi->nb);
+	if (err)
+		return err;
+
 	return 0;
 }
 #endif
@@ -163,6 +163,7 @@ struct xenvif {
 	unsigned long credit_usec;
 	unsigned long remaining_credit;
 	struct timer_list credit_timeout;
+	u64 credit_window_start;
 
 	/* Statistics */
 	unsigned long rx_gso_checksum_fixup;
@@ -312,8 +312,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->credit_bytes = vif->remaining_credit = ~0UL;
 	vif->credit_usec = 0UL;
 	init_timer(&vif->credit_timeout);
-	/* Initialize 'expires' now: it's used to track the credit window. */
-	vif->credit_timeout.expires = jiffies;
+	vif->credit_window_start = get_jiffies_64();
 
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
@@ -1185,9 +1185,8 @@ out:
 
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 {
-	unsigned long now = jiffies;
-	unsigned long next_credit =
-		vif->credit_timeout.expires +
+	u64 now = get_jiffies_64();
+	u64 next_credit = vif->credit_window_start +
 		msecs_to_jiffies(vif->credit_usec / 1000);
 
 	/* Timer could already be pending in rare cases. */
@@ -1195,8 +1194,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 		return true;
 
 	/* Passed the point where we can replenish credit? */
-	if (time_after_eq(now, next_credit)) {
-		vif->credit_timeout.expires = now;
+	if (time_after_eq64(now, next_credit)) {
+		vif->credit_window_start = now;
 		tx_add_credit(vif);
 	}
 
@@ -1208,6 +1207,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 			tx_credit_callback;
 		mod_timer(&vif->credit_timeout,
 			  next_credit);
+		vif->credit_window_start = next_credit;
 
 		return true;
 	}
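The three xen-netback hunks above move the credit-window bookkeeping off the 32-bit `credit_timeout.expires` field onto a dedicated 64-bit `credit_window_start` compared with `time_after_eq64()`. A minimal userspace sketch of why a signed-difference comparison on a wide tick counter is the safe form (all tick values below are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

/* Signed-difference comparison in the spirit of the kernel's
 * time_after_eq64(): "has tick 'a' reached tick 'b'?" stays correct
 * even when the counter wraps between the two values. */
static int tick_after_eq64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) >= 0;
}

int main(void)
{
	/* Hypothetical numbers, chosen so next_credit lands just past a
	 * counter wrap while 'now' is still just before it. */
	uint64_t credit_window_start = UINT64_MAX - 50;
	uint64_t window_ticks        = 100;
	uint64_t now                 = UINT64_MAX - 10;   /* 40 ticks in */

	uint64_t next_credit = credit_window_start + window_ticks; /* wraps */

	/* A naive magnitude compare claims the window already expired;
	 * the wrap-safe form correctly reports 60 ticks still to go. */
	printf("naive     now >= next_credit : %d\n", now >= next_credit);
	printf("wrap-safe tick_after_eq64    : %d\n",
	       tick_after_eq64(now, next_credit));
	return 0;
}
```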
@@ -1033,6 +1033,7 @@ static int register_root_hub(struct usb_hcd *hcd)
 					dev_name(&usb_dev->dev), retval);
 			return retval;
 		}
+		usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
 	}
 
 	retval = usb_new_device (usb_dev);
@@ -135,7 +135,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
 	return usb_get_intfdata(hdev->actconfig->interface[0]);
 }
 
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
 {
 	/* USB 2.1 (and greater) devices indicate LPM support through
 	 * their USB 2.0 Extended Capabilities BOS descriptor.
@@ -156,6 +156,11 @@ static int usb_device_supports_lpm(struct usb_device *udev)
 			"Power management will be impacted.\n");
 		return 0;
 	}
+
+	/* udev is root hub */
+	if (!udev->parent)
+		return 1;
+
 	if (udev->parent->lpm_capable)
 		return 1;
 
@@ -35,6 +35,7 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
 		unsigned int size);
 extern int usb_get_bos_descriptor(struct usb_device *dev);
 extern void usb_release_bos_descriptor(struct usb_device *dev);
+extern int usb_device_supports_lpm(struct usb_device *udev);
 extern char *usb_cache_string(struct usb_device *udev, int index);
 extern int usb_set_configuration(struct usb_device *dev, int configuration);
 extern int usb_choose_configuration(struct usb_device *udev);
@@ -1376,6 +1376,23 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
 	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
 	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -118,7 +118,7 @@ static const struct backlight_ops atmel_pwm_bl_ops = {
 	.update_status = atmel_pwm_bl_set_intensity,
 };
 
-static int __init atmel_pwm_bl_probe(struct platform_device *pdev)
+static int atmel_pwm_bl_probe(struct platform_device *pdev)
 {
 	struct backlight_properties props;
 	const struct atmel_pwm_bl_platform_data *pdata;
@@ -202,7 +202,7 @@ err_free_mem:
 	return retval;
 }
 
-static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
+static int atmel_pwm_bl_remove(struct platform_device *pdev)
 {
 	struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
 
@@ -220,10 +220,11 @@ static struct platform_driver atmel_pwm_bl_driver = {
 		.name = "atmel-pwm-bl",
 	},
 	/* REVISIT add suspend() and resume() */
-	.remove = __exit_p(atmel_pwm_bl_remove),
+	.probe = atmel_pwm_bl_probe,
+	.remove = atmel_pwm_bl_remove,
 };
 
-module_platform_driver_probe(atmel_pwm_bl_driver, atmel_pwm_bl_probe);
+module_platform_driver(atmel_pwm_bl_driver);
 
 MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
 MODULE_DESCRIPTION("Atmel PWM backlight driver");
@@ -795,12 +795,21 @@ static int hvfb_remove(struct hv_device *hdev)
 }
 
 
+static DEFINE_PCI_DEVICE_TABLE(pci_stub_id_table) = {
+	{
+		.vendor = PCI_VENDOR_ID_MICROSOFT,
+		.device = PCI_DEVICE_ID_HYPERV_VIDEO,
+	},
+	{ /* end of list */ }
+};
+
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Synthetic Video Device GUID */
 	{HV_SYNTHVID_GUID},
 	{}
 };
 
+MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
 MODULE_DEVICE_TABLE(vmbus, id_table);
 
 static struct hv_driver hvfb_drv = {
@@ -810,14 +819,43 @@ static struct hv_driver hvfb_drv = {
 	.remove = hvfb_remove,
 };
 
+static int hvfb_pci_stub_probe(struct pci_dev *pdev,
+			       const struct pci_device_id *ent)
+{
+	return 0;
+}
+
+static void hvfb_pci_stub_remove(struct pci_dev *pdev)
+{
+}
+
+static struct pci_driver hvfb_pci_stub_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = pci_stub_id_table,
+	.probe = hvfb_pci_stub_probe,
+	.remove = hvfb_pci_stub_remove,
+};
 
 static int __init hvfb_drv_init(void)
 {
-	return vmbus_driver_register(&hvfb_drv);
+	int ret;
+
+	ret = vmbus_driver_register(&hvfb_drv);
+	if (ret != 0)
+		return ret;
+
+	ret = pci_register_driver(&hvfb_pci_stub_driver);
+	if (ret != 0) {
+		vmbus_driver_unregister(&hvfb_drv);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void __exit hvfb_drv_exit(void)
 {
+	pci_unregister_driver(&hvfb_pci_stub_driver);
 	vmbus_driver_unregister(&hvfb_drv);
 }
 
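The rewritten `hvfb_drv_init()`/`hvfb_drv_exit()` pair above registers the vmbus driver first, then the PCI stub driver, unwinds the first registration if the second fails, and tears down in reverse order on exit. A minimal userspace sketch of that register-then-unwind shape (the `register_*`/`unregister_*` names are placeholders, not real kernel APIs):

```c
#include <stdio.h>

/* Placeholder hooks standing in for the two real registrations
 * (vmbus driver and PCI stub driver) in the patch above. */
static int register_primary(void)      { puts("primary registered");     return 0; }
static void unregister_primary(void)   { puts("primary unregistered"); }
static int register_secondary(void)    { puts("secondary registered");   return 0; }
static void unregister_secondary(void) { puts("secondary unregistered"); }

/* Same shape as the patched init/exit pair: register in order,
 * unwind the first step if the second fails, tear down in reverse. */
static int demo_init(void)
{
	int ret;

	ret = register_primary();
	if (ret != 0)
		return ret;

	ret = register_secondary();
	if (ret != 0) {
		unregister_primary();
		return ret;
	}

	return 0;
}

static void demo_exit(void)
{
	unregister_secondary();
	unregister_primary();
}

int main(void)
{
	if (demo_init() == 0)
		demo_exit();
	return 0;
}
```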
@@ -165,6 +165,7 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
 static inline void rt6_clean_expires(struct rt6_info *rt)
 {
 	rt->rt6i_flags &= ~RTF_EXPIRES;
+	rt->dst.expires = 0;
 }
 
 static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
@@ -843,9 +843,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 		if (isspace(ch)) {
 			parser->buffer[parser->idx] = 0;
 			parser->cont = false;
-		} else {
+		} else if (parser->idx < parser->size - 1) {
 			parser->cont = true;
 			parser->buffer[parser->idx++] = ch;
+		} else {
+			ret = -EINVAL;
+			goto out;
 		}
 
 		*ppos += read;
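The trace_get_user() hunk above accepts a character only while `parser->idx < parser->size - 1`, keeping the last byte free for the NUL terminator and failing with `-EINVAL` instead of writing past the buffer. A small userspace sketch of the same bounded-append rule (buffer size and error value are illustrative):

```c
#include <stdio.h>

#define PARSER_SIZE 8
#define ERR_INVAL  (-22)   /* stand-in for -EINVAL */

/* Append one character, reserving the last slot for the NUL terminator,
 * mirroring the 'idx < size - 1' guard added in the patch. */
static int parser_append(char *buf, size_t size, size_t *idx, char ch)
{
	if (*idx < size - 1) {
		buf[(*idx)++] = ch;
		return 0;
	}
	return ERR_INVAL;   /* token too long for the buffer */
}

int main(void)
{
	char buf[PARSER_SIZE];
	size_t idx = 0;
	const char *input = "set_ftrace_filter";   /* longer than the buffer */

	for (const char *p = input; *p; p++) {
		if (parser_append(buf, sizeof(buf), &idx, *p) != 0) {
			printf("rejected after %zu chars\n", idx);
			break;
		}
	}
	buf[idx] = '\0';
	printf("stored: \"%s\"\n", buf);
	return 0;
}
```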
@@ -40,7 +40,7 @@ again:
 		struct iphdr _iph;
 ip:
 		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-		if (!iph)
+		if (!iph || iph->ihl < 5)
 			return false;
 
 		if (ip_is_fragment(iph))
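The flow-dissector hunk above also rejects headers with an impossible length field: `iph->ihl` counts 32-bit words, so any value below 5 cannot even cover the fixed 20-byte IPv4 header. A tiny sketch of that sanity check on the first header byte:

```c
#include <stdint.h>
#include <stdio.h>

/* The low nibble of the first IPv4 header byte is IHL, in 32-bit words.
 * The fixed header is 20 bytes, so any value below 5 is malformed. */
static int ipv4_header_len(uint8_t first_byte)
{
	uint8_t ihl = first_byte & 0x0f;

	if (ihl < 5)
		return -1;          /* reject, as the patched check does */
	return ihl * 4;             /* header length in bytes */
}

int main(void)
{
	printf("0x45 -> %d bytes\n", ipv4_header_len(0x45)); /* 20: plain header */
	printf("0x46 -> %d bytes\n", ipv4_header_len(0x46)); /* 24: with options */
	printf("0x42 -> %d\n",       ipv4_header_len(0x42)); /* -1: malformed ihl */
	return 0;
}
```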
@@ -2856,7 +2856,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * left edge of the send window.
 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
 	 */
-	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+	    flag & FLAG_ACKED)
 		seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
 
 	if (seq_rtt < 0)
@@ -2871,14 +2872,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 }
 
 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
-static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
+static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 seq_rtt = -1;
 
-	if (tp->lsndtime && !tp->total_retrans)
-		seq_rtt = tcp_time_stamp - tp->lsndtime;
-	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+	if (synack_stamp && !tp->total_retrans)
+		seq_rtt = tcp_time_stamp - synack_stamp;
+
+	/* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
+	 * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
+	 */
+	if (!tp->srtt)
+		tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2981,6 +2987,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	s32 seq_rtt = -1;
 	s32 ca_seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
+	bool rtt_update;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3057,14 +3064,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
 
-	if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
-	    (flag & FLAG_ACKED))
-		tcp_rearm_rto(sk);
+	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
 
 	if (flag & FLAG_ACKED) {
 		const struct tcp_congestion_ops *ca_ops
 			= inet_csk(sk)->icsk_ca_ops;
 
+		tcp_rearm_rto(sk);
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3103,6 +3109,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
 		}
+	} else if (skb && rtt_update && sack_rtt >= 0 &&
+		   sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+		/* Do not re-arm RTO if the sack RTT is measured from data sent
+		 * after when the head was last (re)transmitted. Otherwise the
+		 * timeout may continue to extend in loss recovery.
+		 */
+		tcp_rearm_rto(sk);
 	}
 
 #if FASTRETRANS_DEBUG > 0
@@ -5587,6 +5600,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
+	u32 synack_stamp;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5669,9 +5683,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * so release it.
 		 */
 		if (req) {
+			synack_stamp = tcp_rsk(req)->snt_synack;
 			tp->total_retrans = req->num_retrans;
 			reqsk_fastopen_remove(sk, req, false);
 		} else {
+			synack_stamp = tp->lsndtime;
 			/* Make sure socket is routed, for correct metrics. */
 			icsk->icsk_af_ops->rebuild_header(sk);
 			tcp_init_congestion_control(sk);
@@ -5694,7 +5710,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
 		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-		tcp_synack_rtt_meas(sk, req);
+		tcp_synack_rtt_meas(sk, synack_stamp);
 
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -18,6 +18,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 				netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	unsigned int sum_truesize = 0;
 	struct tcphdr *th;
 	unsigned int thlen;
 	unsigned int seq;
@@ -102,13 +103,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 		if (copy_destructor) {
 			skb->destructor = gso_skb->destructor;
 			skb->sk = gso_skb->sk;
-			/* {tcp|sock}_wfree() use exact truesize accounting :
-			 * sum(skb->truesize) MUST be exactly be gso_skb->truesize
-			 * So we account mss bytes of 'true size' for each segment.
-			 * The last segment will contain the remaining.
-			 */
-			skb->truesize = mss;
-			gso_skb->truesize -= mss;
+			sum_truesize += skb->truesize;
 		}
 		skb = skb->next;
 		th = tcp_hdr(skb);
@@ -125,7 +120,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 	if (copy_destructor) {
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
-		swap(gso_skb->truesize, skb->truesize);
+		sum_truesize += skb->truesize;
+		atomic_add(sum_truesize - gso_skb->truesize,
+			   &skb->sk->sk_wmem_alloc);
 	}
 
 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
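The two tcp_tso_segment() hunks above change the truesize accounting: instead of charging a fixed `mss` of truesize to every segment, each segment keeps its own `truesize`, the values are summed in `sum_truesize`, and one `atomic_add()` of the difference against the original skb settles the socket's memory charge. A minimal userspace sketch of that sum-then-settle idea (the byte counts are made up):

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical byte accounting: one large buffer split into
	 * segments whose individual overheads do not sum exactly to the
	 * amount the socket was originally billed for. */
	long original_charge = 6000;
	long segment_charge[] = { 1700, 1700, 1700, 1100 };
	long sum = 0;

	for (unsigned i = 0; i < sizeof(segment_charge) / sizeof(segment_charge[0]); i++)
		sum += segment_charge[i];

	/* One adjustment at the end keeps the per-socket total exact,
	 * mirroring the single atomic_add(sum_truesize - gso_skb->truesize). */
	long adjustment = sum - original_charge;

	printf("segments total %ld, original %ld, adjust by %+ld\n",
	       sum, original_charge, adjustment);
	return 0;
}
```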
@@ -1087,10 +1087,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 	if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
 		return NULL;
 
-	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
-		return dst;
+	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+		return NULL;
 
-	return NULL;
+	if (rt6_check_expired(rt))
+		return NULL;
+
+	return dst;
 }
 
 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
@@ -860,7 +860,6 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
 	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
 		return;
 
-	BUG_ON(asoc->peer.primary_path == NULL);
 	sctp_unhash_established(asoc);
 	sctp_association_free(asoc);
 }
@@ -740,9 +740,10 @@ static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
 static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
 {
 	int i;
+	int ordered_ca = get_channel_allocation_order(ca);
 	for (i = 0; i < 8; i++) {
-		if (i < channel_allocations[ca].channels)
-			map[i] = from_cea_slot((hdmi_channel_mapping[ca][i] >> 4) & 0x0f);
+		if (i < channel_allocations[ordered_ca].channels)
+			map[i] = from_cea_slot(hdmi_channel_mapping[ca][i] & 0x0f);
 		else
 			map[i] = 0;
 	}