Compare commits
30 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 2adb99055e | |
| | 1bb571727a | |
| | 8d0d41174e | |
| | d1753372c3 | |
| | 5fa8ad644f | |
| | 4f0ae08d58 | |
| | d93f4eb413 | |
| | 4f3bfdc30b | |
| | 1518d0b7a1 | |
| | ad9ed6ca1b | |
| | f5fa864237 | |
| | 6f416659e9 | |
| | 9a50e1ccd3 | |
| | 76596a3df1 | |
| | 609269d535 | |
| | f72e024e3a | |
| | 7abeff5a23 | |
| | 15ee2e0694 | |
| | b37d1b4106 | |
| | 711f4b02bc | |
| | 1cc7060e10 | |
| | a81954faaf | |
| | e7594b2f23 | |
| | 4e5ae24c64 | |
| | 2d7fc97cea | |
| | a2a1d0e361 | |
| | 0c900c3b8c | |
| | 187754cae9 | |
| | 67e0d5c77e | |
| | b43a04de4c | |
Makefile (2 changed lines)
```diff
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 15
-EXTRAVERSION =
+EXTRAVERSION = .2
 NAME=Sliding Snow Leopard
 
 # *DOCUMENTATION*
```
```diff
@@ -190,6 +190,8 @@ boot-$(CONFIG_REDWOOD_5) += embed_config.o
 boot-$(CONFIG_REDWOOD_6) += embed_config.o
 boot-$(CONFIG_8xx) += embed_config.o
 boot-$(CONFIG_8260) += embed_config.o
+boot-$(CONFIG_EP405) += embed_config.o
+boot-$(CONFIG_XILINX_ML300) += embed_config.o
 boot-$(CONFIG_BSEIP) += iic.o
 boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o
 boot-$(CONFIG_MV64X60) += misc-mv64x60.o
```
```diff
@@ -179,7 +179,7 @@ config HUGETLB_PAGE_SIZE_512K
 	bool "512K"
 
 config HUGETLB_PAGE_SIZE_64K
-	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512K
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
 	bool "64K"
 
 endchoice
```
```diff
@@ -1657,13 +1657,10 @@ ret_sys_call:
 	/* Check if force_successful_syscall_return()
 	 * was invoked.
 	 */
-	ldub	[%curptr + TI_SYS_NOERROR], %l0
-	brz,pt	%l0, 1f
-	 nop
-	ba,pt	%xcc, 80f
+	ldub	[%curptr + TI_SYS_NOERROR], %l2
+	brnz,a,pn	%l2, 80f
 	 stb	%g0, [%curptr + TI_SYS_NOERROR]
 
 1:
 	cmp	%o0, -ERESTART_RESTARTBLOCK
 	bgeu,pn	%xcc, 1f
 	 andcc	%l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
```
```diff
@@ -98,7 +98,7 @@ sys_call_table:
 	.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
 /*50*/	.word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
 	.word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
-/*60*/	.word sys_umask, sys_chroot, sys_newfstat, sys_stat64, sys_getpagesize
+/*60*/	.word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
 	.word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
 /*70*/	.word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
 	.word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
```
```diff
@@ -280,9 +280,9 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
  * Since STICK is constantly updating, we have to access it carefully.
  *
  * The sequence we use to read is:
- * 1) read low
- * 2) read high
- * 3) read low again, if it rolled over increment high by 1
+ * 1) read high
+ * 2) read low
+ * 3) read high again, if it rolled re-read both low and high again.
  *
  * Writing STICK safely is also tricky:
  * 1) write low to zero
@@ -295,18 +295,18 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
 static unsigned long __hbird_read_stick(void)
 {
 	unsigned long ret, tmp1, tmp2, tmp3;
-	unsigned long addr = HBIRD_STICK_ADDR;
+	unsigned long addr = HBIRD_STICK_ADDR+8;
 
-	__asm__ __volatile__("ldxa	[%1] %5, %2\n\t"
-			     "add	%1, 0x8, %1\n\t"
-			     "ldxa	[%1] %5, %3\n\t"
+	__asm__ __volatile__("ldxa	[%1] %5, %2\n"
+			     "1:\n\t"
 			     "sub	%1, 0x8, %1\n\t"
+			     "ldxa	[%1] %5, %3\n\t"
+			     "add	%1, 0x8, %1\n\t"
 			     "ldxa	[%1] %5, %4\n\t"
 			     "cmp	%4, %2\n\t"
-			     "blu,a,pn	%%xcc, 1f\n\t"
-			     " add	%3, 1, %3\n"
-			     "1:\n\t"
-			     "sllx	%3, 32, %3\n\t"
+			     "bne,a,pn	%%xcc, 1b\n\t"
+			     " mov	%4, %2\n\t"
+			     "sllx	%4, 32, %4\n\t"
 			     "or	%3, %4, %0\n\t"
 			     : "=&r" (ret), "=&r" (addr),
 			       "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
```
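The rewritten comment above spells out the new access order for the two-part STICK counter: read the high word, then the low word, then the high word again, and retry if the high word changed in between. As a rough illustration only, here is a small userspace C sketch of that retry pattern; the struct and function names are invented for the example and are not taken from the kernel source.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a 64-bit counter exposed as two 32-bit halves. */
struct split_counter {
    volatile uint32_t low;
    volatile uint32_t high;
};

/*
 * Read using the sequence the new comment describes:
 * 1) read high, 2) read low, 3) read high again; if the high word
 * changed while we were reading, the low word may belong to either
 * epoch, so re-read both.
 */
static uint64_t read_split_counter(const struct split_counter *c)
{
    uint32_t hi, lo, hi2;

    do {
        hi  = c->high;
        lo  = c->low;
        hi2 = c->high;
    } while (hi != hi2);

    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    struct split_counter c = { .low = 0xfffffff0u, .high = 41 };
    printf("counter = %llu\n", (unsigned long long)read_split_counter(&c));
    return 0;
}
```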
```diff
@@ -244,6 +244,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 					 get_order(size));
 
 	if (swiotlb) {
+		gfp &= ~(GFP_DMA32|GFP_DMA);
 		return
 		swiotlb_alloc_coherent(dev, size,
 			dma_handle,
```
```diff
@@ -2609,30 +2609,6 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
 	return 0;
 }
 
-/**
- * blk_attempt_remerge - attempt to remerge active head with next request
- * @q: The &request_queue_t belonging to the device
- * @rq: The head request (usually)
- *
- * Description:
- *    For head-active devices, the queue can easily be unplugged so quickly
- *    that proper merging is not done on the front request. This may hurt
- *    performance greatly for some devices. The block layer cannot safely
- *    do merging on that first request for these queues, but the driver can
- *    call this function and make it happen any way. Only the driver knows
- *    when it is safe to do so.
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	attempt_back_merge(q, rq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-EXPORT_SYMBOL(blk_attempt_remerge);
-
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
 	struct request *req;
```
```diff
@@ -1661,6 +1661,8 @@ int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port)
 	case MOXA_FIND_BOARD:
 	case MOXA_LOAD_C320B:
 	case MOXA_LOAD_CODE:
+		if (!capable(CAP_SYS_RAWIO))
+			return -EPERM;
 		break;
 	}
 
```
```diff
@@ -1332,8 +1332,6 @@ static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
 	if (cdrom_read_from_buffer(drive))
 		return ide_stopped;
 
-	blk_attempt_remerge(drive->queue, rq);
-
 	/* Clear the local sector buffer. */
 	info->nsectors_buffered = 0;
 
@@ -1874,14 +1872,6 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
 		return ide_stopped;
 	}
 
-	/*
-	 * for dvd-ram and such media, it's a really big deal to get
-	 * big writes all the time. so scour the queue and attempt to
-	 * remerge requests, often the plugging will not have had time
-	 * to do this properly
-	 */
-	blk_attempt_remerge(drive->queue, rq);
-
 	info->nsectors_buffered = 0;
 
 	/* use dma, if possible. we don't need to check more, since we
```
```diff
@@ -729,7 +729,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
 	       &msg->u.head[1]);
 	writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]);
 
-	if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT))
+	if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
 		status = SUCCESS;
 
 	return status;
```
```diff
@@ -515,6 +515,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
 			count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
 		}
 	}
+	spin_unlock_bh(&ax->buflock);
 
 	set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
 	actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
```
```diff
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.2"
+#define DRV_VERSION		"1.3"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
```
```diff
@@ -88,15 +88,14 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
 
 static int skge_up(struct net_device *dev);
 static int skge_down(struct net_device *dev);
+static void skge_phy_reset(struct skge_port *skge);
 static void skge_tx_clean(struct skge_port *skge);
 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static void genesis_get_stats(struct skge_port *skge, u64 *data);
 static void yukon_get_stats(struct skge_port *skge, u64 *data);
 static void yukon_init(struct skge_hw *hw, int port);
-static void yukon_reset(struct skge_hw *hw, int port);
 static void genesis_mac_init(struct skge_hw *hw, int port);
-static void genesis_reset(struct skge_hw *hw, int port);
 static void genesis_link_up(struct skge_port *skge);
 
 /* Avoid conditionals by using array */
```
```diff
@@ -276,10 +275,9 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	skge->autoneg = ecmd->autoneg;
 	skge->advertising = ecmd->advertising;
 
-	if (netif_running(dev)) {
-		skge_down(dev);
-		skge_up(dev);
-	}
+	if (netif_running(dev))
+		skge_phy_reset(skge);
+
 	return (0);
 }
 
```
```diff
@@ -399,6 +397,7 @@ static int skge_set_ring_param(struct net_device *dev,
 			       struct ethtool_ringparam *p)
 {
 	struct skge_port *skge = netdev_priv(dev);
+	int err;
 
 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
 	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
```
```diff
@@ -409,7 +408,11 @@ static int skge_set_ring_param(struct net_device *dev,
 
 	if (netif_running(dev)) {
 		skge_down(dev);
-		skge_up(dev);
+		err = skge_up(dev);
+		if (err)
+			dev_close(dev);
+		else
+			dev->set_multicast_list(dev);
 	}
 
 	return 0;
```
```diff
@@ -430,21 +433,11 @@ static void skge_set_msglevel(struct net_device *netdev, u32 value)
 static int skge_nway_reset(struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
-	struct skge_hw *hw = skge->hw;
-	int port = skge->port;
 
 	if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
 		return -EINVAL;
 
-	spin_lock_bh(&hw->phy_lock);
-	if (hw->chip_id == CHIP_ID_GENESIS) {
-		genesis_reset(hw, port);
-		genesis_mac_init(hw, port);
-	} else {
-		yukon_reset(hw, port);
-		yukon_init(hw, port);
-	}
-	spin_unlock_bh(&hw->phy_lock);
+	skge_phy_reset(skge);
 	return 0;
 }
 
```
```diff
@@ -516,10 +509,8 @@ static int skge_set_pauseparam(struct net_device *dev,
 	else
 		skge->flow_control = FLOW_MODE_NONE;
 
-	if (netif_running(dev)) {
-		skge_down(dev);
-		skge_up(dev);
-	}
+	if (netif_running(dev))
+		skge_phy_reset(skge);
 	return 0;
 }
 
```
```diff
@@ -1935,7 +1926,6 @@ static void yukon_link_down(struct skge_port *skge)
 
 	}
 
-	yukon_reset(hw, port);
 	skge_link_down(skge);
 
 	yukon_init(hw, port);
```
```diff
@@ -2019,6 +2009,22 @@ static void yukon_phy_intr(struct skge_port *skge)
 	/* XXX restart autonegotiation? */
 }
 
+static void skge_phy_reset(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+
+	netif_stop_queue(skge->netdev);
+	netif_carrier_off(skge->netdev);
+
+	spin_lock_bh(&hw->phy_lock);
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		genesis_mac_init(hw, port);
+	else
+		yukon_init(hw, port);
+	spin_unlock_bh(&hw->phy_lock);
+}
+
 /* Basic MII support */
 static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
```
```diff
@@ -2187,6 +2193,7 @@ static int skge_up(struct net_device *dev)
 	kfree(skge->rx_ring.start);
  free_pci_mem:
 	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+	skge->mem = NULL;
 
 	return err;
 }
```
```diff
@@ -2197,6 +2204,9 @@ static int skge_down(struct net_device *dev)
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
+	if (skge->mem == NULL)
+		return 0;
+
 	if (netif_msg_ifdown(skge))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
```
```diff
@@ -2253,6 +2263,7 @@ static int skge_down(struct net_device *dev)
 	kfree(skge->rx_ring.start);
 	kfree(skge->tx_ring.start);
 	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+	skge->mem = NULL;
 	return 0;
 }
 
```
```diff
@@ -2413,18 +2424,23 @@ static void skge_tx_timeout(struct net_device *dev)
 
 static int skge_change_mtu(struct net_device *dev, int new_mtu)
 {
-	int err = 0;
-	int running = netif_running(dev);
+	int err;
 
 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
 		return -EINVAL;
 
+	if (!netif_running(dev)) {
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+	skge_down(dev);
 
-	if (running)
-		skge_down(dev);
 	dev->mtu = new_mtu;
-	if (running)
-		skge_up(dev);
+
+	err = skge_up(dev);
+	if (err)
+		dev_close(dev);
 
 	return err;
 }
```
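The reworked skge_change_mtu() above follows a down / reconfigure / up sequence and closes the device if the re-open fails rather than leaving it half-configured. Below is a hedged, self-contained sketch of that pattern with a made-up device structure; it is only meant to show the control flow, not the driver's real API.

```c
#include <stdio.h>

/* Hypothetical device; stand-ins for the driver's real up/down/close. */
struct net_dev_sim {
    int mtu;
    int running;
};

static void dev_down(struct net_dev_sim *d)  { d->running = 0; }
static void dev_close(struct net_dev_sim *d) { d->running = 0; }

static int dev_up(struct net_dev_sim *d)
{
    if (d->mtu > 9000)          /* pretend very large MTUs fail to allocate */
        return -1;
    d->running = 1;
    return 0;
}

/* Apply a new MTU: trivial when stopped, down/up when running, close on failure. */
static int change_mtu(struct net_dev_sim *d, int new_mtu)
{
    int err;

    if (!d->running) {
        d->mtu = new_mtu;
        return 0;
    }

    dev_down(d);
    d->mtu = new_mtu;

    err = dev_up(d);
    if (err)
        dev_close(d);           /* don't leave a half-configured device around */

    return err;
}

int main(void)
{
    struct net_dev_sim d = { .mtu = 1500, .running = 1 };
    printf("change to 9000: %d (running=%d)\n", change_mtu(&d, 9000), d.running);
    return 0;
}
```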
```diff
@@ -3398,8 +3414,8 @@ static int skge_resume(struct pci_dev *pdev)
 		struct net_device *dev = hw->dev[i];
 		if (dev) {
 			netif_device_attach(dev);
-			if (netif_running(dev))
-				skge_up(dev);
+			if (netif_running(dev) && skge_up(dev))
+				dev_close(dev);
 		}
 	}
 	return 0;
```
```diff
@@ -259,7 +259,7 @@ static int hid_pid_upload_effect(struct input_dev *dev,
 int hid_pid_init(struct hid_device *hid)
 {
 	struct hid_ff_pid *private;
-	struct hid_input *hidinput = list_entry(&hid->inputs, struct hid_input, list);
+	struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
 	struct input_dev *input_dev = hidinput->input;
 
 	private = hid->ff_private = kzalloc(sizeof(struct hid_ff_pid), GFP_KERNEL);
```
```diff
@@ -403,7 +403,7 @@ static struct {
 	{ PCI_CHIP_MACH64GM, "3D RAGE XL (Mach64 GM, AGP)", 230, 83, 63, ATI_CHIP_264XL },
 	{ PCI_CHIP_MACH64GN, "3D RAGE XL (Mach64 GN, AGP)", 230, 83, 63, ATI_CHIP_264XL },
 	{ PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66/BGA)", 230, 83, 63, ATI_CHIP_264XL },
-	{ PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33MHz)", 230, 83, 63, ATI_CHIP_264XL },
+	{ PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33MHz)", 235, 83, 63, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL },
 	{ PCI_CHIP_MACH64GL, "3D RAGE XL (Mach64 GL, PCI)", 230, 83, 63, ATI_CHIP_264XL },
 	{ PCI_CHIP_MACH64GS, "3D RAGE XL (Mach64 GS, PCI)", 230, 83, 63, ATI_CHIP_264XL },
 
```
```diff
@@ -503,10 +503,16 @@ static int vgacon_doresize(struct vc_data *c,
 {
 	unsigned long flags;
 	unsigned int scanlines = height * c->vc_font.height;
-	u8 scanlines_lo, r7, vsync_end, mode;
+	u8 scanlines_lo, r7, vsync_end, mode, max_scan;
 
 	spin_lock_irqsave(&vga_lock, flags);
 
+	outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
+	max_scan = inb_p(vga_video_port_val);
+
+	if (max_scan & 0x80)
+		scanlines <<= 1;
+
 	outb_p(VGA_CRTC_MODE, vga_video_port_reg);
 	mode = inb_p(vga_video_port_val);
 
```
```diff
@@ -1131,7 +1131,7 @@ static void handle_attrs(struct super_block *s)
 			REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS);
 		}
 	} else if (le32_to_cpu(rs->s_flags) & reiserfs_attrs_cleared) {
-		REISERFS_SB(s)->s_mount_opt |= REISERFS_ATTRS;
+		REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ATTRS);
 	}
 }
 
```
```diff
@@ -1296,8 +1296,10 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
 		blk++;
 	}
 out:
-	if (len == towrite)
+	if (len == towrite) {
+		up(&inode->i_sem);
 		return err;
+	}
 	if (inode->i_size < off+len-towrite)
 		i_size_write(inode, off+len-towrite);
 	inode->i_version++;
```
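The ufs_quota_write() fix above adds the missing up(&inode->i_sem) on the early-return path, so the semaphore is released no matter how the function exits. One common way to make that property easy to audit is a single unlock point reached by goto; the sketch below shows the idea with a pthread mutex standing in for the kernel semaphore (all names are illustrative).

```c
#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;

/* Copy `written` bytes under the lock; every exit path goes through the
 * single unlock at the bottom, including the "nothing written" early exit
 * that corresponds to the case the patch fixes. */
static ssize_t locked_copy(char *dst, const char *src, size_t len, size_t written)
{
    ssize_t ret;
    size_t i;

    pthread_mutex_lock(&io_lock);

    if (written == 0) {
        ret = -1;               /* early exit, but still unlock below */
        goto out;
    }

    for (i = 0; i < written && i < len; i++)
        dst[i] = src[i];
    ret = (ssize_t)i;

out:
    pthread_mutex_unlock(&io_lock);
    return ret;
}

int main(void)
{
    char dst[8];
    printf("%zd\n", locked_copy(dst, "payload", sizeof(dst), 7));
    printf("%zd\n", locked_copy(dst, "payload", sizeof(dst), 0));
    return 0;
}
```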
```diff
@@ -255,8 +255,8 @@ extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head
 	((struct ufs_super_block_first *)((ubh)->bh[0]->b_data))
 
 #define ubh_get_usb_second(ubh) \
-	((struct ufs_super_block_second *)(ubh)-> \
-	bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask))
+	((struct ufs_super_block_second *)((ubh)->\
+	bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask)))
 
 #define ubh_get_usb_third(ubh) \
 	((struct ufs_super_block_third *)((ubh)-> \
```
```diff
@@ -559,7 +559,6 @@ extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void blk_end_sync_rq(struct request *rq);
-extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
```
```diff
@@ -927,7 +927,7 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  *	Increase the headroom of an empty &sk_buff by reducing the tail
  *	room. This is only allowed for an empty buffer.
  */
-static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+static inline void skb_reserve(struct sk_buff *skb, int len)
 {
 	skb->data += len;
 	skb->tail += len;
```
ipc/mqueue.c (59 changed lines)
```diff
@@ -598,15 +598,16 @@ static int mq_attr_ok(struct mq_attr *attr)
 static struct file *do_create(struct dentry *dir, struct dentry *dentry,
 			int oflag, mode_t mode, struct mq_attr __user *u_attr)
 {
-	struct file *filp;
 	struct mq_attr attr;
 	int ret;
 
-	if (u_attr != NULL) {
+	if (u_attr) {
+		ret = -EFAULT;
 		if (copy_from_user(&attr, u_attr, sizeof(attr)))
-			return ERR_PTR(-EFAULT);
+			goto out;
+		ret = -EINVAL;
 		if (!mq_attr_ok(&attr))
-			return ERR_PTR(-EINVAL);
+			goto out;
 		/* store for use during create */
 		dentry->d_fsdata = &attr;
 	}
```
```diff
@@ -615,13 +616,14 @@ static struct file *do_create(struct dentry *dir, struct dentry *dentry,
 	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
 	dentry->d_fsdata = NULL;
 	if (ret)
-		return ERR_PTR(ret);
+		goto out;
 
-	filp = dentry_open(dentry, mqueue_mnt, oflag);
-	if (!IS_ERR(filp))
-		dget(dentry);
+	return dentry_open(dentry, mqueue_mnt, oflag);
 
-	return filp;
+out:
+	dput(dentry);
+	mntput(mqueue_mnt);
+	return ERR_PTR(ret);
 }
 
 /* Opens existing queue */
```
```diff
@@ -629,20 +631,20 @@ static struct file *do_open(struct dentry *dentry, int oflag)
 {
 	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
 					MAY_READ | MAY_WRITE };
-	struct file *filp;
 
-	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
+	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
+		dput(dentry);
+		mntput(mqueue_mnt);
 		return ERR_PTR(-EINVAL);
+	}
 
-	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL))
+	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
+		dput(dentry);
+		mntput(mqueue_mnt);
 		return ERR_PTR(-EACCES);
+	}
 
-	filp = dentry_open(dentry, mqueue_mnt, oflag);
-
-	if (!IS_ERR(filp))
-		dget(dentry);
-
-	return filp;
+	return dentry_open(dentry, mqueue_mnt, oflag);
 }
 
 asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
```
```diff
@@ -670,17 +672,20 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
 
 	if (oflag & O_CREAT) {
 		if (dentry->d_inode) {	/* entry already exists */
-			filp = (oflag & O_EXCL) ? ERR_PTR(-EEXIST) :
-					do_open(dentry, oflag);
+			error = -EEXIST;
+			if (oflag & O_EXCL)
+				goto out;
+			filp = do_open(dentry, oflag);
 		} else {
 			filp = do_create(mqueue_mnt->mnt_root, dentry,
 						oflag, mode, u_attr);
 		}
-	} else
-		filp = (dentry->d_inode) ? do_open(dentry, oflag) :
-					ERR_PTR(-ENOENT);
-
-	dput(dentry);
+	} else {
+		error = -ENOENT;
+		if (!dentry->d_inode)
+			goto out;
+		filp = do_open(dentry, oflag);
+	}
 
 	if (IS_ERR(filp)) {
 		error = PTR_ERR(filp);
```
```diff
@@ -691,8 +696,10 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
 	fd_install(fd, filp);
 	goto out_upsem;
 
-out_putfd:
+out:
+	dput(dentry);
+	mntput(mqueue_mnt);
+out_putfd:
 	put_unused_fd(fd);
 out_err:
 	fd = error;
 
```
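The mqueue changes above route every failure through a single out: label so the dentry and vfsmount references are always dropped exactly once. The following sketch shows the same goto-based cleanup idiom with ordinary userspace resources; the function and resources are hypothetical, chosen only to mirror the shape of the error paths.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical two-step open: every failure path releases exactly what
 * was already acquired, mirroring the dput()/mntput() pairs above. */
static int open_thing(const char *path, char **namep, FILE **fpp)
{
    char *name = NULL;
    FILE *fp = NULL;
    int err = -1;

    name = strdup(path);
    if (!name)
        goto out;

    fp = fopen(path, "r");
    if (!fp)
        goto out_free;          /* undo step 1 only */

    *namep = name;
    *fpp = fp;
    return 0;

out_free:
    free(name);
out:
    return err;
}

int main(void)
{
    char *name;
    FILE *fp;

    if (open_thing("/definitely/not/there", &name, &fp) == 0) {
        fclose(fp);
        free(name);
    } else {
        printf("open_thing failed, resources already released\n");
    }
    return 0;
}
```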
```diff
@@ -29,7 +29,8 @@
 #include <linux/kthread.h>
 
 /*
- * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ * The per-CPU workqueue (if single thread, we always use the first
+ * possible cpu).
  *
  * The sequence counters are for flush_scheduled_work().  It wants to wait
  * until until all currently-scheduled works are completed, but it doesn't
@@ -69,6 +70,8 @@ struct workqueue_struct {
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
+static int singlethread_cpu;
+
 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_single_threaded(struct workqueue_struct *wq)
 {
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = any_online_cpu(cpu_online_map);
+			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = any_online_cpu(cpu_online_map);
+		cpu = singlethread_cpu;
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 
 	if (is_single_threaded(wq)) {
 		/* Always use first cpu's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	} else {
 		int cpu;
 
@@ -320,7 +323,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+		p = create_workqueue_thread(wq, singlethread_cpu);
 		if (!p)
 			destroy = 1;
 		else
@@ -374,7 +377,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+		cleanup_workqueue_thread(wq, singlethread_cpu);
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
@@ -543,6 +546,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 void init_workqueues(void)
 {
+	singlethread_cpu = first_cpu(cpu_possible_map);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);
```
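The workqueue patch above picks the CPU used by single-threaded workqueues once, in init_workqueues(), instead of calling any_online_cpu() at every enqueue, so queueing, flushing and teardown all agree on the same CPU. Here is a tiny sketch of that "compute once at init, reuse everywhere" idea; the bitmask and helper names are invented and are not the kernel's cpumask API.

```c
#include <stdio.h>

/* Stand-in for cpu_possible_map: a bitmask of CPUs this "system" could have. */
static unsigned int cpu_possible_mask = 0x0c;   /* CPUs 2 and 3 */

/* Cached once at init, like singlethread_cpu in the patch above. */
static int singlethread_cpu = -1;

static int first_cpu(unsigned int mask)
{
    for (int cpu = 0; cpu < 32; cpu++)
        if (mask & (1u << cpu))
            return cpu;
    return -1;
}

static void init_workqueues_sim(void)
{
    /* Pick the answer once; every later enqueue uses the same CPU. */
    singlethread_cpu = first_cpu(cpu_possible_mask);
}

static void queue_work_sim(const char *name)
{
    printf("queuing %s on cpu %d\n", name, singlethread_cpu);
}

int main(void)
{
    init_workqueues_sim();
    queue_work_sim("flush");
    queue_work_sim("reap");
    return 0;
}
```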
```diff
@@ -158,7 +158,7 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br)
 
 	list_for_each_entry(p, &br->port_list, list) {
 		if (addr == br_mac_zero ||
-		    compare_ether_addr(p->dev->dev_addr, addr) < 0)
+		    memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
 			addr = p->dev->dev_addr;
 
 	}
```
```diff
@@ -15,6 +15,7 @@
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_bridge/ebt_ip.h>
 #include <linux/ip.h>
+#include <net/ip.h>
 #include <linux/in.h>
 #include <linux/module.h>
 
@@ -51,6 +52,8 @@ static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
 	if (!(info->bitmask & EBT_IP_DPORT) &&
 	    !(info->bitmask & EBT_IP_SPORT))
 		return EBT_MATCH;
+	if (ntohs(ih->frag_off) & IP_OFFSET)
+		return EBT_NOMATCH;
 	pptr = skb_header_pointer(skb, ih->ihl*4,
 				  sizeof(_ports), &_ports);
 	if (pptr == NULL)
```
```diff
@@ -16,6 +16,7 @@
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
 #include <linux/wireless.h>
+#include <net/iw_handler.h>
 
 #define to_class_dev(obj) container_of(obj,struct class_device,kobj)
 #define to_net_dev(class) container_of(class, struct net_device, class_dev)
```
```diff
@@ -313,13 +314,19 @@ static ssize_t wireless_show(struct class_device *cd, char *buf,
 			     char *))
 {
 	struct net_device *dev = to_net_dev(cd);
-	const struct iw_statistics *iw;
+	const struct iw_statistics *iw = NULL;
 	ssize_t ret = -EINVAL;
 
 	read_lock(&dev_base_lock);
-	if (dev_isalive(dev) && dev->get_wireless_stats
-	    && (iw = dev->get_wireless_stats(dev)) != NULL)
-		ret = (*format)(iw, buf);
+	if (dev_isalive(dev)) {
+		if(dev->wireless_handlers &&
+		   dev->wireless_handlers->get_wireless_stats)
+			iw = dev->wireless_handlers->get_wireless_stats(dev);
+		else if (dev->get_wireless_stats)
+			iw = dev->get_wireless_stats(dev);
+		if (iw != NULL)
+			ret = (*format)(iw, buf);
+	}
 	read_unlock(&dev_base_lock);
 
 	return ret;
```
```diff
@@ -420,7 +427,8 @@ void netdev_unregister_sysfs(struct net_device * net)
 		sysfs_remove_group(&class_dev->kobj, &netstat_group);
 
 #ifdef WIRELESS_EXT
-	if (net->get_wireless_stats)
+	if (net->get_wireless_stats || (net->wireless_handlers &&
+			net->wireless_handlers->get_wireless_stats))
 		sysfs_remove_group(&class_dev->kobj, &wireless_group);
 #endif
 	class_device_del(class_dev);
```
```diff
@@ -453,10 +461,12 @@ int netdev_register_sysfs(struct net_device *net)
 		goto out_unreg;
 
 #ifdef WIRELESS_EXT
-	if (net->get_wireless_stats &&
-	    (ret = sysfs_create_group(&class_dev->kobj, &wireless_group)))
-		goto out_cleanup;
-
+	if (net->get_wireless_stats || (net->wireless_handlers &&
+			net->wireless_handlers->get_wireless_stats)) {
+		ret = sysfs_create_group(&class_dev->kobj, &wireless_group);
+		if (ret)
+			goto out_cleanup;
+	}
 	return 0;
 out_cleanup:
 	if (net->get_stats)
```
```diff
@@ -148,14 +148,14 @@ pptp_outbound_pkt(struct sk_buff **pskb,
 {
 	struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
 	struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
-	u_int16_t msg, *cid = NULL, new_callid;
+	u_int16_t msg, new_callid;
+	unsigned int cid_off;
 
 	new_callid = htons(ct_pptp_info->pns_call_id);
 
 	switch (msg = ntohs(ctlh->messageType)) {
 	case PPTP_OUT_CALL_REQUEST:
-		cid = &pptpReq->ocreq.callID;
+		cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
 		/* FIXME: ideally we would want to reserve a call ID
 		 * here. current netfilter NAT core is not able to do
 		 * this :( For now we use TCP source port. This breaks
```
```diff
@@ -172,10 +172,10 @@ pptp_outbound_pkt(struct sk_buff **pskb,
 		ct_pptp_info->pns_call_id = ntohs(new_callid);
 		break;
 	case PPTP_IN_CALL_REPLY:
-		cid = &pptpReq->icreq.callID;
+		cid_off = offsetof(union pptp_ctrl_union, icreq.callID);
 		break;
 	case PPTP_CALL_CLEAR_REQUEST:
-		cid = &pptpReq->clrreq.callID;
+		cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
 		break;
 	default:
 		DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
```
```diff
@@ -197,18 +197,15 @@ pptp_outbound_pkt(struct sk_buff **pskb,
 
 	/* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
 	 * down to here */
-
-	IP_NF_ASSERT(cid);
-
 	DEBUGP("altering call id from 0x%04x to 0x%04x\n",
-		ntohs(*cid), ntohs(new_callid));
+		ntohs(*(u_int16_t *)pptpReq + cid_off), ntohs(new_callid));
 
 	/* mangle packet */
 	if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
-	                             (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
-	                             sizeof(new_callid),
-	                             (char *)&new_callid,
-	                             sizeof(new_callid)) == 0)
+	                             cid_off + sizeof(struct pptp_pkt_hdr) +
+	                             sizeof(struct PptpControlHeader),
+	                             sizeof(new_callid), (char *)&new_callid,
+	                             sizeof(new_callid)) == 0)
 		return NF_DROP;
 
 	return NF_ACCEPT;
```
```diff
@@ -299,7 +296,8 @@ pptp_inbound_pkt(struct sk_buff **pskb,
 		 union pptp_ctrl_union *pptpReq)
 {
 	struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-	u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL;
+	u_int16_t msg, new_cid = 0, new_pcid;
+	unsigned int pcid_off, cid_off = 0;
 
 	int ret = NF_ACCEPT, rv;
 
```
```diff
@@ -307,23 +305,23 @@ pptp_inbound_pkt(struct sk_buff **pskb,
 
 	switch (msg = ntohs(ctlh->messageType)) {
 	case PPTP_OUT_CALL_REPLY:
-		pcid = &pptpReq->ocack.peersCallID;
-		cid = &pptpReq->ocack.callID;
+		pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
+		cid_off = offsetof(union pptp_ctrl_union, ocack.callID);
 		break;
 	case PPTP_IN_CALL_CONNECT:
-		pcid = &pptpReq->iccon.peersCallID;
+		pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
 		break;
 	case PPTP_IN_CALL_REQUEST:
 		/* only need to nat in case PAC is behind NAT box */
-		break;
+		return NF_ACCEPT;
 	case PPTP_WAN_ERROR_NOTIFY:
-		pcid = &pptpReq->wanerr.peersCallID;
+		pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
 		break;
 	case PPTP_CALL_DISCONNECT_NOTIFY:
-		pcid = &pptpReq->disc.callID;
+		pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
 		break;
 	case PPTP_SET_LINK_INFO:
-		pcid = &pptpReq->setlink.peersCallID;
+		pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
 		break;
 
 	default:
```
```diff
@@ -345,25 +343,24 @@ pptp_inbound_pkt(struct sk_buff **pskb,
 	 * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
 
 	/* mangle packet */
-	IP_NF_ASSERT(pcid);
 	DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
-		ntohs(*pcid), ntohs(new_pcid));
+		ntohs(*(u_int16_t *)pptpReq + pcid_off), ntohs(new_pcid));
 
-	rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
-				      (void *)pcid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+	rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+				      pcid_off + sizeof(struct pptp_pkt_hdr) +
+				      sizeof(struct PptpControlHeader),
 				      sizeof(new_pcid), (char *)&new_pcid,
 				      sizeof(new_pcid));
 	if (rv != NF_ACCEPT)
 		return rv;
 
 	if (new_cid) {
-		IP_NF_ASSERT(cid);
 		DEBUGP("altering call id from 0x%04x to 0x%04x\n",
-			ntohs(*cid), ntohs(new_cid));
-		rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
-			(void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
-			sizeof(new_cid),
-			(char *)&new_cid,
+			ntohs(*(u_int16_t *)pptpReq + cid_off), ntohs(new_cid));
+		rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+			cid_off + sizeof(struct pptp_pkt_hdr) +
+			sizeof(struct PptpControlHeader),
+			sizeof(new_cid), (char *)&new_cid,
 			sizeof(new_cid));
 		if (rv != NF_ACCEPT)
 			return rv;
```
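The PPTP NAT rework above stops carrying pointers into the packet payload and instead records offsetof() values into the control-message union, adding the fixed header sizes only when the mangle offset is finally needed. The sketch below shows offsetof() used that way on a made-up union; the layout and numbers are illustrative, not the real PPTP structures.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical control-message layouts sharing one union, as PPTP requests do. */
struct out_call_req   { uint16_t call_id; uint16_t serial; };
struct call_clear_req { uint16_t call_id; uint16_t reserved; };

union ctrl_union {
    struct out_call_req ocreq;
    struct call_clear_req clrreq;
};

int main(void)
{
    /* Offsets are computed from the union type, not taken as live pointers,
     * so they can be added to the size of any preceding headers later. */
    size_t hdr_len   = 8;   /* pretend a fixed header precedes the union */
    size_t ocreq_off = offsetof(union ctrl_union, ocreq.call_id);
    size_t clr_off   = offsetof(union ctrl_union, clrreq.call_id);

    printf("ocreq.call_id at packet offset %zu\n", hdr_len + ocreq_off);
    printf("clrreq.call_id at packet offset %zu\n", hdr_len + clr_off);
    return 0;
}
```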
```diff
@@ -402,7 +402,7 @@ static int netlink_create(struct socket *sock, int protocol)
 	groups = nl_table[protocol].groups;
 	netlink_unlock_table();
 
-	if ((err = __netlink_create(sock, protocol) < 0))
+	if ((err = __netlink_create(sock, protocol)) < 0)
 		goto out_module;
 
 	nlk = nlk_sk(sock->sk);
@@ -1422,7 +1422,7 @@ static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
 	while (skb->len >= nlmsg_total_size(0)) {
 		nlh = (struct nlmsghdr *) skb->data;
 
-		if (skb->len < nlh->nlmsg_len)
+		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
 			return 0;
 
 		total_len = min(NLMSG_ALIGN(nlh->nlmsg_len), skb->len);
```
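The netlink_rcv_skb() fix above refuses a message whose claimed nlmsg_len is smaller than the header itself before using that length to advance through the buffer. The same defensive check applies to any length-prefixed record format; below is a small self-contained sketch with a hypothetical header struct.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical length-prefixed record header, in the spirit of struct nlmsghdr. */
struct rec_hdr {
    uint32_t len;    /* total record length, header included */
    uint16_t type;
    uint16_t flags;
};

/* Walk a buffer of records, refusing lengths that are too small or too large. */
static int parse_records(const unsigned char *buf, size_t avail)
{
    while (avail >= sizeof(struct rec_hdr)) {
        struct rec_hdr h;
        memcpy(&h, buf, sizeof(h));

        /* Reject before trusting h.len: it must cover at least the header
         * and must not run past the bytes we actually have. */
        if (h.len < sizeof(struct rec_hdr) || h.len > avail)
            return -1;

        printf("record type %u, %u bytes\n", (unsigned)h.type, (unsigned)h.len);
        buf += h.len;
        avail -= h.len;
    }
    return 0;
}

int main(void)
{
    unsigned char buf[16] = { 0 };
    struct rec_hdr h = { .len = 12, .type = 1, .flags = 0 };

    memcpy(buf, &h, sizeof(h));
    return parse_records(buf, 12) ? 1 : 0;
}
```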
```diff
@@ -480,22 +480,38 @@ static int retire_playback_sync_urb_hs(snd_usb_substream_t *subs,
 /*
  * Prepare urb for streaming before playback starts.
  *
- * We don't care about (or have) any data, so we just send a transfer delimiter.
+ * We don't yet have data, so we send a frame of silence.
  */
 static int prepare_startup_playback_urb(snd_usb_substream_t *subs,
 					snd_pcm_runtime_t *runtime,
 					struct urb *urb)
 {
-	unsigned int i;
+	unsigned int i, offs, counts;
 	snd_urb_ctx_t *ctx = urb->context;
+	int stride = runtime->frame_bits >> 3;
 
+	offs = 0;
 	urb->dev = ctx->subs->dev;
 	urb->number_of_packets = subs->packs_per_ms;
 	for (i = 0; i < subs->packs_per_ms; ++i) {
-		urb->iso_frame_desc[i].offset = 0;
-		urb->iso_frame_desc[i].length = 0;
+		/* calculate the size of a packet */
+		if (subs->fill_max)
+			counts = subs->maxframesize; /* fixed */
+		else {
+			subs->phase = (subs->phase & 0xffff)
+				+ (subs->freqm << subs->datainterval);
+			counts = subs->phase >> 16;
+			if (counts > subs->maxframesize)
+				counts = subs->maxframesize;
+		}
+		urb->iso_frame_desc[i].offset = offs * stride;
+		urb->iso_frame_desc[i].length = counts * stride;
+		offs += counts;
 	}
-	urb->transfer_buffer_length = 0;
+	urb->transfer_buffer_length = offs * stride;
+	memset(urb->transfer_buffer,
+	       subs->cur_audiofmt->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
+	       offs * stride);
 	return 0;
 }
```
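The new prepare_startup_playback_urb() above sizes each isochronous packet with a 16.16 fixed-point accumulator (subs->phase advanced by subs->freqm), so fractional frames per packet average out over time, and fills the buffer with silence. The standalone sketch below reproduces just that accumulation with made-up rate numbers.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* 44100 Hz over 1000 packets/s = 44.1 frames per packet.
     * Keep the rate in 16.16 fixed point and carry the fraction over. */
    uint32_t freq_16_16 = (44100u << 16) / 1000u;
    uint32_t phase = 0;
    unsigned total = 0;

    for (int i = 0; i < 10; i++) {
        phase = (phase & 0xffff) + freq_16_16;   /* keep fraction, add rate */
        unsigned counts = phase >> 16;           /* whole frames this packet */
        total += counts;
        printf("packet %d: %u frames\n", i, counts);
    }
    printf("10 packets: %u frames (expect about 441)\n", total);
    return 0;
}
```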