rpms/kernel/devel jwltest-3c59x-misc.patch, NONE, 1.1.4.1 jwltest-3c59x-mmio.patch, NONE, 1.1.6.1 jwltest-b44-alloc.patch, NONE, 1.1.8.1 jwltest-dma-x86_64.patch, NONE, 1.1.8.1 jwltest-e1000_ethtool_ops-whitespace.patch, NONE, 1.1.4.1 jwltest-ethtool-perm-addr.patch, NONE, 1.1.4.1 jwltest-ia64-max-cacheline-export.patch, NONE, 1.1.4.1 jwltest-rx_dropped.patch, NONE, 1.1.4.1 jwltest-swiotlb-bidirectional.patch, NONE, 1.1.4.1 jwltest-swiotlb-cleanup.patch, NONE, 1.1.4.1 jwltest-swiotlb-comments.patch, NONE, 1.1.4.1 jwltest-swiotlb-move.patch, NONE, 1.1.4.1 jwltest-swiotlb-range.patch, NONE, 1.1.4.1 kernel-2.6.spec, 1.1619, 1.1619.2.1
fedora-cvs-commits at redhat.com
Thu Oct 20 18:01:44 UTC 2005
Author: linville
Update of /cvs/dist/rpms/kernel/devel
In directory cvs.devel.redhat.com:/tmp/cvs-serv17035
Modified Files:
Tag: private-linville-jwltest-fc5-5-branch
kernel-2.6.spec
Added Files:
Tag: private-linville-jwltest-fc5-5-branch
jwltest-3c59x-misc.patch jwltest-3c59x-mmio.patch
jwltest-b44-alloc.patch jwltest-dma-x86_64.patch
jwltest-e1000_ethtool_ops-whitespace.patch
jwltest-ethtool-perm-addr.patch
jwltest-ia64-max-cacheline-export.patch
jwltest-rx_dropped.patch jwltest-swiotlb-bidirectional.patch
jwltest-swiotlb-cleanup.patch jwltest-swiotlb-comments.patch
jwltest-swiotlb-move.patch jwltest-swiotlb-range.patch
Log Message:
jwltest-3c59x-misc.patch:
3c59x.c | 33 ++++++++++++++++++---------------
1 files changed, 18 insertions(+), 15 deletions(-)
--- NEW FILE jwltest-3c59x-misc.patch ---
--- linux-2.6.13/drivers/net/3c59x.c.orig 2005-09-11 16:16:39.612186537 -0400
+++ linux-2.6.13/drivers/net/3c59x.c 2005-09-11 16:18:52.538437848 -0400
@@ -903,12 +903,12 @@ static void set_8021q_mode(struct net_de
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
-static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
-static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int use_mmio[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
+static int full_duplex[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
+static int hw_checksums[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
+static int flow_ctrl[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
+static int enable_wol[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
+static int use_mmio[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
@@ -943,18 +943,18 @@ MODULE_PARM_DESC(debug, "3c59x debug lev
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
-MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
-MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
-MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if use_mmio is unset");
MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1531,9 +1531,11 @@ static int __devinit vortex_probe1(struc
dev->hard_start_xmit = boomerang_start_xmit;
/* Actually, it still should work with iommu. */
dev->features |= NETIF_F_SG;
- if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
- (hw_checksums[card_idx] == 1)) {
- dev->features |= NETIF_F_IP_CSUM;
+ if ((card_idx < MAX_UNITS) &&
+ (((hw_checksums[card_idx] == -1) &&
+ (vp->drv_flags & HAS_HWCKSM)) ||
+ (hw_checksums[card_idx] == 1))) {
+ dev->features |= NETIF_F_IP_CSUM;
}
} else {
dev->hard_start_xmit = vortex_start_xmit;
@@ -2806,9 +2808,10 @@ vortex_close(struct net_device *dev)
}
#if DO_ZEROCOPY
- if ( vp->rx_csumhits &&
- ((vp->drv_flags & HAS_HWCKSM) == 0) &&
- (hw_checksums[vp->card_idx] == -1)) {
+ if (vp->rx_csumhits &&
+ ((vp->drv_flags & HAS_HWCKSM) == 0) &&
+ ((vp->card_idx >= MAX_UNITS) ||
+ (hw_checksums[vp->card_idx] == -1))) {
printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name);
}
#endif
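
[ Two idioms in the patch above may be unfamiliar. The `[0 ... MAX_UNITS-1] = -1` form is a GCC ranged designated initializer that fills every array slot with -1, replacing the hand-written lists of eight -1s, and the new `card_idx < MAX_UNITS` tests keep a ninth or later card from indexing past the module-parameter arrays. A minimal userspace sketch of both follows; it is illustrative only (GCC-specific, and the loop bound of 10 is arbitrary), not part of the patch: ]

#include <stdio.h>

#define MAX_UNITS 8

/* GCC extension: every element in [0, MAX_UNITS-1] is initialized to -1,
 * as the patch now does for options[], full_duplex[], hw_checksums[], etc. */
static int hw_checksums[MAX_UNITS] = { [0 ... MAX_UNITS - 1] = -1 };

int main(void)
{
	int card_idx;

	for (card_idx = 0; card_idx < 10; card_idx++) {
		/* Bounds check first, as vortex_probe1() now does: cards
		 * beyond MAX_UNITS get the "unset" default (-1) instead of
		 * an out-of-bounds read. */
		int setting = (card_idx < MAX_UNITS) ?
				hw_checksums[card_idx] : -1;
		printf("card %d: hw_checksums = %d\n", card_idx, setting);
	}
	return 0;
}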
jwltest-3c59x-mmio.patch:
3c59x.c | 532 ++++++++++++++++++++++++++++++++++------------------------------
1 files changed, 284 insertions(+), 248 deletions(-)
--- NEW FILE jwltest-3c59x-mmio.patch ---
--- linux-2.6.13/drivers/net/3c59x.c.orig 2005-09-10 17:18:41.843300468 -0400
+++ linux-2.6.13/drivers/net/3c59x.c 2005-09-10 17:21:29.554911748 -0400
@@ -602,7 +602,7 @@ MODULE_DEVICE_TABLE(pci, vortex_pci_tbl)
First the windows. There are eight register windows, with the command
and status registers available in each.
*/
-#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3WINDOW(win_num) iowrite16(SelectWindow + (win_num), ioaddr + EL3_CMD)
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e
@@ -776,7 +776,8 @@ struct vortex_private {
/* PCI configuration space information. */
struct device *gendev;
- char __iomem *cb_fn_base; /* CardBus function status addr space. */
+ void __iomem *ioaddr; /* IO address space */
+ void __iomem *cb_fn_base; /* CardBus function status addr space. */
/* Some values here only for performance evaluation and path-coverage */
int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
@@ -869,12 +870,12 @@ static struct {
/* number of ETHTOOL_GSTATS u64's */
#define VORTEX_NUM_STATS 3
-static int vortex_probe1(struct device *gendev, long ioaddr, int irq,
+static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
int chip_idx, int card_idx);
static void vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
-static void mdio_sync(long ioaddr, int bits);
+static void mdio_sync(void __iomem *ioaddr, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(unsigned long arg);
@@ -887,7 +888,7 @@ static irqreturn_t vortex_interrupt(int
static irqreturn_t boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
-static void update_stats(long ioaddr, struct net_device *dev);
+static void update_stats(void __iomem *ioaddr, struct net_device *dev);
static struct net_device_stats *vortex_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
@@ -907,9 +908,11 @@ static int full_duplex[MAX_UNITS] = {-1,
static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int use_mmio[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
+static int global_use_mmio = -1;
/* #define dev_alloc_skb dev_alloc_skb_debug */
@@ -934,6 +937,8 @@ module_param(compaq_ioaddr, int, 0);
module_param(compaq_irq, int, 0);
module_param(compaq_device_id, int, 0);
module_param(watchdog, int, 0);
+module_param(global_use_mmio, int, 0);
+module_param_array(use_mmio, int, NULL, 0);
MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
@@ -949,6 +954,8 @@ MODULE_PARM_DESC(compaq_ioaddr, "3c59x P
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
+MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
+MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_vortex(struct net_device *dev)
@@ -1029,18 +1036,19 @@ static struct eisa_driver vortex_eisa_dr
static int vortex_eisa_probe (struct device *device)
{
- long ioaddr;
+ void __iomem *ioaddr;
struct eisa_device *edev;
edev = to_eisa_device (device);
- ioaddr = edev->base_addr;
- if (!request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME))
+ if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
- if (vortex_probe1(device, ioaddr, inw(ioaddr + 0xC88) >> 12,
+ ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+ if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
edev->id.driver_data, vortex_cards_found)) {
- release_region (ioaddr, VORTEX_TOTAL_SIZE);
+ release_region (edev->base_addr, VORTEX_TOTAL_SIZE);
return -ENODEV;
}
@@ -1054,7 +1062,7 @@ static int vortex_eisa_remove (struct de
struct eisa_device *edev;
struct net_device *dev;
struct vortex_private *vp;
- long ioaddr;
+ void __iomem *ioaddr;
edev = to_eisa_device (device);
dev = eisa_get_drvdata (edev);
@@ -1065,11 +1073,11 @@ static int vortex_eisa_remove (struct de
}
vp = netdev_priv(dev);
- ioaddr = dev->base_addr;
+ ioaddr = vp->ioaddr;
unregister_netdev (dev);
- outw (TotalReset|0x14, ioaddr + EL3_CMD);
- release_region (ioaddr, VORTEX_TOTAL_SIZE);
+ iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD);
+ release_region (dev->base_addr, VORTEX_TOTAL_SIZE);
free_netdev (dev);
return 0;
@@ -1096,8 +1104,8 @@ static int __init vortex_eisa_init (void
/* Special code to work-around the Compaq PCI BIOS32 problem. */
if (compaq_ioaddr) {
- vortex_probe1(NULL, compaq_ioaddr, compaq_irq,
- compaq_device_id, vortex_cards_found++);
+ vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
+ compaq_irq, compaq_device_id, vortex_cards_found++);
}
return vortex_cards_found - orig_cards_found + eisa_found;
@@ -1107,15 +1115,32 @@ static int __init vortex_eisa_init (void
static int __devinit vortex_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int rc;
+ int rc, unit, pci_bar;
+ struct vortex_chip_info *vci;
+ void __iomem *ioaddr;
/* wake up and enable device */
rc = pci_enable_device (pdev);
if (rc < 0)
goto out;
- rc = vortex_probe1 (&pdev->dev, pci_resource_start (pdev, 0),
- pdev->irq, ent->driver_data, vortex_cards_found);
+ unit = vortex_cards_found;
+
+ if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
+ /* Determine the default if the user didn't override us */
+ vci = &vortex_info_tbl[ent->driver_data];
+ pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
+ } else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
+ pci_bar = use_mmio[unit] ? 1 : 0;
+ else
+ pci_bar = global_use_mmio ? 1 : 0;
+
+ ioaddr = pci_iomap(pdev, pci_bar, 0);
+ if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
+ ioaddr = pci_iomap(pdev, 0, 0);
+
+ rc = vortex_probe1 (&pdev->dev, ioaddr,
+ pdev->irq, ent->driver_data, unit);
if (rc < 0) {
pci_disable_device (pdev);
goto out;
@@ -1134,7 +1154,7 @@ out:
* NOTE: pdev can be NULL, for the case of a Compaq device
*/
static int __devinit vortex_probe1(struct device *gendev,
- long ioaddr, int irq,
+ void __iomem *ioaddr, int irq,
int chip_idx, int card_idx)
{
struct vortex_private *vp;
@@ -1202,15 +1222,16 @@ static int __devinit vortex_probe1(struc
if (print_info)
printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
- printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n",
+ printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n",
print_name,
pdev ? "PCI" : "EISA",
vci->name,
ioaddr);
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
dev->mtu = mtu;
+ vp->ioaddr = ioaddr;
vp->large_frames = mtu > 1500;
vp->drv_flags = vci->drv_flags;
vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
@@ -1226,7 +1247,7 @@ static int __devinit vortex_probe1(struc
if (pdev) {
/* EISA resources already marked, so only PCI needs to do this here */
/* Ignore return value, because Cardbus drivers already allocate for us */
- if (request_region(ioaddr, vci->io_size, print_name) != NULL)
+ if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
vp->must_free_region = 1;
/* enable bus-mastering if necessary */
@@ -1316,14 +1337,14 @@ static int __devinit vortex_probe1(struc
for (i = 0; i < 0x40; i++) {
int timer;
- outw(base + i, ioaddr + Wn0EepromCmd);
+ iowrite16(base + i, ioaddr + Wn0EepromCmd);
/* Pause for at least 162 us. for the read to take place. */
for (timer = 10; timer >= 0; timer--) {
udelay(162);
- if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+ if ((ioread16(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
break;
}
- eeprom[i] = inw(ioaddr + Wn0EepromData);
+ eeprom[i] = ioread16(ioaddr + Wn0EepromData);
}
}
for (i = 0; i < 0x18; i++)
@@ -1351,7 +1372,7 @@ static int __devinit vortex_probe1(struc
}
EL3WINDOW(2);
for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + i);
+ iowrite8(dev->dev_addr[i], ioaddr + i);
#ifdef __sparc__
if (print_info)
@@ -1366,7 +1387,7 @@ static int __devinit vortex_probe1(struc
#endif
EL3WINDOW(4);
- step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+ step = (ioread8(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
if (print_info) {
printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-"
"%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
@@ -1375,31 +1396,30 @@ static int __devinit vortex_probe1(struc
if (pdev && vci->drv_flags & HAS_CB_FNS) {
- unsigned long fn_st_addr; /* Cardbus function status space */
unsigned short n;
- fn_st_addr = pci_resource_start (pdev, 2);
- if (fn_st_addr) {
- vp->cb_fn_base = ioremap(fn_st_addr, 128);
+ vp->cb_fn_base = pci_iomap(pdev, 2, 0);
+ if (!vp->cb_fn_base) {
retval = -ENOMEM;
- if (!vp->cb_fn_base)
- goto free_ring;
+ goto free_ring;
}
+
if (print_info) {
printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
- print_name, fn_st_addr, vp->cb_fn_base);
+ print_name, pci_resource_start(pdev, 2),
+ vp->cb_fn_base);
}
EL3WINDOW(2);
- n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+ n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
if (vp->drv_flags & INVERT_LED_PWR)
n |= 0x10;
if (vp->drv_flags & INVERT_MII_PWR)
n |= 0x4000;
- outw(n, ioaddr + Wn2_ResetOptions);
+ iowrite16(n, ioaddr + Wn2_ResetOptions);
if (vp->drv_flags & WNO_XCVR_PWR) {
EL3WINDOW(0);
- outw(0x0800, ioaddr);
+ iowrite16(0x0800, ioaddr);
}
}
@@ -1418,13 +1438,13 @@ static int __devinit vortex_probe1(struc
static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
unsigned int config;
EL3WINDOW(3);
- vp->available_media = inw(ioaddr + Wn3_Options);
+ vp->available_media = ioread16(ioaddr + Wn3_Options);
if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
vp->available_media = 0x40;
- config = inl(ioaddr + Wn3_Config);
+ config = ioread32(ioaddr + Wn3_Config);
if (print_info) {
printk(KERN_DEBUG " Internal config register is %4.4x, "
- "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
+ "transceivers %#x.\n", config, ioread16(ioaddr + Wn3_Options));
printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
8 << RAM_SIZE(config),
RAM_WIDTH(config) ? "word" : "byte",
@@ -1555,7 +1575,7 @@ free_ring:
vp->rx_ring_dma);
free_region:
if (vp->must_free_region)
- release_region(ioaddr, vci->io_size);
+ release_region(dev->base_addr, vci->io_size);
free_netdev(dev);
printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
out:
@@ -1565,17 +1585,19 @@ out:
static void
issue_and_wait(struct net_device *dev, int cmd)
{
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
int i;
- outw(cmd, dev->base_addr + EL3_CMD);
+ iowrite16(cmd, ioaddr + EL3_CMD);
for (i = 0; i < 2000; i++) {
- if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
+ if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
return;
}
/* OK, that didn't work. Do it the slow way. One second */
for (i = 0; i < 100000; i++) {
- if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
+ if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
if (vortex_debug > 1)
printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
dev->name, cmd, i * 10);
@@ -1584,14 +1606,14 @@ issue_and_wait(struct net_device *dev, i
udelay(10);
}
printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
- dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
+ dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
}
static void
vortex_up(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
unsigned int config;
int i;
@@ -1604,7 +1626,7 @@ vortex_up(struct net_device *dev)
/* Before initializing select the active media port. */
EL3WINDOW(3);
- config = inl(ioaddr + Wn3_Config);
+ config = ioread32(ioaddr + Wn3_Config);
if (vp->media_override != 7) {
printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
@@ -1651,7 +1673,7 @@ vortex_up(struct net_device *dev)
config = BFINS(config, dev->if_port, 20, 4);
if (vortex_debug > 6)
printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
- outl(config, ioaddr + Wn3_Config);
+ iowrite32(config, ioaddr + Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
int mii_reg1, mii_reg5;
@@ -1679,7 +1701,7 @@ vortex_up(struct net_device *dev)
}
/* Set the full-duplex bit. */
- outw( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+ iowrite16( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
(vp->large_frames ? 0x40 : 0) |
((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
ioaddr + Wn3_MAC_Ctrl);
@@ -1695,51 +1717,51 @@ vortex_up(struct net_device *dev)
*/
issue_and_wait(dev, RxReset|0x04);
- outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+ iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
if (vortex_debug > 1) {
EL3WINDOW(4);
printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
- dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ dev->name, dev->irq, ioread16(ioaddr + Wn4_Media));
}
/* Set the station address and mask in window 2 each time opened. */
EL3WINDOW(2);
for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + i);
+ iowrite8(dev->dev_addr[i], ioaddr + i);
for (; i < 12; i+=2)
- outw(0, ioaddr + i);
+ iowrite16(0, ioaddr + i);
if (vp->cb_fn_base) {
- unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+ unsigned short n = ioread16(ioaddr + Wn2_ResetOptions) & ~0x4010;
if (vp->drv_flags & INVERT_LED_PWR)
n |= 0x10;
if (vp->drv_flags & INVERT_MII_PWR)
n |= 0x4000;
- outw(n, ioaddr + Wn2_ResetOptions);
+ iowrite16(n, ioaddr + Wn2_ResetOptions);
}
if (dev->if_port == XCVR_10base2)
/* Start the thinnet transceiver. We should really wait 50ms...*/
- outw(StartCoax, ioaddr + EL3_CMD);
+ iowrite16(StartCoax, ioaddr + EL3_CMD);
if (dev->if_port != XCVR_NWAY) {
EL3WINDOW(4);
- outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+ iowrite16((ioread16(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
}
/* Switch to the stats window, and clear all stats by reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
+ iowrite16(StatsDisable, ioaddr + EL3_CMD);
EL3WINDOW(6);
for (i = 0; i < 10; i++)
- inb(ioaddr + i);
- inw(ioaddr + 10);
- inw(ioaddr + 12);
+ ioread8(ioaddr + i);
+ ioread16(ioaddr + 10);
+ ioread16(ioaddr + 12);
/* New: On the Vortex we must also clear the BadSSD counter. */
EL3WINDOW(4);
- inb(ioaddr + 12);
+ ioread8(ioaddr + 12);
/* ..and on the Boomerang we enable the extra statistics bits. */
- outw(0x0040, ioaddr + Wn4_NetDiag);
+ iowrite16(0x0040, ioaddr + Wn4_NetDiag);
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
@@ -1747,30 +1769,30 @@ vortex_up(struct net_device *dev)
if (vp->full_bus_master_rx) { /* Boomerang bus master. */
vp->cur_rx = vp->dirty_rx = 0;
/* Initialize the RxEarly register as recommended. */
- outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
- outl(0x0020, ioaddr + PktStatus);
- outl(vp->rx_ring_dma, ioaddr + UpListPtr);
+ iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ iowrite32(0x0020, ioaddr + PktStatus);
+ iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
}
if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
vp->cur_tx = vp->dirty_tx = 0;
if (vp->drv_flags & IS_BOOMERANG)
- outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+ iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
/* Clear the Rx, Tx rings. */
for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */
vp->rx_ring[i].status = 0;
for (i = 0; i < TX_RING_SIZE; i++)
vp->tx_skbuff[i] = NULL;
- outl(0, ioaddr + DownListPtr);
+ iowrite32(0, ioaddr + DownListPtr);
}
/* Set receiver mode: presumably accept b-case and phys addr only. */
set_rx_mode(dev);
/* enable 802.1q tagged frames */
set_8021q_mode(dev, 1);
- outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
// issue_and_wait(dev, SetTxStart|0x07ff);
- outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
- outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
/* Allow status bits to be seen. */
vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
@@ -1780,13 +1802,13 @@ vortex_up(struct net_device *dev)
(vp->full_bus_master_rx ? 0 : RxComplete) |
StatsFull | HostError | TxComplete | IntReq
| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
- outw(vp->status_enable, ioaddr + EL3_CMD);
+ iowrite16(vp->status_enable, ioaddr + EL3_CMD);
/* Ack all pending events, and set active indicator mask. */
- outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
- outw(vp->intr_enable, ioaddr + EL3_CMD);
+ iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
- writel(0x8000, vp->cb_fn_base + 4);
+ iowrite32(0x8000, vp->cb_fn_base + 4);
netif_start_queue (dev);
}
@@ -1852,7 +1874,7 @@ vortex_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
int ok = 0;
int media_status, mii_status, old_window;
@@ -1866,9 +1888,9 @@ vortex_timer(unsigned long data)
if (vp->medialock)
goto leave_media_alone;
disable_irq(dev->irq);
- old_window = inw(ioaddr + EL3_CMD) >> 13;
+ old_window = ioread16(ioaddr + EL3_CMD) >> 13;
EL3WINDOW(4);
- media_status = inw(ioaddr + Wn4_Media);
+ media_status = ioread16(ioaddr + Wn4_Media);
switch (dev->if_port) {
case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
if (media_status & Media_LnkBeat) {
@@ -1909,7 +1931,7 @@ vortex_timer(unsigned long data)
vp->phys[0], mii_reg5);
/* Set the full-duplex bit. */
EL3WINDOW(3);
- outw( (vp->full_duplex ? 0x20 : 0) |
+ iowrite16( (vp->full_duplex ? 0x20 : 0) |
(vp->large_frames ? 0x40 : 0) |
((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
ioaddr + Wn3_MAC_Ctrl);
@@ -1950,15 +1972,15 @@ vortex_timer(unsigned long data)
dev->name, media_tbl[dev->if_port].name);
next_tick = media_tbl[dev->if_port].wait;
}
- outw((media_status & ~(Media_10TP|Media_SQE)) |
+ iowrite16((media_status & ~(Media_10TP|Media_SQE)) |
media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
EL3WINDOW(3);
- config = inl(ioaddr + Wn3_Config);
+ config = ioread32(ioaddr + Wn3_Config);
config = BFINS(config, dev->if_port, 20, 4);
- outl(config, ioaddr + Wn3_Config);
+ iowrite32(config, ioaddr + Wn3_Config);
- outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+ iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
ioaddr + EL3_CMD);
if (vortex_debug > 1)
printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
@@ -1974,29 +1996,29 @@ leave_media_alone:
mod_timer(&vp->timer, RUN_AT(next_tick));
if (vp->deferred)
- outw(FakeIntr, ioaddr + EL3_CMD);
+ iowrite16(FakeIntr, ioaddr + EL3_CMD);
return;
}
static void vortex_tx_timeout(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
- dev->name, inb(ioaddr + TxStatus),
- inw(ioaddr + EL3_STATUS));
+ dev->name, ioread8(ioaddr + TxStatus),
+ ioread16(ioaddr + EL3_STATUS));
EL3WINDOW(4);
printk(KERN_ERR " diagnostics: net %04x media %04x dma %08x fifo %04x\n",
- inw(ioaddr + Wn4_NetDiag),
- inw(ioaddr + Wn4_Media),
- inl(ioaddr + PktStatus),
- inw(ioaddr + Wn4_FIFODiag));
+ ioread16(ioaddr + Wn4_NetDiag),
+ ioread16(ioaddr + Wn4_Media),
+ ioread32(ioaddr + PktStatus),
+ ioread16(ioaddr + Wn4_FIFODiag));
/* Slight code bloat to be user friendly. */
- if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
+ if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
" network cable problem?\n", dev->name);
- if (inw(ioaddr + EL3_STATUS) & IntLatch) {
+ if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
printk(KERN_ERR "%s: Interrupt posted but not delivered --"
" IRQ blocked by another device?\n", dev->name);
/* Bad idea here.. but we might as well handle a few events. */
@@ -2022,21 +2044,21 @@ static void vortex_tx_timeout(struct net
vp->stats.tx_errors++;
if (vp->full_bus_master_tx) {
printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
- if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
- outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
+ if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
+ iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
ioaddr + DownListPtr);
if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
netif_wake_queue (dev);
if (vp->drv_flags & IS_BOOMERANG)
- outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
- outw(DownUnstall, ioaddr + EL3_CMD);
+ iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+ iowrite16(DownUnstall, ioaddr + EL3_CMD);
} else {
vp->stats.tx_dropped++;
netif_wake_queue(dev);
}
/* Issue Tx Enable */
- outw(TxEnable, ioaddr + EL3_CMD);
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
dev->trans_start = jiffies;
/* Switch to register set 7 for normal use. */
@@ -2051,7 +2073,7 @@ static void
vortex_error(struct net_device *dev, int status)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
int do_tx_reset = 0, reset_mask = 0;
unsigned char tx_status = 0;
@@ -2060,7 +2082,7 @@ vortex_error(struct net_device *dev, int
}
if (status & TxComplete) { /* Really "TxError" for us. */
- tx_status = inb(ioaddr + TxStatus);
+ tx_status = ioread8(ioaddr + TxStatus);
/* Presumably a tx-timeout. We must merely re-enable. */
if (vortex_debug > 2
|| (tx_status != 0x88 && vortex_debug > 0)) {
@@ -2074,20 +2096,20 @@ vortex_error(struct net_device *dev, int
}
if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
- outb(0, ioaddr + TxStatus);
+ iowrite8(0, ioaddr + TxStatus);
if (tx_status & 0x30) { /* txJabber or txUnderrun */
do_tx_reset = 1;
} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
do_tx_reset = 1;
reset_mask = 0x0108; /* Reset interface logic, but not download logic */
} else { /* Merely re-enable the transmitter. */
- outw(TxEnable, ioaddr + EL3_CMD);
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
}
}
if (status & RxEarly) { /* Rx early is unused. */
vortex_rx(dev);
- outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
}
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
@@ -2097,29 +2119,29 @@ vortex_error(struct net_device *dev, int
/* HACK: Disable statistics as an interrupt source. */
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 &&
- inw(ioaddr + EL3_STATUS) & StatsFull) {
+ ioread16(ioaddr + EL3_STATUS) & StatsFull) {
printk(KERN_WARNING "%s: Updating statistics failed, disabling "
"stats as an interrupt source.\n", dev->name);
EL3WINDOW(5);
- outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+ iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
vp->intr_enable &= ~StatsFull;
EL3WINDOW(7);
DoneDidThat++;
}
}
if (status & IntReq) { /* Restore all interrupt sources. */
- outw(vp->status_enable, ioaddr + EL3_CMD);
- outw(vp->intr_enable, ioaddr + EL3_CMD);
+ iowrite16(vp->status_enable, ioaddr + EL3_CMD);
+ iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
}
if (status & HostError) {
u16 fifo_diag;
EL3WINDOW(4);
- fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+ fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
dev->name, fifo_diag);
/* Adapter failure requires Tx/Rx reset and reinit. */
if (vp->full_bus_master_tx) {
- int bus_status = inl(ioaddr + PktStatus);
+ int bus_status = ioread32(ioaddr + PktStatus);
/* 0x80000000 PCI master abort. */
/* 0x40000000 PCI target abort. */
if (vortex_debug)
@@ -2139,14 +2161,14 @@ vortex_error(struct net_device *dev, int
set_rx_mode(dev);
/* enable 802.1q VLAN tagged frames */
set_8021q_mode(dev, 1);
- outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
- outw(AckIntr | HostError, ioaddr + EL3_CMD);
+ iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
}
}
if (do_tx_reset) {
issue_and_wait(dev, TxReset|reset_mask);
- outw(TxEnable, ioaddr + EL3_CMD);
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
if (!vp->full_bus_master_tx)
netif_wake_queue(dev);
}
@@ -2156,29 +2178,29 @@ static int
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
/* Put out the doubleword header... */
- outl(skb->len, ioaddr + TX_FIFO);
+ iowrite32(skb->len, ioaddr + TX_FIFO);
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
- outl( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
+ iowrite32( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
ioaddr + Wn7_MasterAddr);
- outw(len, ioaddr + Wn7_MasterLen);
+ iowrite16(len, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
- outw(StartDMADown, ioaddr + EL3_CMD);
+ iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
- outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb (skb);
- if (inw(ioaddr + TxFree) > 1536) {
+ if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
/* Interrupt us when the FIFO has room for max-sized packet. */
netif_stop_queue(dev);
- outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
}
}
@@ -2189,7 +2211,7 @@ vortex_start_xmit(struct sk_buff *skb, s
int tx_status;
int i = 32;
- while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
@@ -2199,9 +2221,9 @@ vortex_start_xmit(struct sk_buff *skb, s
if (tx_status & 0x30) {
issue_and_wait(dev, TxReset);
}
- outw(TxEnable, ioaddr + EL3_CMD);
+ iowrite16(TxEnable, ioaddr + EL3_CMD);
}
- outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
}
}
return 0;
@@ -2211,7 +2233,7 @@ static int
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
@@ -2275,8 +2297,8 @@ boomerang_start_xmit(struct sk_buff *skb
/* Wait for the stall to complete. */
issue_and_wait(dev, DownStall);
prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
- if (inl(ioaddr + DownListPtr) == 0) {
- outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
+ if (ioread32(ioaddr + DownListPtr) == 0) {
+ iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
vp->queued_packet++;
}
@@ -2291,7 +2313,7 @@ boomerang_start_xmit(struct sk_buff *skb
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
- outw(DownUnstall, ioaddr + EL3_CMD);
+ iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
dev->trans_start = jiffies;
return 0;
@@ -2310,15 +2332,15 @@ vortex_interrupt(int irq, void *dev_id,
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr;
+ void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
int handled = 0;
- ioaddr = dev->base_addr;
+ ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
- status = inw(ioaddr + EL3_STATUS);
+ status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
printk("vortex_interrupt(). status=0x%4x\n", status);
@@ -2337,7 +2359,7 @@ vortex_interrupt(int irq, void *dev_id,
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
- dev->name, status, inb(ioaddr + Timer));
+ dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
@@ -2350,16 +2372,16 @@ vortex_interrupt(int irq, void *dev_id,
if (vortex_debug > 5)
printk(KERN_DEBUG " TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
}
if (status & DMADone) {
- if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
- outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+ if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
- if (inw(ioaddr + TxFree) > 1536) {
+ if (ioread16(ioaddr + TxFree) > 1536) {
/*
* AKPM: FIXME: I don't think we need this. If the queue was stopped due to
* insufficient FIFO room, the TxAvailable test will succeed and call
@@ -2367,7 +2389,7 @@ vortex_interrupt(int irq, void *dev_id,
*/
netif_wake_queue(dev);
} else { /* Interrupt when FIFO has room for max-sized packet. */
- outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
netif_stop_queue(dev);
}
}
@@ -2385,17 +2407,17 @@ vortex_interrupt(int irq, void *dev_id,
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
- outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+ iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
- outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
- } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+ iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+ } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
- outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
- } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+ iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ } while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
@@ -2415,11 +2437,11 @@ boomerang_interrupt(int irq, void *dev_i
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr;
+ void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
- ioaddr = dev->base_addr;
+ ioaddr = vp->ioaddr;
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
@@ -2427,7 +2449,7 @@ boomerang_interrupt(int irq, void *dev_i
*/
spin_lock(&vp->lock);
- status = inw(ioaddr + EL3_STATUS);
+ status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
@@ -2448,13 +2470,13 @@ boomerang_interrupt(int irq, void *dev_i
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
- dev->name, status, inb(ioaddr + Timer));
+ dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & UpComplete) {
- outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+ iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
if (vortex_debug > 5)
printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
boomerang_rx(dev);
@@ -2463,11 +2485,11 @@ boomerang_interrupt(int irq, void *dev_i
if (status & DownComplete) {
unsigned int dirty_tx = vp->dirty_tx;
- outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
while (vp->cur_tx - dirty_tx > 0) {
int entry = dirty_tx % TX_RING_SIZE;
#if 1 /* AKPM: the latter is faster, but cyclone-only */
- if (inl(ioaddr + DownListPtr) ==
+ if (ioread32(ioaddr + DownListPtr) ==
vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
break; /* It still hasn't been processed. */
#else
@@ -2514,20 +2536,20 @@ boomerang_interrupt(int irq, void *dev_i
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
- outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+ iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
- outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
- } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+ iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+ } while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
- outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+ iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
- writel(0x8000, vp->cb_fn_base + 4);
+ iowrite32(0x8000, vp->cb_fn_base + 4);
- } while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
+ } while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
@@ -2540,16 +2562,16 @@ handler_exit:
static int vortex_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
int i;
short rx_status;
if (vortex_debug > 5)
printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
- inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
- while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
+ while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
- unsigned char rx_error = inb(ioaddr + RxErrors);
+ unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
vp->stats.rx_errors++;
@@ -2572,27 +2594,28 @@ static int vortex_rx(struct net_device *
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
if (vp->bus_master &&
- ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+ ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
pkt_len, PCI_DMA_FROMDEVICE);
- outl(dma, ioaddr + Wn7_MasterAddr);
- outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
- outw(StartDMAUp, ioaddr + EL3_CMD);
- while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
+ iowrite32(dma, ioaddr + Wn7_MasterAddr);
+ iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ iowrite16(StartDMAUp, ioaddr + EL3_CMD);
+ while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
;
pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
} else {
- insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
- (pkt_len + 3) >> 2);
+ ioread32_rep(ioaddr + RX_FIFO,
+ skb_put(skb, pkt_len),
+ (pkt_len + 3) >> 2);
}
- outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+ iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
vp->stats.rx_packets++;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
- if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
break;
continue;
} else if (vortex_debug > 0)
@@ -2611,12 +2634,12 @@ boomerang_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
int entry = vp->cur_rx % RX_RING_SIZE;
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
int rx_status;
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
if (vortex_debug > 5)
- printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS));
+ printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
if (--rx_work_limit < 0)
@@ -2699,7 +2722,7 @@ boomerang_rx(struct net_device *dev)
vp->rx_skbuff[entry] = skb;
}
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
- outw(UpUnstall, ioaddr + EL3_CMD);
+ iowrite16(UpUnstall, ioaddr + EL3_CMD);
}
return 0;
}
@@ -2728,7 +2751,7 @@ static void
vortex_down(struct net_device *dev, int final_down)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
netif_stop_queue (dev);
@@ -2736,26 +2759,26 @@ vortex_down(struct net_device *dev, int
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update vp->stats below. */
- outw(StatsDisable, ioaddr + EL3_CMD);
+ iowrite16(StatsDisable, ioaddr + EL3_CMD);
/* Disable the receiver and transmitter. */
- outw(RxDisable, ioaddr + EL3_CMD);
- outw(TxDisable, ioaddr + EL3_CMD);
+ iowrite16(RxDisable, ioaddr + EL3_CMD);
+ iowrite16(TxDisable, ioaddr + EL3_CMD);
/* Disable receiving 802.1q tagged frames */
set_8021q_mode(dev, 0);
if (dev->if_port == XCVR_10base2)
/* Turn off thinnet power. Green! */
- outw(StopCoax, ioaddr + EL3_CMD);
+ iowrite16(StopCoax, ioaddr + EL3_CMD);
- outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+ iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
update_stats(ioaddr, dev);
if (vp->full_bus_master_rx)
- outl(0, ioaddr + UpListPtr);
+ iowrite32(0, ioaddr + UpListPtr);
if (vp->full_bus_master_tx)
- outl(0, ioaddr + DownListPtr);
+ iowrite32(0, ioaddr + DownListPtr);
if (final_down && VORTEX_PCI(vp)) {
vp->pm_state_valid = 1;
@@ -2768,7 +2791,7 @@ static int
vortex_close(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
int i;
if (netif_device_present(dev))
@@ -2776,7 +2799,7 @@ vortex_close(struct net_device *dev)
if (vortex_debug > 1) {
printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
- dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+ dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
" tx_queued %d Rx pre-checksummed %d.\n",
dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
@@ -2830,18 +2853,18 @@ dump_tx_ring(struct net_device *dev)
{
if (vortex_debug > 0) {
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
if (vp->full_bus_master_tx) {
int i;
- int stalled = inl(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */
+ int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */
printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
- inl(ioaddr + DownListPtr),
+ ioread32(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
@@ -2855,7 +2878,7 @@ dump_tx_ring(struct net_device *dev)
le32_to_cpu(vp->tx_ring[i].status));
}
if (!stalled)
- outw(DownUnstall, ioaddr + EL3_CMD);
+ iowrite16(DownUnstall, ioaddr + EL3_CMD);
}
}
}
@@ -2863,11 +2886,12 @@ dump_tx_ring(struct net_device *dev)
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
spin_lock_irqsave (&vp->lock, flags);
- update_stats(dev->base_addr, dev);
+ update_stats(ioaddr, dev);
spin_unlock_irqrestore (&vp->lock, flags);
}
return &vp->stats;
@@ -2880,37 +2904,37 @@ static struct net_device_stats *vortex_g
table. This is done by checking that the ASM (!) code generated uses
atomic updates with '+='.
*/
-static void update_stats(long ioaddr, struct net_device *dev)
+static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- int old_window = inw(ioaddr + EL3_CMD);
+ int old_window = ioread16(ioaddr + EL3_CMD);
if (old_window == 0xffff) /* Chip suspended or ejected. */
return;
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
- vp->stats.tx_carrier_errors += inb(ioaddr + 0);
- vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- vp->stats.collisions += inb(ioaddr + 3);
- vp->stats.tx_window_errors += inb(ioaddr + 4);
- vp->stats.rx_fifo_errors += inb(ioaddr + 5);
- vp->stats.tx_packets += inb(ioaddr + 6);
- vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
- /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
+ vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
+ vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
+ vp->stats.collisions += ioread8(ioaddr + 3);
+ vp->stats.tx_window_errors += ioread8(ioaddr + 4);
+ vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
+ vp->stats.tx_packets += ioread8(ioaddr + 6);
+ vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
+ /* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
- vp->stats.rx_bytes += inw(ioaddr + 10);
- vp->stats.tx_bytes += inw(ioaddr + 12);
+ vp->stats.rx_bytes += ioread16(ioaddr + 10);
+ vp->stats.tx_bytes += ioread16(ioaddr + 12);
/* Extra stats for get_ethtool_stats() */
- vp->xstats.tx_multiple_collisions += inb(ioaddr + 2);
- vp->xstats.tx_deferred += inb(ioaddr + 8);
+ vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
+ vp->xstats.tx_deferred += ioread8(ioaddr + 8);
EL3WINDOW(4);
- vp->xstats.rx_bad_ssd += inb(ioaddr + 12);
+ vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
{
- u8 up = inb(ioaddr + 13);
+ u8 up = ioread8(ioaddr + 13);
vp->stats.rx_bytes += (up & 0x0f) << 16;
vp->stats.tx_bytes += (up & 0xf0) << 12;
}
@@ -2922,7 +2946,7 @@ static void update_stats(long ioaddr, st
static int vortex_nway_reset(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
@@ -2936,7 +2960,7 @@ static int vortex_nway_reset(struct net_
static u32 vortex_get_link(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
@@ -2950,7 +2974,7 @@ static u32 vortex_get_link(struct net_de
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
@@ -2964,7 +2988,7 @@ static int vortex_get_settings(struct ne
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
@@ -2994,10 +3018,11 @@ static void vortex_get_ethtool_stats(str
struct ethtool_stats *stats, u64 *data)
{
struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
spin_lock_irqsave(&vp->lock, flags);
- update_stats(dev->base_addr, dev);
+ update_stats(ioaddr, dev);
spin_unlock_irqrestore(&vp->lock, flags);
data[0] = vp->xstats.tx_deferred;
@@ -3057,7 +3082,7 @@ static int vortex_ioctl(struct net_devic
{
int err;
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int state = 0;
@@ -3085,7 +3110,8 @@ static int vortex_ioctl(struct net_devic
the chip has a very clean way to set the mode, unlike many others. */
static void set_rx_mode(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
int new_mode;
if (dev->flags & IFF_PROMISC) {
@@ -3097,7 +3123,7 @@ static void set_rx_mode(struct net_devic
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
- outw(new_mode, ioaddr + EL3_CMD);
+ iowrite16(new_mode, ioaddr + EL3_CMD);
}
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -3111,8 +3137,8 @@ static void set_rx_mode(struct net_devic
static void set_8021q_mode(struct net_device *dev, int enable)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int old_window = inw(ioaddr + EL3_CMD);
+ void __iomem *ioaddr = vp->ioaddr;
+ int old_window = ioread16(ioaddr + EL3_CMD);
int mac_ctrl;
if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
@@ -3124,24 +3150,24 @@ static void set_8021q_mode(struct net_de
max_pkt_size += 4; /* 802.1Q VLAN tag */
EL3WINDOW(3);
- outw(max_pkt_size, ioaddr+Wn3_MaxPktSize);
+ iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize);
/* set VlanEtherType to let the hardware checksumming
treat tagged frames correctly */
EL3WINDOW(7);
- outw(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
+ iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
} else {
/* on older cards we have to enable large frames */
vp->large_frames = dev->mtu > 1500 || enable;
EL3WINDOW(3);
- mac_ctrl = inw(ioaddr+Wn3_MAC_Ctrl);
+ mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl);
if (vp->large_frames)
mac_ctrl |= 0x40;
else
mac_ctrl &= ~0x40;
- outw(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
+ iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
}
EL3WINDOW(old_window);
@@ -3163,7 +3189,7 @@ static void set_8021q_mode(struct net_de
/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
-#define mdio_delay() inl(mdio_addr)
+#define mdio_delay() ioread32(mdio_addr)
#define MDIO_SHIFT_CLK 0x01
#define MDIO_DIR_WRITE 0x04
@@ -3174,15 +3200,15 @@ static void set_8021q_mode(struct net_de
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
-static void mdio_sync(long ioaddr, int bits)
+static void mdio_sync(void __iomem *ioaddr, int bits)
{
- long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
/* Establish sync by sending at least 32 logic ones. */
while (-- bits >= 0) {
- outw(MDIO_DATA_WRITE1, mdio_addr);
+ iowrite16(MDIO_DATA_WRITE1, mdio_addr);
mdio_delay();
- outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+ iowrite16(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
}
@@ -3190,10 +3216,11 @@ static void mdio_sync(long ioaddr, int b
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
int i;
- long ioaddr = dev->base_addr;
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
unsigned int retval = 0;
- long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
if (mii_preamble_required)
mdio_sync(ioaddr, 32);
@@ -3201,17 +3228,17 @@ static int mdio_read(struct net_device *
/* Shift the read command bits out. */
for (i = 14; i >= 0; i--) {
int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- outw(dataval, mdio_addr);
+ iowrite16(dataval, mdio_addr);
mdio_delay();
- outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 19; i > 0; i--) {
- outw(MDIO_ENB_IN, mdio_addr);
+ iowrite16(MDIO_ENB_IN, mdio_addr);
mdio_delay();
- retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
- outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ retval = (retval << 1) | ((ioread16(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
@@ -3219,9 +3246,10 @@ static int mdio_read(struct net_device *
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
- long ioaddr = dev->base_addr;
+ struct vortex_private *vp = netdev_priv(dev);
+ void __iomem *ioaddr = vp->ioaddr;
int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
- long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ void __iomem *mdio_addr = ioaddr + Wn4_PhysicalMgmt;
int i;
if (mii_preamble_required)
@@ -3230,16 +3258,16 @@ static void mdio_write(struct net_device
/* Shift the command bits out. */
for (i = 31; i >= 0; i--) {
int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
- outw(dataval, mdio_addr);
+ iowrite16(dataval, mdio_addr);
mdio_delay();
- outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
/* Leave the interface idle. */
for (i = 1; i >= 0; i--) {
- outw(MDIO_ENB_IN, mdio_addr);
+ iowrite16(MDIO_ENB_IN, mdio_addr);
mdio_delay();
- outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
mdio_delay();
}
return;
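Both MDIO routines above repeat the same two-write idiom per bit: put the data bit on the management pin with the clock low, wait, then raise the clock so the PHY samples it. A sketch that factors that idiom out, using this driver's bit definitions (MDIO_SHIFT_CLK and MDIO_DIR_WRITE appear earlier in the patch; the WRITE0/WRITE1 encodings are the driver's):

#include <linux/io.h>

#define MDIO_SHIFT_CLK	0x01
#define MDIO_DIR_WRITE	0x04
#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)

/* Clock one command bit out to the PHY.  The dummy ioread32() is the
 * same trick as mdio_delay(): one PCI read paces the loop so the
 * management clock stays under its 2.5 MHz ceiling. */
static inline void mdio_clock_bit(void __iomem *mdio_addr, int bit)
{
	int dataval = bit ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;

	iowrite16(dataval, mdio_addr);			/* data valid, clock low */
	ioread32(mdio_addr);
	iowrite16(dataval | MDIO_SHIFT_CLK, mdio_addr);	/* rising edge latches */
	ioread32(mdio_addr);
}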
@@ -3250,15 +3278,15 @@ static void mdio_write(struct net_device
static void acpi_set_WOL(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = vp->ioaddr;
if (vp->enable_wol) {
/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
EL3WINDOW(7);
- outw(2, ioaddr + 0x0c);
+ iowrite16(2, ioaddr + 0x0c);
/* The RxFilter must accept the WOL frames. */
- outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
- outw(RxEnable, ioaddr + EL3_CMD);
+ iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ iowrite16(RxEnable, ioaddr + EL3_CMD);
pci_enable_wake(VORTEX_PCI(vp), 0, 1);
@@ -3280,10 +3308,9 @@ static void __devexit vortex_remove_one
vp = netdev_priv(dev);
- /* AKPM: FIXME: we should have
- * if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
- * here
- */
+ if (vp->cb_fn_base)
+ pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);
+
unregister_netdev(dev);
if (VORTEX_PCI(vp)) {
@@ -3293,8 +3320,10 @@ static void __devexit vortex_remove_one
pci_disable_device(VORTEX_PCI(vp));
}
/* Should really use issue_and_wait() here */
- outw(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
- dev->base_addr + EL3_CMD);
+ iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
+ vp->ioaddr + EL3_CMD);
+
+ pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);
pci_free_consistent(pdev,
sizeof(struct boom_rx_desc) * RX_RING_SIZE
@@ -3342,7 +3371,7 @@ static int __init vortex_init (void)
static void __exit vortex_eisa_cleanup (void)
{
struct vortex_private *vp;
- long ioaddr;
+ void __iomem *ioaddr;
#ifdef CONFIG_EISA
/* Take care of the EISA devices */
@@ -3351,11 +3380,13 @@ static void __exit vortex_eisa_cleanup (
if (compaq_net_device) {
vp = compaq_net_device->priv;
- ioaddr = compaq_net_device->base_addr;
+ ioaddr = ioport_map(compaq_net_device->base_addr,
+ VORTEX_TOTAL_SIZE);
unregister_netdev (compaq_net_device);
- outw (TotalReset, ioaddr + EL3_CMD);
- release_region (ioaddr, VORTEX_TOTAL_SIZE);
+ iowrite16 (TotalReset, ioaddr + EL3_CMD);
+ release_region(compaq_net_device->base_addr,
+ VORTEX_TOTAL_SIZE);
free_netdev (compaq_net_device);
}
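Taken as a whole, the 3c59x conversion follows the standard iomap recipe: keep a void __iomem * cookie in the private struct, obtain it once with pci_iomap() (or ioport_map() for the non-PCI case above), use ioreadN()/iowriteN() for every access so the same code serves port-I/O and MMIO BARs, and balance the mapping with pci_iounmap() in the remove path. A generic probe/remove sketch of that recipe -- the foo_* names are hypothetical, not vortex code:

#include <linux/pci.h>
#include <linux/io.h>

struct foo_priv {
	void __iomem *ioaddr;	/* one cookie covers port and MMIO BARs */
};

static int foo_map(struct pci_dev *pdev, struct foo_priv *fp, int bar)
{
	fp->ioaddr = pci_iomap(pdev, bar, 0);	/* 0 = map the whole BAR */
	if (!fp->ioaddr)
		return -ENOMEM;
	iowrite16(0x1, fp->ioaddr + 0x0);	/* hypothetical reset register */
	return 0;
}

static void foo_unmap(struct pci_dev *pdev, struct foo_priv *fp)
{
	pci_iounmap(pdev, fp->ioaddr);
}

This is also why the remove path above gains the pci_iounmap() calls that the old long-based code never needed.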
jwltest-b44-alloc.patch:
b44.c | 134 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----
b44.h | 2
2 files changed, 128 insertions(+), 8 deletions(-)
--- NEW FILE jwltest-b44-alloc.patch ---
--- linux-2.6.13/drivers/net/b44.c.orig 2005-08-29 13:45:52.482352805 -0400
+++ linux-2.6.13/drivers/net/b44.c 2005-08-29 13:48:35.657636325 -0400
@@ -107,6 +107,29 @@ static int b44_poll(struct net_device *d
static void b44_poll_controller(struct net_device *dev);
#endif
+static int dma_desc_align_mask;
+static int dma_desc_sync_size;
+
+static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_device(&pdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
+ dma_addr_t dma_base,
+ unsigned long offset,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+}
+
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
return readl(bp->regs + reg);
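The two helpers above sync only the cache line holding one descriptor rather than the whole ring: the byte offset is rounded down to a cache-line boundary by dma_desc_align_mask, and dma_desc_sync_size (set up in b44_init below) covers at least one full descriptor. A worked example of the arithmetic, assuming a 64-byte cache line and b44's 8-byte struct dma_desc:

/* dma_get_cache_alignment() == 64  =>  dma_desc_align_mask == ~63
 * dma_desc_sync_size == max(64, sizeof(struct dma_desc)) == 64
 *
 * descriptor 5: offset = 5 * 8 = 40; 40 & ~63 = 0   -> sync bytes [0, 64)
 * descriptor 9: offset = 9 * 8 = 72; 72 & ~63 = 64  -> sync bytes [64, 128)
 *
 * i.e. each call flushes exactly the one cache line the descriptor
 * shares with its neighbours.
 */

One caveat worth noting: the callers below pass offsets like dest_idx * sizeof(dp), where dp is a pointer, so sizeof(dp) is the pointer size -- that matches the 8-byte descriptor only on 64-bit; sizeof(*dp) was presumably intended.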
@@ -669,6 +692,11 @@ static int b44_alloc_rx_skb(struct b44 *
dp->ctrl = cpu_to_le32(ctrl);
dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+ dest_idx * sizeof(dp),
+ DMA_BIDIRECTIONAL);
+
return RX_PKT_BUF_SZ;
}
@@ -693,6 +721,11 @@ static void b44_recycle_rx(struct b44 *b
pci_unmap_addr_set(dest_map, mapping,
pci_unmap_addr(src_map, mapping));
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
+ src_idx * sizeof(src_desc),
+ DMA_BIDIRECTIONAL);
+
ctrl = src_desc->ctrl;
if (dest_idx == (B44_RX_RING_SIZE - 1))
ctrl |= cpu_to_le32(DESC_CTRL_EOT);
@@ -701,8 +734,14 @@ static void b44_recycle_rx(struct b44 *b
dest_desc->ctrl = ctrl;
dest_desc->addr = src_desc->addr;
+
src_map->skb = NULL;
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
+ dest_idx * sizeof(dest_desc),
+ DMA_BIDIRECTIONAL);
+
pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
RX_PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
@@ -960,6 +999,11 @@ static int b44_start_xmit(struct sk_buff
bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
+ entry * sizeof(bp->tx_ring[0]),
+ DMA_TO_DEVICE);
+
entry = NEXT_TX(entry);
bp->tx_prod = entry;
@@ -1065,6 +1109,16 @@ static void b44_init_rings(struct b44 *b
memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+ dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+ dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ PCI_DMA_TODEVICE);
+
for (i = 0; i < bp->rx_pending; i++) {
if (b44_alloc_rx_skb(bp, -1, i) < 0)
break;
@@ -1086,14 +1140,28 @@ static void b44_free_consistent(struct b
bp->tx_buffers = NULL;
}
if (bp->rx_ring) {
- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
- bp->rx_ring, bp->rx_ring_dma);
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+ dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
}
if (bp->tx_ring) {
- pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
- bp->tx_ring, bp->tx_ring_dma);
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+ dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+ pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
}
}
@@ -1119,12 +1187,56 @@ static int b44_alloc_consistent(struct b
size = DMA_TABLE_BYTES;
bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
- if (!bp->rx_ring)
- goto out_err;
+ if (!bp->rx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+
+ if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
+ goto out_err;
+
+ memset(rx_ring, 0, size);
+ rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+ if (rx_ring_dma + size > B44_DMA_MASK) {
+ kfree(rx_ring);
+ goto out_err;
+ }
+
+ bp->rx_ring = rx_ring;
+ bp->rx_ring_dma = rx_ring_dma;
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
- if (!bp->tx_ring)
- goto out_err;
+ if (!bp->tx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *tx_ring;
+ dma_addr_t tx_ring_dma;
+
+ if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
+ goto out_err;
+
+ memset(tx_ring, 0, size);
+ tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
+ if (tx_ring_dma + size > B44_DMA_MASK) {
+ kfree(tx_ring);
+ goto out_err;
+ }
+
+ bp->tx_ring = tx_ring;
+ bp->tx_ring_dma = tx_ring_dma;
+ bp->flags |= B44_FLAG_TX_RING_HACK;
+ }
return 0;
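The fallback above swaps a failed coherent allocation for a streaming mapping: kmalloc() the ring from ordinary (cacheable, non-GFP_DMA) memory, dma_map_single() it, check that the bus address still fits under the device's 30-bit limit, and set a *_RING_HACK flag so every later descriptor update gets an explicit sync. A condensed sketch of the pattern with the duplicated RX/TX branches folded into one helper (and, unlike the hunk, unmapping on the out-of-range failure path):

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

#define B44_DMA_MASK 0x3fffffff	/* as defined in b44.h */

static void *ring_alloc_fallback(struct device *dev, size_t size,
				 dma_addr_t *mapping, int dir)
{
	void *ring = kmalloc(size, GFP_KERNEL);

	if (!ring)
		return NULL;
	memset(ring, 0, size);
	*mapping = dma_map_single(dev, ring, size, dir);
	if (*mapping + size > B44_DMA_MASK) {
		/* mapped above what the chip can address: give up */
		dma_unmap_single(dev, *mapping, size, dir);
		kfree(ring);
		return NULL;
	}
	return ring;	/* caller sets the RING_HACK flag */
}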
@@ -1972,6 +2084,12 @@ static struct pci_driver b44_driver = {
static int __init b44_init(void)
{
+ unsigned int dma_desc_align_size = dma_get_cache_alignment();
+
+ /* Set up parameters for syncing RX/TX DMA descriptors */
+ dma_desc_align_mask = ~(dma_desc_align_size - 1);
+ dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+
return pci_module_init(&b44_driver);
}
--- linux-2.6.13/drivers/net/b44.h.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/b44.h 2005-08-29 13:48:35.658636192 -0400
@@ -400,6 +400,8 @@ struct b44 {
#define B44_FLAG_ADV_100HALF 0x04000000
#define B44_FLAG_ADV_100FULL 0x08000000
#define B44_FLAG_INTERNAL_PHY 0x10000000
+#define B44_FLAG_RX_RING_HACK 0x20000000
+#define B44_FLAG_TX_RING_HACK 0x40000000
u32 rx_offset;
jwltest-dma-x86_64.patch:
dma-mapping.h | 28 ++++++++++++++++++++++++++++
1 files changed, 28 insertions(+)
--- NEW FILE jwltest-dma-x86_64.patch ---
--- linux-2.6.13/include/asm-x86_64/dma-mapping.h.orig 2005-09-10 16:57:13.698387495 -0400
+++ linux-2.6.13/include/asm-x86_64/dma-mapping.h 2005-09-10 16:57:44.244303844 -0400
@@ -85,6 +85,34 @@ static inline void dma_sync_single_for_d
flush_write_buffers();
}
+static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_single_range_for_cpu(hwdev,dma_handle,offset,size,direction);
+
+ flush_write_buffers();
+}
+
+static inline void dma_sync_single_range_for_device(struct device *hwdev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size, int direction)
+{
+ if (direction == DMA_NONE)
+ out_of_line_bug();
+
+ if (swiotlb)
+ return swiotlb_sync_single_range_for_device(hwdev,dma_handle,offset,size,direction);
+
+ flush_write_buffers();
+}
+
static inline void dma_sync_sg_for_cpu(struct device *hwdev,
struct scatterlist *sg,
int nelems, int direction)
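These two x86_64 stubs mirror the existing dma_sync_single_for_* helpers: when the swiotlb is active they forward to the bounce-buffer range sync (added by the swiotlb patches below); otherwise a write-buffer flush is all cache-coherent x86 needs. From a driver, the calling convention looks like this sketch (buffer layout and names hypothetical):

#include <linux/dma-mapping.h>

/* The device wrote into bytes [off, off+len) of a larger streaming
 * mapping; look at just that range, then hand it back to the device. */
static void peek_range(struct device *dev, dma_addr_t handle,
		       unsigned long off, size_t len, void *cpu_buf)
{
	dma_sync_single_range_for_cpu(dev, handle, off, len, DMA_FROM_DEVICE);
	/* ... inspect cpu_buf + off here; the CPU owns the range ... */
	dma_sync_single_range_for_device(dev, handle, off, len, DMA_FROM_DEVICE);
}

The b44 descriptor-sync helpers earlier in this commit are exactly this call pattern, which is why the x86_64 stubs are needed.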
jwltest-e1000_ethtool_ops-whitespace.patch:
e1000_ethtool.c | 44 ++++++++++++++++++++++----------------------
1 files changed, 22 insertions(+), 22 deletions(-)
--- NEW FILE jwltest-e1000_ethtool_ops-whitespace.patch ---
--- linux-2.6.13/drivers/net/e1000/e1000_ethtool.c.orig 2005-09-11 16:21:40.869965028 -0400
+++ linux-2.6.13/drivers/net/e1000/e1000_ethtool.c 2005-09-11 16:27:05.886584114 -0400
@@ -1705,22 +1705,22 @@ e1000_get_strings(struct net_device *net
}
struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
.get_pauseparam = e1000_get_pauseparam,
.set_pauseparam = e1000_set_pauseparam,
.get_rx_csum = e1000_get_rx_csum,
@@ -1733,12 +1733,12 @@ struct ethtool_ops e1000_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
.set_tso = e1000_set_tso,
#endif
- .self_test_count = e1000_diag_test_count,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .phys_id = e1000_phys_id,
- .get_stats_count = e1000_get_stats_count,
- .get_ethtool_stats = e1000_get_ethtool_stats,
+ .self_test_count = e1000_diag_test_count,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .phys_id = e1000_phys_id,
+ .get_stats_count = e1000_get_stats_count,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
.get_perm_addr = ethtool_op_get_perm_addr,
};
jwltest-ethtool-perm-addr.patch:
3c59x.c | 2 ++
8139cp.c | 2 ++
8139too.c | 2 ++
b44.c | 2 ++
bnx2.c | 2 ++
e100.c | 4 +++-
e1000/e1000_ethtool.c | 1 +
e1000/e1000_main.c | 3 ++-
forcedeth.c | 4 +++-
ixgb/ixgb_ethtool.c | 1 +
ixgb/ixgb_main.c | 3 ++-
ne2k-pci.c | 2 ++
pcnet32.c | 4 +++-
r8169.c | 2 ++
skge.c | 2 ++
sundance.c | 2 ++
tg3.c | 4 ++++
via-rhine.c | 4 +++-
18 files changed, 40 insertions(+), 6 deletions(-)
--- NEW FILE jwltest-ethtool-perm-addr.patch ---
--- linux-2.6.13/drivers/net/e1000/e1000_main.c.orig 2005-09-11 16:16:35.958674397 -0400
+++ linux-2.6.13/drivers/net/e1000/e1000_main.c 2005-09-11 16:21:40.873964494 -0400
@@ -614,8 +614,9 @@ e1000_probe(struct pci_dev *pdev,
if(e1000_read_mac_addr(&adapter->hw))
DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->dev_addr)) {
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
--- linux-2.6.13/drivers/net/e1000/e1000_ethtool.c.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/e1000/e1000_ethtool.c 2005-09-11 16:21:40.869965028 -0400
@@ -1739,6 +1739,7 @@ struct ethtool_ops e1000_ethtool_ops = {
.phys_id = e1000_phys_id,
.get_stats_count = e1000_get_stats_count,
.get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
--- linux-2.6.13/drivers/net/ne2k-pci.c.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/ne2k-pci.c 2005-09-11 16:21:40.901960757 -0400
@@ -372,6 +372,7 @@ static int __devinit ne2k_pci_init_one (
printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
dev->dev_addr[i] = SA_prom[i];
}
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return 0;
@@ -637,6 +638,7 @@ static struct ethtool_ops ne2k_pci_ethto
.get_drvinfo = ne2k_pci_get_drvinfo,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
--- linux-2.6.13/drivers/net/skge.c.orig 2005-09-11 16:16:36.080658108 -0400
+++ linux-2.6.13/drivers/net/skge.c 2005-09-11 16:21:40.922957954 -0400
@@ -743,6 +743,7 @@ static struct ethtool_ops skge_ethtool_o
.phys_id = skge_phys_id,
.get_stats_count = skge_get_stats_count,
.get_ethtool_stats = skge_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/*
@@ -3080,6 +3081,7 @@ static struct net_device *skge_devinit(s
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* device is off until link detection */
netif_carrier_off(dev);
--- linux-2.6.13/drivers/net/tg3.c.orig 2005-09-11 16:16:39.792162505 -0400
+++ linux-2.6.13/drivers/net/tg3.c 2005-09-11 16:21:40.939955684 -0400
@@ -8304,6 +8304,7 @@ static struct ethtool_ops tg3_ethtool_op
.get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -9782,6 +9783,7 @@ static int __devinit tg3_get_macaddr_spa
if (prom_getproplen(node, "local-mac-address") == 6) {
prom_getproperty(node, "local-mac-address",
dev->dev_addr, 6);
+ memcpy(dev->perm_addr, dev->dev_addr, 6);
return 0;
}
}
@@ -9793,6 +9795,7 @@ static int __devinit tg3_get_default_mac
struct net_device *dev = tp->dev;
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+ memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
return 0;
}
#endif
@@ -9862,6 +9865,7 @@ static int __devinit tg3_get_device_addr
#endif
return -EINVAL;
}
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return 0;
}
--- linux-2.6.13/drivers/net/8139cp.c.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/8139cp.c 2005-09-11 16:21:40.839969032 -0400
@@ -1575,6 +1575,7 @@ static struct ethtool_ops cp_ethtool_ops
.set_wol = cp_set_wol,
.get_strings = cp_get_strings,
.get_ethtool_stats = cp_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1773,6 +1774,7 @@ static int cp_init_one (struct pci_dev *
for (i = 0; i < 3; i++)
((u16 *) (dev->dev_addr))[i] =
le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->open = cp_open;
dev->stop = cp_close;
--- linux-2.6.13/drivers/net/forcedeth.c.orig 2005-09-11 16:16:35.965673462 -0400
+++ linux-2.6.13/drivers/net/forcedeth.c 2005-09-11 16:21:40.885962892 -0400
@@ -2065,6 +2065,7 @@ static struct ethtool_ops ops = {
.get_regs_len = nv_get_regs_len,
.get_regs = nv_get_regs,
.nway_reset = nv_nway_reset,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int nv_open(struct net_device *dev)
@@ -2377,8 +2378,9 @@ static int __devinit nv_probe(struct pci
dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->perm_addr)) {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
--- linux-2.6.13/drivers/net/via-rhine.c.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/via-rhine.c 2005-09-11 16:21:40.954953682 -0400
@@ -814,8 +814,9 @@ static int __devinit rhine_init_one(stru
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->perm_addr)) {
rc = -EIO;
printk(KERN_ERR "Invalid MAC address\n");
goto err_out_unmap;
@@ -1829,6 +1830,7 @@ static struct ethtool_ops netdev_ethtool
.set_wol = rhine_set_wol,
.get_sg = ethtool_op_get_sg,
.get_tx_csum = ethtool_op_get_tx_csum,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
--- linux-2.6.13/drivers/net/bnx2.c.orig 2005-09-11 16:16:35.901682007 -0400
+++ linux-2.6.13/drivers/net/bnx2.c 2005-09-11 16:21:40.859966362 -0400
@@ -5015,6 +5015,7 @@ static struct ethtool_ops bnx2_ethtool_o
.phys_id = bnx2_phys_id,
.get_stats_count = bnx2_get_stats_count,
.get_ethtool_stats = bnx2_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
@@ -5442,6 +5443,7 @@ bnx2_init_one(struct pci_dev *pdev, cons
pci_set_drvdata(pdev, dev);
memcpy(dev->dev_addr, bp->mac_addr, 6);
+ memcpy(dev->perm_addr, bp->mac_addr, 6);
bp->name = board_info[ent->driver_data].name,
printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
"IRQ %d, ",
--- linux-2.6.13/drivers/net/3c59x.c.orig 2005-09-11 16:18:52.538437848 -0400
+++ linux-2.6.13/drivers/net/3c59x.c 2005-09-11 16:21:40.819971701 -0400
@@ -1359,6 +1359,7 @@ static int __devinit vortex_probe1(struc
printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (print_info) {
for (i = 0; i < 6; i++)
printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
@@ -3075,6 +3076,7 @@ static struct ethtool_ops vortex_ethtool
.set_settings = vortex_set_settings,
.get_link = vortex_get_link,
.nway_reset = vortex_nway_reset,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
#ifdef CONFIG_PCI
--- linux-2.6.13/drivers/net/sundance.c.orig 2005-09-11 16:16:39.586190008 -0400
+++ linux-2.6.13/drivers/net/sundance.c 2005-09-11 16:21:40.928957153 -0400
@@ -551,6 +551,7 @@ static int __devinit sundance_probe1 (st
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] =
le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
@@ -1621,6 +1622,7 @@ static struct ethtool_ops ethtool_ops =
.get_link = get_link,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
--- linux-2.6.13/drivers/net/8139too.c.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/8139too.c 2005-09-11 16:21:40.846968098 -0400
@@ -970,6 +970,7 @@ static int __devinit rtl8139_init_one (s
for (i = 0; i < 3; i++)
((u16 *) (dev->dev_addr))[i] =
le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* The Rtl8139-specific entries in the device structure. */
dev->open = rtl8139_open;
@@ -2465,6 +2466,7 @@ static struct ethtool_ops rtl8139_ethtoo
.get_strings = rtl8139_get_strings,
.get_stats_count = rtl8139_get_stats_count,
.get_ethtool_stats = rtl8139_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
--- linux-2.6.13/drivers/net/ixgb/ixgb_main.c.orig 2005-09-11 16:16:36.009667588 -0400
+++ linux-2.6.13/drivers/net/ixgb/ixgb_main.c 2005-09-11 16:21:40.892961958 -0400
@@ -460,8 +460,9 @@ ixgb_probe(struct pci_dev *pdev,
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if(!is_valid_ether_addr(netdev->dev_addr)) {
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
err = -EIO;
goto err_eeprom;
}
--- linux-2.6.13/drivers/net/ixgb/ixgb_ethtool.c.orig 2005-09-11 16:16:36.005668122 -0400
+++ linux-2.6.13/drivers/net/ixgb/ixgb_ethtool.c 2005-09-11 16:21:40.889962358 -0400
@@ -723,6 +723,7 @@ struct ethtool_ops ixgb_ethtool_ops = {
.phys_id = ixgb_phys_id,
.get_stats_count = ixgb_get_stats_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
void ixgb_set_ethtool_ops(struct net_device *netdev)
--- linux-2.6.13/drivers/net/b44.c.orig 2005-09-11 16:16:39.781163973 -0400
+++ linux-2.6.13/drivers/net/b44.c 2005-09-11 16:21:40.852967297 -0400
@@ -1789,6 +1789,7 @@ static struct ethtool_ops b44_ethtool_op
.set_pauseparam = b44_set_pauseparam,
.get_msglevel = b44_get_msglevel,
.set_msglevel = b44_set_msglevel,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1831,6 +1832,7 @@ static int __devinit b44_get_invariants(
bp->dev->dev_addr[3] = eeprom[80];
bp->dev->dev_addr[4] = eeprom[83];
bp->dev->dev_addr[5] = eeprom[82];
+ memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
bp->phy_addr = eeprom[90] & 0x1f;
--- linux-2.6.13/drivers/net/e100.c.orig 2005-09-11 16:16:35.951675332 -0400
+++ linux-2.6.13/drivers/net/e100.c 2005-09-11 16:21:40.879963693 -0400
@@ -2391,6 +2391,7 @@ static struct ethtool_ops e100_ethtool_o
.phys_id = e100_phys_id,
.get_stats_count = e100_get_stats_count,
.get_ethtool_stats = e100_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -2541,7 +2542,8 @@ static int __devinit e100_probe(struct p
e100_phy_init(nic);
memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
- if(!is_valid_ether_addr(netdev->dev_addr)) {
+ memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
+ if(!is_valid_ether_addr(netdev->perm_addr)) {
DPRINTK(PROBE, ERR, "Invalid MAC address from "
"EEPROM, aborting.\n");
err = -EAGAIN;
--- linux-2.6.13/drivers/net/pcnet32.c.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/drivers/net/pcnet32.c 2005-09-11 16:21:40.911959422 -0400
@@ -957,6 +957,7 @@ static struct ethtool_ops pcnet32_ethtoo
.phys_id = pcnet32_phys_id,
.get_regs_len = pcnet32_get_regs_len,
.get_regs = pcnet32_get_regs,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
/* only probes for non-PCI devices, the rest are handled by
@@ -1185,9 +1186,10 @@ pcnet32_probe1(unsigned long ioaddr, int
memcpy(dev->dev_addr, promaddr, 6);
}
}
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->dev_addr))
+ if (!is_valid_ether_addr(dev->perm_addr))
memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
if (pcnet32_debug & NETIF_MSG_PROBE) {
--- linux-2.6.13/drivers/net/r8169.c.orig 2005-09-11 16:16:36.046662648 -0400
+++ linux-2.6.13/drivers/net/r8169.c 2005-09-11 16:21:40.916958754 -0400
@@ -1028,6 +1028,7 @@ static struct ethtool_ops rtl8169_ethtoo
.get_strings = rtl8169_get_strings,
.get_stats_count = rtl8169_get_stats_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
+ .get_perm_addr = ethtool_op_get_perm_addr,
};
static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
@@ -1512,6 +1513,7 @@ rtl8169_init_one(struct pci_dev *pdev, c
/* Get MAC address. FIXME: read EEPROM */
for (i = 0; i < MAC_ADDR_LEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->open = rtl8169_open;
dev->hard_start_xmit = rtl8169_start_xmit;
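Every driver in this patch gets the same two-step recipe: at probe time, copy the factory MAC into dev->perm_addr right after reading it from EEPROM or registers -- and validate perm_addr rather than dev_addr, since the latter can be overridden from userspace -- then publish it through the shared ethtool_op_get_perm_addr helper. Distilled into a sketch (foo_* names hypothetical):

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static struct ethtool_ops foo_ethtool_ops = {
	.get_perm_addr	= ethtool_op_get_perm_addr,	/* returns dev->perm_addr */
};

static int foo_set_mac_from_eeprom(struct net_device *dev, const u8 *mac)
{
	memcpy(dev->dev_addr, mac, ETH_ALEN);
	memcpy(dev->perm_addr, mac, ETH_ALEN);	/* the permanent copy */
	if (!is_valid_ether_addr(dev->perm_addr))
		return -EIO;
	dev->ethtool_ops = &foo_ethtool_ops;
	return 0;
}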
jwltest-ia64-max-cacheline-export.patch:
setup.c | 1 +
1 files changed, 1 insertion(+)
--- NEW FILE jwltest-ia64-max-cacheline-export.patch ---
--- linux-2.6.13/arch/ia64/kernel/setup.c.orig 2005-09-10 16:49:31.658103092 -0400
+++ linux-2.6.13/arch/ia64/kernel/setup.c 2005-09-10 16:59:51.061351493 -0400
@@ -79,6 +79,7 @@ unsigned long vga_console_iobase;
unsigned long vga_console_membase;
unsigned long ia64_max_cacheline_size;
+EXPORT_SYMBOL_GPL(ia64_max_cacheline_size);
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
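Without the export, any modular user of ia64_max_cacheline_size fails to resolve the symbol at load time; on ia64, dma_get_cache_alignment() reads this variable, and the b44 patch above calls it from possibly-modular code, which is presumably what motivates this one-liner. The consumer side is just an extern in a GPL-compatible module, as in this sketch:

#include <linux/kernel.h>
#include <linux/module.h>

extern unsigned long ia64_max_cacheline_size;	/* the symbol exported above */

static int __init cacheline_demo_init(void)
{
	printk(KERN_INFO "ia64 max cacheline: %lu bytes\n",
	       ia64_max_cacheline_size);
	return 0;
}
module_init(cacheline_demo_init);

MODULE_LICENSE("GPL");	/* required: the export is EXPORT_SYMBOL_GPL */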
jwltest-rx_dropped.patch:
3c59x.c | 2 +-
e100.c | 4 +---
e1000/e1000_main.c | 1 -
ixgb/ixgb_main.c | 2 --
tg3.c | 6 ++++--
5 files changed, 6 insertions(+), 9 deletions(-)
--- NEW FILE jwltest-rx_dropped.patch ---
--- linux-2.6.13/drivers/net/e1000/e1000_main.c.orig 2005-09-11 16:21:40.873964494 -0400
+++ linux-2.6.13/drivers/net/e1000/e1000_main.c 2005-09-11 16:25:30.661292801 -0400
@@ -2545,7 +2545,6 @@ e1000_update_stats(struct e1000_adapter
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.rlec + adapter->stats.mpc +
adapter->stats.cexterr;
- adapter->net_stats.rx_dropped = adapter->stats.mpc;
adapter->net_stats.rx_length_errors = adapter->stats.rlec;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
--- linux-2.6.13/drivers/net/tg3.c.orig 2005-09-11 16:21:40.939955684 -0400
+++ linux-2.6.13/drivers/net/tg3.c 2005-09-11 16:25:30.683289865 -0400
@@ -6894,8 +6894,7 @@ static struct net_device_stats *tg3_get_
get_stat64(&hw_stats->tx_octets);
stats->rx_errors = old_stats->rx_errors +
- get_stat64(&hw_stats->rx_errors) +
- get_stat64(&hw_stats->rx_discards);
+ get_stat64(&hw_stats->rx_errors);
stats->tx_errors = old_stats->tx_errors +
get_stat64(&hw_stats->tx_errors) +
get_stat64(&hw_stats->tx_mac_errors) +
@@ -6923,6 +6922,9 @@ static struct net_device_stats *tg3_get_
stats->rx_crc_errors = old_stats->rx_crc_errors +
calc_crc_errors(tp);
+ stats->rx_missed_errors = old_stats->rx_missed_errors +
+ get_stat64(&hw_stats->rx_discards);
+
return stats;
}
--- linux-2.6.13/drivers/net/3c59x.c.orig 2005-09-11 16:21:40.819971701 -0400
+++ linux-2.6.13/drivers/net/3c59x.c 2005-09-11 16:25:30.655293602 -0400
@@ -2624,8 +2624,8 @@ static int vortex_rx(struct net_device *
} else if (vortex_debug > 0)
printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
"size %d.\n", dev->name, pkt_len);
+ vp->stats.rx_dropped++;
}
- vp->stats.rx_dropped++;
issue_and_wait(dev, RxDiscard);
}
--- linux-2.6.13/drivers/net/ixgb/ixgb_main.c.orig 2005-09-11 16:21:40.892961958 -0400
+++ linux-2.6.13/drivers/net/ixgb/ixgb_main.c 2005-09-11 16:25:30.672291333 -0400
@@ -1617,8 +1617,6 @@ ixgb_update_stats(struct ixgb_adapter *a
adapter->stats.icbc +
adapter->stats.ecbc + adapter->stats.mpc;
- adapter->net_stats.rx_dropped = adapter->stats.mpc;
-
/* see above
* adapter->net_stats.rx_length_errors = adapter->stats.rlec;
*/
--- linux-2.6.13/drivers/net/e100.c.orig 2005-09-11 16:21:40.879963693 -0400
+++ linux-2.6.13/drivers/net/e100.c 2005-09-11 16:25:30.667292000 -0400
@@ -1387,13 +1387,13 @@ static void e100_update_stats(struct nic
ns->collisions += nic->tx_collisions;
ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
le32_to_cpu(s->tx_lost_crs);
- ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
nic->rx_over_length_errors;
ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
+ ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
le32_to_cpu(s->rx_alignment_errors) +
le32_to_cpu(s->rx_short_frame_errors) +
@@ -1727,12 +1727,10 @@ static inline int e100_rx_indicate(struc
if(unlikely(!(rfd_status & cb_ok))) {
/* Don't indicate if hardware indicates errors */
- nic->net_stats.rx_dropped++;
dev_kfree_skb_any(skb);
} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
/* Don't indicate oversized frames */
nic->rx_over_length_errors++;
- nic->net_stats.rx_dropped++;
dev_kfree_skb_any(skb);
} else {
nic->net_stats.rx_packets++;
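The common thread across all five drivers: frames the hardware counted but could not deliver (missed packets, resource/ring overruns) move from rx_dropped to rx_missed_errors, leaving rx_dropped for the genuinely software case -- a failed sk_buff allocation, as the 3c59x hunk makes explicit by moving the increment inside the no-memory branch. A sketch of the resulting split in a generic driver (names hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_rx_one(struct net_device_stats *stats, unsigned int pkt_len)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

	if (!skb) {
		stats->rx_dropped++;	/* software drop: out of memory */
		return;
	}
	/* ... fill skb, netif_rx(skb), bump rx_packets/rx_bytes ... */
}

static void foo_update_stats(struct net_device_stats *stats, u32 hw_missed)
{
	stats->rx_missed_errors += hw_missed;	/* hardware ran out of buffers */
}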
jwltest-swiotlb-bidirectional.patch:
swiotlb.c | 62 ++++++++++++++++++++++++++++++++++++++++----------------------
1 files changed, 40 insertions(+), 22 deletions(-)
--- NEW FILE jwltest-swiotlb-bidirectional.patch ---
--- linux-2.6.13/lib/swiotlb.c.orig 2005-09-10 16:54:25.850829818 -0400
+++ linux-2.6.13/lib/swiotlb.c 2005-09-10 16:55:54.991910452 -0400
@@ -49,6 +49,14 @@
*/
#define IO_TLB_SHIFT 11
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+ SYNC_FOR_CPU = 0,
+ SYNC_FOR_DEVICE = 1,
+};
+
int swiotlb_force;
/*
@@ -295,21 +303,28 @@ unmap_single(struct device *hwdev, char
}
static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+sync_single(struct device *hwdev, char *dma_addr, size_t size,
+ int dir, int target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
char *buffer = io_tlb_orig_addr[index];
- /*
- * bounce... copy the data back into/from the original buffer
- * XXX How do you handle DMA_BIDIRECTIONAL here ?
- */
- if (dir == DMA_FROM_DEVICE)
- memcpy(buffer, dma_addr, size);
- else if (dir == DMA_TO_DEVICE)
- memcpy(dma_addr, buffer, size);
- else
+ switch (target) {
+ case SYNC_FOR_CPU:
+ if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+ memcpy(buffer, dma_addr, size);
+ else if (dir != DMA_TO_DEVICE && dir != DMA_NONE)
+ BUG();
+ break;
+ case SYNC_FOR_DEVICE:
+ if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+ memcpy(dma_addr, buffer, size);
+ else if (dir != DMA_FROM_DEVICE && dir != DMA_NONE)
+ BUG();
+ break;
+ default:
BUG();
+ }
}
void *
@@ -494,14 +509,14 @@ swiotlb_unmap_single(struct device *hwde
*/
static inline void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir)
+ size_t size, int dir, int target)
{
char *dma_addr = phys_to_virt(dev_addr);
if (dir == DMA_NONE)
BUG();
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- sync_single(hwdev, dma_addr, size, dir);
+ sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
mark_clean(dma_addr, size);
}
@@ -510,14 +525,14 @@ void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir)
{
- swiotlb_sync_single(hwdev, dev_addr, size, dir);
+ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir)
{
- swiotlb_sync_single(hwdev, dev_addr, size, dir);
+ swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
/*
@@ -525,14 +540,15 @@ swiotlb_sync_single_for_device(struct de
*/
static inline void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
- unsigned long offset, size_t size, int dir)
+ unsigned long offset, size_t size,
+ int dir, int target)
{
char *dma_addr = phys_to_virt(dev_addr) + offset;
if (dir == DMA_NONE)
BUG();
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- sync_single(hwdev, dma_addr, size, dir);
+ sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
mark_clean(dma_addr, size);
}
@@ -541,14 +557,16 @@ void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size, int dir)
{
- swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+ SYNC_FOR_CPU);
}
void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size, int dir)
{
- swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
+ SYNC_FOR_DEVICE);
}
/*
@@ -627,7 +645,7 @@ swiotlb_unmap_sg(struct device *hwdev, s
*/
static inline void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir)
+ int nelems, int dir, int target)
{
int i;
@@ -637,21 +655,21 @@ swiotlb_sync_sg(struct device *hwdev, st
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sync_single(hwdev, (void *) sg->dma_address,
- sg->dma_length, dir);
+ sg->dma_length, dir, target);
}
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir)
{
- swiotlb_sync_sg(hwdev, sg, nelems, dir);
+ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir)
{
- swiotlb_sync_sg(hwdev, sg, nelems, dir);
+ swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
int
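With the explicit sync target, DMA_BIDIRECTIONAL bounce buffers finally work: syncing for the CPU copies bounce buffer -> original (so the CPU sees device writes), while syncing for the device copies original -> bounce buffer (so the device sees CPU writes); the direction argument alone cannot distinguish the two once it is BIDIRECTIONAL, which is exactly the XXX the old code carried. Driver-side usage is unchanged, e.g. this sketch of working on a read/write shared area:

#include <linux/dma-mapping.h>

static void rework_area(struct device *dev, dma_addr_t handle,
			void *cpu_buf, size_t size)
{
	/* pull the device's view in: bounce -> cpu_buf, if bounced */
	dma_sync_single_for_cpu(dev, handle, size, DMA_BIDIRECTIONAL);

	/* ... read device status and rewrite fields in cpu_buf ... */

	/* push the CPU's updates out: cpu_buf -> bounce, if bounced */
	dma_sync_single_for_device(dev, handle, size, DMA_BIDIRECTIONAL);
}

The b44 ring-hack syncs earlier in this commit rely on precisely this behaviour.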
jwltest-swiotlb-cleanup.patch:
swiotlb.c | 45 ++++++++++++++++++++++-----------------------
1 files changed, 22 insertions(+), 23 deletions(-)
--- NEW FILE jwltest-swiotlb-cleanup.patch ---
--- linux-2.6.13/lib/swiotlb.c.orig 2005-09-10 16:52:14.546366026 -0400
+++ linux-2.6.13/lib/swiotlb.c 2005-09-10 16:53:39.362046570 -0400
@@ -492,9 +492,9 @@ swiotlb_unmap_single(struct device *hwde
* address back to the card, you must first perform a
* swiotlb_dma_sync_for_device, and then the device again owns the buffer
*/
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir)
+static inline void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
{
char *dma_addr = phys_to_virt(dev_addr);
@@ -507,17 +507,17 @@ swiotlb_sync_single_for_cpu(struct devic
}
void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ swiotlb_sync_single(hwdev, dev_addr, size, dir);
+}
+
+void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir)
{
- char *dma_addr = phys_to_virt(dev_addr);
-
- if (dir == DMA_NONE)
- BUG();
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- sync_single(hwdev, dma_addr, size, dir);
- else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ swiotlb_sync_single(hwdev, dev_addr, size, dir);
}
/*
@@ -594,9 +594,9 @@ swiotlb_unmap_sg(struct device *hwdev, s
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* and usage.
*/
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir)
+static inline void
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
{
int i;
@@ -610,18 +610,17 @@ swiotlb_sync_sg_for_cpu(struct device *h
}
void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
+{
+ swiotlb_sync_sg(hwdev, sg, nelems, dir);
+}
+
+void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int dir)
{
- int i;
-
- if (dir == DMA_NONE)
- BUG();
-
- for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address,
- sg->dma_length, dir);
+ swiotlb_sync_sg(hwdev, sg, nelems, dir);
}
int
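This cleanup is pure deduplication: each _for_cpu/_for_device pair had byte-identical bodies, so the real logic moves into one static inline and the exported entry points shrink to one-line wrappers -- which is what lets the bidirectional patch above thread a target argument through a single place. Reduced to its skeleton, the shape is:

/* one copy of the logic ... */
static inline void op_common(int arg)
{
	/* ... shared body; later grows the 'target' parameter ... */
}

/* ... fronted by thin exported wrappers */
void op_for_cpu(int arg)    { op_common(arg); }
void op_for_device(int arg) { op_common(arg); }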
jwltest-swiotlb-comments.patch:
swiotlb.c | 6 ++++--
1 files changed, 4 insertions(+), 2 deletions(-)
--- NEW FILE jwltest-swiotlb-comments.patch ---
--- linux-2.6.13/lib/swiotlb.c.orig 2005-09-10 16:55:54.991910452 -0400
+++ linux-2.6.13/lib/swiotlb.c 2005-09-10 16:56:16.324058265 -0400
@@ -1,7 +1,7 @@
/*
* Dynamic DMA mapping support.
*
- * This implementation is for IA-64 platforms that do not support
+ * This implementation is for IA-64 and EM64T platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
@@ -11,7 +11,9 @@
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
- * 04/07/.. ak Better overflow handling. Assorted fixes.
+ * 04/07/.. ak Better overflow handling. Assorted fixes.
+ * 05/09/10 linville Add support for syncing ranges, support syncing for
+ * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
*/
#include <linux/cache.h>
jwltest-swiotlb-move.patch:
arch/ia64/Kconfig | 4
arch/ia64/lib/Makefile | 2
arch/ia64/lib/swiotlb.c | 657 --------------------------------------------
arch/x86_64/kernel/Makefile | 2
lib/Makefile | 2
lib/swiotlb.c | 657 ++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 664 insertions(+), 660 deletions(-)
--- NEW FILE jwltest-swiotlb-move.patch ---
--- linux-2.6.13/lib/Makefile.orig 2005-09-10 16:49:35.905536242 -0400
+++ linux-2.6.13/lib/Makefile 2005-09-10 16:52:14.548365759 -0400
@@ -43,6 +43,8 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
--- /dev/null 2005-06-02 17:56:11.397464344 -0400
+++ linux-2.6.13/lib/swiotlb.c 2005-09-10 16:52:14.546366026 -0400
@@ -0,0 +1,657 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * This implementation is for IA-64 platforms that do not support
+ * I/O TLBs (aka DMA address translation hardware).
+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
+ * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm at hpl.hp.com>
+ *
+ * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
+ * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
+ * unnecessary i-cache flushing.
+ * 04/07/.. ak Better overflow handling. Assorted fixes.
+ */
+
+#include <linux/cache.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/dma.h>
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#define OFFSET(val,align) ((unsigned long) \
+ ( (val) & ( (align) - 1)))
+
+#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2. What is the appropriate value ?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE 128
+
+/*
+ * log of the size of each IO TLB slab. The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
+int swiotlb_force;
+
+/*
+ * Used to do a quick range check in swiotlb_unmap_single and
+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
+ */
+static char *io_tlb_start, *io_tlb_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
+ * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long io_tlb_nslabs;
+
+/*
+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
+ */
+static unsigned long io_tlb_overflow = 32*1024;
+
+void *io_tlb_overflow_buffer;
+
+/*
+ * This is a free list describing the number of free entries available from
+ * each index
+ */
+static unsigned int *io_tlb_list;
+static unsigned int io_tlb_index;
+
+/*
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
+ */
+static unsigned char **io_tlb_orig_addr;
+
+/*
+ * Protect the above data structures in the map and unmap calls
+ */
+static DEFINE_SPINLOCK(io_tlb_lock);
+
+static int __init
+setup_io_tlb_npages(char *str)
+{
+ if (isdigit(*str)) {
+ io_tlb_nslabs = simple_strtoul(str, &str, 0);
+ /* avoid tail segment of size < IO_TLB_SEGSIZE */
+ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+ }
+ if (*str == ',')
+ ++str;
+ if (!strcmp(str, "force"))
+ swiotlb_force = 1;
+ return 1;
+}
+__setup("swiotlb=", setup_io_tlb_npages);
+/* make io_tlb_overflow tunable too? */
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the PCI DMA API.
+ */
+void
+swiotlb_init_with_default_size (size_t default_size)
+{
+ unsigned long i;
+
+ if (!io_tlb_nslabs) {
+ io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+ io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+ }
+
+ /*
+ * Get IO TLB memory from the low pages
+ */
+ io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
+ (1 << IO_TLB_SHIFT));
+ if (!io_tlb_start)
+ panic("Cannot allocate SWIOTLB buffer");
+ io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+
+ /*
+ * Allocate and initialize the free list array. This array is used
+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+ * between io_tlb_start and io_tlb_end.
+ */
+ io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+ for (i = 0; i < io_tlb_nslabs; i++)
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_index = 0;
+ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+
+ /*
+ * Get the overflow emergency buffer
+ */
+ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+ printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+ virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+}
+
+void
+swiotlb_init (void)
+{
+ swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+}
+
+static inline int
+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+{
+ dma_addr_t mask = 0xffffffff;
+ /* If the device has a mask, use it, otherwise default to 32 bits */
+ if (hwdev && hwdev->dma_mask)
+ mask = *hwdev->dma_mask;
+ return (addr & ~mask) != 0;
+}
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+static void *
+map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+{
+ unsigned long flags;
+ char *dma_addr;
+ unsigned int nslots, stride, index, wrap;
+ int i;
+
+ /*
+ * For mappings greater than a page, we limit the stride (and
+ * hence alignment) to a page size.
+ */
+ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ if (size > PAGE_SIZE)
+ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+ else
+ stride = 1;
+
+ if (!nslots)
+ BUG();
+
+ /*
+ * Find suitable number of IO TLB entries size that will fit this
+ * request and allocate a buffer from that IO TLB pool.
+ */
+ spin_lock_irqsave(&io_tlb_lock, flags);
+ {
+ wrap = index = ALIGN(io_tlb_index, stride);
+
+ if (index >= io_tlb_nslabs)
+ wrap = index = 0;
+
+ do {
+ /*
+ * If we find a slot that indicates we have 'nslots'
+ * number of contiguous buffers, we allocate the
+ * buffers from that slot and mark the entries as '0'
+ * indicating unavailable.
+ */
+ if (io_tlb_list[index] >= nslots) {
+ int count = 0;
+
+ for (i = index; i < (int) (index + nslots); i++)
+ io_tlb_list[i] = 0;
+ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+ io_tlb_list[i] = ++count;
+ dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+ /*
+ * Update the indices to avoid searching in
+ * the next round.
+ */
+ io_tlb_index = ((index + nslots) < io_tlb_nslabs
+ ? (index + nslots) : 0);
+
+ goto found;
+ }
+ index += stride;
+ if (index >= io_tlb_nslabs)
+ index = 0;
+ } while (index != wrap);
+
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+ return NULL;
+ }
+ found:
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+
+ /*
+ * Save away the mapping from the original address to the DMA address.
+ * This is needed when we sync the memory. Then we sync the buffer if
+ * needed.
+ */
+ io_tlb_orig_addr[index] = buffer;
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ memcpy(dma_addr, buffer, size);
+
+ return dma_addr;
+}
+
+/*
+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
+ */
+static void
+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+{
+ unsigned long flags;
+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ char *buffer = io_tlb_orig_addr[index];
+
+ /*
+ * First, sync the memory before unmapping the entry
+ */
+ if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ /*
+ * bounce... copy the data back into the original buffer and
+ * delete the bounce buffer.
+ */
+ memcpy(buffer, dma_addr, size);
+
+ /*
+ * Return the buffer to the free list by setting the corresponding
+ * entries to indicate the number of contiguous entries available.
+ * While returning the entries to the free list, we merge the entries
+ * with slots below and above the pool being returned.
+ */
+ spin_lock_irqsave(&io_tlb_lock, flags);
+ {
+ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+ io_tlb_list[index + nslots] : 0);
+ /*
+ * Step 1: return the slots to the free list, merging the
+ * slots with superseding slots
+ */
+ for (i = index + nslots - 1; i >= index; i--)
+ io_tlb_list[i] = ++count;
+ /*
+ * Step 2: merge the returned slots with the preceding slots,
+ * if available (non zero)
+ */
+ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
+ io_tlb_list[i] = ++count;
+ }
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
+
+static void
+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+{
+ int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+ char *buffer = io_tlb_orig_addr[index];
+
+ /*
+ * bounce... copy the data back into/from the original buffer
+ * XXX How do you handle DMA_BIDIRECTIONAL here ?
+ */
+ if (dir == DMA_FROM_DEVICE)
+ memcpy(buffer, dma_addr, size);
+ else if (dir == DMA_TO_DEVICE)
+ memcpy(dma_addr, buffer, size);
+ else
+ BUG();
+}
+
+void *
+swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, int flags)
+{
+ unsigned long dev_addr;
+ void *ret;
+ int order = get_order(size);
+
+ /*
+ * XXX fix me: the DMA API should pass us an explicit DMA mask
+ * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
+ * bit range instead of a 16MB one).
+ */
+ flags |= GFP_DMA;
+
+ ret = (void *)__get_free_pages(flags, order);
+ if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+ /*
+ * The allocated memory isn't reachable by the device.
+ * Fall back on swiotlb_map_single().
+ */
+ free_pages((unsigned long) ret, order);
+ ret = NULL;
+ }
+ if (!ret) {
+ /*
+ * We are either out of memory or the device can't DMA
+ * to GFP_DMA memory; fall back on
+ * swiotlb_map_single(), which will grab memory from
+ * the lowest available address range.
+ */
+ dma_addr_t handle;
+ handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(handle))
+ return NULL;
+
+ ret = phys_to_virt(handle);
+ }
+
+ memset(ret, 0, size);
+ dev_addr = virt_to_phys(ret);
+
+ /* Confirm address can be DMA'd by device */
+ if (address_needs_mapping(hwdev, dev_addr)) {
+ printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
+ (unsigned long long)*hwdev->dma_mask, dev_addr);
+ panic("swiotlb_alloc_coherent: allocated memory is out of "
+ "range for device");
+ }
+ *dma_handle = dev_addr;
+ return ret;
+}
+
+void
+swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ if (!(vaddr >= (void *)io_tlb_start
+ && vaddr < (void *)io_tlb_end))
+ free_pages((unsigned long) vaddr, get_order(size));
+ else
+ /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+ swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+}
+
+static void
+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+{
+ /*
+ * Ran out of IOMMU space for this operation. This is very bad.
+ * Unfortunately the drivers cannot handle this operation properly,
+ * unless they check for pci_dma_mapping_error (most don't).
+ * When the mapping is small enough return a static buffer to limit
+ * the damage, or panic when the transfer is too big.
+ */
+ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+ "device %s\n", size, dev ? dev->bus_id : "?");
+
+ if (size > io_tlb_overflow && do_panic) {
+ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
+ panic("PCI-DMA: Memory would be corrupted\n");
+ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
+ panic("PCI-DMA: Random memory would be DMAed\n");
+ }
+}
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode. The
+ * PCI address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
+ */
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+ unsigned long dev_addr = virt_to_phys(ptr);
+ void *map;
+
+ if (dir == DMA_NONE)
+ BUG();
+ /*
+ * If the pointer passed in happens to be in the device's DMA window,
+ * we can safely return the device addr and not worry about bounce
+ * buffering it.
+ */
+ if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ return dev_addr;
+
+ /*
+ * Oh well, have to allocate and map a bounce buffer.
+ */
+ map = map_single(hwdev, ptr, size, dir);
+ if (!map) {
+ swiotlb_full(hwdev, size, dir, 1);
+ map = io_tlb_overflow_buffer;
+ }
+
+ dev_addr = virt_to_phys(map);
+
+ /*
+ * Ensure that the address returned is DMA'ble
+ */
+ if (address_needs_mapping(hwdev, dev_addr))
+ panic("map_single: bounce buffer is not DMA'ble");
+
+ return dev_addr;
+}
+
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+static void
+mark_clean(void *addr, size_t size)
+{
+ unsigned long pg_addr, end;
+
+ pg_addr = PAGE_ALIGN((unsigned long) addr);
+ end = (unsigned long) addr + size;
+ while (pg_addr + PAGE_SIZE <= end) {
+ struct page *page = virt_to_page(pg_addr);
+ set_bit(PG_arch_1, &page->flags);
+ pg_addr += PAGE_SIZE;
+ }
+}
+
+/*
+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+ int dir)
+{
+ char *dma_addr = phys_to_virt(dev_addr);
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ unmap_single(hwdev, dma_addr, size, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
+ * call this function before doing so. At the next point you give the PCI dma
+ * address back to the card, you must first perform a
+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
+ */
+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ char *dma_addr = phys_to_virt(dev_addr);
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ sync_single(hwdev, dma_addr, size, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir)
+{
+ char *dma_addr = phys_to_virt(dev_addr);
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ sync_single(hwdev, dma_addr, size, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_single
+ * interface. Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_single are the
+ * same here.
+ */
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+ int dir)
+{
+ void *addr;
+ unsigned long dev_addr;
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ dev_addr = virt_to_phys(addr);
+ if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
+ if (!sg->dma_address) {
+ /* Don't panic here, we expect map_sg users
+ to do proper error handling. */
+ swiotlb_full(hwdev, sg->length, dir, 0);
+ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
+ sg[0].dma_length = 0;
+ return 0;
+ }
+ } else
+ sg->dma_address = dev_addr;
+ sg->dma_length = sg->length;
+ }
+ return nelems;
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_single() above.
+ */
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
+ int dir)
+{
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
+ *
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
+ */
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
+{
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ sync_single(hwdev, (void *) sg->dma_address,
+ sg->dma_length, dir);
+}
+
+void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir)
+{
+ int i;
+
+ if (dir == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nelems; i++, sg++)
+ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ sync_single(hwdev, (void *) sg->dma_address,
+ sg->dma_length, dir);
+}
+
+int
+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+}
+
+/*
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
+ */
+int
+swiotlb_dma_supported (struct device *hwdev, u64 mask)
+{
+ return (virt_to_phys (io_tlb_end) - 1) <= mask;
+}
+
+EXPORT_SYMBOL(swiotlb_init);
+EXPORT_SYMBOL(swiotlb_map_single);
+EXPORT_SYMBOL(swiotlb_unmap_single);
+EXPORT_SYMBOL(swiotlb_map_sg);
+EXPORT_SYMBOL(swiotlb_unmap_sg);
+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
+EXPORT_SYMBOL(swiotlb_dma_supported);
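For context only, a minimal, hypothetical driver-side sketch of how the scatter-gather entry points exported above are reached through the generic DMA API (dma_map_sg and friends); the function name and error-handling policy are illustrative assumptions, not part of the patch:

#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>	/* sg_dma_address()/sg_dma_len() */

static int example_map_for_tx(struct device *dev, struct scatterlist *sg,
			      int nents)
{
	int i, mapped;

	/* On swiotlb-only systems this lands in swiotlb_map_sg() above. */
	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;	/* bounce pool exhausted; nothing is mapped */

	for (i = 0; i < mapped; i++) {
		/* Program the device with sg_dma_address(&sg[i]) and
		 * sg_dma_len(&sg[i]); mapped may be smaller than nents. */
	}

	/* Unmap with the original nents, per the DMA API rules. */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}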
--- linux-2.6.13/arch/x86_64/kernel/Makefile.orig 2005-09-10 16:49:32.444998075 -0400
+++ linux-2.6.13/arch/x86_64/kernel/Makefile 2005-09-10 16:52:14.549365626 -0400
@@ -27,7 +27,6 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
@@ -41,7 +40,6 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0
bootflag-y += ../../i386/kernel/bootflag.o
cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
topology-y += ../../i386/mach-default/topology.o
-swiotlb-$(CONFIG_SWIOTLB) += ../../ia64/lib/swiotlb.o
microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
--- linux-2.6.13/arch/ia64/lib/Makefile.orig 2005-09-10 16:49:31.667101891 -0400
+++ linux-2.6.13/arch/ia64/lib/Makefile 2005-09-10 16:52:14.554364959 -0400
@@ -9,7 +9,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3
bitop.o checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
- memset.o strlen.o swiotlb.o
+ memset.o strlen.o
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
--- linux-2.6.13/arch/ia64/lib/swiotlb.c.orig 2005-09-10 16:49:31.671101357 -0400
+++ linux-2.6.13/arch/ia64/lib/swiotlb.c 2005-09-10 16:52:14.552365226 -0400
@@ -1,657 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * This implementation is for IA-64 platforms that do not support
- * I/O TLBs (aka DMA address translation hardware).
- * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick at intel.com>
- * Copyright (C) 2000 Goutham Rao <goutham.rao at intel.com>
- * Copyright (C) 2000, 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm at hpl.hp.com>
- *
- * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
- * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
- * unnecessary i-cache flushing.
- * 04/07/.. ak Better overflow handling. Assorted fixes.
- */
-
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-
-#include <asm/io.h>
-#include <asm/pci.h>
-#include <asm/dma.h>
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-
-#define OFFSET(val,align) ((unsigned long) \
- ( (val) & ( (align) - 1)))
-
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
-
-/*
- * Maximum allowable number of contiguous slabs to map,
- * must be a power of 2. What is the appropriate value?
- * The complexity of {map,unmap}_single is linearly dependent on this value.
- */
-#define IO_TLB_SEGSIZE 128
-
-/*
- * log of the size of each IO TLB slab. The number of slabs is command line
- * controllable.
- */
-#define IO_TLB_SHIFT 11
-
-int swiotlb_force;
-
-/*
- * Used to do a quick range check in swiotlb_unmap_single and
- * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static char *io_tlb_start, *io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
-
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-void *io_tlb_overflow_buffer;
-
-/*
- * This is a free list describing the number of free entries available from
- * each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
-
-/*
- * We need to save away the original address corresponding to a mapped entry
- * for the sync operations.
- */
-static unsigned char **io_tlb_orig_addr;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */
-static DEFINE_SPINLOCK(io_tlb_lock);
-
-static int __init
-setup_io_tlb_npages(char *str)
-{
- if (isdigit(*str)) {
- io_tlb_nslabs = simple_strtoul(str, &str, 0);
- /* avoid tail segment of size < IO_TLB_SEGSIZE */
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
- if (*str == ',')
- ++str;
- if (!strcmp(str, "force"))
- swiotlb_force = 1;
- return 1;
-}
-__setup("swiotlb=", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
-
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the PCI DMA API.
- */
-void
-swiotlb_init_with_default_size (size_t default_size)
-{
- unsigned long i;
-
- if (!io_tlb_nslabs) {
- io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
- }
-
- /*
- * Get IO TLB memory from the low pages
- */
- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
- (1 << IO_TLB_SHIFT));
- if (!io_tlb_start)
- panic("Cannot allocate SWIOTLB buffer");
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
-
- /*
- * Allocate and initialize the free list array. This array is used
- * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
- * between io_tlb_start and io_tlb_end.
- */
- io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
- for (i = 0; i < io_tlb_nslabs; i++)
- io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
-
- /*
- * Get the overflow emergency buffer
- */
- io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
- printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
- virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
-}
-
-void
-swiotlb_init (void)
-{
- swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
-}
-
-static inline int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
-{
- dma_addr_t mask = 0xffffffff;
- /* If the device has a mask, use it, otherwise default to 32 bits */
- if (hwdev && hwdev->dma_mask)
- mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
-}
-
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
-{
- unsigned long flags;
- char *dma_addr;
- unsigned int nslots, stride, index, wrap;
- int i;
-
- /*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
- */
- nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
- stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
- else
- stride = 1;
-
- if (!nslots)
- BUG();
-
- /*
- * Find suitable number of IO TLB entries size that will fit this
- * request and allocate a buffer from that IO TLB pool.
- */
- spin_lock_irqsave(&io_tlb_lock, flags);
- {
- wrap = index = ALIGN(io_tlb_index, stride);
-
- if (index >= io_tlb_nslabs)
- wrap = index = 0;
-
- do {
- /*
- * If we find a slot that indicates we have 'nslots'
- * number of contiguous buffers, we allocate the
- * buffers from that slot and mark the entries as '0'
- * indicating unavailable.
- */
- if (io_tlb_list[index] >= nslots) {
- int count = 0;
-
- for (i = index; i < (int) (index + nslots); i++)
- io_tlb_list[i] = 0;
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
- /*
- * Update the indices to avoid searching in
- * the next round.
- */
- io_tlb_index = ((index + nslots) < io_tlb_nslabs
- ? (index + nslots) : 0);
-
- goto found;
- }
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
- } while (index != wrap);
-
- spin_unlock_irqrestore(&io_tlb_lock, flags);
- return NULL;
- }
- found:
- spin_unlock_irqrestore(&io_tlb_lock, flags);
-
- /*
- * Save away the mapping from the original address to the DMA address.
- * This is needed when we sync the memory. Then we sync the buffer if
- * needed.
- */
- io_tlb_orig_addr[index] = buffer;
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- memcpy(dma_addr, buffer, size);
-
- return dma_addr;
-}
-
-/*
- * dma_addr is the kernel virtual address of the bounce buffer to unmap.
- */
-static void
-unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-{
- unsigned long flags;
- int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
-
- /*
- * First, sync the memory before unmapping the entry
- */
- if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
- /*
- * bounce... copy the data back into the original buffer and
- * delete the bounce buffer.
- */
- memcpy(buffer, dma_addr, size);
-
- /*
- * Return the buffer to the free list by setting the corresponding
- * entries to indicate the number of contiguous entries available.
- * While returning the entries to the free list, we merge the entries
- * with slots below and above the pool being returned.
- */
- spin_lock_irqsave(&io_tlb_lock, flags);
- {
- count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
- io_tlb_list[index + nslots] : 0);
- /*
- * Step 1: return the slots to the free list, merging the
- * slots with superseding slots
- */
- for (i = index + nslots - 1; i >= index; i--)
- io_tlb_list[i] = ++count;
- /*
- * Step 2: merge the returned slots with the preceding slots,
- * if available (non zero)
- */
- for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
- io_tlb_list[i] = ++count;
- }
- spin_unlock_irqrestore(&io_tlb_lock, flags);
-}
-
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
-{
- int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
-
- /*
- * bounce... copy the data back into/from the original buffer
- * XXX How do you handle DMA_BIDIRECTIONAL here?
- */
- if (dir == DMA_FROM_DEVICE)
- memcpy(buffer, dma_addr, size);
- else if (dir == DMA_TO_DEVICE)
- memcpy(dma_addr, buffer, size);
- else
- BUG();
-}
-
-void *
-swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, int flags)
-{
- unsigned long dev_addr;
- void *ret;
- int order = get_order(size);
-
- /*
- * XXX fix me: the DMA API should pass us an explicit DMA mask
- * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
- * bit range instead of a 16MB one).
- */
- flags |= GFP_DMA;
-
- ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
- /*
- * The allocated memory isn't reachable by the device.
- * Fall back on swiotlb_map_single().
- */
- free_pages((unsigned long) ret, order);
- ret = NULL;
- }
- if (!ret) {
- /*
- * We are either out of memory or the device can't DMA
- * to GFP_DMA memory; fall back on
- * swiotlb_map_single(), which will grab memory from
- * the lowest available address range.
- */
- dma_addr_t handle;
- handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
- if (dma_mapping_error(handle))
- return NULL;
-
- ret = phys_to_virt(handle);
- }
-
- memset(ret, 0, size);
- dev_addr = virt_to_phys(ret);
-
- /* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr)) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
- (unsigned long long)*hwdev->dma_mask, dev_addr);
- panic("swiotlb_alloc_coherent: allocated memory is out of "
- "range for device");
- }
- *dma_handle = dev_addr;
- return ret;
-}
-
-void
-swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- if (!(vaddr >= (void *)io_tlb_start
- && vaddr < (void *)io_tlb_end))
- free_pages((unsigned long) vaddr, get_order(size));
- else
- /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
-}
-
-static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
-{
- /*
- * Ran out of IOMMU space for this operation. This is very bad.
- * Unfortunately the drivers cannot handle this operation properly
- * unless they check for pci_dma_mapping_error (most don't).
- * When the mapping is small enough return a static buffer to limit
- * the damage, or panic when the transfer is too big.
- */
- printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
- "device %s\n", size, dev ? dev->bus_id : "?");
-
- if (size > io_tlb_overflow && do_panic) {
- if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic("PCI-DMA: Memory would be corrupted\n");
- if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic("PCI-DMA: Random memory would be DMAed\n");
- }
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode. The
- * PCI address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_single or swiotlb_sync_single_for_cpu is performed.
- */
-dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
-{
- unsigned long dev_addr = virt_to_phys(ptr);
- void *map;
-
- if (dir == DMA_NONE)
- BUG();
- /*
- * If the pointer passed in happens to be in the device's DMA window,
- * we can safely return the device addr and not worry about bounce
- * buffering it.
- */
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
- return dev_addr;
-
- /*
- * Oh well, have to allocate and map a bounce buffer.
- */
- map = map_single(hwdev, ptr, size, dir);
- if (!map) {
- swiotlb_full(hwdev, size, dir, 1);
- map = io_tlb_overflow_buffer;
- }
-
- dev_addr = virt_to_phys(map);
-
- /*
- * Ensure that the address returned is DMA'ble
- */
- if (address_needs_mapping(hwdev, dev_addr))
- panic("map_single: bounce buffer is not DMA'ble");
-
- return dev_addr;
-}
-
-/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
- unsigned long pg_addr, end;
-
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
-}
-
-/*
- * Unmap a single streaming mode DMA translation. The dma_addr and size must
- * match what was provided for in a previous swiotlb_map_single call. All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
- int dir)
-{
- char *dma_addr = phys_to_virt(dev_addr);
-
- if (dir == DMA_NONE)
- BUG();
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- unmap_single(hwdev, dma_addr, size, dir);
- else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
-}
-
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a swiotlb_map_single() but wish to interrogate the buffer
- * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
- * call this function before doing so. At the next point you give the PCI dma
- * address back to the card, you must first perform a
- * swiotlb_sync_single_for_device, and then the device again owns the buffer.
- */
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir)
-{
- char *dma_addr = phys_to_virt(dev_addr);
-
- if (dir == DMA_NONE)
- BUG();
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- sync_single(hwdev, dma_addr, size, dir);
- else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
-}
-
-void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, int dir)
-{
- char *dma_addr = phys_to_virt(dev_addr);
-
- if (dir == DMA_NONE)
- BUG();
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
- sync_single(hwdev, dma_addr, size, dir);
- else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_single
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for swiotlb_map_single are the
- * same here.
- */
-int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
- int dir)
-{
- void *addr;
- unsigned long dev_addr;
- int i;
-
- if (dir == DMA_NONE)
- BUG();
-
- for (i = 0; i < nelems; i++, sg++) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
- dev_addr = virt_to_phys(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
- sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
- if (!sg->dma_address) {
- /* Don't panic here, we expect map_sg users
- to do proper error handling. */
- swiotlb_full(hwdev, sg->length, dir, 0);
- swiotlb_unmap_sg(hwdev, sg - i, i, dir);
- sg[0].dma_length = 0;
- return 0;
- }
- } else
- sg->dma_address = dev_addr;
- sg->dma_length = sg->length;
- }
- return nelems;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_single() above.
- */
-void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
- int dir)
-{
- int i;
-
- if (dir == DMA_NONE)
- BUG();
-
- for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
- else if (dir == DMA_FROM_DEVICE)
- mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir)
-{
- int i;
-
- if (dir == DMA_NONE)
- BUG();
-
- for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address,
- sg->dma_length, dir);
-}
-
-void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir)
-{
- int i;
-
- if (dir == DMA_NONE)
- BUG();
-
- for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address,
- sg->dma_length, dir);
-}
-
-int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
-{
- return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
-}
-
-/*
- * Return whether the given PCI device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
- * this function.
- */
-int
-swiotlb_dma_supported (struct device *hwdev, u64 mask)
-{
- return (virt_to_phys (io_tlb_end) - 1) <= mask;
-}
-
-EXPORT_SYMBOL(swiotlb_init);
-EXPORT_SYMBOL(swiotlb_map_single);
-EXPORT_SYMBOL(swiotlb_unmap_single);
-EXPORT_SYMBOL(swiotlb_map_sg);
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-EXPORT_SYMBOL(swiotlb_free_coherent);
-EXPORT_SYMBOL(swiotlb_dma_supported);
--- linux-2.6.13/arch/ia64/Kconfig.orig 2005-09-10 16:49:31.614108963 -0400
+++ linux-2.6.13/arch/ia64/Kconfig 2005-09-10 16:52:14.551365359 -0400
@@ -26,6 +26,10 @@ config MMU
bool
default y
+config SWIOTLB
+ bool
+ default y
+
config RWSEM_XCHGADD_ALGORITHM
bool
default y
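The config symbol above also gates the mask check in swiotlb_dma_supported(). As a hedged illustration (the driver and function names are invented for the example), this is roughly how a PCI driver's mask negotiation ends up there on swiotlb systems:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/*
	 * pci_set_dma_mask() resolves to swiotlb_dma_supported() when
	 * swiotlb is active.  A 32-bit mask always succeeds, since the
	 * bounce pool is allocated from low memory at boot.
	 */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;

	return 0;
}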
jwltest-swiotlb-range.patch:
include/asm-x86_64/swiotlb.h | 8 ++++++++
lib/swiotlb.c | 33 +++++++++++++++++++++++++++++++++
2 files changed, 41 insertions(+)
--- NEW FILE jwltest-swiotlb-range.patch ---
--- linux-2.6.13/include/asm-x86_64/swiotlb.h.orig 2005-08-28 19:41:01.000000000 -0400
+++ linux-2.6.13/include/asm-x86_64/swiotlb.h 2005-09-10 16:54:25.848830086 -0400
@@ -15,6 +15,14 @@ extern void swiotlb_sync_single_for_cpu(
extern void swiotlb_sync_single_for_device(struct device *hwdev,
dma_addr_t dev_addr,
size_t size, int dir);
+extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
+ dma_addr_t dev_addr,
+ unsigned long offset,
+ size_t size, int dir);
+extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
+ dma_addr_t dev_addr,
+ unsigned long offset,
+ size_t size, int dir);
extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
struct scatterlist *sg, int nelems,
int dir);
--- linux-2.6.13/lib/swiotlb.c.orig 2005-09-10 16:53:39.362046570 -0400
+++ linux-2.6.13/lib/swiotlb.c 2005-09-10 16:54:25.850829818 -0400
@@ -521,6 +521,37 @@ swiotlb_sync_single_for_device(struct de
}
/*
+ * Same as above, but for a sub-range of the mapping.
+ */
+static inline void
+swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size, int dir)
+{
+ char *dma_addr = phys_to_virt(dev_addr) + offset;
+
+ if (dir == DMA_NONE)
+ BUG();
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ sync_single(hwdev, dma_addr, size, dir);
+ else if (dir == DMA_FROM_DEVICE)
+ mark_clean(dma_addr, size);
+}
+
+void
+swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size, int dir)
+{
+ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+}
+
+void
+swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size, int dir)
+{
+ swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir);
+}
+
+/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the above swiotlb_map_single
* interface. Here the scatter gather list elements are each tagged with the
@@ -648,6 +679,8 @@ EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
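To show what the new range hooks buy a driver, a hypothetical sketch (the buffer and length names are assumptions, not from the patch) of syncing only the header of a larger streaming mapping through the generic wrappers these functions implement:

#include <linux/dma-mapping.h>

static void example_peek_rx_header(struct device *dev, dma_addr_t buf,
				   size_t hdr_len)
{
	/* Give the CPU a coherent view of just the first hdr_len bytes. */
	dma_sync_single_range_for_cpu(dev, buf, 0, hdr_len, DMA_FROM_DEVICE);

	/* ... inspect the header here ... */

	/* Hand the same range back to the device before it writes again. */
	dma_sync_single_range_for_device(dev, buf, 0, hdr_len, DMA_FROM_DEVICE);
}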
Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/devel/kernel-2.6.spec,v
retrieving revision 1.1619
retrieving revision 1.1619.2.1
diff -u -r1.1619 -r1.1619.2.1
--- kernel-2.6.spec 20 Oct 2005 17:30:17 -0000 1.1619
+++ kernel-2.6.spec 20 Oct 2005 18:01:07 -0000 1.1619.2.1
@@ -20,7 +20,8 @@
%define sublevel 13
%define kversion 2.6.%{sublevel}
%define rpmversion 2.6.%{sublevel}
-%define rhbsys %([ -r /etc/beehive-root -o -n "%{?__beehive_build}" ] && echo || echo .`whoami`)
+#%define rhbsys %([ -r /etc/beehive-root -o -n "%{?__beehive_build}" ] && echo || echo .`whoami`)
+%define rhbsys .jwltest.5
%define release %(R="$Revision$"; RR="${R##: }"; echo ${RR%%?})_FC5%{rhbsys}
%define signmodules 0
%define make_target bzImage
@@ -150,7 +151,8 @@
License: GPLv2
Version: %{rpmversion}
Release: %{release}
-ExclusiveArch: noarch %{all_x86} x86_64 ppc ppc64 ia64 sparc sparc64
+#ExclusiveArch: noarch %{all_x86} x86_64 ppc ppc64 ia64 sparc sparc64
+ExclusiveArch: noarch %{all_x86} x86_64 ia64
ExclusiveOS: Linux
Provides: kernel = %{version}
Provides: kernel-drm = 4.3.0
@@ -210,6 +212,7 @@
Patch201: linux-2.6-x86_64-disable-tlb-flush-filter.patch
Patch202: linux-2.6-x86-apic-off-by-default.patch
Patch203: linux-2.6-x86-vga-vidfail.patch
+Patch204: jwltest-dma-x86_64.patch
# 300 - 399 ppc(64)
Patch300: linux-2.6-ppc64-build.patch
@@ -217,6 +220,8 @@
Patch302: linux-2.6-serial-of.patch
# 400 - 499 ia64
+Patch400: jwltest-ia64-max-cacheline-export.patch
+
# 500 - 599 s390(x)
# 600 - 699 sparc(64)
@@ -297,6 +302,12 @@
Patch1301: linux-2.6-net-sundance-ip100A.patch
Patch1302: linux-2.6-net-atm-lanai-nodev-rmmod.patch
Patch1303: linux-2.6-net-acenic-use-after-free.patch
+Patch1304: jwltest-b44-alloc.patch
+Patch1305: jwltest-3c59x-mmio.patch
+Patch1306: jwltest-3c59x-misc.patch
+Patch1307: jwltest-ethtool-perm-addr.patch
+Patch1308: jwltest-e1000_ethtool_ops-whitespace.patch
+Patch1309: jwltest-rx_dropped.patch
# Netdump and Diskdump bits.
Patch1500: linux-2.6-crashdump-common.patch
@@ -320,6 +331,11 @@
Patch1700: linux-2.6-missing-exports.patch
Patch1710: linux-2.6-radeon-backlight.patch
Patch1720: linux-2.6-ide-tune-locking.patch
+Patch1730: jwltest-swiotlb-move.patch
+Patch1731: jwltest-swiotlb-cleanup.patch
+Patch1732: jwltest-swiotlb-range.patch
+Patch1733: jwltest-swiotlb-bidirectional.patch
+Patch1734: jwltest-swiotlb-comments.patch
# ACPI patches.
Patch1800: linux-2.6-acpi-enable-ecburst.patch
@@ -578,6 +594,8 @@
# for the installer cd that wants to automatically fall back to textmode
# in that case
%patch203 -p1
+# implement dma_sync_single_range_for_{cpu,device}
+%patch204 -p1
#
# ppc64
@@ -590,6 +608,13 @@
%patch302 -p1
#
+# ia64
+#
+
+# EXPORT_SYMBOL_GPL ia64_max_cacheline_size (needed for dma_get_cache_alignment)
+%patch400 -p1
+
+#
# Xen
#
%if %{includexen}
@@ -719,6 +744,18 @@
%patch1302 -p1
# Acenic use after free
%patch1303 -p1
+# heroic allocation for b44 dma descriptors
+%patch1304 -p1
+# allow 3c59x to use memory-mapped PCI I/O resources
+%patch1305 -p1
+# misc 3c59x patches
+%patch1306 -p1
+# add support for ETHTOOL_GPERMADDR to a number of drivers
+%patch1307 -p1
+# cleanup some whitespace issues in e1000_ethtool_ops
+%patch1308 -p1
+# normalize handling of rx_dropped among some popular drivers
+%patch1309 -p1
# netdump bits
%patch1500 -p1
@@ -759,6 +796,16 @@
%patch1710 -p1
# Fix IDE locking bug.
%patch1720 -p1
+# Move swiotlb.c to generic location
+%patch1730 -p1
+# Some swiotlb code cleanups
+%patch1731 -p1
+# Implement swiotlb_sync_single_range_for_{cpu,device}
+%patch1732 -p1
+# swiotlb: support syncing DMA_BIDIRECTIONAL mappings
+%patch1733 -p1
+# swiotlb: add a credit line *blush*
+%patch1734 -p1
# ACPI patches.
# Enable EC burst