net: dwc_eth_qos: restore support of non-cache-aligned descriptors
Solve an issue with AXI_WIDTH_32 on a platform with a 64-byte cache line; in this case the requested descriptor padding length should be 12, but the associated parameter EQOS_DMA_CH0_CONTROL.DSL is limited to 3 bits = 7. As the DMA descriptors can't be correctly aligned with the cache line, the maintenance of each descriptor can't be guaranteed by a simple cache-line operation: flush or invalidate. To avoid all these maintenance issues, the descriptors need to be allocated in NOT CACHEABLE memory, allocated by noncached_alloc() when CONFIG_SYS_NONCACHED_MEMORY is enabled. This patch doesn't change the current behavior when the descriptors can be cache-aligned with the "Descriptor Skip Length" field of the DMA channel control register, i.e. when eqos->desc_pad = true. Change-Id: Iada23492743e3af977e07c1f1b8c2f32550436f7 Signed-off-by: Patrick Delaunay <patrick.delaunay@foss.st.com> Reviewed-on: https://gerrit.st.com/c/mpu/oe/st/u-boot/+/236650 Reviewed-by: CITOOLS <MDG-smet-aci-reviews@list.st.com> Reviewed-by: Christophe ROULLIER <christophe.roullier@st.com>
This commit is contained in:
committed by
Patrice Chotard
parent
7c0508bcc8
commit
6ee1aad5b6
@ -46,6 +46,7 @@
|
|||||||
#include <asm/cache.h>
|
#include <asm/cache.h>
|
||||||
#include <asm/gpio.h>
|
#include <asm/gpio.h>
|
||||||
#include <asm/io.h>
|
#include <asm/io.h>
|
||||||
|
#include <dm/device_compat.h>
|
||||||
#include <eth_phy.h>
|
#include <eth_phy.h>
|
||||||
#ifdef CONFIG_ARCH_IMX8M
|
#ifdef CONFIG_ARCH_IMX8M
|
||||||
#include <asm/arch/clock.h>
|
#include <asm/arch/clock.h>
|
||||||
@ -211,6 +212,7 @@ struct eqos_dma_regs {
|
|||||||
#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)
|
#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)
|
||||||
|
|
||||||
#define EQOS_DMA_CH0_CONTROL_DSL_SHIFT 18
|
#define EQOS_DMA_CH0_CONTROL_DSL_SHIFT 18
|
||||||
|
#define EQOS_DMA_CH0_CONTROL_DSL_MAX 7
|
||||||
#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)
|
#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)
|
||||||
|
|
||||||
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
|
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
|
||||||
@ -275,9 +277,11 @@ struct eqos_config {
|
|||||||
struct eqos_ops *ops;
|
struct eqos_ops *ops;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct eqos_priv;
|
||||||
|
|
||||||
struct eqos_ops {
|
struct eqos_ops {
|
||||||
void (*eqos_inval_desc)(void *desc);
|
void (*eqos_inval_desc)(struct eqos_priv *eqos, void *desc);
|
||||||
void (*eqos_flush_desc)(void *desc);
|
void (*eqos_flush_desc)(struct eqos_priv *eqos, void *desc);
|
||||||
void (*eqos_inval_buffer)(void *buf, size_t size);
|
void (*eqos_inval_buffer)(void *buf, size_t size);
|
||||||
void (*eqos_flush_buffer)(void *buf, size_t size);
|
void (*eqos_flush_buffer)(void *buf, size_t size);
|
||||||
int (*eqos_probe_resources)(struct udevice *dev);
|
int (*eqos_probe_resources)(struct udevice *dev);
|
||||||
@ -321,6 +325,7 @@ struct eqos_priv {
|
|||||||
bool started;
|
bool started;
|
||||||
bool reg_access_ok;
|
bool reg_access_ok;
|
||||||
bool clk_ck_enabled;
|
bool clk_ck_enabled;
|
||||||
|
bool use_cached_mem;
|
||||||
#ifdef CONFIG_DM_REGULATOR
|
#ifdef CONFIG_DM_REGULATOR
|
||||||
struct udevice *phy_supply;
|
struct udevice *phy_supply;
|
||||||
#endif
|
#endif
|
||||||
@ -346,15 +351,38 @@ struct eqos_priv {
|
|||||||
*/
|
*/
|
||||||
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
|
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
|
||||||
{
|
{
|
||||||
|
void *descs = NULL;
|
||||||
|
ulong desc_pad;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* if descriptors can to be cache-line aligned with the DSL =
|
||||||
|
* "Descriptor Skip Length" field of the DMA channel control register
|
||||||
|
*/
|
||||||
eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
|
eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
|
||||||
(unsigned int)ARCH_DMA_MINALIGN);
|
(unsigned int)ARCH_DMA_MINALIGN);
|
||||||
|
desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
|
||||||
|
eqos->config->axi_bus_width;
|
||||||
|
if (desc_pad <= EQOS_DMA_CH0_CONTROL_DSL_MAX) {
|
||||||
|
eqos->use_cached_mem = true;
|
||||||
|
descs = memalign(eqos->desc_size, num * eqos->desc_size);
|
||||||
|
} else {
|
||||||
|
eqos->use_cached_mem = false;
|
||||||
|
eqos->desc_size = sizeof(struct eqos_desc);
|
||||||
|
#ifdef CONFIG_SYS_NONCACHED_MEMORY
|
||||||
|
descs = (void *)noncached_alloc(num * eqos->desc_size, ARCH_DMA_MINALIGN);
|
||||||
|
#else
|
||||||
|
log_err("DMA descriptors with cached memory.");
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
return memalign(eqos->desc_size, num * eqos->desc_size);
|
return descs;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void eqos_free_descs(void *descs)
|
static void eqos_free_descs(struct eqos_priv *eqos)
|
||||||
{
|
{
|
||||||
free(descs);
|
if (eqos->use_cached_mem)
|
||||||
|
free(eqos->descs);
|
||||||
|
/* memory allocated by noncached_alloc() can't be freed */
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
|
static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
|
||||||
@ -364,22 +392,24 @@ static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
|
|||||||
((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
|
((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void eqos_inval_desc_generic(void *desc)
|
static void eqos_inval_desc_generic(struct eqos_priv *eqos, void *desc)
|
||||||
{
|
{
|
||||||
unsigned long start = (unsigned long)desc;
|
unsigned long start = (unsigned long)desc;
|
||||||
unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
|
unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
|
||||||
ARCH_DMA_MINALIGN);
|
ARCH_DMA_MINALIGN);
|
||||||
|
|
||||||
invalidate_dcache_range(start, end);
|
if (eqos->use_cached_mem)
|
||||||
|
invalidate_dcache_range(start, end);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void eqos_flush_desc_generic(void *desc)
|
static void eqos_flush_desc_generic(struct eqos_priv *eqos, void *desc)
|
||||||
{
|
{
|
||||||
unsigned long start = (unsigned long)desc;
|
unsigned long start = (unsigned long)desc;
|
||||||
unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
|
unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
|
||||||
ARCH_DMA_MINALIGN);
|
ARCH_DMA_MINALIGN);
|
||||||
|
|
||||||
flush_dcache_range(start, end);
|
if (eqos->use_cached_mem)
|
||||||
|
flush_dcache_range(start, end);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void eqos_inval_buffer_tegra186(void *buf, size_t size)
|
static void eqos_inval_buffer_tegra186(void *buf, size_t size)
|
||||||
@ -1300,12 +1330,17 @@ static int eqos_start(struct udevice *dev)
|
|||||||
EQOS_MAX_PACKET_SIZE <<
|
EQOS_MAX_PACKET_SIZE <<
|
||||||
EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);
|
EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);
|
||||||
|
|
||||||
desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
|
setbits_le32(&eqos->dma_regs->ch0_control, EQOS_DMA_CH0_CONTROL_PBLX8);
|
||||||
eqos->config->axi_bus_width;
|
|
||||||
|
|
||||||
setbits_le32(&eqos->dma_regs->ch0_control,
|
/* "Descriptor Skip Length" field of the DMA channel control register */
|
||||||
EQOS_DMA_CH0_CONTROL_PBLX8 |
|
if (eqos->use_cached_mem) {
|
||||||
(desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT));
|
desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
|
||||||
|
eqos->config->axi_bus_width;
|
||||||
|
setbits_le32(&eqos->dma_regs->ch0_control,
|
||||||
|
desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT);
|
||||||
|
if (desc_pad > EQOS_DMA_CH0_CONTROL_DSL_MAX)
|
||||||
|
dev_dbg(dev, "DMA_CH0_CONTROL.DSL overflow");
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Burst length must be < 1/2 FIFO size.
|
* Burst length must be < 1/2 FIFO size.
|
||||||
@ -1338,7 +1373,7 @@ static int eqos_start(struct udevice *dev)
|
|||||||
|
|
||||||
for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
|
for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
|
||||||
struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);
|
struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);
|
||||||
eqos->config->ops->eqos_flush_desc(tx_desc);
|
eqos->config->ops->eqos_flush_desc(eqos, tx_desc);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
|
for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
|
||||||
@ -1347,7 +1382,7 @@ static int eqos_start(struct udevice *dev)
|
|||||||
(i * EQOS_MAX_PACKET_SIZE));
|
(i * EQOS_MAX_PACKET_SIZE));
|
||||||
rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
|
rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
|
||||||
mb();
|
mb();
|
||||||
eqos->config->ops->eqos_flush_desc(rx_desc);
|
eqos->config->ops->eqos_flush_desc(eqos, rx_desc);
|
||||||
eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
|
eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
|
||||||
(i * EQOS_MAX_PACKET_SIZE),
|
(i * EQOS_MAX_PACKET_SIZE),
|
||||||
EQOS_MAX_PACKET_SIZE);
|
EQOS_MAX_PACKET_SIZE);
|
||||||
@ -1478,13 +1513,13 @@ static int eqos_send(struct udevice *dev, void *packet, int length)
|
|||||||
*/
|
*/
|
||||||
mb();
|
mb();
|
||||||
tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
|
tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
|
||||||
eqos->config->ops->eqos_flush_desc(tx_desc);
|
eqos->config->ops->eqos_flush_desc(eqos, tx_desc);
|
||||||
|
|
||||||
writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
|
writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
|
||||||
&eqos->dma_regs->ch0_txdesc_tail_pointer);
|
&eqos->dma_regs->ch0_txdesc_tail_pointer);
|
||||||
|
|
||||||
for (i = 0; i < 1000000; i++) {
|
for (i = 0; i < 1000000; i++) {
|
||||||
eqos->config->ops->eqos_inval_desc(tx_desc);
|
eqos->config->ops->eqos_inval_desc(eqos, tx_desc);
|
||||||
if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
|
if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
|
||||||
return 0;
|
return 0;
|
||||||
udelay(1);
|
udelay(1);
|
||||||
@ -1504,7 +1539,7 @@ static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
|
|||||||
debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);
|
debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);
|
||||||
|
|
||||||
rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
|
rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
|
||||||
eqos->config->ops->eqos_inval_desc(rx_desc);
|
eqos->config->ops->eqos_inval_desc(eqos, rx_desc);
|
||||||
if (rx_desc->des3 & EQOS_DESC3_OWN) {
|
if (rx_desc->des3 & EQOS_DESC3_OWN) {
|
||||||
debug("%s: RX packet not available\n", __func__);
|
debug("%s: RX packet not available\n", __func__);
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
@ -1542,7 +1577,7 @@ static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
|
|||||||
|
|
||||||
rx_desc->des0 = 0;
|
rx_desc->des0 = 0;
|
||||||
mb();
|
mb();
|
||||||
eqos->config->ops->eqos_flush_desc(rx_desc);
|
eqos->config->ops->eqos_flush_desc(eqos, rx_desc);
|
||||||
eqos->config->ops->eqos_inval_buffer(packet, length);
|
eqos->config->ops->eqos_inval_buffer(packet, length);
|
||||||
rx_desc->des0 = (u32)(ulong)packet;
|
rx_desc->des0 = (u32)(ulong)packet;
|
||||||
rx_desc->des1 = 0;
|
rx_desc->des1 = 0;
|
||||||
@ -1553,7 +1588,7 @@ static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
|
|||||||
*/
|
*/
|
||||||
mb();
|
mb();
|
||||||
rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
|
rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
|
||||||
eqos->config->ops->eqos_flush_desc(rx_desc);
|
eqos->config->ops->eqos_flush_desc(eqos, rx_desc);
|
||||||
|
|
||||||
writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
|
writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
|
||||||
|
|
||||||
@ -1628,7 +1663,7 @@ static int eqos_remove_resources_core(struct udevice *dev)
|
|||||||
free(eqos->rx_pkt);
|
free(eqos->rx_pkt);
|
||||||
free(eqos->rx_dma_buf);
|
free(eqos->rx_dma_buf);
|
||||||
free(eqos->tx_dma_buf);
|
free(eqos->tx_dma_buf);
|
||||||
eqos_free_descs(eqos->descs);
|
eqos_free_descs(eqos);
|
||||||
|
|
||||||
debug("%s: OK\n", __func__);
|
debug("%s: OK\n", __func__);
|
||||||
return 0;
|
return 0;
|
||||||
|
|||||||
Reference in New Issue
Block a user