/*
<:copyright-BRCM:2016:DUAL/GPL:standard
Broadcom Proprietary and Confidential. (c) 2016 Broadcom
All Rights Reserved
Unless you and Broadcom execute a separate written software license
agreement governing use of this software, this software is licensed
to you under the terms of the GNU General Public License version 2
(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
with the following added to such license:
As a special exception, the copyright holders of this software give
you permission to link this software with independent modules, and
to copy and distribute the resulting executable under terms of your
choice, provided that you also meet, for each linked independent
module, the terms and conditions of the license of that module.
An independent module is a module which is not derived from this
software. The special exception does not apply to any modifications
of the software.
Notwithstanding the above, under no circumstances may you combine
this software in any way with any other Broadcom software provided
under a license other than the GPL, without Broadcom's express prior
written consent.
:>
*/
#include <bcmtr_pcie.h>
#include <bcmolt_tr_pcie_specific.h>
#include "bcmtr_pcie_sw_queue.h"
/*
 * bcmtr_pcie_sw_queue.c
 * Software layer on top of the low-level PCIe driver that adds support
 * for s/w queues and per-priority scheduling
 */
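/*
 * Lifecycle sketch (illustrative, not part of the driver): MY_DEV is a
 * hypothetical device index and the queue depths are arbitrary assumptions.
 *
 *   bcmtr_swq_init();                 // once: register the tx-done callback
 *   bcmtr_swq_device_init(MY_DEV);    // per device
 *   bcmtr_swq_tx_queue_cfg(MY_DEV, BCMTR_PCIE_PRTY_NORMAL, 64, 256);
 *   ...                               // send/receive traffic
 *   bcmtr_swq_device_exit(MY_DEV);    // frees any buffers still queued
 *   bcmtr_swq_exit();                 // unregister and clean up all devices
 */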
typedef struct
{
bcmos_buf_queue txq; /* Transmit queue */
bcmos_buf_queue rxq; /* Receive queue */
uint32_t max_hwq_size;
uint32_t max_swq_size;
bcmtr_swq_rx_cb rx_cb; /* Optional rx_cb for callback-based RX buffer delivery */
} pcie_swq;
static pcie_swq swq_info[BCMTR_MAX_OLTS][BCMTR_PCIE_PRTY__NUM_OF];
static uint32_t hwq_occupancy[BCMTR_MAX_OLTS]; /* Number of unacknowledged buffers in hw tx queue */
static bcmos_bool swq_initialized[BCMTR_MAX_OLTS];
static bcmos_fastlock tx_lock[BCMTR_MAX_OLTS];
static bcmos_fastlock rx_lock[BCMTR_MAX_OLTS];
#define BCMTR_SWQ_GET_RETURN_IF_ERROR(device,prty,swq) \
    do { \
        if ((device) >= BCMTR_MAX_OLTS) \
            return BCM_ERR_PARM; \
        (swq) = &swq_info[device][prty]; \
    } while (0)
static inline long _bcmtr_swq_tx_lock(uint8_t device)
{
return bcmos_fastlock_lock(&tx_lock[device]);
}
static inline void _bcmtr_swq_tx_unlock(uint8_t device, long flags)
{
bcmos_fastlock_unlock(&tx_lock[device], flags);
}
static inline long _bcmtr_swq_rx_lock(uint8_t device)
{
return bcmos_fastlock_lock(&rx_lock[device]);
}
static inline void _bcmtr_swq_rx_unlock(uint8_t device, long flags)
{
bcmos_fastlock_unlock(&rx_lock[device], flags);
}
/** Tx done callback.
* Must be called under tx_lock
*/
static void _bcmtr_swq_tx_done_cb(uint8_t device, bcmos_buf *buf)
{
BUG_ON(!hwq_occupancy[device]);
--hwq_occupancy[device];
bcmos_buf_free(buf);
}
/* Initialize PCIe software queue module */
bcmos_errno bcmtr_swq_init(void)
{
return bcmtr_pcie_tx_done_cblk_register(_bcmtr_swq_tx_done_cb);
}
/* Cleanup software queue module
*/
void bcmtr_swq_exit(void)
{
int i;
/* Unregister from bcmtr_pcie driver */
bcmtr_pcie_tx_done_cblk_unregister();
for (i = 0; i < BCMTR_MAX_OLTS; i++)
bcmtr_swq_device_exit(i);
}
/* Initialize per-device software queue state */
bcmos_errno bcmtr_swq_device_init(uint8_t device)
{
bcmtr_pcie_prty prty;
if (device >= BCMTR_MAX_OLTS)
return BCM_ERR_PARM;
if (swq_initialized[device])
return BCM_ERR_ALREADY;
bcmos_fastlock_init(&tx_lock[device], 0);
bcmos_fastlock_init(&rx_lock[device], 0);
for (prty = 0; prty < BCMTR_PCIE_PRTY__NUM_OF; prty++)
{
pcie_swq *swq = &swq_info[device][prty];
bcmos_buf_queue_init(&swq->txq);
bcmos_buf_queue_init(&swq->rxq);
swq->rx_cb = NULL;
swq->max_hwq_size = swq->max_swq_size = 0;
}
swq_initialized[device] = BCMOS_TRUE;
return BCM_ERR_OK;
}
/* Clean up per-device software queue state */
void bcmtr_swq_device_exit(uint8_t device)
{
bcmtr_pcie_prty prty;
if (!swq_initialized[device])
return;
for (prty = 0; prty < BCMTR_PCIE_PRTY__NUM_OF; prty++)
{
pcie_swq *swq = &swq_info[device][prty];
bcmos_buf *buf;
        while ((buf = bcmos_buf_queue_get(&swq->txq)))
            bcmos_buf_free(buf);
        while ((buf = bcmos_buf_queue_get(&swq->rxq)))
            bcmos_buf_free(buf);
}
swq_initialized[device] = BCMOS_FALSE;
}
/** Send buffer to the peer
* \param[in] device Maple device index
* \param[in] channel Channel id (opaque to the bcmtr_pcie driver)
* \param[in] buf Buffer to be transferred
* \returns: 0 in case of success or error code < 0
*/
bcmos_errno bcmtr_swq_send(uint8_t device, uint8_t channel, bcmos_buf *buf)
{
bcmtr_pcie_prty prty = (channel >= BCMTR_SWQ_FIRST_URGENT_CHANNEL) ?
BCMTR_PCIE_PRTY_URGENT : BCMTR_PCIE_PRTY_NORMAL;
pcie_swq *swq;
bcmos_bool was_empty;
bcmos_bool hw_queue_full;
bcmos_errno err;
long flags;
BCMTR_SWQ_GET_RETURN_IF_ERROR(device, prty, swq);
/* Store channel in the buffer */
bcmos_buf_channel_set(buf, channel);
/* Prevent concurrent access to the queue */
flags = _bcmtr_swq_tx_lock(device);
/* Store q-was-empty status */
was_empty = bcmos_buf_queue_is_empty(&swq->txq);
    /* Check whether the max h/w queue occupancy has been reached. If it hasn't
     * and the s/w queue is empty, submit directly to the h/w queue.
     */
hw_queue_full = (swq->max_hwq_size && hwq_occupancy[device] >= swq->max_hwq_size);
if (was_empty && !hw_queue_full)
{
++hwq_occupancy[device];
_bcmtr_swq_tx_unlock(device, flags);
err = bcmtr_pcie_send(device, channel, buf);
if (err)
{
flags = _bcmtr_swq_tx_lock(device);
--hwq_occupancy[device];
            /* If the s/w queue is enabled, enqueue the buffer; otherwise just return the error */
if (swq->max_swq_size || swq->max_hwq_size)
{
bcmos_buf_queue_put(&swq->txq, buf);
err = BCM_ERR_OK;
}
_bcmtr_swq_tx_unlock(device, flags);
}
}
else
{
bcmos_buf_queue_put(&swq->txq, buf);
_bcmtr_swq_tx_unlock(device, flags);
err = BCM_ERR_OK;
}
return err;
}
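/*
 * Usage sketch (illustrative): bcmos_buf_alloc() is assumed to come from the
 * OS abstraction layer, and MY_DEV/my_channel/len are hypothetical. Channels
 * at or above BCMTR_SWQ_FIRST_URGENT_CHANNEL map to the urgent priority.
 *
 *   bcmos_buf *b = bcmos_buf_alloc(len);
 *   if (b && bcmtr_swq_send(MY_DEV, my_channel, b) != BCM_ERR_OK)
 *       bcmos_buf_free(b); // send failed and s/w queueing is disabled: caller still owns b
 */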
/** Receive packet from device
* \param[in] device Maple device index
* \param[in] prty Priority
* \param[out] channel message channel from the BD
* \param[out] buf pointer to network buffer containing the
* received packet
* \returns: 0 in case of success or error code < 0
*/
bcmos_errno bcmtr_swq_receive(uint8_t device, bcmtr_pcie_prty prty, uint8_t *channel, bcmos_buf **buf)
{
pcie_swq *swq;
long flags;
bcmos_errno err;
BCMTR_SWQ_GET_RETURN_IF_ERROR(device, prty, swq);
    /* Prevent concurrent access to the queue */
flags = _bcmtr_swq_rx_lock(device);
*buf = bcmos_buf_queue_get(&swq->rxq);
if (*buf)
{
*channel = bcmos_buf_channel(*buf);
err = BCM_ERR_OK;
}
else
{
err = BCM_ERR_QUEUE_EMPTY;
}
_bcmtr_swq_rx_unlock(device, flags);
return err;
}
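/*
 * Usage sketch (illustrative): drain one priority until its s/w queue is
 * empty. MY_DEV and my_handle_rx() are hypothetical; the consumer takes
 * ownership of each returned buffer.
 *
 *   uint8_t channel;
 *   bcmos_buf *b;
 *   while (bcmtr_swq_receive(MY_DEV, BCMTR_PCIE_PRTY_NORMAL, &channel, &b) == BCM_ERR_OK)
 *       my_handle_rx(channel, b);
 */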
/** Configure TX queue limits
 * \param[in] device     Maple device index
 * \param[in] prty       Priority
 * \param[in] hardq_size Max number of unacknowledged buffers allowed in the h/w queue (0 = no limit)
 * \param[in] softq_size S/w queue size. A non-zero value enables s/w queueing when the h/w send fails
 * \returns: 0 in case of success or error code < 0
 */
bcmos_errno bcmtr_swq_tx_queue_cfg(uint8_t device, bcmtr_pcie_prty prty, uint32_t hardq_size, uint32_t softq_size)
{
pcie_swq *swq;
BCMTR_SWQ_GET_RETURN_IF_ERROR(device, prty, swq);
swq->max_hwq_size = hardq_size;
swq->max_swq_size = softq_size;
return BCM_ERR_OK;
}
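/*
 * Configuration sketch (illustrative; the depths are arbitrary): with both
 * sizes left at 0, a failed bcmtr_pcie_send() is reported to the caller
 * instead of being absorbed by the s/w queue.
 *
 *   bcmtr_swq_tx_queue_cfg(MY_DEV, BCMTR_PCIE_PRTY_URGENT, 32, 128);
 */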
/** Register for "data received indication"
* \param[in] device Maple device index
* \param[in] prty Priority
* \param[in] cb Callback pointer
* \returns: 0 in case of success or error code < 0
*/
bcmos_errno bcmtr_swq_rx_cb_register(uint8_t device, bcmtr_pcie_prty prty, bcmtr_swq_rx_cb rx_cb)
{
if (device >= BCMTR_MAX_OLTS)
return BCM_ERR_PARM;
swq_info[device][prty].rx_cb = rx_cb;
return BCM_ERR_OK;
}
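/*
 * Callback sketch (illustrative): with a callback registered,
 * bcmtr_swq_rx_poll() delivers buffers directly instead of queueing them,
 * and the callback takes ownership of the buffer. my_rx_cb and MY_DEV are
 * hypothetical; the signature assumes the bcmtr_swq_rx_cb typedef matches
 * its invocation in bcmtr_swq_rx_poll() below.
 *
 *   static void my_rx_cb(uint8_t device, uint8_t channel, bcmos_buf *buf)
 *   {
 *       // consume the payload, then release the buffer
 *       bcmos_buf_free(buf);
 *   }
 *   ...
 *   bcmtr_swq_rx_cb_register(MY_DEV, BCMTR_PCIE_PRTY_NORMAL, my_rx_cb);
 */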
/** Unregister "data received indication" callback
* \param[in] device Maple device index
* \param[in] prty Priority
* \returns: 0 in case of success or error code < 0
*/
bcmos_errno bcmtr_swq_rx_cb_unregister(uint8_t device, bcmtr_pcie_prty prty)
{
if (device >= BCMTR_MAX_OLTS)
return BCM_ERR_PARM;
swq_info[device][prty].rx_cb = NULL;
return BCM_ERR_OK;
}
/* Fetch data from the h/w queue and deliver it, either via the registered
 * callback or by placing it on the s/w queue. nbuf[] must have at least
 * BCMTR_PCIE_PRTY__NUM_OF entries; it returns the number of buffers handled per priority.
 */
void bcmtr_swq_rx_poll(uint8_t device, uint32_t nbuf[])
{
uint8_t channel;
bcmos_buf *buf;
int n[BCMTR_PCIE_PRTY__NUM_OF] = {};
long flags;
bcmos_errno err;
do
{
bcmtr_pcie_prty prty;
pcie_swq *swq;
err = bcmtr_pcie_receive(device, &channel, &buf);
if (err != BCM_ERR_OK)
break;
prty = (channel >= BCMTR_SWQ_FIRST_URGENT_CHANNEL) ?
BCMTR_PCIE_PRTY_URGENT : BCMTR_PCIE_PRTY_NORMAL;
/* If callback based delivery - deliver buffer now, otherwise, place on s/w queue */
swq = &swq_info[device][prty];
if (swq->rx_cb)
{
swq->rx_cb(device, channel, buf);
}
else
{
bcmos_buf_channel_set(buf, channel);
flags = _bcmtr_swq_rx_lock(device);
bcmos_buf_queue_put(&swq->rxq, buf);
_bcmtr_swq_rx_unlock(device, flags);
}
++n[prty];
} while (BCMOS_TRUE);
    {
        int i;
        for (i = 0; i < BCMTR_PCIE_PRTY__NUM_OF; i++)
            nbuf[i] = n[i];
    }
}
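/*
 * Polling sketch (illustrative): called from a hypothetical rx task or
 * timer. With no callback registered, delivered buffers are then picked up
 * with bcmtr_swq_receive().
 *
 *   uint32_t nbuf[BCMTR_PCIE_PRTY__NUM_OF];
 *   bcmtr_swq_rx_poll(MY_DEV, nbuf);
 *   if (nbuf[BCMTR_PCIE_PRTY_URGENT] || nbuf[BCMTR_PCIE_PRTY_NORMAL])
 *       ... // drain via bcmtr_swq_receive()
 */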
/* Submit data from the s/w TX queue of the given priority to the h/w */
static void _bcmtr_swq_tx_submit_prty(uint8_t device, bcmtr_pcie_prty prty)
{
bcmos_errno err = BCM_ERR_OK;
pcie_swq *swq;
bcmos_buf *buf;
uint8_t channel;
bcmos_bool hw_queue_full;
long flags;
swq = &swq_info[device][prty];
do
{
flags = _bcmtr_swq_tx_lock(device);
/* Check if not over limit */
hw_queue_full = (swq->max_hwq_size && hwq_occupancy[device] >= swq->max_hwq_size);
if (hw_queue_full)
{
_bcmtr_swq_tx_unlock(device, flags);
break;
}
        /* Peek at the s/w TX queue and try to submit to the h/w queue */
        buf = bcmos_buf_queue_peek(&swq->txq);
_bcmtr_swq_tx_unlock(device, flags);
if (!buf)
break;
channel = bcmos_buf_channel(buf);
err = bcmtr_pcie_send(device, channel, buf);
if (err != BCM_ERR_OK)
break;
flags = _bcmtr_swq_tx_lock(device);
++hwq_occupancy[device];
        bcmos_buf_queue_get(&swq->txq); /* remove the buffer that was just submitted */
_bcmtr_swq_tx_unlock(device, flags);
} while (BCMOS_TRUE);
}
/* Submit data from the sw TX queue to the h/w */
void bcmtr_swq_tx_submit(uint8_t device)
{
    /* Reclaim buffers already transmitted by the h/w; submit backlog only if any were freed */
    if (bcmtr_pcie_tx_collect(device) > 0)
{
_bcmtr_swq_tx_submit_prty(device, BCMTR_PCIE_PRTY_URGENT);
_bcmtr_swq_tx_submit_prty(device, BCMTR_PCIE_PRTY_NORMAL);
}
}
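/*
 * Usage sketch (illustrative): a hypothetical periodic task pushes any
 * s/w-queue backlog once the h/w has freed descriptors.
 *
 *   bcmtr_swq_tx_submit(MY_DEV);
 */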
#ifdef __KERNEL__
EXPORT_SYMBOL(bcmtr_swq_tx_queue_cfg);
EXPORT_SYMBOL(bcmtr_swq_send);
#endif