blob: b73e65088724dc19127471fb007ca0a68d46a3a1 [file] [log] [blame]
/*
<:copyright-BRCM:2016:DUAL/GPL:standard
Broadcom Proprietary and Confidential.(c) 2016 Broadcom
All Rights Reserved
Unless you and Broadcom execute a separate written software license
agreement governing use of this software, this software is licensed
to you under the terms of the GNU General Public License version 2
(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
with the following added to such license:
As a special exception, the copyright holders of this software give
you permission to link this software with independent modules, and
to copy and distribute the resulting executable under terms of your
choice, provided that you also meet, for each linked independent
module, the terms and conditions of the license of that module.
An independent module is a module which is not derived from this
software. The special exception does not apply to any modifications
of the software.
Not withstanding the above, under no circumstances may you combine
this software in any way with any other Broadcom software provided
under a license other than the GPL, without Broadcom's express prior
written consent.
:>
*/
/*
* bcmtr_mux.c
*
* Transport Multiplexer
* - PCIe/in-band channel multiplexer
* - locally/remotely - terminated message multiplexer
* - autonomous messages de-multiplexer
*/
#include <bcmos_system.h>
#include <bcmolt_msg.h>
#include <bcmolt_buf.h>
#include <bcmolt_msg_pack.h>
#include <bcmtr_header.h>
#include <bcmolt_tr_mux.h>
#ifdef IN_BAND
#include <bcmtr_inband.h>
#else
#include <bcmtr_pcie.h>
#include <bcmolt_llpcie.h>
#include <bcmolt_fld.h>
#include <bcm_fld_common.h>
#include <bcmtr_pcie_sw_queue.h>
#endif
#include <bcmolt_api.h>
#include <bcmolt_model_types.h>
#include <bcmolt_model_ids.h>
#define BCMTRMUX_MAX_TX_DELAY 500 /* max transmit delay (us) */
#define BCMTRMUX_MAX_RX_DELAY 5000 /* max receive delay (us) */
#ifdef __KERNEL__
#define BCMTRMUX_LOG printk
#else
#define BCMTRMUX_LOG BCMOS_TRACE_INFO
#endif
/* Channel registration info.
 * One entry per (device, channel); see bcmtrmux_rx_info_array below.
 */
typedef struct
{
    f_bcmtr_rx_handler rx;  /* Raw-buffer receive callback for this channel */
    void *data;             /* Opaque context passed back to rx */
} bcmtrmux_rx_info;
/* Local termination handler registration info.
 * One entry per device; receives already-unpacked messages.
 */
typedef struct
{
    f_bcmtr_local_rx_handler rx;  /* Unpacked-message receive callback */
    void *data;                   /* Opaque context passed back to rx */
} bcmtrmux_local_rx_info;
/* Registration arrays */
/* Per-(device, channel) raw receive handler registration */
static bcmtrmux_rx_info bcmtrmux_rx_info_array[BCMTR_MAX_OLTS][BCMTRMUX_MAX_CHANNELS];
/* Per-device local (unpacked message) termination handler */
static bcmtrmux_local_rx_info bcmtrmux_local_rx_info_array[BCMTR_MAX_OLTS];
/* Channel on which autonomous messages for (device, instance, object) are delivered;
 * BCMTRMUX_MAX_CHANNELS means "no registration" */
static bcmtrmux_channel bcmtrmux_auto_channels[BCMTR_MAX_OLTS][BCMTR_MAX_INSTANCES][BCMOLT_OBJ_ID__NUM_OF];
/* Same as above for proxy messages */
static bcmtrmux_channel bcmtrmux_proxy_channels[BCMTR_MAX_OLTS][BCMTR_MAX_INSTANCES][BCMOLT_OBJ_ID__NUM_OF];
/* Set by bcmtrmux_connect(), cleared by bcmtrmux_disconnect(); the rx task polls it */
static bcmos_bool bcmtr_mux_connected[BCMTR_MAX_OLTS];
/* Set by the rx task on exit; bcmtrmux_disconnect() waits on it */
static bcmos_bool bcmtr_mux_terminated[BCMTR_MAX_OLTS];
/* Messages (indexed by group id) intercepted by bcmolt_dev_ctrl; see
 * bcmtrmux_control_auto_intercept_filter() */
static bcmos_bool bcmtrmux_dev_ctrl_intercept[BCMTR_MAX_OLTS][BCMOLT_GROUP_ID__NUM_OF];
#ifndef IN_BAND
/* Number of registered high-priority channels */
static bcmtrmux_channel num_urgent_channels[BCMTR_MAX_OLTS];
/* Receive semaphore */
static bcmos_sem bcmtrmux_rx_lock[BCMTR_MAX_OLTS];
#endif
/* Statistics */
static bcmtrmux_stat bcmtrmux_stat_array[BCMTR_MAX_OLTS];
/* Receive tasks */
static bcmos_task bcmtrmux_rx_task[BCMTR_MAX_OLTS];
/* Registration protection */
static bcmos_fastlock bcmtrmux_lock;
static void bcmtrmux_rx_auto_proxy(bcmolt_devid device, bcmos_buf *buf, bcmtrmux_channel channel, void *data);
static void bcmtrmux_rx_local(bcmolt_devid device, bcmos_buf *buf, bcmtrmux_channel channel, void *data);
/* Optional local/remote destination filter installed by bcmtrmux_init() */
static bcmtrmux_msg_filter_cb_t bcmtrmux_msg_filter_cb;
/* Discard a raw packet received on a channel with no registered owner.
 * "data" points at the statistics counter to increment.
 */
static void bcmtrmux_rx_discard(bcmolt_devid device, bcmos_buf *buf, bcmtrmux_channel channel, void *data)
{
    uint32_t *disc_stat = (uint32_t *)data;
    (*disc_stat)++;
    bcmos_buf_free(buf);
}
/* Discard an unpacked message for which no local handler is registered.
 * "data" points at the statistics counter to increment.
 */
static void bcmtrmux_local_rx_discard(bcmolt_devid device, bcmolt_msg *msg, void *data)
{
    uint32_t *disc_stat = (uint32_t *)data;
    (*disc_stat)++;
    bcmolt_msg_free(msg);
}
/* Find a free non-urgent channel.
 * Starts from BCMTRMUX_CHANNEL_FIRST_FREE: channel 0 is reserved for
 * indications/proxy, channel 1 for keep-alive.
 * Returns a channel id in [BCMTRMUX_CHANNEL_FIRST_FREE, BCMTRMUX_FIRST_URGENT_CHANNEL),
 * or BCMTRMUX_FIRST_URGENT_CHANNEL if all non-urgent channels are taken.
 */
static int bcmtrmux_channel_get_free(bcmolt_devid device)
{
    int ch = BCMTRMUX_CHANNEL_FIRST_FREE;
    while (ch < BCMTRMUX_FIRST_URGENT_CHANNEL &&
        bcmtrmux_rx_info_array[device][ch].rx != bcmtrmux_rx_discard)
    {
        ++ch;
    }
    return ch;
}
#ifndef IN_BAND
/* PCIe Receive handler.
 * Task body that drains normal-priority buffers from the software queue and
 * dispatches them into the mux. Runs until bcmtrmux_disconnect() clears
 * bcmtr_mux_connected[device], then sets bcmtr_mux_terminated[device].
 */
static int _bcmtrmux_pcie_rx_handler(long device)
{
    bcmos_errno rc;
    bcmos_buf *buf;
    uint32_t nbuf[BCMTR_PCIE_PRTY__NUM_OF] = {};
    uint8_t ch;
    BUG_ON(device >= BCMTR_MAX_OLTS);
    BCMTRMUX_LOG("rx_task(%ld) - started\n", device);
    /* Enable rx interrupt */
    while (1)
    {
        bcmtr_pcie_rxint_enable(device);
        /* Sleep until bcmtrmux_rx_irq() posts the semaphore or the timeout expires */
        bcmos_sem_wait(&bcmtrmux_rx_lock[device], BCMTRMUX_MAX_RX_DELAY);
        if (!bcmtr_mux_connected[device])
        {
            break;
        }
        /* Wait for receive */
        do
        {
            buf = NULL;
            rc = bcmtr_swq_receive(device, BCMTR_PCIE_PRTY_NORMAL, &ch, &buf);
            if (rc)
            {
                /* If DMA is not shared with urgent service, poll RXQ here,
                 * don't wait for interrupt
                 */
                if (rc == BCM_ERR_QUEUE_EMPTY && !num_urgent_channels[device])
                {
                    bcmtr_pcie_rxint_disable(device);
                    bcmtr_swq_rx_poll(device, nbuf);
                    bcmtrmux_stat_array[device].rx_poll_normal += nbuf[BCMTR_PCIE_PRTY_NORMAL];
                }
                /* Wait for interrupt or semaphore timeout */
                break;
            }
            bcmtrmux_rx_from_line((bcmolt_devid)device, (bcmtrmux_channel)ch, buf);
        } while (bcmtr_mux_connected[device]);
    }
    BCMTRMUX_LOG("rx_task(%ld) - terminated\n", device);
    /* Signal bcmtrmux_disconnect() that this task has exited its loop */
    bcmtr_mux_terminated[device] = BCMOS_TRUE;
    return 0;
}
/* PCIe rx interrupt handler.
 * Delivers urgent-priority packets directly from interrupt context and wakes
 * the rx task for normal-priority packets.
 */
static void bcmtrmux_rx_irq(bcmolt_devid device)
{
    bcmos_buf *buf;
    uint8_t ch;
    uint32_t nbuf[BCMTR_PCIE_PRTY__NUM_OF] = {};
    /* Mask and acknowledge the interrupt before polling so nothing is lost */
    bcmtr_pcie_rxint_disable(device);
    bcmtr_pcie_rxint_clear(device);
    bcmtr_swq_rx_poll(device, nbuf);
    /* Got some packets. Deliver high priority from interrupt level */
    if (nbuf[BCMTR_PCIE_PRTY_URGENT])
    {
        bcmtrmux_stat_array[device].rx_poll_urgent += nbuf[BCMTR_PCIE_PRTY_URGENT];
        while (bcmtr_swq_receive(device, BCMTR_PCIE_PRTY_URGENT, &ch, &buf) == BCM_ERR_OK)
        {
            bcmtrmux_rx_from_line(device, ch, buf);
        }
    }
    /* Check normal priority */
    if (nbuf[BCMTR_PCIE_PRTY_NORMAL])
    {
        bcmtrmux_stat_array[device].rx_poll_normal += nbuf[BCMTR_PCIE_PRTY_NORMAL];
        /* Wake _bcmtrmux_pcie_rx_handler() to drain the normal queue */
        bcmos_sem_post(&bcmtrmux_rx_lock[device]);
    }
    /* Enable rx interrupt */
    bcmtr_pcie_rxint_enable(device);
    return;
}
/* Tx confirmation interrupt handler.
 * Used only if there are urgent channels.
 * Moves buffers that were queued in software (because the h/w ring was busy)
 * onto the hardware queue once transmission completes.
 */
static void bcmtrmux_tx_irq(bcmolt_devid device)
{
    /* Disable and clear transmit completion interrupts */
    bcmtr_pcie_txint_disable(device);
    bcmtr_pcie_txint_clear(device);
    /* Submit buffers pending in sw queue to the h/w queue */
    bcmtr_swq_tx_submit(device);
    /* Re-enable tx completion interrupt */
    bcmtr_pcie_txint_enable(device);
    return;
}
/* Init PCIE transport.
 * Waits for the BCM68620 embedded application handshake, exchanges boot
 * records and establishes the PCIe DMA connection for one device.
 * \param[in] device   Maple device index
 * \param[in] txq_size Transmit queue size
 * \param[in] rxq_size Receive queue size
 * \returns: BCM_ERR_OK or error code < 0
 */
static bcmos_errno bcmtrmux_pcie_init(bcmolt_devid device, uint32_t txq_size, uint32_t rxq_size)
{
    uint32_t pcie_cookie[BCMOS_ROUND_UP(PCIE_OPAQUE_DATA_SIZE, sizeof(uint32_t))/sizeof(uint32_t)];
    bcmtr_pcie_pre_connect_cfg cfg = {};
    /* Zero-initialize: under SIMULATION_BUILD bcm_ll_pcie_query() is skipped,
     * and the ll_info fields copied into cfg below would otherwise be read
     * uninitialized (undefined behavior) */
    bcm_ll_dev_info ll_info = {};
    int niter = 0;
    bcmos_errno err;
    bcmos_bool status = BCMOS_FALSE;
#ifndef SIMULATION_BUILD
    err = bcm_ll_pcie_query(device, &ll_info);
    if (err)
    {
        BCMOS_TRACE_RETURN(err, "bcm_ll_pcie_query() failed\n");
    }
#endif
    BCMTRMUX_LOG("Waiting for BCM68620 application\n");
    bcm_fld_set_rings_size(device, txq_size, rxq_size);
    /* Wait for embedded handshake. BCMTR_PCIE_START_TIMEOUT is in ms.
     * The flag is (re)tested in the loop condition, so no separate initial
     * read is needed. */
    for (niter = 0;
        niter < BCMTR_PCIE_START_TIMEOUT / 10 && !(status = bcm_fld_test_device_bootrecord_flag(device));
        niter++)
    {
        bcmos_usleep(10000);
    }
    if (!status)
    {
        BCMOS_TRACE_RETURN(BCM_ERR_IO, "BCM68620 application timeout\n");
    }
    err = bcm_fld_get_device_bootrecord(device, pcie_cookie);
    if (err != BCM_ERR_OK)
    {
        BCMOS_TRACE_RETURN(err, "bcm_fld_get_device_bootrecord() failed\n");
    }
    /* set host prm bit - indicate host ready to send/receive DMA */
    bcm_fld_clear_device_bootrecord_flag(device);
    cfg.txq_size = txq_size; /* Transmit queue size */
    cfg.rxq_size = rxq_size; /* Receive queue size */
    cfg.max_mtu = BCMTR_MAX_MTU_SIZE; /* Max MTU size */
    cfg.pcie_reg_base = ll_info.soc_regs_base;
    cfg.ddr_win_base = ll_info.soc_ddr_base;
    cfg.rx_irq = ll_info.irq;
    err = bcmtr_pcie_pre_connect(device, &cfg, (bcmtr_pcie_opaque_data *)pcie_cookie);
    if (err)
    {
        BCMOS_TRACE_RETURN(err, "bcmtr_pcie_pre_connect() failed\n");
    }
    err = bcmtr_pcie_connect(device, (bcmtr_pcie_opaque_data *)pcie_cookie);
    if (err)
    {
        BCMOS_TRACE_RETURN(err, "bcmtr_pcie_connect() failed\n");
    }
    bcm_fld_set_host_bootrecord_flag(device);
    /* Wait for the embedded side to clear the host flag.
     * BCMTR_PCIE_CONNECT_TIMEOUT is in ms */
    for (niter = 0;
        niter < BCMTR_PCIE_CONNECT_TIMEOUT / 10 && (status = bcm_fld_test_host_bootrecord_flag(device));
        niter++)
    {
        bcmos_usleep(10000);
    }
    if (status)
    {
        BCMOS_TRACE_RETURN(BCM_ERR_IO, "BCM68620 connect timeout\n");
    }
    BCMTRMUX_LOG("PCI transport: initialized\n");
    return BCM_ERR_OK;
}
#else /* #ifndef IN_BAND */
/* IN-BAND receive handler.
 * Task body that pulls packets from the in-band transport and dispatches
 * them into the mux until bcmtrmux_disconnect() clears the connected flag.
 */
static int _bcmtrmux_ib_rx_handler(long device)
{
    bcmos_buf *buf;
    uint8_t ch;
    bcmos_errno err;
    BUG_ON(device >= BCMTR_MAX_OLTS);
    BCMTRMUX_LOG("rx_task(%ld) - started\n", device);
    while (bcmtr_mux_connected[device])
    {
        /* Wait for receive; skip dispatch on any receive error */
        buf = NULL;
        err = bcmtr_ib_receive(device, &ch, &buf);
        if (err != BCM_ERR_OK)
        {
            continue;
        }
        bcmtrmux_rx_from_line((bcmolt_devid)device, (bcmtrmux_channel)ch, buf);
    }
    BCMTRMUX_LOG("rx_task(%ld) - terminated\n", device);
    /* Signal bcmtrmux_disconnect() that this task has exited its loop */
    bcmtr_mux_terminated[device] = BCMOS_TRUE;
    return 0;
}
/* Establish the in-band (UDP) connection for a device.
 * Returns the underlying transport's error code.
 */
static bcmos_errno bcmtrmux_ib_connect(bcmolt_devid device, bcmos_ipv4_address ip_address, uint16_t udp_port)
{
    bcmos_errno err = bcmtr_ib_connect((uint8_t)device, ip_address, udp_port);
    if (err != BCM_ERR_OK)
    {
        BCMTRMUX_LOG("%s: Failed to connect. Error %d\n", __FUNCTION__, err);
    }
    return err;
}
#endif /* #ifndef IN_BAND */
/** Initialize mux service.
 * Sets up registration tables, installs default (discard) handlers and
 * initializes the underlying low-level transport (PCIe or in-band).
 * \param[in] msg_filter_cb Optional callback that decides whether a host
 *                          message is terminated locally or sent to the line
 * \returns: 0 in case of success or error code < 0
 */
bcmos_errno bcmtrmux_init(bcmtrmux_msg_filter_cb_t msg_filter_cb)
{
    static bcmos_bool initialized = BCMOS_FALSE;
    int i, j;
    bcmos_errno rc;
    BCMTRMUX_LOG("Initialising transport MUX subsystem\n");
    if (initialized)
    {
        return BCM_ERR_ALREADY;
    }
    /* Install the filter only on first-time init so a repeated call cannot
     * silently replace the active filter before being rejected above */
    bcmtrmux_msg_filter_cb = msg_filter_cb;
    for (i=0; i<BCMTR_MAX_OLTS; i++)
    {
        for (j=0; j<BCMTRMUX_MAX_CHANNELS; j++)
        {
            /* Unregistered channels discard and count received packets */
            bcmtrmux_rx_info_array[i][j].rx = bcmtrmux_rx_discard;
            bcmtrmux_rx_info_array[i][j].data = &bcmtrmux_stat_array[i].rx_disc_remote;
        }
        /* Pre-assigned service channels */
        bcmtrmux_rx_info_array[i][BCMTRMUX_CHANNEL_AUTO_PROXY].rx = bcmtrmux_rx_auto_proxy;
        bcmtrmux_rx_info_array[i][BCMTRMUX_CHANNEL_AUTO_PROXY].data = NULL;
        bcmtrmux_rx_info_array[i][BCMTRMUX_CHANNEL_DEV_CONTROL].rx = bcmtrmux_rx_local;
        bcmtrmux_rx_info_array[i][BCMTRMUX_CHANNEL_DEV_CONTROL].data = NULL;
        for (j=0; j<BCMTR_MAX_INSTANCES; j++)
        {
            bcmolt_obj_id k;
            for (k=0; k<BCMOLT_OBJ_ID__NUM_OF; k++)
            {
                /* BCMTRMUX_MAX_CHANNELS means "no registration" */
                bcmtrmux_auto_channels[i][j][k] = BCMTRMUX_MAX_CHANNELS;
                bcmtrmux_proxy_channels[i][j][k] = BCMTRMUX_MAX_CHANNELS;
            }
        }
        bcmtrmux_local_rx_info_array[i].rx = bcmtrmux_local_rx_discard;
        bcmtrmux_local_rx_info_array[i].data = &bcmtrmux_stat_array[i].tx_disc_local;
    }
    bcmos_fastlock_init(&bcmtrmux_lock, 0);
    /*Don't initialize at this time for User Space dev ctrl,
    don't have enough information*/
#ifndef IN_BAND
    rc = bcmtr_pcie_init(BCMTR_MAX_OLTS);
    if (rc)
    {
        BCMOS_TRACE_RETURN(rc, "bcmtr_pcie_init() failed\n");
    }
    rc = bcmtr_swq_init();
    if (rc)
    {
        BCMOS_TRACE_RETURN(rc, "bcmtr_swq_init() failed\n");
    }
    /* Register rx callback in PCIe driver */
    bcmtr_pcie_rx_irq_cblk_register(bcmtrmux_rx_irq);
    bcmtr_pcie_tx_irq_cblk_register(bcmtrmux_tx_irq);
#else
    rc = bcmtr_ib_init();
    if (rc)
    {
        BCMOS_TRACE_RETURN(rc, "bcmtr_ib_init() failed\n");
    }
#endif /* #ifndef IN_BAND */
    /* Mark initialized only after full success so a failed init can be
     * retried. Previously this flag was never set, so repeated calls
     * re-initialized the transport under the feet of active users. */
    initialized = BCMOS_TRUE;
    BCMTRMUX_LOG("Transport MUX init done\n");
    return rc;
}
/** Notify mux driver that low-level transport connection is ready.
 * Establishes the per-device transport (PCIe or in-band) and spawns the
 * receive task. On any failure all resources acquired so far are released.
 * \returns: 0 in case of success or error code < 0
 */
#ifdef IN_BAND
bcmos_errno bcmtrmux_connect(bcmolt_devid device, bcmos_ipv4_address ip_address, uint16_t udp_port)
#else
bcmos_errno bcmtrmux_connect(bcmolt_devid device, uint32_t txq_size, uint32_t rxq_size)
#endif
{
    /* Task name storage must be static: the OS task keeps a pointer to it */
    static char task_name[BCMTR_MAX_OLTS][16];
    bcmos_task_parm taskp = {};
    bcmos_errno rc;
    if (bcmtr_mux_connected[device])
    {
        return BCM_ERR_ALREADY;
    }
    snprintf(task_name[device], sizeof(task_name[device]), "bcmtr_rx%d", device);
    taskp.data = (long)device;
    taskp.name = task_name[device];
#ifdef IN_BAND
    rc = bcmtrmux_ib_connect(device, ip_address, udp_port);
    if (rc)
    {
        BCMTRMUX_LOG("%s: Failed to init inband device. Error %d\n", __FUNCTION__, rc);
        return rc;
    }
    taskp.handler = _bcmtrmux_ib_rx_handler;
#else
    /* Semaphore used by bcmtrmux_rx_irq() to wake the rx task */
    rc = bcmos_sem_create(&bcmtrmux_rx_lock[device], 0, 0, NULL);
    if (rc)
    {
        BCMTRMUX_LOG("%s: Failed to create rx lock. Error %d\n", __FUNCTION__, rc);
        return rc;
    }
    /* Initialize low-level PCIe transport */
    rc = bcmtrmux_pcie_init(device, txq_size, rxq_size);
    if (rc)
    {
        bcmos_sem_destroy(&bcmtrmux_rx_lock[device]);
        BCMTRMUX_LOG("%s: Failed to init low-level PCIe transport. Error %d\n", __FUNCTION__, rc);
        return rc;
    }
    taskp.handler = _bcmtrmux_pcie_rx_handler;
    rc = bcmtr_swq_device_init(device);
    if (rc)
    {
        bcmos_sem_destroy(&bcmtrmux_rx_lock[device]);
        BCMTRMUX_LOG("%s: Failed to init pcie_swq. Error %d\n", __FUNCTION__, rc);
        return rc;
    }
#endif
    /* Flags must be set before the task starts: the task polls "connected"
     * and reports exit via "terminated" */
    bcmtr_mux_connected[device] = BCMOS_TRUE;
    bcmtr_mux_terminated[device] = BCMOS_FALSE;
    rc = bcmos_task_create(&bcmtrmux_rx_task[device], &taskp);
    if (rc)
    {
#ifndef IN_BAND
        bcmos_sem_destroy(&bcmtrmux_rx_lock[device]);
#endif
        bcmtr_mux_connected[device] = BCMOS_FALSE;
        BCMTRMUX_LOG("%s: Failed to create rx task. Error %d\n", __FUNCTION__, rc);
        return rc;
    }
    return BCM_ERR_OK;
}
/** Notify mux driver that low-level transport connection is disconnected.
 * Stops the receive task (waiting for it to acknowledge termination) and
 * tears down per-device transport resources.
 * \returns: 0 in case of success or error code < 0
 */
bcmos_errno bcmtrmux_disconnect(bcmolt_devid device)
{
    if (!bcmtr_mux_connected[device])
    {
        return BCM_ERR_ALREADY;
    }
    /* Clearing the flag tells the rx task to exit its loop */
    bcmtr_mux_connected[device] = BCMOS_FALSE;
#ifdef IN_BAND
    bcmtr_ib_disconnect((uint8_t)device);
#else
    /* Wake the rx task so it notices the cleared flag */
    bcmos_sem_post(&bcmtrmux_rx_lock[device]);
#endif
    /* Wait until the rx task confirms it is out of its loop */
    while (!bcmtr_mux_terminated[device])
    {
        bcmos_usleep(10000);
    }
    bcmos_task_destroy(&bcmtrmux_rx_task[device]);
#ifndef IN_BAND
    bcmos_sem_destroy(&bcmtrmux_rx_lock[device]);
    bcmtr_swq_device_exit(device);
    bcmtr_pcie_disconnect((uint8_t)device);
#endif
    return BCM_ERR_OK;
}
/** Cleanup and exit.
 * Disconnects every device, unregisters interrupt callbacks and shuts down
 * the underlying transport.
 */
void bcmtrmux_exit(void)
{
    int i;
    BCMTRMUX_LOG("Cleaning up transport MUX subsystem\n");
#ifndef IN_BAND
    bcmtr_swq_exit();
    bcmtr_pcie_rx_irq_cblk_unregister();
    bcmtr_pcie_tx_irq_cblk_unregister();
#endif
    /* kill receive tasks */
    for (i=0; i<BCMTR_MAX_OLTS; i++)
    {
        bcmtrmux_disconnect((bcmolt_devid)i);
    }
#ifdef IN_BAND
    bcmtr_ib_exit();
#else
    bcmtr_pcie_exit();
#endif
    BCMTRMUX_LOG("Transport MUX cleanup done\n");
}
/** Register PCIe channel owner.
 * \param[in]     device  Maple device index
 * \param[in,out] channel Requested channel or BCMTRMUX_CHANNEL_AUTO_ASSIGN;
 *                        on success contains the assigned channel
 * \param[in]     rx      Receive callback
 * \param[in]     data    Opaque context passed back to rx
 * \returns: BCM_ERR_OK or error code < 0
 */
bcmos_errno bcmtrmux_channel_register(bcmolt_devid device, bcmtrmux_channel *channel,
    f_bcmtr_rx_handler rx, void *data)
{
    bcmtrmux_channel ch;
    long flags;
    if ((unsigned)device >= BCMTR_MAX_OLTS || !channel || !rx)
    {
        return BCM_ERR_PARM;
    }
    ch = *channel;
    flags = bcmos_fastlock_lock(&bcmtrmux_lock);
    if (ch == BCMTRMUX_CHANNEL_AUTO_ASSIGN)
    {
        /* Auto-assign free channel. Auto-assignment must never hand out an
         * urgent channel: bcmtrmux_channel_get_free() returns
         * BCMTRMUX_FIRST_URGENT_CHANNEL when all normal channels are taken,
         * so that value means "no resources". (Comparing against
         * BCMTRMUX_MAX_CHANNELS, as before, silently grabbed the first
         * urgent channel and enabled urgent-channel tx-interrupt handling.) */
        ch = (bcmtrmux_channel)bcmtrmux_channel_get_free(device);
        if (ch >= BCMTRMUX_FIRST_URGENT_CHANNEL)
        {
            bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
            return BCM_ERR_NORES;
        }
    }
    /* Make sure that channel is valid and not busy */
    if ((unsigned)ch >= BCMTRMUX_MAX_CHANNELS)
    {
        bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
        return BCM_ERR_PARM;
    }
    if (bcmtrmux_rx_info_array[device][ch].rx != bcmtrmux_rx_discard)
    {
        bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
        return BCM_ERR_ALREADY;
    }
    /* Assign channel */
    bcmtrmux_rx_info_array[device][ch].rx = rx;
    bcmtrmux_rx_info_array[device][ch].data = data;
#ifndef IN_BAND
    /* Urgent channels are not supported for IN-BAND management */
    if (ch >= BCMTRMUX_FIRST_URGENT_CHANNEL)
    {
        /* We use transmit confirmation interrupt to kick transmission
         * if PCI bus is shared between high and low-priority channels
         */
        if (!num_urgent_channels[device])
            bcmtr_pcie_txint_enable(device);
        ++num_urgent_channels[device];
    }
#endif
    bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
    *channel = ch;
    return BCM_ERR_OK;
}
/** Release PCIe channel allocated by bcmtrmux_channel_register()
 *
 * \param[in] device Maple device index
 * \param[in] channel Channel to release
 * \returns: 0 in case of success or error code < 0
 */
bcmos_errno bcmtrmux_channel_unregister(bcmolt_devid device, bcmtrmux_channel channel)
{
    long flags;
    if ((unsigned)device >= BCMTR_MAX_OLTS || (unsigned)channel >= BCMTRMUX_MAX_CHANNELS)
    {
        return BCM_ERR_PARM;
    }
    flags = bcmos_fastlock_lock(&bcmtrmux_lock);
    /* A channel whose handler is the discard stub was never registered */
    if (bcmtrmux_rx_info_array[device][channel].rx == bcmtrmux_rx_discard)
    {
        bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
        return BCM_ERR_NOENT;
    }
    /* Restore the default discard handler and its statistics counter */
    bcmtrmux_rx_info_array[device][channel].rx = bcmtrmux_rx_discard;
    bcmtrmux_rx_info_array[device][channel].data = &bcmtrmux_stat_array[device].rx_disc_remote;
#ifndef IN_BAND
    /* Urgent channels are not supported for IN-BAND management */
    if (channel >= BCMTRMUX_FIRST_URGENT_CHANNEL)
    {
        --num_urgent_channels[device];
        /* If PCI bus is not shared between normal and urgent channels,
         * transmit confirmation mechanism is not needed
         */
        if (!num_urgent_channels[device])
            bcmtr_pcie_txint_disable(device);
    }
#endif
    bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
    return BCM_ERR_OK;
}
/*
 * Local termination handler
 */
/* Register local termination handler.
 * Only one local handler may be registered per device.
 */
bcmos_errno bcmtrmux_local_handler_register(bcmolt_devid device, f_bcmtr_local_rx_handler rx, void *data)
{
    bcmtrmux_local_rx_info *info;
    bcmos_errno err = BCM_ERR_OK;
    long flags;
    if ((unsigned)device >= BCMTR_MAX_OLTS || rx == NULL)
    {
        return BCM_ERR_PARM;
    }
    info = &bcmtrmux_local_rx_info_array[device];
    flags = bcmos_fastlock_lock(&bcmtrmux_lock);
    if (info->rx != bcmtrmux_local_rx_discard)
    {
        /* Slot already taken by another handler */
        err = BCM_ERR_ALREADY;
    }
    else
    {
        info->rx = rx;
        info->data = data;
    }
    bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
    return err;
}
/* Unregister local termination handler registered by bcmtrmux_local_handler_register().
 * Restores the default discard handler and its statistics counter.
 */
bcmos_errno bcmtrmux_local_handler_unregister(bcmolt_devid device)
{
    bcmtrmux_local_rx_info *info;
    bcmos_errno err = BCM_ERR_OK;
    long flags;
    if ((unsigned)device >= BCMTR_MAX_OLTS)
    {
        return BCM_ERR_PARM;
    }
    info = &bcmtrmux_local_rx_info_array[device];
    flags = bcmos_fastlock_lock(&bcmtrmux_lock);
    if (info->rx == bcmtrmux_local_rx_discard)
    {
        /* Nothing was registered */
        err = BCM_ERR_NOENT;
    }
    else
    {
        info->data = &bcmtrmux_stat_array[device].tx_disc_local;
        info->rx = bcmtrmux_local_rx_discard;
    }
    bcmos_fastlock_unlock(&bcmtrmux_lock, flags);
    return err;
}
/* Deliver message to local destination (device control).
 * Unpacks the message and hands it to the registered local handler.
 * Always consumes buf; always returns BCMOS_TRUE to tell the caller the
 * message has been terminated locally.
 */
static bcmos_bool bcmtrmux_deliver_to_local(bcmolt_devid device, bcmtrmux_channel channel, bcmos_buf *buf, bcmtr_hdr *hdr)
{
    bcmolt_msg *msg = NULL;
    bcmolt_buf unpack_buf;
    bcmtrmux_local_rx_info *info;
    bcmos_errno err;
    /* Unpack past the transport header */
    bcmolt_buf_init(&unpack_buf, bcmos_buf_length(buf), bcmos_buf_data(buf), BCMTR_BUF_ENDIAN);
    bcmolt_buf_skip(&unpack_buf, BCMTR_HDR_SIZE);
    err = bcmolt_msg_unpack(&unpack_buf, &msg);
    bcmos_buf_free(buf);
    if (err < 0)
    {
        BCMOS_TRACE_ERR("Message unpack error %s (%d)\n", bcmos_strerror(err), err);
        ++bcmtrmux_stat_array[device].tx_disc_local;
        return BCMOS_TRUE;
    }
    msg->corr_tag = hdr->corr_tag;
    msg->subch = (bcmolt_subchannel)channel;
    ++bcmtrmux_stat_array[device].tx_local;
    info = &bcmtrmux_local_rx_info_array[device];
    info->rx(device, msg, info->data);
    return BCMOS_TRUE;
}
/* Send a buffer to the line (in-band or PCIe software queue).
 * On failure the buffer is freed and counted as a remote discard.
 */
static void bcmtrmux_send_to_line(bcmolt_devid device, bcmtrmux_channel channel, bcmos_buf *buf)
{
    bcmos_errno err;
#ifdef IN_BAND
    err = bcmtr_ib_send((uint8_t)device, (uint8_t)channel, buf);
#else
    err = bcmtr_swq_send((uint8_t)device, channel, buf);
#endif
    if (err == BCM_ERR_OK)
    {
        return;
    }
    /* Failed - account and release the buffer */
    ++bcmtrmux_stat_array[device].tx_disc_remote;
    bcmos_buf_free(buf);
}
/* Receive message from host application.
 * Terminates auto/proxy (un)registration requests locally, routes messages
 * the filter callback marks as local to the device-control handler, and
 * forwards everything else to the line. Always consumes buf.
 */
void bcmtrmux_rx_from_host(bcmolt_devid device, bcmtrmux_channel channel, bcmos_buf *buf)
{
    bcmtr_hdr hdr;
    bcmolt_obj_id obj;
    bcmolt_mgt_group group;
    uint16_t subgroup;
    bcmos_errno rc;
    /* Validate parameters */
    BUG_ON((unsigned)device >= BCMTR_MAX_OLTS);
    BUG_ON((unsigned)channel >= BCMTRMUX_MAX_CHANNELS);
    /* Peek in transport header. It contains enough info to decide what to do with the message */
    bcmtr_header_unpack(bcmos_buf_data(buf), &hdr);
    rc = bcmolt_group_id_split(hdr.msg_id, &obj, &group, &subgroup);
    if (rc)
    {
        BCMOS_TRACE_ERR("Can't decode group_id %u. Error %s (%d)\n", hdr.msg_id, bcmos_strerror(rc), rc);
        ++bcmtrmux_stat_array[device].tx_disc_remote;
        bcmos_buf_free(buf);
        return;
    }
    /* Filter auto/proxy (un)registration.
     * This message is terminated here.
     */
    if (hdr.auto_proxy_reg || hdr.auto_proxy_unreg)
    {
        bcmtrmux_channel *p_ch;
        bcmos_buf_free(buf);
        /* Sanity check BEFORE indexing the registration arrays: forming the
         * address of an out-of-range element (as the old code did) is
         * undefined behavior even if it is never dereferenced */
        if (hdr.instance >= BCMTR_MAX_INSTANCES || obj >= BCMOLT_OBJ_ID__NUM_OF)
        {
            BCMOS_TRACE_ERR("Instance %u or object %d is insane\n", hdr.instance, obj);
            return;
        }
        p_ch = (group == BCMOLT_MGT_GROUP_AUTO) ?
            &bcmtrmux_auto_channels[device][hdr.instance][obj] : &bcmtrmux_proxy_channels[device][hdr.instance][obj];
        /* Do not override bcmolt_dev_ctrl filters */
        if (*p_ch != BCMTRMUX_CHANNEL_DEV_CONTROL)
        {
            *p_ch = hdr.auto_proxy_reg ? channel : BCMTRMUX_MAX_CHANNELS;
        }
        return;
    }
    /* Filter message that should go to local destination (device control) */
    if (bcmtrmux_msg_filter_cb && bcmtrmux_msg_filter_cb(device, obj, group, subgroup) == BCMTRMUX_DEST_LOCAL)
    {
        if (bcmtrmux_deliver_to_local(device, channel, buf, &hdr) == BCMOS_TRUE)
            return;
    }
    /* Handle Remote message */
    ++bcmtrmux_stat_array[device].tx_remote;
    bcmtrmux_send_to_line(device, channel, buf);
}
/* Receive packet from the line or local control process.
 * Parameters are expected to be checked beforehand.
 * The function de-muxes
 * - replies based on channel
 * - autonomous/proxy messages based on registration info
 */
static void bcmtrmux_rx(bcmolt_devid device, bcmtrmux_channel channel, bcmos_buf *buf)
{
    const bcmtrmux_rx_info *info = &bcmtrmux_rx_info_array[device][channel];
    info->rx(device, buf, channel, info->data);
}
/* Receive packet from PCIe interface.
 * Dispatches to the channel's registered handler; packets arriving on an
 * invalid channel are counted and dropped.
 */
void bcmtrmux_rx_from_line(bcmolt_devid device, bcmtrmux_channel channel, bcmos_buf *buf)
{
    BUG_ON((unsigned)device >= BCMTR_MAX_OLTS);
    if ((unsigned)channel < BCMTRMUX_MAX_CHANNELS)
    {
        ++bcmtrmux_stat_array[device].rx_remote;
        bcmtrmux_rx(device, channel, buf);
        return;
    }
    ++bcmtrmux_stat_array[device].rx_disc_inv_ch;
    bcmos_buf_free(buf);
}
/* Handle message received via Auto/Proxy channel.
 * Looks up the registered delivery channel for the message's (instance,
 * object), honoring dev_ctrl intercepts, and dispatches or discards.
 * Always consumes buf.
 */
static void bcmtrmux_rx_auto_proxy(bcmolt_devid device, bcmos_buf *buf, bcmtrmux_channel channel, void *data)
{
    bcmtr_hdr hdr;
    bcmolt_obj_id obj;
    bcmolt_mgt_group group;
    uint16_t subgroup;
    bcmos_errno rc;
    /* Peek in transport header. It contains enough info to decide what to do with the message */
    bcmtr_header_unpack(bcmos_buf_data(buf), &hdr);
    rc = bcmolt_group_id_split(hdr.msg_id, &obj, &group, &subgroup);
    if (rc)
    {
        BCMOS_TRACE_ERR("Can't decode group_id %u. Error %s (%d)\n", hdr.msg_id, bcmos_strerror(rc), rc);
        ++bcmtrmux_stat_array[device].rx_disc_auto;
        bcmos_buf_free(buf);
        return;
    }
    /* Sanity check */
    if (hdr.instance >= BCMTR_MAX_INSTANCES || obj >= BCMOLT_OBJ_ID__NUM_OF)
    {
        BCMOS_TRACE_ERR("Instance %u or object %d is insane\n", hdr.instance, obj);
        ++bcmtrmux_stat_array[device].rx_disc_auto;
        bcmos_buf_free(buf);
        return;
    }
    /* Dispatch based on object id */
    /* Handle dev_ctrl intercept */
    if (bcmtrmux_dev_ctrl_intercept[device][hdr.msg_id])
    {
        /* Message was registered for intercept by bcmolt_dev_ctrl - force
         * delivery on the DEV_CONTROL channel */
        channel = BCMTRMUX_CHANNEL_DEV_CONTROL;
    }
    else
    {
        channel = (group == BCMOLT_MGT_GROUP_AUTO) ?
            bcmtrmux_auto_channels[device][hdr.instance][obj] : bcmtrmux_proxy_channels[device][hdr.instance][obj];
    }
    /* If no registration - discard */
    if (channel >= BCMTRMUX_MAX_CHANNELS)
    {
        ++bcmtrmux_stat_array[device].rx_disc_auto;
        bcmos_buf_free(buf);
        return;
    }
    bcmtrmux_rx(device, channel, buf);
}
/* Handle message received via DEV_CONTROL channel:
 * unpack it and hand the message to the local termination handler.
 * Always consumes buf.
 */
static void bcmtrmux_rx_local(bcmolt_devid device, bcmos_buf *buf, bcmtrmux_channel channel, void *data)
{
    bcmtr_hdr hdr;
    bcmolt_buf unpack_buf;
    bcmolt_msg *msg = NULL;
    bcmtrmux_local_rx_info *info;
    bcmos_errno err;
    bcmtr_header_unpack(bcmos_buf_data(buf), &hdr);
    /* Unpack past the transport header */
    bcmolt_buf_init(&unpack_buf, bcmos_buf_length(buf), bcmos_buf_data(buf), BCMTR_BUF_ENDIAN);
    bcmolt_buf_skip(&unpack_buf, BCMTR_HDR_SIZE);
    err = bcmolt_msg_unpack(&unpack_buf, &msg);
    bcmos_buf_free(buf);
    if (err < 0)
    {
        BCMOS_TRACE_ERR("Message unpack error %s (%d)\n", bcmos_strerror(err), err);
        ++bcmtrmux_stat_array[device].rx_disc_remote;
        return;
    }
    msg->corr_tag = hdr.corr_tag;
    msg->subch = (bcmolt_subchannel)channel;
    ++bcmtrmux_stat_array[device].rx_local;
    info = &bcmtrmux_local_rx_info_array[device];
    info->rx(device, msg, info->data);
}
/* Pack a message plus transport header into a newly allocated buffer.
 * On success *p_buf owns the buffer; on failure nothing is left allocated.
 * \returns BCM_ERR_OK, BCM_ERR_PARM or BCM_ERR_NOMEM
 */
static bcmos_errno bcmtrmux_msg_pack(bcmolt_devid device, bcmolt_msg *msg, bcmos_buf **p_buf)
{
    int32_t len = bcmolt_msg_get_packed_length(msg);
    bcmos_buf *buf;
    bcmolt_buf ubuf;
    bcmos_errno rc = BCM_ERR_OK;
    bcmtr_hdr thdr = {};
    if (len < 0)
    {
        /* len holds the (negative) error code; the old code printed
         * bcmos_strerror(rc) which was still BCM_ERR_OK at this point */
        BCMOS_TRACE_ERR("Can't calculate packet length. Error %s (%d)\n", bcmos_strerror((bcmos_errno)len), len);
        return BCM_ERR_PARM;
    }
    rc = bcmtr_header_fill(msg, &thdr);
    if (rc)
    {
        BCMOS_TRACE_ERR("Can't create transport header. Error %s (%d)\n", bcmos_strerror(rc), rc);
        return BCM_ERR_PARM;
    }
    len += BCMTR_HDR_SIZE;
    buf = bcmos_buf_alloc(len);
    if (!buf)
    {
        BCMOS_TRACE_ERR("Can't allocate packet buffer\n");
        return BCM_ERR_NOMEM;
    }
    bcmolt_buf_init(&ubuf, len, bcmos_buf_data(buf), BCMTR_BUF_ENDIAN);
    bcmolt_buf_skip(&ubuf, BCMTR_HDR_SIZE);
    /* Pack transport header */
    bcmtr_header_pack(&thdr, ubuf.start);
    /* Pack message */
    rc = bcmolt_msg_pack(msg, &ubuf);
    if (rc)
    {
        BCMOS_TRACE_ERR("Message pack failed. Error %s (%d)\n", bcmos_strerror(rc), rc);
        bcmos_buf_free(buf);
        return BCM_ERR_PARM;
    }
    bcmos_buf_length_set(buf, len);
    *p_buf = buf;
    return BCM_ERR_OK;
}
/* Send packet from local control process to the host application.
 * The reply is injected into the mux on the channel recorded in msg->subch.
 */
bcmos_errno bcmtrmux_control_to_host(bcmolt_devid device, bcmolt_msg *msg)
{
    bcmos_buf *buf = NULL;
    bcmos_errno err;
    bcmtrmux_channel channel = (bcmtrmux_channel)msg->subch;
    BUG_ON((unsigned)channel >= BCMTRMUX_MAX_CHANNELS);
    BUG_ON((unsigned)device >= BCMTR_MAX_OLTS);
    ++bcmtrmux_stat_array[device].control_to_host;
    err = bcmtrmux_msg_pack(device, msg, &buf);
    if (err != BCM_ERR_OK)
    {
        return err;
    }
    bcmtrmux_rx(device, channel, buf);
    return BCM_ERR_OK;
}
/* Send packet from local control process to the embedded system
 * via the DEV_CONTROL channel.
 */
bcmos_errno bcmtrmux_control_to_line(bcmolt_devid device, bcmolt_msg *msg)
{
    bcmos_buf *buf = NULL;
    bcmos_errno rc;
    BUG_ON((unsigned)device >= BCMTR_MAX_OLTS);
    ++bcmtrmux_stat_array[device].control_to_line;
    rc = bcmtrmux_msg_pack(device, msg, &buf);
    if (rc == BCM_ERR_OK)
    {
        bcmtrmux_send_to_line(device, BCMTRMUX_CHANNEL_DEV_CONTROL, buf);
    }
    return rc;
}
/* Register message for intercept by bcmolt_dev_ctrl.
 * Autonomous messages matching (object, subgroup) will be redirected to the
 * DEV_CONTROL channel by bcmtrmux_rx_auto_proxy().
 */
bcmos_errno bcmtrmux_control_auto_intercept_filter(bcmolt_devid device, bcmolt_obj_id object, uint16_t subgroup)
{
    bcmolt_group_id msg_id;
    bcmos_errno rc;
    if ((unsigned)device >= BCMTR_MAX_OLTS)
    {
        return BCM_ERR_PARM;
    }
    rc = bcmolt_group_id_combine(object, BCMOLT_MGT_GROUP_AUTO, subgroup, &msg_id);
    if (rc != BCM_ERR_OK)
    {
        BCMOS_TRACE_ERR("Can't identify operation %d for object %d. Error %s (%d)\n",
            (int)subgroup, (int)object, bcmos_strerror(rc), rc);
        return rc;
    }
    bcmtrmux_dev_ctrl_intercept[device][msg_id] = BCMOS_TRUE;
    return BCM_ERR_OK;
}
/** Get transport mux statistics.
 *
 * \param[in] device Maple device index
 * \param[out] stat Statistics snapshot (copied by value)
 * \returns: 0 in case of success or error code < 0
 */
bcmos_errno bcmtrmux_stat_get(bcmolt_devid device, bcmtrmux_stat *stat)
{
    bcmos_bool params_ok = (unsigned)device < BCMTR_MAX_OLTS && stat != NULL;
    if (!params_ok)
    {
        return BCM_ERR_PARM;
    }
    *stat = bcmtrmux_stat_array[device];
    return BCM_ERR_OK;
}
#ifdef __KERNEL__
/* Export the public mux API to other kernel modules */
EXPORT_SYMBOL(bcmtrmux_init);
EXPORT_SYMBOL(bcmtrmux_connect);
EXPORT_SYMBOL(bcmtrmux_disconnect);
EXPORT_SYMBOL(bcmtrmux_channel_register);
EXPORT_SYMBOL(bcmtrmux_channel_unregister);
EXPORT_SYMBOL(bcmtrmux_local_handler_register);
EXPORT_SYMBOL(bcmtrmux_local_handler_unregister);
EXPORT_SYMBOL(bcmtrmux_rx_from_host);
EXPORT_SYMBOL(bcmtrmux_rx_from_line);
EXPORT_SYMBOL(bcmtrmux_control_to_host);
EXPORT_SYMBOL(bcmtrmux_control_to_line);
EXPORT_SYMBOL(bcmtrmux_control_auto_intercept_filter);
EXPORT_SYMBOL(bcmtrmux_stat_get);
#endif