blob: add7ae5fb94353093406cbe009694a7e3ae26eb6 [file] [log] [blame]
/*
<:copyright-BRCM:2016:DUAL/GPL:standard
Broadcom Proprietary and Confidential.(c) 2016 Broadcom
All Rights Reserved
Unless you and Broadcom execute a separate written software license
agreement governing use of this software, this software is licensed
to you under the terms of the GNU General Public License version 2
(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
with the following added to such license:
As a special exception, the copyright holders of this software give
you permission to link this software with independent modules, and
to copy and distribute the resulting executable under terms of your
choice, provided that you also meet, for each linked independent
module, the terms and conditions of the license of that module.
An independent module is a module which is not derived from this
software. The special exception does not apply to any modifications
of the software.
Not withstanding the above, under no circumstances may you combine
this software in any way with any other Broadcom software provided
under a license other than the GPL, without Broadcom's express prior
written consent.
:>
*/
#include "bcmolt_coop_dba.h"

/* Per-device host application callback, invoked by bcmolt_coop_dba_rx() to
 * handle the indications sent from the embedded side to the host during the
 * DBA cycle. Indexed by bcmolt_devid; slots are NULL (zero-initialized static
 * storage) until bcmolt_coop_dba_host_handler_register() fills them in.
 */
static bcmolt_coop_dba_host_handler_cb host_handler_array[BCMTR_MAX_OLTS];
/* An API to implement Partial-External DBA type.
 *
 * Packs a SET_ALLOCS fast message (type byte + pon_ni byte + header + alloc
 * list), converting multi-byte fields from host to embedded endianness, and
 * queues it on the urgent software queue toward the PCIe driver.
 *
 * Returns BCM_ERR_OK on success, BCM_ERR_NOMEM if no buffer could be
 * allocated, or the bcmtr_swq_send() error code on transmit failure.
 * On success the buffer is owned (and later freed) by the transport.
 */
bcmos_errno bcmolt_fast_oper_set_allocs(bcmolt_devid device, uint8_t pon_ni, bcm_common_fast_set_allocs_msg *msg)
{
    bcmos_errno rc;
    bcmos_buf *buf;
    unsigned char *buf_data;
    bcm_common_fast_set_allocs_msg *msg_p;
    /* NOTE(review): sizeof(bcm_common_fast_msg_type) is the enum's storage
     * size (typically 4), yet only 1 byte of message type is packed below, so
     * the buffer may be a few bytes larger than the packed payload. Preserved
     * as-is — TODO confirm against the embedded-side parser. */
    uint32_t len = sizeof(bcm_common_fast_msg_type) + sizeof(pon_ni) +
        sizeof(msg->hdr) + sizeof(bcm_common_fast_set_alloc) * msg->hdr.num_of_allocs;
    uint32_t i;

    buf = bcmos_buf_alloc(len);
    if (!buf)
    {
        BCMOS_TRACE_ERR("Can't allocate packet buffer\n");
        return BCM_ERR_NOMEM;
    }
    bcmos_buf_length_set(buf, len);
    buf_data = bcmos_buf_data(buf);

    /* pack and convert endianness from host to embedded */
    *buf_data++ = (uint8_t)BCM_COMMON_FAST_MSG_TYPE_SET_ALLOCS; /* msg type */
    *buf_data++ = pon_ni; /* pon_ni is U8 */
    msg_p = (bcm_common_fast_set_allocs_msg *)buf_data;
    msg_p->hdr.cycle_num = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U32, msg->hdr.cycle_num);
    msg_p->hdr.num_of_allocs = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U32, msg->hdr.num_of_allocs);
    /* BUG FIX: iterate over the actual number of list entries. The original
     * bound was sizeof(msg->hdr.num_of_allocs) — the field's byte size (4) —
     * which always packed exactly 4 entries regardless of the list length. */
    for (i = 0; i < msg->hdr.num_of_allocs; i++)
    {
        /* convert each list entry's endianness and pack it into buf */
        msg_p->set_alloc_list[i].alloc_id = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U16, msg->set_alloc_list[i].alloc_id);
        msg_p->set_alloc_list[i].allocation_blocks = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U32, msg->set_alloc_list[i].allocation_blocks);
    }

    /* send directly to PCIe driver on the dedicated channel for fast API */
    rc = bcmtr_swq_send(device, URGENT_CHANNEL, buf);
    if (rc)
    {
        /* BUG FIX: the message named bcmtr_pcie_send although bcmtr_swq_send
         * is what is called; also propagate the real error instead of
         * collapsing every failure to BCM_ERR_PARM. */
        BCMOS_TRACE_ERR("bcmtr_swq_send failed. Error %s (%d)\n", bcmos_strerror(rc), rc);
        bcmos_buf_free(buf);
        return rc;
    }
    /* buffer will be freed automatically once it is transmitted */
    return BCM_ERR_OK;
}
/* Packs a SET_BW_ALLOCS fast message (type byte + pon_ni byte + header +
 * access list), converting multi-byte fields from host to embedded
 * endianness, and queues it on the urgent software queue toward the PCIe
 * driver.
 *
 * Returns BCM_ERR_OK on success, BCM_ERR_NOMEM if no buffer could be
 * allocated, or the bcmtr_swq_send() error code on transmit failure.
 * On success the buffer is owned (and later freed) by the transport.
 */
bcmos_errno bcmolt_fast_oper_set_accesses(bcmolt_devid device, uint8_t pon_ni, bcm_common_fast_set_accesses_msg *msg)
{
    bcmos_errno rc;
    bcmos_buf *buf;
    bcm_common_fast_set_accesses_msg *msg_p;
    unsigned char *buf_data;
    /* NOTE(review): sizeof(bcm_common_fast_msg_type) is the enum's storage
     * size (typically 4), yet only 1 byte of message type is packed below;
     * preserved as-is — TODO confirm against the embedded-side parser. */
    uint32_t len = sizeof(bcm_common_fast_msg_type) + sizeof(pon_ni) +
        sizeof(msg->hdr) + sizeof(bcm_common_fast_set_access) * msg->hdr.num_of_allocs;
    uint32_t i;

    buf = bcmos_buf_alloc(len);
    if (!buf)
    {
        BCMOS_TRACE_ERR("Can't allocate packet buffer\n");
        return BCM_ERR_NOMEM;
    }
    bcmos_buf_length_set(buf, len);
    buf_data = bcmos_buf_data(buf);

    /* pack and convert endianness from host to embedded */
    *buf_data++ = (uint8_t)BCM_COMMON_FAST_MSG_TYPE_SET_BW_ALLOCS; /* msg type */
    *buf_data++ = pon_ni; /* pon_ni is U8 */
    msg_p = (bcm_common_fast_set_accesses_msg *)buf_data;
    msg_p->hdr.cycle_num = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U32, msg->hdr.cycle_num);
    msg_p->hdr.num_of_allocs = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U32, msg->hdr.num_of_allocs);
    /* BUG FIX: iterate over the actual number of list entries. The original
     * bound was sizeof(msg->hdr.num_of_allocs) — the field's byte size (4) —
     * which always packed exactly 4 entries regardless of the list length. */
    for (i = 0; i < msg->hdr.num_of_allocs; i++)
    {
        /* convert each list entry's endianness and pack it into buf */
        msg_p->set_access_list[i].alloc_id = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U16, msg->set_access_list[i].alloc_id);
        msg_p->set_access_list[i].allocation_size = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U16, msg->set_access_list[i].allocation_size);
        msg_p->set_access_list[i].start_time = BCMOLT_BUF_ENDIAN_CPU_TO_BUF(U16, msg->set_access_list[i].start_time);
        msg_p->set_access_list[i].burst_profile = msg->set_access_list[i].burst_profile; /* burst profile is U8 */
        /* ploam_flag, dbru_flag, end_of_frame, end_of_map and fwi are boolean
         * and are represented as a bitmap (U8) - no endianness fix needed */
        msg_p->set_access_list[i].alloc_flags = msg->set_access_list[i].alloc_flags;
    }

    /* send directly to PCIe driver on the dedicated channel for fast API */
    rc = bcmtr_swq_send(device, URGENT_CHANNEL, buf);
    if (rc)
    {
        /* BUG FIX: the message named bcmtr_pcie_send although bcmtr_swq_send
         * is what is called; also propagate the real error instead of
         * collapsing every failure to BCM_ERR_PARM. */
        BCMOS_TRACE_ERR("bcmtr_swq_send failed. Error %s (%d)\n", bcmos_strerror(rc), rc);
        bcmos_buf_free(buf);
        return rc;
    }
    /* buffer will be freed automatically once it is transmitted */
    return BCM_ERR_OK;
}
/* RX callback to handle messages from embedded -> host.
 *
 * Converts the get-stats indication carried in 'sysb' from buffer (embedded)
 * endianness to host CPU endianness in place, then dispatches it to the host
 * application's registered per-device callback. If no callback is registered
 * for this device the buffer is released here; otherwise the handler is
 * presumed to own the buffer's lifetime — TODO confirm against the handler
 * contract, since only msg_p (which points into sysb) is passed on.
 *
 * 'channel' and 'data' are part of the mux callback signature and are unused.
 */
void bcmolt_coop_dba_rx(bcmolt_devid device, bcmos_buf *sysb, bcmtrmux_channel channel, void *data)
{
    bcmos_errno err;
    bcm_common_fast_get_stats_ind *msg_p;
    unsigned char *buf_data = bcmos_buf_data(sysb);
    uint32_t i;

    /* The host expects a message of type:
     * typedef struct
     * {
     *     bcmolt_xgpon_ni_fast_get_stats_ind_hdr hdr;
     *     bws_dba_get_alloc_stat alloc_stats_list[BCM_COMMON_GPON_FAST_API_LIST_SIZE];
     * } bcmolt_xgpon_ni_fast_get_stats_ind;
     */
    msg_p = (bcm_common_fast_get_stats_ind *)buf_data;

    /* Fix endianness in place. pon_ni is uint8 - no conversion needed. */
    msg_p->hdr.cycle_num = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U32, msg_p->hdr.cycle_num);
    msg_p->hdr.available_bw = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U32, msg_p->hdr.available_bw);
    msg_p->hdr.num_of_allocs = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U32, msg_p->hdr.num_of_allocs);
    for (i = 0; i < msg_p->hdr.num_of_allocs; i++)
    {
        msg_p->alloc_stats_list[i].alloc_id = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U16, msg_p->alloc_stats_list[i].alloc_id);
        msg_p->alloc_stats_list[i].allocated = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U32, msg_p->alloc_stats_list[i].allocated);
        msg_p->alloc_stats_list[i].used = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U32, msg_p->alloc_stats_list[i].used);
        msg_p->alloc_stats_list[i].status_report = BCMOLT_BUF_ENDIAN_BUF_TO_CPU(U32, msg_p->alloc_stats_list[i].status_report);
    }

    if (host_handler_array[device])
    {
        /* calling host application callback to handle the statistics message */
        err = host_handler_array[device](device, msg_p);
        BUG_ON(err);
    }
    else
    {
        /* BUG FIX: sysb is a bcmos_buf - release it with bcmos_buf_free()
         * (the buffer API used everywhere else in this file), not bcmos_free(),
         * which frees raw memory blocks and would not release a buffer object
         * correctly. */
        bcmos_buf_free(sysb);
    }
}
/* Initialize the coop-DBA transport path for device 'devid':
 * register an RX mux channel for embedded->host indications, then configure
 * the software TX queue used by the fast set_allocs/set_accesses APIs.
 *
 * Returns BCM_ERR_OK on success or the failing call's error code. On a TX
 * queue configuration failure the already-registered channel is unregistered
 * before returning.
 */
bcmos_errno bcmolt_coop_dba_init(bcmolt_devid devid, bcmtrmux_channel *channel, f_bcmtr_rx_handler rx_handler, void *data)
{
    bcmos_errno rc;

    rc = bcmtrmux_channel_register(devid, channel, rx_handler, data);
    if (rc != BCM_ERR_OK)
    {
        bcmos_printf("%s: can't register channel %d for device %u. rc=%d\n", __FUNCTION__, *channel, devid, (int)rc);
        /* BUG FIX: nothing was registered, so do not call bcmolt_coop_dba_exit()
         * here - it would unregister a channel that was never registered. */
        return rc;
    }

    rc = bcmtr_swq_tx_queue_cfg(devid, BCMTR_PCIE_PRTY_NORMAL, BCMOLT_COOP_DBA_HARDQ_SIZE, BCMOLT_COOP_DBA_SOFTQ_SIZE);
    if (rc != BCM_ERR_OK)
    {
        /* BUG FIX: the original format string had 4 conversion specifiers but
         * only 3 arguments (undefined behavior); the message now matches the
         * arguments actually passed. */
        bcmos_printf("%s: can't configure tx queue for device %u. rc=%d\n", __FUNCTION__, devid, (int)rc);
        /* undo the successful channel registration */
        bcmolt_coop_dba_exit(devid, *channel);
    }
    return rc;
}
/* Tear down the coop-DBA transport path: release the mux channel that was
 * registered by bcmolt_coop_dba_init(). */
void bcmolt_coop_dba_exit(bcmolt_devid devid, bcmtrmux_channel channel)
{
    bcmtrmux_channel_unregister(devid, channel);
}
/* Register the host application callback that bcmolt_coop_dba_rx() will
 * invoke for get-stats indications arriving from device 'devid'. */
void bcmolt_coop_dba_host_handler_register(bcmolt_devid devid, bcmolt_coop_dba_host_handler_cb host_handler)
{
    host_handler_array[devid] = host_handler;
}
/* Unregister the host application callback for device 'devid'; subsequent
 * indications for this device are freed in bcmolt_coop_dba_rx() instead of
 * being dispatched. */
void bcmolt_coop_dba_host_handler_unregister(bcmolt_devid devid)
{
    host_handler_array[devid] = NULL;
}