blob: 029158e0a4fa2c23fbd53f8097c163d6759dbd1e [file] [log] [blame]
/*
<:copyright-BRCM:2016:DUAL/GPL:standard
Broadcom Proprietary and Confidential.(c) 2016 Broadcom
All Rights Reserved
Unless you and Broadcom execute a separate written software license
agreement governing use of this software, this software is licensed
to you under the terms of the GNU General Public License version 2
(the "GPL"), available at http://www.broadcom.com/licenses/GPLv2.php,
with the following added to such license:
As a special exception, the copyright holders of this software give
you permission to link this software with independent modules, and
to copy and distribute the resulting executable under terms of your
choice, provided that you also meet, for each linked independent
module, the terms and conditions of the license of that module.
An independent module is a module which is not derived from this
software. The special exception does not apply to any modifications
of the software.
Not withstanding the above, under no circumstances may you combine
this software in any way with any other Broadcom software provided
under a license other than the GPL, without Broadcom's express prior
written consent.
:>
*/
#include <bcmos_system.h>
#include <bcmtr_interface.h>
#include <bcmtr_debug.h>
#include "bcmtr_header.h"
#include "bcmtr_internal.h"
/* Per-OLT bookkeeping: the live connection (if any), the rx handlers
 * registered per message group/instance, and the tx-overflow callback. */
typedef struct
{
bcmtr_conn *conn; /**< Connection dynamic info (allocated on connect) */
bcmtr_handler msg_handler[BCMOLT_GROUP_ID__NUM_OF][BCMTR_MAX_INSTANCES]; /**< Rx handlers indexed by [group][instance] */
F_bcmtr_tx_overflow overflow_cb; /**< Callback to be called in case of tx drop because of queue overflow */
} bcmtr_conn_info;
static bcmtr_conn_info conn_info[BCMTR_MAX_OLTS]; /* Store connection info separately per OLT */
static bcmos_errno _bcmtr_connect(bcmolt_devid device, bcmtr_conn **conn, bcmos_bool raw_mode);
static bcmos_mutex conn_lock; /* Serializes connect/disconnect across all OLTs */
/* Return the existing connection for a device, establishing one if needed.
 * When is_raw is TRUE the connection serves the raw bcmtr_proxy_xx()
 * interface and no RX task is created.
 */
static bcmos_errno _bcmtr_conn_get_any(bcmolt_devid device, bcmtr_conn **conn, bcmos_bool is_raw)
{
    bcmos_errno rc;

    if (device >= BCMTR_MAX_OLTS)
        return BCM_ERR_RANGE;

    /* Fast path: connection already exists */
    if (conn_info[device].conn != NULL)
    {
        *conn = conn_info[device].conn;
        return BCM_ERR_OK;
    }

    /* Slow path: establish a new connection and report whatever was stored */
    rc = _bcmtr_connect(device, &conn_info[device].conn, is_raw);
    *conn = conn_info[device].conn;
    return rc;
}
/* Get existing connection. If none - setup new (normal mode: RX task is created) */
bcmos_errno _bcmtr_conn_get(bcmolt_devid device, bcmtr_conn **conn)
{
return _bcmtr_conn_get_any(device, conn, BCMOS_FALSE);
}
/* Release a reassembly block: free every collected fragment buffer,
 * then the block itself, and clear the caller's pointer. */
static void _bcmtr_reass_block_free(bcmtr_reass **prb)
{
    bcmtr_reass *rb = *prb;
    int frag;

    for (frag = 0; frag < rb->num_fragments; frag++)
        bcmolt_buf_free(&rb->fragments[frag]);
    bcmos_free(rb);
    *prb = NULL;
}
/* Free transport header. Called under transport lock.
 * Releases the buffers and reassembly state, clears the leading portion of
 * the header and returns it to its owning free pool.
 */
static void _bcmtr_tmsg_free(bcmtr_msg *tmsg, bcmtr_msg_list *cur_list)
{
/* Remove from the list it is in, if any */
if (cur_list)
TAILQ_REMOVE(cur_list, tmsg, l);
/* Release tx/rx buffers and any partial reassembly state */
bcmolt_buf_free(&tmsg->tx_buf);
bcmolt_buf_free(&tmsg->rx_buf);
if (tmsg->reass)
_bcmtr_reass_block_free(&tmsg->reass);
/* Only the first BCMTR_HDR_CLEAR_SIZE bytes are cleared: free_list is read
 * right below, so it (and presumably sem / ipc_hdr) must lie beyond the
 * cleared region */
memset(tmsg, 0, BCMTR_HDR_CLEAR_SIZE);
/* Request-response or autonomous ? free_list points at the right pool */
TAILQ_INSERT_TAIL(tmsg->free_list, tmsg, l);
}
/* Unpack message. *unpacked is set=NULL in case of error */
static inline bcmos_errno _bcmtr_msg_unpack(bcmtr_conn *conn, bcmolt_buf *buf, bcmtr_hdr *hdr, uint32_t ts, bcmolt_msg **msg)
{
int16_t err;
uint8_t *packed = buf->curr;
uint32_t packed_length = bcmolt_buf_get_remaining_size(buf);
/* Unpack */
BUG_ON(!buf->start);
err = bcmolt_msg_unpack(buf, msg);
if (err < 0)
{
BCMTR_CLD_CHECK_NOTIFY(conn->device, hdr, BCMTR_CLD_EV_RECV_DISCARD, ts, packed, packed_length, NULL);
++conn->stat.unpack_errors;
return err;
}
BCMTR_CLD_CHECK_NOTIFY(conn->device, hdr, BCMTR_CLD_EV_RECV, ts, packed, packed_length, *msg);
return BCM_ERR_OK;
}
/* Transport IPC release callback.
 * IPC headers are embedded in bcmtr_msg blocks (see _bcmtr_tmsg_ipc_init) and
 * are never released through the OS message hook - reaching here is a bug. */
static void _bcmtr_ipc_msg_release(bcmos_msg *m)
{
BCMOS_TRACE_ERR("We shouldn't be here!\n");
}
/* Transport IPC message handler.
 * Called in context of the target module as part
 * of dispatching message to the user task.
 * It unpacks message, releases transport header and calls user callback
 */
static void _bcmtr_ipc_msg_handler(bcmos_module_id module_id, bcmos_msg *m)
{
bcmos_errno err;
bcmtr_msg *tmsg = m->data;
bcmtr_conn *conn = tmsg->conn;
bcmolt_msg *msg = NULL;
/* Unpack */
err = _bcmtr_msg_unpack(conn, &tmsg->rx_buf, &tmsg->hdr, tmsg->timestamp, &msg);
if (err != BCM_ERR_OK)
{
BCMOS_TRACE_ERR(
"Unpack error for module %d. Error %s (%d)\n",
module_id,
bcmos_strerror(err),
err);
msg = NULL; /* defensive: _bcmtr_msg_unpack leaves msg NULL on error anyway */
}
if (msg != NULL)
{
/* Look up the handler registered for this group/instance */
bcmtr_handler *h = &conn_info[conn->device].msg_handler[tmsg->hdr.msg_id][tmsg->hdr.instance];
msg->subch = tmsg->subch;
if (h->app_cb)
{
msg->corr_tag = tmsg->hdr.corr_tag;
h->app_cb(conn->device, msg);
}
else
{
/* No handler registered - count and drop */
bcmolt_msg_free(msg);
++conn->stat.no_rx_handler;
}
}
/* Release transport header under conn lock */
bcmos_mutex_lock(&conn->mutex);
_bcmtr_tmsg_free(tmsg, NULL);
bcmos_mutex_unlock(&conn->mutex);
}
/* Wire up the embedded OS-IPC header so the transport block can be
 * dispatched to a receiver module's message queue. */
static void _bcmtr_tmsg_ipc_init(bcmtr_msg *tmsg)
{
    bcmos_msg *ipc = &tmsg->ipc_hdr;

    ipc->type = BCMOS_MSG_ID_INTERNAL_IPC;
    ipc->handler = _bcmtr_ipc_msg_handler;
    ipc->release = _bcmtr_ipc_msg_release;
    ipc->start = tmsg;
    ipc->data = tmsg;
}
/* Pre-allocate transport header array, put all blocks on free lists */
static int _bcmtr_tmsg_list_alloc(bcmtr_conn *conn)
{
int n_hdr, i;
bcmtr_msg *tmsg;
n_hdr = conn->cfg.max_requests + conn->cfg.max_autos;
conn->tmsg_array = bcmos_calloc(sizeof(bcmtr_msg) * n_hdr);
if (!conn->tmsg_array)
return BCM_ERR_NOMEM;
tmsg = conn->tmsg_array;
for (i=0; i<conn->cfg.max_requests; i++, tmsg++)
{
bcmos_errno rc;
TAILQ_INSERT_TAIL(&conn->free_req_list, tmsg, l);
tmsg->free_list = &conn->free_req_list;
rc = bcmos_sem_create(&tmsg->sem, 0, 0, NULL);
if (rc != BCM_ERR_OK)
return rc;
_bcmtr_tmsg_ipc_init(tmsg);
tmsg->conn = conn;
}
for (i=0; i<conn->cfg.max_autos; i++, tmsg++)
{
TAILQ_INSERT_TAIL(&conn->free_auto_list, tmsg, l);
tmsg->free_list = &conn->free_auto_list;
_bcmtr_tmsg_ipc_init(tmsg);
tmsg->conn = conn;
}
return BCM_ERR_OK;
}
/* Cleanup function - free transport headers.
 * Wakes any callers still blocked on a request-response exchange (they
 * observe BCM_ERR_COMM_FAIL in msg->err), destroys per-request semaphores
 * and frees the header array.
 * Idempotent: the array pointer is cleared after freeing so a second call
 * is a no-op (previously it would double-free).
 */
static void _bcmtr_tmsg_list_free(bcmtr_conn *conn)
{
    bcmtr_msg *tmsg, *tmp_tmsg;

    if (!conn->tmsg_array)
        return;

    /* Release waiting tasks of pending request-response transactions */
    TAILQ_FOREACH_SAFE(tmsg, &conn->msg_list, l, tmp_tmsg)
    {
        bcmolt_msg *msg = tmsg->msg;
        if (msg && tmsg->err == BCM_ERR_IN_PROGRESS)
        {
            msg->err = BCM_ERR_COMM_FAIL;
            bcmos_sem_post(&tmsg->sem);
        }
    }
    /* Destroy semaphores of the request pool */
    TAILQ_FOREACH_SAFE(tmsg, &conn->free_req_list, l, tmp_tmsg)
    {
        bcmos_sem_destroy(&tmsg->sem);
    }
    bcmos_free(conn->tmsg_array);
    conn->tmsg_array = NULL; /* guard against double free on repeated cleanup */
}
/* Pop a transport header off the given free list (NULL if the pool is empty).
 * Must be called under lock.
 */
static inline bcmtr_msg *_bcmtr_msg_get_free(bcmtr_msg_list *free_list)
{
    bcmtr_msg *head = TAILQ_FIRST(free_list);

    if (head != NULL)
        TAILQ_REMOVE(free_list, head, l);
    return head;
}
/* Look up a pending message by correlation tag and message id.
 * Only entries still in progress match; returns NULL if nothing matches.
 * Called under lock.
 */
static bcmtr_msg *_bcmtr_msg_get_by_corr_tag(const bcmtr_conn *conn, const bcmtr_hdr *hdr)
{
    bcmtr_msg *entry;

    TAILQ_FOREACH(entry, &conn->msg_list, l)
    {
        bcmos_bool match = (entry->hdr.corr_tag == hdr->corr_tag) &&
            (entry->hdr.msg_id == hdr->msg_id) &&
            (entry->err == BCM_ERR_IN_PROGRESS);
        if (match)
            break;
    }
    return entry;
}
/* Message reassembler. Returns TRUE if message reassembling is completed.
 * On completion tmsg->rx_buf holds the flat message and tmsg->err the result.
 * Takes ownership of "buf" (adopts or frees it); the caller must not use the
 * buffer after this call.
 */
static bcmos_bool _bcmtr_reassemble(bcmtr_conn *conn, bcmtr_msg *tmsg, bcmolt_buf *buf)
{
bcmtr_hdr *hdr = &tmsg->hdr;
uint16_t frag_num = hdr->frag_number;
bcmos_bool done = BCMOS_FALSE;
bcmos_bool is_last;
is_last = !hdr->more_fragments;
/* Single-buffer message ? Adopt the rx buffer as-is */
if (is_last && !frag_num)
{
tmsg->rx_buf = *buf;
tmsg->err = BCM_ERR_OK;
buf->start = NULL; /* ownership moved to tmsg->rx_buf */
return BCMOS_TRUE;
}
/*
 * Multi-part message
 */
/* Discard if invalid fragment number or duplicate */
if (frag_num >= conn->cfg.max_fragments ||
(tmsg->reass && tmsg->reass->fragments[frag_num].start) )
{
bcmolt_buf_free(buf);
/* If last out-of range fragment was received report it.
 * We want to avoid request retransmission in this case */
if (frag_num >= conn->cfg.max_fragments)
{
tmsg->err = BCM_ERR_TOO_MANY_FRAGS;
return is_last;
}
/* Duplicate fragment - count and keep waiting */
++conn->stat.frag_invalid;
return BCMOS_FALSE;
}
/* Allocate reassembly buffer if not done yet and store fragment */
if (!tmsg->reass)
{
/* Single allocation: control block immediately followed by fragment array */
tmsg->reass = bcmos_calloc(sizeof(bcmtr_reass) + conn->cfg.max_fragments * sizeof(bcmolt_buf));
if (!tmsg->reass)
{
tmsg->err = BCM_ERR_NOMEM;
++conn->stat.msg_no_mem;
bcmolt_buf_free(buf);
return BCMOS_FALSE;
}
/* NOTE(review): pointer arithmetic via (long) is not portable to LLP64
 * targets; (uint8_t *) arithmetic would be safer - confirm target ABIs */
tmsg->reass->fragments = (bcmolt_buf *)((long)tmsg->reass + sizeof(bcmtr_reass));
}
tmsg->reass->total_size += bcmolt_buf_get_remaining_size(buf);
tmsg->reass->fragments[frag_num] = *buf;
buf->start = NULL; /* ownership moved into the fragment table */
tmsg->reass->num_fragments++;
if (is_last)
tmsg->reass->max_fragment = frag_num;
/* Done once the last fragment index is known and all fragments have arrived */
done = (tmsg->reass->max_fragment && (tmsg->reass->num_fragments > tmsg->reass->max_fragment));
++conn->stat.frag_received;
/* Reassemble if done */
if (done)
{
/* Allocate big flat buffer */
if (bcmolt_buf_alloc(&tmsg->rx_buf, tmsg->reass->total_size, BCMTR_BUF_ENDIAN) == BCM_ERR_OK)
{
int i;
uint8_t *body = tmsg->rx_buf.start;
/* Concatenate the fragments back-to-back in index order */
for (i=0; i<tmsg->reass->num_fragments; i++)
{
uint32_t frag_size = bcmolt_buf_get_remaining_size(&tmsg->reass->fragments[i]);
BUG_ON(!tmsg->reass->fragments[i].curr);
memcpy(body, tmsg->reass->fragments[i].curr, frag_size);
body += frag_size;
}
tmsg->err = BCM_ERR_OK;
}
else
{
/* Reassembly buffer allocation failed */
tmsg->err = BCM_ERR_NOMEM;
}
}
else
{
/* More fragments expected. Update timestamp to prolong timing out */
tmsg->timestamp = bcmos_timestamp();
}
return done;
}
/* Notify application that message is ready: wake the task blocked on the
 * per-request semaphore (see bcmtr_call) */
static inline void _bcmtr_notify_ready(bcmtr_conn *conn, bcmtr_msg *tmsg)
{
bcmos_sem_post(&tmsg->sem);
}
/* Notify rx request/response message
 * called under connection lock; the lock is RELEASED here before waking
 * the waiting caller
 */
static inline void _bcmtr_notify_rx_response(bcmtr_conn *conn, bcmtr_msg *tmsg)
{
++conn->stat.msg_resp_received;
/* Now unlock and notify application. Autonomous handler is only called if message is OK.
The lock has been taken in _bcmtr_rx_packet */
bcmos_mutex_unlock(&conn->mutex);
/* Release waiting application. It will take care of unpacking */
_bcmtr_notify_ready(conn, tmsg);
}
/* Notify rx autonomous/request message
 * called under connection lock; the lock is released here before the
 * application callback runs (directly or via module dispatch)
 */
static inline void _bcmtr_notify_rx_req_auto(bcmtr_conn *conn, bcmtr_msg *tmsg)
{
bcmolt_buf rx_buf;
bcmolt_msg *msg = NULL;
bcmolt_group_id msg_id = tmsg->hdr.msg_id;
bcmtr_handler *h;
uint16_t corr_tag;
/* Copy fields needed after tmsg is freed below */
bcmtr_hdr hdr = tmsg->hdr;
uint32_t ts = tmsg->timestamp;
bcmolt_subchannel subch = tmsg->subch;
bcmos_errno err;
/* Validate group id before indexing the handler array */
if (msg_id >= BCMOLT_GROUP_ID__NUM_OF)
{
BCMOS_TRACE_ERR("Unexpected msg group id: %u\n", tmsg->hdr.msg_id);
_bcmtr_tmsg_free(tmsg, NULL);
bcmos_mutex_unlock(&conn->mutex);
return;
}
/* Validate instance before indexing the handler array */
if (tmsg->hdr.instance >= BCMTR_MAX_INSTANCES)
{
BCMOS_TRACE_ERR("Unexpected instance id: %u\n", tmsg->hdr.instance);
_bcmtr_tmsg_free(tmsg, NULL);
bcmos_mutex_unlock(&conn->mutex);
return;
}
h = &conn_info[conn->device].msg_handler[tmsg->hdr.msg_id][tmsg->hdr.instance];
BUG_ON(!h->app_cb);
++conn->stat.msg_req_auto_received;
/* If dispatch is required - do it.
 * The message will be unpacked in the context of the receiver
 */
if ((h->flags & BCMOLT_AUTO_FLAGS_DISPATCH))
{
err = bcmos_msg_send_to_module(h->module, &tmsg->ipc_hdr, 0);
if (err)
{
BCMOS_TRACE_ERR("Can't deliver message to module %d. Error %s(%d)\n",
h->module, bcmos_strerror(err), err);
_bcmtr_tmsg_free(tmsg, NULL);
}
bcmos_mutex_unlock(&conn->mutex);
return;
}
/* No dispatch. Unpacking in the context of rx thread */
corr_tag = tmsg->hdr.corr_tag;
/* Make sure that rx_buf is not released by the following _bcmtr_msg_free.
 * It is needed for unpack and will be released separately later. */
rx_buf = tmsg->rx_buf;
tmsg->rx_buf.start = NULL;
_bcmtr_tmsg_free(tmsg, NULL);
/* Release connection lock taken in _bcmtr_rx_packet */
bcmos_mutex_unlock(&conn->mutex);
/* Unpack and deliver */
_bcmtr_msg_unpack(conn, &rx_buf, &hdr, ts, &msg);
bcmolt_buf_free(&rx_buf);
if (msg)
{
msg->corr_tag = corr_tag;
msg->subch = subch;
h->app_cb(conn->device, msg);
}
}
/* Handle rx data. Returns number of messages that was identified and reassembled. Can be 0 or 1.
 * Takes ownership of "buf": it is consumed by the reassembler or freed on the
 * error path.
 */
static int _bcmtr_rx_packet(bcmtr_conn *conn, bcmolt_subchannel subch, bcmolt_buf *buf)
{
bcmtr_hdr hdr;
bcmtr_msg *tmsg;
bcmos_bool msg_done;
bcmos_bool is_response;
/* Transport lock. This lock is needed to
 * - allocate/release transport header
 * - update statistics
 */
bcmos_mutex_lock(&conn->mutex);
/* If some data was received - handle it */
if (buf->len < BCMTR_HDR_SIZE)
{
/* Message is too short */
++conn->stat.msg_comm_err;
goto rx_free_buf_and_error_exit;
}
if (NULL == buf->curr)
{
BCMOS_TRACE_ERR("Invalid buffer received!\n");
goto rx_done;
}
/* Parse the transport header and advance past it */
bcmtr_header_unpack(buf->curr, &hdr);
bcmolt_buf_skip(buf, BCMTR_HDR_SIZE);
is_response = (hdr.dir == BCMOLT_MSG_DIR_RESPONSE);
/* Find transport header. If not found - allocate new for autonomous message */
tmsg = _bcmtr_msg_get_by_corr_tag(conn, &hdr);
if (!tmsg)
{
if (!is_response)
{
/* Allocate new transport block */
tmsg = _bcmtr_msg_get_free(&conn->free_auto_list);
if (!tmsg)
{
/* Autonomous pool exhausted - drop */
++conn->stat.msg_too_many_auto;
goto rx_free_buf_and_error_exit;
}
tmsg->err = BCM_ERR_IN_PROGRESS;
TAILQ_INSERT_TAIL(&conn->msg_list, tmsg, l);
}
else
{
/* Response, but no request - discard */
++conn->stat.msg_no_req;
BCMTR_CLD_CHECK_NOTIFY(conn->device, &hdr, BCMTR_CLD_EV_RECV_DISCARD,
bcmos_timestamp(), buf->curr, bcmolt_buf_get_remaining_size(buf), NULL);
goto rx_free_buf_and_error_exit;
}
}
/* Reassemble. "buf" should not be used following this call */
tmsg->hdr = hdr;
tmsg->subch = subch;
msg_done = _bcmtr_reassemble(conn, tmsg, buf);
/* If expects more parts - nothing more to do here */
if (!msg_done)
goto rx_done;
/* Completed but failed non-response message: release its transport block */
if (tmsg->err && !is_response)
{
_bcmtr_tmsg_free(tmsg, &conn->msg_list);
goto rx_done;
}
/* Done with the message. Get it out of pending message queue to avoid race condition
 * when timeout happens while the message is in flight to the destination task.
 */
TAILQ_REMOVE(&conn->msg_list, tmsg, l);
/* Notify rx. conn->mutex is still taken. It will be released
inside _bcmtr_notify_rx_response(), _bcmtr_notify_rx_req_auto() */
tmsg->timestamp = bcmos_timestamp();
if (is_response)
{
_bcmtr_notify_rx_response(conn, tmsg);
}
else
{
_bcmtr_notify_rx_req_auto(conn, tmsg);
}
return 1;
/* Error return */
rx_free_buf_and_error_exit:
bcmos_mutex_unlock(&conn->mutex);
bcmolt_buf_free(buf);
return 0;
/* return without a buffer */
rx_done:
bcmos_mutex_unlock(&conn->mutex);
return 0;
}
/*
 * Low-level fragment and send function.
 * It allocates a series of buffers up to MAX_MTU size, copies original data into them and sends.
 * The original buffer stays intact - in case it should be retransmitted.
 * Returns the first driver/allocation error, or BCM_ERR_OK.
 */
static bcmos_errno _bcmtr_fragment_and_send(bcmtr_conn *conn, bcmtr_msg *tmsg, bcmtr_send_flags flags)
{
uint32_t frag_number = 0;
bcmos_errno err = BCM_ERR_OK;
/* Payload starts after the driver headroom and the packed transport header */
uint32_t data_offset = conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE;
uint32_t data_len = bcmolt_buf_get_used(&tmsg->tx_buf) - data_offset;
uint8_t *data = tmsg->tx_buf.start + data_offset;
do
{
/* Each fragment carries its own transport header */
uint32_t send_len = data_len + BCMTR_HDR_SIZE;
bcmolt_buf frag;
if (send_len > conn->cfg.max_mtu)
{
/* Clamp to MTU; more fragments will follow */
send_len = conn->cfg.max_mtu;
tmsg->hdr.more_fragments = BCMOS_TRUE;
}
else
{
tmsg->hdr.more_fragments = BCMOS_FALSE;
}
err = bcmolt_buf_alloc(&frag, send_len + conn->cfg.plugin_cfg.headroom, BCMTR_BUF_ENDIAN);
if (err)
break;
tmsg->hdr.frag_number = frag_number++;
/* Pack correlation tag, command and length */
bcmtr_header_pack(&tmsg->hdr, frag.start + conn->cfg.plugin_cfg.headroom);
bcmolt_buf_skip(&frag, data_offset);
if (bcmolt_buf_write(&frag, data, send_len - BCMTR_HDR_SIZE))
{
/* Send using customer-provided driver */
err = conn->driver.send(conn->drv_priv, tmsg->msg->subch, &frag, flags);
}
else
{
err = BCM_ERR_OVERFLOW;
}
bcmolt_buf_free(&frag);
if (err)
{
break;
}
/* Advance past the payload consumed by this fragment */
data_len -= (send_len - BCMTR_HDR_SIZE);
data += (send_len - BCMTR_HDR_SIZE);
} while (data_len);
return err;
}
/* Check for time-outs. returns number of messages timed out.
 * Requests with retries left are retransmitted; exhausted requests wake the
 * waiting caller with BCM_ERR_TIMEOUT; stale reassembly blocks (autonomous
 * messages, msg == NULL) are released.
 */
static int _bcmtr_check_timeout(bcmtr_conn *conn)
{
bcmtr_msg *tmsg, *tmp;
uint32_t now;
int nmsg = 0;
bcmos_errno err;
/* Transport lock */
bcmos_mutex_lock(&conn->mutex);
now = bcmos_timestamp();
TAILQ_FOREACH_SAFE(tmsg, &conn->msg_list, l, tmp)
{
bcmolt_msg *msg = tmsg->msg;
/* Skip entries that haven't timed out yet */
if (now - tmsg->timestamp <= conn->cfg.msg_timeout)
continue;
/* Retransmit ? (only request-response entries carry msg) */
if (msg && tmsg->tx_count <= conn->cfg.max_retries)
{
tmsg->timestamp = bcmos_timestamp();
BCMTR_CLD_CHECK_NOTIFY(
conn->device,
&tmsg->hdr,
BCMTR_CLD_EV_RESEND,
tmsg->timestamp,
tmsg->tx_buf.start + conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE,
tmsg->tx_buf.len - (conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE),
msg);
/* Fragment and send or send directly, depending on message length */
if (bcmolt_buf_get_used(&tmsg->tx_buf) > conn->cfg.max_mtu)
{
err = _bcmtr_fragment_and_send(conn, tmsg, BCMTR_SEND_FLAGS_PRI_NORMAL);
}
else
{
err = conn->driver.send(conn->drv_priv, msg->subch, &tmsg->tx_buf, BCMTR_SEND_FLAGS_PRI_NORMAL);
}
if (err)
{
++conn->stat.msg_comm_err;
}
++tmsg->tx_count;
continue;
}
/* Giving up */
/* Release waiting task if request-response - unless it has already been done */
if (msg)
{
if (tmsg->err == BCM_ERR_IN_PROGRESS)
{
tmsg->err = BCM_ERR_TIMEOUT;
}
BCMTR_CLD_CHECK_NOTIFY(
conn->device,
&tmsg->hdr,
BCMTR_CLD_EV_TIMEOUT,
bcmos_timestamp(),
tmsg->tx_buf.start + conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE,
tmsg->tx_buf.len - (conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE),
msg);
TAILQ_REMOVE(&conn->msg_list, tmsg, l);
/* Wake the caller blocked in bcmtr_call(); it releases the header */
_bcmtr_notify_ready(conn, tmsg);
++conn->stat.msg_req_timeout;
}
else
{
/* Incomplete autonomous reassembly - drop it */
_bcmtr_tmsg_free(tmsg, &conn->msg_list);
++conn->stat.msg_reass_timeout;
}
++nmsg;
}
conn->last_timeout_check = bcmos_timestamp();
/* Release transport lock */
bcmos_mutex_unlock(&conn->mutex);
return nmsg;
}
/* Check for receive and timeouts.
 * Loops while progress is being made (packets received or timeouts handled).
 * Stores the number of messages handled in *pnmsg; returns the last driver
 * recv status.
 */
static int _bcmtr_rx_poll(bcmtr_conn *conn, int *pnmsg)
{
bcmolt_buf buf;
int nmsg = 0, nmsg_prev;
bcmos_errno rc = BCM_ERR_OK;
bcmolt_subchannel subch;
do
{
nmsg_prev = nmsg;
/* Plugin driver's recv - get pending rx packet if any.
 * The function is not allowed to block for more than BCMTR_MSG_TIMEOUT ms
 */
rc = conn->driver.recv(conn->drv_priv, &subch, &buf);
if (rc != BCM_ERR_OK)
{
if (rc == BCM_ERR_NOMEM)
{
++conn->stat.msg_no_mem;
}
}
else
{
nmsg += _bcmtr_rx_packet(conn, subch, &buf);
}
/* Check for timeouts if any */
if (bcmos_timestamp() - conn->last_timeout_check > conn->timeout_check_period)
{
/* Check requests waiting for acknowledge and multi-part messages for timeout.
 * Timed-out requests are retransmitted.
 */
nmsg += _bcmtr_check_timeout(conn);
}
} while(nmsg_prev != nmsg);
*pnmsg = nmsg;
return rc;
}
/* Rx thread entry point: poll for received packets and timeouts until a kill
 * is requested, napping briefly on communication failure to avoid spinning. */
static int _bcmtr_rx_thread_handler(long arg)
{
    bcmtr_conn *conn = (bcmtr_conn *)arg;

    while (!conn->kill_request)
    {
        int nmsgs;
        if (_bcmtr_rx_poll(conn, &nmsgs) == BCM_ERR_COMM_FAIL)
            bcmos_usleep(1000);
    }
    conn->kill_done = 1; /* acknowledge the kill request */
    return 0;
}
/*
 * Internal transport interface
 */
/** Default message handler - counts the drop and discards the message */
static void _bcmtr_dft_msg_handler(bcmolt_devid olt, bcmolt_msg *msg)
{
    bcmtr_conn *conn = conn_info[olt].conn;

    /* ToDo: log */
    if (conn != NULL)
        ++conn->stat.no_rx_handler;
    bcmolt_msg_free(msg);
}
/* Spawn the rx polling thread.
 * Skipped (success returned) in raw mode or when the configured priority is
 * negative; on successful creation rx_thread_created is latched. */
static bcmos_errno _bcmtr_create_rx_thread(bcmtr_conn *conn, bcmos_bool raw_mode)
{
    bcmos_task_parm parm;
    bcmos_errno err;

    if (raw_mode || conn->cfg.rx_thread_priority < 0)
        return BCM_ERR_OK;

    memset(&parm, 0, sizeof(parm));
    parm.priority = conn->cfg.rx_thread_priority;
    parm.stack_size = BCMTR_RX_THREAD_STACK;
    parm.handler = _bcmtr_rx_thread_handler;
    parm.name = conn->name;
    parm.data = (long)conn;

    conn->kill_request = BCMOS_FALSE;
    conn->kill_done = BCMOS_FALSE;
    err = bcmos_task_create(&conn->rx_thread, &parm);
    if (err == BCM_ERR_OK)
        conn->rx_thread_created = BCMOS_TRUE;
    return err;
}
/* Stop and destroy the rx thread, if one was created: raise the kill flag,
 * then nap in 1 ms steps until the thread acknowledges. */
static void _bcmtr_destroy_rx_thread(bcmtr_conn *conn)
{
    if (!conn->rx_thread_created)
        return;

    conn->kill_request = 1;
    while (!conn->kill_done)
        bcmos_usleep(1000);
    bcmos_task_destroy(&conn->rx_thread);
}
/* Establish a new connection: allocate the connection structure, read the
 * transport configuration, pre-allocate transport headers, open the plugin
 * driver and (unless raw_mode) start the rx thread. Serialized by conn_lock.
 */
static bcmos_errno _bcmtr_connect(bcmolt_devid device, bcmtr_conn **pconn, bcmos_bool raw_mode)
{
bcmtr_conn *conn;
bcmos_errno err = BCM_ERR_OK;
/* Init OS abstraction - just in case */
err = bcmos_init();
if (err != BCM_ERR_OK && err != BCM_ERR_ALREADY)
{
return err;
}
bcmos_mutex_lock(&conn_lock);
/* Allocate */
conn = bcmos_calloc(sizeof(*conn));
if (!conn)
{
bcmos_mutex_unlock(&conn_lock);
return BCM_ERR_NOMEM;
}
/* Get configuration */
err = bcmtr_cfg_get(device, &conn->cfg, &conn->driver);
if (err)
{
bcmos_mutex_unlock(&conn_lock);
bcmos_free(conn);
return err;
}
snprintf(conn->name, sizeof(conn->name), "tr_rx%u", device);
TAILQ_INIT(&conn->free_req_list);
TAILQ_INIT(&conn->free_auto_list);
TAILQ_INIT(&conn->msg_list);
bcmos_mutex_create(&conn->mutex, 0, NULL);
/* Convert timeouts to us */
conn->cfg.msg_timeout *= 1000;
conn->cfg.msg_ready_timeout *= 1000;
conn->cfg.msg_wait_timeout *= 1000;
/* Set defaults */
conn->timeout_check_period = conn->cfg.msg_wait_timeout;
conn->last_timeout_check = bcmos_timestamp();
/* Allocate and initialize transport blocks and put onto free request and autonomous lists */
err = _bcmtr_tmsg_list_alloc(conn);
/* Open/connect on driver level */
err = err ? err : conn->driver.open(device, &conn->cfg.plugin_cfg, &conn->drv_priv);
if (err)
{
bcmos_mutex_destroy(&conn->mutex);
goto cleanup;
}
conn->connected = BCMOS_TRUE;
/* Create rx thread if necessary */
err = _bcmtr_create_rx_thread(conn, raw_mode);
if (err)
{
conn->driver.close(conn->drv_priv);
bcmos_mutex_destroy(&conn->mutex);
goto cleanup;
}
conn->device = device;
*pconn = conn;
bcmos_mutex_unlock(&conn_lock);
return BCM_ERR_OK;
/* Error path: release transport headers and the connection structure */
cleanup:
_bcmtr_tmsg_list_free(conn);
bcmos_free(conn);
bcmos_mutex_unlock(&conn_lock);
return err;
}
/** Query whether or not the device is currently connected */
bcmos_errno bcmtr_is_connected(bcmolt_devid device, bcmos_bool *is_connected)
{
    const bcmtr_conn *conn;

    if (device >= BCMTR_MAX_OLTS)
        return BCM_ERR_RANGE;

    conn = conn_info[device].conn;
    *is_connected = (conn != NULL) && conn->connected;
    return BCM_ERR_OK;
}
/** Open transport channel.
 * Establishes the connection (including rx thread) if not yet connected. */
bcmos_errno bcmtr_connect(bcmolt_devid device)
{
bcmtr_conn *conn;
return _bcmtr_conn_get(device, &conn);
}
/** Close transport channel.
 * Detaches the connection from the device slot, stops the rx thread, closes
 * the plugin driver, releases pending messages and frees the connection.
 */
bcmos_errno bcmtr_disconnect(bcmolt_devid device)
{
bcmtr_conn *conn;
if (device >= BCMTR_MAX_OLTS)
{
return BCM_ERR_RANGE;
}
bcmos_mutex_lock(&conn_lock);
conn = conn_info[device].conn;
if (!conn)
{
bcmos_mutex_unlock(&conn_lock);
return BCM_ERR_NOT_CONNECTED;
}
/* Detach first so no new users pick up the dying connection */
conn_info[device].conn = NULL;
/* Kill rx thread if any */
_bcmtr_destroy_rx_thread(conn);
bcmos_mutex_lock(&conn->mutex);
/* Close connection */
if (conn->driver.close)
{
conn->driver.close(conn->drv_priv);
}
/* Release all pending messages */
bcmos_usleep(100000); /* NOTE(review): grace delay - presumably lets in-flight dispatches drain; confirm */
_bcmtr_tmsg_list_free(conn);
bcmos_mutex_unlock(&conn->mutex);
bcmos_mutex_destroy(&conn->mutex);
bcmos_free(conn);
bcmos_mutex_unlock(&conn_lock);
return BCM_ERR_OK;
}
/* Low-level disconnect that breaks "physical" connection, but doesn't destroy connection structure and registrations.
 * Stops the rx thread, closes the plugin driver and clears the connected flag.
 */
bcmos_errno bcmtr_driver_disconnect(bcmolt_devid device)
{
bcmtr_conn *conn;
bcmos_errno err = BCM_ERR_OK;
if (device >= BCMTR_MAX_OLTS)
{
return BCM_ERR_RANGE;
}
bcmos_mutex_lock(&conn_lock);
conn = conn_info[device].conn;
if (conn == NULL || !conn->connected)
{
bcmos_mutex_unlock(&conn_lock);
return BCM_ERR_NOT_CONNECTED;
}
/* Stop rx polling before closing the driver */
_bcmtr_destroy_rx_thread(conn);
bcmos_mutex_lock(&conn->mutex);
/* Close driver connection */
if (conn->driver.close != NULL)
{
err = conn->driver.close(conn->drv_priv);
if (err != BCM_ERR_OK)
{
BCMOS_TRACE_ERR("Failed to close transport driver: %s (%d)\n", bcmos_strerror(err), err);
}
}
conn->connected = BCMOS_FALSE;
bcmos_mutex_unlock(&conn->mutex);
bcmos_mutex_unlock(&conn_lock);
return err;
}
/* Repair/reconnect the driver-level connection for an already-connected device.
 * Fix vs. previous version: on driver open failure we no longer proceed to
 * create the rx thread, and "connected" is only set when both the driver open
 * and the rx thread creation succeed (previously it was set unconditionally,
 * leaving the device marked connected after a failed reconnect).
 */
bcmos_errno bcmtr_driver_reconnect(bcmolt_devid device)
{
    bcmtr_conn *conn;
    bcmos_errno err;

    if (device >= BCMTR_MAX_OLTS)
    {
        return BCM_ERR_RANGE;
    }
    bcmos_mutex_lock(&conn_lock);
    conn = conn_info[device].conn;
    if (conn == NULL)
    {
        bcmos_mutex_unlock(&conn_lock);
        return BCM_ERR_NOT_CONNECTED;
    }
    if (conn->connected)
    {
        bcmtr_driver_disconnect(device);
    }
    bcmos_mutex_lock(&conn->mutex);
    /* Re-open driver connection */
    err = conn->driver.open(device, &conn->cfg.plugin_cfg, &conn->drv_priv);
    if (err != BCM_ERR_OK)
    {
        BCMOS_TRACE_ERR("Failed to re-open transport driver: %s (%d)\n", bcmos_strerror(err), err);
        goto out; /* leave disconnected state intact */
    }
    err = _bcmtr_create_rx_thread(conn, BCMOS_FALSE);
    if (err != BCM_ERR_OK)
    {
        BCMOS_TRACE_ERR("Failed to create RX transport task: %s (%d)\n", bcmos_strerror(err), err);
        conn->driver.close(conn->drv_priv);
        goto out;
    }
    conn->connected = BCMOS_TRUE;
out:
    bcmos_mutex_unlock(&conn->mutex);
    bcmos_mutex_unlock(&conn_lock);
    return err;
}
/* Register for notification that transmit failed because tx_queue was full.
 * Passing cb == NULL disables the notification (bcmtr_send only invokes a
 * non-NULL callback). */
bcmos_errno bcmtr_tx_overflow_cb_register(bcmolt_devid device, F_bcmtr_tx_overflow cb)
{
if (device >= BCMTR_MAX_OLTS)
{
return BCM_ERR_RANGE;
}
conn_info[device].overflow_cb = cb;
return BCM_ERR_OK;
}
/* Send message.
 * Internal function. Called under connection lock.
 * On success, ownership of *tx_buf passes to the transport header
 * (tx_buf->start is NULLed); on failure the caller keeps ownership and must
 * free the buffer. If ptmsg is NULL the header is returned to the free pool
 * immediately (fire-and-forget); otherwise it is handed back for response
 * tracking.
 * Fix vs. previous version: a bcmtr_header_fill failure used to return
 * without releasing the transport header popped from the free list, leaking
 * it permanently; it now goes through the cleanup path.
 */
static bcmos_errno _bcmtr_send(bcmtr_conn *conn, bcmolt_msg *msg, bcmolt_buf *tx_buf, bcmtr_send_flags flags, bcmtr_msg **ptmsg)
{
    bcmtr_msg *tmsg;
    bcmos_errno err;

    if (!conn->connected)
    {
        ++conn->stat.not_connected;
        return BCM_ERR_NOT_CONNECTED;
    }
    /* Allocate message transport header */
    tmsg = _bcmtr_msg_get_free(&conn->free_req_list);
    if (!tmsg)
    {
        ++conn->stat.msg_no_mem;
        return BCM_ERR_TOO_MANY_REQS;
    }
    tmsg->msg = msg;
    /* Fill transport header */
    err = bcmtr_header_fill(tmsg->msg, &tmsg->hdr);
    if (err)
        goto cleanup; /* was: return err - leaked the transport header */
    tmsg->err = BCM_ERR_IN_PROGRESS;
    tmsg->tx_count = 1;
    /* Save transmit buffer. It will be released together with transport header */
    tmsg->tx_buf = *tx_buf;
    tmsg->timestamp = bcmos_timestamp();
    if (bcmolt_buf_get_used(tx_buf) > conn->cfg.max_mtu)
    {
        /* Too big for a single MTU - fragment */
        err = _bcmtr_fragment_and_send(conn, tmsg, flags);
    }
    else
    {
        /* Pack correlation tag, command and length */
        bcmtr_header_pack(&tmsg->hdr, tmsg->tx_buf.start + conn->cfg.plugin_cfg.headroom);
        /* Send using customer-provided driver */
        err = conn->driver.send(conn->drv_priv, msg->subch, &tmsg->tx_buf, flags);
    }
    BCMTR_CLD_CHECK_NOTIFY(
        conn->device,
        &tmsg->hdr,
        BCMTR_CLD_EV_SEND,
        tmsg->timestamp,
        tmsg->tx_buf.start + conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE,
        tmsg->tx_buf.len - (conn->cfg.plugin_cfg.headroom + BCMTR_HDR_SIZE),
        msg);
    if (err != BCM_ERR_OK)
    {
        ++conn->stat.msg_comm_err;
        goto cleanup;
    }
    tx_buf->start = NULL; /* Ownership passed to tmsg */
    ++conn->stat.msg_sent;
    if (ptmsg)
    {
        *ptmsg = tmsg;
    }
    else
    {
        _bcmtr_tmsg_free(tmsg, NULL);
    }
    return BCM_ERR_OK;

    /* error: return header to the free pool; tx buffer stays with the caller */
cleanup:
    tmsg->tx_buf.start = NULL; /* prevent tx buffer de-allocation */
    _bcmtr_tmsg_free(tmsg, NULL);
    return err;
}
/* Allocate tx buffer and pack the message.
 * Reserves driver headroom + transport header space in front of the packed
 * body; buf->curr is left right after the reserved area before packing.
 * An existing caller-supplied buffer is reused when large enough; a too-small
 * one yields BCM_ERR_OVERFLOW (reallocation is a ToDo).
 */
static bcmos_errno _bcmtr_pack(const bcmtr_conn *conn, bcmolt_msg *msg, bcmolt_buf *buf)
{
int32_t len = bcmolt_msg_get_packed_length(msg);
uint32_t headroom = conn->cfg.plugin_cfg.headroom;
bcmos_errno err;
/* Negative packed length doubles as an error code */
if (len < 0)
return (bcmos_errno)len;
/* Reallocate if too big */
len += BCMTR_HDR_SIZE + headroom;
if (buf->start)
{
if (buf->len < len)
{
/* ToDo: reallocate */
return BCM_ERR_OVERFLOW;
}
else
{
bcmolt_buf_init(buf, len, buf->start, BCMTR_BUF_ENDIAN);
}
}
else
{
err = bcmolt_buf_alloc(buf, len, BCMTR_BUF_ENDIAN);
if (err)
{
return err;
}
}
/* Reserve room for header */
buf->curr = buf->start + BCMTR_HDR_SIZE + headroom;
/* Pack */
err = bcmolt_msg_pack(msg, buf);
return err;
}
/*
 * External message interface
 */
/* Send message. Don't expect response.
 * Packs into a fresh buffer and transmits under the connection lock.
 * On failure the buffer is released here, and the registered overflow
 * callback (if any) is invoked when the driver reported a full tx queue.
 */
bcmos_errno bcmtr_send(bcmolt_devid device, bcmolt_msg *msg, bcmtr_send_flags flags)
{
bcmtr_conn *conn;
bcmos_errno err;
bcmolt_buf tx_buf = {};
err = _bcmtr_conn_get(device, &conn);
if (err)
return err;
/* Allocate transport buffer and pack */
err = _bcmtr_pack(conn, msg, &tx_buf);
if (err)
{
bcmolt_buf_free(&tx_buf);
return err;
}
bcmos_mutex_lock(&conn->mutex);
err = _bcmtr_send(conn, msg, &tx_buf, flags, NULL);
bcmos_mutex_unlock(&conn->mutex);
if (err)
{
/* On failure, buffer ownership stayed with us - release it */
bcmolt_buf_free(&tx_buf);
if (err == BCM_ERR_QUEUE_FULL && conn_info[device].overflow_cb)
conn_info[device].overflow_cb(conn->device, flags);
}
return err;
}
/* Turn the caller's message into a failed response: set direction and error,
 * optionally copying bounded, NUL-terminated error text. Returns err so
 * callers can `return bcmtr_call_err(...)`. */
static bcmos_errno bcmtr_call_err(bcmolt_msg *msg, bcmos_errno err, const char *err_text)
{
    msg->dir = BCMOLT_MSG_DIR_RESPONSE;
    msg->err = err;
    if (err_text == NULL)
        return err;

    strncpy(msg->err_text, err_text, BCMOLT_MAX_ERR_TEXT_LENGTH - 1);
    msg->err_text[BCMOLT_MAX_ERR_TEXT_LENGTH - 1] = 0; /* strncpy may not terminate */
    return err;
}
/* Send message and wait for response.
 * Blocks the caller on the per-request semaphore; timeouts and retries are
 * enforced by the rx audit (_bcmtr_check_timeout), which posts the semaphore
 * on final failure. Must not be called from the rx thread itself.
 */
bcmos_errno bcmtr_call(bcmolt_devid device, bcmolt_msg *msg)
{
/* NOTE(review): increment below is not atomic - concurrent callers could get
 * duplicate correlation tags; presumably acceptable or externally serialized */
static uint32_t corr_tag = 0;
bcmos_task *task;
bcmtr_msg *tmsg = NULL;
bcmtr_conn *conn;
bcmolt_buf tx_buf = {};
bcmos_errno err;
uint8_t instance;
msg->err = BCM_ERR_OK;
msg->dir = BCMOLT_MSG_DIR_REQUEST;
msg->corr_tag = ++corr_tag;
err = _bcmtr_conn_get(device, &conn);
if (err)
{
return bcmtr_call_err(msg, err, NULL);
}
/* prevent sleeping in RX thread (this would cause it to never wake up) */
task = bcmos_task_current();
if (task == &conn->rx_thread)
{
return bcmtr_call_err(msg, BCM_ERR_COMM_FAIL, "Cannot call API functions from PCI RX thread");
}
instance = bcmolt_msg_instance(msg);
if (instance >= BCMTR_MAX_INSTANCES)
{
return bcmtr_call_err(msg, BCM_ERR_KEY_RANGE, "Invalid PON index");
}
/* Allocate transport buffer and pack */
err = _bcmtr_pack(conn, msg, &tx_buf);
if (err)
{
bcmolt_buf_free(&tx_buf);
return bcmtr_call_err(msg, err, NULL);
}
/* transmit request under connection lock */
bcmos_mutex_lock(&conn->mutex);
err = _bcmtr_send(conn, msg, &tx_buf, BCMTR_SEND_FLAGS_CALL, &tmsg);
if (!tmsg)
{
bcmos_mutex_unlock(&conn->mutex);
bcmolt_buf_free(&tx_buf);
return bcmtr_call_err(msg, err, NULL);
}
/* Track the request so the rx path can match the response by corr_tag */
TAILQ_INSERT_TAIL(&conn->msg_list, tmsg, l);
bcmos_mutex_unlock(&conn->mutex);
/* Wait for response or timeout.
 * Message timeout is enforced by audit rather than semaphore timeout option
 */
bcmos_sem_wait(&tmsg->sem, BCMOS_WAIT_FOREVER);
/* Connection could've been killed while we are waiting here.
 * It is indicated by COMM_FAILURE in msg->err.
 * In this case transport header (tmsg) is already released
 */
if (msg->err == BCM_ERR_COMM_FAIL)
{
return bcmtr_call_err(msg, msg->err, NULL);
}
err = tmsg->err;
if (!err)
{
/* Unpack the response over the caller's message */
err = _bcmtr_msg_unpack(conn, &tmsg->rx_buf, &tmsg->hdr, tmsg->timestamp, &msg);
}
/* Take connection lock again in order to release transport header safely */
bcmos_mutex_lock(&conn->mutex);
_bcmtr_tmsg_free(tmsg, NULL);
bcmos_mutex_unlock(&conn->mutex);
return bcmtr_call_err(msg, err ? err : msg->err, NULL);
}
#ifdef BCM_SUBSYSTEM_HOST
/* Send (un)registration info to the mux.
 * Transmits a header-only message (no payload) on subchannel 0, preserving
 * the driver headroom in front of the packed header. */
static bcmos_errno _bcmtr_send_to_mux(bcmtr_conn *conn, bcmtr_hdr *hdr)
{
bcmolt_buf buf;
uint8_t packed_hdr[BCMTR_HDR_SIZE];
bcmos_errno err;
err = bcmolt_buf_alloc(&buf, BCMTR_HDR_SIZE + conn->cfg.plugin_cfg.headroom, BCMTR_BUF_ENDIAN);
if (err)
{
return err;
}
/* Leave driver headroom in front of the packed header */
bcmolt_buf_skip(&buf, conn->cfg.plugin_cfg.headroom);
bcmtr_header_pack(hdr, packed_hdr);
if (bcmolt_buf_write(&buf, packed_hdr, BCMTR_HDR_SIZE))
err = conn->driver.send(conn->drv_priv, 0, &buf, BCMTR_SEND_FLAGS_PRI_NORMAL);
else
err = BCM_ERR_OVERFLOW;
bcmolt_buf_free(&buf);
return err;
}
#endif
/** Register message handler
 *
 * Binds an application callback to a message id (object/group/subgroup) and
 * PON instance. Wildcards are supported: an out-of-range object means "all
 * objects" and subgroup == BCMOLT_SUBGROUP_ANY means "all subgroups"; both
 * are expanded via recursive calls below.
 *
 * \param[in] device OLT device index
 * \param[in] parm Registration parameters
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_msg_handler_register(bcmolt_devid device, const bcmtr_handler_parm *parm)
{
bcmtr_conn *conn;
bcmos_errno err = BCM_ERR_OK;
bcmolt_group_id msg_id;
bcmtr_handler *h;
/* Basic validation; a callback is mandatory when registering */
if (device >= BCMTR_MAX_OLTS || !parm || !parm->app_cb || parm->instance >= BCMTR_MAX_INSTANCES)
{
return BCM_ERR_PARM;
}
/* Wildcard object: recurse once per concrete object id */
if ((unsigned)parm->object >= BCMOLT_OBJ_ID__NUM_OF)
{
bcmtr_handler_parm p1 = *parm;
for (p1.object = 0; p1.object < BCMOLT_OBJ_ID__NUM_OF && !err; p1.object++)
{
err = bcmtr_msg_handler_register(device, &p1);
/* Ignore RANGE error that indicates that the object being iterated doesn't have this group */
/* Ignore ALREADY error that indicates that registration is already present for specific message and was skipped */
if ((err == BCM_ERR_RANGE) || (err == BCM_ERR_ALREADY))
{
err = BCM_ERR_OK;
}
}
return err;
}
/* Wildcard subgroup: recurse over every subgroup that combines into a valid
 * message id. BCM_ERR_ALREADY on one subgroup does not stop the loop. */
if ((unsigned)parm->subgroup == BCMOLT_SUBGROUP_ANY)
{
bcmtr_handler_parm p1 = *parm;
for (p1.subgroup = 0;
bcmolt_group_id_combine(p1.object, p1.group, p1.subgroup, &msg_id) == BCM_ERR_OK &&
(err == BCM_ERR_OK || err == BCM_ERR_ALREADY);
p1.subgroup++)
{
err = bcmtr_msg_handler_register(device, &p1);
}
/* A trailing "already registered" is not an error for a wildcard request */
if (err == BCM_ERR_ALREADY)
{
err = BCM_ERR_OK;
}
return err;
}
/* Specific object/group/subgroup */
err = bcmolt_group_id_combine(parm->object, parm->group, parm->subgroup, &msg_id);
if (err)
return err;
h = &conn_info[device].msg_handler[msg_id][parm->instance];
/* Refuse new registration if already registered */
if (h->app_cb != _bcmtr_dft_msg_handler)
{
return BCM_ERR_ALREADY;
}
h->app_cb = parm->app_cb;
h->flags = parm->flags;
/* For module dispatch, record the target module: the explicitly requested
 * one, or the module the caller is currently running in */
if ((parm->flags & BCMOLT_AUTO_FLAGS_DISPATCH))
{
if (parm->module != BCMOS_MODULE_ID_NONE)
{
h->module = parm->module;
}
else
{
h->module = bcmos_module_current();
}
}
else
{
h->module = BCMOS_MODULE_ID_NONE;
}
#ifdef BCM_SUBSYSTEM_HOST
/* On the host, automatically connect on message handler registration */
err = _bcmtr_conn_get(device, &conn);
if (err)
return err;
/* Registration with tr-mux is per device, per-instance, per-object */
if (!parm->subgroup)
{
/* Send registration info to the mux driver. It is just a header */
bcmtr_hdr hdr;
memset(&hdr, 0, sizeof(hdr));
hdr.msg_id = msg_id;
hdr.auto_proxy_reg = 1;
hdr.instance = parm->instance;
err = _bcmtr_send_to_mux(conn, &hdr);
/* Roll back the local registration if the mux could not be notified */
if (err)
{
bcmtr_msg_handler_unregister(device, parm);
}
}
#else
(void)conn;
#endif
return err;
}
/** Unregister autonomous message handler
 *
 * Restores the default handler for the given message id / instance.
 * Mirrors bcmtr_msg_handler_register(): an out-of-range object and/or
 * subgroup == BCMOLT_SUBGROUP_ANY are expanded recursively.
 *
 * \param[in] device OLT device index
 * \param[in] parm Registration parameters (app_cb is not required here)
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_msg_handler_unregister(bcmolt_devid device, const bcmtr_handler_parm *parm)
{
bcmtr_conn *conn;
bcmos_errno err = BCM_ERR_OK;
bcmolt_group_id msg_id;
bcmtr_handler *h;
if (device >= BCMTR_MAX_OLTS || !parm || parm->instance >= BCMTR_MAX_INSTANCES)
{
return BCM_ERR_PARM;
}
/* Wildcard object: recurse once per concrete object id */
if ((unsigned)parm->object >= BCMOLT_OBJ_ID__NUM_OF)
{
bcmtr_handler_parm p1 = *parm;
for (p1.object = 0; p1.object < BCMOLT_OBJ_ID__NUM_OF && !err; p1.object++)
{
err = bcmtr_msg_handler_unregister(device, &p1);
/* Ignore RANGE error that indicates that the object being iterated doesn't have this group */
if (err == BCM_ERR_RANGE)
{
err = BCM_ERR_OK;
}
}
return err;
}
/* Wildcard subgroup: recurse over every subgroup that combines into a valid
 * message id */
if ((unsigned)parm->subgroup == BCMOLT_SUBGROUP_ANY)
{
bcmtr_handler_parm p1 = *parm;
for (p1.subgroup = 0;
bcmolt_group_id_combine(p1.object, p1.group, p1.subgroup, &msg_id) == BCM_ERR_OK && !err;
p1.subgroup++)
{
err = bcmtr_msg_handler_unregister(device, &p1);
}
return err;
}
err = bcmolt_group_id_combine(parm->object, parm->group, parm->subgroup, &msg_id);
if (err)
return err;
/* Reset the slot back to the default handler */
h = &conn_info[device].msg_handler[msg_id][parm->instance];
h->app_cb = _bcmtr_dft_msg_handler;
h->flags = BCMOLT_AUTO_FLAGS_NONE;
h->module = BCMOS_MODULE_ID_NONE;
#ifdef BCM_SUBSYSTEM_HOST
/* On the host, automatically connect on message handler (de)registration */
err = _bcmtr_conn_get(device, &conn);
if (err)
return err;
/* Registration with tr-mux is per device, per-instance, per-object */
if (!parm->subgroup)
{
/* Send un-registration info to the mux driver. It is just a header */
bcmtr_hdr hdr;
memset(&hdr, 0, sizeof(hdr));
hdr.msg_id = msg_id;
hdr.auto_proxy_unreg = 1;
hdr.instance = parm->instance;
err = _bcmtr_send_to_mux(conn, &hdr);
}
#else
(void)conn;
#endif
return err;
}
/** Get registration info
 *
 * \param[in] device OLT device index
 * \param[in,out] parm Registration parameters.
 * instance, group, object, subgroup must be set on entry;
 * app_cb, flags and module are filled on return
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_msg_handler_register_get(bcmolt_devid device, bcmtr_handler_parm *parm)
{
    bcmolt_group_id msg_id;
    bcmos_errno rc;
    const bcmtr_handler *hndlr;

    if (parm == NULL)
    {
        return BCM_ERR_PARM;
    }
    /* Wildcards are not meaningful for a query: exact ids are required */
    if (device >= BCMTR_MAX_OLTS ||
        parm->instance >= BCMTR_MAX_INSTANCES ||
        (unsigned)parm->object >= BCMOLT_OBJ_ID__NUM_OF ||
        (unsigned)parm->subgroup == BCMOLT_SUBGROUP_ANY)
    {
        return BCM_ERR_PARM;
    }
    rc = bcmolt_group_id_combine(parm->object, parm->group, parm->subgroup, &msg_id);
    if (rc != BCM_ERR_OK)
    {
        return rc;
    }
    hndlr = &conn_info[device].msg_handler[msg_id][parm->instance];
    parm->flags = hndlr->flags;
    parm->module = hndlr->module;
    /* Report NULL rather than exposing the internal default handler */
    parm->app_cb = (hndlr->app_cb != _bcmtr_dft_msg_handler) ? hndlr->app_cb : NULL;
    return BCM_ERR_OK;
}
/** Get transport statistics (read-and-clear)
 *
 * Copies the connection counters into *stat and zeroes them, so each call
 * reports activity since the previous call.
 *
 * \param[in]  device OLT device index
 * \param[out] stat   Statistics snapshot
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_stat_get(bcmolt_devid device, bcmtr_stat *stat)
{
    bcmos_errno rc;
    bcmtr_conn *conn;

    if (stat == NULL)
    {
        return BCM_ERR_PARM;
    }
    rc = _bcmtr_conn_get(device, &conn);
    if (rc != BCM_ERR_OK)
    {
        return rc;
    }
    /* Snapshot and reset atomically with respect to the connection lock */
    bcmos_mutex_lock(&conn->mutex);
    *stat = conn->stat;
    memset(&conn->stat, 0, sizeof(conn->stat));
    bcmos_mutex_unlock(&conn->mutex);
    return BCM_ERR_OK;
}
#if defined(SIMULATION_BUILD) && defined(LINUX_USER_SPACE) && defined(BCM_SUBSYSTEM_EMBEDDED) && defined(BCMTR_UDP_SUPPORT)
/* Pick a pseudo-random UDP port in [20000, 50000) so that multiple
 * simulations can run on the same PC without colliding.
 * Re-seeds rand() from the current timestamp on every call. */
static int _bcmtr_assign_random_port(void)
{
    int candidate;
    srand(bcmos_timestamp());
    candidate = rand() % 50000;
    return (candidate < 20000) ? candidate + 20000 : candidate;
}
#endif
/** Connect device in raw (proxy) mode. Receive task is NOT created.
 *
 * \param[in]  device   OLT device index
 * \param[out] headroom Transport headroom the caller must reserve in tx buffers
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_proxy_connect(bcmolt_devid device, uint32_t *headroom)
{
    bcmtr_conn *conn;
    bcmos_errno rc;

    /* Fix: guard the output pointer before dereferencing it
     * (consistent with the !stat check in bcmtr_stat_get) */
    if (headroom == NULL)
    {
        return BCM_ERR_PARM;
    }
    rc = _bcmtr_conn_get_any(device, &conn, BCMOS_TRUE);
    if (!rc)
    {
        *headroom = conn->cfg.plugin_cfg.headroom;
    }
    return rc;
}
/** Send data to device in raw mode.
 *
 * \param[in] device OLT device index
 * \param[in] tx_buf Pre-packed buffer (headroom already reserved by caller)
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_proxy_send(bcmolt_devid device, bcmolt_buf *tx_buf)
{
    bcmos_errno rc;
    bcmtr_conn *conn;

    rc = _bcmtr_conn_get_any(device, &conn, BCMOS_TRUE);
    if (rc != BCM_ERR_OK)
    {
        return rc;
    }
    return conn->driver.send(conn->drv_priv, 0, tx_buf, BCMTR_SEND_FLAGS_NONE);
}
/** Receive data from device in raw mode.
 *
 * \param[in]  device OLT device index
 * \param[out] rx_buf Received data
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_proxy_receive(bcmolt_devid device, bcmolt_buf *rx_buf)
{
    bcmos_errno rc;
    bcmolt_subchannel subch; /* reported by the driver; not used by raw mode */
    bcmtr_conn *conn;

    rc = _bcmtr_conn_get_any(device, &conn, BCMOS_TRUE);
    if (rc != BCM_ERR_OK)
    {
        return rc;
    }
    return conn->driver.recv(conn->drv_priv, &subch, rx_buf);
}
/** Initialize transport library.
 *
 * Sets UDP defaults (when BCMTR_UDP_SUPPORT is enabled), points every
 * per-device/per-message handler slot at the default handler and creates
 * the global connection lock.
 *
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_init(void)
{
    bcmolt_devid device;

    bcmos_printf("bcmtr_init: init transport library\n");
#if defined(BCMTR_UDP_SUPPORT)
    /* Set defaults and add configuration command */
    if (!bcmtr_host_ip)
        bcmtr_host_ip = BCMTR_TR_UDP_HOST_IP;
    if (!bcmtr_host_udp_port)
        bcmtr_host_udp_port = BCMTR_TR_UDP_HOST_PORT;
    for (device = 0; device < BCMTR_MAX_OLTS; device++)
    {
        if (!bcmtr_olt_ip[device])
            bcmtr_olt_ip[device] = BCMTR_TR_UDP_OLT_IP + device;
        if (!bcmtr_olt_udp_port[device])
            bcmtr_olt_udp_port[device] = BCMTR_TR_UDP_OLT_PORT;
    }
    /* A hack to allow multiple simulations run on the same PC */
#if defined(SIMULATION_BUILD) && defined(LINUX_USER_SPACE) && defined(BCM_SUBSYSTEM_EMBEDDED)
    {
        bcmtr_olt_udp_port[0] = _bcmtr_assign_random_port();
    }
#endif
#endif
    /* Initialize every handler slot to the default handler */
    for (device = 0; device < BCMTR_MAX_OLTS; device++)
    {
        bcmolt_group_id group;
        for (group = 0; group < BCMOLT_GROUP_ID__NUM_OF; group++)
        {
            int inst;
            for (inst = 0; inst < BCMTR_MAX_INSTANCES; inst++)
            {
                conn_info[device].msg_handler[group][inst].app_cb = _bcmtr_dft_msg_handler;
            }
        }
    }
    /* Fix: propagate the mutex creation result instead of discarding it
     * (the previous code always returned BCM_ERR_OK) */
    return bcmos_mutex_create(&conn_lock, 0, NULL);
}
/** Release resources used by transport library.
 *
 * Disconnects every OLT device and destroys the global connection lock.
 * \returns BCM_ERR_OK or error code
 */
bcmos_errno bcmtr_exit(void)
{
    int device;

    for (device = 0; device < BCMTR_MAX_OLTS; device++)
    {
        bcmtr_disconnect(device);
    }
    bcmos_mutex_destroy(&conn_lock);
    return BCM_ERR_OK;
}