1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (c) 2013 Broadcom
12 /******************************************************************************/
14 /* Global Variables */
16 /******************************************************************************/
18 uint8_t *ContextTableBase
;
19 extern RDD_CONNECTION_TABLE_DTS
*g_ds_connection_table_ptr
;
21 extern RDD_FC_MCAST_CONNECTION2_TABLE_DTS
*g_fc_mcast_connection2_table_ptr
;
22 extern uint8_t *g_runner_ddr_base_addr
;
23 extern uint32_t g_runner_ddr_base_addr_phys
;
24 extern uint8_t *g_runner_extra_ddr_base_addr
;
25 extern uint32_t g_runner_extra_ddr_base_addr_phys
;
26 extern uint32_t g_ddr_headroom_size
;
27 extern uint8_t *g_runner_tables_ptr
;
28 extern uint8_t g_broadcom_switch_mode
;
29 extern BL_LILAC_RDD_BRIDGE_PORT_DTE g_broadcom_switch_physical_port
;
30 extern uint32_t g_bridge_flow_cache_mode
;
31 extern uint8_t **g_cpu_tx_skb_pointers_reference_array
;
32 extern uint8_t *g_dhd_tx_cpu_usage_reference_array
;
33 extern rdd_phys_addr_t
*g_cpu_tx_data_pointers_reference_array
;
34 extern uint32_t g_cpu_tx_abs_packet_limit
;
35 extern rdd_phys_addr_t g_free_skb_indexes_fifo_table_physical_address
;
36 extern rdd_phys_addr_t g_free_skb_indexes_fifo_table_physical_address_last_idx
;
37 extern uint16_t *g_free_skb_indexes_fifo_table
;
38 extern RDD_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_DTE g_ingress_classification_rule_cfg_table
[ 2 ];
39 extern uint32_t g_rate_controllers_pool_idx
;
40 extern uint32_t g_chip_revision
;
41 extern RDD_WAN_TX_POINTERS_TABLE_DTS
*wan_tx_pointers_table_ptr
;
42 rdpa_bpm_buffer_size_t g_bpm_buffer_size
= LILAC_RDD_RUNNER_PACKET_BUFFER_SIZE
;
44 static BL_LILAC_RDD_ERROR_DTE
f_rdd_bpm_initialize ( uint32_t, uint32_t, uint32_t );
45 static BL_LILAC_RDD_ERROR_DTE
f_rdd_ddr_initialize ( uint32_t, uint32_t, uint32_t );
46 static BL_LILAC_RDD_ERROR_DTE
f_rdd_psram_initialize ( void );
47 static BL_LILAC_RDD_ERROR_DTE
f_rdd_scheduler_initialize ( void );
48 static BL_LILAC_RDD_ERROR_DTE
f_rdd_free_packet_descriptors_pool_initialize ( void );
49 static BL_LILAC_RDD_ERROR_DTE
f_rdd_global_registers_initialize ( void );
50 static BL_LILAC_RDD_ERROR_DTE
f_rdd_local_registers_initialize ( void );
51 static BL_LILAC_RDD_ERROR_DTE
f_rdd_ingress_classification_table_initialize ( void );
52 static BL_LILAC_RDD_ERROR_DTE
f_rdd_eth_tx_initialize ( void );
53 static BL_LILAC_RDD_ERROR_DTE
f_rdd_wan_tx_initialize ( void );
54 static BL_LILAC_RDD_ERROR_DTE
f_rdd_inter_task_queues_initialize ( void );
55 static BL_LILAC_RDD_ERROR_DTE
f_rdd_pm_counters_initialize ( void );
56 static BL_LILAC_RDD_ERROR_DTE
f_rdd_transmit_from_abs_address_initialize ( void );
57 static BL_LILAC_RDD_ERROR_DTE
f_rdd_parallel_processing_initialize ( void );
59 extern BL_LILAC_RDD_ERROR_DTE
rdd_firewall_initialize ( void );
60 extern BL_LILAC_RDD_ERROR_DTE
rdd_cpu_tx_initialize ( void );
61 extern BL_LILAC_RDD_ERROR_DTE
rdd_cpu_rx_initialize ( void );
62 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_mac_table_initialize ( uint32_t, uint32_t );
63 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_ingress_filters_cam_initialize ( void );
64 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_layer4_filters_initialize ( void );
65 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_vlan_matrix_initialize ( void );
66 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_connection_table_initialize ( void );
67 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_multicast_initialize ( void );
68 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_vid_cam_initialize ( void );
69 extern BL_LILAC_RDD_ERROR_DTE
f_rdd_ds_exponent_table_initialize ( void );
70 extern void f_rdd_full_flow_cache_config ( bdmf_boolean
);
72 BL_LILAC_RDD_ERROR_DTE
rdd_init ( void )
74 RUNNER_INST_MAIN
*sram_fast_program_ptr
;
75 RUNNER_INST_PICO
*sram_pico_program_ptr
;
76 RUNNER_COMMON
*sram_common_data_ptr
;
77 RUNNER_PRIVATE
*sram_private_data_ptr
;
78 RUNNER_CNTXT_MAIN
*sram_fast_context_ptr
;
79 RUNNER_CNTXT_PICO
*sram_pico_context_ptr
;
80 RUNNER_PRED_MAIN
*sram_fast_prediction_ptr
;
81 RUNNER_PRED_PICO
*sram_pico_prediction_ptr
;
83 /* reset SRAM program memory of both Runners */
84 sram_fast_program_ptr
= ( RUNNER_INST_MAIN
* )DEVICE_ADDRESS( RUNNER_INST_MAIN_0_OFFSET
);
85 rdp_mm_setl ( sram_fast_program_ptr
, 0, sizeof ( RUNNER_INST_MAIN
) );
87 sram_fast_program_ptr
= ( RUNNER_INST_MAIN
* )DEVICE_ADDRESS( RUNNER_INST_MAIN_1_OFFSET
);
88 rdp_mm_setl ( sram_fast_program_ptr
, 0, sizeof ( RUNNER_INST_MAIN
) );
90 sram_pico_program_ptr
= ( RUNNER_INST_PICO
* )DEVICE_ADDRESS( RUNNER_INST_PICO_0_OFFSET
);
91 rdp_mm_setl ( sram_pico_program_ptr
, 0, sizeof ( RUNNER_INST_PICO
) );
93 sram_pico_program_ptr
= ( RUNNER_INST_PICO
* )DEVICE_ADDRESS( RUNNER_INST_PICO_1_OFFSET
);
94 rdp_mm_setl ( sram_fast_program_ptr
, 0, sizeof ( RUNNER_INST_PICO
) );
96 /* reset SRAM common data memory of both Runners */
97 sram_common_data_ptr
= ( RUNNER_COMMON
* )DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET
);
98 rdp_mm_setl ( sram_common_data_ptr
, 0, sizeof ( RUNNER_COMMON
) );
100 sram_common_data_ptr
= ( RUNNER_COMMON
* )DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET
);
101 rdp_mm_setl ( sram_common_data_ptr
, 0, sizeof ( RUNNER_COMMON
) );
103 /* reset SRAM private data memory of both Runners */
104 sram_private_data_ptr
= ( RUNNER_PRIVATE
* )DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
);
105 rdp_mm_setl ( sram_private_data_ptr
, 0, sizeof ( RUNNER_PRIVATE
) );
107 sram_private_data_ptr
= ( RUNNER_PRIVATE
* )DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
);
108 rdp_mm_setl ( sram_private_data_ptr
, 0, sizeof ( RUNNER_PRIVATE
) );
110 /* reset SRAM context memory of both Runners */
111 sram_fast_context_ptr
= ( RUNNER_CNTXT_MAIN
* )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_0_OFFSET
);
112 rdp_mm_setl_context ( sram_fast_context_ptr
, 0, sizeof ( RUNNER_CNTXT_MAIN
) );
114 sram_fast_context_ptr
= ( RUNNER_CNTXT_MAIN
* )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_1_OFFSET
);
115 rdp_mm_setl_context ( sram_fast_context_ptr
, 0, sizeof ( RUNNER_CNTXT_MAIN
) );
117 sram_pico_context_ptr
= ( RUNNER_CNTXT_PICO
* )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_0_OFFSET
);
118 rdp_mm_setl_context ( sram_pico_context_ptr
, 0, sizeof ( RUNNER_CNTXT_PICO
) );
120 sram_pico_context_ptr
= ( RUNNER_CNTXT_PICO
* )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_1_OFFSET
);
121 rdp_mm_setl_context ( sram_pico_context_ptr
, 0, sizeof ( RUNNER_CNTXT_PICO
) );
123 /* reset SRAM prediction memory of both Runners */
124 sram_fast_prediction_ptr
= ( RUNNER_PRED_MAIN
* )DEVICE_ADDRESS( RUNNER_PRED_MAIN_0_OFFSET
);
125 rdp_mm_setl ( sram_fast_prediction_ptr
, 0, sizeof ( RUNNER_PRED_MAIN
) * 2 );
127 sram_fast_prediction_ptr
= ( RUNNER_PRED_MAIN
* )DEVICE_ADDRESS( RUNNER_PRED_MAIN_1_OFFSET
);
128 rdp_mm_setl ( sram_fast_prediction_ptr
, 0, sizeof ( RUNNER_PRED_MAIN
) * 2 );
130 sram_pico_prediction_ptr
= ( RUNNER_PRED_PICO
* )DEVICE_ADDRESS( RUNNER_PRED_PICO_0_OFFSET
);
131 rdp_mm_setl ( sram_pico_prediction_ptr
, 0, sizeof ( RUNNER_PRED_PICO
) * 2 );
133 sram_pico_prediction_ptr
= ( RUNNER_PRED_PICO
* )DEVICE_ADDRESS( RUNNER_PRED_PICO_1_OFFSET
);
134 rdp_mm_setl ( sram_pico_prediction_ptr
, 0, sizeof ( RUNNER_PRED_PICO
) * 2 );
136 return ( BL_LILAC_RDD_OK
);
139 BL_LILAC_RDD_ERROR_DTE
rdd_load_microcode ( uint8_t *xi_runer_A_microcode_ptr
,
140 uint8_t *xi_runer_B_microcode_ptr
,
141 uint8_t *xi_runer_C_microcode_ptr
,
142 uint8_t *xi_runer_D_microcode_ptr
)
144 RUNNER_INST_MAIN
*sram_fast_program_ptr
;
145 RUNNER_INST_PICO
*sram_pico_program_ptr
;
148 /* load the code segment into the SRAM program memory of fast Runner B */
149 sram_fast_program_ptr
= ( RUNNER_INST_MAIN
* )DEVICE_ADDRESS( RUNNER_INST_MAIN_1_OFFSET
);
150 MWRITE_BLK_32( sram_fast_program_ptr
, xi_runer_B_microcode_ptr
, sizeof ( RUNNER_INST_MAIN
) );
152 /* load the code segment into the SRAM program memory of pico Runner A */
153 sram_pico_program_ptr
= ( RUNNER_INST_PICO
* )DEVICE_ADDRESS( RUNNER_INST_PICO_0_OFFSET
);
154 MWRITE_BLK_32( sram_pico_program_ptr
, xi_runer_C_microcode_ptr
, sizeof ( RUNNER_INST_PICO
) );
156 /* load the code segment into the SRAM program memory of pico Runner B */
157 sram_pico_program_ptr
= ( RUNNER_INST_PICO
* )DEVICE_ADDRESS( RUNNER_INST_PICO_1_OFFSET
);
158 MWRITE_BLK_32( sram_pico_program_ptr
, xi_runer_D_microcode_ptr
, sizeof ( RUNNER_INST_PICO
) );
160 return ( BL_LILAC_RDD_OK
);
/*
 * memcpyl_prediction - copy prediction data into Runner prediction memory.
 *
 * Each 16-bit word of the source is zero-extended and written as one 32-bit
 * word to the destination (the prediction memory exposes 16-bit entries on a
 * 32-bit-word grid). __n is a byte count of the SOURCE; __n / 2 words are
 * copied, so the destination must hold 2 * __n bytes.
 *
 * On little-endian builds the 32-bit value is byte-swapped before the store
 * (prediction memory is big-endian).
 *
 * BUG FIX: the loop index `i` was used without a declaration in this block;
 * declared it locally.
 */
static void memcpyl_prediction ( void * __to, void * __from, unsigned int __n )
{
    uint8_t *src = (uint8_t *)__from;
    uint8_t *dst = (uint8_t *)__to;
    unsigned int i;

    for ( i = 0; i < ( __n / 2 ); i++, src += 2, dst += 4 )
    {
#ifdef _BYTE_ORDER_LITTLE_ENDIAN_
        *(volatile unsigned int *)dst = swap4bytes((unsigned int)(*(volatile unsigned short *)src));
#else
        *(volatile unsigned int *)dst = (unsigned int)(*(volatile unsigned short *)src);
#endif
    }
}
181 BL_LILAC_RDD_ERROR_DTE
rdd_load_prediction ( uint8_t *xi_runer_A_prediction_ptr
,
182 uint8_t *xi_runer_B_prediction_ptr
,
183 uint8_t *xi_runer_C_prediction_ptr
,
184 uint8_t *xi_runer_D_prediction_ptr
)
186 RUNNER_PRED_MAIN
*sram_fast_prediction_ptr
;
187 RUNNER_PRED_PICO
*sram_pico_prediction_ptr
;
189 sram_fast_prediction_ptr
= ( RUNNER_PRED_MAIN
* )DEVICE_ADDRESS( RUNNER_PRED_MAIN_0_OFFSET
);
190 memcpyl_prediction ( sram_fast_prediction_ptr
, xi_runer_A_prediction_ptr
, sizeof ( RUNNER_PRED_MAIN
) );
192 sram_fast_prediction_ptr
= ( RUNNER_PRED_MAIN
* )DEVICE_ADDRESS( RUNNER_PRED_MAIN_1_OFFSET
);
193 memcpyl_prediction ( sram_fast_prediction_ptr
, xi_runer_B_prediction_ptr
, sizeof ( RUNNER_PRED_MAIN
) );
195 sram_pico_prediction_ptr
= ( RUNNER_PRED_PICO
* )DEVICE_ADDRESS( RUNNER_PRED_PICO_0_OFFSET
);
196 memcpyl_prediction ( sram_pico_prediction_ptr
, xi_runer_C_prediction_ptr
, sizeof ( RUNNER_PRED_PICO
) );
198 sram_pico_prediction_ptr
= ( RUNNER_PRED_PICO
* )DEVICE_ADDRESS( RUNNER_PRED_PICO_1_OFFSET
);
199 memcpyl_prediction ( sram_pico_prediction_ptr
, xi_runer_D_prediction_ptr
, sizeof ( RUNNER_PRED_PICO
) );
201 return ( BL_LILAC_RDD_OK
);
205 BL_LILAC_RDD_ERROR_DTE
rdd_runner_enable ( void )
207 #if !defined(FIRMWARE_INIT)
208 RUNNER_REGS_CFG_GLOBAL_CTRL runner_global_control_register
;
210 /* enable Runner A through the global control register */
211 RUNNER_REGS_0_CFG_GLOBAL_CTRL_READ ( runner_global_control_register
);
212 runner_global_control_register
.pico_en
= LILAC_RDD_TRUE
;
213 runner_global_control_register
.main_cntxt_reb_en
= LILAC_RDD_TRUE
;
214 RUNNER_REGS_0_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register
);
216 /* enable Runner B through the global control register */
217 RUNNER_REGS_1_CFG_GLOBAL_CTRL_READ ( runner_global_control_register
);
218 runner_global_control_register
.main_en
= LILAC_RDD_TRUE
;
219 runner_global_control_register
.pico_en
= LILAC_RDD_TRUE
;
220 runner_global_control_register
.main_cntxt_reb_en
= LILAC_RDD_TRUE
;
221 RUNNER_REGS_1_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register
);
224 return ( BL_LILAC_RDD_OK
);
227 BL_LILAC_RDD_ERROR_DTE
rdd_runner_frequency_set ( uint16_t xi_runner_frequency
)
229 #if !defined(FIRMWARE_INIT)
230 RUNNER_REGS_CFG_GLOBAL_CTRL runner_global_control_register
;
232 /* set the frequency of the Runner through the global control register */
233 RUNNER_REGS_0_CFG_GLOBAL_CTRL_READ ( runner_global_control_register
);
234 runner_global_control_register
.micro_sec_val
= xi_runner_frequency
- 1;
235 RUNNER_REGS_0_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register
);
237 RUNNER_REGS_1_CFG_GLOBAL_CTRL_READ ( runner_global_control_register
);
238 runner_global_control_register
.micro_sec_val
= xi_runner_frequency
- 1;
239 RUNNER_REGS_1_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register
);
242 return ( BL_LILAC_RDD_OK
);
246 BL_LILAC_RDD_ERROR_DTE
rdd_data_structures_init ( RDD_INIT_PARAMS
*init_params
)
248 /* initialize the base address of the packets in the ddr */
249 g_runner_ddr_base_addr
= init_params
->ddr_pool_ptr
;
250 g_runner_ddr_base_addr_phys
= init_params
->ddr_pool_ptr_phys
;
251 g_runner_extra_ddr_base_addr
= init_params
->extra_ddr_pool_ptr
;
252 g_runner_extra_ddr_base_addr_phys
= init_params
->extra_ddr_pool_ptr_phys
;
253 g_runner_tables_ptr
= init_params
->ddr_runner_tables_ptr
;
254 g_ds_connection_table_ptr
= ( RDD_CONNECTION_TABLE_DTS
* )DsConnectionTableBase
;
255 #if !defined(FIRMWARE_INIT)
256 /* In simulation these are setup in rdd_sim_alloc_segments */
257 ContextTableBase
= g_runner_tables_ptr
+ CONTEXT_TABLE_ADDRESS
;
261 g_fc_mcast_connection2_table_ptr
= ( RDD_FC_MCAST_CONNECTION2_TABLE_DTS
* )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + FC_MCAST_CONNECTION2_TABLE_ADDRESS
);
263 g_ddr_headroom_size
= init_params
->ddr_headroom_size
;
265 g_broadcom_switch_mode
= init_params
->broadcom_switch_mode
;
266 g_broadcom_switch_physical_port
= init_params
->broadcom_switch_physical_port
;
268 g_bridge_flow_cache_mode
= init_params
->bridge_flow_cache_mode
;
269 g_chip_revision
= init_params
->chip_revision
;
271 /* check abs packet limit legal value*/
272 if( ( init_params
->cpu_tx_abs_packet_limit
<= LILAC_RDD_CPU_TX_SKB_LIMIT_MAX
) &&
273 ( init_params
->cpu_tx_abs_packet_limit
>= LILAC_RDD_CPU_TX_SKB_LIMIT_MIN
) &&
274 ( init_params
->cpu_tx_abs_packet_limit
% LILAC_RDD_CPU_TX_SKB_LIMIT_MIN
== 0 ) )
276 g_cpu_tx_abs_packet_limit
= init_params
->cpu_tx_abs_packet_limit
;
280 g_cpu_tx_abs_packet_limit
= LILAC_RDD_CPU_TX_SKB_LIMIT_MIN
;
283 /* initialize the base address of the BPM base address */
284 f_rdd_bpm_initialize(init_params
->ddr_pool_ptr_phys
, 0, init_params
->extra_ddr_pool_ptr_phys
);
286 /* initialize runner dma base address */
287 f_rdd_ddr_initialize(init_params
->ddr_pool_ptr_phys
, 0, g_ddr_headroom_size
);
289 /* initialize runner dma base address */
290 f_rdd_psram_initialize ();
292 /* initialize scheduler */
293 f_rdd_scheduler_initialize ();
295 /* create the Runner's free packet descriptors pool */
296 f_rdd_free_packet_descriptors_pool_initialize ();
298 /* initialize the CPU-RX mechanism */
299 rdd_cpu_rx_initialize ();
301 /* initialize the CPU-TX queue */
302 rdd_cpu_tx_initialize ();
304 /* initialize global registers */
305 f_rdd_global_registers_initialize ();
307 /* initialize the local registers through the Context memory */
308 f_rdd_local_registers_initialize ();
310 /* initialize ethernet tx queues and ports */
311 f_rdd_eth_tx_initialize ();
313 /* initialize WAN tx */
314 f_rdd_wan_tx_initialize ();
316 /* initialize inter task queues */
317 f_rdd_inter_task_queues_initialize ();
319 /* initialize PM counters */
320 f_rdd_pm_counters_initialize ();
322 /* initialize ingress classification table */
323 f_rdd_ingress_classification_table_initialize ();
325 /* set up the ETH0 EEE mode config message*/
326 MWRITE_32(DEVICE_ADDRESS(RUNNER_PRIVATE_1_OFFSET
) + US_ETH0_EEE_MODE_CONFIG_MESSAGE_ADDRESS
,
327 (BBH_PERIPHERAL_ETH0_TX
<<16)|BBTX_EEE_MODE_CONFIG_MESSAGE
);
329 /* initialize free skb indexes fifo and pointers*/
330 f_rdd_transmit_from_abs_address_initialize ();
332 /* Part of the bridge initialization. */
333 MWRITE_16( (DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DOWNSTREAM_MULTICAST_LAN_ENQUEUE_INGRESS_QUEUE_PTR_ADDRESS
), DOWNSTREAM_MULTICAST_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
);
335 /* initialize structures supporting parallel processing */
336 f_rdd_parallel_processing_initialize ();
338 /* set to not configured */
339 rdd_ethwan2_switch_port_config(0xff);
341 /* initialize ds rate limit exponent table */
342 f_rdd_ds_exponent_table_initialize ();
344 return ( BL_LILAC_RDD_OK
);
347 /******************************************************************************/
351 /* f_rdd_bpm_initialize */
355 /* Runner Initialization - initialize BPM */
359 /* This function returns the status of the operation */
367 /* xi_runner_ddr_pool_ptr - Packet DDR buffer base address */
368 /* xi_extra_ddr_pool_ptr - Packet DDR buffer base address (Multicast) */
369 /* xi_ddr_headroom_size - configurable headroom in addition to */
370 /* LILAC_RDD_PACKET_DDR_OFFSET */
377 /******************************************************************************/
378 static BL_LILAC_RDD_ERROR_DTE
f_rdd_bpm_initialize(uint32_t runner_ddr_pool_phys
,
379 uint32_t runner_ddr1_pool_phys
,
380 uint32_t runner_extra_ddr_pool_phys
)
382 uint32_t *bpm_ddr_base_ptr
;
383 uint32_t *bpm_extra_ddr_base_ptr
;
385 bpm_ddr_base_ptr
= (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_0_OFFSET
) + DS_BPM_DDR_BUFFERS_BASE_ADDRESS
);
386 MWRITE_32(bpm_ddr_base_ptr
, runner_ddr_pool_phys
);
387 bpm_ddr_base_ptr
= (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_1_OFFSET
) + US_BPM_DDR_BUFFERS_BASE_ADDRESS
);
388 MWRITE_32(bpm_ddr_base_ptr
, runner_ddr_pool_phys
);
390 bpm_extra_ddr_base_ptr
= (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_0_OFFSET
) + DS_BPM_EXTRA_DDR_BUFFERS_BASE_ADDRESS
);
391 MWRITE_32(bpm_extra_ddr_base_ptr
, runner_extra_ddr_pool_phys
);
393 bpm_extra_ddr_base_ptr
= (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_1_OFFSET
) + US_BPM_EXTRA_DDR_BUFFERS_BASE_ADDRESS
);
394 MWRITE_32(bpm_extra_ddr_base_ptr
, runner_extra_ddr_pool_phys
);
396 return ( BL_LILAC_RDD_OK
);
400 /******************************************************************************/
404 /* f_rdd_ddr_initialize */
408 /* Runner Initialization - initialize the runner ddr config register */
412 /* This function returns the status of the operation */
416 /* DDR_config Register */
420 /* xi_runner_ddr_pool_phys - Packet DDR buffer base address */
421 /* xi_ddr_headroom_size - configurable headroom in addition to */
422 /* LILAC_RDD_PACKET_DDR_OFFSET */
429 /******************************************************************************/
430 static BL_LILAC_RDD_ERROR_DTE
f_rdd_ddr_initialize(uint32_t xi_runner_ddr_pool_phys
,
431 uint32_t xi_runner_ddr1_pool_phys
,
432 uint32_t xi_ddr_headroom_size
)
434 RUNNER_REGS_CFG_DDR_CFG runner_ddr_config_register
;
435 RUNNER_REGS_CFG_DDR_LKUP_MASK0 runner_ddr_lkup_mask0_register
;
436 RUNNER_REGS_CFG_DDR_LKUP_MASK1 runner_ddr_lkup_mask1_register
;
437 uint32_t *ddr_address_ptr
; /* DSL */
439 runner_ddr_config_register
.buffer_offset
= LILAC_RDD_PACKET_DDR_OFFSET
;
440 runner_ddr_config_register
.rserved1
= 0;
441 runner_ddr_config_register
.dma_base
= (xi_runner_ddr_pool_phys
& 0x07E00000) >> 21;
442 runner_ddr_config_register
.buffer_size
= RDP_CFG_BUF_SIZE_VALUE
;
443 runner_ddr_config_register
.rserved2
= 0;
445 RUNNER_REGS_0_CFG_DDR_CFG_WRITE ( runner_ddr_config_register
);
446 RUNNER_REGS_1_CFG_DDR_CFG_WRITE ( runner_ddr_config_register
);
448 /* DDR lookup for routed packet - 5 tupples */
449 runner_ddr_lkup_mask0_register
.global_mask
= 0x000001FF;
451 RUNNER_REGS_0_CFG_DDR_LKUP_MASK0_WRITE ( runner_ddr_lkup_mask0_register
);
452 RUNNER_REGS_1_CFG_DDR_LKUP_MASK0_WRITE ( runner_ddr_lkup_mask0_register
);
454 /* DDR lookup for IPTV table - destination MAC, destination MAC + VLAN, destination IP */
455 runner_ddr_lkup_mask1_register
.global_mask
= 0x00000000;
457 RUNNER_REGS_0_CFG_DDR_LKUP_MASK1_WRITE ( runner_ddr_lkup_mask1_register
);
458 RUNNER_REGS_1_CFG_DDR_LKUP_MASK1_WRITE ( runner_ddr_lkup_mask1_register
);
460 ddr_address_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_PACKET_BUFFER_SIZE_ASR_8_ADDRESS
);
461 MWRITE_8( ddr_address_ptr
, g_bpm_buffer_size
>> 8 );
463 ddr_address_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_PACKET_BUFFER_SIZE_ASR_8_ADDRESS
);
464 MWRITE_8( ddr_address_ptr
, g_bpm_buffer_size
>> 8 );
466 return ( BL_LILAC_RDD_OK
);
470 /******************************************************************************/
474 /* f_rdd_psram_initialize */
478 /* Runner Initialization - initialize the runner psram config register */
482 /* This function returns the status of the operation */
486 /* PSRAM_config Register */
497 /******************************************************************************/
498 static BL_LILAC_RDD_ERROR_DTE
f_rdd_psram_initialize ( void )
500 RUNNER_REGS_CFG_PSRAM_CFG runner_psram_config_register
;
501 RUNNER_REGS_CFG_PSRAM_LKUP_MASK0 runner_psram_lkup_mask0_register
;
503 runner_psram_config_register
.buffer_offset
= LILAC_RDD_PACKET_DDR_OFFSET
;
504 runner_psram_config_register
.rserved1
= 0;
505 runner_psram_config_register
.buffer_size
= RUNNER_REGS_CFG_PSRAM_CFG_BUFFER_SIZE_BUFFER_SIZE_128BYTE_VALUE
;
506 runner_psram_config_register
.rserved2
= 0;
507 runner_psram_config_register
.dma_base
= 0;
509 RUNNER_REGS_0_CFG_PSRAM_CFG_WRITE ( runner_psram_config_register
);
510 RUNNER_REGS_1_CFG_PSRAM_CFG_WRITE ( runner_psram_config_register
);
513 /* PSRAM lookup for data collection - 5 tupples & layer 2 */
514 runner_psram_lkup_mask0_register
.global_mask
= 0x0000FFFF;
516 RUNNER_REGS_0_CFG_PSRAM_LKUP_MASK0_WRITE ( runner_psram_lkup_mask0_register
);
517 RUNNER_REGS_1_CFG_PSRAM_LKUP_MASK0_WRITE ( runner_psram_lkup_mask0_register
);
519 return ( BL_LILAC_RDD_OK
);
523 /******************************************************************************/
527 /* f_rdd_scheduler_initialize */
531 /* Runner Initialization - initialize the scheduler config register */
535 /* This function returns the status of the operation */
539 /* DDR_config Register */
550 /******************************************************************************/
551 static BL_LILAC_RDD_ERROR_DTE
f_rdd_scheduler_initialize ( void )
553 uint32_t runner_scheduler_cfg_register
;
555 /* fast Runner A - class C */
556 runner_scheduler_cfg_register
= ( RUNNER_REGS_CFG_MAIN_SCH_CFG_ARB_CLASS_USE_RR_VALUE
<< 6 ) |
557 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE
<< 5 ) |
558 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_A_DONT_USE_CLASS_A_VALUE
<< 4 ) |
559 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_31_24_RR_VALUE
<< 3 ) |
560 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_23_16_RR_VALUE
<< 2 ) |
561 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_15_8_RR_VALUE
<< 1 ) |
562 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_7_0_STRICT_VALUE
<< 0 );
564 RUNNER_REGS_0_CFG_MAIN_SCH_CFG_WRITE ( runner_scheduler_cfg_register
);
566 /* fast Runner B - class C */
567 runner_scheduler_cfg_register
= ( RUNNER_REGS_CFG_MAIN_SCH_CFG_ARB_CLASS_USE_RR_VALUE
<< 6 ) |
568 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE
<< 5 ) |
569 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_A_DONT_USE_CLASS_A_VALUE
<< 4 ) |
570 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_31_24_RR_VALUE
<< 3 ) |
571 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_23_16_RR_VALUE
<< 2 ) |
572 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_15_8_RR_VALUE
<< 1 ) |
573 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_7_0_STRICT_VALUE
<< 0 );
575 RUNNER_REGS_1_CFG_MAIN_SCH_CFG_WRITE ( runner_scheduler_cfg_register
);
577 /* pico Runner A - class A */
578 runner_scheduler_cfg_register
= ( RUNNER_REGS_CFG_PICO_SCH_CFG_ARB_CLASS_USE_RR_VALUE
<< 6 ) |
579 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE
<< 5 ) |
580 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_A_USE_CLASS_A_VALUE
<< 4 ) |
581 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_15_8_RR_VALUE
<< 1 ) |
582 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_7_0_RR_VALUE
<< 0 );
584 RUNNER_REGS_0_CFG_PICO_SCH_CFG_WRITE ( runner_scheduler_cfg_register
);
586 /* pico Runner B - class A */
587 runner_scheduler_cfg_register
= ( RUNNER_REGS_CFG_PICO_SCH_CFG_ARB_CLASS_USE_RR_VALUE
<< 6 ) |
588 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE
<< 5 ) |
589 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_A_USE_CLASS_A_VALUE
<< 4 ) |
590 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_15_8_RR_VALUE
<< 1 ) |
591 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_7_0_RR_VALUE
<< 0 );
593 RUNNER_REGS_1_CFG_PICO_SCH_CFG_WRITE ( runner_scheduler_cfg_register
);
595 return ( BL_LILAC_RDD_OK
);
599 /******************************************************************************/
603 /* f_rdd_free_packet_descriptors_pool_initialize */
607 /* Runner Initialization - initialize the list of the free buffers pool */
611 /* Upstream pool is implemented as a stack of 3072 packet descriptors */
612 /* Downstream pool is implemented as a list of 2048 packet descriptors */
623 /******************************************************************************/
624 static BL_LILAC_RDD_ERROR_DTE
f_rdd_free_packet_descriptors_pool_initialize ( void )
626 RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_DTS
*ds_free_packet_descriptors_pool_ptr
;
627 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DTS
*us_free_packet_descriptors_pool_ptr
;
628 RDD_PACKET_DESCRIPTOR_DTS
*packet_descriptor_ptr
;
629 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS
*free_packet_descriptors_pool_descriptor_ptr
;
630 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS
*us_free_packet_descriptors_pool_descriptor_ptr
;
631 uint32_t next_packet_descriptor_address
;
634 ds_free_packet_descriptors_pool_ptr
= ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_DTS
* )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
);
636 /* create the free packet descriptors pool as a list of packet descriptors */
637 for ( i
= 0; i
< RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE
; i
++ )
639 packet_descriptor_ptr
= &( ds_free_packet_descriptors_pool_ptr
->entry
[ i
].packet_descriptor
);
641 /* the last packet descriptor should point to NULL, the others points to the next packet descriptor */
642 if ( i
== ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE
- 1 ) )
644 next_packet_descriptor_address
= 0;
648 next_packet_descriptor_address
= DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
+ ( i
+ 1 ) * sizeof(RDD_PACKET_DESCRIPTOR_DTS
);
651 RDD_PACKET_DESCRIPTOR_NEXT_PACKET_DESCRIPTOR_POINTER_WRITE ( next_packet_descriptor_address
, packet_descriptor_ptr
);
654 free_packet_descriptors_pool_descriptor_ptr
= ( RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS
* )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ADDRESS
);
656 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_HEAD_POINTER_WRITE ( DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
, free_packet_descriptors_pool_descriptor_ptr
);
658 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_TAIL_POINTER_WRITE ( DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
+ ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE
- 1 ) * sizeof(RDD_PACKET_DESCRIPTOR_DTS
),
659 free_packet_descriptors_pool_descriptor_ptr
);
661 us_free_packet_descriptors_pool_ptr
= ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DTS
* )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
);
663 /* create the free packet descriptors pool as a stack of packet descriptors */
664 for ( i
= 0; i
< RDD_US_FREE_PACKET_DESCRIPTORS_POOL_SIZE
; i
++ )
666 packet_descriptor_ptr
= &( us_free_packet_descriptors_pool_ptr
->entry
[ i
].packet_descriptor
);
668 /* the last packet descriptor should point to NULL, the others points to the next packet descriptor */
669 if ( i
== ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_SIZE
- 1 ) )
671 next_packet_descriptor_address
= 0;
675 next_packet_descriptor_address
= US_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
+ ( i
+ 1 ) * sizeof(RDD_PACKET_DESCRIPTOR_DTS
);
678 RDD_PACKET_DESCRIPTOR_NEXT_PACKET_DESCRIPTOR_POINTER_WRITE ( next_packet_descriptor_address
, packet_descriptor_ptr
);
681 us_free_packet_descriptors_pool_descriptor_ptr
= ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS
* )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ADDRESS
);
683 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_THRESHOLD_WRITE ( US_FREE_PACKET_DESCRIPTOR_POOL_GUARANTEED_QUEUE_THRESHOLD
, us_free_packet_descriptors_pool_descriptor_ptr
);
684 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_FREE_COUNT_WRITE (US_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE
, us_free_packet_descriptors_pool_descriptor_ptr
);
685 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_NON_GUARANTEED_FREE_COUNT_WRITE ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_SIZE
- US_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE
, us_free_packet_descriptors_pool_descriptor_ptr
);
687 return ( BL_LILAC_RDD_OK
);
691 /******************************************************************************/
695 /* f_rdd_global_registers_initialize */
699 /* Runner Initialization - initialize the global registers (R1-R7) */
703 /* Runners global registers (R1-R7) */
714 /******************************************************************************/
715 static BL_LILAC_RDD_ERROR_DTE
f_rdd_global_registers_initialize ( void )
717 uint32_t *global_register_init_ptr
;
718 uint32_t global_register
[ 8 ];
721 /********** Fast Runner A **********/
723 /* zero all global registers */
724 memset ( global_register
, 0, sizeof ( global_register
) );
726 /* R1 - constant one */
727 global_register
[ 1 ] = 1;
729 global_register
[ 2 ] = ( g_broadcom_switch_mode
<< DS_GLOBAL_CFG_BROADCOM_SWITCH_MODE_BIT_OFFSET
) |
730 ( 1 << DS_GLOBAL_CFG_FLOW_CACHE_MODE_BIT_OFFSET
) |
731 ( g_bridge_flow_cache_mode
<< DS_GLOBAL_CFG_BRIDGE_FLOW_CACHE_MODE_BIT_OFFSET
) |
732 ( g_chip_revision
<< DS_GLOBAL_CFG_CHIP_REVISION_OFFSET
);
734 global_register
[ 3 ] = ( DS_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS
<< 16 ) | DS_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS
;
735 global_register
[ 4 ] = DOWNSTREAM_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
<< 16 | DS_SQ_ENQUEUE_QUEUE_ADDRESS
;
736 global_register
[ 6 ] = ( DOWNSTREAM_MULTICAST_INGRESS_QUEUE_ADDRESS
<< 16 ) | DOWNSTREAM_MULTICAST_INGRESS_QUEUE_ADDRESS
;
738 global_register_init_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_FAST_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS
);
740 /* copy the global regsiters to the data SRAM, the firmware will load it from the SRAM at task -1 (initialization task) */
741 MWRITE_BLK_32( global_register_init_ptr
, global_register
, sizeof ( global_register
) );
744 /********** Fast Runner B **********/
746 /* zero all global registers */
747 memset ( global_register
, 0, sizeof ( global_register
) );
749 /* R1 - constant one */
750 global_register
[ 1 ] = 1;
752 /* R2 - head pointer of the free buffers pool stack */
753 global_register
[ 2 ] = US_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS
;
757 global_register
[ 7 ] = ( g_broadcom_switch_mode
<< US_GLOBAL_CFG_BROADCOM_SWITCH_MODE_BIT_OFFSET
) |
758 ( g_chip_revision
<< US_GLOBAL_CFG_CHIP_REVISION_OFFSET
);
760 global_register_init_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_FAST_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS
);
762 /* copy the global regsiters to the data SRAM, the firmware will load it from the SRAM at task -1 (initialization task) */
763 MWRITE_BLK_32( global_register_init_ptr
, global_register
, sizeof ( global_register
) );
766 /********** Pico Runner A **********/
768 /* zero all global registers */
769 memset ( global_register
, 0, sizeof ( global_register
) );
771 /* R1 - constant one */
772 global_register
[ 1 ] = 1;
774 global_register
[ 2 ] = ( g_chip_revision
<< DS_GLOBAL_CFG_CHIP_REVISION_OFFSET
);
776 global_register_init_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_PICO_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS
);
778 /* copy the global regsiters to the data SRAM, the firmware will load it from the SRAM at task -1 (initialization task) */
779 MWRITE_BLK_32( global_register_init_ptr
, global_register
, sizeof ( global_register
) );
782 /********** Pico Runner B **********/
784 /* zero all global registers */
785 memset ( global_register
, 0, sizeof ( global_register
) );
787 /* R1 - constant one */
788 global_register
[ 1 ] = 1;
790 global_register
[ 3 ] = US_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS
;
791 global_register
[ 3 ] |= US_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS
<< 16;
793 /* R4 - context_index_cache_write_index */
794 global_register
[ 4 ] = 0;
795 global_register
[ 7 ] = ( g_broadcom_switch_mode
<< US_GLOBAL_CFG_BROADCOM_SWITCH_MODE_BIT_OFFSET
) |
796 ( g_chip_revision
<< US_GLOBAL_CFG_CHIP_REVISION_OFFSET
);
798 global_register_init_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_PICO_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS
);
800 MWRITE_BLK_32( global_register_init_ptr
, global_register
, sizeof ( global_register
) );
802 return ( BL_LILAC_RDD_OK
);
806 /******************************************************************************/
810 /* f_rdd_local_registers_initialize */
814 /* Runner Initialization - initialize context memories of 4 Runners */
818 /* initialize the local registers (R8-R31), 32 threads for fast Runners */
819 /* and 16 threads for Pico Runners */
823 /* Runners local registers (R8-R31) */
834 /******************************************************************************/
835 static BL_LILAC_RDD_ERROR_DTE
f_rdd_local_registers_initialize ( void )
837 RUNNER_CNTXT_MAIN
*sram_fast_context_ptr
;
838 RUNNER_CNTXT_PICO
*sram_pico_context_ptr
;
839 static uint32_t local_register
[ 32 ][ 32 ];
841 /********** Fast Runner A **********/
843 sram_fast_context_ptr
= ( RUNNER_CNTXT_MAIN
* )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_0_OFFSET
);
845 /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
846 MREAD_BLK_32( local_register
, sram_fast_context_ptr
, sizeof ( RUNNER_CNTXT_MAIN
) );
849 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, cpu_tx_wakeup_request
) << 16;
850 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R8
] = ( CPU_TX_FAST_QUEUE_ADDRESS
<< 16 );
851 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R9
] = ( INGRESS_HANDLER_BUFFER_ADDRESS
<< 16 ) | DS_CPU_TX_BBH_DESCRIPTORS_ADDRESS
;
852 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R10
] = ( BBH_PERIPHERAL_IH
<< 16 ) | ( LILAC_RDD_IH_BUFFER_BBH_ADDRESS
+ LILAC_RDD_RUNNER_A_IH_BUFFER_BBH_OFFSET
);
853 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R11
] = ( BBH_PERIPHERAL_IH
<< 16 ) | LILAC_RDD_IH_HEADER_DESCRIPTOR_BBH_ADDRESS
;
856 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, cpu_rx_wakeup_request
) << 16;
857 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R8
] = CPU_RX_FAST_PD_INGRESS_QUEUE_ADDRESS
;
858 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R9
] = DS_CPU_RX_FAST_INGRESS_QUEUE_ADDRESS
| ( DS_CPU_RX_PICO_INGRESS_QUEUE_ADDRESS
<< 16 );
859 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
| ( CPU_RX_SQ_PD_INGRESS_QUEUE_ADDRESS
<< 16 );
860 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R11
] = DS_CPU_REASON_TO_METER_TABLE_ADDRESS
| ( CPU_RX_PD_INGRESS_QUEUE_ADDRESS
<< 16 );
862 /* Timer scheduler */
863 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, timer_scheduler_set
) << 16;
864 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R19
] = 0; /* RX_METER_INDEX */
865 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R21
] = DS_CPU_RX_METER_TABLE_ADDRESS
;
867 /* DS Policers budget allocator */
868 local_register
[ POLICER_BUDGET_ALLOCATOR_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, policer_budget_allocator_1st_wakeup_request
) << 16;
871 /* WAN Filters and Classification */
872 local_register
[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, wan_normal_wakeup_request
) << 16;
873 local_register
[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R8
] = GPON_RX_NORMAL_DESCRIPTORS_ADDRESS
<< 16 | BBH_PERIPHERAL_WAN_RX
;
874 local_register
[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R9
] = CAM_RESULT_SLOT_1
| ( CAM_RESULT_IO_ADDRESS_1
<< 16 );
875 local_register
[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
| CPU_REASON_WAN0_TABLE_INDEX
<< 16;
876 local_register
[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R13
] = ( DMA_LOOKUP_RESULT_SLOT_0
<< 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS
| ( DMA_LOOKUP_RESULT_IO_ADDRESS_0
<< 16 );
877 local_register
[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R14
] = WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
;
880 /* WAN1 Filters and Classification */
881 local_register
[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, wan_normal_wakeup_request
) << 16;
882 local_register
[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R8
] = ( ETH0_RX_DESCRIPTORS_ADDRESS
<< 16 ) | BBH_PERIPHERAL_ETH0_RX
;
883 local_register
[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R9
] = CAM_RESULT_SLOT_2
| ( CAM_RESULT_IO_ADDRESS_2
<< 16 );
884 local_register
[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
| ( CPU_REASON_WAN1_TABLE_INDEX
<< 16 );
885 local_register
[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R13
] = ( DMA_LOOKUP_RESULT_SLOT_1
<< 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS
| ( DMA_LOOKUP_RESULT_IO_ADDRESS_1
<< 16 );
886 local_register
[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R14
] = WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
;
888 /* ETHWAN2 Filters and Classification */
889 // FIXME!!! since this is a different thread from WAN1_FILTER... doesn't it require its own CAM_RESULT and DMA_LOOKUP?
890 local_register
[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, ethwan2_normal_wakeup_request
) << 16;
891 local_register
[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R8
] = (ETHWAN2_RX_INGRESS_QUEUE_ADDRESS
<<16 ) | ( 1 << WAN_FILTERS_AND_CLASSIFICATON_R8_ETHWAN2_INDICATION_OFFSET
) | BBH_PERIPHERAL_ETH0_RX
;
892 local_register
[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R9
] = CAM_RESULT_SLOT_2
| ( CAM_RESULT_IO_ADDRESS_2
<< 16 );
893 local_register
[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
| ( CPU_REASON_WAN1_TABLE_INDEX
<< 16 );
894 local_register
[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R13
] = ( DMA_LOOKUP_RESULT_SLOT_1
<< 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS
| ( DMA_LOOKUP_RESULT_IO_ADDRESS_1
<< 16 );
895 local_register
[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
][ CS_R14
] = ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
;
898 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, flow_cache_wakeup_request
) << 16;
899 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
][ CS_R8
] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS
+ 0 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0000);
900 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
][ CS_R9
] = ( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
<< 16 ) | ADDRESS_OF(runner_a
, flow_cache_wakeup_request
);
901 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
][ CS_R10
] = ( FLOW_CACHE_SLAVE0_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
903 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, flow_cache_wakeup_request
) << 16;
904 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
][ CS_R8
] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS
+ 1 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0010);
906 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
][ CS_R9
] = (( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
+ 1 ) << 16 ) | ADDRESS_OF(runner_a
, flow_cache_wakeup_request
);
907 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
][ CS_R10
] = ( FLOW_CACHE_SLAVE1_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
909 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, flow_cache_wakeup_request
) << 16;
910 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
][ CS_R8
] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS
+ 2 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0020);
911 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
][ CS_R9
] = (( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
+ 2 ) << 16 ) | ADDRESS_OF(runner_a
, flow_cache_wakeup_request
);
912 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
][ CS_R10
] = ( FLOW_CACHE_SLAVE2_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
914 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, flow_cache_wakeup_request
) << 16;
915 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
][ CS_R8
] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS
+ 3 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0030);
916 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
][ CS_R9
] = (( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
+ 3 ) << 16 ) | ADDRESS_OF(runner_a
, flow_cache_wakeup_request
);
917 local_register
[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
][ CS_R10
] = ( FLOW_CACHE_SLAVE3_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
919 /* Downstream Multicast */
920 local_register
[ DOWNSTREAM_MULTICAST_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, downstream_multicast_wakeup_request
) << 16;
921 local_register
[ DOWNSTREAM_MULTICAST_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
;
924 local_register
[ FREE_SKB_INDEX_FAST_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_a
, free_skb_index_wakeup_request
) << 16;
926 rdp_mm_cpyl_context ( sram_fast_context_ptr
, local_register
, sizeof ( RUNNER_CNTXT_MAIN
) );
928 /********** Fast Runner B **********/
930 sram_fast_context_ptr
= ( RUNNER_CNTXT_MAIN
* )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_1_OFFSET
);
932 /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
933 MREAD_BLK_32( local_register
, sram_fast_context_ptr
, sizeof ( RUNNER_CNTXT_MAIN
) );
936 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, cpu_tx_wakeup_request
) << 16;
937 local_register
[ CPU_TX_FAST_THREAD_NUMBER
][ CS_R8
] = CPU_TX_FAST_QUEUE_ADDRESS
;
940 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, cpu_rx_wakeup_request
) << 16;
941 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R9
] = US_CPU_RX_FAST_INGRESS_QUEUE_ADDRESS
+ ( US_CPU_RX_PICO_INGRESS_QUEUE_ADDRESS
<< 16 );
942 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
| (CPU_RX_SQ_PD_INGRESS_QUEUE_ADDRESS
<< 16);
943 local_register
[ CPU_RX_THREAD_NUMBER
][ CS_R11
] = US_CPU_REASON_TO_METER_TABLE_ADDRESS
;
945 /* upstream rate controllers budget allocator */
946 local_register
[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER
][ CS_R14
] = US_RATE_CONTROLLER_EXPONENT_TABLE_ADDRESS
;
947 local_register
[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, rate_control_budget_allocator_1st_wakeup_request
) << 16;
948 local_register
[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER
][ CS_R18
] = US_RATE_CONTROL_BUDGET_ALLOCATOR_TABLE_ADDRESS
;
949 local_register
[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER
][ CS_R31
] = 0; /* rate_controllers_group */
951 /* Timer scheduler */
952 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, timer_scheduler_set
) << 16;
953 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R19
] = 0; /* RX_METER_INDEX */
954 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R20
] = US_RATE_LIMITER_TABLE_ADDRESS
;
955 local_register
[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER
][ CS_R21
] = US_CPU_RX_METER_TABLE_ADDRESS
;
957 /* US Policers budget allocator */
958 local_register
[ POLICER_BUDGET_ALLOCATOR_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, policer_budget_allocator_1st_wakeup_request
) << 16;
962 local_register
[ WAN1_TX_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, wan_tx_wakeup_request
) << 16;
963 local_register
[ WAN1_TX_THREAD_NUMBER
][ CS_R8
] = ( RDD_WAN1_CHANNEL_BASE
<< 16 ) | ( DATA_POINTER_DUMMY_TARGET_ADDRESS
+ 4 );
964 local_register
[ WAN1_TX_THREAD_NUMBER
][ CS_R9
] = ( ETHWAN_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS
<< 16 ) | ETHWAN_ABSOLUTE_TX_BBH_COUNTER_ADDRESS
;
966 /* WAN enqueue (Flow Cache) */
967 local_register
[ WAN_ENQUEUE_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, wan_interworking_enqueue_wakeup_request
) << 16;
968 local_register
[ WAN_ENQUEUE_THREAD_NUMBER
][ CS_R9
] = ( WAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
<< 16 ) | ADDRESS_OF(runner_b
, wan_interworking_enqueue_wakeup_request
);
969 local_register
[ WAN_ENQUEUE_THREAD_NUMBER
][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
;
972 local_register
[ US_TIMER_7_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, timer_7_1st_wakeup_request
) << 16;
975 local_register
[ FREE_SKB_INDEX_FAST_THREAD_NUMBER
][ CS_R16
] = ADDRESS_OF(runner_b
, free_skb_index_wakeup_request
) << 16;
977 rdp_mm_cpyl_context ( sram_fast_context_ptr
, local_register
, sizeof ( RUNNER_CNTXT_MAIN
) );
979 /********** Pico Runner A **********/
981 sram_pico_context_ptr
= ( RUNNER_CNTXT_PICO
* )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_0_OFFSET
);
983 /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
984 MREAD_BLK_32( local_register
, sram_pico_context_ptr
, sizeof ( RUNNER_CNTXT_PICO
) );
987 local_register
[ CPU_TX_PICO_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, cpu_tx_wakeup_request
) << 16;
988 local_register
[ CPU_TX_PICO_THREAD_NUMBER
- 32 ][ CS_R9
] = CPU_TX_PICO_QUEUE_ADDRESS
;
990 /* CPU-RX interrupt coalescing timer */
991 local_register
[ CPU_RX_INTERRUPT_COALESCING_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, cpu_rx_int_coalesce_timer_1st_wakeup_request
) << 16;
993 /* Timer scheduler */
994 local_register
[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, timer_scheduler_set
) << 16;
995 local_register
[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER
- 32 ][ CS_R19
] = 0; /* rate limiter index */
996 local_register
[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER
- 32 ][ CS_R20
] = DS_RATE_LIMITER_TABLE_ADDRESS
;
997 local_register
[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER
- 32 ][ CS_R21
] = RATE_LIMITER_REMAINDER_TABLE_ADDRESS
;
999 /* Local switching LAN enqueue */
1000 local_register
[ LOCAL_SWITCHING_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R16
] = ( ADDRESS_OF(runner_c
, lan_enqueue_pd_wakeup_request
) << 16 ) | ADDRESS_OF(runner_c
, lan_enqueue_pd_wakeup_request
);
1001 local_register
[ LOCAL_SWITCHING_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R9
] = LOCAL_SWITCHING_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
;
1002 local_register
[ LOCAL_SWITCHING_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R10
] = DOWNSTREAM_LAN_ENQUEUE_SQ_PD_ADDRESS
;
1004 /* Downstream LAN enqueue */
1005 local_register
[ DOWNSTREAM_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, lan_enqueue_ih_wakeup_request
) << 16 | ADDRESS_OF(runner_c
, lan_enqueue_ih_wakeup_request
);
1006 local_register
[ DOWNSTREAM_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R9
] = DOWNSTREAM_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
<< 16;
1007 local_register
[ DOWNSTREAM_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
;
1009 /* Downstream multicast LAN enqueue */
1010 local_register
[ DOWNSTREAM_MULTICAST_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R16
] = ( ADDRESS_OF(runner_c
, multicast_lan_enqueue_wakeup_request
) << 16 ) | ADDRESS_OF(runner_c
, multicast_lan_enqueue_wakeup_request
);
1011 local_register
[ DOWNSTREAM_MULTICAST_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R9
] = DOWNSTREAM_MULTICAST_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
;
1012 local_register
[ DOWNSTREAM_MULTICAST_LAN_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
;
1014 /* Free SKB index */
1015 local_register
[ FREE_SKB_INDEX_PICO_A_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, free_skb_index_wakeup_request
) << 16;
1016 local_register
[ FREE_SKB_INDEX_PICO_A_THREAD_NUMBER
- 32 ][ CS_R9
] = 1; /* lag_port EMAC/BBH 1 */
1018 /* ETH-TX Inter LAN scheduling: thread 42 */
1019 local_register
[ ETH_TX_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, lan_tx_wakeup_request
) << 16 | (ADDRESS_OF(runner_c
, lan_tx_wakeup_request
) );
1020 local_register
[ ETH_TX_THREAD_NUMBER
- 32 ][ CS_R8
] = 0; /* inter_lan_scheduling_offset */
1023 local_register
[ DS_TIMER_7_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, timer_7_1st_wakeup_request
) << 16;
1025 /* Service Queue Enqueue: thread 44 */
1026 local_register
[ SERVICE_QUEUE_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, service_queue_enqueue_wakeup_request
) << 16;
1027 local_register
[ SERVICE_QUEUE_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R9
] = INGRESS_HANDLER_BUFFER_ADDRESS
;
1028 local_register
[ SERVICE_QUEUE_ENQUEUE_THREAD_NUMBER
- 32 ][ CS_R10
] = DS_SQ_ENQUEUE_QUEUE_ADDRESS
;
1030 /* Service Queue Dequeue: thread 45 */
1031 local_register
[ SERVICE_QUEUE_DEQUEUE_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_c
, service_queue_dequeue_wakeup_request
) << 16 | ADDRESS_OF(runner_c
, service_queue_dequeue_wakeup_request
);
1032 local_register
[ SERVICE_QUEUE_DEQUEUE_THREAD_NUMBER
- 32 ][ CS_R10
] = DOWNSTREAM_LAN_ENQUEUE_SQ_PD_ADDRESS
<< 16;
1033 local_register
[ SERVICE_QUEUE_DEQUEUE_THREAD_NUMBER
- 32 ][ CS_R11
] = CPU_RX_SQ_PD_INGRESS_QUEUE_ADDRESS
;
1035 rdp_mm_cpyl_context ( sram_pico_context_ptr
, local_register
, sizeof ( RUNNER_CNTXT_PICO
) );
1037 /********** Pico Runner B **********/
1039 sram_pico_context_ptr
= ( RUNNER_CNTXT_PICO
* )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_1_OFFSET
);
1041 /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
1042 MREAD_BLK_32( local_register
, sram_pico_context_ptr
, sizeof ( RUNNER_CNTXT_PICO
) );
1044 /* Timer scheduler */
1045 local_register
[ TIMER_SCHEDULER_PICO_B_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, timer_scheduler_set
) << 16;
1046 local_register
[ TIMER_SCHEDULER_PICO_B_THREAD_NUMBER
- 32 ][ CS_R19
] = 0; /* rate limiter index */
1047 local_register
[ TIMER_SCHEDULER_PICO_B_THREAD_NUMBER
- 32 ][ CS_R20
] = US_RATE_LIMITER_TABLE_ADDRESS
;
1049 #if defined(DSL_63138) || defined(DSL_63148)
1050 /* LAN-1 Filters and Classification - used by CFE boot loader */
1051 local_register
[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, lan_normal_wakeup_request
) << 16;
1052 local_register
[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
- 32 ][ CS_R8
] = ETH1_RX_DESCRIPTORS_ADDRESS
;
1053 local_register
[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
- 32 ][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
| ( HASH_RESULT_SLOT_1
<< 16 ) | ( HASH_RESULT_IO_ADDRESS_1
<< 24 );
1054 local_register
[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
- 32 ][ CS_R12
] = ( BBH_PERIPHERAL_ETH1_RX
<< 16 );
1055 local_register
[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER
- 32 ][ CS_R13
] = ( DMA_LOOKUP_RESULT_SLOT_3
<< 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS
| ( DMA_LOOKUP_RESULT_IO_ADDRESS_3
<< 16 );
1058 /* Free SKB index */
1059 local_register
[ FREE_SKB_INDEX_PICO_B_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, free_skb_index_wakeup_request
) << 16;
1062 local_register
[ LAN_DISPATCH_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, lan_dispatch_wakeup_request
) << 16;
1063 local_register
[ LAN_DISPATCH_THREAD_NUMBER
- 32 ][ CS_R10
] = INGRESS_HANDLER_BUFFER_ADDRESS
;
1064 local_register
[ LAN_DISPATCH_THREAD_NUMBER
- 32 ][ CS_R8
] = ETH1_RX_DESCRIPTORS_ADDRESS
;
1065 local_register
[ LAN_DISPATCH_THREAD_NUMBER
- 32 ][ CS_R15
] = BBH_PERIPHERAL_ETH1_RX
;
1066 local_register
[ LAN_DISPATCH_THREAD_NUMBER
- 32 ][ CS_R13
] = ( DMA_LOOKUP_RESULT_SLOT_4
<< 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS
| ( DMA_LOOKUP_RESULT_IO_ADDRESS_4
<< 16 );
1067 local_register
[ LAN_DISPATCH_THREAD_NUMBER
- 32 ][ CS_R14
] = LAN_DISPATCH_THREAD_NUMBER
;
1070 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, flow_cache_wakeup_request
) << 16;
1071 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
- 32 ][ CS_R8
] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS
+ 0 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0000);
1073 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
- 32 ][ CS_R9
] = ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
<< 16 );
1074 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
- 32 ][ CS_R10
] = ( FLOW_CACHE_SLAVE0_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
1077 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, flow_cache_wakeup_request
) << 16;
1079 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
- 32 ][ CS_R8
] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS
+ 1 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0010);
1081 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
- 32 ][ CS_R9
] = ( ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
+ 1 ) << 16 );
1082 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
- 32 ][ CS_R10
] = ( FLOW_CACHE_SLAVE1_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
1085 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, flow_cache_wakeup_request
) << 16;
1086 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
- 32 ][ CS_R8
] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS
+ 2 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0020);
1088 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
- 32 ][ CS_R9
] = ( ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
+ 2 ) << 16 );
1089 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
- 32 ][ CS_R10
] = ( FLOW_CACHE_SLAVE2_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
1092 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
- 32 ][ CS_R16
] = ADDRESS_OF(runner_d
, flow_cache_wakeup_request
) << 16;
1094 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
- 32 ][ CS_R8
] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS
+ 3 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2
* sizeof(RDD_CONNECTION_ENTRY_DTS
)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS
+ 0x0030);
1096 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
- 32 ][ CS_R9
] = ( ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
+ 3 ) << 16 );
1097 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
- 32 ][ CS_R10
] = ( FLOW_CACHE_SLAVE3_VECTOR_MASK
<< 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS
;
1099 #if defined(DSL_63138) || defined(DSL_63148)
1101 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER
- 32 ][ CS_R9
] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS
+ 0x00);
1102 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER
- 32 ][ CS_R9
] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS
+ 0x10);
1103 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER
- 32 ][ CS_R9
] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS
+ 0x20);
1104 local_register
[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER
- 32 ][ CS_R9
] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS
+ 0x30);
1107 rdp_mm_cpyl_context ( sram_pico_context_ptr
, local_register
, sizeof ( RUNNER_CNTXT_PICO
) );
1109 return ( BL_LILAC_RDD_OK
);
1112 static BL_LILAC_RDD_ERROR_DTE
f_rdd_transmit_from_abs_address_initialize ( void )
1114 uint8_t *free_indexes_local_fifo_tail_ptr
;
1115 uint16_t *free_indexes_fifo_tail_ptr
;
1116 uint16_t skb_enqueued_indexes_fifo
;
1117 uint16_t *skb_enqueued_indexes_fifo_ptr
;
1118 uint8_t *absolute_tx_counters_ptr
;
1120 uint32_t *ddr_address_ptr
;
1121 uint8_t skb_enqueued_indexes_fifo_size
;
1122 uint8_t *skb_enqueued_indexes_fifo_counters_ptr
;
1124 #if !defined(FIRMWARE_INIT)
1125 bdmf_phys_addr_t phy_addr
= 0;
1127 /* allocate skb pointer array reference (used only by SW) */
1128 g_cpu_tx_skb_pointers_reference_array
= (uint8_t **)KMALLOC(sizeof(uint8_t *) * g_cpu_tx_abs_packet_limit
, 0);
1129 g_dhd_tx_cpu_usage_reference_array
= (uint8_t *)KMALLOC(g_cpu_tx_abs_packet_limit
, 0);
1131 /* allocate data pointer array pointer (used both by SW & FW) */
1132 g_cpu_tx_data_pointers_reference_array
= (rdd_phys_addr_t
*)rdp_mm_aligned_alloc(sizeof(rdd_phys_addr_t
) * g_cpu_tx_abs_packet_limit
, &phy_addr
);
1134 ddr_address_ptr
= (uint32_t *)(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET
) + DDR_ADDRESS_FOR_SKB_DATA_POINTERS_TABLE_ADDRESS
);
1135 MWRITE_32(ddr_address_ptr
, (uint32_t)phy_addr
);
1137 /* allocate Free Indexes table (used both by SW & FW) */
1138 g_free_skb_indexes_fifo_table
= ( uint16_t * )rdp_mm_aligned_alloc( sizeof( uint16_t ) * g_cpu_tx_abs_packet_limit
, &phy_addr
);
1140 g_free_skb_indexes_fifo_table_physical_address
= (rdd_phys_addr_t
)phy_addr
;
1141 ddr_address_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET
) + DDR_ADDRESS_FOR_FREE_SKB_INDEXES_FIFO_TABLE_ADDRESS
);
1142 MWRITE_32( ddr_address_ptr
, g_free_skb_indexes_fifo_table_physical_address
);
1144 g_free_skb_indexes_fifo_table_physical_address_last_idx
= g_free_skb_indexes_fifo_table_physical_address
;
1145 g_free_skb_indexes_fifo_table_physical_address_last_idx
+= (g_cpu_tx_abs_packet_limit
- 1) * sizeof(uint16_t);
1146 ddr_address_ptr
= ( uint32_t * )(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET
) + DDR_ADDRESS_FOR_FREE_SKB_INDEXES_FIFO_TABLE_LAST_ENTRY_ADDRESS
);
1147 MWRITE_32( ddr_address_ptr
, g_free_skb_indexes_fifo_table_physical_address_last_idx
);
1149 /* Fill free indexes FIFO */
1150 for ( i
= 0; i
< g_cpu_tx_abs_packet_limit
; i
++ )
1152 g_free_skb_indexes_fifo_table
[ i
] = swap2bytes( i
);
1153 g_cpu_tx_data_pointers_reference_array
[ i
] = 0;
1154 g_cpu_tx_skb_pointers_reference_array
[ i
] = NULL
;
1155 g_dhd_tx_cpu_usage_reference_array
[ i
] = 0;
1159 /* update all local tail pointers to 0 */
1160 free_indexes_local_fifo_tail_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_FAST_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS
);
1161 MWRITE_8( free_indexes_local_fifo_tail_ptr
, 0 );
1162 free_indexes_local_fifo_tail_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_PICO_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS
);
1163 MWRITE_8( free_indexes_local_fifo_tail_ptr
, 0 );
1164 free_indexes_local_fifo_tail_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_FAST_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS
);
1165 MWRITE_8( free_indexes_local_fifo_tail_ptr
, 0 );
1166 free_indexes_local_fifo_tail_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_PICO_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS
);
1167 MWRITE_8( free_indexes_local_fifo_tail_ptr
, 0 );
1169 free_indexes_fifo_tail_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET
) + FREE_SKB_INDEXES_DDR_FIFO_TAIL_ADDRESS
);
1170 MWRITE_32( free_indexes_fifo_tail_ptr
, g_free_skb_indexes_fifo_table_physical_address
);
1172 /* Initialize pointers to EMAC enqueued indexes FIFO */
1173 skb_enqueued_indexes_fifo_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + EMAC_SKB_ENQUEUED_INDEXES_PUT_PTR_ADDRESS
);
1174 skb_enqueued_indexes_fifo_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + EMAC_SKB_ENQUEUED_INDEXES_FIFO_COUNTERS_ADDRESS
);
1176 skb_enqueued_indexes_fifo
= EMAC_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS
;
1178 for ( i
= BL_LILAC_RDD_EMAC_ID_0
; i
<= BL_LILAC_RDD_EMAC_ID_4
; i
++ )
1180 MWRITE_16( skb_enqueued_indexes_fifo_ptr
, skb_enqueued_indexes_fifo
);
1181 MWRITE_8(skb_enqueued_indexes_fifo_counters_ptr
, 16);
1183 skb_enqueued_indexes_fifo_ptr
++;
1184 skb_enqueued_indexes_fifo_counters_ptr
++;
1186 skb_enqueued_indexes_fifo
+= 32;
1189 skb_enqueued_indexes_fifo_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + EMAC_SKB_ENQUEUED_INDEXES_FREE_PTR_ADDRESS
);
1191 skb_enqueued_indexes_fifo
= EMAC_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS
;
1193 for ( i
= BL_LILAC_RDD_EMAC_ID_0
; i
<= BL_LILAC_RDD_EMAC_ID_4
; i
++ )
1195 MWRITE_16( skb_enqueued_indexes_fifo_ptr
, skb_enqueued_indexes_fifo
);
1197 skb_enqueued_indexes_fifo_ptr
++;
1198 skb_enqueued_indexes_fifo
+= 32;
1201 skb_enqueued_indexes_fifo_size
= 32;
1203 /* Initialize pointers to WAN enqueued indexes FIFO */
1204 skb_enqueued_indexes_fifo_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET
) + GPON_SKB_ENQUEUED_INDEXES_PUT_PTR_ADDRESS
- sizeof ( RUNNER_COMMON
) );
1206 skb_enqueued_indexes_fifo
= GPON_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS
;
1208 for ( i
= 0; i
< ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE
+ RDD_WAN_CHANNELS_8_39_TABLE_SIZE
); i
++ )
1210 MWRITE_16( skb_enqueued_indexes_fifo_ptr
, skb_enqueued_indexes_fifo
);
1212 skb_enqueued_indexes_fifo_ptr
++;
1213 skb_enqueued_indexes_fifo
+= skb_enqueued_indexes_fifo_size
;
1217 skb_enqueued_indexes_fifo_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET
) + GPON_SKB_ENQUEUED_INDEXES_FREE_PTR_ADDRESS
- sizeof ( RUNNER_COMMON
) );
1219 skb_enqueued_indexes_fifo
= GPON_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS
;
1221 for ( i
= 0; i
< ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE
+ RDD_WAN_CHANNELS_8_39_TABLE_SIZE
); i
++ )
1223 MWRITE_16( skb_enqueued_indexes_fifo_ptr
, skb_enqueued_indexes_fifo
);
1225 skb_enqueued_indexes_fifo_ptr
++;
1226 skb_enqueued_indexes_fifo
+= skb_enqueued_indexes_fifo_size
;
1230 /* Initialize to (-1) 6-bit value BBH and FW absolute TX counters */
1231 absolute_tx_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + EMAC_ABSOLUTE_TX_BBH_COUNTER_ADDRESS
);
1233 for ( i
= BL_LILAC_RDD_EMAC_ID_0
; i
<= BL_LILAC_RDD_EMAC_ID_4
; i
++ )
1235 MWRITE_8( absolute_tx_counters_ptr
, 0x3F );
1236 absolute_tx_counters_ptr
+= 8;
1239 absolute_tx_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + EMAC_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS
);
1241 for ( i
= BL_LILAC_RDD_EMAC_ID_0
; i
<= BL_LILAC_RDD_EMAC_ID_4
; i
++ )
1243 MWRITE_8( absolute_tx_counters_ptr
, 0x3F );
1244 absolute_tx_counters_ptr
++;
1247 absolute_tx_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + GPON_ABSOLUTE_TX_BBH_COUNTER_ADDRESS
);
1249 for ( i
= 0; i
< ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE
+ RDD_WAN_CHANNELS_8_39_TABLE_SIZE
); i
++ )
1251 MWRITE_8( absolute_tx_counters_ptr
, 0x3F );
1252 absolute_tx_counters_ptr
++;
1255 absolute_tx_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + GPON_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS
);
1257 for ( i
= 0; i
< ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE
+ RDD_WAN_CHANNELS_8_39_TABLE_SIZE
); i
++ )
1259 MWRITE_8( absolute_tx_counters_ptr
, 0x3F );
1260 absolute_tx_counters_ptr
++;
1263 absolute_tx_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + ETHWAN_ABSOLUTE_TX_BBH_COUNTER_ADDRESS
);
1264 MWRITE_8( absolute_tx_counters_ptr
, 0x3F );
1266 absolute_tx_counters_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + ETHWAN_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS
);
1267 MWRITE_8( absolute_tx_counters_ptr
, 0x3F );
1269 return ( BL_LILAC_RDD_OK
);
1272 static BL_LILAC_RDD_ERROR_DTE
f_rdd_ingress_classification_table_initialize ( void )
1274 RDD_DS_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_DTS
*ds_rule_cfg_table_ptr
;
1275 RDD_US_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_DTS
*us_rule_cfg_table_ptr
;
1276 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_DTS
*rule_cfg_entry_ptr
;
1277 uint8_t *rule_cfg_descriptor_ptr
;
1278 uint32_t rule_cfg_id
;
1280 for (rule_cfg_id
= 0; rule_cfg_id
< 16; rule_cfg_id
++)
1282 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].rule_cfg
[ rule_cfg_id
].valid
= 0;
1283 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].rule_cfg
[ rule_cfg_id
].priority
= -1;
1284 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].rule_cfg
[ rule_cfg_id
].rule_type
= 0;
1285 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].rule_cfg
[ rule_cfg_id
].next_group_id
= 16;
1286 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].rule_cfg
[ rule_cfg_id
].next_rule_cfg_id
= 16;
1288 ds_rule_cfg_table_ptr
= RDD_DS_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_PTR();
1290 rule_cfg_entry_ptr
= &( ds_rule_cfg_table_ptr
->entry
[ rule_cfg_id
] );
1292 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_RULE_CFG_ID_WRITE ( 16, rule_cfg_entry_ptr
);
1293 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_GROUP_ID_WRITE ( 16, rule_cfg_entry_ptr
);
1295 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].rule_cfg
[ rule_cfg_id
].valid
= 0;
1296 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].rule_cfg
[ rule_cfg_id
].priority
= -1;
1297 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].rule_cfg
[ rule_cfg_id
].rule_type
= 0;
1298 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].rule_cfg
[ rule_cfg_id
].next_group_id
= 16;
1299 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].rule_cfg
[ rule_cfg_id
].next_rule_cfg_id
= 16;
1301 us_rule_cfg_table_ptr
= RDD_US_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_PTR();
1303 rule_cfg_entry_ptr
= &( us_rule_cfg_table_ptr
->entry
[ rule_cfg_id
] );
1305 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_RULE_CFG_ID_WRITE ( 16, rule_cfg_entry_ptr
);
1306 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_GROUP_ID_WRITE ( 16, rule_cfg_entry_ptr
);
1309 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].first_rule_cfg_id
= 16;
1310 g_ingress_classification_rule_cfg_table
[ rdpa_dir_ds
].first_gen_filter_rule_cfg_id
= 16;
1311 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].first_rule_cfg_id
= 16;
1312 g_ingress_classification_rule_cfg_table
[ rdpa_dir_us
].first_gen_filter_rule_cfg_id
= 16;
1314 rule_cfg_descriptor_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_INGRESS_CLASSIFICATION_RULE_CFG_DESCRIPTOR_ADDRESS
);
1316 MWRITE_8( rule_cfg_descriptor_ptr
, 16 );
1318 rule_cfg_descriptor_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_INGRESS_CLASSIFICATION_RULE_CFG_DESCRIPTOR_ADDRESS
);
1320 MWRITE_8( rule_cfg_descriptor_ptr
, 16 );
1322 rule_cfg_descriptor_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_INGRESS_CLASSIFICATION_IP_FLOW_RULE_CFG_DESCRIPTOR_ADDRESS
);
1324 MWRITE_8( rule_cfg_descriptor_ptr
, 16 );
1326 rule_cfg_descriptor_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_INGRESS_CLASSIFICATION_IP_FLOW_RULE_CFG_DESCRIPTOR_ADDRESS
);
1328 MWRITE_8( rule_cfg_descriptor_ptr
, 16 );
1329 return ( BL_LILAC_RDD_OK
);
1332 static BL_LILAC_RDD_ERROR_DTE
f_rdd_eth_tx_initialize ( void )
1334 RDD_ETH_TX_MAC_TABLE_DTS
*eth_tx_mac_table
;
1335 RDD_ETH_TX_MAC_DESCRIPTOR_DTS
*eth_tx_mac_descriptor
;
1336 RDD_ETH_TX_QUEUES_TABLE_DTS
*eth_tx_queues_table
;
1337 RDD_ETH_TX_QUEUE_DESCRIPTOR_DTS
*eth_tx_queue_descriptor
;
1338 RDD_ETH_TX_QUEUES_POINTERS_TABLE_DTS
*eth_tx_queues_pointers_table
;
1339 RDD_ETH_TX_QUEUE_POINTERS_ENTRY_DTS
*eth_tx_queue_pointers_entry
;
1340 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS
*free_packet_descriptors_pool_descriptor
;
1341 RDD_ETH_TX_LOCAL_REGISTERS_DTS
*eth_tx_local_registers
;
1342 RDD_ETH_TX_LOCAL_REGISTERS_ENTRY_DTS
*eth_tx_local_registers_entry
;
1343 uint16_t eth_tx_queue_address
;
1344 uint16_t mac_descriptor_address
;
1348 eth_tx_mac_table
= RDD_ETH_TX_MAC_TABLE_PTR();
1350 eth_tx_queues_table
= RDD_ETH_TX_QUEUES_TABLE_PTR();
1352 eth_tx_queues_pointers_table
= RDD_ETH_TX_QUEUES_POINTERS_TABLE_PTR();
1354 eth_tx_local_registers
= RDD_ETH_TX_LOCAL_REGISTERS_PTR();
1356 for (emac
= BL_LILAC_RDD_EMAC_ID_0
; emac
< BL_LILAC_RDD_EMAC_ID_COUNT
; emac
++)
1358 eth_tx_mac_descriptor
= &(eth_tx_mac_table
->entry
[emac
]);
1360 RDD_ETH_TX_MAC_DESCRIPTOR_TX_TASK_NUMBER_WRITE(ETH_TX_THREAD_NUMBER
, eth_tx_mac_descriptor
);
1361 RDD_ETH_TX_MAC_DESCRIPTOR_EMAC_MASK_WRITE((1 << emac
), eth_tx_mac_descriptor
);
1362 RDD_ETH_TX_MAC_DESCRIPTOR_GPIO_FLOW_CONTROL_VECTOR_PTR_WRITE((RDD_GPIO_IO_ADDRESS
+ (emac
- BL_LILAC_RDD_EMAC_ID_0
)), eth_tx_mac_descriptor
);
1363 RDD_ETH_TX_MAC_DESCRIPTOR_PACKET_COUNTERS_PTR_1_WRITE(ETH_TX_MAC_TABLE_ADDRESS
+
1364 BL_LILAC_RDD_EMAC_ID_1
* sizeof(RDD_ETH_TX_MAC_DESCRIPTOR_DTS
) + RDD_EMAC_DESCRIPTOR_EGRESS_COUNTER_OFFSET
,
1365 eth_tx_mac_descriptor
);
1366 RDD_ETH_TX_MAC_DESCRIPTOR_BBH_DESTINATION_1_WRITE(BBH_PERIPHERAL_ETH1_TX
, eth_tx_mac_descriptor
);
1367 RDD_ETH_TX_MAC_DESCRIPTOR_EGRESS_PORT_WRITE((emac
- BL_LILAC_RDD_EMAC_ID_0
), eth_tx_mac_descriptor
);
1368 RDD_ETH_TX_MAC_DESCRIPTOR_RATE_LIMITER_ID_WRITE(RDD_RATE_LIMITER_IDLE
, eth_tx_mac_descriptor
);
1370 for (tx_queue
= 0; tx_queue
< RDD_EMAC_NUMBER_OF_QUEUES
; tx_queue
++)
1372 eth_tx_queue_address
= ETH_TX_QUEUES_TABLE_ADDRESS
+
1373 ((emac
- BL_LILAC_RDD_EMAC_ID_0
) * RDD_EMAC_NUMBER_OF_QUEUES
+ tx_queue
) * sizeof(RDD_ETH_TX_QUEUE_DESCRIPTOR_DTS
);
1375 mac_descriptor_address
= ETH_TX_MAC_TABLE_ADDRESS
+ emac
* sizeof(RDD_ETH_TX_MAC_DESCRIPTOR_DTS
);
1377 eth_tx_queue_pointers_entry
=
1378 &(eth_tx_queues_pointers_table
->entry
[emac
* RDD_EMAC_NUMBER_OF_QUEUES
+ tx_queue
]);
1380 RDD_ETH_TX_QUEUE_POINTERS_ENTRY_ETH_MAC_POINTER_WRITE(mac_descriptor_address
, eth_tx_queue_pointers_entry
);
1381 RDD_ETH_TX_QUEUE_POINTERS_ENTRY_TX_QUEUE_POINTER_WRITE(eth_tx_queue_address
, eth_tx_queue_pointers_entry
);
1383 eth_tx_queue_descriptor
= &(eth_tx_queues_table
->entry
[(emac
- BL_LILAC_RDD_EMAC_ID_0
) * RDD_EMAC_NUMBER_OF_QUEUES
+ tx_queue
]);
1385 RDD_ETH_TX_QUEUE_DESCRIPTOR_QUEUE_MASK_WRITE(1 << tx_queue
, eth_tx_queue_descriptor
);
1386 RDD_ETH_TX_QUEUE_DESCRIPTOR_INDEX_WRITE((emac
* RDD_EMAC_NUMBER_OF_QUEUES
) + tx_queue
, eth_tx_queue_descriptor
);
1388 eth_tx_local_registers_entry
= &(eth_tx_local_registers
->entry
[emac
]);
1390 RDD_ETH_TX_LOCAL_REGISTERS_ENTRY_EMAC_DESCRIPTOR_PTR_WRITE(ETH_TX_MAC_TABLE_ADDRESS
+
1391 emac
* sizeof(RDD_ETH_TX_MAC_DESCRIPTOR_DTS
), eth_tx_local_registers_entry
);
1393 RDD_ETH_TX_LOCAL_REGISTERS_ENTRY_ETH_TX_QUEUES_POINTERS_TABLE_PTR_WRITE(ETH_TX_QUEUES_POINTERS_TABLE_ADDRESS
+
1394 emac
* RDD_EMAC_NUMBER_OF_QUEUES
* sizeof(RDD_ETH_TX_QUEUE_POINTERS_ENTRY_DTS
), eth_tx_local_registers_entry
);
1397 free_packet_descriptors_pool_descriptor
=
1398 (RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS
*)(DEVICE_ADDRESS(RUNNER_PRIVATE_0_OFFSET
) +
1399 FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ADDRESS
);
1401 /*Initial values, will be updated by rdd_tm_ds_free_packet_descriptors_pool_size_update.*/
1402 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_THRESHOLD_WRITE ( DS_FREE_PACKET_DESCRIPTOR_POOL_GUARANTEED_QUEUE_THRESHOLD
, free_packet_descriptors_pool_descriptor
);
1403 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_FREE_COUNT_WRITE (DS_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE
, free_packet_descriptors_pool_descriptor
);
1404 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_NON_GUARANTEED_FREE_COUNT_WRITE ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE
- DS_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE
, free_packet_descriptors_pool_descriptor
);
1406 return ( BL_LILAC_RDD_OK
);
/* Initialize the WAN TX path: allocate the host-side WAN TX pointers
 * shadow table, set up the "dummy" (packet-dropping) queue and rate
 * controller, point every WAN channel's rate controllers at the dummy
 * controller, and program the three rate-controller exponent entries.
 *
 * Returns: BL_LILAC_RDD_OK on success,
 *          BL_LILAC_RDD_ERROR_MALLOC_FAILED if the shadow table
 *          allocation fails.
 *
 * NOTE(review): wan_tx_pointers_table_ptr is overwritten without freeing
 * any previous allocation — presumably this is called exactly once during
 * driver init; verify against callers.
 */
static BL_LILAC_RDD_ERROR_DTE f_rdd_wan_tx_initialize ( void )
{
    RDD_WAN_CHANNELS_0_7_TABLE_DTS         *wan_channels_0_7_table_ptr;
    RDD_WAN_CHANNEL_0_7_DESCRIPTOR_DTS     *wan_channel_0_7_descriptor_ptr;
    RDD_WAN_CHANNELS_8_39_TABLE_DTS        *wan_channels_8_39_table_ptr;
    RDD_WAN_CHANNEL_8_39_DESCRIPTOR_DTS    *wan_channel_8_39_descriptor_ptr;
    RDD_US_RATE_CONTROLLER_DESCRIPTOR_DTS  *dummy_rate_controller_descriptor_ptr;
    RDD_WAN_TX_QUEUE_DESCRIPTOR_DTS        *dummy_wan_tx_queue_descriptor_ptr;
    RDD_RATE_CONTROLLER_EXPONENT_TABLE_DTS *exponent_table_ptr;
    RDD_RATE_CONTROLLER_EXPONENT_ENTRY_DTS *exponent_entry_ptr;
    uint32_t                               wan_channel_id;
    uint32_t                               rate_controller_id;
    uint32_t                               tx_queue_id;

    /* initialize WAN TX pointers table */
    wan_tx_pointers_table_ptr = ( RDD_WAN_TX_POINTERS_TABLE_DTS * )malloc( sizeof( RDD_WAN_TX_POINTERS_TABLE_DTS ) );

    if ( wan_tx_pointers_table_ptr == NULL )
    {
        return ( BL_LILAC_RDD_ERROR_MALLOC_FAILED );
    }

    memset ( wan_tx_pointers_table_ptr, 0, sizeof ( RDD_WAN_TX_POINTERS_TABLE_DTS ) );

    /* reset the dummy segmentation descriptors threshold to zero in order to drop packets */
    /* (the "- sizeof ( RUNNER_COMMON )" term rebases the common-memory
     *  address into the RUNNER_COMMON_1 window — assumed from the
     *  addressing convention used here; TODO confirm) */
    dummy_wan_tx_queue_descriptor_ptr = ( RDD_WAN_TX_QUEUE_DESCRIPTOR_DTS * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET ) + DUMMY_WAN_TX_QUEUE_DESCRIPTOR_ADDRESS - sizeof ( RUNNER_COMMON ) );

    RDD_WAN_TX_QUEUE_DESCRIPTOR_PACKET_THRESHOLD_WRITE ( 0, dummy_wan_tx_queue_descriptor_ptr );
    RDD_WAN_TX_QUEUE_DESCRIPTOR_PROFILE_PTR_WRITE ( 0, dummy_wan_tx_queue_descriptor_ptr );

    /* all the queues of the dummy rate controller will point to the dummy queue */
    dummy_rate_controller_descriptor_ptr = ( RDD_US_RATE_CONTROLLER_DESCRIPTOR_DTS * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET ) + DUMMY_RATE_CONTROLLER_DESCRIPTOR_ADDRESS - sizeof ( RUNNER_COMMON ) );

    for ( tx_queue_id = 0; tx_queue_id < RDD_US_RATE_CONTROLLER_DESCRIPTOR_TX_QUEUE_ADDR_NUMBER; tx_queue_id++ )
    {
        RDD_US_RATE_CONTROLLER_DESCRIPTOR_TX_QUEUE_ADDR_WRITE ( DUMMY_WAN_TX_QUEUE_DESCRIPTOR_ADDRESS, dummy_rate_controller_descriptor_ptr, tx_queue_id );
    }

    /* connect all the tconts to the dummy rate rate controller */
    wan_channels_0_7_table_ptr = ( RDD_WAN_CHANNELS_0_7_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + WAN_CHANNELS_0_7_TABLE_ADDRESS );

    /* channels 0-7 each carry 32 rate controllers */
    for ( wan_channel_id = RDD_WAN_CHANNEL_0; wan_channel_id <= RDD_WAN_CHANNEL_7; wan_channel_id++ )
    {
        wan_channel_0_7_descriptor_ptr = &( wan_channels_0_7_table_ptr->entry[ wan_channel_id ] );

        for ( rate_controller_id = BL_LILAC_RDD_RATE_CONTROLLER_0; rate_controller_id <= BL_LILAC_RDD_RATE_CONTROLLER_31; rate_controller_id++ )
        {
            RDD_WAN_CHANNEL_0_7_DESCRIPTOR_RATE_CONTROLLER_ADDR_WRITE ( DUMMY_RATE_CONTROLLER_DESCRIPTOR_ADDRESS, wan_channel_0_7_descriptor_ptr, rate_controller_id );
        }
    }

    wan_channels_8_39_table_ptr = ( RDD_WAN_CHANNELS_8_39_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + WAN_CHANNELS_8_39_TABLE_ADDRESS );

    /* channels 8-39 each carry only 4 rate controllers; table is indexed
     * relative to channel 8 */
    for ( wan_channel_id = RDD_WAN_CHANNEL_8; wan_channel_id <= RDD_WAN_CHANNEL_39; wan_channel_id++ )
    {
        wan_channel_8_39_descriptor_ptr = &( wan_channels_8_39_table_ptr->entry[ wan_channel_id - RDD_WAN_CHANNEL_8 ] );

        for ( rate_controller_id = BL_LILAC_RDD_RATE_CONTROLLER_0; rate_controller_id <= BL_LILAC_RDD_RATE_CONTROLLER_3; rate_controller_id++ )
        {
            RDD_WAN_CHANNEL_8_39_DESCRIPTOR_RATE_CONTROLLER_ADDR_WRITE ( DUMMY_RATE_CONTROLLER_DESCRIPTOR_ADDRESS, wan_channel_8_39_descriptor_ptr, rate_controller_id );
        }
    }

    /* no rate controllers allocated from the pool yet */
    g_rate_controllers_pool_idx = 0;

    /* initialize exponents table */
    exponent_table_ptr = ( RDD_RATE_CONTROLLER_EXPONENT_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_RATE_CONTROLLER_EXPONENT_TABLE_ADDRESS );

    exponent_entry_ptr = &( exponent_table_ptr->entry[ 0 ] );
    RDD_RATE_CONTROLLER_EXPONENT_ENTRY_EXPONENT_WRITE ( RDD_RATE_CONTROL_EXPONENT0, exponent_entry_ptr );

    exponent_entry_ptr = &( exponent_table_ptr->entry[ 1 ] );
    RDD_RATE_CONTROLLER_EXPONENT_ENTRY_EXPONENT_WRITE ( RDD_RATE_CONTROL_EXPONENT1, exponent_entry_ptr );

    exponent_entry_ptr = &( exponent_table_ptr->entry[ 2 ] );
    RDD_RATE_CONTROLLER_EXPONENT_ENTRY_EXPONENT_WRITE ( RDD_RATE_CONTROL_EXPONENT2, exponent_entry_ptr );

    return ( BL_LILAC_RDD_OK );
}
1492 static BL_LILAC_RDD_ERROR_DTE
f_rdd_inter_task_queues_initialize ( void )
1494 uint16_t *wan_enqueue_ingress_queue_ptr
;
1495 uint16_t *ethwan2_rx_ingress_queue_ptr
;
1497 wan_enqueue_ingress_queue_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + WAN_ENQUEUE_INGRESS_QUEUE_PTR_ADDRESS
);
1498 MWRITE_16( wan_enqueue_ingress_queue_ptr
, WAN_ENQUEUE_INGRESS_QUEUE_ADDRESS
);
1500 ethwan2_rx_ingress_queue_ptr
= ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + ETHWAN2_RX_INGRESS_QUEUE_PTR_ADDRESS
);
1501 MWRITE_16( ethwan2_rx_ingress_queue_ptr
, ETHWAN2_RX_INGRESS_QUEUE_ADDRESS
);
1503 return ( BL_LILAC_RDD_OK
);
1507 static BL_LILAC_RDD_ERROR_DTE
f_rdd_pm_counters_initialize ( void )
1509 RUNNER_REGS_CFG_CNTR_CFG runner_counter_cfg_register
;
1511 runner_counter_cfg_register
.base_address
= ( PM_COUNTERS_ADDRESS
>> 3 );
1513 RUNNER_REGS_0_CFG_CNTR_CFG_WRITE ( runner_counter_cfg_register
);
1514 RUNNER_REGS_1_CFG_CNTR_CFG_WRITE ( runner_counter_cfg_register
);
1516 return ( BL_LILAC_RDD_OK
);
1520 static BL_LILAC_RDD_ERROR_DTE
f_rdd_parallel_processing_initialize ( void )
1522 RDD_DS_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_DTS
*ds_context_index_cache_cam
;
1523 RDD_US_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_DTS
*us_context_index_cache_cam
;
1524 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS
*ds_available_slave_vector_ptr
;
1525 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS
*us_available_slave_vector_ptr
;
1526 RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS
*ds_slave_ih_buffer_ptr
;
1527 RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS
*us_slave_ih_buffer_ptr
;
1528 uint16_t *ds_context_index_cache_cam_entry
;
1529 uint16_t *us_context_index_cache_cam_entry
;
1530 uint8_t *context_cache_state_ptr
;
1534 ds_available_slave_vector_ptr
= ( RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS
* )( DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_PARALLEL_PROCESSING_SLAVE_VECTOR_ADDRESS
);
1536 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE0_WRITE ( LILAC_RDD_TRUE
, ds_available_slave_vector_ptr
);
1537 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE1_WRITE ( LILAC_RDD_TRUE
, ds_available_slave_vector_ptr
);
1538 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE2_WRITE ( LILAC_RDD_TRUE
, ds_available_slave_vector_ptr
);
1539 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE3_WRITE ( LILAC_RDD_TRUE
, ds_available_slave_vector_ptr
);
1541 ds_slave_ih_buffer_ptr
= ( RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS
* )( DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_PTR_ADDRESS
);
1543 MWRITE_16( ds_slave_ih_buffer_ptr
, DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
);
1545 ds_context_index_cache_cam
= RDD_DS_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_PTR();
1547 for ( i
= 0; i
< RDD_DS_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_SIZE
; i
++ )
1549 ds_context_index_cache_cam_entry
= ( uint16_t * ) &ds_context_index_cache_cam
->entry
[ i
];
1551 MWRITE_16( ds_context_index_cache_cam_entry
, 0xFFFF );
1554 /* set context cache in enable mode */
1555 context_cache_state_ptr
= ( uint8_t * )( DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET
) + DS_PARALLEL_PROCESSING_CONTEXT_CACHE_MODE_ADDRESS
);
1557 MWRITE_8( context_cache_state_ptr
, 0x0 );
1560 us_available_slave_vector_ptr
= ( RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS
* )( DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_PARALLEL_PROCESSING_SLAVE_VECTOR_ADDRESS
);
1562 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE0_WRITE ( LILAC_RDD_TRUE
, us_available_slave_vector_ptr
);
1563 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE1_WRITE ( LILAC_RDD_TRUE
, us_available_slave_vector_ptr
);
1564 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE2_WRITE ( LILAC_RDD_TRUE
, us_available_slave_vector_ptr
);
1565 RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE3_WRITE ( LILAC_RDD_TRUE
, us_available_slave_vector_ptr
);
1567 us_slave_ih_buffer_ptr
= ( RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS
* )( DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_PTR_ADDRESS
);
1569 MWRITE_16( us_slave_ih_buffer_ptr
, US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS
);
1571 us_context_index_cache_cam
= RDD_US_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_PTR();
1573 for ( i
= 0; i
< RDD_US_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_SIZE
; i
++ )
1575 us_context_index_cache_cam_entry
= ( uint16_t * ) &us_context_index_cache_cam
->entry
[ i
];
1577 MWRITE_16( us_context_index_cache_cam_entry
, 0xFFFF );
1580 /* set context cache in enable mode */
1581 context_cache_state_ptr
= ( uint8_t * )( DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + US_PARALLEL_PROCESSING_CONTEXT_CACHE_MODE_ADDRESS
);
1583 MWRITE_8( context_cache_state_ptr
, 0x0 );
1585 return ( BL_LILAC_RDD_OK
);
1588 BL_LILAC_RDD_ERROR_DTE
rdd_ethwan2_switch_port_config ( uint8_t xi_switch_port
)
1590 uint8_t *ethwan2_switch_port_config_ptr
;
1592 ethwan2_switch_port_config_ptr
= ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET
) + ETHWAN2_SWITCH_PORT_ADDRESS
);
1593 MWRITE_8( ethwan2_switch_port_config_ptr
, xi_switch_port
);
1594 return ( BL_LILAC_RDD_OK
);