Add Broadcom / Netgear changes from RAXE 1.0.0.48
[project/bcm63xx/u-boot.git] / arch / arm / mach-bcmbca / rdp / rdd_init.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (c) 2013 Broadcom
4 */
5 /*
6
7 */
8
9 #include "rdd.h"
10 #include "rdp_mm.h"
11
12 /******************************************************************************/
13 /* */
14 /* Global Variables */
15 /* */
16 /******************************************************************************/
17
18 uint8_t *ContextTableBase;
19 extern RDD_CONNECTION_TABLE_DTS *g_ds_connection_table_ptr;
20 extern int g_dbg_lvl;
21 extern RDD_FC_MCAST_CONNECTION2_TABLE_DTS *g_fc_mcast_connection2_table_ptr;
22 extern uint8_t *g_runner_ddr_base_addr;
23 extern uint32_t g_runner_ddr_base_addr_phys;
24 extern uint8_t *g_runner_extra_ddr_base_addr;
25 extern uint32_t g_runner_extra_ddr_base_addr_phys;
26 extern uint32_t g_ddr_headroom_size;
27 extern uint8_t *g_runner_tables_ptr;
28 extern uint8_t g_broadcom_switch_mode;
29 extern BL_LILAC_RDD_BRIDGE_PORT_DTE g_broadcom_switch_physical_port;
30 extern uint32_t g_bridge_flow_cache_mode;
31 extern uint8_t **g_cpu_tx_skb_pointers_reference_array;
32 extern uint8_t *g_dhd_tx_cpu_usage_reference_array;
33 extern rdd_phys_addr_t *g_cpu_tx_data_pointers_reference_array;
34 extern uint32_t g_cpu_tx_abs_packet_limit;
35 extern rdd_phys_addr_t g_free_skb_indexes_fifo_table_physical_address;
36 extern rdd_phys_addr_t g_free_skb_indexes_fifo_table_physical_address_last_idx;
37 extern uint16_t *g_free_skb_indexes_fifo_table;
38 extern RDD_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_DTE g_ingress_classification_rule_cfg_table[ 2 ];
39 extern uint32_t g_rate_controllers_pool_idx;
40 extern uint32_t g_chip_revision;
41 extern RDD_WAN_TX_POINTERS_TABLE_DTS *wan_tx_pointers_table_ptr;
42 rdpa_bpm_buffer_size_t g_bpm_buffer_size = LILAC_RDD_RUNNER_PACKET_BUFFER_SIZE;
43
44 static BL_LILAC_RDD_ERROR_DTE f_rdd_bpm_initialize ( uint32_t, uint32_t, uint32_t );
45 static BL_LILAC_RDD_ERROR_DTE f_rdd_ddr_initialize ( uint32_t, uint32_t, uint32_t );
46 static BL_LILAC_RDD_ERROR_DTE f_rdd_psram_initialize ( void );
47 static BL_LILAC_RDD_ERROR_DTE f_rdd_scheduler_initialize ( void );
48 static BL_LILAC_RDD_ERROR_DTE f_rdd_free_packet_descriptors_pool_initialize ( void );
49 static BL_LILAC_RDD_ERROR_DTE f_rdd_global_registers_initialize ( void );
50 static BL_LILAC_RDD_ERROR_DTE f_rdd_local_registers_initialize ( void );
51 static BL_LILAC_RDD_ERROR_DTE f_rdd_ingress_classification_table_initialize ( void );
52 static BL_LILAC_RDD_ERROR_DTE f_rdd_eth_tx_initialize ( void );
53 static BL_LILAC_RDD_ERROR_DTE f_rdd_wan_tx_initialize ( void );
54 static BL_LILAC_RDD_ERROR_DTE f_rdd_inter_task_queues_initialize ( void );
55 static BL_LILAC_RDD_ERROR_DTE f_rdd_pm_counters_initialize ( void );
56 static BL_LILAC_RDD_ERROR_DTE f_rdd_transmit_from_abs_address_initialize ( void );
57 static BL_LILAC_RDD_ERROR_DTE f_rdd_parallel_processing_initialize ( void );
58
59 extern BL_LILAC_RDD_ERROR_DTE rdd_firewall_initialize ( void );
60 extern BL_LILAC_RDD_ERROR_DTE rdd_cpu_tx_initialize ( void );
61 extern BL_LILAC_RDD_ERROR_DTE rdd_cpu_rx_initialize ( void );
62 extern BL_LILAC_RDD_ERROR_DTE f_rdd_mac_table_initialize ( uint32_t, uint32_t );
63 extern BL_LILAC_RDD_ERROR_DTE f_rdd_ingress_filters_cam_initialize ( void );
64 extern BL_LILAC_RDD_ERROR_DTE f_rdd_layer4_filters_initialize ( void );
65 extern BL_LILAC_RDD_ERROR_DTE f_rdd_vlan_matrix_initialize ( void );
66 extern BL_LILAC_RDD_ERROR_DTE f_rdd_connection_table_initialize ( void );
67 extern BL_LILAC_RDD_ERROR_DTE f_rdd_multicast_initialize ( void );
68 extern BL_LILAC_RDD_ERROR_DTE f_rdd_vid_cam_initialize ( void );
69 extern BL_LILAC_RDD_ERROR_DTE f_rdd_ds_exponent_table_initialize ( void );
70 extern void f_rdd_full_flow_cache_config ( bdmf_boolean );
71
72 BL_LILAC_RDD_ERROR_DTE rdd_init ( void )
73 {
74 RUNNER_INST_MAIN *sram_fast_program_ptr;
75 RUNNER_INST_PICO *sram_pico_program_ptr;
76 RUNNER_COMMON *sram_common_data_ptr;
77 RUNNER_PRIVATE *sram_private_data_ptr;
78 RUNNER_CNTXT_MAIN *sram_fast_context_ptr;
79 RUNNER_CNTXT_PICO *sram_pico_context_ptr;
80 RUNNER_PRED_MAIN *sram_fast_prediction_ptr;
81 RUNNER_PRED_PICO *sram_pico_prediction_ptr;
82
83 /* reset SRAM program memory of both Runners */
84 sram_fast_program_ptr = ( RUNNER_INST_MAIN * )DEVICE_ADDRESS( RUNNER_INST_MAIN_0_OFFSET );
85 rdp_mm_setl ( sram_fast_program_ptr, 0, sizeof ( RUNNER_INST_MAIN ) );
86
87 sram_fast_program_ptr = ( RUNNER_INST_MAIN * )DEVICE_ADDRESS( RUNNER_INST_MAIN_1_OFFSET );
88 rdp_mm_setl ( sram_fast_program_ptr, 0, sizeof ( RUNNER_INST_MAIN ) );
89
90 sram_pico_program_ptr = ( RUNNER_INST_PICO * )DEVICE_ADDRESS( RUNNER_INST_PICO_0_OFFSET );
91 rdp_mm_setl ( sram_pico_program_ptr, 0, sizeof ( RUNNER_INST_PICO ) );
92
93 sram_pico_program_ptr = ( RUNNER_INST_PICO * )DEVICE_ADDRESS( RUNNER_INST_PICO_1_OFFSET );
94 rdp_mm_setl ( sram_fast_program_ptr, 0, sizeof ( RUNNER_INST_PICO ) );
95
96 /* reset SRAM common data memory of both Runners */
97 sram_common_data_ptr = ( RUNNER_COMMON * )DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET );
98 rdp_mm_setl ( sram_common_data_ptr, 0, sizeof ( RUNNER_COMMON ) );
99
100 sram_common_data_ptr = ( RUNNER_COMMON * )DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET );
101 rdp_mm_setl ( sram_common_data_ptr, 0, sizeof ( RUNNER_COMMON ) );
102
103 /* reset SRAM private data memory of both Runners */
104 sram_private_data_ptr = ( RUNNER_PRIVATE * )DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET );
105 rdp_mm_setl ( sram_private_data_ptr, 0, sizeof ( RUNNER_PRIVATE ) );
106
107 sram_private_data_ptr = ( RUNNER_PRIVATE * )DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET );
108 rdp_mm_setl ( sram_private_data_ptr, 0, sizeof ( RUNNER_PRIVATE ) );
109
110 /* reset SRAM context memory of both Runners */
111 sram_fast_context_ptr = ( RUNNER_CNTXT_MAIN * )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_0_OFFSET );
112 rdp_mm_setl_context ( sram_fast_context_ptr, 0, sizeof ( RUNNER_CNTXT_MAIN ) );
113
114 sram_fast_context_ptr = ( RUNNER_CNTXT_MAIN * )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_1_OFFSET );
115 rdp_mm_setl_context ( sram_fast_context_ptr, 0, sizeof ( RUNNER_CNTXT_MAIN ) );
116
117 sram_pico_context_ptr = ( RUNNER_CNTXT_PICO * )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_0_OFFSET );
118 rdp_mm_setl_context ( sram_pico_context_ptr, 0, sizeof ( RUNNER_CNTXT_PICO ) );
119
120 sram_pico_context_ptr = ( RUNNER_CNTXT_PICO * )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_1_OFFSET );
121 rdp_mm_setl_context ( sram_pico_context_ptr, 0, sizeof ( RUNNER_CNTXT_PICO ) );
122
123 /* reset SRAM prediction memory of both Runners */
124 sram_fast_prediction_ptr = ( RUNNER_PRED_MAIN * )DEVICE_ADDRESS( RUNNER_PRED_MAIN_0_OFFSET );
125 rdp_mm_setl ( sram_fast_prediction_ptr, 0, sizeof ( RUNNER_PRED_MAIN ) * 2 );
126
127 sram_fast_prediction_ptr = ( RUNNER_PRED_MAIN * )DEVICE_ADDRESS( RUNNER_PRED_MAIN_1_OFFSET );
128 rdp_mm_setl ( sram_fast_prediction_ptr, 0, sizeof ( RUNNER_PRED_MAIN ) * 2 );
129
130 sram_pico_prediction_ptr = ( RUNNER_PRED_PICO * )DEVICE_ADDRESS( RUNNER_PRED_PICO_0_OFFSET );
131 rdp_mm_setl ( sram_pico_prediction_ptr, 0, sizeof ( RUNNER_PRED_PICO ) * 2 );
132
133 sram_pico_prediction_ptr = ( RUNNER_PRED_PICO * )DEVICE_ADDRESS( RUNNER_PRED_PICO_1_OFFSET );
134 rdp_mm_setl ( sram_pico_prediction_ptr, 0, sizeof ( RUNNER_PRED_PICO ) * 2 );
135
136 return ( BL_LILAC_RDD_OK );
137 }
138
139 BL_LILAC_RDD_ERROR_DTE rdd_load_microcode ( uint8_t *xi_runer_A_microcode_ptr,
140 uint8_t *xi_runer_B_microcode_ptr,
141 uint8_t *xi_runer_C_microcode_ptr,
142 uint8_t *xi_runer_D_microcode_ptr )
143 {
144 RUNNER_INST_MAIN *sram_fast_program_ptr;
145 RUNNER_INST_PICO *sram_pico_program_ptr;
146
147
148 /* load the code segment into the SRAM program memory of fast Runner B */
149 sram_fast_program_ptr = ( RUNNER_INST_MAIN * )DEVICE_ADDRESS( RUNNER_INST_MAIN_1_OFFSET );
150 MWRITE_BLK_32( sram_fast_program_ptr, xi_runer_B_microcode_ptr, sizeof ( RUNNER_INST_MAIN ) );
151
152 /* load the code segment into the SRAM program memory of pico Runner A */
153 sram_pico_program_ptr = ( RUNNER_INST_PICO * )DEVICE_ADDRESS( RUNNER_INST_PICO_0_OFFSET );
154 MWRITE_BLK_32( sram_pico_program_ptr, xi_runer_C_microcode_ptr, sizeof ( RUNNER_INST_PICO ) );
155
156 /* load the code segment into the SRAM program memory of pico Runner B */
157 sram_pico_program_ptr = ( RUNNER_INST_PICO * )DEVICE_ADDRESS( RUNNER_INST_PICO_1_OFFSET );
158 MWRITE_BLK_32( sram_pico_program_ptr, xi_runer_D_microcode_ptr, sizeof ( RUNNER_INST_PICO ) );
159
160 return ( BL_LILAC_RDD_OK );
161 }
162
163
/* Copy a prediction image into Runner prediction SRAM, expanding each 16-bit
 * source word into one 32-bit destination cell (the SRAM is addressed as
 * 32-bit cells that each hold a 16-bit entry).
 *
 * to   - destination (32-bit-cell prediction memory)
 * from - source image (packed 16-bit entries)
 * n    - source length in bytes; an odd trailing byte is ignored
 *
 * Fixes vs. original: parameter names no longer use implementation-reserved
 * double-underscore identifiers, and the loop counter is unsigned so the
 * bound comparison against n / 2 is not a signed/unsigned mismatch. */
static void memcpyl_prediction ( void *to, void *from, unsigned int n )
{
    uint8_t *src = (uint8_t *)from;
    uint8_t *dst = (uint8_t *)to;
    unsigned int i;

    for (i = 0; i < (n / 2); i++, src += 2, dst += 4)
    {
#ifdef _BYTE_ORDER_LITTLE_ENDIAN_
        /* device memory expects the opposite byte order on LE hosts */
        *(volatile unsigned int *)dst = swap4bytes((unsigned int)(*(volatile unsigned short *)src));
#else
        *(volatile unsigned int *)dst = (unsigned int)(*(volatile unsigned short *)src);
#endif
    }
}
179
180
181 BL_LILAC_RDD_ERROR_DTE rdd_load_prediction ( uint8_t *xi_runer_A_prediction_ptr,
182 uint8_t *xi_runer_B_prediction_ptr,
183 uint8_t *xi_runer_C_prediction_ptr,
184 uint8_t *xi_runer_D_prediction_ptr )
185 {
186 RUNNER_PRED_MAIN *sram_fast_prediction_ptr;
187 RUNNER_PRED_PICO *sram_pico_prediction_ptr;
188
189 sram_fast_prediction_ptr = ( RUNNER_PRED_MAIN * )DEVICE_ADDRESS( RUNNER_PRED_MAIN_0_OFFSET );
190 memcpyl_prediction ( sram_fast_prediction_ptr, xi_runer_A_prediction_ptr, sizeof ( RUNNER_PRED_MAIN ) );
191
192 sram_fast_prediction_ptr = ( RUNNER_PRED_MAIN * )DEVICE_ADDRESS( RUNNER_PRED_MAIN_1_OFFSET );
193 memcpyl_prediction ( sram_fast_prediction_ptr, xi_runer_B_prediction_ptr, sizeof ( RUNNER_PRED_MAIN ) );
194
195 sram_pico_prediction_ptr = ( RUNNER_PRED_PICO * )DEVICE_ADDRESS( RUNNER_PRED_PICO_0_OFFSET );
196 memcpyl_prediction ( sram_pico_prediction_ptr, xi_runer_C_prediction_ptr, sizeof ( RUNNER_PRED_PICO ) );
197
198 sram_pico_prediction_ptr = ( RUNNER_PRED_PICO * )DEVICE_ADDRESS( RUNNER_PRED_PICO_1_OFFSET );
199 memcpyl_prediction ( sram_pico_prediction_ptr, xi_runer_D_prediction_ptr, sizeof ( RUNNER_PRED_PICO ) );
200
201 return ( BL_LILAC_RDD_OK );
202 }
203
204
205 BL_LILAC_RDD_ERROR_DTE rdd_runner_enable ( void )
206 {
207 #if !defined(FIRMWARE_INIT)
208 RUNNER_REGS_CFG_GLOBAL_CTRL runner_global_control_register;
209
210 /* enable Runner A through the global control register */
211 RUNNER_REGS_0_CFG_GLOBAL_CTRL_READ ( runner_global_control_register );
212 runner_global_control_register.pico_en = LILAC_RDD_TRUE;
213 runner_global_control_register.main_cntxt_reb_en = LILAC_RDD_TRUE;
214 RUNNER_REGS_0_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register );
215
216 /* enable Runner B through the global control register */
217 RUNNER_REGS_1_CFG_GLOBAL_CTRL_READ ( runner_global_control_register );
218 runner_global_control_register.main_en = LILAC_RDD_TRUE;
219 runner_global_control_register.pico_en = LILAC_RDD_TRUE;
220 runner_global_control_register.main_cntxt_reb_en = LILAC_RDD_TRUE;
221 RUNNER_REGS_1_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register );
222 #endif
223
224 return ( BL_LILAC_RDD_OK );
225 }
226
227 BL_LILAC_RDD_ERROR_DTE rdd_runner_frequency_set ( uint16_t xi_runner_frequency )
228 {
229 #if !defined(FIRMWARE_INIT)
230 RUNNER_REGS_CFG_GLOBAL_CTRL runner_global_control_register;
231
232 /* set the frequency of the Runner through the global control register */
233 RUNNER_REGS_0_CFG_GLOBAL_CTRL_READ ( runner_global_control_register );
234 runner_global_control_register.micro_sec_val = xi_runner_frequency - 1;
235 RUNNER_REGS_0_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register );
236
237 RUNNER_REGS_1_CFG_GLOBAL_CTRL_READ ( runner_global_control_register );
238 runner_global_control_register.micro_sec_val = xi_runner_frequency - 1;
239 RUNNER_REGS_1_CFG_GLOBAL_CTRL_WRITE ( runner_global_control_register );
240 #endif
241
242 return ( BL_LILAC_RDD_OK );
243 }
244
245
/* Top-level RDD data-structure initialization: captures the DDR/table base
 * addresses and mode flags from init_params into module globals, then runs
 * the per-subsystem initializers in a fixed order (BPM/DDR/PSRAM registers
 * first, then pools, CPU rx/tx, registers, tx paths, queues and counters).
 * The ordering below is load-bearing — later steps write into memory regions
 * configured by earlier ones — so do not reorder the calls.
 * Always returns BL_LILAC_RDD_OK; individual initializer results are ignored
 * (NOTE(review): none of the f_rdd_* calls here have their return value
 * checked — confirm they cannot fail in practice). */
BL_LILAC_RDD_ERROR_DTE rdd_data_structures_init ( RDD_INIT_PARAMS *init_params )
{
    /* initialize the base address of the packets in the ddr */
    g_runner_ddr_base_addr = init_params->ddr_pool_ptr;
    g_runner_ddr_base_addr_phys = init_params->ddr_pool_ptr_phys;
    g_runner_extra_ddr_base_addr = init_params->extra_ddr_pool_ptr;
    g_runner_extra_ddr_base_addr_phys = init_params->extra_ddr_pool_ptr_phys;
    g_runner_tables_ptr = init_params->ddr_runner_tables_ptr;
    g_ds_connection_table_ptr = ( RDD_CONNECTION_TABLE_DTS * )DsConnectionTableBase;
#if !defined(FIRMWARE_INIT)
    /* In simulation these are setup in rdd_sim_alloc_segments */
    ContextTableBase = g_runner_tables_ptr + CONTEXT_TABLE_ADDRESS;
#endif

    /* debug tracing off by default */
    g_dbg_lvl = 0;
    /* flow-cache multicast table lives in Runner A private data SRAM */
    g_fc_mcast_connection2_table_ptr = ( RDD_FC_MCAST_CONNECTION2_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + FC_MCAST_CONNECTION2_TABLE_ADDRESS );

    g_ddr_headroom_size = init_params->ddr_headroom_size;

    g_broadcom_switch_mode = init_params->broadcom_switch_mode;
    g_broadcom_switch_physical_port = init_params->broadcom_switch_physical_port;

    g_bridge_flow_cache_mode = init_params->bridge_flow_cache_mode;
    g_chip_revision = init_params->chip_revision;

    /* check abs packet limit legal value: must lie within [MIN, MAX] and be
       a multiple of MIN; otherwise fall back to the minimum */
    if( ( init_params->cpu_tx_abs_packet_limit <= LILAC_RDD_CPU_TX_SKB_LIMIT_MAX ) &&
        ( init_params->cpu_tx_abs_packet_limit >= LILAC_RDD_CPU_TX_SKB_LIMIT_MIN ) &&
        ( init_params->cpu_tx_abs_packet_limit % LILAC_RDD_CPU_TX_SKB_LIMIT_MIN == 0 ) )
    {
        g_cpu_tx_abs_packet_limit = init_params->cpu_tx_abs_packet_limit;
    }
    else
    {
        g_cpu_tx_abs_packet_limit = LILAC_RDD_CPU_TX_SKB_LIMIT_MIN;
    }

    /* initialize the base address of the BPM base address */
    f_rdd_bpm_initialize(init_params->ddr_pool_ptr_phys, 0, init_params->extra_ddr_pool_ptr_phys);

    /* initialize runner dma base address (DDR config registers) */
    f_rdd_ddr_initialize(init_params->ddr_pool_ptr_phys, 0, g_ddr_headroom_size);

    /* initialize runner psram config registers */
    f_rdd_psram_initialize ();

    /* initialize scheduler */
    f_rdd_scheduler_initialize ();

    /* create the Runner's free packet descriptors pool */
    f_rdd_free_packet_descriptors_pool_initialize ();

    /* initialize the CPU-RX mechanism */
    rdd_cpu_rx_initialize ();

    /* initialize the CPU-TX queue */
    rdd_cpu_tx_initialize ();

    /* initialize global registers */
    f_rdd_global_registers_initialize ();

    /* initialize the local registers through the Context memory */
    f_rdd_local_registers_initialize ();

    /* initialize ethernet tx queues and ports */
    f_rdd_eth_tx_initialize ();

    /* initialize WAN tx */
    f_rdd_wan_tx_initialize ();

    /* initialize inter task queues */
    f_rdd_inter_task_queues_initialize ();

    /* initialize PM counters */
    f_rdd_pm_counters_initialize ();

    /* initialize ingress classification table */
    f_rdd_ingress_classification_table_initialize ();

    /* set up the ETH0 EEE mode config message: BBH peripheral id in the high
       half-word, message opcode in the low half-word */
    MWRITE_32(DEVICE_ADDRESS(RUNNER_PRIVATE_1_OFFSET) + US_ETH0_EEE_MODE_CONFIG_MESSAGE_ADDRESS,
              (BBH_PERIPHERAL_ETH0_TX<<16)|BBTX_EEE_MODE_CONFIG_MESSAGE);

    /* initialize free skb indexes fifo and pointers*/
    f_rdd_transmit_from_abs_address_initialize ();

    /* Part of the bridge initialization: publish the DS multicast LAN enqueue
       ingress queue address to the firmware. */
    MWRITE_16( (DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DOWNSTREAM_MULTICAST_LAN_ENQUEUE_INGRESS_QUEUE_PTR_ADDRESS), DOWNSTREAM_MULTICAST_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS );

    /* initialize structures supporting parallel processing */
    f_rdd_parallel_processing_initialize ();

    /* set ethwan2 switch port to "not configured" (0xff sentinel) */
    rdd_ethwan2_switch_port_config(0xff);

    /* initialize ds rate limit exponent table */
    f_rdd_ds_exponent_table_initialize ();

    return ( BL_LILAC_RDD_OK );
}
346
347 /******************************************************************************/
348 /* */
349 /* Name: */
350 /* */
351 /* f_rdd_bpm_initialize */
352 /* */
353 /* Title: */
354 /* */
355 /* Runner Initialization - initialize BPM */
356 /* */
357 /* Abstract: */
358 /* */
359 /* This function returns the status of the operation */
360 /* */
361 /* Registers : */
362 /* */
363 /* none */
364 /* */
365 /* Input: */
366 /* */
367 /* xi_runner_ddr_pool_ptr - Packet DDR buffer base address */
368 /* xi_extra_ddr_pool_ptr - Packet DDR buffer base address (Multicast) */
369 /* xi_ddr_headroom_size - configurable headroom in addition to */
370 /* LILAC_RDD_PACKET_DDR_OFFSET */
371 /* */
372 /* Output: */
373 /* */
374 /* none */
375 /* . */
376 /* */
377 /******************************************************************************/
378 static BL_LILAC_RDD_ERROR_DTE f_rdd_bpm_initialize(uint32_t runner_ddr_pool_phys,
379 uint32_t runner_ddr1_pool_phys,
380 uint32_t runner_extra_ddr_pool_phys)
381 {
382 uint32_t *bpm_ddr_base_ptr;
383 uint32_t *bpm_extra_ddr_base_ptr;
384
385 bpm_ddr_base_ptr = (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_0_OFFSET) + DS_BPM_DDR_BUFFERS_BASE_ADDRESS);
386 MWRITE_32(bpm_ddr_base_ptr, runner_ddr_pool_phys);
387 bpm_ddr_base_ptr = (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_1_OFFSET) + US_BPM_DDR_BUFFERS_BASE_ADDRESS);
388 MWRITE_32(bpm_ddr_base_ptr, runner_ddr_pool_phys);
389
390 bpm_extra_ddr_base_ptr = (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_0_OFFSET) + DS_BPM_EXTRA_DDR_BUFFERS_BASE_ADDRESS);
391 MWRITE_32(bpm_extra_ddr_base_ptr, runner_extra_ddr_pool_phys);
392
393 bpm_extra_ddr_base_ptr = (uint32_t *)(DEVICE_ADDRESS(RUNNER_PRIVATE_1_OFFSET) + US_BPM_EXTRA_DDR_BUFFERS_BASE_ADDRESS);
394 MWRITE_32(bpm_extra_ddr_base_ptr, runner_extra_ddr_pool_phys);
395
396 return ( BL_LILAC_RDD_OK );
397 }
398
399
400 /******************************************************************************/
401 /* */
402 /* Name: */
403 /* */
404 /* f_rdd_ddr_initialize */
405 /* */
406 /* Title: */
407 /* */
408 /* Runner Initialization - initialize the runner ddr config register */
409 /* */
410 /* Abstract: */
411 /* */
412 /* This function returns the status of the operation */
413 /* */
414 /* Registers : */
415 /* */
416 /* DDR_config Register */
417 /* */
418 /* Input: */
419 /* */
420 /* xi_runner_ddr_pool_phys - Packet DDR buffer base address */
421 /* xi_ddr_headroom_size - configurable headroom in addition to */
422 /* LILAC_RDD_PACKET_DDR_OFFSET */
423 /* */
424 /* Output: */
425 /* */
426 /* none */
427 /* . */
428 /* */
429 /******************************************************************************/
/* Program the Runner DDR configuration registers of both Runners: the packet
 * buffer offset, DMA base (bits 21..26 of the pool physical address), buffer
 * size, and the DDR lookup masks; also publishes the BPM buffer size (in
 * 256-byte units) to both Runners' private SRAM.
 * NOTE(review): xi_runner_ddr1_pool_phys and xi_ddr_headroom_size are not
 * referenced in this body — confirm whether headroom should contribute to
 * buffer_offset here. */
static BL_LILAC_RDD_ERROR_DTE f_rdd_ddr_initialize(uint32_t xi_runner_ddr_pool_phys,
                                                   uint32_t xi_runner_ddr1_pool_phys,
                                                   uint32_t xi_ddr_headroom_size)
{
    RUNNER_REGS_CFG_DDR_CFG runner_ddr_config_register;
    RUNNER_REGS_CFG_DDR_LKUP_MASK0 runner_ddr_lkup_mask0_register;
    RUNNER_REGS_CFG_DDR_LKUP_MASK1 runner_ddr_lkup_mask1_register;
    uint32_t *ddr_address_ptr; /* DSL */

    runner_ddr_config_register.buffer_offset = LILAC_RDD_PACKET_DDR_OFFSET;
    runner_ddr_config_register.rserved1 = 0;
    /* DMA base is the pool physical address windowed to bits 21..26 */
    runner_ddr_config_register.dma_base = (xi_runner_ddr_pool_phys & 0x07E00000) >> 21;
    runner_ddr_config_register.buffer_size = RDP_CFG_BUF_SIZE_VALUE;
    runner_ddr_config_register.rserved2 = 0;

    RUNNER_REGS_0_CFG_DDR_CFG_WRITE ( runner_ddr_config_register );
    RUNNER_REGS_1_CFG_DDR_CFG_WRITE ( runner_ddr_config_register );

    /* DDR lookup for routed packet - 5 tupples */
    /* NOTE(review): only global_mask is assigned; if this register struct has
       other fields they are written with indeterminate values — confirm */
    runner_ddr_lkup_mask0_register.global_mask = 0x000001FF;

    RUNNER_REGS_0_CFG_DDR_LKUP_MASK0_WRITE ( runner_ddr_lkup_mask0_register );
    RUNNER_REGS_1_CFG_DDR_LKUP_MASK0_WRITE ( runner_ddr_lkup_mask0_register );

    /* DDR lookup for IPTV table - destination MAC, destination MAC + VLAN, destination IP */
    runner_ddr_lkup_mask1_register.global_mask = 0x00000000;

    RUNNER_REGS_0_CFG_DDR_LKUP_MASK1_WRITE ( runner_ddr_lkup_mask1_register );
    RUNNER_REGS_1_CFG_DDR_LKUP_MASK1_WRITE ( runner_ddr_lkup_mask1_register );

    /* publish the BPM buffer size (>> 8, i.e. in 256-byte units) to both
       Runners; MWRITE_8 writes a single byte despite the uint32_t* pointer */
    ddr_address_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_PACKET_BUFFER_SIZE_ASR_8_ADDRESS );
    MWRITE_8( ddr_address_ptr, g_bpm_buffer_size >> 8 );

    ddr_address_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_PACKET_BUFFER_SIZE_ASR_8_ADDRESS );
    MWRITE_8( ddr_address_ptr, g_bpm_buffer_size >> 8 );

    return ( BL_LILAC_RDD_OK );
}
468
469
470 /******************************************************************************/
471 /* */
472 /* Name: */
473 /* */
474 /* f_rdd_psram_initialize */
475 /* */
476 /* Title: */
477 /* */
478 /* Runner Initialization - initialize the runner psram config register */
479 /* */
480 /* Abstract: */
481 /* */
482 /* This function returns the status of the operation */
483 /* */
484 /* Registers : */
485 /* */
486 /* PSRAM_config Register */
487 /* */
488 /* Input: */
489 /* */
490 /* none */
491 /* */
492 /* Output: */
493 /* */
494 /* none */
495 /* . */
496 /* */
497 /******************************************************************************/
498 static BL_LILAC_RDD_ERROR_DTE f_rdd_psram_initialize ( void )
499 {
500 RUNNER_REGS_CFG_PSRAM_CFG runner_psram_config_register;
501 RUNNER_REGS_CFG_PSRAM_LKUP_MASK0 runner_psram_lkup_mask0_register;
502
503 runner_psram_config_register.buffer_offset = LILAC_RDD_PACKET_DDR_OFFSET;
504 runner_psram_config_register.rserved1 = 0;
505 runner_psram_config_register.buffer_size = RUNNER_REGS_CFG_PSRAM_CFG_BUFFER_SIZE_BUFFER_SIZE_128BYTE_VALUE;
506 runner_psram_config_register.rserved2 = 0;
507 runner_psram_config_register.dma_base = 0;
508
509 RUNNER_REGS_0_CFG_PSRAM_CFG_WRITE ( runner_psram_config_register );
510 RUNNER_REGS_1_CFG_PSRAM_CFG_WRITE ( runner_psram_config_register );
511
512
513 /* PSRAM lookup for data collection - 5 tupples & layer 2 */
514 runner_psram_lkup_mask0_register.global_mask = 0x0000FFFF;
515
516 RUNNER_REGS_0_CFG_PSRAM_LKUP_MASK0_WRITE ( runner_psram_lkup_mask0_register );
517 RUNNER_REGS_1_CFG_PSRAM_LKUP_MASK0_WRITE ( runner_psram_lkup_mask0_register );
518
519 return ( BL_LILAC_RDD_OK );
520 }
521
522
523 /******************************************************************************/
524 /* */
525 /* Name: */
526 /* */
527 /* f_rdd_scheduler_initialize */
528 /* */
529 /* Title: */
530 /* */
531 /* Runner Initialization - initialize the scheduler config register */
532 /* */
533 /* Abstract: */
534 /* */
535 /* This function returns the status of the operation */
536 /* */
537 /* Registers : */
538 /* */
539 /* DDR_config Register */
540 /* */
541 /* Input: */
542 /* */
543 /* none */
544 /* */
545 /* Output: */
546 /* */
547 /* none */
548 /* . */
549 /* */
550 /******************************************************************************/
551 static BL_LILAC_RDD_ERROR_DTE f_rdd_scheduler_initialize ( void )
552 {
553 uint32_t runner_scheduler_cfg_register;
554
555 /* fast Runner A - class C */
556 runner_scheduler_cfg_register = ( RUNNER_REGS_CFG_MAIN_SCH_CFG_ARB_CLASS_USE_RR_VALUE << 6 ) |
557 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE << 5 ) |
558 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_A_DONT_USE_CLASS_A_VALUE << 4 ) |
559 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_31_24_RR_VALUE << 3 ) |
560 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_23_16_RR_VALUE << 2 ) |
561 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_15_8_RR_VALUE << 1 ) |
562 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_7_0_STRICT_VALUE << 0 );
563
564 RUNNER_REGS_0_CFG_MAIN_SCH_CFG_WRITE ( runner_scheduler_cfg_register );
565
566 /* fast Runner B - class C */
567 runner_scheduler_cfg_register = ( RUNNER_REGS_CFG_MAIN_SCH_CFG_ARB_CLASS_USE_RR_VALUE << 6 ) |
568 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE << 5 ) |
569 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_USE_CLASS_A_DONT_USE_CLASS_A_VALUE << 4 ) |
570 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_31_24_RR_VALUE << 3 ) |
571 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_23_16_RR_VALUE << 2 ) |
572 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_15_8_RR_VALUE << 1 ) |
573 ( RUNNER_REGS_CFG_MAIN_SCH_CFG_CLASS_7_0_STRICT_VALUE << 0 );
574
575 RUNNER_REGS_1_CFG_MAIN_SCH_CFG_WRITE ( runner_scheduler_cfg_register );
576
577 /* pico Runner A - class A */
578 runner_scheduler_cfg_register = ( RUNNER_REGS_CFG_PICO_SCH_CFG_ARB_CLASS_USE_RR_VALUE << 6 ) |
579 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE << 5 ) |
580 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_A_USE_CLASS_A_VALUE << 4 ) |
581 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_15_8_RR_VALUE << 1 ) |
582 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_7_0_RR_VALUE << 0 );
583
584 RUNNER_REGS_0_CFG_PICO_SCH_CFG_WRITE ( runner_scheduler_cfg_register );
585
586 /* pico Runner B - class A */
587 runner_scheduler_cfg_register = ( RUNNER_REGS_CFG_PICO_SCH_CFG_ARB_CLASS_USE_RR_VALUE << 6 ) |
588 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_B_DONT_USE_CLASS_B_VALUE << 5 ) |
589 ( RUNNER_REGS_CFG_PICO_SCH_CFG_USE_CLASS_A_USE_CLASS_A_VALUE << 4 ) |
590 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_15_8_RR_VALUE << 1 ) |
591 ( RUNNER_REGS_CFG_PICO_SCH_CFG_CLASS_7_0_RR_VALUE << 0 );
592
593 RUNNER_REGS_1_CFG_PICO_SCH_CFG_WRITE ( runner_scheduler_cfg_register );
594
595 return ( BL_LILAC_RDD_OK );
596 }
597
598
599 /******************************************************************************/
600 /* */
601 /* Name: */
602 /* */
603 /* f_rdd_free_packet_descriptors_pool_initialize */
604 /* */
605 /* Title: */
606 /* */
607 /* Runner Initialization - initialize the list of the free buffers pool */
608 /* */
609 /* Abstract: */
610 /* */
611 /* Upstream pool is implemented as a stack of 3072 packet descriptors */
612 /* Downstream pool is implemented as a list of 2048 packet descriptors */
613 /* */
614 /* Input: */
615 /* */
616 /* none */
617 /* */
618 /* Output: */
619 /* */
620 /* none */
621 /* . */
622 /* */
623 /******************************************************************************/
624 static BL_LILAC_RDD_ERROR_DTE f_rdd_free_packet_descriptors_pool_initialize ( void )
625 {
626 RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_DTS *ds_free_packet_descriptors_pool_ptr;
627 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DTS *us_free_packet_descriptors_pool_ptr;
628 RDD_PACKET_DESCRIPTOR_DTS *packet_descriptor_ptr;
629 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS *free_packet_descriptors_pool_descriptor_ptr;
630 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS *us_free_packet_descriptors_pool_descriptor_ptr;
631 uint32_t next_packet_descriptor_address;
632 uint32_t i;
633
634 ds_free_packet_descriptors_pool_ptr = ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS );
635
636 /* create the free packet descriptors pool as a list of packet descriptors */
637 for ( i = 0; i < RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE; i++ )
638 {
639 packet_descriptor_ptr = &( ds_free_packet_descriptors_pool_ptr->entry[ i ].packet_descriptor );
640
641 /* the last packet descriptor should point to NULL, the others points to the next packet descriptor */
642 if ( i == ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE - 1 ) )
643 {
644 next_packet_descriptor_address = 0;
645 }
646 else
647 {
648 next_packet_descriptor_address = DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS + ( i + 1 ) * sizeof(RDD_PACKET_DESCRIPTOR_DTS);
649 }
650
651 RDD_PACKET_DESCRIPTOR_NEXT_PACKET_DESCRIPTOR_POINTER_WRITE ( next_packet_descriptor_address, packet_descriptor_ptr );
652 }
653
654 free_packet_descriptors_pool_descriptor_ptr = ( RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ADDRESS );
655
656 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_HEAD_POINTER_WRITE ( DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS, free_packet_descriptors_pool_descriptor_ptr );
657
658 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_TAIL_POINTER_WRITE ( DS_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS + ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE - 1 ) * sizeof(RDD_PACKET_DESCRIPTOR_DTS),
659 free_packet_descriptors_pool_descriptor_ptr );
660
661 us_free_packet_descriptors_pool_ptr = ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS );
662
663 /* create the free packet descriptors pool as a stack of packet descriptors */
664 for ( i = 0; i < RDD_US_FREE_PACKET_DESCRIPTORS_POOL_SIZE; i++ )
665 {
666 packet_descriptor_ptr = &( us_free_packet_descriptors_pool_ptr->entry[ i ].packet_descriptor );
667
668 /* the last packet descriptor should point to NULL, the others points to the next packet descriptor */
669 if ( i == ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_SIZE - 1 ) )
670 {
671 next_packet_descriptor_address = 0;
672 }
673 else
674 {
675 next_packet_descriptor_address = US_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS + ( i + 1 ) * sizeof(RDD_PACKET_DESCRIPTOR_DTS);
676 }
677
678 RDD_PACKET_DESCRIPTOR_NEXT_PACKET_DESCRIPTOR_POINTER_WRITE ( next_packet_descriptor_address, packet_descriptor_ptr );
679 }
680
681 us_free_packet_descriptors_pool_descriptor_ptr = ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ADDRESS );
682
683 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_THRESHOLD_WRITE ( US_FREE_PACKET_DESCRIPTOR_POOL_GUARANTEED_QUEUE_THRESHOLD, us_free_packet_descriptors_pool_descriptor_ptr );
684 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_FREE_COUNT_WRITE (US_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE, us_free_packet_descriptors_pool_descriptor_ptr );
685 RDD_US_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_NON_GUARANTEED_FREE_COUNT_WRITE ( RDD_US_FREE_PACKET_DESCRIPTORS_POOL_SIZE - US_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE, us_free_packet_descriptors_pool_descriptor_ptr );
686
687 return ( BL_LILAC_RDD_OK );
688 }
689
690
691 /******************************************************************************/
692 /* */
693 /* Name: */
694 /* */
695 /* f_rdd_global_registers_initialize */
696 /* */
697 /* Title: */
698 /* */
699 /* Runner Initialization - initialize the global registers (R1-R7) */
700 /* */
701 /* Registers : */
702 /* */
703 /* Runners global registers (R1-R7) */
704 /* */
705 /* Input: */
706 /* */
707 /* none */
708 /* */
709 /* Output: */
710 /* */
711 /* none */
712 /* . */
713 /* */
714 /******************************************************************************/
715 static BL_LILAC_RDD_ERROR_DTE f_rdd_global_registers_initialize ( void )
716 {
717 uint32_t *global_register_init_ptr;
718 uint32_t global_register[ 8 ];
719
720
721 /********** Fast Runner A **********/
722
723 /* zero all global registers */
724 memset ( global_register, 0, sizeof ( global_register ) );
725
726 /* R1 - constant one */
727 global_register[ 1 ] = 1;
728
729 global_register[ 2 ] = ( g_broadcom_switch_mode << DS_GLOBAL_CFG_BROADCOM_SWITCH_MODE_BIT_OFFSET ) |
730 ( 1 << DS_GLOBAL_CFG_FLOW_CACHE_MODE_BIT_OFFSET ) |
731 ( g_bridge_flow_cache_mode << DS_GLOBAL_CFG_BRIDGE_FLOW_CACHE_MODE_BIT_OFFSET ) |
732 ( g_chip_revision << DS_GLOBAL_CFG_CHIP_REVISION_OFFSET );
733
734 global_register[ 3 ] = ( DS_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS << 16 ) | DS_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS;
735 global_register[ 4 ] = DOWNSTREAM_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS << 16 | DS_SQ_ENQUEUE_QUEUE_ADDRESS;
736 global_register[ 6 ] = ( DOWNSTREAM_MULTICAST_INGRESS_QUEUE_ADDRESS << 16 ) | DOWNSTREAM_MULTICAST_INGRESS_QUEUE_ADDRESS;
737
738 global_register_init_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_FAST_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS );
739
740 /* copy the global regsiters to the data SRAM, the firmware will load it from the SRAM at task -1 (initialization task) */
741 MWRITE_BLK_32( global_register_init_ptr, global_register, sizeof ( global_register ) );
742
743
744 /********** Fast Runner B **********/
745
746 /* zero all global registers */
747 memset ( global_register, 0, sizeof ( global_register ) );
748
749 /* R1 - constant one */
750 global_register[ 1 ] = 1;
751
752 /* R2 - head pointer of the free buffers pool stack */
753 global_register[ 2 ] = US_FREE_PACKET_DESCRIPTORS_POOL_ADDRESS;
754
755 /* R4 - Not used */
756
757 global_register[ 7 ] = ( g_broadcom_switch_mode << US_GLOBAL_CFG_BROADCOM_SWITCH_MODE_BIT_OFFSET ) |
758 ( g_chip_revision << US_GLOBAL_CFG_CHIP_REVISION_OFFSET );
759
760 global_register_init_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_FAST_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS );
761
762 /* copy the global regsiters to the data SRAM, the firmware will load it from the SRAM at task -1 (initialization task) */
763 MWRITE_BLK_32( global_register_init_ptr, global_register, sizeof ( global_register ) );
764
765
766 /********** Pico Runner A **********/
767
768 /* zero all global registers */
769 memset ( global_register, 0, sizeof ( global_register ) );
770
771 /* R1 - constant one */
772 global_register[ 1 ] = 1;
773
774 global_register[ 2 ] = ( g_chip_revision << DS_GLOBAL_CFG_CHIP_REVISION_OFFSET );
775
776 global_register_init_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_PICO_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS );
777
778 /* copy the global regsiters to the data SRAM, the firmware will load it from the SRAM at task -1 (initialization task) */
779 MWRITE_BLK_32( global_register_init_ptr, global_register, sizeof ( global_register ) );
780
781
782 /********** Pico Runner B **********/
783
784 /* zero all global registers */
785 memset ( global_register, 0, sizeof ( global_register ) );
786
787 /* R1 - constant one */
788 global_register[ 1 ] = 1;
789
790 global_register[ 3 ] = US_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS;
791 global_register[ 3 ] |= US_PARALLEL_PROCESSING_TASK_REORDER_FIFO_ADDRESS << 16;
792
793 /* R4 - context_index_cache_write_index */
794 global_register[ 4 ] = 0;
795 global_register[ 7 ] = ( g_broadcom_switch_mode << US_GLOBAL_CFG_BROADCOM_SWITCH_MODE_BIT_OFFSET ) |
796 ( g_chip_revision << US_GLOBAL_CFG_CHIP_REVISION_OFFSET );
797
798 global_register_init_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_PICO_RUNNER_GLOBAL_REGISTERS_INIT_ADDRESS );
799
800 MWRITE_BLK_32( global_register_init_ptr, global_register, sizeof ( global_register ) );
801
802 return ( BL_LILAC_RDD_OK );
803 }
804
805
806 /******************************************************************************/
807 /* */
808 /* Name: */
809 /* */
810 /* f_rdd_local_registers_initialize */
811 /* */
812 /* Title: */
813 /* */
/*   Runner Initialization - initialize context memories of 4 Runners       */
815 /* */
816 /* Abstract: */
817 /* */
818 /* initialize the local registers (R8-R31), 32 threads for fast Runners */
819 /* and 16 threads for Pico Runners */
820 /* */
821 /* Registers : */
822 /* */
823 /* Runners local registers (R8-R31) */
824 /* */
825 /* Input: */
826 /* */
827 /* none */
828 /* */
829 /* Output: */
830 /* */
831 /* none */
832 /* . */
833 /* */
834 /******************************************************************************/
static BL_LILAC_RDD_ERROR_DTE f_rdd_local_registers_initialize ( void )
{
    RUNNER_CNTXT_MAIN *sram_fast_context_ptr;
    RUNNER_CNTXT_PICO *sram_pico_context_ptr;
    /* static: 32x32x4 = 4 KB scratch image, kept off the stack; reused for all four runners */
    static uint32_t local_register[ 32 ][ 32 ];

    /* NOTE(review): for every thread below, CS_R16 is loaded with an
     * ADDRESS_OF(...) entry point shifted into the high 16 bits — this looks
     * like the thread's wakeup program counter; confirm against the runner
     * firmware register ABI before relying on it. */

    /********** Fast Runner A **********/

    sram_fast_context_ptr = ( RUNNER_CNTXT_MAIN * )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_0_OFFSET );

    /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
    MREAD_BLK_32( local_register, sram_fast_context_ptr, sizeof ( RUNNER_CNTXT_MAIN ) );

    /* CPU TX fast */
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, cpu_tx_wakeup_request) << 16;
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R8 ] = ( CPU_TX_FAST_QUEUE_ADDRESS << 16 );
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R9 ] = ( INGRESS_HANDLER_BUFFER_ADDRESS << 16 ) | DS_CPU_TX_BBH_DESCRIPTORS_ADDRESS;
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R10 ] = ( BBH_PERIPHERAL_IH << 16 ) | ( LILAC_RDD_IH_BUFFER_BBH_ADDRESS + LILAC_RDD_RUNNER_A_IH_BUFFER_BBH_OFFSET );
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R11 ] = ( BBH_PERIPHERAL_IH << 16 ) | LILAC_RDD_IH_HEADER_DESCRIPTOR_BBH_ADDRESS;

    /* CPU-RX */
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, cpu_rx_wakeup_request) << 16;
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R8 ] = CPU_RX_FAST_PD_INGRESS_QUEUE_ADDRESS;
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R9 ] = DS_CPU_RX_FAST_INGRESS_QUEUE_ADDRESS | ( DS_CPU_RX_PICO_INGRESS_QUEUE_ADDRESS << 16 );
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS | ( CPU_RX_SQ_PD_INGRESS_QUEUE_ADDRESS << 16 );
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R11 ] = DS_CPU_REASON_TO_METER_TABLE_ADDRESS | ( CPU_RX_PD_INGRESS_QUEUE_ADDRESS << 16 );

    /* Timer scheduler */
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, timer_scheduler_set) << 16;
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R19 ] = 0; /* RX_METER_INDEX */
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R21 ] = DS_CPU_RX_METER_TABLE_ADDRESS;

    /* DS Policers budget allocator */
    local_register[ POLICER_BUDGET_ALLOCATOR_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, policer_budget_allocator_1st_wakeup_request) << 16;


    /* WAN Filters and Classification */
    local_register[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, wan_normal_wakeup_request) << 16;
    local_register[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R8 ] = GPON_RX_NORMAL_DESCRIPTORS_ADDRESS << 16 | BBH_PERIPHERAL_WAN_RX;
    local_register[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R9 ] = CAM_RESULT_SLOT_1 | ( CAM_RESULT_IO_ADDRESS_1 << 16 );
    local_register[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS | CPU_REASON_WAN0_TABLE_INDEX << 16;
    local_register[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R13 ] = ( DMA_LOOKUP_RESULT_SLOT_0 << 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS | ( DMA_LOOKUP_RESULT_IO_ADDRESS_0 << 16 );
    local_register[ WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R14 ] = WAN_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER;


    /* WAN1 Filters and Classification */
    local_register[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, wan_normal_wakeup_request) << 16;
    local_register[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R8 ] = ( ETH0_RX_DESCRIPTORS_ADDRESS << 16 ) | BBH_PERIPHERAL_ETH0_RX;
    local_register[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R9 ] = CAM_RESULT_SLOT_2 | ( CAM_RESULT_IO_ADDRESS_2 << 16 );
    local_register[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS | ( CPU_REASON_WAN1_TABLE_INDEX << 16 );
    local_register[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R13 ] = ( DMA_LOOKUP_RESULT_SLOT_1 << 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS | ( DMA_LOOKUP_RESULT_IO_ADDRESS_1 << 16 );
    local_register[ WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R14 ] = WAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER;

    /* ETHWAN2 Filters and Classification.
     * Note it shares CAM_RESULT_SLOT_2 / DMA_LOOKUP_RESULT_SLOT_1 with the
     * WAN1 thread above. */
    // FIXME!!! since this is a different thread from WAN1_FILTER... doesn't it require its own CAM_RESULT and DMA_LOOKUP?
    local_register[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, ethwan2_normal_wakeup_request) << 16;
    local_register[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R8 ] = (ETHWAN2_RX_INGRESS_QUEUE_ADDRESS <<16 ) | ( 1 << WAN_FILTERS_AND_CLASSIFICATON_R8_ETHWAN2_INDICATION_OFFSET ) | BBH_PERIPHERAL_ETH0_RX;
    local_register[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R9 ] = CAM_RESULT_SLOT_2 | ( CAM_RESULT_IO_ADDRESS_2 << 16 );
    local_register[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS | ( CPU_REASON_WAN1_TABLE_INDEX << 16 );
    local_register[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R13 ] = ( DMA_LOOKUP_RESULT_SLOT_1 << 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS | ( DMA_LOOKUP_RESULT_IO_ADDRESS_1 << 16 );
    local_register[ ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER ][ CS_R14 ] = ETHWAN2_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER;

    /* FLOW_CACHE: four parallel slave threads; each slave N gets its own
     * connection buffer slice (offset by N * table stride), its own IH buffer
     * vector entry (base + N), and its own vector mask. */
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, flow_cache_wakeup_request) << 16;
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER ][ CS_R8 ] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS + 0 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0000);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER ][ CS_R9 ] = ( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS << 16 ) | ADDRESS_OF(runner_a, flow_cache_wakeup_request);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER ][ CS_R10 ] = ( FLOW_CACHE_SLAVE0_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, flow_cache_wakeup_request) << 16;
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER ][ CS_R8 ] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS + 1 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0010);

    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER ][ CS_R9 ] = (( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS + 1 ) << 16 ) | ADDRESS_OF(runner_a, flow_cache_wakeup_request);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER ][ CS_R10 ] = ( FLOW_CACHE_SLAVE1_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, flow_cache_wakeup_request) << 16;
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER ][ CS_R8 ] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS + 2 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0020);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER ][ CS_R9 ] = (( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS + 2 ) << 16 ) | ADDRESS_OF(runner_a, flow_cache_wakeup_request);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER ][ CS_R10 ] = ( FLOW_CACHE_SLAVE2_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, flow_cache_wakeup_request) << 16;
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER ][ CS_R8 ] = ((DS_CONNECTION_BUFFER_TABLE_ADDRESS + 3 * RDD_DS_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (DS_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0030);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER ][ CS_R9 ] = (( DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS + 3 ) << 16 ) | ADDRESS_OF(runner_a, flow_cache_wakeup_request);
    local_register[ DOWNSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER ][ CS_R10 ] = ( FLOW_CACHE_SLAVE3_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    /* Downstream Multicast */
    local_register[ DOWNSTREAM_MULTICAST_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, downstream_multicast_wakeup_request) << 16;
    local_register[ DOWNSTREAM_MULTICAST_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS;

    /* Free SKB index */
    local_register[ FREE_SKB_INDEX_FAST_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_a, free_skb_index_wakeup_request) << 16;

    /* commit the patched image back to Fast Runner A context memory */
    rdp_mm_cpyl_context ( sram_fast_context_ptr, local_register, sizeof ( RUNNER_CNTXT_MAIN ) );

    /********** Fast Runner B **********/

    sram_fast_context_ptr = ( RUNNER_CNTXT_MAIN * )DEVICE_ADDRESS( RUNNER_CNTXT_MAIN_1_OFFSET );

    /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
    MREAD_BLK_32( local_register, sram_fast_context_ptr, sizeof ( RUNNER_CNTXT_MAIN ) );

    /* CPU-TX */
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, cpu_tx_wakeup_request) << 16;
    local_register[ CPU_TX_FAST_THREAD_NUMBER ][ CS_R8 ] = CPU_TX_FAST_QUEUE_ADDRESS;

    /* CPU-RX */
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, cpu_rx_wakeup_request) << 16;
    /* NOTE(review): '+' here where the Runner A mirror of this line uses '|';
     * equivalent only if the low half never carries into bit 16 - verify */
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R9 ] = US_CPU_RX_FAST_INGRESS_QUEUE_ADDRESS + ( US_CPU_RX_PICO_INGRESS_QUEUE_ADDRESS << 16 );
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS | (CPU_RX_SQ_PD_INGRESS_QUEUE_ADDRESS << 16);
    local_register[ CPU_RX_THREAD_NUMBER ][ CS_R11 ] = US_CPU_REASON_TO_METER_TABLE_ADDRESS;

    /* upstream rate controllers budget allocator */
    local_register[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER ][ CS_R14 ] = US_RATE_CONTROLLER_EXPONENT_TABLE_ADDRESS;
    local_register[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, rate_control_budget_allocator_1st_wakeup_request) << 16;
    local_register[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER ][ CS_R18 ] = US_RATE_CONTROL_BUDGET_ALLOCATOR_TABLE_ADDRESS;
    local_register[ RATE_CONTROLLER_BUDGET_ALLOCATOR_THREAD_NUMBER ][ CS_R31 ] = 0; /* rate_controllers_group */

    /* Timer scheduler */
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, timer_scheduler_set) << 16;
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R19 ] = 0; /* RX_METER_INDEX */
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R20 ] = US_RATE_LIMITER_TABLE_ADDRESS;
    local_register[ TIMER_SCHEDULER_MAIN_THREAD_NUMBER ][ CS_R21 ] = US_CPU_RX_METER_TABLE_ADDRESS;

    /* US Policers budget allocator */
    local_register[ POLICER_BUDGET_ALLOCATOR_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, policer_budget_allocator_1st_wakeup_request) << 16;


    /* WAN1-TX */
    local_register[ WAN1_TX_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, wan_tx_wakeup_request) << 16;
    local_register[ WAN1_TX_THREAD_NUMBER ][ CS_R8 ] = ( RDD_WAN1_CHANNEL_BASE << 16 ) | ( DATA_POINTER_DUMMY_TARGET_ADDRESS + 4 );
    local_register[ WAN1_TX_THREAD_NUMBER ][ CS_R9 ] = ( ETHWAN_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS << 16 ) | ETHWAN_ABSOLUTE_TX_BBH_COUNTER_ADDRESS;

    /* WAN enqueue (Flow Cache) */
    local_register[ WAN_ENQUEUE_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, wan_interworking_enqueue_wakeup_request) << 16;
    local_register[ WAN_ENQUEUE_THREAD_NUMBER ][ CS_R9 ] = ( WAN_ENQUEUE_INGRESS_QUEUE_ADDRESS << 16 ) | ADDRESS_OF(runner_b, wan_interworking_enqueue_wakeup_request);
    local_register[ WAN_ENQUEUE_THREAD_NUMBER ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS;

    /* Timer 7 */
    local_register[ US_TIMER_7_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, timer_7_1st_wakeup_request) << 16;

    /* Free SKB index */
    local_register[ FREE_SKB_INDEX_FAST_THREAD_NUMBER ][ CS_R16 ] = ADDRESS_OF(runner_b, free_skb_index_wakeup_request) << 16;

    /* commit the patched image back to Fast Runner B context memory */
    rdp_mm_cpyl_context ( sram_fast_context_ptr, local_register, sizeof ( RUNNER_CNTXT_MAIN ) );

    /********** Pico Runner A **********/
    /* pico context tables hold threads 32..47, hence the "- 32" row index used below */

    sram_pico_context_ptr = ( RUNNER_CNTXT_PICO * )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_0_OFFSET );

    /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
    MREAD_BLK_32( local_register, sram_pico_context_ptr, sizeof ( RUNNER_CNTXT_PICO ) );

    /* CPU-TX */
    local_register[ CPU_TX_PICO_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, cpu_tx_wakeup_request) << 16;
    local_register[ CPU_TX_PICO_THREAD_NUMBER - 32 ][ CS_R9 ] = CPU_TX_PICO_QUEUE_ADDRESS;

    /* CPU-RX interrupt coalescing timer */
    local_register[ CPU_RX_INTERRUPT_COALESCING_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, cpu_rx_int_coalesce_timer_1st_wakeup_request) << 16;

    /* Timer scheduler */
    local_register[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, timer_scheduler_set) << 16;
    local_register[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER - 32 ][ CS_R19 ] = 0; /* rate limiter index */
    local_register[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER - 32 ][ CS_R20 ] = DS_RATE_LIMITER_TABLE_ADDRESS;
    local_register[ TIMER_SCHEDULER_PICO_A_THREAD_NUMBER - 32 ][ CS_R21 ] = RATE_LIMITER_REMAINDER_TABLE_ADDRESS;

    /* Local switching LAN enqueue */
    local_register[ LOCAL_SWITCHING_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R16 ] = ( ADDRESS_OF(runner_c, lan_enqueue_pd_wakeup_request) << 16 ) | ADDRESS_OF(runner_c, lan_enqueue_pd_wakeup_request);
    local_register[ LOCAL_SWITCHING_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R9 ] = LOCAL_SWITCHING_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS;
    local_register[ LOCAL_SWITCHING_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R10 ] = DOWNSTREAM_LAN_ENQUEUE_SQ_PD_ADDRESS;

    /* Downstream LAN enqueue */
    local_register[ DOWNSTREAM_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, lan_enqueue_ih_wakeup_request) << 16 | ADDRESS_OF(runner_c, lan_enqueue_ih_wakeup_request);
    local_register[ DOWNSTREAM_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R9 ] = DOWNSTREAM_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS << 16;
    local_register[ DOWNSTREAM_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS;

    /* Downstream multicast LAN enqueue */
    local_register[ DOWNSTREAM_MULTICAST_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R16 ] = ( ADDRESS_OF(runner_c, multicast_lan_enqueue_wakeup_request) << 16 ) | ADDRESS_OF(runner_c, multicast_lan_enqueue_wakeup_request);
    local_register[ DOWNSTREAM_MULTICAST_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R9 ] = DOWNSTREAM_MULTICAST_LAN_ENQUEUE_INGRESS_QUEUE_ADDRESS;
    local_register[ DOWNSTREAM_MULTICAST_LAN_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS;

    /* Free SKB index */
    local_register[ FREE_SKB_INDEX_PICO_A_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, free_skb_index_wakeup_request) << 16;
    local_register[ FREE_SKB_INDEX_PICO_A_THREAD_NUMBER - 32 ][ CS_R9 ] = 1; /* lag_port EMAC/BBH 1 */

    /* ETH-TX Inter LAN scheduling: thread 42 */
    local_register[ ETH_TX_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, lan_tx_wakeup_request) << 16 | (ADDRESS_OF(runner_c, lan_tx_wakeup_request) );
    local_register[ ETH_TX_THREAD_NUMBER - 32 ][ CS_R8 ] = 0; /* inter_lan_scheduling_offset */

    /* Timer 7 */
    local_register[ DS_TIMER_7_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, timer_7_1st_wakeup_request) << 16;

    /* Service Queue Enqueue: thread 44 */
    local_register[ SERVICE_QUEUE_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, service_queue_enqueue_wakeup_request) << 16;
    local_register[ SERVICE_QUEUE_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R9 ] = INGRESS_HANDLER_BUFFER_ADDRESS;
    local_register[ SERVICE_QUEUE_ENQUEUE_THREAD_NUMBER - 32 ][ CS_R10 ] = DS_SQ_ENQUEUE_QUEUE_ADDRESS;

    /* Service Queue Dequeue: thread 45 */
    local_register[ SERVICE_QUEUE_DEQUEUE_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_c, service_queue_dequeue_wakeup_request) << 16 | ADDRESS_OF(runner_c, service_queue_dequeue_wakeup_request);
    local_register[ SERVICE_QUEUE_DEQUEUE_THREAD_NUMBER - 32 ][ CS_R10 ] = DOWNSTREAM_LAN_ENQUEUE_SQ_PD_ADDRESS << 16;
    local_register[ SERVICE_QUEUE_DEQUEUE_THREAD_NUMBER - 32 ][ CS_R11 ] = CPU_RX_SQ_PD_INGRESS_QUEUE_ADDRESS;

    /* commit the patched image back to Pico Runner A context memory */
    rdp_mm_cpyl_context ( sram_pico_context_ptr, local_register, sizeof ( RUNNER_CNTXT_PICO ) );

    /********** Pico Runner B **********/

    sram_pico_context_ptr = ( RUNNER_CNTXT_PICO * )DEVICE_ADDRESS( RUNNER_CNTXT_PICO_1_OFFSET );

    /* read the local registers from the Context memory - maybe it was initialized by the ACE compiler */
    MREAD_BLK_32( local_register, sram_pico_context_ptr, sizeof ( RUNNER_CNTXT_PICO ) );

    /* Timer scheduler */
    local_register[ TIMER_SCHEDULER_PICO_B_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, timer_scheduler_set) << 16;
    local_register[ TIMER_SCHEDULER_PICO_B_THREAD_NUMBER - 32 ][ CS_R19 ] = 0; /* rate limiter index */
    local_register[ TIMER_SCHEDULER_PICO_B_THREAD_NUMBER - 32 ][ CS_R20 ] = US_RATE_LIMITER_TABLE_ADDRESS;

#if defined(DSL_63138) || defined(DSL_63148)
    /* LAN-1 Filters and Classification - used by CFE boot loader */
    local_register[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, lan_normal_wakeup_request) << 16;
    local_register[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER - 32 ][ CS_R8 ] = ETH1_RX_DESCRIPTORS_ADDRESS;
    local_register[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER - 32 ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS | ( HASH_RESULT_SLOT_1 << 16 ) | ( HASH_RESULT_IO_ADDRESS_1 << 24 );
    local_register[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER - 32 ][ CS_R12 ] = ( BBH_PERIPHERAL_ETH1_RX << 16 );
    local_register[ LAN1_FILTERS_AND_CLASSIFICATION_THREAD_NUMBER - 32 ][ CS_R13 ] = ( DMA_LOOKUP_RESULT_SLOT_3 << 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS | ( DMA_LOOKUP_RESULT_IO_ADDRESS_3 << 16 );
#endif

    /* Free SKB index */
    local_register[ FREE_SKB_INDEX_PICO_B_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, free_skb_index_wakeup_request) << 16;

    /* LAN Dispatch */
    local_register[ LAN_DISPATCH_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, lan_dispatch_wakeup_request) << 16;
    local_register[ LAN_DISPATCH_THREAD_NUMBER - 32 ][ CS_R10 ] = INGRESS_HANDLER_BUFFER_ADDRESS;
    local_register[ LAN_DISPATCH_THREAD_NUMBER - 32 ][ CS_R8 ] = ETH1_RX_DESCRIPTORS_ADDRESS;
    local_register[ LAN_DISPATCH_THREAD_NUMBER - 32 ][ CS_R15 ] = BBH_PERIPHERAL_ETH1_RX;
    local_register[ LAN_DISPATCH_THREAD_NUMBER - 32 ][ CS_R13 ] = ( DMA_LOOKUP_RESULT_SLOT_4 << 5 ) | DMA_LOOKUP_RESULT_FOUR_STEPS | ( DMA_LOOKUP_RESULT_IO_ADDRESS_4 << 16 );
    local_register[ LAN_DISPATCH_THREAD_NUMBER - 32 ][ CS_R14 ] = LAN_DISPATCH_THREAD_NUMBER;

    /* Upstream flow cache slaves: same per-slave slicing scheme as the
     * downstream slaves on Fast Runner A (buffer slice, vector entry, mask). */
    /* SLAVE0 */
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, flow_cache_wakeup_request) << 16;
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER - 32 ][ CS_R8 ] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS + 0 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0000);

    local_register[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER - 32 ][ CS_R9 ] = ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS << 16 );
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER - 32 ][ CS_R10 ] = ( FLOW_CACHE_SLAVE0_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    /* SLAVE1 */
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, flow_cache_wakeup_request) << 16;

    local_register[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER - 32 ][ CS_R8 ] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS + 1 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0010);

    local_register[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER - 32 ][ CS_R9 ] = ( ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS + 1 ) << 16 );
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER - 32 ][ CS_R10 ] = ( FLOW_CACHE_SLAVE1_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    /* SLAVE2 */
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, flow_cache_wakeup_request) << 16;
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER - 32 ][ CS_R8 ] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS + 2 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0020);

    local_register[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER - 32 ][ CS_R9 ] = ( ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS + 2 ) << 16 );
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER - 32 ][ CS_R10 ] = ( FLOW_CACHE_SLAVE2_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

    /* SLAVE3 */
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER - 32 ][ CS_R16 ] = ADDRESS_OF(runner_d, flow_cache_wakeup_request) << 16;

    local_register[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER - 32 ][ CS_R8 ] = (uint32_t) ((US_CONNECTION_BUFFER_TABLE_ADDRESS + 3 * RDD_US_CONNECTION_BUFFER_TABLE_SIZE2 * sizeof(RDD_CONNECTION_ENTRY_DTS)) << 16) | (US_L2_UCAST_CONNECTION_BUFFER_ADDRESS + 0x0030);

    local_register[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER - 32 ][ CS_R9 ] = ( ( US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS + 3 ) << 16 );
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER - 32 ][ CS_R10 ] = ( FLOW_CACHE_SLAVE3_VECTOR_MASK << 16 ) | INGRESS_HANDLER_BUFFER_ADDRESS;

#if defined(DSL_63138) || defined(DSL_63148)
    /* SLAVE 0-3: fold the per-slave PTM bonding TX header slice into the low half of R9 */
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE0_THREAD_NUMBER - 32 ][ CS_R9 ] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS + 0x00);
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE1_THREAD_NUMBER - 32 ][ CS_R9 ] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS + 0x10);
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE2_THREAD_NUMBER - 32 ][ CS_R9 ] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS + 0x20);
    local_register[ UPSTREAM_FLOW_CACHE_SLAVE3_THREAD_NUMBER - 32 ][ CS_R9 ] |= (DSL_PTM_BOND_TX_HDR_TABLE_ADDRESS + 0x30);
#endif

    /* commit the patched image back to Pico Runner B context memory */
    rdp_mm_cpyl_context ( sram_pico_context_ptr, local_register, sizeof ( RUNNER_CNTXT_PICO ) );

    return ( BL_LILAC_RDD_OK );
}
1111
1112 static BL_LILAC_RDD_ERROR_DTE f_rdd_transmit_from_abs_address_initialize ( void )
1113 {
1114 uint8_t *free_indexes_local_fifo_tail_ptr;
1115 uint16_t *free_indexes_fifo_tail_ptr;
1116 uint16_t skb_enqueued_indexes_fifo;
1117 uint16_t *skb_enqueued_indexes_fifo_ptr;
1118 uint8_t *absolute_tx_counters_ptr;
1119 uint16_t i;
1120 uint32_t *ddr_address_ptr;
1121 uint8_t skb_enqueued_indexes_fifo_size;
1122 uint8_t *skb_enqueued_indexes_fifo_counters_ptr;
1123
1124 #if !defined(FIRMWARE_INIT)
1125 bdmf_phys_addr_t phy_addr = 0;
1126
1127 /* allocate skb pointer array reference (used only by SW) */
1128 g_cpu_tx_skb_pointers_reference_array = (uint8_t **)KMALLOC(sizeof(uint8_t *) * g_cpu_tx_abs_packet_limit, 0);
1129 g_dhd_tx_cpu_usage_reference_array = (uint8_t *)KMALLOC(g_cpu_tx_abs_packet_limit, 0);
1130
1131 /* allocate data pointer array pointer (used both by SW & FW) */
1132 g_cpu_tx_data_pointers_reference_array = (rdd_phys_addr_t *)rdp_mm_aligned_alloc(sizeof(rdd_phys_addr_t) * g_cpu_tx_abs_packet_limit, &phy_addr);
1133
1134 ddr_address_ptr = (uint32_t *)(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET ) + DDR_ADDRESS_FOR_SKB_DATA_POINTERS_TABLE_ADDRESS );
1135 MWRITE_32(ddr_address_ptr, (uint32_t)phy_addr);
1136
1137 /* allocate Free Indexes table (used both by SW & FW) */
1138 g_free_skb_indexes_fifo_table = ( uint16_t * )rdp_mm_aligned_alloc( sizeof( uint16_t ) * g_cpu_tx_abs_packet_limit, &phy_addr );
1139
1140 g_free_skb_indexes_fifo_table_physical_address = (rdd_phys_addr_t)phy_addr;
1141 ddr_address_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET ) + DDR_ADDRESS_FOR_FREE_SKB_INDEXES_FIFO_TABLE_ADDRESS );
1142 MWRITE_32( ddr_address_ptr, g_free_skb_indexes_fifo_table_physical_address );
1143
1144 g_free_skb_indexes_fifo_table_physical_address_last_idx = g_free_skb_indexes_fifo_table_physical_address;
1145 g_free_skb_indexes_fifo_table_physical_address_last_idx += (g_cpu_tx_abs_packet_limit - 1) * sizeof(uint16_t);
1146 ddr_address_ptr = ( uint32_t * )(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET ) + DDR_ADDRESS_FOR_FREE_SKB_INDEXES_FIFO_TABLE_LAST_ENTRY_ADDRESS );
1147 MWRITE_32( ddr_address_ptr, g_free_skb_indexes_fifo_table_physical_address_last_idx );
1148
1149 /* Fill free indexes FIFO */
1150 for ( i = 0; i < g_cpu_tx_abs_packet_limit ; i++ )
1151 {
1152 g_free_skb_indexes_fifo_table[ i ] = swap2bytes( i );
1153 g_cpu_tx_data_pointers_reference_array[ i ] = 0;
1154 g_cpu_tx_skb_pointers_reference_array[ i ] = NULL;
1155 g_dhd_tx_cpu_usage_reference_array[ i ] = 0;
1156 }
1157 #endif
1158
1159 /* update all local tail pointers to 0 */
1160 free_indexes_local_fifo_tail_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_FAST_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS );
1161 MWRITE_8( free_indexes_local_fifo_tail_ptr, 0 );
1162 free_indexes_local_fifo_tail_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_PICO_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS );
1163 MWRITE_8( free_indexes_local_fifo_tail_ptr, 0 );
1164 free_indexes_local_fifo_tail_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_FAST_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS );
1165 MWRITE_8( free_indexes_local_fifo_tail_ptr, 0 );
1166 free_indexes_local_fifo_tail_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_PICO_FREE_SKB_INDEXES_FIFO_LOCAL_TABLE_PTR_ADDRESS );
1167 MWRITE_8( free_indexes_local_fifo_tail_ptr, 0 );
1168
1169 free_indexes_fifo_tail_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_COMMON_0_OFFSET ) + FREE_SKB_INDEXES_DDR_FIFO_TAIL_ADDRESS );
1170 MWRITE_32( free_indexes_fifo_tail_ptr, g_free_skb_indexes_fifo_table_physical_address );
1171
1172 /* Initialize pointers to EMAC enqueued indexes FIFO */
1173 skb_enqueued_indexes_fifo_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + EMAC_SKB_ENQUEUED_INDEXES_PUT_PTR_ADDRESS );
1174 skb_enqueued_indexes_fifo_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + EMAC_SKB_ENQUEUED_INDEXES_FIFO_COUNTERS_ADDRESS );
1175
1176 skb_enqueued_indexes_fifo = EMAC_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS;
1177
1178 for ( i = BL_LILAC_RDD_EMAC_ID_0; i <= BL_LILAC_RDD_EMAC_ID_4; i++ )
1179 {
1180 MWRITE_16( skb_enqueued_indexes_fifo_ptr, skb_enqueued_indexes_fifo );
1181 MWRITE_8(skb_enqueued_indexes_fifo_counters_ptr, 16);
1182
1183 skb_enqueued_indexes_fifo_ptr++;
1184 skb_enqueued_indexes_fifo_counters_ptr++;
1185
1186 skb_enqueued_indexes_fifo += 32;
1187 }
1188
1189 skb_enqueued_indexes_fifo_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + EMAC_SKB_ENQUEUED_INDEXES_FREE_PTR_ADDRESS );
1190
1191 skb_enqueued_indexes_fifo = EMAC_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS;
1192
1193 for ( i = BL_LILAC_RDD_EMAC_ID_0; i <= BL_LILAC_RDD_EMAC_ID_4; i++ )
1194 {
1195 MWRITE_16( skb_enqueued_indexes_fifo_ptr, skb_enqueued_indexes_fifo );
1196
1197 skb_enqueued_indexes_fifo_ptr++;
1198 skb_enqueued_indexes_fifo += 32;
1199 }
1200
1201 skb_enqueued_indexes_fifo_size = 32;
1202
1203 /* Initialize pointers to WAN enqueued indexes FIFO */
1204 skb_enqueued_indexes_fifo_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET ) + GPON_SKB_ENQUEUED_INDEXES_PUT_PTR_ADDRESS - sizeof ( RUNNER_COMMON ) );
1205
1206 skb_enqueued_indexes_fifo = GPON_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS;
1207
1208 for ( i = 0; i < ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE + RDD_WAN_CHANNELS_8_39_TABLE_SIZE ); i++ )
1209 {
1210 MWRITE_16( skb_enqueued_indexes_fifo_ptr, skb_enqueued_indexes_fifo );
1211
1212 skb_enqueued_indexes_fifo_ptr++;
1213 skb_enqueued_indexes_fifo += skb_enqueued_indexes_fifo_size;
1214 }
1215
1216
1217 skb_enqueued_indexes_fifo_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET ) + GPON_SKB_ENQUEUED_INDEXES_FREE_PTR_ADDRESS - sizeof ( RUNNER_COMMON ) );
1218
1219 skb_enqueued_indexes_fifo = GPON_SKB_ENQUEUED_INDEXES_FIFO_ADDRESS;
1220
1221 for ( i = 0; i < ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE + RDD_WAN_CHANNELS_8_39_TABLE_SIZE ); i++ )
1222 {
1223 MWRITE_16( skb_enqueued_indexes_fifo_ptr, skb_enqueued_indexes_fifo );
1224
1225 skb_enqueued_indexes_fifo_ptr++;
1226 skb_enqueued_indexes_fifo += skb_enqueued_indexes_fifo_size;
1227 }
1228
1229
1230 /* Initialize to (-1) 6-bit value BBH and FW absolute TX counters */
1231 absolute_tx_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + EMAC_ABSOLUTE_TX_BBH_COUNTER_ADDRESS );
1232
1233 for ( i = BL_LILAC_RDD_EMAC_ID_0; i <= BL_LILAC_RDD_EMAC_ID_4; i++ )
1234 {
1235 MWRITE_8( absolute_tx_counters_ptr, 0x3F );
1236 absolute_tx_counters_ptr += 8;
1237 }
1238
1239 absolute_tx_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + EMAC_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS );
1240
1241 for ( i = BL_LILAC_RDD_EMAC_ID_0; i <= BL_LILAC_RDD_EMAC_ID_4; i++ )
1242 {
1243 MWRITE_8( absolute_tx_counters_ptr, 0x3F );
1244 absolute_tx_counters_ptr ++;
1245 }
1246
1247 absolute_tx_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + GPON_ABSOLUTE_TX_BBH_COUNTER_ADDRESS );
1248
1249 for ( i = 0; i < ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE + RDD_WAN_CHANNELS_8_39_TABLE_SIZE ); i++ )
1250 {
1251 MWRITE_8( absolute_tx_counters_ptr, 0x3F );
1252 absolute_tx_counters_ptr ++;
1253 }
1254
1255 absolute_tx_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + GPON_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS );
1256
1257 for ( i = 0; i < ( RDD_WAN_CHANNELS_0_7_TABLE_SIZE + RDD_WAN_CHANNELS_8_39_TABLE_SIZE ); i++ )
1258 {
1259 MWRITE_8( absolute_tx_counters_ptr, 0x3F );
1260 absolute_tx_counters_ptr ++;
1261 }
1262
1263 absolute_tx_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + ETHWAN_ABSOLUTE_TX_BBH_COUNTER_ADDRESS );
1264 MWRITE_8( absolute_tx_counters_ptr, 0x3F );
1265
1266 absolute_tx_counters_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + ETHWAN_ABSOLUTE_TX_FIRMWARE_COUNTER_ADDRESS );
1267 MWRITE_8( absolute_tx_counters_ptr, 0x3F );
1268
1269 return ( BL_LILAC_RDD_OK );
1270 }
1271
1272 static BL_LILAC_RDD_ERROR_DTE f_rdd_ingress_classification_table_initialize ( void )
1273 {
1274 RDD_DS_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_DTS *ds_rule_cfg_table_ptr;
1275 RDD_US_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_DTS *us_rule_cfg_table_ptr;
1276 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_DTS *rule_cfg_entry_ptr;
1277 uint8_t *rule_cfg_descriptor_ptr;
1278 uint32_t rule_cfg_id;
1279
1280 for (rule_cfg_id = 0; rule_cfg_id < 16; rule_cfg_id++)
1281 {
1282 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].rule_cfg[ rule_cfg_id ].valid = 0;
1283 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].rule_cfg[ rule_cfg_id ].priority = -1;
1284 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].rule_cfg[ rule_cfg_id ].rule_type = 0;
1285 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].rule_cfg[ rule_cfg_id ].next_group_id = 16;
1286 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].rule_cfg[ rule_cfg_id ].next_rule_cfg_id = 16;
1287
1288 ds_rule_cfg_table_ptr = RDD_DS_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_PTR();
1289
1290 rule_cfg_entry_ptr = &( ds_rule_cfg_table_ptr->entry[ rule_cfg_id ] );
1291
1292 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_RULE_CFG_ID_WRITE ( 16, rule_cfg_entry_ptr );
1293 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_GROUP_ID_WRITE ( 16, rule_cfg_entry_ptr );
1294
1295 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].rule_cfg[ rule_cfg_id ].valid = 0;
1296 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].rule_cfg[ rule_cfg_id ].priority = -1;
1297 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].rule_cfg[ rule_cfg_id ].rule_type = 0;
1298 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].rule_cfg[ rule_cfg_id ].next_group_id = 16;
1299 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].rule_cfg[ rule_cfg_id ].next_rule_cfg_id = 16;
1300
1301 us_rule_cfg_table_ptr = RDD_US_INGRESS_CLASSIFICATION_RULE_CFG_TABLE_PTR();
1302
1303 rule_cfg_entry_ptr = &( us_rule_cfg_table_ptr->entry[ rule_cfg_id ] );
1304
1305 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_RULE_CFG_ID_WRITE ( 16, rule_cfg_entry_ptr );
1306 RDD_INGRESS_CLASSIFICATION_RULE_CFG_ENTRY_NEXT_GROUP_ID_WRITE ( 16, rule_cfg_entry_ptr );
1307 }
1308
1309 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].first_rule_cfg_id = 16;
1310 g_ingress_classification_rule_cfg_table[ rdpa_dir_ds ].first_gen_filter_rule_cfg_id = 16;
1311 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].first_rule_cfg_id = 16;
1312 g_ingress_classification_rule_cfg_table[ rdpa_dir_us ].first_gen_filter_rule_cfg_id = 16;
1313
1314 rule_cfg_descriptor_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_INGRESS_CLASSIFICATION_RULE_CFG_DESCRIPTOR_ADDRESS );
1315
1316 MWRITE_8( rule_cfg_descriptor_ptr, 16 );
1317
1318 rule_cfg_descriptor_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_INGRESS_CLASSIFICATION_RULE_CFG_DESCRIPTOR_ADDRESS );
1319
1320 MWRITE_8( rule_cfg_descriptor_ptr, 16 );
1321
1322 rule_cfg_descriptor_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_INGRESS_CLASSIFICATION_IP_FLOW_RULE_CFG_DESCRIPTOR_ADDRESS );
1323
1324 MWRITE_8( rule_cfg_descriptor_ptr, 16 );
1325
1326 rule_cfg_descriptor_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_INGRESS_CLASSIFICATION_IP_FLOW_RULE_CFG_DESCRIPTOR_ADDRESS );
1327
1328 MWRITE_8( rule_cfg_descriptor_ptr, 16 );
1329 return ( BL_LILAC_RDD_OK );
1330 }
1331
1332 static BL_LILAC_RDD_ERROR_DTE f_rdd_eth_tx_initialize ( void )
1333 {
1334 RDD_ETH_TX_MAC_TABLE_DTS *eth_tx_mac_table;
1335 RDD_ETH_TX_MAC_DESCRIPTOR_DTS *eth_tx_mac_descriptor;
1336 RDD_ETH_TX_QUEUES_TABLE_DTS *eth_tx_queues_table;
1337 RDD_ETH_TX_QUEUE_DESCRIPTOR_DTS *eth_tx_queue_descriptor;
1338 RDD_ETH_TX_QUEUES_POINTERS_TABLE_DTS *eth_tx_queues_pointers_table;
1339 RDD_ETH_TX_QUEUE_POINTERS_ENTRY_DTS *eth_tx_queue_pointers_entry;
1340 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS *free_packet_descriptors_pool_descriptor;
1341 RDD_ETH_TX_LOCAL_REGISTERS_DTS *eth_tx_local_registers;
1342 RDD_ETH_TX_LOCAL_REGISTERS_ENTRY_DTS *eth_tx_local_registers_entry;
1343 uint16_t eth_tx_queue_address;
1344 uint16_t mac_descriptor_address;
1345 uint32_t emac;
1346 uint32_t tx_queue;
1347
1348 eth_tx_mac_table = RDD_ETH_TX_MAC_TABLE_PTR();
1349
1350 eth_tx_queues_table = RDD_ETH_TX_QUEUES_TABLE_PTR();
1351
1352 eth_tx_queues_pointers_table = RDD_ETH_TX_QUEUES_POINTERS_TABLE_PTR();
1353
1354 eth_tx_local_registers = RDD_ETH_TX_LOCAL_REGISTERS_PTR();
1355
1356 for (emac = BL_LILAC_RDD_EMAC_ID_0; emac < BL_LILAC_RDD_EMAC_ID_COUNT; emac++)
1357 {
1358 eth_tx_mac_descriptor = &(eth_tx_mac_table->entry[emac]);
1359
1360 RDD_ETH_TX_MAC_DESCRIPTOR_TX_TASK_NUMBER_WRITE(ETH_TX_THREAD_NUMBER, eth_tx_mac_descriptor);
1361 RDD_ETH_TX_MAC_DESCRIPTOR_EMAC_MASK_WRITE((1 << emac), eth_tx_mac_descriptor);
1362 RDD_ETH_TX_MAC_DESCRIPTOR_GPIO_FLOW_CONTROL_VECTOR_PTR_WRITE((RDD_GPIO_IO_ADDRESS + (emac - BL_LILAC_RDD_EMAC_ID_0)), eth_tx_mac_descriptor);
1363 RDD_ETH_TX_MAC_DESCRIPTOR_PACKET_COUNTERS_PTR_1_WRITE(ETH_TX_MAC_TABLE_ADDRESS +
1364 BL_LILAC_RDD_EMAC_ID_1 * sizeof(RDD_ETH_TX_MAC_DESCRIPTOR_DTS) + RDD_EMAC_DESCRIPTOR_EGRESS_COUNTER_OFFSET,
1365 eth_tx_mac_descriptor);
1366 RDD_ETH_TX_MAC_DESCRIPTOR_BBH_DESTINATION_1_WRITE(BBH_PERIPHERAL_ETH1_TX, eth_tx_mac_descriptor);
1367 RDD_ETH_TX_MAC_DESCRIPTOR_EGRESS_PORT_WRITE((emac - BL_LILAC_RDD_EMAC_ID_0), eth_tx_mac_descriptor);
1368 RDD_ETH_TX_MAC_DESCRIPTOR_RATE_LIMITER_ID_WRITE(RDD_RATE_LIMITER_IDLE, eth_tx_mac_descriptor);
1369
1370 for (tx_queue = 0; tx_queue < RDD_EMAC_NUMBER_OF_QUEUES; tx_queue++)
1371 {
1372 eth_tx_queue_address = ETH_TX_QUEUES_TABLE_ADDRESS +
1373 ((emac - BL_LILAC_RDD_EMAC_ID_0) * RDD_EMAC_NUMBER_OF_QUEUES + tx_queue) * sizeof(RDD_ETH_TX_QUEUE_DESCRIPTOR_DTS);
1374
1375 mac_descriptor_address = ETH_TX_MAC_TABLE_ADDRESS + emac * sizeof(RDD_ETH_TX_MAC_DESCRIPTOR_DTS);
1376
1377 eth_tx_queue_pointers_entry =
1378 &(eth_tx_queues_pointers_table->entry[emac * RDD_EMAC_NUMBER_OF_QUEUES + tx_queue]);
1379
1380 RDD_ETH_TX_QUEUE_POINTERS_ENTRY_ETH_MAC_POINTER_WRITE(mac_descriptor_address, eth_tx_queue_pointers_entry);
1381 RDD_ETH_TX_QUEUE_POINTERS_ENTRY_TX_QUEUE_POINTER_WRITE(eth_tx_queue_address, eth_tx_queue_pointers_entry);
1382
1383 eth_tx_queue_descriptor = &(eth_tx_queues_table->entry[(emac - BL_LILAC_RDD_EMAC_ID_0) * RDD_EMAC_NUMBER_OF_QUEUES + tx_queue]);
1384
1385 RDD_ETH_TX_QUEUE_DESCRIPTOR_QUEUE_MASK_WRITE(1 << tx_queue , eth_tx_queue_descriptor);
1386 RDD_ETH_TX_QUEUE_DESCRIPTOR_INDEX_WRITE((emac * RDD_EMAC_NUMBER_OF_QUEUES) + tx_queue, eth_tx_queue_descriptor);
1387 }
1388 eth_tx_local_registers_entry = &(eth_tx_local_registers->entry[emac]);
1389
1390 RDD_ETH_TX_LOCAL_REGISTERS_ENTRY_EMAC_DESCRIPTOR_PTR_WRITE(ETH_TX_MAC_TABLE_ADDRESS +
1391 emac * sizeof(RDD_ETH_TX_MAC_DESCRIPTOR_DTS), eth_tx_local_registers_entry);
1392
1393 RDD_ETH_TX_LOCAL_REGISTERS_ENTRY_ETH_TX_QUEUES_POINTERS_TABLE_PTR_WRITE(ETH_TX_QUEUES_POINTERS_TABLE_ADDRESS +
1394 emac * RDD_EMAC_NUMBER_OF_QUEUES * sizeof(RDD_ETH_TX_QUEUE_POINTERS_ENTRY_DTS), eth_tx_local_registers_entry);
1395 }
1396
1397 free_packet_descriptors_pool_descriptor =
1398 (RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_DTS *)(DEVICE_ADDRESS(RUNNER_PRIVATE_0_OFFSET) +
1399 FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ADDRESS);
1400
1401 /*Initial values, will be updated by rdd_tm_ds_free_packet_descriptors_pool_size_update.*/
1402 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_THRESHOLD_WRITE ( DS_FREE_PACKET_DESCRIPTOR_POOL_GUARANTEED_QUEUE_THRESHOLD, free_packet_descriptors_pool_descriptor );
1403 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_GUARANTEED_FREE_COUNT_WRITE (DS_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE, free_packet_descriptors_pool_descriptor );
1404 RDD_FREE_PACKET_DESCRIPTORS_POOL_DESCRIPTOR_ENTRY_NON_GUARANTEED_FREE_COUNT_WRITE ( RDD_DS_FREE_PACKET_DESCRIPTORS_POOL_SIZE - DS_FREE_PACKET_DESCRIPTOR_POOL_MIN_GUARANTEED_POOL_SIZE, free_packet_descriptors_pool_descriptor );
1405
1406 return ( BL_LILAC_RDD_OK );
1407 }
1408
1409
1410
1411 static BL_LILAC_RDD_ERROR_DTE f_rdd_wan_tx_initialize ( void )
1412 {
1413 RDD_WAN_CHANNELS_0_7_TABLE_DTS *wan_channels_0_7_table_ptr;
1414 RDD_WAN_CHANNEL_0_7_DESCRIPTOR_DTS *wan_channel_0_7_descriptor_ptr;
1415 RDD_WAN_CHANNELS_8_39_TABLE_DTS *wan_channels_8_39_table_ptr;
1416 RDD_WAN_CHANNEL_8_39_DESCRIPTOR_DTS *wan_channel_8_39_descriptor_ptr;
1417 RDD_US_RATE_CONTROLLER_DESCRIPTOR_DTS *dummy_rate_controller_descriptor_ptr;
1418 RDD_WAN_TX_QUEUE_DESCRIPTOR_DTS *dummy_wan_tx_queue_descriptor_ptr;
1419 RDD_RATE_CONTROLLER_EXPONENT_TABLE_DTS *exponent_table_ptr;
1420 RDD_RATE_CONTROLLER_EXPONENT_ENTRY_DTS *exponent_entry_ptr;
1421 uint32_t wan_channel_id;
1422 uint32_t rate_controller_id;
1423 uint32_t tx_queue_id;
1424
1425 /* initialize WAN TX pointers table */
1426 wan_tx_pointers_table_ptr = ( RDD_WAN_TX_POINTERS_TABLE_DTS * )malloc( sizeof( RDD_WAN_TX_POINTERS_TABLE_DTS ) );
1427
1428 if ( wan_tx_pointers_table_ptr == NULL)
1429 {
1430 return ( BL_LILAC_RDD_ERROR_MALLOC_FAILED );
1431 }
1432
1433 memset ( wan_tx_pointers_table_ptr, 0, sizeof ( RDD_WAN_TX_POINTERS_TABLE_DTS ) );
1434
1435 /* reset the dummy segmentation descriptors threshold to zero in order to drop packets */
1436 dummy_wan_tx_queue_descriptor_ptr = ( RDD_WAN_TX_QUEUE_DESCRIPTOR_DTS * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET ) + DUMMY_WAN_TX_QUEUE_DESCRIPTOR_ADDRESS - sizeof ( RUNNER_COMMON ) );
1437
1438 RDD_WAN_TX_QUEUE_DESCRIPTOR_PACKET_THRESHOLD_WRITE ( 0, dummy_wan_tx_queue_descriptor_ptr );
1439 RDD_WAN_TX_QUEUE_DESCRIPTOR_PROFILE_PTR_WRITE ( 0, dummy_wan_tx_queue_descriptor_ptr );
1440
1441 /* all the queues of the dummy rate controller will point to the dummy queue */
1442 dummy_rate_controller_descriptor_ptr = ( RDD_US_RATE_CONTROLLER_DESCRIPTOR_DTS * )(DEVICE_ADDRESS( RUNNER_COMMON_1_OFFSET ) + DUMMY_RATE_CONTROLLER_DESCRIPTOR_ADDRESS - sizeof ( RUNNER_COMMON ) );
1443
1444 for ( tx_queue_id = 0; tx_queue_id < RDD_US_RATE_CONTROLLER_DESCRIPTOR_TX_QUEUE_ADDR_NUMBER; tx_queue_id++ )
1445 {
1446 RDD_US_RATE_CONTROLLER_DESCRIPTOR_TX_QUEUE_ADDR_WRITE ( DUMMY_WAN_TX_QUEUE_DESCRIPTOR_ADDRESS, dummy_rate_controller_descriptor_ptr, tx_queue_id );
1447 }
1448
1449 /* connect all the tconts to the dummy rate rate controller */
1450 wan_channels_0_7_table_ptr = ( RDD_WAN_CHANNELS_0_7_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + WAN_CHANNELS_0_7_TABLE_ADDRESS );
1451
1452 for ( wan_channel_id = RDD_WAN_CHANNEL_0; wan_channel_id <= RDD_WAN_CHANNEL_7; wan_channel_id++ )
1453 {
1454 wan_channel_0_7_descriptor_ptr = &( wan_channels_0_7_table_ptr->entry[ wan_channel_id ] );
1455
1456 for ( rate_controller_id = BL_LILAC_RDD_RATE_CONTROLLER_0; rate_controller_id <= BL_LILAC_RDD_RATE_CONTROLLER_31; rate_controller_id++ )
1457 {
1458 RDD_WAN_CHANNEL_0_7_DESCRIPTOR_RATE_CONTROLLER_ADDR_WRITE ( DUMMY_RATE_CONTROLLER_DESCRIPTOR_ADDRESS, wan_channel_0_7_descriptor_ptr, rate_controller_id );
1459 }
1460 }
1461
1462 wan_channels_8_39_table_ptr = ( RDD_WAN_CHANNELS_8_39_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + WAN_CHANNELS_8_39_TABLE_ADDRESS );
1463
1464 for ( wan_channel_id = RDD_WAN_CHANNEL_8; wan_channel_id <= RDD_WAN_CHANNEL_39; wan_channel_id++ )
1465 {
1466 wan_channel_8_39_descriptor_ptr = &( wan_channels_8_39_table_ptr->entry[ wan_channel_id - RDD_WAN_CHANNEL_8 ] );
1467
1468 for ( rate_controller_id = BL_LILAC_RDD_RATE_CONTROLLER_0; rate_controller_id <= BL_LILAC_RDD_RATE_CONTROLLER_3; rate_controller_id++ )
1469 {
1470 RDD_WAN_CHANNEL_8_39_DESCRIPTOR_RATE_CONTROLLER_ADDR_WRITE ( DUMMY_RATE_CONTROLLER_DESCRIPTOR_ADDRESS, wan_channel_8_39_descriptor_ptr, rate_controller_id );
1471 }
1472 }
1473
1474 g_rate_controllers_pool_idx = 0;
1475
1476 /* initialize exponents table */
1477 exponent_table_ptr = ( RDD_RATE_CONTROLLER_EXPONENT_TABLE_DTS * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_RATE_CONTROLLER_EXPONENT_TABLE_ADDRESS );
1478
1479 exponent_entry_ptr = &( exponent_table_ptr->entry[ 0 ] );
1480 RDD_RATE_CONTROLLER_EXPONENT_ENTRY_EXPONENT_WRITE ( RDD_RATE_CONTROL_EXPONENT0, exponent_entry_ptr );
1481
1482 exponent_entry_ptr = &( exponent_table_ptr->entry[ 1 ] );
1483 RDD_RATE_CONTROLLER_EXPONENT_ENTRY_EXPONENT_WRITE ( RDD_RATE_CONTROL_EXPONENT1, exponent_entry_ptr );
1484
1485 exponent_entry_ptr = &( exponent_table_ptr->entry[ 2 ] );
1486 RDD_RATE_CONTROLLER_EXPONENT_ENTRY_EXPONENT_WRITE ( RDD_RATE_CONTROL_EXPONENT2, exponent_entry_ptr );
1487
1488 return ( BL_LILAC_RDD_OK );
1489 }
1490
1491
1492 static BL_LILAC_RDD_ERROR_DTE f_rdd_inter_task_queues_initialize ( void )
1493 {
1494 uint16_t *wan_enqueue_ingress_queue_ptr;
1495 uint16_t *ethwan2_rx_ingress_queue_ptr;
1496
1497 wan_enqueue_ingress_queue_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + WAN_ENQUEUE_INGRESS_QUEUE_PTR_ADDRESS );
1498 MWRITE_16( wan_enqueue_ingress_queue_ptr, WAN_ENQUEUE_INGRESS_QUEUE_ADDRESS );
1499
1500 ethwan2_rx_ingress_queue_ptr = ( uint16_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + ETHWAN2_RX_INGRESS_QUEUE_PTR_ADDRESS );
1501 MWRITE_16( ethwan2_rx_ingress_queue_ptr, ETHWAN2_RX_INGRESS_QUEUE_ADDRESS );
1502
1503 return ( BL_LILAC_RDD_OK );
1504 }
1505
1506
1507 static BL_LILAC_RDD_ERROR_DTE f_rdd_pm_counters_initialize ( void )
1508 {
1509 RUNNER_REGS_CFG_CNTR_CFG runner_counter_cfg_register;
1510
1511 runner_counter_cfg_register.base_address = ( PM_COUNTERS_ADDRESS >> 3 );
1512
1513 RUNNER_REGS_0_CFG_CNTR_CFG_WRITE ( runner_counter_cfg_register );
1514 RUNNER_REGS_1_CFG_CNTR_CFG_WRITE ( runner_counter_cfg_register );
1515
1516 return ( BL_LILAC_RDD_OK );
1517 }
1518
1519
/* Initializes the downstream and upstream parallel-processing state:
 * marks all four slave engines available, publishes the IH buffer vector
 * pointer, invalidates every context-index cache CAM entry (0xFFFF), and
 * enables the context cache.  DS and US are configured symmetrically from
 * runner private memory 0 and 1 respectively.
 *
 * Returns: BL_LILAC_RDD_OK (cannot fail).
 */
static BL_LILAC_RDD_ERROR_DTE f_rdd_parallel_processing_initialize ( void )
{
    RDD_DS_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_DTS *ds_context_index_cache_cam;
    RDD_US_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_DTS *us_context_index_cache_cam;
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS *ds_available_slave_vector_ptr;
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS *us_available_slave_vector_ptr;
    RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS *ds_slave_ih_buffer_ptr;
    RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS *us_slave_ih_buffer_ptr;
    uint16_t *ds_context_index_cache_cam_entry;
    uint16_t *us_context_index_cache_cam_entry;
    uint8_t *context_cache_state_ptr;
    uint8_t i;

    /* downstream */
    /* all four DS slave engines start out available */
    ds_available_slave_vector_ptr = ( RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS * )( DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_PARALLEL_PROCESSING_SLAVE_VECTOR_ADDRESS );

    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE0_WRITE ( LILAC_RDD_TRUE, ds_available_slave_vector_ptr );
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE1_WRITE ( LILAC_RDD_TRUE, ds_available_slave_vector_ptr );
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE2_WRITE ( LILAC_RDD_TRUE, ds_available_slave_vector_ptr );
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE3_WRITE ( LILAC_RDD_TRUE, ds_available_slave_vector_ptr );

    /* publish the DS IH buffer vector address */
    ds_slave_ih_buffer_ptr = ( RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS * )( DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_PTR_ADDRESS );

    MWRITE_16( ds_slave_ih_buffer_ptr, DS_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS );

    ds_context_index_cache_cam = RDD_DS_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_PTR();

    /* invalidate all DS context-index cache CAM entries (0xFFFF = no match) */
    for ( i = 0; i < RDD_DS_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_SIZE; i++ )
    {
        ds_context_index_cache_cam_entry = ( uint16_t * ) &ds_context_index_cache_cam->entry[ i ];

        MWRITE_16( ds_context_index_cache_cam_entry, 0xFFFF );
    }

    /* set context cache in enable mode (0 = enabled) */
    context_cache_state_ptr = ( uint8_t * )( DEVICE_ADDRESS( RUNNER_PRIVATE_0_OFFSET ) + DS_PARALLEL_PROCESSING_CONTEXT_CACHE_MODE_ADDRESS );

    MWRITE_8( context_cache_state_ptr, 0x0 );

    /* upstream - mirror of the DS sequence above */
    us_available_slave_vector_ptr = ( RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_DTS * )( DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_PARALLEL_PROCESSING_SLAVE_VECTOR_ADDRESS );

    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE0_WRITE ( LILAC_RDD_TRUE, us_available_slave_vector_ptr );
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE1_WRITE ( LILAC_RDD_TRUE, us_available_slave_vector_ptr );
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE2_WRITE ( LILAC_RDD_TRUE, us_available_slave_vector_ptr );
    RDD_PARALLEL_PROCESSING_SLAVE_VECTOR_AVAILABLE_SLAVE3_WRITE ( LILAC_RDD_TRUE, us_available_slave_vector_ptr );

    us_slave_ih_buffer_ptr = ( RDD_PARALLEL_PROCESSING_IH_BUFFER_PTR_DTS * )( DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_PTR_ADDRESS );

    MWRITE_16( us_slave_ih_buffer_ptr, US_PARALLEL_PROCESSING_IH_BUFFER_VECTOR_ADDRESS );

    us_context_index_cache_cam = RDD_US_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_PTR();

    for ( i = 0; i < RDD_US_PARALLEL_PROCESSING_CONTEXT_INDEX_CACHE_CAM_SIZE; i++ )
    {
        us_context_index_cache_cam_entry = ( uint16_t * ) &us_context_index_cache_cam->entry[ i ];

        MWRITE_16( us_context_index_cache_cam_entry, 0xFFFF );
    }

    /* set context cache in enable mode (0 = enabled) */
    context_cache_state_ptr = ( uint8_t * )( DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + US_PARALLEL_PROCESSING_CONTEXT_CACHE_MODE_ADDRESS );

    MWRITE_8( context_cache_state_ptr, 0x0 );

    return ( BL_LILAC_RDD_OK );
}
1587
1588 BL_LILAC_RDD_ERROR_DTE rdd_ethwan2_switch_port_config ( uint8_t xi_switch_port )
1589 {
1590 uint8_t *ethwan2_switch_port_config_ptr;
1591
1592 ethwan2_switch_port_config_ptr = ( uint8_t * )(DEVICE_ADDRESS( RUNNER_PRIVATE_1_OFFSET ) + ETHWAN2_SWITCH_PORT_ADDRESS );
1593 MWRITE_8( ethwan2_switch_port_config_ptr, xi_switch_port );
1594 return ( BL_LILAC_RDD_OK );
1595 }