Date:    Sun, 11 Jul 2021
From:    kernel test robot <lkp@intel.com>
Subject: [ti:ti-linux-5.10.y 6302/6309] drivers/net/ethernet/ti/prueth_lre.c:120:6: warning: no previous prototype for 'pru_spin_lock'

tree:   git://git.ti.com/ti-linux-kernel/ti-linux-kernel.git ti-linux-5.10.y
head: 31b50abb3ee1c8f78cb2d61c6fbbf074c7f5d99f
commit: d2e8eb5a46ec7216223407be3d3840648f554be0 [6302/6309] net: ti: prueth_core: hsr/prp: add HSR/PRP driver
config: arm64-allyesconfig (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        git remote add ti git://git.ti.com/ti-linux-kernel/ti-linux-kernel.git
        git fetch --no-tags ti ti-linux-5.10.y
        git checkout d2e8eb5a46ec7216223407be3d3840648f554be0
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> drivers/net/ethernet/ti/prueth_lre.c:120:6: warning: no previous prototype for 'pru_spin_lock' [-Wmissing-prototypes]
     120 | void pru_spin_lock(struct node_tbl *nt)
         |      ^~~~~~~~~~~~~
>> drivers/net/ethernet/ti/prueth_lre.c:267:6: warning: no previous prototype for 'node_table_update_time' [-Wmissing-prototypes]
     267 | void node_table_update_time(struct node_tbl *nt)
         |      ^~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/ti/prueth_lre.c:470:6: warning: no previous prototype for 'node_table_check_and_remove' [-Wmissing-prototypes]
     470 | void node_table_check_and_remove(struct node_tbl *nt, u16 forget_time)
         |      ^~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/ti/prueth_lre.c:530:6: warning: no previous prototype for 'pop_queue_process' [-Wmissing-prototypes]
     530 | void pop_queue_process(struct prueth *prueth, spinlock_t *lock)
         |      ^~~~~~~~~~~~~~~~~
   drivers/net/ethernet/ti/prueth_lre.c: In function 'prueth_lre_emac_rx_packets':
>> drivers/net/ethernet/ti/prueth_lre.c:706:36: warning: variable 'queue_desc_p' set but not used [-Wunused-but-set-variable]
     706 |         struct prueth_queue_desc __iomem *queue_desc_p;
         |                                                   ^~~~~~~~~~~~
>> drivers/net/ethernet/ti/prueth_lre.c:705:43: warning: variable 'status_o' set but not used [-Wunused-but-set-variable]
     705 |         u8 overflow_cnt, overflow_cnt_o, status, status_o;
         |                                                          ^~~~~~~~
>> drivers/net/ethernet/ti/prueth_lre.c:705:35: warning: variable 'status' set but not used [-Wunused-but-set-variable]
     705 |         u8 overflow_cnt, overflow_cnt_o, status, status_o;
         |                                                  ^~~~~~

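The -Wmissing-prototypes warnings typically go away once the non-static
functions have a prototype visible at their definition, for example declared
in a driver header that prueth_lre.c includes, or once file-local functions
are made static. A minimal sketch of the header approach, assuming a
driver-local header (the header name below is illustrative only, not the
actual TI fix):

    /* drivers/net/ethernet/ti/prueth_lre.h -- name assumed for illustration;
     * relies on the driver's existing struct node_tbl and struct prueth types.
     * Declaring the flagged non-static functions here, and including this
     * header from prueth_lre.c, gives each definition a previous prototype.
     */
    void pru_spin_lock(struct node_tbl *nt);
    void node_table_update_time(struct node_tbl *nt);
    void node_table_check_and_remove(struct node_tbl *nt, u16 forget_time);
    void pop_queue_process(struct prueth *prueth, spinlock_t *lock);

For the -Wunused-but-set-variable warnings at lines 705-706, the usual options
are to drop 'queue_desc_p', 'status' and 'status_o' if their values are never
read, or to keep them only if the missing reads are added back.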

vim +/pru_spin_lock +120 drivers/net/ethernet/ti/prueth_lre.c

   119
 > 120  void pru_spin_lock(struct node_tbl *nt)
   121  {
   122          while (1) {
   123                  nt->nt_info->arm_lock = 1;
   124                  if (!nt->nt_info->fw_lock)
   125                          break;
   126                  nt->nt_info->arm_lock = 0;
   127          }
   128  }
   129
   130  static inline void pru_spin_unlock(struct node_tbl *nt)
   131  {
   132          nt->nt_info->arm_lock = 0;
   133  }
   134
   135  int prueth_lre_nt_insert(struct prueth *prueth,
   136                           u8 *mac, int port, int sv_frame, int proto)
   137  {
   138          struct nt_queue_t *q = prueth->mac_queue;
   139          unsigned long flags;
   140          int ret = LRE_OK;
   141
   142          /* Will encounter a null mac_queue if we are in the middle of
   143           * ndo_close. So check and return. Otherwise a kernel crash is
   144           * seen when doing ifdown continuously.
   145           */
   146          if (!q)
   147                  return ret;
   148
   149          spin_lock_irqsave(&prueth->nt_lock, flags);
   150          if (q->full) {
   151                  ret = LRE_ERR;
   152          } else {
   153                  memcpy(q->nt_queue[q->wr_ind].mac, mac, ETH_ALEN);
   154                  q->nt_queue[q->wr_ind].sv_frame = sv_frame;
   155                  q->nt_queue[q->wr_ind].port_id = port;
   156                  q->nt_queue[q->wr_ind].proto = proto;
   157
   158                  q->wr_ind++;
   159                  q->wr_ind &= (PRUETH_MAC_QUEUE_MAX - 1);
   160                  if (q->wr_ind == q->rd_ind)
   161                          q->full = true;
   162          }
   163          spin_unlock_irqrestore(&prueth->nt_lock, flags);
   164
   165          return ret;
   166  }
   167
   168  static inline bool node_expired(struct node_tbl *nt, u16 node, u16 forget_time)
   169  {
   170          struct node_tbl_t nt_node = nt->nt_array->node_tbl[node];
   171
   172          return ((nt_node.time_last_seen_s > forget_time ||
   173                   nt_node.status & ICSS_LRE_NT_REM_NODE_TYPE_SANAB) &&
   174                  nt_node.time_last_seen_a > forget_time &&
   175                  nt_node.time_last_seen_b > forget_time);
   176  }
   177
   178  #define IND_BIN_NO(x) nt->index_array->index_tbl[x].bin_no_entries
   179  #define IND_BINOFS(x) nt->index_array->index_tbl[x].bin_offset
   180  #define BIN_NODEOFS(x) nt->bin_array->bin_tbl[x].node_tbl_offset
   181
   182  static void _prueth_lre_init_node_table(struct prueth *prueth)
   183  {
   184          struct nt_queue_t *q = prueth->mac_queue;
   185          struct node_tbl *nt = prueth->nt;
   186          int j;
   187
   188          const struct prueth_fw_offsets *fw_offsets = prueth->fw_offsets;
   189
   190          nt->nt_array = prueth->mem[fw_offsets->nt_array_loc].va +
   191                         fw_offsets->nt_array_offset;
   192          memset_io(nt->nt_array, 0, sizeof(struct node_tbl_t) *
   193                    fw_offsets->nt_array_max_entries);
   194
   195          nt->bin_array = prueth->mem[fw_offsets->bin_array_loc].va +
   196                          fw_offsets->bin_array_offset;
   197          memset_io(nt->bin_array, 0, sizeof(struct bin_tbl_t) *
   198                    fw_offsets->bin_array_max_entries);
   199
   200          nt->index_array = prueth->mem[fw_offsets->index_array_loc].va +
   201                            fw_offsets->index_array_offset;
   202          memset_io(nt->index_array, 0, sizeof(struct node_index_tbl_t) *
   203                    fw_offsets->index_array_max_entries);
   204
   205          nt->nt_info = prueth->mem[fw_offsets->nt_array_loc].va +
   206                        fw_offsets->nt_array_offset +
   207                        (sizeof(struct node_tbl_t) *
   208                         fw_offsets->nt_array_max_entries);
   209          memset_io(nt->nt_info, 0, sizeof(struct node_tbl_info_t));
   210
   211          nt->nt_lre_cnt =
   212                  prueth->mem[PRUETH_MEM_SHARED_RAM].va + ICSS_LRE_CNT_NODES;
   213          memset_io(nt->nt_lre_cnt, 0, sizeof(struct node_tbl_lre_cnt_t));
   214
   215          nt->nt_array_max_entries = fw_offsets->nt_array_max_entries;
   216          nt->bin_array_max_entries = fw_offsets->bin_array_max_entries;
   217          nt->index_array_max_entries = fw_offsets->index_array_max_entries;
   218          nt->hash_mask = fw_offsets->hash_mask;
   219
   220          for (j = 0; j < fw_offsets->index_array_max_entries; j++)
   221                  IND_BINOFS(j) = fw_offsets->bin_array_max_entries;
   222          for (j = 0; j < fw_offsets->bin_array_max_entries; j++)
   223                  BIN_NODEOFS(j) = fw_offsets->nt_array_max_entries;
   224          for (j = 0; j < fw_offsets->nt_array_max_entries; j++)
   225                  nt->nt_array->node_tbl[j].entry_state = ICSS_LRE_NODE_FREE;
   226
   227          q->rd_ind = 0;
   228          q->wr_ind = 0;
   229          q->full = false;
   230  }
   231
   232  static u16 find_free_bin(struct node_tbl *nt)
   233  {
   234          u16 j;
   235
   236          for (j = 0; j < nt->bin_array_max_entries; j++)
   237                  if (BIN_NODEOFS(j) == nt->nt_array_max_entries)
   238                          break;
   239
   240          return j;
   241  }
   242
   243  /* find first free node table slot and write it to the next_free_slot */
   244  static u16 next_free_slot_update(struct node_tbl *nt)
   245  {
   246          int j;
   247
   248          nt->nt_info->next_free_slot = nt->nt_array_max_entries;
   249          for (j = 0; j < nt->nt_array_max_entries; j++) {
   250                  if (nt->nt_array->node_tbl[j].entry_state ==
   251                      ICSS_LRE_NODE_FREE) {
   252                          nt->nt_info->next_free_slot = j;
   253                          break;
   254                  }
   255          }
   256
   257          return nt->nt_info->next_free_slot;
   258  }
   259
   260  static void inc_time(u16 *t)
   261  {
   262          *t += 1;
   263          if (*t > ICSS_LRE_MAX_FORGET_TIME)
   264                  *t = ICSS_LRE_MAX_FORGET_TIME;
   265  }
   266
 > 267  void node_table_update_time(struct node_tbl *nt)
   268  {
   269          int j;
   270          u16 ofs;
   271          struct nt_array_t *nt_arr = nt->nt_array;
   272          struct node_tbl_t *node;
   273
   274          for (j = 0; j < nt->bin_array_max_entries; j++) {
   275                  ofs = nt->bin_array->bin_tbl[j].node_tbl_offset;
   276                  if (ofs < nt->nt_array_max_entries) {
   277                          node = &nt_arr->node_tbl[ofs];
   278                          inc_time(&node->time_last_seen_a);
   279                          inc_time(&node->time_last_seen_b);
   280                          /* increment time_last_seen_s if nod is not SAN */
   281                          if ((node->status &
   282                               ICSS_LRE_NT_REM_NODE_TYPE_SANAB) == 0)
   283                                  inc_time(&node->time_last_seen_s);
   284                  }
   285          }
   286  }
   287

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org