 
Date: 2021-07-01
From: kernel test robot <lkp@intel.com>
Subject: arch/arm64/kvm/va_layout.c:292:6: warning: no previous prototype for 'kvm_compute_final_ctr_el0'
tree:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: dbe69e43372212527abf48609aba7fc39a6daa27
commit: 755db23420a1ce4b740186543432983e9bbe713e KVM: arm64: Generate final CTR_EL0 value when running in Protected mode
date: 3 months ago
config: arm64-buildonly-randconfig-r002-20210701 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=755db23420a1ce4b740186543432983e9bbe713e
        git remote add linus https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
        git fetch --no-tags linus master
        git checkout 755db23420a1ce4b740186543432983e9bbe713e
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
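
For reference, the tag goes into the fix's commit message as a trailer next to the usual Signed-off-by; the subject line below is hypothetical:

    KVM: arm64: Add missing prototypes for va_layout helpers

    ...

    Reported-by: kernel test robot <lkp@intel.com>
    Signed-off-by: Your Name <you@example.com>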

All warnings (new ones prefixed by >>):

   arch/arm64/kvm/va_layout.c:188:6: warning: no previous prototype for 'kvm_patch_vector_branch' [-Wmissing-prototypes]
     188 | void kvm_patch_vector_branch(struct alt_instr *alt,
         |      ^~~~~~~~~~~~~~~~~~~~~~~
   arch/arm64/kvm/va_layout.c:286:6: warning: no previous prototype for 'kvm_get_kimage_voffset' [-Wmissing-prototypes]
     286 | void kvm_get_kimage_voffset(struct alt_instr *alt,
         |      ^~~~~~~~~~~~~~~~~~~~~~
>> arch/arm64/kvm/va_layout.c:292:6: warning: no previous prototype for 'kvm_compute_final_ctr_el0' [-Wmissing-prototypes]
     292 | void kvm_compute_final_ctr_el0(struct alt_instr *alt,
         |      ^~~~~~~~~~~~~~~~~~~~~~~~~
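
All three diagnostics are -Wmissing-prototypes warnings: each function has external linkage but no declaration in scope at its definition. A minimal sketch of the usual fix follows, with the signatures copied verbatim from the file below; the header location here is only a placeholder, and the actual fix may declare them elsewhere:

/* placeholder header, e.g. somewhere under arch/arm64/include/asm/ */
#include <linux/types.h>	/* __le32 */

struct alt_instr;

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst);

va_layout.c would then #include the chosen header so each definition sees its own prototype, which is exactly what -Wmissing-prototypes checks for.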


vim +/kvm_compute_final_ctr_el0 +292 arch/arm64/kvm/va_layout.c

   187
 > 188	void kvm_patch_vector_branch(struct alt_instr *alt,
   189				     __le32 *origptr, __le32 *updptr, int nr_inst)
   190	{
   191		u64 addr;
   192		u32 insn;
   193
   194		BUG_ON(nr_inst != 4);
   195
   196		if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
   197			return;
   198
   199		/*
   200		 * Compute HYP VA by using the same computation as kern_hyp_va()
   201		 */
   202		addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));
   203
   204		/* Use PC[10:7] to branch to the same vector in KVM */
   205		addr |= ((u64)origptr & GENMASK_ULL(10, 7));
   206
   207		/*
   208		 * Branch over the preamble in order to avoid the initial store on
   209		 * the stack (which we already perform in the hardening vectors).
   210		 */
   211		addr += KVM_VECTOR_PREAMBLE;
   212
   213		/* movz x0, #(addr & 0xffff) */
   214		insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
   215						 (u16)addr,
   216						 0,
   217						 AARCH64_INSN_VARIANT_64BIT,
   218						 AARCH64_INSN_MOVEWIDE_ZERO);
   219		*updptr++ = cpu_to_le32(insn);
   220
   221		/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
   222		insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
   223						 (u16)(addr >> 16),
   224						 16,
   225						 AARCH64_INSN_VARIANT_64BIT,
   226						 AARCH64_INSN_MOVEWIDE_KEEP);
   227		*updptr++ = cpu_to_le32(insn);
   228
   229		/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
   230		insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
   231						 (u16)(addr >> 32),
   232						 32,
   233						 AARCH64_INSN_VARIANT_64BIT,
   234						 AARCH64_INSN_MOVEWIDE_KEEP);
   235		*updptr++ = cpu_to_le32(insn);
   236
   237		/* br x0 */
   238		insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
   239						   AARCH64_INSN_BRANCH_NOLINK);
   240		*updptr++ = cpu_to_le32(insn);
   241	}
   242
   243	static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
   244	{
   245		u32 insn, oinsn, rd;
   246
   247		BUG_ON(nr_inst != 4);
   248
   249		/* Compute target register */
   250		oinsn = le32_to_cpu(*origptr);
   251		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
   252
   253		/* movz rd, #(val & 0xffff) */
   254		insn = aarch64_insn_gen_movewide(rd,
   255						 (u16)val,
   256						 0,
   257						 AARCH64_INSN_VARIANT_64BIT,
   258						 AARCH64_INSN_MOVEWIDE_ZERO);
   259		*updptr++ = cpu_to_le32(insn);
   260
   261		/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
   262		insn = aarch64_insn_gen_movewide(rd,
   263						 (u16)(val >> 16),
   264						 16,
   265						 AARCH64_INSN_VARIANT_64BIT,
   266						 AARCH64_INSN_MOVEWIDE_KEEP);
   267		*updptr++ = cpu_to_le32(insn);
   268
   269		/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
   270		insn = aarch64_insn_gen_movewide(rd,
   271						 (u16)(val >> 32),
   272						 32,
   273						 AARCH64_INSN_VARIANT_64BIT,
   274						 AARCH64_INSN_MOVEWIDE_KEEP);
   275		*updptr++ = cpu_to_le32(insn);
   276
   277		/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
   278		insn = aarch64_insn_gen_movewide(rd,
   279						 (u16)(val >> 48),
   280						 48,
   281						 AARCH64_INSN_VARIANT_64BIT,
   282						 AARCH64_INSN_MOVEWIDE_KEEP);
   283		*updptr++ = cpu_to_le32(insn);
   284	}
   285
   286	void kvm_get_kimage_voffset(struct alt_instr *alt,
   287				    __le32 *origptr, __le32 *updptr, int nr_inst)
   288	{
   289		generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
   290	}
   291
 > 292	void kvm_compute_final_ctr_el0(struct alt_instr *alt,
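
A side note on the pattern in the excerpt: both kvm_patch_vector_branch() and generate_mov_q() materialize a 64-bit immediate with the standard AArch64 movz/movk ladder, 16 bits per instruction. With an invented example value addr = 0x0000ffff8123abc0, kvm_patch_vector_branch() would emit:

	movz	x0, #0xabc0			// bits [15:0], remaining bits zeroed
	movk	x0, #0x8123, lsl #16		// bits [31:16], other bits kept
	movk	x0, #0xffff, lsl #32		// bits [47:32]
	br	x0				// branch to the patched vector

It stops at bit 47 because the fourth and last patched slot (BUG_ON(nr_inst != 4)) is needed for the br, which relies on the computed HYP VA having no bits set above bit 47. generate_mov_q() emits no branch, so its fourth slot holds a movk ..., lsl #48 and it can load any 64-bit value.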

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[attachment: .config (application/gzip)]