104 Bring back lx brand
--- old/usr/src/uts/intel/ia32/os/archdep.c
+++ new/usr/src/uts/intel/ia32/os/archdep.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 #include <sys/param.h>
29 29 #include <sys/types.h>
30 30 #include <sys/vmparam.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/signal.h>
33 33 #include <sys/stack.h>
34 34 #include <sys/regset.h>
35 35 #include <sys/privregs.h>
36 36 #include <sys/frame.h>
37 37 #include <sys/proc.h>
38 38 #include <sys/psw.h>
39 39 #include <sys/siginfo.h>
40 40 #include <sys/cpuvar.h>
41 41 #include <sys/asm_linkage.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/errno.h>
44 44 #include <sys/bootconf.h>
45 45 #include <sys/archsystm.h>
46 46 #include <sys/debug.h>
47 47 #include <sys/elf.h>
48 48 #include <sys/spl.h>
49 49 #include <sys/time.h>
50 50 #include <sys/atomic.h>
51 51 #include <sys/sysmacros.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/modctl.h>
54 54 #include <sys/kobj.h>
55 55 #include <sys/panic.h>
56 56 #include <sys/reboot.h>
58 58 #include <sys/fp.h>
59 59 #include <sys/x86_archext.h>
60 60 #include <sys/auxv.h>
61 61 #include <sys/auxv_386.h>
62 62 #include <sys/dtrace.h>
63 63 #include <sys/brand.h>
64 64 #include <sys/machbrand.h>
66 66
67 67 extern const struct fnsave_state x87_initial;
68 68 extern const struct fxsave_state sse_initial;
69 69
70 70 /*
71 71 * Map an fnsave-formatted save area into an fxsave-formatted save area.
72 72 *
73 73 * Most fields are the same width, content and semantics. However
74 74 * the tag word is compressed.
75 75 */
76 76 static void
77 77 fnsave_to_fxsave(const struct fnsave_state *fn, struct fxsave_state *fx)
78 78 {
79 79 uint_t i, tagbits;
80 80
81 81 fx->fx_fcw = fn->f_fcw;
82 82 fx->fx_fsw = fn->f_fsw;
83 83
84 84 /*
85 85 * copy element by element (because of holes)
86 86 */
87 87 for (i = 0; i < 8; i++)
88 88 bcopy(&fn->f_st[i].fpr_16[0], &fx->fx_st[i].fpr_16[0],
89 89 sizeof (fn->f_st[0].fpr_16)); /* 80-bit x87-style floats */
90 90
91 91 /*
92 92 * synthesize compressed tag bits
93 93 */
94 94 fx->fx_fctw = 0;
95 95 for (tagbits = fn->f_ftw, i = 0; i < 8; i++, tagbits >>= 2)
96 96 if ((tagbits & 3) != 3)
97 97 fx->fx_fctw |= (1 << i);
98 98
99 99 fx->fx_fop = fn->f_fop;
100 100
101 101 #if defined(__amd64)
102 102 fx->fx_rip = (uint64_t)fn->f_eip;
103 103 fx->fx_rdp = (uint64_t)fn->f_dp;
104 104 #else
105 105 fx->fx_eip = fn->f_eip;
106 106 fx->fx_cs = fn->f_cs;
107 107 fx->__fx_ign0 = 0;
108 108 fx->fx_dp = fn->f_dp;
109 109 fx->fx_ds = fn->f_ds;
110 110 fx->__fx_ign1 = 0;
111 111 #endif
112 112 }
113 113
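For reference while reviewing: the compressed-tag synthesis above can be exercised in isolation with a small user-level sketch (standalone C with a hypothetical helper name; not part of this change):

#include <stdint.h>
#include <stdio.h>

/*
 * Compress a 16-bit fnsave tag word (2 bits per register, 3 == empty)
 * into the 8-bit fxsave form (1 bit per register, set == non-empty),
 * mirroring the loop in fnsave_to_fxsave().
 */
static uint8_t
compress_ftw(uint16_t ftw)
{
	unsigned i;
	uint8_t fctw = 0;

	for (i = 0; i < 8; i++, ftw >>= 2)
		if ((ftw & 3) != 3)
			fctw |= (1 << i);
	return (fctw);
}

int
main(void)
{
	/* all registers empty -> 00; only %st0 in use -> 01 */
	(void) printf("%02x %02x\n", compress_ftw(0xffff),
	    compress_ftw(0xfffc));
	return (0);
}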
114 114 /*
115 115 * Map from an fxsave-format save area to an fnsave-format save area.
116 116 */
117 117 static void
118 118 fxsave_to_fnsave(const struct fxsave_state *fx, struct fnsave_state *fn)
119 119 {
120 120 uint_t i, top, tagbits;
121 121
122 122 fn->f_fcw = fx->fx_fcw;
123 123 fn->__f_ign0 = 0;
124 124 fn->f_fsw = fx->fx_fsw;
125 125 fn->__f_ign1 = 0;
126 126
127 127 top = (fx->fx_fsw & FPS_TOP) >> 11;
128 128
129 129 /*
130 130 * copy element by element (because of holes)
131 131 */
132 132 for (i = 0; i < 8; i++)
133 133 bcopy(&fx->fx_st[i].fpr_16[0], &fn->f_st[i].fpr_16[0],
134 134 sizeof (fn->f_st[0].fpr_16)); /* 80-bit x87-style floats */
135 135
136 136 /*
137 137 * synthesize uncompressed tag bits
138 138 */
139 139 fn->f_ftw = 0;
140 140 for (tagbits = fx->fx_fctw, i = 0; i < 8; i++, tagbits >>= 1) {
141 141 uint_t ibit, expo;
142 142 const uint16_t *fpp;
143 143 static const uint16_t zero[5] = { 0, 0, 0, 0, 0 };
144 144
145 145 if ((tagbits & 1) == 0) {
146 146 fn->f_ftw |= 3 << (i << 1); /* empty */
147 147 continue;
148 148 }
149 149
150 150 /*
151 151 * (tags refer to *physical* registers)
152 152 */
153 153 fpp = &fx->fx_st[(i - top + 8) & 7].fpr_16[0];
154 154 ibit = fpp[3] >> 15;
155 155 expo = fpp[4] & 0x7fff;
156 156
157 157 if (ibit && expo != 0 && expo != 0x7fff)
158 158 continue; /* valid fp number */
159 159
160 160 if (bcmp(fpp, &zero, sizeof (zero)))
161 161 fn->f_ftw |= 2 << (i << 1); /* NaN */
162 162 else
163 163 fn->f_ftw |= 1 << (i << 1); /* fp zero */
164 164 }
165 165
166 166 fn->f_fop = fx->fx_fop;
167 167
168 168 fn->__f_ign2 = 0;
169 169 #if defined(__amd64)
170 170 fn->f_eip = (uint32_t)fx->fx_rip;
171 171 fn->f_cs = U32CS_SEL;
172 172 fn->f_dp = (uint32_t)fx->fx_rdp;
173 173 fn->f_ds = UDS_SEL;
174 174 #else
175 175 fn->f_eip = fx->fx_eip;
176 176 fn->f_cs = fx->fx_cs;
177 177 fn->f_dp = fx->fx_dp;
178 178 fn->f_ds = fx->fx_ds;
179 179 #endif
180 180 fn->__f_ign3 = 0;
181 181 }
182 182
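A note on the (i - top + 8) & 7 indexing above: fnsave tags describe physical registers, while the save area stores registers in %st() stack order, so the lookup rotates by TOP (bits 11-13 of the status word). A minimal sketch of that mapping (hypothetical helper; FPS_TOP is the 0x3800 mask):

#include <stdint.h>

/*
 * Map physical x87 register 'phys' to its %st() save-area slot, given
 * the TOP field of the FPU status word.
 */
static unsigned
phys_to_st(unsigned phys, uint16_t fsw)
{
	unsigned top = (fsw & 0x3800) >> 11;	/* FPS_TOP */

	return ((phys - top + 8) & 7);
}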
183 183 /*
184 184 * Map from an fpregset_t into an fxsave-format save area
185 185 */
186 186 static void
187 187 fpregset_to_fxsave(const fpregset_t *fp, struct fxsave_state *fx)
188 188 {
189 189 #if defined(__amd64)
190 190 bcopy(fp, fx, sizeof (*fx));
191 191 #else
192 192 const struct fpchip_state *fc = &fp->fp_reg_set.fpchip_state;
193 193
194 194 fnsave_to_fxsave((const struct fnsave_state *)fc, fx);
195 195 fx->fx_mxcsr = fc->mxcsr;
196 196 bcopy(&fc->xmm[0], &fx->fx_xmm[0], sizeof (fc->xmm));
197 197 #endif
198 198 /*
199 199 * avoid useless #gp exceptions - mask reserved bits
200 200 */
201 201 fx->fx_mxcsr &= sse_mxcsr_mask;
202 202 }
203 203
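The reserved-bit masking above keeps a /proc or setcontext(2) consumer from faulting the kernel in ldmxcsr; sse_mxcsr_mask is derived from the hardware at boot. A sketch of the sanitizing step (hypothetical helper; 0xffbf is the default mask Intel documents for CPUs whose fxsave MXCSR_MASK field reads as zero):

#include <stdint.h>

/*
 * Mask a user-supplied MXCSR value against the mask the CPU advertises;
 * setting reserved bits would raise #gp in ldmxcsr.
 */
static uint32_t
mxcsr_sanitize(uint32_t mxcsr, uint32_t hw_mask)
{
	return (mxcsr & (hw_mask != 0 ? hw_mask : 0xffbf));
}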
204 204 /*
205 205 * Map from an fxsave-format save area into a fpregset_t
206 206 */
207 207 static void
208 208 fxsave_to_fpregset(const struct fxsave_state *fx, fpregset_t *fp)
209 209 {
210 210 #if defined(__amd64)
211 211 bcopy(fx, fp, sizeof (*fx));
212 212 #else
213 213 struct fpchip_state *fc = &fp->fp_reg_set.fpchip_state;
214 214
215 215 fxsave_to_fnsave(fx, (struct fnsave_state *)fc);
216 216 fc->mxcsr = fx->fx_mxcsr;
217 217 bcopy(&fx->fx_xmm[0], &fc->xmm[0], sizeof (fc->xmm));
218 218 #endif
219 219 }
220 220
221 221 #if defined(_SYSCALL32_IMPL)
222 222 static void
223 223 fpregset32_to_fxsave(const fpregset32_t *fp, struct fxsave_state *fx)
224 224 {
225 225 const struct fpchip32_state *fc = &fp->fp_reg_set.fpchip_state;
226 226
227 227 fnsave_to_fxsave((const struct fnsave_state *)fc, fx);
228 228 /*
229 229 * avoid useless #gp exceptions - mask reserved bits
230 230 */
231 231 fx->fx_mxcsr = sse_mxcsr_mask & fc->mxcsr;
232 232 bcopy(&fc->xmm[0], &fx->fx_xmm[0], sizeof (fc->xmm));
233 233 }
234 234
235 235 static void
236 236 fxsave_to_fpregset32(const struct fxsave_state *fx, fpregset32_t *fp)
237 237 {
238 238 struct fpchip32_state *fc = &fp->fp_reg_set.fpchip_state;
239 239
240 240 fxsave_to_fnsave(fx, (struct fnsave_state *)fc);
241 241 fc->mxcsr = fx->fx_mxcsr;
242 242 bcopy(&fx->fx_xmm[0], &fc->xmm[0], sizeof (fc->xmm));
243 243 }
244 244
245 245 static void
246 246 fpregset_nto32(const fpregset_t *src, fpregset32_t *dst)
247 247 {
248 248 fxsave_to_fpregset32((struct fxsave_state *)src, dst);
249 249 dst->fp_reg_set.fpchip_state.status =
250 250 src->fp_reg_set.fpchip_state.status;
251 251 dst->fp_reg_set.fpchip_state.xstatus =
252 252 src->fp_reg_set.fpchip_state.xstatus;
253 253 }
254 254
255 255 static void
256 256 fpregset_32ton(const fpregset32_t *src, fpregset_t *dst)
257 257 {
258 258 fpregset32_to_fxsave(src, (struct fxsave_state *)dst);
259 259 dst->fp_reg_set.fpchip_state.status =
260 260 src->fp_reg_set.fpchip_state.status;
261 261 dst->fp_reg_set.fpchip_state.xstatus =
262 262 src->fp_reg_set.fpchip_state.xstatus;
263 263 }
264 264 #endif
265 265
266 266 /*
267 267 * Set floating-point registers from a native fpregset_t.
268 268 */
269 269 void
270 270 setfpregs(klwp_t *lwp, fpregset_t *fp)
271 271 {
272 272 struct fpu_ctx *fpu = &lwp->lwp_pcb.pcb_fpu;
273 273
274 274 if (fpu->fpu_flags & FPU_EN) {
275 275 if (!(fpu->fpu_flags & FPU_VALID)) {
276 276 /*
277 277 * FPU context is still active, release the
278 278 * ownership.
279 279 */
280 280 fp_free(fpu, 0);
281 281 }
282 282 }
283 283 /*
284 284 * Else: if we are trying to change the FPU state of a thread which
285 285 * hasn't yet initialized floating point, store the state in
286 286 * the pcb and indicate that the state is valid. When the
287 287 * thread enables floating point, it will use this state instead
288 288 * of the default state.
289 289 */
290 290
291 291 switch (fp_save_mech) {
292 292 #if defined(__i386)
293 293 case FP_FNSAVE:
294 294 bcopy(fp, &fpu->fpu_regs.kfpu_u.kfpu_fn,
295 295 sizeof (fpu->fpu_regs.kfpu_u.kfpu_fn));
296 296 break;
297 297 #endif
298 298 case FP_FXSAVE:
299 299 fpregset_to_fxsave(fp, &fpu->fpu_regs.kfpu_u.kfpu_fx);
300 300 fpu->fpu_regs.kfpu_xstatus =
301 301 fp->fp_reg_set.fpchip_state.xstatus;
302 302 break;
303 303
304 304 case FP_XSAVE:
305 305 fpregset_to_fxsave(fp,
306 306 &fpu->fpu_regs.kfpu_u.kfpu_xs.xs_fxsave);
307 307 fpu->fpu_regs.kfpu_xstatus =
308 308 fp->fp_reg_set.fpchip_state.xstatus;
309 309 fpu->fpu_regs.kfpu_u.kfpu_xs.xs_xstate_bv |=
310 310 (XFEATURE_LEGACY_FP | XFEATURE_SSE);
311 311 break;
312 312 default:
313 313 panic("Invalid fp_save_mech");
314 314 /*NOTREACHED*/
315 315 }
316 316
317 317 fpu->fpu_regs.kfpu_status = fp->fp_reg_set.fpchip_state.status;
318 318 fpu->fpu_flags |= FPU_VALID;
319 319 }
320 320
321 321 /*
322 322 * Get floating-point registers into a native fpregset_t.
323 323 */
324 324 void
325 325 getfpregs(klwp_t *lwp, fpregset_t *fp)
326 326 {
327 327 struct fpu_ctx *fpu = &lwp->lwp_pcb.pcb_fpu;
328 328
329 329 kpreempt_disable();
330 330 if (fpu->fpu_flags & FPU_EN) {
331 331 /*
332 332 * If we have FPU hw and the thread's pcb doesn't have
333 333 * a valid FPU state then get the state from the hw.
334 334 */
335 335 if (fpu_exists && ttolwp(curthread) == lwp &&
336 336 !(fpu->fpu_flags & FPU_VALID))
337 337 fp_save(fpu); /* get the current FPU state */
338 338 }
339 339
340 340 /*
341 341 * There are 3 possible cases we have to be aware of here:
342 342 *
343 343 * 1. FPU is enabled. FPU state is stored in the current LWP.
344 344 *
345 345 * 2. FPU is not enabled, and there have been no intervening /proc
346 346 * modifications. Return initial FPU state.
347 347 *
348 348 * 3. FPU is not enabled, but a /proc consumer has modified FPU state.
349 349 * FPU state is stored in the current LWP.
350 350 */
351 351 if ((fpu->fpu_flags & FPU_EN) || (fpu->fpu_flags & FPU_VALID)) {
352 352 /*
353 353 * Cases 1 and 3.
354 354 */
355 355 switch (fp_save_mech) {
356 356 #if defined(__i386)
357 357 case FP_FNSAVE:
358 358 bcopy(&fpu->fpu_regs.kfpu_u.kfpu_fn, fp,
359 359 sizeof (fpu->fpu_regs.kfpu_u.kfpu_fn));
360 360 break;
361 361 #endif
362 362 case FP_FXSAVE:
363 363 fxsave_to_fpregset(&fpu->fpu_regs.kfpu_u.kfpu_fx, fp);
364 364 fp->fp_reg_set.fpchip_state.xstatus =
365 365 fpu->fpu_regs.kfpu_xstatus;
366 366 break;
367 367 case FP_XSAVE:
368 368 fxsave_to_fpregset(
369 369 &fpu->fpu_regs.kfpu_u.kfpu_xs.xs_fxsave, fp);
370 370 fp->fp_reg_set.fpchip_state.xstatus =
371 371 fpu->fpu_regs.kfpu_xstatus;
372 372 break;
373 373 default:
374 374 panic("Invalid fp_save_mech");
375 375 /*NOTREACHED*/
376 376 }
377 377 fp->fp_reg_set.fpchip_state.status = fpu->fpu_regs.kfpu_status;
378 378 } else {
379 379 /*
380 380 * Case 2.
381 381 */
382 382 switch (fp_save_mech) {
383 383 #if defined(__i386)
384 384 case FP_FNSAVE:
385 385 bcopy(&x87_initial, fp, sizeof (x87_initial));
386 386 break;
387 387 #endif
388 388 case FP_FXSAVE:
389 389 case FP_XSAVE:
390 390 /*
391 391 * For now, we don't have any AVX specific field in ABI.
 392  392 			 * If we add any in the future, we need to initialize them
393 393 * as well.
394 394 */
395 395 fxsave_to_fpregset(&sse_initial, fp);
396 396 fp->fp_reg_set.fpchip_state.xstatus =
397 397 fpu->fpu_regs.kfpu_xstatus;
398 398 break;
399 399 default:
400 400 panic("Invalid fp_save_mech");
401 401 /*NOTREACHED*/
402 402 }
403 403 fp->fp_reg_set.fpchip_state.status = fpu->fpu_regs.kfpu_status;
404 404 }
405 405 kpreempt_enable();
406 406 }
407 407
408 408 #if defined(_SYSCALL32_IMPL)
409 409
410 410 /*
411 411 * Set floating-point registers from an fpregset32_t.
412 412 */
413 413 void
414 414 setfpregs32(klwp_t *lwp, fpregset32_t *fp)
415 415 {
416 416 fpregset_t fpregs;
417 417
418 418 fpregset_32ton(fp, &fpregs);
419 419 setfpregs(lwp, &fpregs);
420 420 }
421 421
422 422 /*
423 423 * Get floating-point registers into an fpregset32_t.
424 424 */
425 425 void
426 426 getfpregs32(klwp_t *lwp, fpregset32_t *fp)
427 427 {
428 428 fpregset_t fpregs;
429 429
430 430 getfpregs(lwp, &fpregs);
431 431 fpregset_nto32(&fpregs, fp);
432 432 }
433 433
434 434 #endif /* _SYSCALL32_IMPL */
435 435
436 436 /*
437 437 * Return the general registers
438 438 */
439 439 void
440 440 getgregs(klwp_t *lwp, gregset_t grp)
441 441 {
442 442 struct regs *rp = lwptoregs(lwp);
443 443 #if defined(__amd64)
444 444 struct pcb *pcb = &lwp->lwp_pcb;
445 445 int thisthread = lwptot(lwp) == curthread;
446 446
447 447 grp[REG_RDI] = rp->r_rdi;
448 448 grp[REG_RSI] = rp->r_rsi;
449 449 grp[REG_RDX] = rp->r_rdx;
450 450 grp[REG_RCX] = rp->r_rcx;
451 451 grp[REG_R8] = rp->r_r8;
452 452 grp[REG_R9] = rp->r_r9;
453 453 grp[REG_RAX] = rp->r_rax;
454 454 grp[REG_RBX] = rp->r_rbx;
455 455 grp[REG_RBP] = rp->r_rbp;
456 456 grp[REG_R10] = rp->r_r10;
457 457 grp[REG_R11] = rp->r_r11;
458 458 grp[REG_R12] = rp->r_r12;
459 459 grp[REG_R13] = rp->r_r13;
460 460 grp[REG_R14] = rp->r_r14;
461 461 grp[REG_R15] = rp->r_r15;
462 462 grp[REG_FSBASE] = pcb->pcb_fsbase;
463 463 grp[REG_GSBASE] = pcb->pcb_gsbase;
464 464 if (thisthread)
465 465 kpreempt_disable();
466 466 if (pcb->pcb_rupdate == 1) {
467 467 grp[REG_DS] = pcb->pcb_ds;
468 468 grp[REG_ES] = pcb->pcb_es;
469 469 grp[REG_FS] = pcb->pcb_fs;
470 470 grp[REG_GS] = pcb->pcb_gs;
471 471 } else {
472 472 grp[REG_DS] = rp->r_ds;
473 473 grp[REG_ES] = rp->r_es;
474 474 grp[REG_FS] = rp->r_fs;
475 475 grp[REG_GS] = rp->r_gs;
476 476 }
477 477 if (thisthread)
478 478 kpreempt_enable();
479 479 grp[REG_TRAPNO] = rp->r_trapno;
480 480 grp[REG_ERR] = rp->r_err;
481 481 grp[REG_RIP] = rp->r_rip;
482 482 grp[REG_CS] = rp->r_cs;
483 483 grp[REG_SS] = rp->r_ss;
484 484 grp[REG_RFL] = rp->r_rfl;
485 485 grp[REG_RSP] = rp->r_rsp;
486 486 #else
487 487 bcopy(&rp->r_gs, grp, sizeof (gregset_t));
488 488 #endif
489 489 }
490 490
491 491 #if defined(_SYSCALL32_IMPL)
492 492
493 493 void
494 494 getgregs32(klwp_t *lwp, gregset32_t grp)
495 495 {
496 496 struct regs *rp = lwptoregs(lwp);
497 497 struct pcb *pcb = &lwp->lwp_pcb;
498 498 int thisthread = lwptot(lwp) == curthread;
499 499
500 500 if (thisthread)
501 501 kpreempt_disable();
502 502 if (pcb->pcb_rupdate == 1) {
503 503 grp[GS] = (uint16_t)pcb->pcb_gs;
504 504 grp[FS] = (uint16_t)pcb->pcb_fs;
505 505 grp[DS] = (uint16_t)pcb->pcb_ds;
506 506 grp[ES] = (uint16_t)pcb->pcb_es;
507 507 } else {
508 508 grp[GS] = (uint16_t)rp->r_gs;
509 509 grp[FS] = (uint16_t)rp->r_fs;
510 510 grp[DS] = (uint16_t)rp->r_ds;
511 511 grp[ES] = (uint16_t)rp->r_es;
512 512 }
513 513 if (thisthread)
514 514 kpreempt_enable();
515 515 grp[EDI] = (greg32_t)rp->r_rdi;
516 516 grp[ESI] = (greg32_t)rp->r_rsi;
517 517 grp[EBP] = (greg32_t)rp->r_rbp;
518 518 grp[ESP] = 0;
519 519 grp[EBX] = (greg32_t)rp->r_rbx;
520 520 grp[EDX] = (greg32_t)rp->r_rdx;
521 521 grp[ECX] = (greg32_t)rp->r_rcx;
522 522 grp[EAX] = (greg32_t)rp->r_rax;
523 523 grp[TRAPNO] = (greg32_t)rp->r_trapno;
524 524 grp[ERR] = (greg32_t)rp->r_err;
525 525 grp[EIP] = (greg32_t)rp->r_rip;
526 526 grp[CS] = (uint16_t)rp->r_cs;
527 527 grp[EFL] = (greg32_t)rp->r_rfl;
528 528 grp[UESP] = (greg32_t)rp->r_rsp;
529 529 grp[SS] = (uint16_t)rp->r_ss;
530 530 }
531 531
532 532 void
533 533 ucontext_32ton(const ucontext32_t *src, ucontext_t *dst)
534 534 {
535 535 mcontext_t *dmc = &dst->uc_mcontext;
536 536 const mcontext32_t *smc = &src->uc_mcontext;
537 537
538 538 bzero(dst, sizeof (*dst));
539 539 dst->uc_flags = src->uc_flags;
540 540 dst->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;
541 541
542 542 bcopy(&src->uc_sigmask, &dst->uc_sigmask, sizeof (dst->uc_sigmask));
543 543
544 544 dst->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
545 545 dst->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
546 546 dst->uc_stack.ss_flags = src->uc_stack.ss_flags;
547 547
548 548 dmc->gregs[REG_GS] = (greg_t)(uint32_t)smc->gregs[GS];
549 549 dmc->gregs[REG_FS] = (greg_t)(uint32_t)smc->gregs[FS];
550 550 dmc->gregs[REG_ES] = (greg_t)(uint32_t)smc->gregs[ES];
551 551 dmc->gregs[REG_DS] = (greg_t)(uint32_t)smc->gregs[DS];
552 552 dmc->gregs[REG_RDI] = (greg_t)(uint32_t)smc->gregs[EDI];
553 553 dmc->gregs[REG_RSI] = (greg_t)(uint32_t)smc->gregs[ESI];
554 554 dmc->gregs[REG_RBP] = (greg_t)(uint32_t)smc->gregs[EBP];
555 555 dmc->gregs[REG_RBX] = (greg_t)(uint32_t)smc->gregs[EBX];
556 556 dmc->gregs[REG_RDX] = (greg_t)(uint32_t)smc->gregs[EDX];
557 557 dmc->gregs[REG_RCX] = (greg_t)(uint32_t)smc->gregs[ECX];
558 558 dmc->gregs[REG_RAX] = (greg_t)(uint32_t)smc->gregs[EAX];
559 559 dmc->gregs[REG_TRAPNO] = (greg_t)(uint32_t)smc->gregs[TRAPNO];
560 560 dmc->gregs[REG_ERR] = (greg_t)(uint32_t)smc->gregs[ERR];
561 561 dmc->gregs[REG_RIP] = (greg_t)(uint32_t)smc->gregs[EIP];
562 562 dmc->gregs[REG_CS] = (greg_t)(uint32_t)smc->gregs[CS];
563 563 dmc->gregs[REG_RFL] = (greg_t)(uint32_t)smc->gregs[EFL];
564 564 dmc->gregs[REG_RSP] = (greg_t)(uint32_t)smc->gregs[UESP];
565 565 dmc->gregs[REG_SS] = (greg_t)(uint32_t)smc->gregs[SS];
566 566
567 567 /*
 568  568 	 * A valid fpregs is only copied in if uc.uc_flags has UC_FPU set;
569 569 * otherwise there is no guarantee that anything in fpregs is valid.
570 570 */
571 571 if (src->uc_flags & UC_FPU)
572 572 fpregset_32ton(&src->uc_mcontext.fpregs,
573 573 &dst->uc_mcontext.fpregs);
574 574 }
575 575
576 576 #endif /* _SYSCALL32_IMPL */
577 577
578 578 /*
579 579 * Return the user-level PC.
580 580 * If in a system call, return the address of the syscall trap.
581 581 */
582 582 greg_t
583 583 getuserpc()
584 584 {
585 585 greg_t upc = lwptoregs(ttolwp(curthread))->r_pc;
586 586 uint32_t insn;
587 587
588 588 if (curthread->t_sysnum == 0)
589 589 return (upc);
590 590
591 591 /*
592 592 * We might've gotten here from sysenter (0xf 0x34),
593 593 * syscall (0xf 0x5) or lcall (0x9a 0 0 0 0 0x27 0).
594 594 *
595 595 * Go peek at the binary to figure it out..
596 596 */
 597  597 	if (fuword32((void *)(upc - 2), &insn) != -1 &&
 598  598 	    ((insn & 0xffff) == 0x340f || (insn & 0xffff) == 0x050f))
599 599 return (upc - 2);
600 600 return (upc - 7);
601 601 }
602 602
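Both sysenter (0x0f 0x34) and syscall (0x0f 0x05) are two-byte instructions, so on a match the saved PC is backed up by 2; otherwise the seven-byte lcall sequence is assumed. A user-level sketch of the decode (hypothetical helper):

#include <stdint.h>

/*
 * Given a pointer just past a system-call instruction, return how far to
 * back up to its first byte: 2 for sysenter/syscall, else 7 for the
 * 32-bit lcall $0x27, $0 sequence.
 */
static int
syscall_insn_size(const uint8_t *pc)
{
	uint16_t w = pc[-2] | (pc[-1] << 8);	/* little-endian fetch */

	return ((w == 0x340f || w == 0x050f) ? 2 : 7);
}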
603 603 /*
604 604 * Protect segment registers from non-user privilege levels and GDT selectors
605 605 * other than USER_CS, USER_DS and lwp FS and GS values. If the segment
606 606 * selector is non-null and not USER_CS/USER_DS, we make sure that the
607 607 * TI bit is set to point into the LDT and that the RPL is set to 3.
608 608 *
609 609 * Since struct regs stores each 16-bit segment register as a 32-bit greg_t, we
610 610 * also explicitly zero the top 16 bits since they may be coming from the
611 611 * user's address space via setcontext(2) or /proc.
612 612 *
613 613 * Note about null selector. When running on the hypervisor if we allow a
614 614 * process to set its %cs to null selector with RPL of 0 the hypervisor will
615 615 * crash the domain. If running on bare metal we would get a #gp fault and
616 616 * be able to kill the process and continue on. Therefore we make sure to
617 617 * force RPL to SEL_UPL even for null selector when setting %cs.
618 618 */
619 619
620 620 #if defined(IS_CS) || defined(IS_NOT_CS)
621 621 #error "IS_CS and IS_NOT_CS already defined"
622 622 #endif
623 623
624 624 #define IS_CS 1
625 625 #define IS_NOT_CS 0
626 626
627 627 /*ARGSUSED*/
628 628 static greg_t
629 629 fix_segreg(greg_t sr, int iscs, model_t datamodel)
630 630 {
631 + kthread_t *t = curthread;
632 +
631 633 switch (sr &= 0xffff) {
632 634
633 635 case 0:
634 636 if (iscs == IS_CS)
635 637 return (0 | SEL_UPL);
636 638 else
637 639 return (0);
638 640
639 641 #if defined(__amd64)
640 642 /*
641 643 * If lwp attempts to switch data model then force their
642 644 * code selector to be null selector.
643 645 */
644 646 case U32CS_SEL:
645 647 if (datamodel == DATAMODEL_NATIVE)
646 648 return (0 | SEL_UPL);
647 649 else
648 650 return (sr);
649 651
650 652 case UCS_SEL:
651 653 if (datamodel == DATAMODEL_ILP32)
652 654 return (0 | SEL_UPL);
653 655 #elif defined(__i386)
654 656 case UCS_SEL:
655 657 #endif
656 658 /*FALLTHROUGH*/
657 659 case UDS_SEL:
658 660 case LWPFS_SEL:
659 661 case LWPGS_SEL:
660 662 case SEL_UPL:
661 663 return (sr);
662 664 default:
663 665 break;
664 666 }
665 667
668 + /*
669 + * Allow this process's brand to do any necessary segment register
670 + * manipulation.
671 + */
672 + if (PROC_IS_BRANDED(t->t_procp) && BRMOP(t->t_procp)->b_fixsegreg) {
673 + greg_t bsr = BRMOP(t->t_procp)->b_fixsegreg(sr, datamodel);
674 +
675 + if (bsr == 0 && iscs == IS_CS)
676 + return (0 | SEL_UPL);
677 + else
678 + return (bsr);
679 + }
680 +
666 681 /*
667 682 * Force it into the LDT in ring 3 for 32-bit processes, which by
668 683 * default do not have an LDT, so that any attempt to use an invalid
 669  684 	 * selector will reference the (non-existent) LDT, and cause a #gp
670 685 * fault for the process.
671 686 *
672 687 * 64-bit processes get the null gdt selector since they
673 688 * are not allowed to have a private LDT.
674 689 */
675 690 #if defined(__amd64)
676 691 if (datamodel == DATAMODEL_ILP32) {
677 692 return (sr | SEL_TI_LDT | SEL_UPL);
678 693 } else {
679 694 if (iscs == IS_CS)
680 695 return (0 | SEL_UPL);
681 696 else
682 697 return (0);
683 698 }
684 699
685 700 #elif defined(__i386)
686 701 return (sr | SEL_TI_LDT | SEL_UPL);
687 702 #endif
688 703 }
689 704
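For reference while reviewing fix_segreg(): a selector packs a 13-bit descriptor-table index, a table-indicator bit and a 2-bit RPL, which is exactly what SEL_TI_LDT and SEL_UPL manipulate. A sketch of the layout (hypothetical macro names):

/* x86 selector layout: bits 15-3 index, bit 2 TI (1 = LDT), bits 1-0 RPL */
#define	SEL_RPL_OF(sr)	((sr) & 0x3)
#define	SEL_TI_OF(sr)	(((sr) >> 2) & 0x1)
#define	SEL_IDX_OF(sr)	((sr) >> 3)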
690 705 /*
691 706 * Set general registers.
692 707 */
693 708 void
694 709 setgregs(klwp_t *lwp, gregset_t grp)
695 710 {
696 711 struct regs *rp = lwptoregs(lwp);
697 712 model_t datamodel = lwp_getdatamodel(lwp);
698 713
699 714 #if defined(__amd64)
700 715 struct pcb *pcb = &lwp->lwp_pcb;
701 716 int thisthread = lwptot(lwp) == curthread;
702 717
703 718 if (datamodel == DATAMODEL_NATIVE) {
704 719
705 720 if (thisthread)
706 721 (void) save_syscall_args(); /* copy the args */
707 722
708 723 rp->r_rdi = grp[REG_RDI];
709 724 rp->r_rsi = grp[REG_RSI];
710 725 rp->r_rdx = grp[REG_RDX];
711 726 rp->r_rcx = grp[REG_RCX];
712 727 rp->r_r8 = grp[REG_R8];
713 728 rp->r_r9 = grp[REG_R9];
714 729 rp->r_rax = grp[REG_RAX];
715 730 rp->r_rbx = grp[REG_RBX];
716 731 rp->r_rbp = grp[REG_RBP];
717 732 rp->r_r10 = grp[REG_R10];
718 733 rp->r_r11 = grp[REG_R11];
719 734 rp->r_r12 = grp[REG_R12];
720 735 rp->r_r13 = grp[REG_R13];
721 736 rp->r_r14 = grp[REG_R14];
722 737 rp->r_r15 = grp[REG_R15];
723 738 rp->r_trapno = grp[REG_TRAPNO];
724 739 rp->r_err = grp[REG_ERR];
725 740 rp->r_rip = grp[REG_RIP];
726 741 /*
727 742 * Setting %cs or %ss to anything else is quietly but
728 743 * quite definitely forbidden!
729 744 */
730 745 rp->r_cs = UCS_SEL;
731 746 rp->r_ss = UDS_SEL;
732 747 rp->r_rsp = grp[REG_RSP];
733 748
734 749 if (thisthread)
735 750 kpreempt_disable();
736 751
737 752 pcb->pcb_ds = UDS_SEL;
738 753 pcb->pcb_es = UDS_SEL;
739 754
740 755 /*
741 756 * 64-bit processes -are- allowed to set their fsbase/gsbase
742 757 * values directly, but only if they're using the segment
743 758 * selectors that allow that semantic.
744 759 *
745 760 * (32-bit processes must use lwp_set_private().)
746 761 */
747 762 pcb->pcb_fsbase = grp[REG_FSBASE];
748 763 pcb->pcb_gsbase = grp[REG_GSBASE];
749 764 pcb->pcb_fs = fix_segreg(grp[REG_FS], IS_NOT_CS, datamodel);
750 765 pcb->pcb_gs = fix_segreg(grp[REG_GS], IS_NOT_CS, datamodel);
751 766
752 767 /*
753 768 * Ensure that we go out via update_sregs
754 769 */
755 770 pcb->pcb_rupdate = 1;
756 771 lwptot(lwp)->t_post_sys = 1;
757 772 if (thisthread)
758 773 kpreempt_enable();
759 774 #if defined(_SYSCALL32_IMPL)
760 775 } else {
761 776 rp->r_rdi = (uint32_t)grp[REG_RDI];
762 777 rp->r_rsi = (uint32_t)grp[REG_RSI];
763 778 rp->r_rdx = (uint32_t)grp[REG_RDX];
764 779 rp->r_rcx = (uint32_t)grp[REG_RCX];
765 780 rp->r_rax = (uint32_t)grp[REG_RAX];
766 781 rp->r_rbx = (uint32_t)grp[REG_RBX];
767 782 rp->r_rbp = (uint32_t)grp[REG_RBP];
768 783 rp->r_trapno = (uint32_t)grp[REG_TRAPNO];
769 784 rp->r_err = (uint32_t)grp[REG_ERR];
770 785 rp->r_rip = (uint32_t)grp[REG_RIP];
771 786
772 787 rp->r_cs = fix_segreg(grp[REG_CS], IS_CS, datamodel);
773 788 rp->r_ss = fix_segreg(grp[REG_DS], IS_NOT_CS, datamodel);
774 789
775 790 rp->r_rsp = (uint32_t)grp[REG_RSP];
776 791
777 792 if (thisthread)
778 793 kpreempt_disable();
779 794
780 795 pcb->pcb_ds = fix_segreg(grp[REG_DS], IS_NOT_CS, datamodel);
781 796 pcb->pcb_es = fix_segreg(grp[REG_ES], IS_NOT_CS, datamodel);
782 797
783 798 /*
784 799 * (See fsbase/gsbase commentary above)
785 800 */
786 801 pcb->pcb_fs = fix_segreg(grp[REG_FS], IS_NOT_CS, datamodel);
787 802 pcb->pcb_gs = fix_segreg(grp[REG_GS], IS_NOT_CS, datamodel);
788 803
789 804 /*
790 805 * Ensure that we go out via update_sregs
791 806 */
792 807 pcb->pcb_rupdate = 1;
793 808 lwptot(lwp)->t_post_sys = 1;
794 809 if (thisthread)
795 810 kpreempt_enable();
796 811 #endif
797 812 }
798 813
799 814 /*
800 815 * Only certain bits of the flags register can be modified.
801 816 */
802 817 rp->r_rfl = (rp->r_rfl & ~PSL_USERMASK) |
803 818 (grp[REG_RFL] & PSL_USERMASK);
804 819
805 820 #elif defined(__i386)
806 821
807 822 /*
808 823 * Only certain bits of the flags register can be modified.
809 824 */
810 825 grp[EFL] = (rp->r_efl & ~PSL_USERMASK) | (grp[EFL] & PSL_USERMASK);
811 826
812 827 /*
813 828 * Copy saved registers from user stack.
814 829 */
815 830 bcopy(grp, &rp->r_gs, sizeof (gregset_t));
816 831
817 832 rp->r_cs = fix_segreg(rp->r_cs, IS_CS, datamodel);
818 833 rp->r_ss = fix_segreg(rp->r_ss, IS_NOT_CS, datamodel);
819 834 rp->r_ds = fix_segreg(rp->r_ds, IS_NOT_CS, datamodel);
820 835 rp->r_es = fix_segreg(rp->r_es, IS_NOT_CS, datamodel);
821 836 rp->r_fs = fix_segreg(rp->r_fs, IS_NOT_CS, datamodel);
822 837 rp->r_gs = fix_segreg(rp->r_gs, IS_NOT_CS, datamodel);
823 838
824 839 #endif /* __i386 */
825 840 }
826 841
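The %rflags handling above is the usual merge-under-mask pattern: only PSL_USERMASK bits come from the caller, everything else is preserved. A generic sketch:

#include <stdint.h>

/*
 * Merge only the bits covered by 'mask' from 'val' into 'cur'; used
 * here so a user cannot set privileged flag bits.
 */
static uint64_t
merge_masked(uint64_t cur, uint64_t val, uint64_t mask)
{
	return ((cur & ~mask) | (val & mask));
}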
827 842 /*
828 843 * Determine whether eip is likely to have an interrupt frame
829 844 * on the stack. We do this by comparing the address to the
830 845 * range of addresses spanned by several well-known routines.
831 846 */
832 847 extern void _interrupt();
833 848 extern void _allsyscalls();
834 849 extern void _cmntrap();
835 850 extern void fakesoftint();
836 851
837 852 extern size_t _interrupt_size;
838 853 extern size_t _allsyscalls_size;
839 854 extern size_t _cmntrap_size;
840 855 extern size_t _fakesoftint_size;
841 856
842 857 /*
843 858 * Get a pc-only stacktrace. Used for kmem_alloc() buffer ownership tracking.
844 859 * Returns MIN(current stack depth, pcstack_limit).
845 860 */
846 861 int
847 862 getpcstack(pc_t *pcstack, int pcstack_limit)
848 863 {
849 864 struct frame *fp = (struct frame *)getfp();
850 865 struct frame *nextfp, *minfp, *stacktop;
851 866 int depth = 0;
852 867 int on_intr;
853 868 uintptr_t pc;
854 869
855 870 if ((on_intr = CPU_ON_INTR(CPU)) != 0)
856 871 stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
857 872 else
858 873 stacktop = (struct frame *)curthread->t_stk;
859 874 minfp = fp;
860 875
861 876 pc = ((struct regs *)fp)->r_pc;
862 877
863 878 while (depth < pcstack_limit) {
864 879 nextfp = (struct frame *)fp->fr_savfp;
865 880 pc = fp->fr_savpc;
866 881 if (nextfp <= minfp || nextfp >= stacktop) {
867 882 if (on_intr) {
868 883 /*
869 884 * Hop from interrupt stack to thread stack.
870 885 */
871 886 stacktop = (struct frame *)curthread->t_stk;
872 887 minfp = (struct frame *)curthread->t_stkbase;
873 888 on_intr = 0;
874 889 continue;
875 890 }
876 891 break;
877 892 }
878 893 pcstack[depth++] = (pc_t)pc;
879 894 fp = nextfp;
880 895 minfp = fp;
881 896 }
882 897 return (depth);
883 898 }
884 899
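The walk above trusts saved frame pointers only while they increase monotonically toward the stack top, hopping once from the interrupt stack to the thread stack. A simplified, self-contained sketch of the invariant (synthetic frame type, no interrupt hop):

#include <stdint.h>

struct frame_s {
	struct frame_s *fr_savfp;	/* caller's frame pointer */
	uintptr_t fr_savpc;		/* return address */
};

/*
 * Collect saved PCs while each next frame pointer lies strictly between
 * the current minimum and the stack top.
 */
static int
walk(struct frame_s *fp, struct frame_s *stacktop, uintptr_t *pcs, int lim)
{
	struct frame_s *minfp = fp;
	int depth = 0;

	while (depth < lim) {
		struct frame_s *nextfp = fp->fr_savfp;

		if (nextfp <= minfp || nextfp >= stacktop)
			break;
		pcs[depth++] = fp->fr_savpc;
		fp = minfp = nextfp;
	}
	return (depth);
}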
885 900 /*
886 901 * The following ELF header fields are defined as processor-specific
 887  902  * in the i386 ABI:
888 903 *
889 904 * e_ident[EI_DATA] encoding of the processor-specific
890 905 * data in the object file
891 906 * e_machine processor identification
892 907 * e_flags processor-specific flags associated
893 908 * with the file
894 909 */
895 910
896 911 /*
897 912 * The value of at_flags reflects a platform's cpu module support.
898 913 * at_flags is used to check for allowing a binary to execute and
899 914 * is passed as the value of the AT_FLAGS auxiliary vector.
900 915 */
901 916 int at_flags = 0;
902 917
903 918 /*
904 919 * Check the processor-specific fields of an ELF header.
905 920 *
906 921 * returns 1 if the fields are valid, 0 otherwise
907 922 */
908 923 /*ARGSUSED2*/
909 924 int
910 925 elfheadcheck(
911 926 unsigned char e_data,
912 927 Elf32_Half e_machine,
913 928 Elf32_Word e_flags)
914 929 {
915 930 if (e_data != ELFDATA2LSB)
916 931 return (0);
917 932 #if defined(__amd64)
918 933 if (e_machine == EM_AMD64)
919 934 return (1);
920 935 #endif
921 936 return (e_machine == EM_386);
922 937 }
923 938
924 939 uint_t auxv_hwcap_include = 0; /* patch to enable unrecognized features */
925 940 uint_t auxv_hwcap_exclude = 0; /* patch for broken cpus, debugging */
926 941 #if defined(_SYSCALL32_IMPL)
927 942 uint_t auxv_hwcap32_include = 0; /* ditto for 32-bit apps */
928 943 uint_t auxv_hwcap32_exclude = 0; /* ditto for 32-bit apps */
929 944 #endif
930 945
931 946 /*
932 947 * Gather information about the processor and place it into auxv_hwcap
933 948 * so that it can be exported to the linker via the aux vector.
934 949 *
935 950 * We use this seemingly complicated mechanism so that we can ensure
936 951 * that /etc/system can be used to override what the system can or
937 952 * cannot discover for itself.
938 953 */
939 954 void
940 955 bind_hwcap(void)
941 956 {
942 957 uint_t cpu_hwcap_flags = cpuid_pass4(NULL);
943 958
944 959 auxv_hwcap = (auxv_hwcap_include | cpu_hwcap_flags) &
945 960 ~auxv_hwcap_exclude;
946 961
947 962 #if defined(__amd64)
948 963 /*
949 964 * On AMD processors, sysenter just doesn't work at all
950 965 * when the kernel is in long mode. On IA-32e processors
951 966 * it does, but there's no real point in all the alternate
952 967 * mechanism when syscall works on both.
953 968 *
954 969 * Besides, the kernel's sysenter handler is expecting a
955 970 * 32-bit lwp ...
956 971 */
957 972 auxv_hwcap &= ~AV_386_SEP;
958 973 #else
959 974 /*
960 975 * 32-bit processes can -always- use the lahf/sahf instructions
961 976 */
962 977 auxv_hwcap |= AV_386_AHF;
963 978 #endif
964 979
965 980 if (auxv_hwcap_include || auxv_hwcap_exclude)
966 981 cmn_err(CE_CONT, "?user ABI extensions: %b\n",
967 982 auxv_hwcap, FMT_AV_386);
968 983
969 984 #if defined(_SYSCALL32_IMPL)
970 985 auxv_hwcap32 = (auxv_hwcap32_include | cpu_hwcap_flags) &
971 986 ~auxv_hwcap32_exclude;
972 987
973 988 #if defined(__amd64)
974 989 /*
975 990 * If this is an amd64 architecture machine from Intel, then
976 991 * syscall -doesn't- work in compatibility mode, only sysenter does.
977 992 *
978 993 * Sigh.
979 994 */
980 995 if (!cpuid_syscall32_insn(NULL))
981 996 auxv_hwcap32 &= ~AV_386_AMD_SYSC;
982 997
983 998 /*
984 999 * 32-bit processes can -always- use the lahf/sahf instructions
985 1000 */
986 1001 auxv_hwcap32 |= AV_386_AHF;
987 1002 #endif
988 1003
989 1004 if (auxv_hwcap32_include || auxv_hwcap32_exclude)
990 1005 cmn_err(CE_CONT, "?32-bit user ABI extensions: %b\n",
991 1006 auxv_hwcap32, FMT_AV_386);
992 1007 #endif
993 1008 }
994 1009
995 1010 /*
996 1011 * sync_icache() - this is called
 997 1012  * in proc/fs/prusrio.c.  x86 has a unified cache and therefore
998 1013 * this is a nop.
999 1014 */
1000 1015 /* ARGSUSED */
1001 1016 void
1002 1017 sync_icache(caddr_t addr, uint_t len)
1003 1018 {
1004 1019 /* Do nothing for now */
1005 1020 }
1006 1021
1007 1022 /*ARGSUSED*/
1008 1023 void
1009 1024 sync_data_memory(caddr_t va, size_t len)
1010 1025 {
1011 1026 /* Not implemented for this platform */
1012 1027 }
1013 1028
1014 1029 int
1015 1030 __ipltospl(int ipl)
1016 1031 {
1017 1032 return (ipltospl(ipl));
1018 1033 }
1019 1034
1020 1035 /*
1021 1036 * The panic code invokes panic_saveregs() to record the contents of a
1022 1037 * regs structure into the specified panic_data structure for debuggers.
1023 1038 */
1024 1039 void
1025 1040 panic_saveregs(panic_data_t *pdp, struct regs *rp)
1026 1041 {
1027 1042 panic_nv_t *pnv = PANICNVGET(pdp);
1028 1043
1029 1044 struct cregs creg;
1030 1045
1031 1046 getcregs(&creg);
1032 1047
1033 1048 #if defined(__amd64)
1034 1049 PANICNVADD(pnv, "rdi", rp->r_rdi);
1035 1050 PANICNVADD(pnv, "rsi", rp->r_rsi);
1036 1051 PANICNVADD(pnv, "rdx", rp->r_rdx);
1037 1052 PANICNVADD(pnv, "rcx", rp->r_rcx);
1038 1053 PANICNVADD(pnv, "r8", rp->r_r8);
1039 1054 PANICNVADD(pnv, "r9", rp->r_r9);
1040 1055 PANICNVADD(pnv, "rax", rp->r_rax);
1041 1056 PANICNVADD(pnv, "rbx", rp->r_rbx);
1042 1057 PANICNVADD(pnv, "rbp", rp->r_rbp);
1043 1058 PANICNVADD(pnv, "r10", rp->r_r10);
1045 1060 PANICNVADD(pnv, "r11", rp->r_r11);
1046 1061 PANICNVADD(pnv, "r12", rp->r_r12);
1047 1062 PANICNVADD(pnv, "r13", rp->r_r13);
1048 1063 PANICNVADD(pnv, "r14", rp->r_r14);
1049 1064 PANICNVADD(pnv, "r15", rp->r_r15);
1050 1065 PANICNVADD(pnv, "fsbase", rdmsr(MSR_AMD_FSBASE));
1051 1066 PANICNVADD(pnv, "gsbase", rdmsr(MSR_AMD_GSBASE));
1052 1067 PANICNVADD(pnv, "ds", rp->r_ds);
1053 1068 PANICNVADD(pnv, "es", rp->r_es);
1054 1069 PANICNVADD(pnv, "fs", rp->r_fs);
1055 1070 PANICNVADD(pnv, "gs", rp->r_gs);
1056 1071 PANICNVADD(pnv, "trapno", rp->r_trapno);
1057 1072 PANICNVADD(pnv, "err", rp->r_err);
1058 1073 PANICNVADD(pnv, "rip", rp->r_rip);
1059 1074 PANICNVADD(pnv, "cs", rp->r_cs);
1060 1075 PANICNVADD(pnv, "rflags", rp->r_rfl);
1061 1076 PANICNVADD(pnv, "rsp", rp->r_rsp);
1062 1077 PANICNVADD(pnv, "ss", rp->r_ss);
1063 1078 PANICNVADD(pnv, "gdt_hi", (uint64_t)(creg.cr_gdt._l[3]));
1064 1079 PANICNVADD(pnv, "gdt_lo", (uint64_t)(creg.cr_gdt._l[0]));
1065 1080 PANICNVADD(pnv, "idt_hi", (uint64_t)(creg.cr_idt._l[3]));
1066 1081 PANICNVADD(pnv, "idt_lo", (uint64_t)(creg.cr_idt._l[0]));
1067 1082 #elif defined(__i386)
1068 1083 PANICNVADD(pnv, "gs", (uint32_t)rp->r_gs);
1069 1084 PANICNVADD(pnv, "fs", (uint32_t)rp->r_fs);
1070 1085 PANICNVADD(pnv, "es", (uint32_t)rp->r_es);
1071 1086 PANICNVADD(pnv, "ds", (uint32_t)rp->r_ds);
1072 1087 PANICNVADD(pnv, "edi", (uint32_t)rp->r_edi);
1073 1088 PANICNVADD(pnv, "esi", (uint32_t)rp->r_esi);
1074 1089 PANICNVADD(pnv, "ebp", (uint32_t)rp->r_ebp);
1075 1090 PANICNVADD(pnv, "esp", (uint32_t)rp->r_esp);
1076 1091 PANICNVADD(pnv, "ebx", (uint32_t)rp->r_ebx);
1077 1092 PANICNVADD(pnv, "edx", (uint32_t)rp->r_edx);
1078 1093 PANICNVADD(pnv, "ecx", (uint32_t)rp->r_ecx);
1079 1094 PANICNVADD(pnv, "eax", (uint32_t)rp->r_eax);
1080 1095 PANICNVADD(pnv, "trapno", (uint32_t)rp->r_trapno);
1081 1096 PANICNVADD(pnv, "err", (uint32_t)rp->r_err);
1082 1097 PANICNVADD(pnv, "eip", (uint32_t)rp->r_eip);
1083 1098 PANICNVADD(pnv, "cs", (uint32_t)rp->r_cs);
1084 1099 PANICNVADD(pnv, "eflags", (uint32_t)rp->r_efl);
1085 1100 PANICNVADD(pnv, "uesp", (uint32_t)rp->r_uesp);
1086 1101 PANICNVADD(pnv, "ss", (uint32_t)rp->r_ss);
1087 1102 PANICNVADD(pnv, "gdt", creg.cr_gdt);
1088 1103 PANICNVADD(pnv, "idt", creg.cr_idt);
1089 1104 #endif /* __i386 */
1090 1105
1091 1106 PANICNVADD(pnv, "ldt", creg.cr_ldt);
1092 1107 PANICNVADD(pnv, "task", creg.cr_task);
1093 1108 PANICNVADD(pnv, "cr0", creg.cr_cr0);
1094 1109 PANICNVADD(pnv, "cr2", creg.cr_cr2);
1095 1110 PANICNVADD(pnv, "cr3", creg.cr_cr3);
1096 1111 if (creg.cr_cr4)
1097 1112 PANICNVADD(pnv, "cr4", creg.cr_cr4);
1098 1113
1099 1114 PANICNVSET(pdp, pnv);
1100 1115 }
1101 1116
1102 1117 #define TR_ARG_MAX 6 /* Max args to print, same as SPARC */
1103 1118
1104 1119 #if !defined(__amd64)
1105 1120
1106 1121 /*
1107 1122 * Given a return address (%eip), determine the likely number of arguments
1108 1123 * that were pushed on the stack prior to its execution. We do this by
1109 1124 * expecting that a typical call sequence consists of pushing arguments on
1110 1125 * the stack, executing a call instruction, and then performing an add
1111 1126 * on %esp to restore it to the value prior to pushing the arguments for
1112 1127 * the call. We attempt to detect such an add, and divide the addend
1113 1128 * by the size of a word to determine the number of pushed arguments.
1114 1129 *
1115 1130 * If we do not find such an add, we punt and return TR_ARG_MAX. It is not
1116 1131 * possible to reliably determine if a function took no arguments (i.e. was
1117 1132 * void) because assembler routines do not reliably perform an add on %esp
1118 1133 * immediately upon returning (eg. _sys_call()), so returning TR_ARG_MAX is
1119 1134 * safer than returning 0.
1120 1135 */
1121 1136 static ulong_t
1122 1137 argcount(uintptr_t eip)
1123 1138 {
1124 1139 const uint8_t *ins = (const uint8_t *)eip;
1125 1140 ulong_t n;
1126 1141
1127 1142 enum {
1128 1143 M_MODRM_ESP = 0xc4, /* Mod/RM byte indicates %esp */
1129 1144 M_ADD_IMM32 = 0x81, /* ADD imm32 to r/m32 */
1130 1145 M_ADD_IMM8 = 0x83 /* ADD imm8 to r/m32 */
1131 1146 };
1132 1147
1133 1148 if (eip < KERNELBASE || ins[1] != M_MODRM_ESP)
1134 1149 return (TR_ARG_MAX);
1135 1150
1136 1151 switch (ins[0]) {
1137 1152 case M_ADD_IMM32:
1138 1153 n = ins[2] + (ins[3] << 8) + (ins[4] << 16) + (ins[5] << 24);
1139 1154 break;
1140 1155
1141 1156 case M_ADD_IMM8:
1142 1157 n = ins[2];
1143 1158 break;
1144 1159
1145 1160 default:
1146 1161 return (TR_ARG_MAX);
1147 1162 }
1148 1163
1149 1164 n /= sizeof (long);
1150 1165 return (MIN(n, TR_ARG_MAX));
1151 1166 }
1152 1167
1153 1168 #endif /* !__amd64 */
1154 1169
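The heuristic above can be checked against a concrete byte sequence: addl $12, %esp assembles to 0x83 0xc4 0x0c, which divides down to three pushed arguments. A standalone sketch (hypothetical names; the KERNELBASE check is dropped):

#include <stdint.h>
#include <stdio.h>

#define	ARG_MAX_SKETCH	6

/* Decode an "add $imm, %esp" at 'ins' into a pushed-argument count. */
static unsigned long
argcount_sketch(const uint8_t *ins)
{
	unsigned long n;

	if (ins[1] != 0xc4)		/* Mod/RM byte must name %esp */
		return (ARG_MAX_SKETCH);
	if (ins[0] == 0x81)		/* add imm32, r/m32 */
		n = ins[2] | (ins[3] << 8) | (ins[4] << 16) |
		    ((unsigned long)ins[5] << 24);
	else if (ins[0] == 0x83)	/* add imm8, r/m32 */
		n = ins[2];
	else
		return (ARG_MAX_SKETCH);
	n /= 4;				/* sizeof (long) on ILP32 */
	return (n < ARG_MAX_SKETCH ? n : ARG_MAX_SKETCH);
}

int
main(void)
{
	const uint8_t add12[] = { 0x83, 0xc4, 0x0c };	/* addl $12, %esp */

	(void) printf("%lu args\n", argcount_sketch(add12));	/* prints 3 */
	return (0);
}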
1155 1170 /*
1156 1171 * Print a stack backtrace using the specified frame pointer. We delay two
1157 1172 * seconds before continuing, unless this is the panic traceback.
1158 1173 * If we are in the process of panicking, we also attempt to write the
1159 1174  * stack backtrace to a statically assigned buffer, to allow the panic
1160 1175 * code to find it and write it in to uncompressed pages within the
1161 1176 * system crash dump.
1162 1177 * Note that the frame for the starting stack pointer value is omitted because
1163 1178 * the corresponding %eip is not known.
1164 1179 */
1165 1180
1166 1181 extern char *dump_stack_scratch;
1167 1182
1168 1183 #if defined(__amd64)
1169 1184
1170 1185 void
1171 1186 traceback(caddr_t fpreg)
1172 1187 {
1173 1188 struct frame *fp = (struct frame *)fpreg;
1174 1189 struct frame *nextfp;
1175 1190 uintptr_t pc, nextpc;
1176 1191 ulong_t off;
1177 1192 char args[TR_ARG_MAX * 2 + 16], *sym;
1178 1193 uint_t offset = 0;
1179 1194 uint_t next_offset = 0;
1180 1195 char stack_buffer[1024];
1181 1196
1182 1197 if (!panicstr)
1183 1198 printf("traceback: %%fp = %p\n", (void *)fp);
1184 1199
1185 1200 if (panicstr && !dump_stack_scratch) {
1186 1201 printf("Warning - stack not written to the dump buffer\n");
1187 1202 }
1188 1203
1189 1204 fp = (struct frame *)plat_traceback(fpreg);
1190 1205 if ((uintptr_t)fp < KERNELBASE)
1191 1206 goto out;
1192 1207
1193 1208 pc = fp->fr_savpc;
1194 1209 fp = (struct frame *)fp->fr_savfp;
1195 1210
1196 1211 while ((uintptr_t)fp >= KERNELBASE) {
1197 1212 /*
1198 1213 * XX64 Until port is complete tolerate 8-byte aligned
1199 1214 * frame pointers but flag with a warning so they can
1200 1215 * be fixed.
1201 1216 */
1202 1217 if (((uintptr_t)fp & (STACK_ALIGN - 1)) != 0) {
1203 1218 if (((uintptr_t)fp & (8 - 1)) == 0) {
1204 1219 printf(" >> warning! 8-byte"
1205 1220 " aligned %%fp = %p\n", (void *)fp);
1206 1221 } else {
1207 1222 printf(
1208 1223 " >> mis-aligned %%fp = %p\n", (void *)fp);
1209 1224 break;
1210 1225 }
1211 1226 }
1212 1227
1213 1228 args[0] = '\0';
1214 1229 nextpc = (uintptr_t)fp->fr_savpc;
1215 1230 nextfp = (struct frame *)fp->fr_savfp;
1216 1231 if ((sym = kobj_getsymname(pc, &off)) != NULL) {
1217 1232 printf("%016lx %s:%s+%lx (%s)\n", (uintptr_t)fp,
1218 1233 mod_containing_pc((caddr_t)pc), sym, off, args);
1219 1234 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1220 1235 "%s:%s+%lx (%s) | ",
1221 1236 mod_containing_pc((caddr_t)pc), sym, off, args);
1222 1237 } else {
1223 1238 printf("%016lx %lx (%s)\n",
1224 1239 (uintptr_t)fp, pc, args);
1225 1240 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1226 1241 "%lx (%s) | ", pc, args);
1227 1242 }
1228 1243
1229 1244 if (panicstr && dump_stack_scratch) {
1230 1245 next_offset = offset + strlen(stack_buffer);
1231 1246 if (next_offset < STACK_BUF_SIZE) {
1232 1247 bcopy(stack_buffer, dump_stack_scratch + offset,
1233 1248 strlen(stack_buffer));
1234 1249 offset = next_offset;
1235 1250 } else {
1236 1251 /*
1237 1252 * In attempting to save the panic stack
1238 1253 * to the dumpbuf we have overflowed that area.
1239 1254 * Print a warning and continue to printf the
1240 1255 * stack to the msgbuf
1241 1256 */
1242 1257 printf("Warning: stack in the dump buffer"
1243 1258 " may be incomplete\n");
1244 1259 offset = next_offset;
1245 1260 }
1246 1261 }
1247 1262
1248 1263 pc = nextpc;
1249 1264 fp = nextfp;
1250 1265 }
1251 1266 out:
1252 1267 if (!panicstr) {
1253 1268 printf("end of traceback\n");
1254 1269 DELAY(2 * MICROSEC);
1255 1270 } else if (dump_stack_scratch) {
1256 1271 dump_stack_scratch[offset] = '\0';
1257 1272 }
1258 1273 }
1259 1274
1260 1275 #elif defined(__i386)
1261 1276
1262 1277 void
1263 1278 traceback(caddr_t fpreg)
1264 1279 {
1265 1280 struct frame *fp = (struct frame *)fpreg;
1266 1281 struct frame *nextfp, *minfp, *stacktop;
1267 1282 uintptr_t pc, nextpc;
1268 1283 uint_t offset = 0;
1269 1284 uint_t next_offset = 0;
1270 1285 char stack_buffer[1024];
1271 1286
1272 1287 cpu_t *cpu;
1273 1288
1274 1289 /*
1275 1290 * args[] holds TR_ARG_MAX hex long args, plus ", " or '\0'.
1276 1291 */
1277 1292 char args[TR_ARG_MAX * 2 + 8], *p;
1278 1293
1279 1294 int on_intr;
1280 1295 ulong_t off;
1281 1296 char *sym;
1282 1297
1283 1298 if (!panicstr)
1284 1299 printf("traceback: %%fp = %p\n", (void *)fp);
1285 1300
1286 1301 if (panicstr && !dump_stack_scratch) {
1287 1302 printf("Warning - stack not written to the dumpbuf\n");
1288 1303 }
1289 1304
1290 1305 /*
1291 1306 * If we are panicking, all high-level interrupt information in
1292 1307 * CPU was overwritten. panic_cpu has the correct values.
1293 1308 */
1294 1309 kpreempt_disable(); /* prevent migration */
1295 1310
1296 1311 cpu = (panicstr && CPU->cpu_id == panic_cpu.cpu_id)? &panic_cpu : CPU;
1297 1312
1298 1313 if ((on_intr = CPU_ON_INTR(cpu)) != 0)
1299 1314 stacktop = (struct frame *)(cpu->cpu_intr_stack + SA(MINFRAME));
1300 1315 else
1301 1316 stacktop = (struct frame *)curthread->t_stk;
1302 1317
1303 1318 kpreempt_enable();
1304 1319
1305 1320 fp = (struct frame *)plat_traceback(fpreg);
1306 1321 if ((uintptr_t)fp < KERNELBASE)
1307 1322 goto out;
1308 1323
1309 1324 minfp = fp; /* Baseline minimum frame pointer */
1310 1325 pc = fp->fr_savpc;
1311 1326 fp = (struct frame *)fp->fr_savfp;
1312 1327
1313 1328 while ((uintptr_t)fp >= KERNELBASE) {
1314 1329 ulong_t argc;
1315 1330 long *argv;
1316 1331
1317 1332 if (fp <= minfp || fp >= stacktop) {
1318 1333 if (on_intr) {
1319 1334 /*
1320 1335 * Hop from interrupt stack to thread stack.
1321 1336 */
1322 1337 stacktop = (struct frame *)curthread->t_stk;
1323 1338 minfp = (struct frame *)curthread->t_stkbase;
1324 1339 on_intr = 0;
1325 1340 continue;
1326 1341 }
1327 1342 break; /* we're outside of the expected range */
1328 1343 }
1329 1344
1330 1345 if ((uintptr_t)fp & (STACK_ALIGN - 1)) {
1331 1346 printf(" >> mis-aligned %%fp = %p\n", (void *)fp);
1332 1347 break;
1333 1348 }
1334 1349
1335 1350 nextpc = fp->fr_savpc;
1336 1351 nextfp = (struct frame *)fp->fr_savfp;
1337 1352 argc = argcount(nextpc);
1338 1353 argv = (long *)((char *)fp + sizeof (struct frame));
1339 1354
1340 1355 args[0] = '\0';
1341 1356 p = args;
1342 1357 while (argc-- > 0 && argv < (long *)stacktop) {
1343 1358 p += snprintf(p, args + sizeof (args) - p,
1344 1359 "%s%lx", (p == args) ? "" : ", ", *argv++);
1345 1360 }
1346 1361
1347 1362 if ((sym = kobj_getsymname(pc, &off)) != NULL) {
1348 1363 printf("%08lx %s:%s+%lx (%s)\n", (uintptr_t)fp,
1349 1364 mod_containing_pc((caddr_t)pc), sym, off, args);
1350 1365 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1351 1366 "%s:%s+%lx (%s) | ",
1352 1367 mod_containing_pc((caddr_t)pc), sym, off, args);
1353 1368
1354 1369 } else {
1355 1370 printf("%08lx %lx (%s)\n",
1356 1371 (uintptr_t)fp, pc, args);
1357 1372 (void) snprintf(stack_buffer, sizeof (stack_buffer),
1358 1373 "%lx (%s) | ", pc, args);
1359 1374
1360 1375 }
1361 1376
1362 1377 if (panicstr && dump_stack_scratch) {
1363 1378 next_offset = offset + strlen(stack_buffer);
1364 1379 if (next_offset < STACK_BUF_SIZE) {
1365 1380 bcopy(stack_buffer, dump_stack_scratch + offset,
1366 1381 strlen(stack_buffer));
1367 1382 offset = next_offset;
1368 1383 } else {
1369 1384 /*
1370 1385 * In attempting to save the panic stack
1371 1386 * to the dumpbuf we have overflowed that area.
1372 1387 * Print a warning and continue to printf the
1373 1388 * stack to the msgbuf
1374 1389 */
1375 1390 printf("Warning: stack in the dumpbuf"
1376 1391 " may be incomplete\n");
1377 1392 offset = next_offset;
1378 1393 }
1379 1394 }
1380 1395
1381 1396 minfp = fp;
1382 1397 pc = nextpc;
1383 1398 fp = nextfp;
1384 1399 }
1385 1400 out:
1386 1401 if (!panicstr) {
1387 1402 printf("end of traceback\n");
1388 1403 DELAY(2 * MICROSEC);
1389 1404 } else if (dump_stack_scratch) {
1390 1405 dump_stack_scratch[offset] = '\0';
1391 1406 }
1392 1407
1393 1408 }
1394 1409
1395 1410 #endif /* __i386 */
1396 1411
1397 1412 /*
1398 1413 * Generate a stack backtrace from a saved register set.
1399 1414 */
1400 1415 void
1401 1416 traceregs(struct regs *rp)
1402 1417 {
1403 1418 traceback((caddr_t)rp->r_fp);
1404 1419 }
1405 1420
1406 1421 void
1407 1422 exec_set_sp(size_t stksize)
1408 1423 {
1409 1424 klwp_t *lwp = ttolwp(curthread);
1410 1425
1411 1426 lwptoregs(lwp)->r_sp = (uintptr_t)curproc->p_usrstack - stksize;
1412 1427 }
1413 1428
1414 1429 hrtime_t
1415 1430 gethrtime_waitfree(void)
1416 1431 {
1417 1432 return (dtrace_gethrtime());
1418 1433 }
1419 1434
1420 1435 hrtime_t
1421 1436 gethrtime(void)
1422 1437 {
1423 1438 return (gethrtimef());
1424 1439 }
1425 1440
1426 1441 hrtime_t
1427 1442 gethrtime_unscaled(void)
1428 1443 {
1429 1444 return (gethrtimeunscaledf());
1430 1445 }
1431 1446
1432 1447 void
1433 1448 scalehrtime(hrtime_t *hrt)
1434 1449 {
1435 1450 scalehrtimef(hrt);
1436 1451 }
1437 1452
1438 1453 uint64_t
1439 1454 unscalehrtime(hrtime_t nsecs)
1440 1455 {
1441 1456 return (unscalehrtimef(nsecs));
1442 1457 }
1443 1458
1444 1459 void
1445 1460 gethrestime(timespec_t *tp)
1446 1461 {
1447 1462 gethrestimef(tp);
1448 1463 }
1449 1464
1450 1465 #if defined(__amd64)
1451 1466 /*
1452 1467 * Part of the implementation of hres_tick(); this routine is
1453 1468 * easier in C than assembler .. called with the hres_lock held.
1454 1469 *
1455 1470 * XX64 Many of these timekeeping variables need to be extern'ed in a header
1456 1471 */
1457 1472
1458 1473 #include <sys/time.h>
1459 1474 #include <sys/machlock.h>
1460 1475
1461 1476 extern int one_sec;
1462 1477 extern int max_hres_adj;
1463 1478
1464 1479 void
1465 1480 __adj_hrestime(void)
1466 1481 {
1467 1482 long long adj;
1468 1483
1469 1484 if (hrestime_adj == 0)
1470 1485 adj = 0;
1471 1486 else if (hrestime_adj > 0) {
1472 1487 if (hrestime_adj < max_hres_adj)
1473 1488 adj = hrestime_adj;
1474 1489 else
1475 1490 adj = max_hres_adj;
1476 1491 } else {
1477 1492 if (hrestime_adj < -max_hres_adj)
1478 1493 adj = -max_hres_adj;
1479 1494 else
1480 1495 adj = hrestime_adj;
1481 1496 }
1482 1497
1483 1498 timedelta -= adj;
1484 1499 hrestime_adj = timedelta;
1485 1500 hrestime.tv_nsec += adj;
1486 1501
1487 1502 while (hrestime.tv_nsec >= NANOSEC) {
1488 1503 one_sec++;
1489 1504 hrestime.tv_sec++;
1490 1505 hrestime.tv_nsec -= NANOSEC;
1491 1506 }
1492 1507 }
1493 1508 #endif
1494 1509
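The if/else ladder above is just a clamp of hrestime_adj into [-max_hres_adj, max_hres_adj], bounding what a single tick may apply. An equivalent sketch:

/* Clamp the outstanding clock adjustment to what one tick may apply. */
static long long
clamp_adj(long long adj, long long max)
{
	if (adj > max)
		return (max);
	if (adj < -max)
		return (-max);
	return (adj);
}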
1495 1510 /*
1496 1511  * Wrapper functions to maintain backwards compatibility
1497 1512 */
1498 1513 int
1499 1514 xcopyin(const void *uaddr, void *kaddr, size_t count)
1500 1515 {
1501 1516 return (xcopyin_nta(uaddr, kaddr, count, UIO_COPY_CACHED));
1502 1517 }
1503 1518
1504 1519 int
1505 1520 xcopyout(const void *kaddr, void *uaddr, size_t count)
1506 1521 {
1507 1522 return (xcopyout_nta(kaddr, uaddr, count, UIO_COPY_CACHED));
1508 1523 }
(833 lines elided)