struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;
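The array dimension comes from the I/O bitmap sizing macros defined alongside this struct. A minimal sketch of those definitions (taken from older arch/x86 headers, so exact spellings may differ by kernel version) makes the "+ 1" easier to see:

/* Sketch of the sizing macros (older x86 headers; may differ by version). */
#define IO_BITMAP_BITS		65536			/* one bit per 16-bit I/O port */
#define IO_BITMAP_BYTES		(IO_BITMAP_BITS / 8)	/* 8 KiB bitmap proper */
#define IO_BITMAP_LONGS		(IO_BITMAP_BYTES / sizeof(long))

The extra long appended by "+ 1" is kept all ones: the CPU reads one byte past the last byte of the bitmap it actually consults, and that byte must be 0xff while still lying inside the TSS segment limit.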
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;		/* kernel-mode stack pointer of the current process */
	unsigned short		ss0, __ss0h;	/* kernel-mode stack segment selector of the current process */
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;		/* user-mode stack pointer of the current process */
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
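The sp0 and ss1 fields are the ones the kernel rewrites on every context switch, so that the next interrupt or sysenter from user mode lands on the incoming task's kernel stack. A minimal sketch, roughly following native_load_sp0() from older kernels (details vary by version):

static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	/* Point the ring-0 stack used on user->kernel transitions at this task. */
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Keep the cached SYSENTER code segment (ss1) in sync with the MSR. */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}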
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
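INIT_TSS pre-fills each per-CPU instance (kernel stack segment, all-ones I/O bitmap), and cpu_init() later installs that TSS into the GDT and loads it into the task register. A simplified sketch based on older 32-bit kernels (names and details vary by version):

/* Install this CPU's TSS during cpu_init() (simplified sketch). */
struct tss_struct *t = &per_cpu(init_tss, cpu);

load_sp0(t, &current->thread);	/* ring-0 stack for the running task     */
set_tss_desc(cpu, t);		/* write the TSS descriptor into the GDT */
load_TR_desc();			/* ltr: make it the active TSS           */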