1
|
/*
|
2
|
* Copyright (C) 2016 The BoxedWine Team
|
3
|
*
|
4
|
* This program is free software; you can redistribute it and/or modify
|
5
|
* it under the terms of the GNU General Public License as published by
|
6
|
* the Free Software Foundation; either version 2 of the License, or
|
7
|
* (at your option) any later version.
|
8
|
*
|
9
|
* This program is distributed in the hope that it will be useful,
|
10
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
11
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
12
|
* GNU General Public License for more details.
|
13
|
*
|
14
|
* You should have received a copy of the GNU General Public License
|
15
|
* along with this program; if not, write to the Free Software
|
16
|
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
17
|
*/
|
18
|
|
19
|
#include "boxedwine.h"
|
20
|
|
21
|
#include "kscheduler.h"
|
22
|
#include "ksignal.h"
|
23
|
#include "kscheduler.h"
|
24
|
#include "ksignal.h"
|
25
|
#include <string.h>
|
26
|
#include <setjmp.h>
|
27
|
|
28
|
#ifdef BOXEDWINE_BINARY_TRANSLATOR
|
29
|
THREAD_LOCAL
|
30
|
#endif
|
31
|
KThread* KThread::runningThread;
|
32
|
|
33
|
BOXEDWINE_MUTEX KThread::futexesMutex;
|
34
|
|
35
|
// Tears down the thread: run cleanup() first (detaches from process,
// scheduler and futex table), then destroy the CPU core.  The cpu member
// is nulled before deletion so nothing can observe a dangling pointer
// through this thread while the CPU destructor runs.
KThread::~KThread() {
    this->cleanup();
    CPU* doomed = this->cpu;
    this->cpu = NULL;
    delete doomed;
}
|
41
|
|
42
|
// Releases everything this thread owns before it is destroyed or re-used:
// wakes anyone waiting for a signal on this thread to finish, honors the
// clear_child_tid protocol, frees futex slots, and detaches the thread
// from its process and from the scheduler.
void KThread::cleanup() {
    // wake any thread blocked in signal(..., wait=true) / sigsuspend on us
    BOXEDWINE_CONDITION_SIGNAL_ALL_NEED_LOCK(this->waitingForSignalToEndCond);
    // CLONE_CHILD_CLEARTID semantics: zero the tid word and wake one futex
    // waiter on it so a pthread_join-style wait can complete
    if (!KSystem::shutingDown && this->clear_child_tid && this->process && this->process->memory->isValidWriteAddress(this->clear_child_tid, 4)) {
        writed(this->clear_child_tid, 0);
        this->futex(this->clear_child_tid, 1, 1, 0); // op 1 == FUTEX_WAKE, wake at most 1
    }
    this->clear_child_tid = 0;
#ifndef BOXEDWINE_MULTI_THREADED
    // cooperative scheduler build: unhook from whatever condition queue we
    // were parked on so the condition never signals a dead thread
    if (this->waitingCond) {
        BOXEDWINE_CRITICAL_SECTION_WITH_CONDITION(*this->waitingCond);
        this->waitThreadNode.remove();
        this->waitingCond = NULL;
    }
#endif
    this->clearFutexes();
    if (this->process) {
        this->process->removeThread(this);
    }
    unscheduleThread(this);
}
|
62
|
|
63
|
// Returns the thread to a pristine state for reuse: drops any futex slots
// it holds, resets the CPU core, forgets the sigaltstack registration and
// allocates a fresh stack.
void KThread::reset() {
    this->clearFutexes();
    this->cpu->reset();
    this->alternateStackSize = 0;
    this->alternateStack = 0;
    this->setupStack();
}
|
70
|
|
71
|
// Carves out the thread stack: MAX_STACK_SIZE worth of read/write pages
// bracketed by one inaccessible guard page above (catches underrun) and one
// below (catches overrun), then points ESP one page below the top.
void KThread::setupStack() {
    U32 page = 0;
    U32 pageCount = MAX_STACK_SIZE >> K_PAGE_SHIFT; // 1MB for max stack
    pageCount+=2; // guard pages
    // try the preferred stack region first, then two lower fallback bases
    if (!this->memory->findFirstAvailablePage(ADDRESS_PROCESS_STACK_START, pageCount, &page, false)) {
        if (!this->memory->findFirstAvailablePage(0xC0000, pageCount, &page, false)) {
            if (!this->memory->findFirstAvailablePage(0x80000, pageCount, &page, false)) {
                kpanic("Failed to allocate stack for thread");
            }
        }
    }
    // usable pages sit between the two guard pages
    this->memory->allocPages(page+1, pageCount-2, PAGE_READ|PAGE_WRITE, 0, 0, 0);
    // 1 page above (catch stack underrun)
    this->memory->allocPages(page+pageCount-1, 1, 0, 0, 0, 0);
    // 1 page below (catch stack overrun)
    this->memory->allocPages(page, 1, 0, 0, 0, 0);
    this->stackPageCount = pageCount;
    this->stackPageStart = page;
    // reg[4] is ESP
    this->cpu->reg[4].u32 = (this->stackPageStart + this->stackPageCount - 1) << K_PAGE_SHIFT; // one page away from the top
}
|
91
|
|
92
|
// Constructs a thread belonging to `process` with kernel thread id `id`.
// Allocates a CPU core, binds it to this thread, and marks every TLS slot
// unused (seg_not_present + read_exec_only == "empty", see isLdtEmpty).
KThread::KThread(U32 id, const std::shared_ptr<KProcess>& process) :
    id(id),
    sigMask(0),
    inSigMask(0),
    alternateStack(0),
    alternateStackSize(0),
    cpu(NULL),
    stackPageStart(0),
    stackPageCount(0),
    process(process),
    memory(0),
    interrupted(false),
    inSignal(0),
#ifdef BOXEDWINE_MULTI_THREADED
    exited(false),
#endif
    terminating(false),
    clear_child_tid(0),
    userTime(0),
    kernelTime(0),
    inSysCall(0),
    waitingForSignalToEndCond("KThread::waitingForSignalToEndCond"),
    waitingForSignalToEndMaskToRestore(0),
    pendingSignals(0),
    glContext(0),
    currentContext(0),
    log(false),
    waitingCond(0),
    pollCond("KThread::pollCond"),
#ifndef BOXEDWINE_MULTI_THREADED
    scheduledThreadNode(this),
    waitThreadNode(this),
#endif
    condStartWaitTime(0),
    sleepCond("KThread::sleepCond")
    {
    int i;

    this->sigMask = 0;
    // mark every TLS descriptor as empty/unused
    for (i=0;i<TLS_ENTRIES;i++) {
        this->tls[i].seg_not_present = 1;
        this->tls[i].read_exec_only = 1;
    }
    this->cpu = CPU::allocCPU();
    this->cpu->thread = this;
    this->memory = process->memory;
    // extra logging for services.exe threads only
    if (process->name=="services.exe") {
        this->log=true;
    }
    // debugging aid kept for reference: per-thread CPU log file
    //char tmp[10];
    //itoa(id, tmp, 10);
    //strcat(tmp, ".txt");
    //if (id==0x1c)
    //this->cpu->logFile = fopen(tmp, "w");
}
|
147
|
|
148
|
bool KThread::isLdtEmpty(struct user_desc* desc) {
|
149
|
return (!desc || (desc->seg_not_present==1 && desc->read_exec_only==1));
|
150
|
}
|
151
|
|
152
|
// Resolves a descriptor-table index: indexes in the TLS window come from
// this thread's private table (guarded by tlsMutex), other valid indexes
// come from the process-wide LDT, anything else is NULL.
struct user_desc* KThread::getLDT(U32 index) {
    if (index >= TLS_ENTRY_START_INDEX && index < TLS_ENTRIES + TLS_ENTRY_START_INDEX) {
        BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(tlsMutex);
        return &this->tls[index - TLS_ENTRY_START_INDEX];
    }
    if (index < LDT_ENTRIES) {
        return this->process->getLDT(index);
    }
    return NULL;
}
|
161
|
|
162
|
// Stores a TLS descriptor for this thread (set_thread_area path).
// NOTE(review): there is no bounds check here -- assumes desc->entry_number
// is already validated by the caller to lie within
// [TLS_ENTRY_START_INDEX, TLS_ENTRY_START_INDEX + TLS_ENTRIES); confirm at
// the syscall entry point.
void KThread::setTLS(struct user_desc* desc) {
    BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(tlsMutex);
    this->tls[desc->entry_number-TLS_ENTRY_START_INDEX] = *desc;
}
|
166
|
|
167
|
// Delivers `signal` to this thread (tgkill path): runs it immediately when
// it is not blocked by the active mask, otherwise records it in
// pendingSignals.  When `wait` is true and the target is another thread,
// the caller blocks until the handler finishes.  Always returns 0.
U32 KThread::signal(U32 signal, bool wait) {
    if (signal==0) {
        return 0;
    }

    // fill in the siginfo that runSignal copies onto the handler's stack;
    // slot layout matches signalIllegalInstruction:
    // [0]=signo, [2]=code, [3]=sender pid, [4]=sender uid
    memset(process->sigActions[signal].sigInfo, 0, sizeof(process->sigActions[signal].sigInfo));
    process->sigActions[signal].sigInfo[0] = signal;
    process->sigActions[signal].sigInfo[2] = K_SI_USER;
    process->sigActions[signal].sigInfo[3] = process->id;
    process->sigActions[signal].sigInfo[4] = process->userId;

    // deliver now only if the signal is not blocked by the active mask
    // (the in-signal mask while a handler runs, the normal mask otherwise)
    if (((U64)1 << (signal-1)) & ~(this->inSignal?this->inSigMask:this->sigMask)) {
        // don't return -K_WAIT, we don't want to re-enter tgkill, instead we will return 0 once the thread wakes up

        // must set CPU state before runSignal since it will be stored
        if (this==KThread::currentThread()) {
            this->cpu->reg[0].u32 = 0;  // EAX: result the interrupted code sees
            this->cpu->eip.u32+=2;      // presumably skips the 2-byte syscall insn -- confirm
        }
#ifdef BOXEDWINE_MULTI_THREADED
        else {
            // :TODO: how to interrupt the thread (the current approache assumes the thread will yield to the signal)
            {
                BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(this->pendingSignalsMutex);
                this->pendingSignals |= ((U64)1 << (signal-1));
            }
            if (wait) {
                // onExitSignal broadcasts this condition when the handler returns
                BOXEDWINE_CONDITION_LOCK(this->waitingForSignalToEndCond);
                BOXEDWINE_CONDITION_WAIT(this->waitingForSignalToEndCond);
                BOXEDWINE_CONDITION_UNLOCK(this->waitingForSignalToEndCond);
            }
            return 0;
        }
#endif
        this->runSignal(signal, -1, 0);
        if (wait && KThread::currentThread()!=this) {
            // block until onExitSignal signals this condition
            BOXEDWINE_CONDITION_LOCK(this->waitingForSignalToEndCond);
            BOXEDWINE_CONDITION_WAIT(this->waitingForSignalToEndCond);
            BOXEDWINE_CONDITION_UNLOCK(this->waitingForSignalToEndCond);
        }
    } else {
        // blocked: remember the signal and poke any signalfd watchers
        BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(this->pendingSignalsMutex);
        this->pendingSignals |= ((U64)1 << (signal-1));
        this->process->signalFd(this, signal);
    }
    return 0;
}
|
214
|
|
215
|
#define FUTEX_WAIT 0
|
216
|
#define FUTEX_WAKE 1
|
217
|
#define FUTEX_WAIT_PRIVATE 128
|
218
|
#define FUTEX_WAKE_PRIVATE 129
|
219
|
|
220
|
struct futex {
|
221
|
public:
|
222
|
futex() : cond("futex") {}
|
223
|
KThread* thread;
|
224
|
U8* address;
|
225
|
U32 expireTimeInMillies;
|
226
|
bool wake;
|
227
|
BOXEDWINE_CONDITION cond;
|
228
|
};
|
229
|
|
230
|
#define MAX_FUTEXES 128
|
231
|
|
232
|
struct futex system_futex[MAX_FUTEXES];
|
233
|
|
234
|
struct futex* getFutex(KThread* thread, U8* address) {
|
235
|
int i=0;
|
236
|
|
237
|
for (i=0;i<MAX_FUTEXES;i++) {
|
238
|
if (system_futex[i].address == address && system_futex[i].thread==thread) {
|
239
|
return &system_futex[i];
|
240
|
}
|
241
|
}
|
242
|
return 0;
|
243
|
}
|
244
|
|
245
|
// Claims a free slot (thread == 0) in the wait table for this waiter under
// the global critical section; panics if the table is exhausted.
struct futex* allocFutex(KThread* thread, U8* address, U32 millies) {
    BOXEDWINE_CRITICAL_SECTION;

    for (int slot = 0; slot < MAX_FUTEXES; slot++) {
        struct futex* f = &system_futex[slot];
        if (f->thread == 0) {
            f->thread = thread;
            f->address = address;
            f->expireTimeInMillies = millies;
            f->wake = false;
            return f;
        }
    }
    kpanic("ran out of futexes");
    return 0;
}
|
261
|
|
262
|
// Returns a slot to the pool.  thread == 0 is the "free" marker scanned by
// allocFutex; the assignment order is kept as-is since other code scans the
// table without the allocation lock.
void freeFutex(struct futex* f) {
    f->thread = 0;
    f->address = 0;
}
|
266
|
|
267
|
void KThread::clearFutexes() {
|
268
|
U32 i;
|
269
|
|
270
|
BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(KThread::futexesMutex);
|
271
|
for (i=0;i<MAX_FUTEXES;i++) {
|
272
|
if (system_futex[i].thread == this) {
|
273
|
freeFutex(&system_futex[i]);
|
274
|
}
|
275
|
}
|
276
|
}
|
277
|
|
278
|
// Emulates the Linux futex syscall for FUTEX_WAIT/FUTEX_WAKE (the _PRIVATE
// variants are treated identically).  addr is the emulated address; value
// is the expected word for WAIT or the max number of waiters to wake for
// WAKE; pTime is an optional emulated timespec pointer (WAIT timeout).
U32 KThread::futex(U32 addr, U32 op, U32 value, U32 pTime) {
    // waiters and wakers are matched on the physical address so that
    // mappings shared between processes agree on the futex identity
    U8* ramAddress = getPhysicalReadAddress(addr, 4);

    if (ramAddress==0) {
        kpanic("Could not find futex address: %0.8X", addr);
    }
    if (op==FUTEX_WAIT || op==FUTEX_WAIT_PRIVATE) {
        // non-NULL if this thread re-enters the syscall after being restarted
        struct futex* f=getFutex(this, ramAddress);
        U32 expireTime;

        if (pTime == 0) {
            expireTime = 0xFFFFFFFF; // no timeout
        } else {
            // convert the relative timespec to an absolute millisecond deadline
            U32 seconds = readd(pTime);
            U32 nano = readd(pTime + 4);
            expireTime = seconds * 1000 + nano / 1000000 + KSystem::getMilliesSinceStart();
        }
        bool checkValue = false;

        if (!f) {
            checkValue = true; // only compare *addr to value on first entry
            f = allocFutex(this, ramAddress, expireTime);
        }
        while (true) {
            BOXEDWINE_CRITICAL_SECTION_WITH_CONDITION(f->cond);
            if (checkValue) {
                checkValue = false;
                if (readd(addr) != value) { // needs to be protected
                    freeFutex(f);
                    return -K_EWOULDBLOCK;
                }
            }
            // a FUTEX_WAKE selected this slot
            if (f->wake) {
                freeFutex(f);
                return 0;
            }
            if (f->expireTimeInMillies<0x7FFFFFFF) {
                S32 diff = f->expireTimeInMillies - KSystem::getMilliesSinceStart();
                if (diff<=0) {
                    freeFutex(f);
                    return -K_ETIMEDOUT;
                }
                BOXEDWINE_CONDITION_WAIT_TIMEOUT(f->cond, (U32)diff);
            } else {
                BOXEDWINE_CONDITION_WAIT(f->cond);
            }
#ifdef BOXEDWINE_MULTI_THREADED
            if (this->terminating) {
                return -K_EINTR; // probably doesn't matter
            }
#endif
        }
    } else if (op==FUTEX_WAKE_PRIVATE || op==FUTEX_WAKE) {
        int i;
        U32 count = 0;
        // release up to `value` waiters parked on this physical address
        for (i=0;i<MAX_FUTEXES && count<value;i++) {
            if (system_futex[i].address==ramAddress && !system_futex[i].wake) {
                BOXEDWINE_CRITICAL_SECTION_WITH_CONDITION(system_futex[i].cond);
                system_futex[i].wake = true;
                BOXEDWINE_CONDITION_SIGNAL(system_futex[i].cond);
                count++;
            }
        }
        return count;
    } else {
        kwarn("syscall __NR_futex op %d not implemented", op);
        return -1;
    }
}
|
347
|
|
348
|
void KThread::signalIllegalInstruction(int code) {
|
349
|
memset(this->process->sigActions[K_SIGILL].sigInfo, 0, sizeof(this->process->sigActions[K_SIGILL].sigInfo));
|
350
|
this->process->sigActions[K_SIGILL].sigInfo[0] = K_SIGILL;
|
351
|
this->process->sigActions[K_SIGILL].sigInfo[2] = code;
|
352
|
this->process->sigActions[K_SIGILL].sigInfo[3] = this->process->id;
|
353
|
this->process->sigActions[K_SIGILL].sigInfo[4] = this->process->userId;
|
354
|
this->runSignal(K_SIGILL, -1, 0); // blocking signal, signalfd can't handle this
|
355
|
}
|
356
|
|
357
|
bool KThread::runSignals() {
|
358
|
U64 todoProcess = this->process->pendingSignals & ~(this->inSignal?this->inSigMask:this->sigMask);
|
359
|
U64 todoThread = this->pendingSignals & ~(this->inSignal?this->inSigMask:this->sigMask);
|
360
|
|
361
|
if (todoProcess!=0 || todoThread!=0) {
|
362
|
U32 i;
|
363
|
|
364
|
for (i=0;i<32;i++) {
|
365
|
if ((todoProcess & ((U64)1 << i))!=0) {
|
366
|
BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(this->process->pendingSignalsMutex);
|
367
|
if ((this->process->pendingSignals & ((U64)1 << i))!=0) {
|
368
|
this->process->pendingSignals &= ~(1 << i);
|
369
|
this->runSignal(i+1, -1, 0);
|
370
|
return true;
|
371
|
}
|
372
|
}
|
373
|
if ((todoThread & ((U64)1 << i))!=0) {
|
374
|
BOXEDWINE_CRITICAL_SECTION_WITH_MUTEX(this->process->pendingSignalsMutex);
|
375
|
if ((this->process->pendingSignals & ((U64)1 << i))!=0) {
|
376
|
this->pendingSignals &= ~(1 << i);
|
377
|
this->runSignal(i+1, -1, 0);
|
378
|
return true;
|
379
|
}
|
380
|
}
|
381
|
}
|
382
|
}
|
383
|
return false;
|
384
|
}
|
385
|
/*
|
386
|
typedef union compat_sigval {
|
387
|
S32 sival_int;
|
388
|
U32 sival_ptr;
|
389
|
} compat_sigval_t;
|
390
|
|
391
|
typedef struct compat_siginfo {
|
392
|
S32 si_signo;
|
393
|
S32 si_errno;
|
394
|
S32 si_code;
|
395
|
|
396
|
union {
|
397
|
S32 _pad[29];
|
398
|
|
399
|
// kill()
|
400
|
struct {
|
401
|
U32 _pid; // sender's pid
|
402
|
U32 _uid; // sender's uid
|
403
|
} _kill;
|
404
|
|
405
|
// POSIX.1b timers
|
406
|
struct {
|
407
|
S32 _tid; // timer id
|
408
|
S32 _overrun; // overrun count
|
409
|
compat_sigval_t _sigval; // same as below
|
410
|
S32 _sys_private; // not to be passed to user
|
411
|
S32 _overrun_incr; // amount to add to overrun
|
412
|
} _timer;
|
413
|
|
414
|
// POSIX.1b signals
|
415
|
struct {
|
416
|
U32 _pid; // sender's pid
|
417
|
U32 _uid; // sender's uid
|
418
|
compat_sigval_t _sigval;
|
419
|
} _rt;
|
420
|
|
421
|
// SIGCHLD
|
422
|
struct {
|
423
|
U32 _pid; // which child
|
424
|
U32 _uid; // sender's uid
|
425
|
S32 _status; // exit code
|
426
|
S32 _utime;
|
427
|
S32 _stime;
|
428
|
} _sigchld;
|
429
|
|
430
|
// SIGCHLD (x32 version)
|
431
|
struct {
|
432
|
U32 _pid; // which child
|
433
|
U32 _uid; // sender's uid
|
434
|
S32 _status; // exit code
|
435
|
S64 _utime;
|
436
|
S64 _stime;
|
437
|
} _sigchld_x32;
|
438
|
|
439
|
// SIGILL, SIGFPE, SIGSEGV, SIGBUS
|
440
|
struct {
|
441
|
U32 _addr; // faulting insn/memory ref.
|
442
|
} _sigfault;
|
443
|
|
444
|
// SIGPOLL
|
445
|
struct {
|
446
|
S32 _band; // POLL_IN, POLL_OUT, POLL_MSG
|
447
|
S32 _fd;
|
448
|
} _sigpoll;
|
449
|
|
450
|
struct {
|
451
|
U32 _call_addr; // calling insn
|
452
|
S32 _syscall; // triggering system call number
|
453
|
U32 _arch; // AUDIT_ARCH_* of syscall
|
454
|
} _sigsys;
|
455
|
} _sifields;
|
456
|
} compat_siginfo_t;
|
457
|
|
458
|
typedef struct fpregset
|
459
|
{
|
460
|
union
|
461
|
{
|
462
|
struct fpchip_state
|
463
|
{
|
464
|
int state[27];
|
465
|
int status;
|
466
|
} fpchip_state;
|
467
|
|
468
|
struct fp_emul_space
|
469
|
{
|
470
|
char fp_emul[246];
|
471
|
char fp_epad[2];
|
472
|
} fp_emul_space;
|
473
|
|
474
|
int f_fpregs[62];
|
475
|
} fp_reg_set;
|
476
|
|
477
|
long int f_wregs[33];
|
478
|
} fpregset_t;
|
479
|
|
480
|
|
481
|
// Number of general registers.
|
482
|
#define NGREG 19
|
483
|
|
484
|
enum
|
485
|
{
|
486
|
REG_GS = 0,
|
487
|
#define REG_GS REG_GS
|
488
|
REG_FS,
|
489
|
#define REG_FS REG_FS
|
490
|
REG_ES,
|
491
|
#define REG_ES REG_ES
|
492
|
REG_DS,
|
493
|
#define REG_DS REG_DS
|
494
|
REG_EDI,
|
495
|
#define REG_EDI REG_EDI
|
496
|
REG_ESI,
|
497
|
#define REG_ESI REG_ESI
|
498
|
REG_EBP,
|
499
|
#define REG_EBP REG_EBP
|
500
|
REG_ESP,
|
501
|
#define REG_ESP REG_ESP
|
502
|
REG_EBX,
|
503
|
#define REG_EBX REG_EBX
|
504
|
REG_EDX,
|
505
|
#define REG_EDX REG_EDX
|
506
|
REG_ECX,
|
507
|
#define REG_ECX REG_ECX
|
508
|
REG_EAX,
|
509
|
#define REG_EAX REG_EAX
|
510
|
REG_TRAPNO,
|
511
|
#define REG_TRAPNO REG_TRAPNO
|
512
|
REG_ERR,
|
513
|
#define REG_ERR REG_ERR
|
514
|
REG_EIP,
|
515
|
#define REG_EIP REG_EIP
|
516
|
REG_CS,
|
517
|
#define REG_CS REG_CS
|
518
|
REG_EFL,
|
519
|
#define REG_EFL REG_EFL
|
520
|
REG_UESP,
|
521
|
#define REG_UESP REG_UESP
|
522
|
REG_SS
|
523
|
#define REG_SS REG_SS
|
524
|
};
|
525
|
|
526
|
// Container for all general registers.
|
527
|
typedef S32 gregset_t[NGREG];
|
528
|
|
529
|
// Context to describe whole processor state.
|
530
|
typedef struct
|
531
|
{
|
532
|
gregset_t gregs;
|
533
|
fpregset_t fpregs;
|
534
|
} mcontext_tt;
|
535
|
|
536
|
typedef struct sigaltstack {
|
537
|
void *ss_sp;
|
538
|
int ss_flags;
|
539
|
S32 ss_size;
|
540
|
} stack_tt;
|
541
|
|
542
|
# define K_SIGSET_NWORDS (1024 / 32)
|
543
|
typedef struct
|
544
|
{
|
545
|
unsigned long int __val[K_SIGSET_NWORDS];
|
546
|
} k__sigset_t;
|
547
|
|
548
|
|
549
|
// Userlevel context.
|
550
|
struct ucontext_ia32 {
|
551
|
unsigned int uc_flags; // 0
|
552
|
unsigned int uc_link; // 4
|
553
|
stack_tt uc_stack; // 8
|
554
|
mcontext_tt uc_mcontext; // 20
|
555
|
k__sigset_t uc_sigmask; // mask last for extensibility
|
556
|
};
|
557
|
|
558
|
*/
|
559
|
|
560
|
#define INFO_SIZE 128
|
561
|
#define CONTEXT_SIZE 128
|
562
|
|
563
|
// Serializes the thread's CPU state into a ucontext_ia32 image at `context`
// (layout per the commented-out struct above: uc_stack at +0x8, the gregs
// array of uc_mcontext at +0x14 ordered REG_GS..REG_SS, fpregs pointer at
// +0x60).  `stack` is the ESP value recorded in the context, which may
// differ from the handler's actual ESP.
void writeToContext(KThread* thread, U32 stack, U32 context, bool altStack, U32 trapNo, U32 errorNo) {
    CPU* cpu = thread->cpu;

    // uc_stack: ss_sp / ss_flags / ss_size
    if (altStack) {
        writed(context+0x8, thread->alternateStack);
        writed(context+0xC, K_SS_ONSTACK);
        writed(context+0x10, thread->alternateStackSize);
    } else {
        writed(context+0x8, thread->alternateStack);
        writed(context+0xC, K_SS_DISABLE);
        writed(context+0x10, 0);
    }
    writed(context+0x14, cpu->seg[GS].value);
    writed(context+0x18, cpu->seg[FS].value);
    writed(context+0x1C, cpu->seg[ES].value);
    writed(context+0x20, cpu->seg[DS].value);
    writed(context+0x24, cpu->reg[7].u32); // EDI
    writed(context+0x28, cpu->reg[6].u32); // ESI
    writed(context+0x2C, cpu->reg[5].u32); // EBP
    writed(context+0x30, stack); // ESP
    writed(context+0x34, cpu->reg[3].u32); // EBX
    writed(context+0x38, cpu->reg[2].u32); // EDX
    writed(context+0x3C, cpu->reg[1].u32); // ECX
    writed(context+0x40, cpu->reg[0].u32); // EAX
    writed(context+0x44, trapNo); // REG_TRAPNO
    writed(context+0x48, errorNo); // REG_ERR
    // 16-bit code only exposes the low half of EIP
    writed(context+0x4C, cpu->isBig()?cpu->eip.u32:cpu->eip.u16);
    writed(context+0x50, cpu->seg[CS].value);
    writed(context+0x54, cpu->flags);
    writed(context+0x58, 0); // REG_UESP
    writed(context+0x5C, cpu->seg[SS].value);
    writed(context+0x60, 0); // fpu save state
}
|
596
|
|
597
|
// Restores CPU state from a ucontext_ia32 image previously produced by
// writeToContext.  REG_TRAPNO (+0x44), REG_ERR (+0x48) and REG_UESP (+0x58)
// are not consumed, and no FPU state is restored (none is saved).
void readFromContext(CPU* cpu, U32 context) {
    cpu->setSegment(GS, readd(context+0x14));
    cpu->setSegment(FS, readd(context+0x18));
    cpu->setSegment(ES, readd(context+0x1C));
    cpu->setSegment(DS, readd(context+0x20));

    cpu->reg[7].u32 = readd(context+0x24); // EDI
    cpu->reg[6].u32 = readd(context+0x28); // ESI
    cpu->reg[5].u32 = readd(context+0x2C); // EBP
    cpu->reg[4].u32 = readd(context+0x30); // ESP

    cpu->reg[3].u32 = readd(context+0x34); // EBX
    cpu->reg[2].u32 = readd(context+0x38); // EDX
    cpu->reg[1].u32 = readd(context+0x3C); // ECX
    cpu->reg[0].u32 = readd(context+0x40); // EAX

    cpu->eip.u32 = readd(context+0x4C);
    cpu->setSegment(CS, readd(context+0x50));
    cpu->flags = readd(context+0x54);
    cpu->setSegment(SS, readd(context+0x5C));
}
|
618
|
|
619
|
// Restores the CPU snapshot that was saved on the emulated stack (ESP,
// reg[4], points at it) and tells the dispatcher to continue execution.
// Fix: the previous code passed &this->cpu -- the address of the CPU*
// member itself -- as the destination, so sizeof(CPU) bytes were copied
// over the pointer and the memory following it inside this KThread instead
// of over the CPU object.
U32 KThread::sigreturn() {
    memcopyToNative(this->cpu->reg[4].u32, (char*)this->cpu, sizeof(CPU));
    //klog("signal return (threadId=%d)", thread->id);
    return -K_CONTINUE;
}
|
624
|
|
625
|
// Executed when a signal handler returns to SIG_RETURN_ADDRESS: pops the
// five values runSignal pushed (signal, siginfo address, context,
// condStartWaitTime, interrupted), restores the saved CPU context, undoes
// sigsuspend's temporary mask, and wakes threads blocked in
// signal(..., wait=true) / sigsuspend.
void OPCALL onExitSignal(CPU* cpu, DecodedOp* op) {
    U32 context;
    // preserve the count across the pops/context restore below
    U64 count = cpu->instructionCount;

    cpu->pop32(); // signal
    cpu->pop32(); // address
    context = cpu->pop32();
    // interrupted and condStartWaitTime were stashed on the stack because
    // syscalls made inside the handler clobber the live copies
    cpu->thread->condStartWaitTime = cpu->pop32();
    cpu->thread->interrupted = cpu->pop32()!=0;

#ifdef LOG_OPS
    //klog("onExitSignal signal=%d info=%X context=%X stack=%X interrupted=%d", signal, address, context, cpu->reg[4].u32, cpu->thread->interrupted);
    //klog("    before context %.8X EAX=%.8X ECX=%.8X EDX=%.8X EBX=%.8X ESP=%.8X EBP=%.8X ESI=%.8X EDI=%.8X fs=%d(%X) fs18=%X", cpu->eip.u32, cpu->reg[0].u32, cpu->reg[1].u32, cpu->reg[2].u32, cpu->reg[3].u32, cpu->reg[4].u32, cpu->reg[5].u32, cpu->reg[6].u32, cpu->reg[7].u32, cpu->segValue[4], cpu->segAddress[4], cpu->segAddress[4]?readd(cpu->memory, cpu->segAddress[4]+0x18):0);
#endif
    readFromContext(cpu, context);
#ifdef LOG_OPS
    klog("    after context %.8X EAX=%.8X ECX=%.8X EDX=%.8X EBX=%.8X ESP=%.8X EBP=%.8X ESI=%.8X EDI=%.8X fs=%d(%X) fs18=%X", cpu->eip.u32, cpu->reg[0].u32, cpu->reg[1].u32, cpu->reg[2].u32, cpu->reg[3].u32, cpu->reg[4].u32, cpu->reg[5].u32, cpu->reg[6].u32, cpu->reg[7].u32, cpu->seg[4].value, cpu->seg[4].address, cpu->seg[4].address ? readd(cpu->seg[4].address + 0x18) : 0);
#endif
    cpu->instructionCount = count;
    cpu->thread->inSignal--;

    // sigsuspend stashed the pre-suspend mask here (with RESTORE_SIGNAL_MASK
    // or'd in); restore it and flag the sigsuspend return path.
    // NOTE(review): the restore masks with "& RESTORE_SIGNAL_MASK" rather
    // than "& ~RESTORE_SIGNAL_MASK" -- correctness depends on the value of
    // RESTORE_SIGNAL_MASK (not visible here); confirm against ksignal.h.
    if (cpu->thread->waitingForSignalToEndMaskToRestore & RESTORE_SIGNAL_MASK) {
        cpu->thread->sigMask = cpu->thread->waitingForSignalToEndMaskToRestore & RESTORE_SIGNAL_MASK;
        cpu->thread->waitingForSignalToEndMaskToRestore = SIGSUSPEND_RETURN;
    }

    // wake threads blocked waiting for this handler to finish
    BOXEDWINE_CONDITION_SIGNAL_ALL_NEED_LOCK(cpu->thread->waitingForSignalToEndCond);

    cpu->nextBlock = cpu->getNextBlock();

    /*
    if (action->flags & K_SA_RESTORER) {
        push32(&thread->cpu, thread->cpu->eip.u32);
        thread->cpu->eip.u32 = action->restorer;
        while (thread->cpu->eip.u32!=savedState.eip.u32) {
            runCPU(&thread->cpu);
        }
    }
    */
}
|
665
|
|
666
|
// interrupted and condStartWaitTime are pushed because syscall's during the signal will clobber them
|
667
|
// Dispatches `signal` to the process' registered handler on this thread's
// CPU: saves the current CPU state into a ucontext image (on the alternate
// stack if SA_ONSTACK), builds the handler's stack frame (optionally with
// siginfo for SA_SIGINFO), and redirects EIP to the handler.  The frame's
// return address is SIG_RETURN_ADDRESS, which lands in onExitSignal.
// SIG_DFL is a no-op here (default actions handled elsewhere); SIG_IGN is
// skipped entirely.
void KThread::runSignal(U32 signal, U32 trapNo, U32 errorNo) {
    KSigAction* action = &this->process->sigActions[signal];
    if (action->handlerAndSigAction==K_SIG_DFL) {

    } else if (action->handlerAndSigAction != K_SIG_IGN) {
        U32 context;
        U32 address = 0;
        U32 stack = this->cpu->reg[4].u32; // ESP to record in the context
        U32 interrupted = 0;
        bool altStack = (action->flags & K_SA_ONSTACK) != 0;
        // make this thread "current" for the duration of the setup
        ChangeThread c(this);

        cpu->fillFlags();

#ifdef LOG_OPS
        klog("runSignal %d", signal);
        klog("    before signal %.8X EAX=%.8X ECX=%.8X EDX=%.8X EBX=%.8X ESP=%.8X EBP=%.8X ESI=%.8X EDI=%.8X fs=%d(%X) fs18=%X", cpu->eip.u32, cpu->reg[0].u32, cpu->reg[1].u32, cpu->reg[2].u32, cpu->reg[3].u32, cpu->reg[4].u32, cpu->reg[5].u32, cpu->reg[6].u32, cpu->reg[7].u32, cpu->seg[4].value, cpu->seg[4].address, cpu->seg[4].address?readd(cpu->seg[4].address+0x18):0);
#endif
        // mask in effect while the handler runs: sa_mask plus current mask,
        // plus the signal itself unless SA_NODEFER
        this->inSigMask=action->mask | this->sigMask;
        if (action->flags & K_SA_RESETHAND) {
            action->handlerAndSigAction=K_SIG_DFL;
        } else if (!(action->flags & K_SA_NODEFER)) {
            this->inSigMask|= (U64)1 << (signal-1);
        }
#ifndef BOXEDWINE_MULTI_THREADED
        // if the thread is parked on a condition, kick it out; without
        // SA_RESTART the interrupted wait must report EINTR
        if (this->waitingCond) {
            if (!(action->flags & K_SA_RESTART))
                interrupted = 1;
            this->waitingCond->signalAll(); // this will make sure it gets cleaned up properly
        }
#endif
        // move to front of the queue
#ifndef BOXEDWINE_MULTI_THREADED
        unscheduleThread(this);
        scheduleThread(this);
#endif
        // place the ucontext image at the top of whichever stack is used
        if (altStack) {
            context = this->alternateStack + this->alternateStackSize - CONTEXT_SIZE;
        } else {
            context = this->cpu->seg[SS].address + (ESP & this->cpu->stackMask) - CONTEXT_SIZE;
        }
        writeToContext(this, stack, context, altStack, trapNo, errorNo);

        // handler runs with a flat 32-bit stack segment
        this->cpu->stackMask = 0xFFFFFFFF;
        this->cpu->stackNotMask = 0;
        this->cpu->seg[SS].address = 0;
        this->cpu->reg[4].u32 = context;

        // 16-byte align the handler stack
        this->cpu->reg[4].u32 &= ~15;
        if (action->flags & K_SA_SIGINFO) {
            U32 i;

            // copy the siginfo filled in by signal()/signalIllegalInstruction
            this->cpu->reg[4].u32-=INFO_SIZE;
            address = this->cpu->reg[4].u32;
            for (i=0;i<K_SIG_INFO_SIZE;i++) {
                writed(address+i*4, this->process->sigActions[signal].sigInfo[i]);
            }

            // interrupted and condStartWaitTime are pushed because syscalls
            // during the signal will clobber them (popped in onExitSignal)
            this->cpu->push32(interrupted);
            this->cpu->push32(this->condStartWaitTime);
            this->cpu->push32(context);
            this->cpu->push32(address);
            this->cpu->push32(signal);
            // also passed in registers
            this->cpu->reg[0].u32 = signal;
            this->cpu->reg[1].u32 = address;
            this->cpu->reg[2].u32 = context;
        } else {
            this->cpu->reg[0].u32 = signal;
            this->cpu->reg[1].u32 = 0;
            this->cpu->reg[2].u32 = 0;
            this->cpu->push32(interrupted);
            this->cpu->push32(this->condStartWaitTime);
            this->cpu->push32(context);
            this->cpu->push32(0);
            this->cpu->push32(signal);
        }
#ifdef LOG_OPS
        klog("    context %X interrupted %d", context, interrupted);
#endif
        // returning from the handler jumps to the magic address that
        // triggers onExitSignal
        this->cpu->push32(SIG_RETURN_ADDRESS);
        this->cpu->eip.u32 = action->handlerAndSigAction;

        this->inSignal++;

        // standard 32-bit flat segments for the handler
        this->cpu->setSegment(CS, 0xf);
        this->cpu->setSegment(SS, 0x17);
        this->cpu->setSegment(DS, 0x17);
        this->cpu->setSegment(ES, 0x17);
        this->cpu->setIsBig(1);
    }
}
|
758
|
|
759
|
// bit 0 - 0 = no page found, 1 = protection fault
|
760
|
// bit 1 - 0 = read access, 1 = write access
|
761
|
// bit 2 - 0 = kernel-mode access, 1 = user mode access
|
762
|
// bit 3 - 0 = n/a, 1 = use of reserved bit detected
|
763
|
// bit 4 - 0 = n/a, 1 = fault was an instruction fetch
|
764
|
|
765
|
// Fault path for an access to an unmapped page: if the process installed a
// real SIGSEGV handler, deliver SIGSEGV with si_code SEGV_MAPERR and the
// faulting address; otherwise log the page fault.  With throwException the
// current instruction block is abandoned via longjmp back to the CPU run
// loop.  The error code passed to runSignal follows the bit layout in the
// comment above (bit 0 clear: no page found; bit 1: write access).
void KThread::seg_mapper(U32 address, bool readFault, bool writeFault, bool throwException) {
    if (this->process->sigActions[K_SIGSEGV].handlerAndSigAction!=K_SIG_IGN && this->process->sigActions[K_SIGSEGV].handlerAndSigAction!=K_SIG_DFL) {
        this->process->sigActions[K_SIGSEGV].sigInfo[0] = K_SIGSEGV;
        this->process->sigActions[K_SIGSEGV].sigInfo[1] = 0;
        this->process->sigActions[K_SIGSEGV].sigInfo[2] = 1; // SEGV_MAPERR
        this->process->sigActions[K_SIGSEGV].sigInfo[3] = address;
        this->runSignal(K_SIGSEGV, EXCEPTION_PAGE_FAULT, (writeFault?2:0));
        if (throwException) {
#ifdef BOXEDWINE_HAS_SETJMP
            longjmp(this->cpu->runBlockJump, 1);
#else
            kpanic("setjmp is required for this app but it was compiled into boxedwine");
#endif
        }
    } else {
        this->memory->log_pf(this, address);
    }
}
|
783
|
|
784
|
// Fault path for a protection violation on a mapped page: same flow as
// seg_mapper but with si_code SEGV_ACCERR and bit 0 of the error code set
// (protection fault; bit 1 indicates a write access).
void KThread::seg_access(U32 address, bool readFault, bool writeFault, bool throwException) {
    if (this->process->sigActions[K_SIGSEGV].handlerAndSigAction!=K_SIG_IGN && this->process->sigActions[K_SIGSEGV].handlerAndSigAction!=K_SIG_DFL) {

        this->process->sigActions[K_SIGSEGV].sigInfo[0] = K_SIGSEGV;
        this->process->sigActions[K_SIGSEGV].sigInfo[1] = 0;
        this->process->sigActions[K_SIGSEGV].sigInfo[2] = 2; // SEGV_ACCERR
        this->process->sigActions[K_SIGSEGV].sigInfo[3] = address;
        this->runSignal(K_SIGSEGV, EXCEPTION_PAGE_FAULT, 1 | (writeFault?2:0));
        if (throwException) {
#ifdef BOXEDWINE_HAS_SETJMP
            longjmp(this->cpu->runBlockJump, 1);
#else
            kpanic("setjmp is required for this app but it was compiled into boxedwine");
#endif
        }
    } else {
        this->memory->log_pf(this, address);
    }
}
|
803
|
|
804
|
// Copies per-thread state from `from` (clone/fork path): signal mask,
// stack placement, pending sigsuspend-restore mask, and the full CPU state.
// The cloned CPU is re-pointed at this thread afterwards.
void KThread::clone(KThread* from) {
    this->sigMask = from->sigMask;
    this->stackPageStart = from->stackPageStart;
    this->stackPageCount = from->stackPageCount;
    this->waitingForSignalToEndMaskToRestore = from->waitingForSignalToEndMaskToRestore;
    this->cpu->clone(from->cpu);
    this->cpu->thread = this;
}
|
812
|
|
813
|
// Implements the modify_ldt syscall.  func 1/0x11 writes the user_desc at
// `ptr` into the descriptor table; func 0 reads an entry back and returns
// its size (16); other funcs panic.  `count` is accepted but unused.
// NOTE(review): getLDT can return NULL for an index outside the TLS window
// when the process table has no entry -- the ldt-> dereferences below
// assume it never does for index < LDT_ENTRIES; confirm in KProcess::getLDT.
U32 KThread::modify_ldt(U32 func, U32 ptr, U32 count) {
    if (func == 1 || func == 0x11) {
        // user_desc layout: entry_number, base_addr, limit, packed flags
        int index = readd(ptr);
        U32 address = readd(ptr + 4);
        U32 limit = readd(ptr + 8);
        U32 flags = readd(ptr + 12);

        if (index>=0 && index<LDT_ENTRIES) {
            struct user_desc* ldt = this->getLDT(index);

            ldt->entry_number = index;
            ldt->limit = limit;
            ldt->base_addr = address;
            ldt->flags = flags;
        } else {
            kpanic("syscall_modify_ldt invalid index: %d", index);
        }
        return 0;
    } else if (func == 0) {
        int index = readd(ptr);
        if (index>=0 && index<LDT_ENTRIES) {
            struct user_desc* ldt = this->getLDT(index);

            writed(ptr + 4, ldt->base_addr);
            writed(ptr + 8, ldt->limit);
            writed(ptr + 12, ldt->flags);
        } else {
            kpanic("syscall_modify_ldt invalid index: %d", index);
        }
        return 16; // bytes written
    } else {
        kpanic("syscall_modify_ldt unknown func: %d", func);
        return -1;
    }
}
|
848
|
|
849
|
// Sleeps for `ms` milliseconds on sleepCond.  condStartWaitTime persists
// across wakeups (e.g. signal delivery) so a restarted sleep only waits the
// remaining time; it is reset to 0 when the full duration has elapsed.
// NOTE(review): on a second spurious wakeup, diff is measured from the
// original start while ms was already shortened -- the remaining time may
// be under-counted; confirm whether wakeups here are always final.
U32 KThread::sleep(U32 ms) {
    while (true) {
        if (!this->condStartWaitTime) {
            this->condStartWaitTime = KSystem::getMilliesSinceStart();
        } else {
            U32 diff = KSystem::getMilliesSinceStart()-this->condStartWaitTime;
            if (diff>ms) {
                // slept long enough
                this->condStartWaitTime = 0;
                return 0;
            }
            ms-=diff;
        }

        BOXEDWINE_CONDITION_LOCK(this->sleepCond);
        BOXEDWINE_CONDITION_WAIT_TIMEOUT(this->sleepCond, ms);
        BOXEDWINE_CONDITION_UNLOCK(this->sleepCond);
#ifdef BOXEDWINE_MULTI_THREADED
        if (this->terminating) {
            return -K_EINTR;
        }
#endif
    }
}
|
872
|
|
873
|
// Implements rt_sigprocmask: when oset is non-zero the current blocked
// mask is written there first; when set is non-zero the mask read from it
// is applied according to `how` (K_SIG_BLOCK / K_SIG_UNBLOCK /
// K_SIG_SETMASK).  sigsetSize selects 32- vs 64-bit sets; any other size
// is logged and that half of the operation is skipped.
U32 KThread::sigprocmask(U32 how, U32 set, U32 oset, U32 sigsetSize) {
    if (oset!=0) {
        // report the old mask before it can change below
        if (sigsetSize==4) {
            writed(oset, (U32)this->sigMask);
        } else if (sigsetSize==8) {
            writeq(oset, this->sigMask);
        } else {
            klog("sigprocmask: can't handle sigsetSize=%d", sigsetSize);
        }
        //klog("syscall_sigprocmask oset=%X", thread->sigMask);
    }
    if (set!=0) {
        U64 incoming = 0; // stays 0 for an unsupported size (removes warning)

        if (sigsetSize==4) {
            incoming = readd(set);
        } else if (sigsetSize==8) {
            incoming = readq(set);
        } else {
            klog("sigprocmask: can't handle sigsetSize=%d", sigsetSize);
        }
        if (how == K_SIG_BLOCK) {
            this->sigMask |= incoming;
            //klog("syscall_sigprocmask block %X(%X)", set, thread->sigMask);
        } else if (how == K_SIG_UNBLOCK) {
            this->sigMask &= ~incoming;
            //klog("syscall_sigprocmask unblock %X(%X)", set, thread->sigMask);
        } else if (how == K_SIG_SETMASK) {
            this->sigMask = incoming;
            //klog("syscall_sigprocmask set %X(%X)", set, thread->sigMask);
        } else {
            kpanic("sigprocmask how %d unsupported", how);
        }
    }
    return 0;
}
|
910
|
|
911
|
// Implements sigsuspend: temporarily replaces the signal mask with the one
// at `mask`, blocks until a signal handler has run, then returns -EINTR.
// The original mask (with RESTORE_SIGNAL_MASK or'd in) is stashed in
// waitingForSignalToEndMaskToRestore and restored by onExitSignal, which
// sets it to SIGSUSPEND_RETURN; the next call here observes that marker
// and completes the two-step return.
// NOTE(review): in the non-BOXEDWINE_MULTI_THREADED build control falls
// off the end after the wait -- presumably the cooperative scheduler never
// returns here normally (the wait macro transfers control); confirm.
U32 KThread::sigsuspend(U32 mask, U32 sigsetSize) {
    // second entry after the handler ran: finish with EINTR
    if (this->waitingForSignalToEndMaskToRestore==SIGSUSPEND_RETURN) {
        this->waitingForSignalToEndMaskToRestore = 0;
        return -K_EINTR;
    }
    this->waitingForSignalToEndMaskToRestore = this->sigMask | RESTORE_SIGNAL_MASK;
    if (sigsetSize==4) {
        this->sigMask = readd(mask);
    } else if (sigsetSize==8) {
        this->sigMask = readq(mask);
    } else {
        klog("sigsuspend: can't handle sigsetSize=%d", sigsetSize);
    }
    BOXEDWINE_CONDITION_LOCK(this->waitingForSignalToEndCond);
    BOXEDWINE_CONDITION_WAIT(this->waitingForSignalToEndCond);
    BOXEDWINE_CONDITION_UNLOCK(this->waitingForSignalToEndCond);
#ifdef BOXEDWINE_MULTI_THREADED
    this->waitingForSignalToEndMaskToRestore = 0;
    return -K_EINTR;
#endif
}
|
932
|
|
933
|
// Implements sigaltstack: reports the current alternate-stack registration
// to oss (if non-zero) and installs/disables one from ss (if non-zero).
// Changing the alternate stack while a handler is running on it yields
// EPERM; unreadable/unwritable user pointers yield EFAULT.
U32 KThread::signalstack(U32 ss, U32 oss) {
    if (oss != 0) {
        if (!this->memory->isValidWriteAddress(oss, 12)) {
            return -K_EFAULT;
        }
        bool onStack = this->alternateStack && this->inSignal;
        writed(oss, this->alternateStack);
        writed(oss + 4, onStack ? K_SS_ONSTACK : K_SS_DISABLE);
        writed(oss + 8, this->alternateStackSize);
    }
    if (ss != 0) {
        if (!this->memory->isValidReadAddress(ss, 12)) {
            return -K_EFAULT;
        }
        if (this->alternateStack && this->inSignal) {
            return -K_EPERM;
        }
        U32 requestedFlags = readd(ss + 4);
        if (requestedFlags & K_SS_DISABLE) {
            this->alternateStack = 0;
            this->alternateStackSize = 0;
        } else {
            this->alternateStack = readd(ss);
            this->alternateStackSize = readd(ss + 8);
        }
    }
    return 0;
}
|
960
|
|
961
|
// Returns the GL context registered under `id`, or NULL when none exists.
// Improvement: a single map lookup via find() instead of the previous
// count() + operator[] pair, which searched the map twice.
KThreadGlContext* KThread::getGlContextById(U32 id) {
    auto it = this->glContext.find(id);
    if (it != this->glContext.end())
        return &it->second;
    return NULL;
}
|
966
|
|
967
|
// Drops the GL context registered under `id`; no-op if absent.
void KThread::removeGlContextById(U32 id) {
    this->glContext.erase(id);
}
|
970
|
|
971
|
// Registers (or replaces) the GL context stored under `id`.
void KThread::addGlContext(U32 id, void* context) {
    this->glContext[id] = KThreadGlContext(context);
}
|
974
|
|
975
|
// Drops every GL context registered on this thread.
void KThread::removeAllGlContexts() {
    this->glContext.clear();
}
|
978
|
|
979
|
// RAII guard: makes `thread` the current thread for the enclosing scope,
// remembering the previous one.
ChangeThread::ChangeThread(KThread* thread) {
    this->savedThread = KThread::currentThread();
    KThread::setCurrentThread(thread);
}
|
983
|
// Restores the thread that was current when the guard was constructed.
ChangeThread::~ChangeThread() {
    KThread::setCurrentThread(savedThread);
}
|
986
|
|
987
|
// Free-function entry point used by the CPU cores to raise SIGILL on the
// CPU's owning thread.
void common_signalIllegalInstruction(CPU* cpu, int code) {
    cpu->thread->signalIllegalInstruction(code);
}
|