summary refs log tree commit diff stats
path: root/private/mvdm/softpc.new/base/ccpu386
diff options
context:
space:
mode:
Diffstat (limited to 'private/mvdm/softpc.new/base/ccpu386')
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aaa.c58
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aaa.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aad.c60
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aad.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aam.c64
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aam.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aas.c57
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/aas.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/adc.c79
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/adc.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/add.c79
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/add.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/and.c59
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/and.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/arpl.c56
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/arpl.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bound.c76
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bound.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bsf.c64
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bsf.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bsr.c70
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bsr.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bswap.c64
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bswap.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bt.c54
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bt.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/btc.c55
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/btc.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/btr.c55
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/btr.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bts.c55
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/bts.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c-files129
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_addr.c652
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_addr.h39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_bsic.c369
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_bsic.h138
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_debug.c386
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_debug.h45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_div64.c147
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_div64.h29
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_getset.c290
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_intr.c515
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_intr.h20
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_main.c4940
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_main.h226
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_mem.h75
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_mul64.c185
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_mul64.h29
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_neg64.c66
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_neg64.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_oprnd.h1135
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_page.c857
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_page.h226
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_prot.c287
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_prot.h56
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_reg.c1879
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_reg.h508
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_seg.c396
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_seg.h58
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_stack.c679
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_stack.h149
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_tlb.c953
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_tlb.h39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_tsksw.c589
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_tsksw.h31
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_xcptn.c641
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_xcptn.h59
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_xfer.c416
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_xfer.h70
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_xtrn.c135
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/c_xtrn.h38
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/call.c357
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/call.h30
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cbw.c42
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cbw.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ccpudefs.inc1
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ccpupig.c320
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ccpupig.h167
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ccpusas4.c1960
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ccpusas4.h34
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cdq.c42
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cdq.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/clc.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/clc.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cld.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cld.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cli.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cli.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/clts.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/clts.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cmc.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cmc.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cmp.c81
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cmp.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cmpxchg.c122
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cmpxchg.h33
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cpu4gen.h3086
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cpuint_c.h42
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cwd.c42
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cwd.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cwde.c43
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/cwde.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/daa.c65
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/daa.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/das.c69
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/das.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/dec.c66
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/dec.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/div.c167
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/div.h30
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/enter.c199
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/enter.h25
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/evid_c.h146
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/evidfunc.h715
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/evidgen.h25
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/fault.h231
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/fpu.c5948
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/fpu_c.h168
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/gdpvar.h1859
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/idiv.c171
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/idiv.h30
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/imul.c243
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/imul.h51
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/in.c101
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/in.h33
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/inc.c66
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/inc.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/into.c55
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/into.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/intx.c57
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/intx.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/invd.c45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/invd.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/invlpg.c49
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/invlpg.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/iret.c330
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/iret.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/jcxz.c60
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/jcxz.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/jmp.c198
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/jmp.h30
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/jxx.c290
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/jxx.h121
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lahf.c46
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lahf.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lar.c101
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lar.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lea.c46
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lea.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/leave.c65
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/leave.h13
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lgdt.c68
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lgdt.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lidt.c63
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lidt.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lldt.c87
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lldt.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lmsw.c56
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lmsw.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/localfm.c9
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/loopxx.c118
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/loopxx.h51
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lsl.c101
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lsl.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ltr.c67
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ltr.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lxs.c149
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/lxs.h49
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/makefile6
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/mov.c239
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/mov.h49
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/movsx.c56
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/movsx.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/mul.c147
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/mul.h33
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/neg.c66
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/neg.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/newnpx.h73
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/nop.c38
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/nop.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/not.c48
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/not.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ntstubs.c226
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ntthread.c253
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ntthread.h9
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/or.c59
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/or.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/out.c91
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/out.h33
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pigger_c.h4
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pop.c78
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pop.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/popa.c61
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/popa.h13
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/popf.c67
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/popf.h13
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/push.c68
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/push.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pusha.c53
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pusha.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pushf.c51
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/pushf.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rcl.c91
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rcl.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rcr.c87
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rcr.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ret.c270
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ret.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rol.c91
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rol.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ror.c91
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/ror.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rsrvd.c45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/rsrvd.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sahf.c49
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sahf.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sar.c82
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sar.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sascdef.c142
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sbb.c80
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sbb.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/setxx.c275
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/setxx.h121
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sgdt.c70
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sgdt.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shl.c88
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shl.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shld.c125
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shld.h19
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shr.c87
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shr.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shrd.c127
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/shrd.h19
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sidt.c71
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sidt.h23
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sldt.c45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sldt.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/smsw.c43
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/smsw.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sources183
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/stc.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/stc.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/std.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/std.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sti.c39
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sti.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/str.c45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/str.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/stubs.c21
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sub.c80
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/sub.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/test.c58
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/test.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/univer_c.h4
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/verr.c100
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/verr.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/verw.c96
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/verw.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/vglob.c451
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/wait.c38
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/wait.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/wbinvd.c45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/wbinvd.h11
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xadd.c59
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xadd.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xchg.c53
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xchg.h17
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xlat.c45
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xlat.h16
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xor.c59
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/xor.h18
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/zfrsrvd.c4871
-rw-r--r--private/mvdm/softpc.new/base/ccpu386/zfrsrvd.h15
274 files changed, 49430 insertions, 0 deletions
diff --git a/private/mvdm/softpc.new/base/ccpu386/aaa.c b/private/mvdm/softpc.new/base/ccpu386/aaa.c
new file mode 100644
index 000000000..88791b560
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aaa.c
@@ -0,0 +1,58 @@
+/*[
+
+aaa.c
+
+LOCAL CHAR SccsID[]="@(#)aaa.c 1.5 02/09/94";
+
+AAA CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <aaa.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ AAA - ASCII Adjust AL After Addition (BCD adjust).
+ If the low nibble of AL exceeds 9, or AF is set, the code adds 6 to
+ AX and 1 to AH and sets CF/AF; otherwise CF/AF are cleared. The low
+ nibble of AL is kept in either case.
+ NOTE(review): 6 is added to the whole of AX (not just AL) before AH
+ is incremented - presumably deliberate emulation of observed
+ processor behaviour rather than the documented AL-only form; confirm
+ against the hardware reference.
+ OF/SF/ZF/PF are architecturally undefined after AAA and are only
+ disturbed when SET_UNDEFINED_FLAG is compiled in.
+ */
+GLOBAL VOID
+AAA()
+   {
+   if ( (GET_AL() & 0xf) > 9 || GET_AF() )
+      {
+      SET_AX(GET_AX() + 6);
+      SET_AH(GET_AH() + 1);
+      SET_CF(1); SET_AF(1);
+      }
+   else
+      {
+      SET_CF(0); SET_AF(0);
+      }
+   /* Keep only the low (BCD) nibble of AL in both paths */
+   SET_AL(GET_AL() & 0xf);
+
+   /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+   SET_OF(UNDEFINED_FLAG);
+   SET_SF(UNDEFINED_FLAG);
+   SET_ZF(UNDEFINED_FLAG);
+   SET_PF(UNDEFINED_FLAG);
+#endif
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/aaa.h b/private/mvdm/softpc.new/base/ccpu386/aaa.h
new file mode 100644
index 000000000..7f5cb4bcb
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aaa.h
@@ -0,0 +1,11 @@
+/*
+ aaa.h
+
+ Define all AAA CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)aaa.h 1.4 09/01/94";
+ */
+
+IMPORT VOID AAA IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/aad.c b/private/mvdm/softpc.new/base/ccpu386/aad.c
new file mode 100644
index 000000000..16046d50f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aad.c
@@ -0,0 +1,60 @@
+/*[
+
+aad.c
+
+LOCAL CHAR SccsID[]="@(#)aad.c 1.5 02/09/94";
+
+AAD CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <aad.h>
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ AAD - ASCII Adjust AX Before Division.
+ op1 is the instruction's immediate base operand (0x0A in the
+ standard encoding). Computes AL = AH * base + AL, clears AH, and
+ sets ZF/SF/PF from the new AL. AF/OF/CF are architecturally
+ undefined and only disturbed when SET_UNDEFINED_FLAG is built in.
+ */
+GLOBAL VOID
+AAD
+
+IFN1(
+   IU32, op1
+   )
+
+
+   {
+   IU8 temp_al;
+
+   /* result is truncated to the width of IU8 (assumed 8 bits) */
+   temp_al = GET_AH() * op1 + GET_AL();
+   SET_AL(temp_al);
+   SET_AH(0);
+
+   /* set ZF,SF,PF according to result */
+   SET_ZF(temp_al == 0);
+   SET_SF((temp_al & BIT7_MASK) != 0);
+   SET_PF(pf_table[temp_al]);
+
+   /* Set undefined flag(s) to zero */
+#ifdef SET_UNDEFINED_FLAG
+   SET_AF(UNDEFINED_FLAG);
+   SET_OF(UNDEFINED_FLAG);
+   SET_CF(UNDEFINED_FLAG);
+#endif
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/aad.h b/private/mvdm/softpc.new/base/ccpu386/aad.h
new file mode 100644
index 000000000..e97bf5aeb
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aad.h
@@ -0,0 +1,16 @@
+/*
+ aad.h
+
+ Define all AAD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)aad.h 1.4 02/09/94";
+ */
+
+IMPORT VOID AAD
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/aam.c b/private/mvdm/softpc.new/base/ccpu386/aam.c
new file mode 100644
index 000000000..107c60d40
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aam.c
@@ -0,0 +1,64 @@
+/*[
+
+aam.c
+
+LOCAL CHAR SccsID[]="@(#)aam.c 1.5 02/09/94";
+
+AAM CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <aam.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ AAM - ASCII Adjust AX After Multiply.
+ op1 is the instruction's immediate base operand (0x0A in the
+ standard encoding). Computes AH = AL / base, AL = AL % base, and
+ sets ZF/SF/PF from the new AL. AF/OF/CF are architecturally
+ undefined.
+ */
+GLOBAL VOID
+AAM
+
+IFN1(
+   IU32, op1
+   )
+
+
+   {
+   IU8 temp_al;
+
+   /* base of zero raises divide error; Int0() is assumed not to
+      return here (else the division below would divide by zero)
+      - TODO confirm Int0's non-return contract */
+   if ( op1 == 0 )
+      Int0();
+
+   SET_AH(GET_AL() / op1);
+   SET_AL(GET_AL() % op1);
+
+   /* set ZF,SF,PF according to result */
+   temp_al = GET_AL();
+   SET_ZF(temp_al == 0);
+   SET_SF((temp_al & BIT7_MASK) != 0);
+   SET_PF(pf_table[temp_al]);
+
+   /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+   SET_AF(UNDEFINED_FLAG);
+   SET_OF(UNDEFINED_FLAG);
+   SET_CF(UNDEFINED_FLAG);
+#endif
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/aam.h b/private/mvdm/softpc.new/base/ccpu386/aam.h
new file mode 100644
index 000000000..cbb3b74ae
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aam.h
@@ -0,0 +1,16 @@
+/*
+ aam.h
+
+ Define all AAM CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)aam.h 1.4 02/09/94";
+ */
+
+IMPORT VOID AAM
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/aas.c b/private/mvdm/softpc.new/base/ccpu386/aas.c
new file mode 100644
index 000000000..b222cd191
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aas.c
@@ -0,0 +1,57 @@
+/*[
+
+aas.c
+
+LOCAL CHAR SccsID[]="@(#)aas.c 1.5 02/09/94";
+
+AAS CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <aas.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ AAS - ASCII Adjust AL After Subtraction (BCD adjust).
+ Mirror image of AAA: if the low nibble of AL exceeds 9, or AF is
+ set, the code subtracts 6 from AX and 1 from AH and sets CF/AF;
+ otherwise CF/AF are cleared. The low nibble of AL is kept in
+ either case.
+ NOTE(review): as in AAA, the 6 is applied to the whole of AX -
+ presumably matching observed processor behaviour; confirm.
+ OF/SF/ZF/PF are architecturally undefined after AAS.
+ */
+GLOBAL VOID
+AAS()
+   {
+   if ( (GET_AL() & 0xf) > 9 || GET_AF() )
+      {
+      SET_AX(GET_AX() - 6);
+      SET_AH(GET_AH() - 1);
+      SET_CF(1); SET_AF(1);
+      }
+   else
+      {
+      SET_CF(0); SET_AF(0);
+      }
+   /* Keep only the low (BCD) nibble of AL in both paths */
+   SET_AL(GET_AL() & 0xf);
+
+   /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+   SET_OF(UNDEFINED_FLAG);
+   SET_SF(UNDEFINED_FLAG);
+   SET_ZF(UNDEFINED_FLAG);
+   SET_PF(UNDEFINED_FLAG);
+#endif
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/aas.h b/private/mvdm/softpc.new/base/ccpu386/aas.h
new file mode 100644
index 000000000..0ee14d609
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/aas.h
@@ -0,0 +1,11 @@
+/*
+ aas.h
+
+ Define all AAS CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)aas.h 1.5 09/01/94";
+ */
+
+IMPORT VOID AAS IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/adc.c b/private/mvdm/softpc.new/base/ccpu386/adc.c
new file mode 100644
index 000000000..779d80034
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/adc.c
@@ -0,0 +1,79 @@
+/*[
+
+adc.c
+
+LOCAL CHAR SccsID[]="@(#)adc.c 1.5 02/09/94";
+
+ADC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <adc.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'adc'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*
+ ADC - add with carry, generic over 8/16/32-bit operand sizes.
+ *pop1 receives (*pop1 + op2 + CF) masked to op_sz bits; all six
+ arithmetic flags are recomputed from the operands and result.
+ */
+GLOBAL VOID
+ADC
+
+IFN3(
+   IU32 *, pop1,	/* pntr to dst/lsrc operand */
+   IU32, op2,	/* rsrc operand */
+   IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+   IU32 carry;
+   IU32 msb;
+   IU32 op1_msb;
+   IU32 op2_msb;
+   IU32 res_msb;
+
+   msb = SZ2MSB(op_sz);
+   /* Do operation */
+   /* NB: '+' binds tighter than '&', so the full carry-propagated
+      sum (including CF) is formed first, then masked to the
+      operand width - the precedence here is intentional. */
+   result = *pop1 + op2 + GET_CF() & SZ2MASK(op_sz);
+   op1_msb = (*pop1 & msb) != 0;	/* Isolate all msb's */
+   op2_msb = (op2 & msb) != 0;
+   res_msb = (result & msb) != 0;
+   carry = *pop1 ^ op2 ^ result;	/* Isolate carries */
+   /* Determine flags */
+   /*
+      OF = (op1 == op2) & (op2 ^ res)
+      ie if operand signs same and res sign different set OF.
+    */
+   SET_OF((op1_msb == op2_msb) & (op2_msb ^ res_msb));
+   /*
+      Formally:-     CF = op1 & op2 | !res & op1 | !res & op2
+      Equivalently:- CF = OF ^ op1 ^ op2 ^ res
+    */
+   /* NB: SET_CF reads GET_OF, so SET_OF above must stay first */
+   SET_CF(((carry & msb) != 0) ^ GET_OF());
+   SET_PF(pf_table[result & BYTE_MASK]);
+   SET_ZF(result == 0);
+   SET_SF((result & msb) != 0);	/* SF = MSB */
+   SET_AF((carry & BIT4_MASK) != 0);	/* AF = Bit 4 carry */
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/adc.h b/private/mvdm/softpc.new/base/ccpu386/adc.h
new file mode 100644
index 000000000..38bbe84b8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/adc.h
@@ -0,0 +1,18 @@
+/*
+ adc.h
+
+ Define all ADC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)adc.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ADC
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/add.c b/private/mvdm/softpc.new/base/ccpu386/add.c
new file mode 100644
index 000000000..05a95d92a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/add.c
@@ -0,0 +1,79 @@
+/*[
+
+add.c
+
+LOCAL CHAR SccsID[]="@(#)add.c 1.5 02/09/94";
+
+ADD CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <add.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'add'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*
+ ADD - generic add over 8/16/32-bit operand sizes.
+ *pop1 receives (*pop1 + op2) masked to op_sz bits; all six
+ arithmetic flags are recomputed from the operands and result.
+ Identical to ADC except no carry-in is added.
+ */
+GLOBAL VOID
+ADD
+
+IFN3(
+   IU32 *, pop1,	/* pntr to dst/lsrc operand */
+   IU32, op2,	/* rsrc operand */
+   IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+   IU32 carry;
+   IU32 msb;
+   IU32 op1_msb;
+   IU32 op2_msb;
+   IU32 res_msb;
+
+   msb = SZ2MSB(op_sz);
+   /* Do operation */
+   /* NB: '+' binds tighter than '&', so the full sum is formed
+      first, then masked to the operand width - intentional. */
+   result = *pop1 + op2 & SZ2MASK(op_sz);
+   op1_msb = (*pop1 & msb) != 0;	/* Isolate all msb's */
+   op2_msb = (op2 & msb) != 0;
+   res_msb = (result & msb) != 0;
+   carry = *pop1 ^ op2 ^ result;	/* Isolate carries */
+   /* Determine flags */
+   /*
+      OF = (op1 == op2) & (op2 ^ res)
+      ie if operand signs same and res sign different set OF.
+    */
+   SET_OF((op1_msb == op2_msb) & (op2_msb ^ res_msb));
+   /*
+      Formally:-     CF = op1 & op2 | !res & op1 | !res & op2
+      Equivalently:- CF = OF ^ op1 ^ op2 ^ res
+    */
+   /* NB: SET_CF reads GET_OF, so SET_OF above must stay first */
+   SET_CF(((carry & msb) != 0) ^ GET_OF());
+   SET_PF(pf_table[result & BYTE_MASK]);
+   SET_ZF(result == 0);
+   SET_SF((result & msb) != 0);	/* SF = MSB */
+   SET_AF((carry & BIT4_MASK) != 0);	/* AF = Bit 4 carry */
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/add.h b/private/mvdm/softpc.new/base/ccpu386/add.h
new file mode 100644
index 000000000..9257a0a8a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/add.h
@@ -0,0 +1,18 @@
+/*
+ add.h
+
+ Define all ADD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)add.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ADD
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/and.c b/private/mvdm/softpc.new/base/ccpu386/and.c
new file mode 100644
index 000000000..b1bda5d71
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/and.c
@@ -0,0 +1,59 @@
+/*[
+
+and.c
+
+LOCAL CHAR SccsID[]="@(#)and.c 1.5 02/09/94";
+
+AND CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <and.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'and'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*
+ AND - generic bitwise AND over 8/16/32-bit operand sizes.
+ *pop1 receives *pop1 & op2. CF and OF are cleared as the
+ architecture requires for logical ops; AF is forced to 0 here
+ (the architecture leaves AF undefined after AND). PF/ZF/SF are
+ set from the result.
+ */
+GLOBAL VOID
+AND
+
+IFN3(
+   IU32 *, pop1,	/* pntr to dst/lsrc operand */
+   IU32, op2,	/* rsrc operand */
+   IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+
+   result = *pop1 & op2;	/* Do operation */
+   SET_CF(0);	/* Determine flags */
+   SET_OF(0);
+   SET_AF(0);
+   SET_PF(pf_table[result & BYTE_MASK]);
+   SET_ZF(result == 0);
+   SET_SF((result & SZ2MSB(op_sz)) != 0);	/* SF = MSB */
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/and.h b/private/mvdm/softpc.new/base/ccpu386/and.h
new file mode 100644
index 000000000..911946ff9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/and.h
@@ -0,0 +1,18 @@
+/*
+ and.h
+
+ Define all AND CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)and.h 1.4 02/09/94";
+ */
+
+IMPORT VOID AND
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/arpl.c b/private/mvdm/softpc.new/base/ccpu386/arpl.c
new file mode 100644
index 000000000..5b87ac31d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/arpl.c
@@ -0,0 +1,56 @@
+/*[
+
+arpl.c
+
+LOCAL CHAR SccsID[]="@(#)arpl.c 1.5 02/09/94";
+
+ARPL CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <arpl.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+/*
+ ARPL - Adjust Requested Privilege Level of a selector.
+ *pop1 is the destination selector, op2 the source selector. If the
+ destination's RPL is numerically lower (more privileged) than the
+ source's, it is raised to the source's RPL and ZF is set; otherwise
+ the destination is left alone and ZF is cleared.
+ */
+GLOBAL VOID
+ARPL
+
+IFN2(
+   IU32 *, pop1,
+   IU32, op2
+   )
+
+
+   {
+   IU32 rpl;
+
+   /* Reduce op1 RPL to lowest privilege (highest value) */
+   /* NB: the comparison deliberately assigns rpl as a side effect */
+   if ( GET_SELECTOR_RPL(*pop1) < (rpl = GET_SELECTOR_RPL(op2)) )
+      {
+      SET_SELECTOR_RPL(*pop1, rpl);
+      SET_ZF(1);
+      }
+   else
+      {
+      SET_ZF(0);
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/arpl.h b/private/mvdm/softpc.new/base/ccpu386/arpl.h
new file mode 100644
index 000000000..8eb1e75e0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/arpl.h
@@ -0,0 +1,17 @@
+/*
+ arpl.h
+
+ Define all ARPL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)arpl.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ARPL
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/bound.c b/private/mvdm/softpc.new/base/ccpu386/bound.c
new file mode 100644
index 000000000..a1b59ae66
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bound.c
@@ -0,0 +1,76 @@
+/*[
+
+bound.c
+
+LOCAL CHAR SccsID[]="@(#)bound.c 1.6 03/28/94";
+
+BOUND CPU functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <bound.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ BOUND - check a (signed) array index against a lower:upper bound
+ pair. op1 is the index, op2[0]/op2[1] the lower/upper bounds,
+ op_sz the operand width (16 or 32 bits). 16-bit operands are
+ sign-extended before the signed comparison. If the index is out
+ of range, Int5() is invoked - presumably raising the BOUND-range
+ -exceeded trap (vector 5); confirm against c_xcptn.
+ */
+GLOBAL VOID
+BOUND
+#ifdef ANSI
+   (
+   IU32 op1,	/* lsrc(test value) operand */
+   IU32 op2[2],	/* rsrc(lower:upper pair) operand */
+   IUM8 op_sz	/* 16 or 32-bit */
+   )
+#else
+   (op1, op2, op_sz)
+   IU32 op1;
+   IU32 op2[2];
+   IUM8 op_sz;
+#endif
+   {
+   IS32 value;
+   IS32 lower;
+   IS32 upper;
+
+   /* transfer to local signed variables */
+   value = op1;
+   lower = op2[0];
+   upper = op2[1];
+
+   if ( op_sz == 16 )
+      {
+      /* sign extend operands */
+      if ( value & BIT15_MASK )
+         value |= ~WORD_MASK;
+
+      if ( lower & BIT15_MASK )
+         lower |= ~WORD_MASK;
+
+      if ( upper & BIT15_MASK )
+         upper |= ~WORD_MASK;
+      }
+
+   /* NOTE(review): the byte count below is computed but never used
+      in this function - looks like leftover from an earlier
+      version; harmless since op_sz is passed by value */
+   op_sz = op_sz / 8;	/* determine number of bytes in operand */
+
+   if ( value < lower || value > upper )
+      Int5();
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/bound.h b/private/mvdm/softpc.new/base/ccpu386/bound.h
new file mode 100644
index 000000000..d41220bcf
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bound.h
@@ -0,0 +1,18 @@
+/*
+ bound.h
+
+ Define all BOUND CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)bound.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BOUND
+
+IPT3(
+ IU32, op1,
+ IU32, op2[2],
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/bsf.c b/private/mvdm/softpc.new/base/ccpu386/bsf.c
new file mode 100644
index 000000000..45c592967
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bsf.c
@@ -0,0 +1,64 @@
+/*[
+
+bsf.c
+
+LOCAL CHAR SccsID[]="@(#)bsf.c 1.5 02/09/94";
+
+BSF CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <bsf.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'bsf'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+BSF	/* Bit Scan Forward: write index of op2's lowest set bit to *pop1. */
+
+IFN2(
+   IU32 *, pop1,	/* pntr to dst operand */
+   IU32, op2	/* rsrc (ie scanned) operand */
+   )
+
+
+   {
+   IU32 temp = 0;	/* candidate bit index, counted up from bit 0 */
+
+   if ( op2 == 0 )
+      {
+      SET_ZF(1);	/* no bits set: flag it, destination untouched */
+      /* leave dst unaltered */
+      }
+   else
+      {
+      SET_ZF(0);
+      while ( (op2 & BIT0_MASK) == 0 )	/* shift down until lowest set bit reaches bit 0 */
+         {
+         temp += 1;
+         op2 >>= 1;
+         }
+      *pop1 = temp;	/* index of least significant set bit */
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/bsf.h b/private/mvdm/softpc.new/base/ccpu386/bsf.h
new file mode 100644
index 000000000..300fcaed5
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bsf.h
@@ -0,0 +1,17 @@
+/*
+ bsf.h
+
+ BSF CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)bsf.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BSF
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/bsr.c b/private/mvdm/softpc.new/base/ccpu386/bsr.c
new file mode 100644
index 000000000..959d1bdc7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bsr.c
@@ -0,0 +1,70 @@
+/*[
+
+bsr.c
+
+LOCAL CHAR SccsID[]="@(#)bsr.c 1.5 02/09/94";
+
+BSR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <bsr.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'bsr'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+BSR
+
+IFN3(
+ IU32 *, pop1, /* pntr to dst operand */
+ IU32, op2, /* rsrc (ie scanned) operand */
+ IUM8, op_sz /* 16 or 32-bit */
+ )
+
+
+ {
+ IU32 temp;
+ IU32 msb;
+
+ if ( op2 == 0 )
+ {
+ SET_ZF(1);
+ /* leave dst unaltered */
+ }
+ else
+ {
+ SET_ZF(0);
+ temp = op_sz - 1;
+ msb = SZ2MSB(op_sz);
+
+ while ( (op2 & msb) == 0 )
+ {
+ temp -= 1;
+ op2 <<= 1;
+ }
+
+ *pop1 = temp;
+ }
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/bsr.h b/private/mvdm/softpc.new/base/ccpu386/bsr.h
new file mode 100644
index 000000000..d12566f7f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bsr.h
@@ -0,0 +1,18 @@
+/*
+ bsr.h
+
+ BSR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)bsr.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BSR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/bswap.c b/private/mvdm/softpc.new/base/ccpu386/bswap.c
new file mode 100644
index 000000000..8b83d1e7d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bswap.c
@@ -0,0 +1,64 @@
+/*[
+
+bswap.c
+
+LOCAL CHAR SccsID[]="@(#)bswap.c 1.6 11/30/94";
+
+BSWAP CPU functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <bswap.h>
+#include <stdio.h>
+
+/*
+ =====================================================================
+   EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+#ifdef SPC486	/* BSWAP first appeared on the i486 */
+
+GLOBAL VOID
+BSWAP	/* Reverse the byte order of the 32-bit operand in place. */
+
+IFN1(
+   IU32 *, pop1	/* pntr to dst/src operand */
+   )
+
+
+   {
+   IU32 src;	/* temp for source */
+   IU32 dst;	/* temp for destination */
+
+   src = *pop1;	/* get source operand */
+
+   /*
+      ================= =================
+      Munge bytes from | A | B | C | D | to | D | C | B | A |
+      ================= =================
+    */
+   dst = ((src & 0xff000000) >> 24) |	/* A->D */
+         ((src & 0x00ff0000) >> 8) |	/* B->C */
+         ((src & 0x0000ff00) << 8) |	/* C->B */
+         ((src & 0x000000ff) << 24);	/* D->A */
+
+   *pop1 = dst;	/* return destination operand */
+   }
+
+#endif /* SPC486 */
diff --git a/private/mvdm/softpc.new/base/ccpu386/bswap.h b/private/mvdm/softpc.new/base/ccpu386/bswap.h
new file mode 100644
index 000000000..a56472cd8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bswap.h
@@ -0,0 +1,16 @@
+/*
+ bswap.h
+
+ BSWAP CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)bswap.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BSWAP
+
+IPT1(
+ IU32 *, pop1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/bt.c b/private/mvdm/softpc.new/base/ccpu386/bt.c
new file mode 100644
index 000000000..7cf507c6d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bt.c
@@ -0,0 +1,54 @@
+/*[
+
+bt.c
+
+LOCAL CHAR SccsID[]="@(#)bt.c 1.5 02/09/94";
+
+BT CPU functions.
+-----------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <bt.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'bt'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+BT	/* Bit Test: copy the selected bit of op1 into CF; operands unchanged. */
+
+IFN3(
+   IU32, op1,	/* lsrc operand */
+   IU32, op2,	/* rsrc (ie bit nr.) operand */
+   IUM8, op_sz	/* 16 or 32-bit */
+   )
+
+
+   {
+   IU32 bit_mask;
+
+   op2 = op2 % op_sz;	/* take bit nr. modulo operand size */
+   bit_mask = 1 << op2;	/* form mask for bit - NOTE(review): for op2==31 '1<<31' shifts into the sign bit of int; (IU32)1 << op2 would be cleaner */
+   SET_CF((op1 & bit_mask) != 0);	/* set CF to given bit */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/bt.h b/private/mvdm/softpc.new/base/ccpu386/bt.h
new file mode 100644
index 000000000..dabb31087
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bt.h
@@ -0,0 +1,18 @@
+/*
+ bt.h
+
+ BT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)bt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BT
+
+IPT3(
+ IU32, op1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/btc.c b/private/mvdm/softpc.new/base/ccpu386/btc.c
new file mode 100644
index 000000000..daa69489b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/btc.c
@@ -0,0 +1,55 @@
+/*[
+
+btc.c
+
+LOCAL CHAR SccsID[]="@(#)btc.c 1.5 02/09/94";
+
+BTC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <btc.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'btc'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+BTC	/* Bit Test and Complement: CF = old bit, then toggle that bit in *pop1. */
+
+IFN3(
+   IU32 *, pop1,	/* pntr to lsrc/dst operand */
+   IU32, op2,	/* rsrc (ie bit nr.) operand */
+   IUM8, op_sz	/* 16 or 32-bit */
+   )
+
+
+   {
+   IU32 bit_mask;
+
+   op2 = op2 % op_sz;	/* take bit nr. modulo operand size */
+   bit_mask = 1 << op2;	/* form mask for bit */
+   SET_CF((*pop1 & bit_mask) != 0);	/* set CF to given bit */
+   *pop1 = *pop1 ^ bit_mask;	/* Set Bit = !Bit */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/btc.h b/private/mvdm/softpc.new/base/ccpu386/btc.h
new file mode 100644
index 000000000..f78211394
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/btc.h
@@ -0,0 +1,18 @@
+/*
+ btc.h
+
+ BTC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)btc.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BTC
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/btr.c b/private/mvdm/softpc.new/base/ccpu386/btr.c
new file mode 100644
index 000000000..5d00cde29
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/btr.c
@@ -0,0 +1,55 @@
+/*[
+
+btr.c
+
+LOCAL CHAR SccsID[]="@(#)btr.c 1.5 02/09/94";
+
+BTR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <btr.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'btr'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+BTR	/* Bit Test and Reset: CF = old bit, then clear that bit in *pop1. */
+
+IFN3(
+   IU32 *, pop1,	/* pntr to lsrc/dst operand */
+   IU32, op2,	/* rsrc (ie bit nr.) operand */
+   IUM8, op_sz	/* 16 or 32-bit */
+   )
+
+
+   {
+   IU32 bit_mask;
+
+   op2 = op2 % op_sz;	/* take bit nr. modulo operand size */
+   bit_mask = 1 << op2;	/* form mask for bit */
+   SET_CF((*pop1 & bit_mask) != 0);	/* set CF to given bit */
+   *pop1 = *pop1 & ~bit_mask;	/* Set Bit = 0 */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/btr.h b/private/mvdm/softpc.new/base/ccpu386/btr.h
new file mode 100644
index 000000000..c058d582e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/btr.h
@@ -0,0 +1,18 @@
+/*
+ btr.h
+
+ BTR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)btr.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BTR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/bts.c b/private/mvdm/softpc.new/base/ccpu386/bts.c
new file mode 100644
index 000000000..5fd166a84
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bts.c
@@ -0,0 +1,55 @@
+/*[
+
+bts.c
+
+LOCAL CHAR SccsID[]="@(#)bts.c 1.5 02/09/94";
+
+BTS CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <bts.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'bts'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+BTS	/* Bit Test and Set: CF = old bit, then set that bit in *pop1. */
+
+IFN3(
+   IU32 *, pop1,	/* pntr to lsrc/dst operand */
+   IU32, op2,	/* rsrc (ie bit nr.) operand */
+   IUM8, op_sz	/* 16 or 32-bit */
+   )
+
+
+   {
+   IU32 bit_mask;
+
+   op2 = op2 % op_sz;	/* take bit nr. modulo operand size */
+   bit_mask = 1 << op2;	/* form mask for bit */
+   SET_CF((*pop1 & bit_mask) != 0);	/* set CF to given bit */
+   *pop1 = *pop1 | bit_mask;	/* Set Bit = 1 */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/bts.h b/private/mvdm/softpc.new/base/ccpu386/bts.h
new file mode 100644
index 000000000..a98015dc9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/bts.h
@@ -0,0 +1,18 @@
+/*
+ bts.h
+
+ BTS CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)bts.h 1.4 02/09/94";
+ */
+
+IMPORT VOID BTS
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c-files b/private/mvdm/softpc.new/base/ccpu386/c-files
new file mode 100644
index 000000000..95eb66cb8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c-files
@@ -0,0 +1,129 @@
+aaa.c
+aad.c
+aam.c
+aas.c
+adc.c
+add.c
+and.c
+arpl.c
+bound.c
+bsf.c
+bsr.c
+bswap.c
+bt.c
+btc.c
+btr.c
+bts.c
+call.c
+cbw.c
+ccpupig.c
+ccpusas4.c
+cdq.c
+clc.c
+cld.c
+cli.c
+clts.c
+cmc.c
+cmp.c
+cmpxchg.c
+cwd.c
+cwde.c
+c_addr.c
+c_bsic.c
+c_debug.c
+c_div64.c
+c_getset.c
+c_intr.c
+c_main.c
+c_mul64.c
+c_neg64.c
+c_page.c
+c_prot.c
+c_reg.c
+c_seg.c
+c_stack.c
+c_tlb.c
+c_tsksw.c
+c_xcptn.c
+c_xfer.c
+c_xtrn.c
+daa.c
+das.c
+dec.c
+div.c
+enter.c
+fpu.c
+idiv.c
+imul.c
+in.c
+inc.c
+into.c
+intx.c
+invd.c
+invlpg.c
+iret.c
+jcxz.c
+jmp.c
+jxx.c
+lahf.c
+lar.c
+lea.c
+leave.c
+lgdt.c
+lidt.c
+lldt.c
+lmsw.c
+loopxx.c
+lsl.c
+ltr.c
+lxs.c
+mov.c
+movsx.c
+mul.c
+neg.c
+nop.c
+not.c
+or.c
+out.c
+p.c_main.c
+p.c_tsksw.c
+pop.c
+popa.c
+popf.c
+push.c
+pusha.c
+pushf.c
+rcl.c
+rcr.c
+ret.c
+rol.c
+ror.c
+rsrvd.c
+sahf.c
+sar.c
+sbb.c
+setxx.c
+sgdt.c
+shl.c
+shld.c
+shr.c
+shrd.c
+sidt.c
+sldt.c
+smsw.c
+stc.c
+std.c
+sti.c
+str.c
+stubs.c
+sub.c
+test.c
+verr.c
+verw.c
+wait.c
+wbinvd.c
+xadd.c
+xchg.c
+xlat.c
+xor.c
+zfrsrvd.c
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_addr.c b/private/mvdm/softpc.new/base/ccpu386/c_addr.c
new file mode 100644
index 000000000..e8b8d8a26
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_addr.c
@@ -0,0 +1,652 @@
+/*[
+
+c_addr.c
+
+LOCAL CHAR SccsID[]="@(#)c_addr.c 1.10 7/19/94";
+
+Memory Addressing Support.
+--------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_mem.h>
+#include <ccpupig.h>
+#include <fault.h>
+
+/*
+ Allowable memory addressing types.
+ */
+
+/* <addr size><mode><r/m> */
+#define A_1600 (IU8) 0 /* [BX + SI] */
+#define A_1601 (IU8) 1 /* [BX + DI] */
+#define A_1602 (IU8) 2 /* [BP + SI] */
+#define A_1603 (IU8) 3 /* [BP + DI] */
+#define A_1604 (IU8) 4 /* [SI] */
+#define A_1605 (IU8) 5 /* [DI] */
+#define A_1606 (IU8) 6 /* [d16] */
+#define A_1607 (IU8) 7 /* [BX] */
+
+#define A_1610 (IU8) 8 /* [BX + SI + d8] */
+#define A_1611 (IU8) 9 /* [BX + DI + d8] */
+#define A_1612 (IU8)10 /* [BP + SI + d8] */
+#define A_1613 (IU8)11 /* [BP + DI + d8] */
+#define A_1614 (IU8)12 /* [SI + d8] */
+#define A_1615 (IU8)13 /* [DI + d8] */
+#define A_1616 (IU8)14 /* [BP + d8] */
+#define A_1617 (IU8)15 /* [BX + d8] */
+
+#define A_1620 (IU8)16 /* [BX + SI + d16] */
+#define A_1621 (IU8)17 /* [BX + DI + d16] */
+#define A_1622 (IU8)18 /* [BP + SI + d16] */
+#define A_1623 (IU8)19 /* [BP + DI + d16] */
+#define A_1624 (IU8)20 /* [SI + d16] */
+#define A_1625 (IU8)21 /* [DI + d16] */
+#define A_1626 (IU8)22 /* [BP + d16] */
+#define A_1627 (IU8)23 /* [BX + d16] */
+
+/* <addr size><mode><r/m> */
+#define A_3200 (IU8)24 /* [EAX] */
+#define A_3201 (IU8)25 /* [ECX] */
+#define A_3202 (IU8)26 /* [EDX] */
+#define A_3203 (IU8)27 /* [EBX] */
+#define A_3205 (IU8)28 /* [d32] */
+#define A_3206 (IU8)29 /* [ESI] */
+#define A_3207 (IU8)30 /* [EDI] */
+
+#define A_3210 (IU8)31 /* [EAX + d8] */
+#define A_3211 (IU8)32 /* [ECX + d8] */
+#define A_3212 (IU8)33 /* [EDX + d8] */
+#define A_3213 (IU8)34 /* [EBX + d8] */
+#define A_3215 (IU8)35 /* [EBP + d8] */
+#define A_3216 (IU8)36 /* [ESI + d8] */
+#define A_3217 (IU8)37 /* [EDI + d8] */
+
+#define A_3220 (IU8)38 /* [EAX + d32] */
+#define A_3221 (IU8)39 /* [ECX + d32] */
+#define A_3222 (IU8)40 /* [EDX + d32] */
+#define A_3223 (IU8)41 /* [EBX + d32] */
+#define A_3225 (IU8)42 /* [EBP + d32] */
+#define A_3226 (IU8)43 /* [ESI + d32] */
+#define A_3227 (IU8)44 /* [EDI + d32] */
+
+/* <addr size><S=SIB form><mode><base> */
+#define A_32S00 (IU8)45 /* [EAX + si] */
+#define A_32S01 (IU8)46 /* [ECX + si] */
+#define A_32S02 (IU8)47 /* [EDX + si] */
+#define A_32S03 (IU8)48 /* [EBX + si] */
+#define A_32S04 (IU8)49 /* [ESP + si] */
+#define A_32S05 (IU8)50 /* [d32 + si] */
+#define A_32S06 (IU8)51 /* [ESI + si] */
+#define A_32S07 (IU8)52 /* [EDI + si] */
+
+#define A_32S10 (IU8)53 /* [EAX + si + d8] */
+#define A_32S11 (IU8)54 /* [ECX + si + d8] */
+#define A_32S12 (IU8)55 /* [EDX + si + d8] */
+#define A_32S13 (IU8)56 /* [EBX + si + d8] */
+#define A_32S14 (IU8)57 /* [ESP + si + d8] */
+#define A_32S15 (IU8)58 /* [EBP + si + d8] */
+#define A_32S16 (IU8)59 /* [ESI + si + d8] */
+#define A_32S17 (IU8)60 /* [EDI + si + d8] */
+
+#define A_32S20 (IU8)61 /* [EAX + si + d32] */
+#define A_32S21 (IU8)62 /* [ECX + si + d32] */
+#define A_32S22 (IU8)63 /* [EDX + si + d32] */
+#define A_32S23 (IU8)64 /* [EBX + si + d32] */
+#define A_32S24 (IU8)65 /* [ESP + si + d32] */
+#define A_32S25 (IU8)66 /* [EBP + si + d32] */
+#define A_32S26 (IU8)67 /* [ESI + si + d32] */
+#define A_32S27 (IU8)68 /* [EDI + si + d32] */
+
+/* Table fillers - never actually referenced */
+#define A_3204 (IU8)0
+#define A_3214 (IU8)0
+#define A_3224 (IU8)0
+
+/* [addr_sz][mode][r/m] */
+/* addr_sz 0 = 16-bit */
+/* addr_sz 1 = 32-bit */
+/* addr_sz 2 = 32-bit (+SIB) */
+LOCAL IU8 addr_maintype[3] [3] [8] =	/* maps (addressing form, mod, r/m) to an A_xxxx addressing-type id */
+   {
+   { {A_1600, A_1601, A_1602, A_1603, A_1604, A_1605, A_1606, A_1607},
+     {A_1610, A_1611, A_1612, A_1613, A_1614, A_1615, A_1616, A_1617},
+     {A_1620, A_1621, A_1622, A_1623, A_1624, A_1625, A_1626, A_1627} },
+
+   { {A_3200, A_3201, A_3202, A_3203, A_3204, A_3205, A_3206, A_3207},
+     {A_3210, A_3211, A_3212, A_3213, A_3214, A_3215, A_3216, A_3217},
+     {A_3220, A_3221, A_3222, A_3223, A_3224, A_3225, A_3226, A_3227} },
+
+   { {A_32S00, A_32S01, A_32S02, A_32S03, A_32S04, A_32S05, A_32S06, A_32S07},
+     {A_32S10, A_32S11, A_32S12, A_32S13, A_32S14, A_32S15, A_32S16, A_32S17},
+     {A_32S20, A_32S21, A_32S22, A_32S23, A_32S24, A_32S25, A_32S26, A_32S27} }
+   };
+
+/*
+ Allowable memory addressing sub types.
+ */
+
+/* <ss><index> */
+#define A_SINO (IU8) 0 /* No SIB byte */
+#define A_SI00 (IU8) 1 /* EAX */
+#define A_SI01 (IU8) 2 /* ECX */
+#define A_SI02 (IU8) 3 /* EDX */
+#define A_SI03 (IU8) 4 /* EBX */
+#define A_SI04 (IU8) 5 /* none */
+#define A_SI05 (IU8) 6 /* EBP */
+#define A_SI06 (IU8) 7 /* ESI */
+#define A_SI07 (IU8) 8 /* EDI */
+
+#define A_SI10 (IU8) 9 /* EAX x 2 */
+#define A_SI11 (IU8)10 /* ECX x 2 */
+#define A_SI12 (IU8)11 /* EDX x 2 */
+#define A_SI13 (IU8)12 /* EBX x 2 */
+#define A_SI14 (IU8)13 /* undefined */
+#define A_SI15 (IU8)14 /* EBP x 2 */
+#define A_SI16 (IU8)15 /* ESI x 2 */
+#define A_SI17 (IU8)16 /* EDI x 2 */
+
+#define A_SI20 (IU8)17 /* EAX x 4 */
+#define A_SI21 (IU8)18 /* ECX x 4 */
+#define A_SI22 (IU8)19 /* EDX x 4 */
+#define A_SI23 (IU8)20 /* EBX x 4 */
+#define A_SI24 (IU8)21 /* undefined */
+#define A_SI25 (IU8)22 /* EBP x 4 */
+#define A_SI26 (IU8)23 /* ESI x 4 */
+#define A_SI27 (IU8)24 /* EDI x 4 */
+
+#define A_SI30 (IU8)25 /* EAX x 8 */
+#define A_SI31 (IU8)26 /* ECX x 8 */
+#define A_SI32 (IU8)27 /* EDX x 8 */
+#define A_SI33 (IU8)28 /* EBX x 8 */
+#define A_SI34 (IU8)29 /* undefined */
+#define A_SI35 (IU8)30 /* EBP x 8 */
+#define A_SI36 (IU8)31 /* ESI x 8 */
+#define A_SI37 (IU8)32 /* EDI x 8 */
+
+/* [ss][index] */
+LOCAL IU8 addr_subtype[4] [8] =	/* maps SIB (ss, index) fields to an A_SIxx scaled-index id */
+   {
+   {A_SI00, A_SI01, A_SI02, A_SI03, A_SI04, A_SI05, A_SI06, A_SI07},
+   {A_SI10, A_SI11, A_SI12, A_SI13, A_SI14, A_SI15, A_SI16, A_SI17},
+   {A_SI20, A_SI21, A_SI22, A_SI23, A_SI24, A_SI25, A_SI26, A_SI27},
+   {A_SI30, A_SI31, A_SI32, A_SI33, A_SI34, A_SI35, A_SI36, A_SI37}
+   };
+
+/*
+ Displacement information.
+ */
+#define D_NO (IU8)0
+#define D_S8 (IU8)1
+#define D_S16 (IU8)2
+#define D_Z16 (IU8)3
+#define D_32 (IU8)4
+
+/* [addr_sz][mode][r/m] */
+LOCAL IU8 addr_disp[2] [3] [8] =	/* maps (addr size, mod, r/m) to the displacement kind (D_xxx) to fetch */
+   {
+   { {D_NO , D_NO , D_NO , D_NO , D_NO , D_NO , D_Z16, D_NO },
+     {D_S8 , D_S8 , D_S8 , D_S8 , D_S8 , D_S8 , D_S8 , D_S8 },
+     {D_S16, D_S16, D_S16, D_S16, D_S16, D_S16, D_S16, D_S16} },
+
+   { {D_NO , D_NO , D_NO , D_NO , D_NO , D_32 , D_NO , D_NO },
+     {D_S8 , D_S8 , D_S8 , D_S8 , D_S8 , D_S8 , D_S8 , D_S8 },
+     {D_32 , D_32 , D_32 , D_32 , D_32 , D_32 , D_32 , D_32 } }
+   };
+
+/*
+ Default Segment information.
+ */
+/* [addr_sz][mode][r/m] */
+LOCAL IU8 addr_default_seg[2] [3] [8] =	/* default segment per addressing form (BP/EBP/ESP based forms default to SS) */
+   {
+   { {DS_REG, DS_REG, SS_REG, SS_REG, DS_REG, DS_REG, DS_REG, DS_REG},
+     {DS_REG, DS_REG, SS_REG, SS_REG, DS_REG, DS_REG, SS_REG, DS_REG},
+     {DS_REG, DS_REG, SS_REG, SS_REG, DS_REG, DS_REG, SS_REG, DS_REG} },
+
+   { {DS_REG, DS_REG, DS_REG, DS_REG, SS_REG, DS_REG, DS_REG, DS_REG},
+     {DS_REG, DS_REG, DS_REG, DS_REG, SS_REG, SS_REG, DS_REG, DS_REG},
+     {DS_REG, DS_REG, DS_REG, DS_REG, SS_REG, SS_REG, DS_REG, DS_REG} }
+   };
+
+/*
+
+ SIB
+ ---
+
+ 7 6 5 4 3 2 1 0
+ =================
+ |ss |index|base |
+ =================
+
+ */
+
+#define GET_SS(x) ((x) >> 6 & 0x3)
+#define GET_INDEX(x) ((x) >> 3 & 0x7)
+#define GET_BASE(x) ((x) & 0x7)
+
+
+/*
+ =====================================================================
+   EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Perform arithmetic for addressing functions. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+address_add	/* Add a signed delta to an offset, wrapping at 64K in 16-bit addressing. */
+
+IFN2(
+   IU32, offset,
+   IS32, delta
+   )
+
+
+   {
+   IU32 retval;
+
+   if ( GET_ADDRESS_SIZE() == USE32 )
+      retval = offset + delta;
+   else
+      retval = offset + delta & WORD_MASK;	/* '+' binds tighter than '&': (offset+delta) & 0xffff */
+
+   return retval;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Decode memory address. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+d_mem	/* Decode the effective address of a memory operand from modR/M (and SIB). */
+
+IFN5(
+   IU8, modRM,	/* (I ) current mode R/M byte */
+   IU8 **, p,	/* (IO) Intel opcode stream */
+   IU8, segment_override,	/* (I ) current segment_override */
+   ISM32 *, seg,	/* ( O) Segment register index */
+   IU32 *, off	/* ( O) Memory offset */
+   )
+
+   /* ANSI */
+   {
+   IU8 mode;	/* Working copy of 'mode' field */
+   IU8 r_m;	/* Working copy of 'R/M' field */
+   IU32 disp;	/* Working copy of displacement */
+   IU32 mem_off;	/* Working copy of memory offset */
+   IU8 identifier;	/* Memory addressing type */
+   IU8 sub_id;	/* Memory addressing sub type */
+
+   mode = GET_MODE(modRM);
+   r_m = GET_R_M(modRM);
+
+   /*
+      DECODE IT.
+    */
+
+   /* check for presence of SIB byte */
+   if ( r_m == 4 && GET_ADDRESS_SIZE() == USE32 )
+      {
+      /* process SIB byte */
+      modRM = GET_INST_BYTE(*p);   /* get SIB byte */
+
+      /* subvert the original r_m value with the base value,
+         then addressing mode, displacements and default
+         segments all fall out in the wash */
+      r_m = GET_BASE(modRM);
+
+      /* determine decoded type and sub type */
+      identifier = addr_maintype[2][mode][r_m];   /* 2 = 32-bit addr + SIB */
+      sub_id = addr_subtype[GET_SS(modRM)][GET_INDEX(modRM)];
+      }
+   else
+      {
+      /* no SIB byte */
+      identifier = addr_maintype[GET_ADDRESS_SIZE()][mode][r_m];
+      sub_id = A_SINO;
+      }
+
+   /* encode displacement */
+   switch ( addr_disp[GET_ADDRESS_SIZE()][mode][r_m] )
+      {
+   case D_NO:	/* No displacement */
+      disp = 0;
+      break;
+
+   case D_S8:	/* Sign extend Intel byte */
+      disp = GET_INST_BYTE(*p);
+      if ( disp & BIT7_MASK )
+         disp |= ~BYTE_MASK;
+      break;
+
+   case D_S16:	/* Sign extend Intel word */
+      disp = GET_INST_BYTE(*p);
+      disp |= (IU32)GET_INST_BYTE(*p) << 8;
+      if ( disp & BIT15_MASK )
+         disp |= ~WORD_MASK;
+      break;
+
+   case D_Z16:	/* Zero extend Intel word */
+      disp = GET_INST_BYTE(*p);
+      disp |= (IU32)GET_INST_BYTE(*p) << 8;
+      break;
+
+   case D_32:	/* Intel double word */
+      disp = GET_INST_BYTE(*p);
+      disp |= (IU32)GET_INST_BYTE(*p) << 8;
+      disp |= (IU32)GET_INST_BYTE(*p) << 16;
+      disp |= (IU32)GET_INST_BYTE(*p) << 24;
+      break;
+      }
+
+   /*
+      DO IT.
+    */
+
+   /* encode segment register */
+   if ( segment_override == SEG_CLR )
+      segment_override = addr_default_seg[GET_ADDRESS_SIZE()][mode][r_m];
+   *seg = segment_override;
+
+   /* calculate offset ('+' binds tighter than '&', so 16-bit forms wrap to 64K) */
+   switch ( identifier )
+      {
+   case A_1600: case A_1610: case A_1620:
+      mem_off = GET_BX() + GET_SI() + disp & WORD_MASK;
+      break;
+
+   case A_1601: case A_1611: case A_1621:
+      mem_off = GET_BX() + GET_DI() + disp & WORD_MASK;
+      break;
+
+   case A_1602: case A_1612: case A_1622:
+      mem_off = GET_BP() + GET_SI() + disp & WORD_MASK;
+      break;
+
+   case A_1603: case A_1613: case A_1623:
+      mem_off = GET_BP() + GET_DI() + disp & WORD_MASK;
+      break;
+
+   case A_1604: case A_1614: case A_1624:
+      mem_off = GET_SI() + disp & WORD_MASK;
+      break;
+
+   case A_1605: case A_1615: case A_1625:
+      mem_off = GET_DI() + disp & WORD_MASK;
+      break;
+
+   case A_1606:
+      mem_off = disp & WORD_MASK;
+      break;
+
+   case A_1616: case A_1626:
+      mem_off = GET_BP() + disp & WORD_MASK;
+      break;
+
+   case A_1607: case A_1617: case A_1627:
+      mem_off = GET_BX() + disp & WORD_MASK;
+      break;
+
+   case A_3200: case A_3210: case A_3220:
+   case A_32S00: case A_32S10: case A_32S20:
+      mem_off = GET_EAX() + disp;
+      break;
+
+   case A_3201: case A_3211: case A_3221:
+   case A_32S01: case A_32S11: case A_32S21:
+      mem_off = GET_ECX() + disp;
+      break;
+
+   case A_3202: case A_3212: case A_3222:
+   case A_32S02: case A_32S12: case A_32S22:
+      mem_off = GET_EDX() + disp;
+      break;
+
+   case A_3203: case A_3213: case A_3223:
+   case A_32S03: case A_32S13: case A_32S23:
+      mem_off = GET_EBX() + disp;
+      break;
+
+   case A_32S04: case A_32S14: case A_32S24:
+      mem_off = GET_ESP() + GET_POP_DISP() + disp;
+      break;
+
+   case A_3205:
+   case A_32S05:
+      mem_off = disp;
+      break;
+
+   case A_3215: case A_3225:
+   case A_32S15: case A_32S25:
+      mem_off = GET_EBP() + disp;
+      break;
+
+   case A_3206: case A_3216: case A_3226:
+   case A_32S06: case A_32S16: case A_32S26:
+      mem_off = GET_ESI() + disp;
+      break;
+
+   case A_3207: case A_3217: case A_3227:
+   case A_32S07: case A_32S17: case A_32S27:
+      mem_off = GET_EDI() + disp;
+      break;
+      } /* end switch */
+
+   /* add 'si', scale and index into offset */
+   switch ( sub_id )
+      {
+   case A_SINO: /* No SIB byte */ break;
+
+   case A_SI00: mem_off += GET_EAX(); break;
+   case A_SI01: mem_off += GET_ECX(); break;
+   case A_SI02: mem_off += GET_EDX(); break;
+   case A_SI03: mem_off += GET_EBX(); break;
+   case A_SI04: break;
+   case A_SI05: mem_off += GET_EBP(); break;
+   case A_SI06: mem_off += GET_ESI(); break;
+   case A_SI07: mem_off += GET_EDI(); break;
+
+   case A_SI10: mem_off += GET_EAX() << 1; break;
+   case A_SI11: mem_off += GET_ECX() << 1; break;
+   case A_SI12: mem_off += GET_EDX() << 1; break;
+   case A_SI13: mem_off += GET_EBX() << 1; break;
+   case A_SI14: break;
+   case A_SI15: mem_off += GET_EBP() << 1; break;
+   case A_SI16: mem_off += GET_ESI() << 1; break;
+   case A_SI17: mem_off += GET_EDI() << 1; break;
+
+   case A_SI20: mem_off += GET_EAX() << 2; break;
+   case A_SI21: mem_off += GET_ECX() << 2; break;
+   case A_SI22: mem_off += GET_EDX() << 2; break;
+   case A_SI23: mem_off += GET_EBX() << 2; break;
+   case A_SI24: break;
+   case A_SI25: mem_off += GET_EBP() << 2; break;
+   case A_SI26: mem_off += GET_ESI() << 2; break;
+   case A_SI27: mem_off += GET_EDI() << 2; break;
+
+   case A_SI30: mem_off += GET_EAX() << 3; break;
+   case A_SI31: mem_off += GET_ECX() << 3; break;
+   case A_SI32: mem_off += GET_EDX() << 3; break;
+   case A_SI33: mem_off += GET_EBX() << 3; break;
+   case A_SI34: break;
+   case A_SI35: mem_off += GET_EBP() << 3; break;
+   case A_SI36: mem_off += GET_ESI() << 3; break;
+   case A_SI37: mem_off += GET_EDI() << 3; break;
+      } /* end switch */
+
+   *off = mem_off;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Perform limit checking. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+limit_check	/* Fault (#GP, or #SS for stack) if an access exceeds the segment limit. */
+
+IFN4(
+   ISM32, index,	/* (I) segment register identifier */
+   IU32, offset,	/* (I) offset for first (lowest memory)
+			   data item */
+   ISM32, nr_items,	/* (I) number of items to be accessed */
+   IUM8, op_sz	/* (I) number of bytes in each item */
+   )
+
+
+   {
+   /*
+      As documented by Intel the basic limit check failures are:
+
+      IU8:- address > limit
+      IU16:- address > (limit-1)
+      IU32:- address > (limit-3)
+
+      We (for efficiency) extend the algorithm to handle multiple
+      operands with one check:- address > (limit-(total_nr_bytes-1)).
+
+      Further we must account for the different interpretation of
+      limit in expand down segments. This leads to two algorithms.
+
+      EXPAND UP:-
+
+      Check address > (limit-(total_nr_bytes-1)) with two caveats.
+      One, beware that the subtraction from limit may underflow
+      (eg a IU32 accessed in a 3 byte segment). Two, beware that
+      wraparound can occur if each individual operand is stored
+      contiguously and we have a 'full sized' segment.
+
+      EXPAND DOWN:-
+
+      Check address <= limit ||
+	    address > (segment_top-(total_nr_bytes-1)).
+      Because total_nr_bytes is always a relatively small number
+      the subtraction never underflows. And as you can never have
+      a full size expand down segment you can never have wraparound.
+
+      Additionally although 32-bit addressing mode may be used in Real
+      Mode, all offsets must fit in the range 0 - 0xffff.
+    */
+
+   /*
+      Note a quick summary of inclusive valid bounds is:-
+
+      =================================================
+      | E | G | X | lower bound | upper bound |
+      =================================================
+      | 0 | 0 | 0 | 0 | limit |
+      | 0 | 0 | 1 | 0 | limit |
+      | 0 | 1 | 0 | 0 | limit<<12|fff |
+      | 0 | 1 | 1 | 0 | limit<<12|fff |
+      | 1 | 0 | 0 | limit+1 | ffff |
+      | 1 | 0 | 1 | limit+1 | ffffffff |
+      | 1 | 1 | 0 | (limit<<12|fff)+1 | ffff |
+      | 1 | 1 | 1 | (limit<<12|fff)+1 | ffffffff |
+      =================================================
+    */
+
+   /*
+      We "pre-process" the G-bit when the segment is first loaded
+      and store the limit to reflect the G-bit as required. Hence we
+      don't need to refer to the G-bit here.
+    */
+
+   ISM32 range;	/* total bytes accessed, minus one */
+   BOOL bad_limit = FALSE;	/* set when any check below fails */
+   IU32 segment_top;
+
+   range = nr_items * op_sz - 1;
+
+   if ( GET_SR_AR_E(index) )
+      {
+      /* expand down */
+      if ( GET_SR_AR_X(index) == USE32 )
+         segment_top = 0xffffffff;
+      else
+         segment_top = 0xffff;
+
+      if ( offset <= GET_SR_LIMIT(index) ||   /* out of range */
+           offset > segment_top - range )   /* segment too small */
+         {
+         bad_limit = TRUE;
+         }
+      }
+   else
+      {
+      /* expand up */
+      segment_top = GET_SR_LIMIT(index);
+
+      if ( offset > segment_top ||   /* out of range */
+           segment_top < range )   /* segment too small */
+         {
+         bad_limit = TRUE;
+         }
+      else
+         {
+         if ( offset > segment_top - range )
+            {
+            /* data extends past end of segment */
+            if ( offset % op_sz != 0 )
+               {
+               /* Data mis-aligned, so basic operand won't be
+                  contiguously stored */
+               bad_limit = TRUE;
+               }
+            else
+               {
+               /* If 'full sized' segment wraparound can occur */
+               if ( GET_SR_AR_X(index) == USE16 )
+                  {
+                  if ( GET_SR_LIMIT(index) != 0xffff )
+                     bad_limit = TRUE;
+                  }
+               else /* USE32 */
+                  {
+                  if ( GET_SR_LIMIT(index) != 0xffffffff )
+                     bad_limit = TRUE;
+                  }
+               }
+            }
+         }
+      }
+
+
+
+#ifndef TAKE_REAL_MODE_LIMIT_FAULT
+
+   /* The Soft486 EDL CPU does not take Real Mode limit failures.
+    * Since the Ccpu486 is used as a "reference" cpu we wish it
+    * to behave as a C version of the EDL Cpu rather than as a C
+    * version of a i486.
+    */
+
+   if ( GET_PE() == 0 || GET_VM() == 1 )
+      return;
+#endif /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+   if ( bad_limit )
+      {
+      if ( index == SS_REG )
+         {
+         SF((IU16)0, FAULT_LIMITCHK_SEG_LIMIT);	/* stack segment: raise #SS */
+         }
+      else
+         {
+         GP((IU16)0, FAULT_LIMITCHK_SEG_LIMIT);	/* all other segments: raise #GP */
+         }
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_addr.h b/private/mvdm/softpc.new/base/ccpu386/c_addr.h
new file mode 100644
index 000000000..44a195430
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_addr.h
@@ -0,0 +1,39 @@
+/*[
+
+c_addr.h
+
+LOCAL CHAR SccsID[]="@(#)c_addr.h 1.4 02/09/94";
+
+Memory Addressing Support.
+--------------------------
+
+]*/
+
+IMPORT IU32 address_add
+
+IPT2(
+ IU32, offset,
+ IS32, delta
+
+ );
+
+IMPORT VOID d_mem
+
+IPT5(
+ IU8, modRM,
+ IU8 **, p,
+ IU8, segment_override,
+ ISM32 *, seg,
+ IU32 *, off
+
+ );
+
+IMPORT VOID limit_check
+
+IPT4(
+ ISM32, indx,
+ IU32, offset,
+ ISM32, nr_items,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_bsic.c b/private/mvdm/softpc.new/base/ccpu386/c_bsic.c
new file mode 100644
index 000000000..ca248fee8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_bsic.c
@@ -0,0 +1,369 @@
+/*[
+
+c_bsic.c
+
+LOCAL CHAR SccsID[]="@(#)c_bsic.c 1.7 09/20/94";
+
+Basic Protected Mode Support and Flag Support.
+----------------------------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_page.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Determine 'super' type from access rights. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL ISM32
+descriptor_super_type
+
+IFN1(
+ IU16, AR /* (I) access rights */
+ )
+
+
+ {
+ ISM32 super;
+
+ switch ( super = GET_AR_SUPER(AR) )
+ {
+ case 0x0: case 0x8: case 0xa: case 0xd:
+ /* Encodings with no descriptor type defined (see the
+ 'super' type list in c_bsic.h); they all collapse to
+ the single INVALID code */
+ return INVALID;
+
+
+ case 0x1: case 0x2: case 0x3:
+ case 0x4: case 0x5: case 0x6: case 0x7:
+ case 0x9: case 0xb: case 0xc: case 0xe: case 0xf:
+ /* system/control segments have one to one mapping */
+ return super;
+
+ case 0x10: case 0x11: case 0x12: case 0x13:
+ case 0x14: case 0x15: case 0x16: case 0x17:
+ case 0x18: case 0x19: case 0x1a: case 0x1b:
+ case 0x1c: case 0x1d: case 0x1e: case 0x1f:
+ /* data/code segments map as if accessed: the ACCESSED
+ bit is forced on in the returned type code */
+ return super | ACCESSED;
+ }
+
+ /* We 'know' we never get here, but the C compiler doesn't */
+ return 0;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set OF flag after multiple shift or rotate instruction. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+do_multiple_shiftrot_of
+
+IFN1(
+ ISM32, new_of /* (I) overflow that would be written by last bit
+ shift or rotate */
+ )
+
+
+ {
+ /* One-shot initialisation flag and the cached policy choice;
+ the environment is only consulted on the first call. */
+ SAVED IBOOL cold = TRUE;
+ SAVED IBOOL shiftrot_of_undef = FALSE;
+
+ if( cold )
+ {
+ /*
+ * Determine whether to have the multiple shift/rotates
+ * OF undefined or calculated by the count == 1 algorithm.
+ * The default is the count == 1 option.
+ */
+
+ shiftrot_of_undef = ( host_getenv( "SHIFTROT_OF_UNDEF" ) != NULL );
+ cold = FALSE;
+ }
+ /*
+ There are three possible actions:-
+
+ 1) Set OF based on the last bit shift or rotate.
+
+ 2) Leave OF unchanged
+
+ 3) Set OF to a specific undefined value.
+ */
+
+ if( shiftrot_of_undef )
+ {
+ /* Set undefined flag(s) */
+ SET_OF(UNDEFINED_FLAG);
+ }
+ else
+ {
+ /* Just like count of one case */
+ SET_OF(new_of);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Retrieve Intel EFLAGS register value */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+c_getEFLAGS IFN0()
+ {
+ IU32 flags;
+
+ flags = getFLAGS(); /* get lower word */
+
+ /* merge in the 386 extended bits: VM (bit 17) and RF (bit 16) */
+ flags = flags | GET_VM() << 17 | GET_RF() << 16;
+
+#ifdef SPC486
+ /* 486 build only: Alignment Check flag (bit 18) */
+ flags = flags | GET_AC() << 18;
+#endif /* SPC486 */
+
+ return flags;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Retrieve Intel FLAGS register value */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+getFLAGS()
+ {
+ IU32 flags;
+
+ /* Pack the individual flag variables into their Intel FLAGS
+ bit positions; bit 1 always reads as 1 (hence the | 0x2). */
+ flags = GET_NT() << 14 | GET_IOPL() << 12 | GET_OF() << 11 |
+ GET_DF() << 10 | GET_IF() << 9 | GET_TF() << 8 |
+ GET_SF() << 7 | GET_ZF() << 6 | GET_AF() << 4 |
+ GET_PF() << 2 | GET_CF() | 0x2;
+
+ return flags;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Read a descriptor table at given linear address. */
+/* Take #PF if descriptor not in linear address space. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+read_descriptor_linear
+
+IFN2(
+ IU32, addr, /* (I) Linear address of descriptor */
+ CPU_DESCR *, descr /* (O) Pntr to our internal descriptor structure */
+ )
+
+
+ {
+ IU32 first_dword;
+ IU32 second_dword;
+ IU32 limit;
+
+ /*
+ The format of a 286 descriptor is:-
+
+ ===========================
+ +1 | LIMIT 15-0 | +0
+ ===========================
+ +3 | BASE 15-0 | +2
+ ===========================
+ +5 | AR | BASE 23-16 | +4
+ ===========================
+ +7 | RESERVED | +6
+ ===========================
+ */
+
+ /*
+ The format of a 386 descriptor is:-
+
+ ============================= AR = Access Rights.
+ +1 | LIMIT 15-0 | +0 AVL = Available.
+ ============================= D = Default Operand
+ +3 | BASE 15-0 | +2 Size, = 0 16-bit
+ ============================= = 1 32-bit.
+ +5 | AR | BASE 23-16 | +4 G = Granularity,
+ ============================= = 0 byte limit
+ | | | | |A|LIMIT| = 1 page limit.
+ +7 | BASE 31-24 |G|D|0|V|19-16| +6
+ | | | | |L| |
+ =============================
+
+ */
+
+ /* read in descriptor with minimum interaction with memory
+ (two dword reads; may take #PF if the table is paged out) */
+ first_dword = spr_read_dword(addr);
+ second_dword = spr_read_dword(addr+4);
+
+ /* load attributes and access rights: AR byte (+5) plus the
+ G/D/AVL/limit nibble (+6) packed into one 16-bit field */
+ descr->AR = second_dword >> 8 & WORD_MASK;
+
+ /* unpack the base: BASE 15-0, BASE 23-16 and BASE 31-24 */
+ descr->base = (first_dword >> 16) |
+ (second_dword << 16 & 0xff0000 ) |
+ (second_dword & 0xff000000);
+
+ /* unpack the limit: LIMIT 15-0 plus LIMIT 19-16 */
+ limit = (first_dword & WORD_MASK) | (second_dword & 0xf0000);
+
+ if ( second_dword & BIT23_MASK )
+ {
+ /* Granularity Bit Set. Limit is expressed in pages
+ (4k bytes), convert to byte limit */
+ limit = limit << 12 | 0xfff;
+ }
+ descr->limit = limit;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check for null selector */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
<parameter name="content">+GLOBAL BOOL
+selector_is_null
+
+IFN1(
+ IU16, selector /* selector to be checked */
+ )
+
+
+ {
+ /* Null means a GDT selector (TI == 0) with index 0; the RPL
+ bits are ignored, so values 0-3 are all null selectors. */
+ if ( GET_SELECTOR_INDEX(selector) == 0 && GET_SELECTOR_TI(selector) == 0 )
+ return TRUE;
+ return FALSE;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check if selector outside bounds of GDT */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL BOOL
+selector_outside_GDT
+
+IFN2(
+ IU16, selector, /* (I) selector to be checked */
+ IU32 *, descr_addr /* (O) address of related descriptor */
+ )
+
+
+ {
+ IU16 offset;
+
+ offset = GET_SELECTOR_INDEX_TIMES8(selector);
+
+ /* make sure GDT then trap NULL selector or outside table
+ (the whole 8-byte descriptor must fit within the limit) */
+ if ( GET_SELECTOR_TI(selector) == 1 ||
+ offset == 0 || offset + 7 > GET_GDT_LIMIT() )
+ return TRUE;
+
+ /* valid: return the linear address of the descriptor */
+ *descr_addr = GET_GDT_BASE() + offset;
+ return FALSE;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check if selector outside bounds of GDT or LDT */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL BOOL
+selector_outside_GDT_LDT
+
+IFN2(
+ IU16, selector, /* (I) selector to be checked */
+ IU32 *, descr_addr /* (O) address of related descriptor */
+ )
+
+
+ {
+ IU16 offset;
+
+ offset = GET_SELECTOR_INDEX_TIMES8(selector);
+
+ /* choose a table */
+ if ( GET_SELECTOR_TI(selector) == 0 )
+ {
+ /* GDT - trap NULL selector or outside table */
+ if ( offset == 0 || offset + 7 > GET_GDT_LIMIT() )
+ return TRUE;
+ *descr_addr = GET_GDT_BASE() + offset;
+ }
+ else
+ {
+ /* LDT - trap invalid LDT or outside table */
+#ifndef DONT_CLEAR_LDTR_ON_INVALID
+ /* LDTR selectors 0-3 are all forms of the null selector */
+ if ( GET_LDT_SELECTOR() <= 3 || offset + 7 > GET_LDT_LIMIT() )
+#else
+ if ( GET_LDT_SELECTOR() == 0 || offset + 7 > GET_LDT_LIMIT() )
+#endif /* DONT_CLEAR_LDTR_ON_INVALID */
+ return TRUE;
+ *descr_addr = GET_LDT_BASE() + offset;
+ }
+
+ return FALSE;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Store new value in Intel EFLAGS register. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+c_setEFLAGS
+
+IFN1(
+ IU32, flags
+ )
+
+
+ {
+ setFLAGS(flags); /* set lower word */
+
+ SET_RF((flags & BIT16_MASK) != 0);
+
+ /* VM may only be changed at the highest privilege level */
+ if ( GET_CPL() == 0 )
+ SET_VM((flags & BIT17_MASK) != 0);
+
+#ifdef SPC486
+ /* 486 build only: Alignment Check flag */
+ SET_AC((flags & BIT18_MASK) != 0);
+#endif /* SPC486 */
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Store new value in Intel FLAGS register */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+setFLAGS
+
+IFN1(
+ IU32, flags
+ )
+
+
+ {
+ /* Unpack the lower 16 bits of an Intel FLAGS image into the
+ individual flag variables, honouring the privilege rules
+ for IF and IOPL. */
+ SET_CF((flags & BIT0_MASK) != 0);
+ SET_PF((flags & BIT2_MASK) != 0);
+ SET_AF((flags & BIT4_MASK) != 0);
+ SET_ZF((flags & BIT6_MASK) != 0);
+ SET_SF((flags & BIT7_MASK) != 0);
+ SET_TF((flags & BIT8_MASK) != 0);
+ SET_DF((flags & BIT10_MASK) != 0);
+ SET_OF((flags & BIT11_MASK) != 0);
+
+ /* IF only updated if CPL <= IOPL */
+ if ( GET_CPL() <= GET_IOPL() )
+ SET_IF((flags & BIT9_MASK) != 0);
+
+ SET_NT((flags & BIT14_MASK) != 0);
+
+ /* IOPL only updated at highest privilege */
+ if ( GET_CPL() == 0 )
+ SET_IOPL((flags >> 12) & 3);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_bsic.h b/private/mvdm/softpc.new/base/ccpu386/c_bsic.h
new file mode 100644
index 000000000..ba656c371
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_bsic.h
@@ -0,0 +1,138 @@
+/*[
+
+c_bsic.h
+
+Basic Protected Mode Support and Flag Support.
+----------------------------------------------
+
+LOCAL CHAR SccsID[]="@(#)c_bsic.h 1.5 09/01/94";
+
+]*/
+
+
+/*
+ Define descriptor 'super' types.
+ */
+#define INVALID 0x00
+#define AVAILABLE_TSS 0x01
+#define LDT_SEGMENT 0x02
+#define BUSY_TSS 0x03
+#define CALL_GATE 0x04
+#define TASK_GATE 0x05
+#define INTERRUPT_GATE 0x06
+#define TRAP_GATE 0x07
+#define XTND_AVAILABLE_TSS 0x09
+#define XTND_BUSY_TSS 0x0b
+#define XTND_CALL_GATE 0x0c
+#define XTND_INTERRUPT_GATE 0x0e
+#define XTND_TRAP_GATE 0x0f
+#define EXPANDUP_READONLY_DATA 0x11
+#define EXPANDUP_WRITEABLE_DATA 0x13
+#define EXPANDDOWN_READONLY_DATA 0x15
+#define EXPANDDOWN_WRITEABLE_DATA 0x17
+#define NONCONFORM_NOREAD_CODE 0x19
+#define NONCONFORM_READABLE_CODE 0x1b
+#define CONFORM_NOREAD_CODE 0x1d
+#define CONFORM_READABLE_CODE 0x1f
+
+
+/*
+ Macros for access to SELECTOR bit fields.
+
+ 15 3 2 1 0
+ =================================
+ SELECTOR | INDEX |T|RPL|
+ | |I| |
+ =================================
+ */
+#define GET_SELECTOR_INDEX_TIMES8(x) ((x) & 0xfff8)
+#define GET_SELECTOR_INDEX(x) (((x) & 0xfff8) >> 3)
+#define GET_SELECTOR_TI(x) (((x) & 0x0004) >> 2)
+#define GET_SELECTOR_RPL(x) ((x) & 0x3)
+#define SET_SELECTOR_RPL(x,y) (x = ((x) & ~0x3) | y)
+
+/*
+ Macros for access to ACCESS RIGHTS bit fields.
+
+ 1 1 1 1 1 1
+ 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ =================================
+ ACCESS RIGHTS |-|X|-|-|-|-|-|-|P|DPL| SUPER |
+ =================================
+
+ Code Segment: Data Segment:
+ 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+ ================= =================
+ |P|DPL|1|1|C|R|A| |P|DPL|1|0|E|W|A|
+ ================= =================
+
+ */
+#define GET_AR_P(x) (((x) & BIT7_MASK) != 0) /* present */
+#define GET_AR_E(x) (((x) & BIT2_MASK) != 0) /* expand down */
+#define GET_AR_C(x) (((x) & BIT2_MASK) != 0) /* conforming */
+#define GET_AR_W(x) (((x) & BIT1_MASK) != 0) /* writeable */
+#define GET_AR_R(x) (((x) & BIT1_MASK) != 0) /* readable */
+#define GET_AR_X(x) (((x) & BIT14_MASK) != 0) /* big/default */
+
+#define GET_AR_DPL(x) (((x) & 0x60) >> 5)
+#define GET_AR_SUPER(x) ((x) & 0x1f)
+
+#define NOT_PRESENT 0
+#define PRESENT 1
+#define ACCESSED 1
+
+
+IMPORT ISM32 descriptor_super_type
+
+IPT1(
+ IU16, AR
+
+ );
+
+IMPORT VOID do_multiple_shiftrot_of
+
+IPT1(
+ ISM32, new_of
+
+ );
+
+
+IMPORT IU32 getFLAGS IPT0();
+
+IMPORT VOID read_descriptor_linear
+
+IPT2(
+ IU32, addr,
+ CPU_DESCR *, descr
+
+ );
+
+IMPORT BOOL selector_is_null
+
+IPT1(
+ IU16, selector
+
+ );
+
+IMPORT BOOL selector_outside_GDT
+
+IPT2(
+ IU16, selector,
+ IU32 *, descr_addr
+
+ );
+
+IMPORT BOOL selector_outside_GDT_LDT
+
+IPT2(
+ IU16, selector,
+ IU32 *, descr_addr
+
+ );
+
+IMPORT VOID setFLAGS
+
+IPT1(
+ IU32, flags
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_debug.c b/private/mvdm/softpc.new/base/ccpu386/c_debug.c
new file mode 100644
index 000000000..e3fc7f26d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_debug.c
@@ -0,0 +1,386 @@
+/*[
+
+c_debug.c
+
+LOCAL CHAR SccsID[]="@(#)c_debug.c 1.5 02/09/94";
+
+Debugging Register and Breakpoint Support
+-----------------------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_debug.h>
+
+
+/*
+ IMPLEMENTATION NOTE. We ignore the GE and LE bits, effectively like
+ the 486 we will always generate exact exceptions. As we have no
+ pipeline architecture and have always finished the last instruction
+ before starting the next one, we can easily provide exact exceptions.
+
+ For the same reason we never need to set the BD bit, with no
+ pipelining the debug registers may be freely written to at any time.
+ */
+
+
+/*
+ We hold instruction breakpoints as a linear address, plus
+ an index which identifies the debug register.
+ */
+typedef struct
+ {
+ IU32 addr; /* Linear address of breakpoint */
+ IU32 id; /* debug register identifier */
+ } INST_BREAK;
+
+/*
+ We hold data breakpoints as start and end linear addresses, type
+ and an index which identifies the debug register.
+ */
+typedef struct
+ {
+ IU32 start_addr; /* Linear start address of breakpoint (incl) */
+ IU32 end_addr; /* Linear end address of breakpoint (incl) */
+ ISM32 type; /* D_WO (write) or D_RW (read/write) */
+ IU32 id; /* debug register identifier */
+ } DATA_BREAK;
+
+/*
+ Data breakpoint types.
+ */
+#define D_WO 0 /* write only */
+#define D_RW 1 /* read or write */
+
+#define NR_BRKS 4 /* Intel has 4 breakpoint address regs */
+
+/*
+ Our breakpoint structure.
+ */
+GLOBAL IU32 nr_inst_break = 0; /* number of inst breakpoints active */
+GLOBAL IU32 nr_data_break = 0; /* number of data breakpoints active */
+
+LOCAL INST_BREAK i_brk[NR_BRKS];
+LOCAL DATA_BREAK d_brk[NR_BRKS];
+
+/*
+ Define masks and shifts for components of Debug Control Register.
+
+ DCR = Debug Control Register:-
+
+ 33 22 22 22 22 22 11 11 1 1
+ 10 98 76 54 32 10 98 76 5 0 9 8 7 6 5 4 3 2 1 0
+ ====================================================
+ |L |R |L |R |L |R |L |R | 0 |G|L|G|L|G|L|G|L|G|L|
+ |E |/ |E |/ |E |/ |E |/ | |E|E|3|3|2|2|1|1|0|0|
+ |N |W |N |W |N |W |N |W | | | | | | | | | | | |
+ |3 |3 |2 |2 |1 |1 |0 |0 | | | | | | | | | | | |
+ ====================================================
+
+ */
+
+LOCAL IU32 g_l_shift[NR_BRKS] =
+ {
+ 0, /* access G0 L0 */
+ 2, /* access G1 L1 */
+ 4, /* access G2 L2 */
+ 6 /* access G3 L3 */
+ };
+
+LOCAL IU32 r_w_shift[NR_BRKS] =
+ {
+ 16, /* access R/W0 */
+ 20, /* access R/W1 */
+ 24, /* access R/W2 */
+ 28 /* access R/W3 */
+ };
+
+LOCAL IU32 len_shift[NR_BRKS] =
+ {
+ 18, /* access LEN0 */
+ 22, /* access LEN1 */
+ 26, /* access LEN2 */
+ 30 /* access LEN3 */
+ };
+
+#define COMP_MASK 0x3 /* all fields are 2-bit */
+
+
+/*
+ =====================================================================
+ INTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Map Intel length indicator to start and end address form. */
+/* RETURNS true if valid len processed */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL BOOL
+len_to_addr
+
+IFN3(
+ ISM32, index, /* (I) debug register holding breakpoint */
+ IU32 *, start, /* (O) pntr to start address (inclusive) for
+ debug area */
+ IU32 *, end /* (O) pntr to end address (inclusive) for
+ debug area */
+ )
+
+
+ {
+ BOOL retval;
+
+ /* map length into start and end addresses; multi-byte
+ breakpoints are aligned down to their operand size first */
+ switch ( GET_DR(DR_DCR) >> len_shift[index] & COMP_MASK )
+ {
+ case 0: /* one byte */
+ *start = *end = GET_DR(index);
+ retval = TRUE;
+ break;
+
+ case 1: /* two byte */
+ *start = GET_DR(index) & ~BIT0_MASK;
+ *end = *start + 1;
+ retval = TRUE;
+ break;
+
+ case 3: /* four byte */
+ *start = GET_DR(index) & ~(BIT1_MASK | BIT0_MASK);
+ *end = *start + 3;
+ retval = TRUE;
+ break;
+
+ case 2: /* undefined */
+ default:
+ /* outputs are left unwritten; callers must ignore
+ them when FALSE is returned */
+ retval = FALSE;
+ break;
+ }
+
+ return retval;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Put Intel inst breakpoint into our internal form. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+setup_inst_break
+
+IFN1(
+ ISM32, index /* debug register holding breakpoint */
+ )
+
+
+ {
+ INST_BREAK *p;
+
+ /* append to the table of active instruction breakpoints */
+ p = &i_brk[nr_inst_break];
+
+ p->addr = GET_DR(index);
+ p->id = index;
+
+ nr_inst_break++;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Put Intel data breakpoint into our internal form. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+setup_data_break
+
+IFN2(
+ ISM32, type, /* (I) D_WO(write) or D_RW(read/write) breakpoint */
+ ISM32, index /* (I) debug register holding breakpoint */
+ )
+
+
+ {
+ DATA_BREAK *p;
+
+ p = &d_brk[nr_data_break];
+
+ /* a breakpoint with an undefined LEN field is silently
+ dropped (len_to_addr returns FALSE) */
+ if ( len_to_addr(index, &p->start_addr, &p->end_addr) )
+ {
+ p->id = index;
+ p->type = type;
+ nr_data_break++;
+ }
+ }
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check Memory Access for Data Breakpoint Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+check_for_data_exception
+
+IFN3(
+ IU32, la, /* linear address */
+ ISM32, attr, /* read or write access to memory */
+ ISM32, size /* encoded IU8, IU16 or IU32 size indicator */
+ )
+
+
+ {
+ ISM32 i; /* index thru active data breakpoints */
+ ISM32 ii; /* index thru all Intel breakpoints */
+ ISM32 trig; /* id of breakpoint which first triggers */
+ IU32 end_la; /* end (inclusive) address of memory access */
+ IU32 start; /* start (inclusive) address of brkpnt. */
+ IU32 end; /* end (inclusive) address of brkpnt. */
+ BOOL data_brk; /* current breakpoint needs range check */
+ DATA_BREAK *p;
+
+ end_la = la + size; /* calc. end address (inclusive) */
+
+ /* look for debugging hit among active breakpoints */
+ /* NOTE(review): i is ISM32 while nr_data_break is IU32 - a
+ mixed signed/unsigned comparison; harmless as both stay
+ within 0..NR_BRKS. */
+ for ( i = 0; i < nr_data_break; i++ )
+ {
+ p = &d_brk[i];
+
+ /* no hit if the access range misses the breakpoint range
+ or a read hits a write-only breakpoint ('&&' binds
+ tighter than '||', so no parentheses are needed) */
+ if ( la > p->end_addr || end_la < p->start_addr ||
+ attr == D_R && p->type == D_WO )
+ {
+ ; /* no hit */
+ }
+ else
+ {
+ /* Data breakpoint triggered */
+ trig = p->id; /* get Intel identifier */
+ SET_DR(DR_DSR, GET_DR(DR_DSR) | 1 << trig); /* set B bit */
+
+ /*
+ Now all breakpoints are checked regardless of the
+ enable bits and the appropriate B bit set if the
+ breakpoint would trigger if enabled.
+ */
+ for ( ii = 0; ii < NR_BRKS; ii++ )
+ {
+ if ( ii == trig )
+ continue; /* we have already processed the trigger */
+
+ data_brk = FALSE;
+
+ /* action according to R/W field */
+ switch ( GET_DR(DR_DCR) >> r_w_shift[ii] & COMP_MASK )
+ {
+ case 1: /* data write only */
+ if ( attr == D_W )
+ {
+ data_brk = len_to_addr(ii, &start, &end);
+ }
+ break;
+
+ case 3: /* data read or write */
+ data_brk = len_to_addr(ii, &start, &end);
+ break;
+ }
+
+ if ( data_brk )
+ {
+ if ( la > end || end_la < start )
+ {
+ ; /* no hit */
+ }
+ else
+ {
+ /* set appropriate B bit */
+ SET_DR(DR_DSR, GET_DR(DR_DSR) | 1 << ii);
+ }
+ }
+ }
+
+ break; /* all done after one trigger */
+ }
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check Memory Access for Instruction Breakpoint Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+check_for_inst_exception
+
+IFN1(
+ IU32, la /* linear address */
+ )
+
+
+ {
+ ISM32 i; /* index thru active inst breakpoints */
+ ISM32 trig;
+ INST_BREAK *p;
+
+ /* look for debugging hit among active breakpoints; unlike
+ the data case every matching breakpoint sets its B bit */
+ for ( i = 0; i < nr_inst_break; i++ )
+ {
+ p = &i_brk[i];
+
+ if ( p->addr == la )
+ {
+ /* Inst breakpoint triggered */
+ trig = p->id; /* get Intel identifier */
+ SET_DR(DR_DSR, GET_DR(DR_DSR) | 1 << trig); /* set B bit */
+ }
+ }
+ }
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Put Intel debugging registers into internal breakpoint form. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+setup_breakpoints()
+ {
+ ISM32 i;
+
+ nr_inst_break = nr_data_break = 0; /* set no breakpoints */
+
+ /* look for breakpoints set in DCR */
+ /* NOTE(review): i serves both as the debug register number and
+ as the index into g_l_shift[]/r_w_shift[]; this assumes
+ DR_DAR0..DR_DAR3 are 0..3 - confirm against c_reg.h. */
+ for ( i = DR_DAR0; i <= DR_DAR3; i++ )
+ {
+ /* look for globally or locally active */
+ if ( GET_DR(DR_DCR) >> g_l_shift[i] & COMP_MASK )
+ {
+ /* action according to R/W field */
+ switch ( GET_DR(DR_DCR) >> r_w_shift[i] & COMP_MASK )
+ {
+ case 0: /* instruction breakpoint */
+ setup_inst_break(i);
+ break;
+
+ case 1: /* data write only */
+ setup_data_break(D_WO, i);
+ break;
+
+ case 2: /* undefined */
+ /* do nothing */
+ break;
+
+ case 3: /* data read or write */
+ setup_data_break(D_RW, i);
+ break;
+ }
+ }
+ }
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_debug.h b/private/mvdm/softpc.new/base/ccpu386/c_debug.h
new file mode 100644
index 000000000..82790e622
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_debug.h
@@ -0,0 +1,45 @@
+/*[
+
+c_debug.h
+
+Debugging Register and Breakpoint Support
+-----------------------------------------
+
+LOCAL CHAR SccsID[]="@(#)c_debug.h 1.5 09/01/94";
+
+]*/
+
+
+/*
+ Debug breakpoint status variables.
+ */
+IMPORT IU32 nr_inst_break;
+IMPORT IU32 nr_data_break;
+
+/*
+ Debug attributes.
+ */
+#define D_R 0 /* memory read */
+#define D_W 1 /* memory write */
+
+#define D_BYTE 0
+#define D_WORD 1
+#define D_DWORD 3
+
+IMPORT VOID setup_breakpoints IPT0();
+
+IMPORT VOID check_for_data_exception
+
+IPT3(
+ IU32, la,
+ ISM32, attr,
+ ISM32, size
+
+ );
+
+IMPORT VOID check_for_inst_exception
+
+IPT1(
+ IU32, la
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_div64.c b/private/mvdm/softpc.new/base/ccpu386/c_div64.c
new file mode 100644
index 000000000..ae5826b56
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_div64.c
@@ -0,0 +1,147 @@
+/*[
+
+c_div64.c
+
+LOCAL CHAR SccsID[]="@(#)c_div64.c 1.5 02/09/94";
+
+64-bit Divide Functions.
+------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_div64.h>
+#include <c_neg64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Do 64bit = 64bit / 32bit Divide (Signed). */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+div64
+
+IFN4(
+ IS32 *, hr, /* High 32 bits of dividend/quotient */
+ IS32 *, lr, /* Low 32 bits of dividend/quotient */
+ IS32, divisor,
+ IS32 *, rem /* Remainder */
+ )
+
+
+ {
+ /*
+ Signed divide built on the unsigned divu64: both operands are
+ normalised to positive, then the signs are fixed up - the
+ quotient is negated iff the operand signs differ, and the
+ remainder takes the sign of the dividend.
+ NOTE(review): divisor = -divisor overflows for 0x80000000;
+ behaviour then depends on the host's signed negation - TODO
+ confirm this matches the EDL reference CPU.
+ */
+ if ( *hr & BIT31_MASK )
+ {
+ if ( divisor & BIT31_MASK )
+ {
+ /* Negative Dividend :: Negative Divisor */
+ neg64(hr, lr);
+ divisor = -divisor;
+ divu64((IU32 *)hr, (IU32 *)lr, (IU32)divisor, (IU32 *)rem);
+ *rem = -*rem;
+ }
+ else
+ {
+ /* Negative Dividend :: Positive Divisor */
+ neg64(hr, lr);
+ divu64((IU32 *)hr, (IU32 *)lr, (IU32)divisor, (IU32 *)rem);
+ neg64(hr, lr);
+ *rem = -*rem;
+ }
+ }
+ else
+ {
+ if ( divisor & BIT31_MASK )
+ {
+ /* Positive Dividend :: Negative Divisor */
+ divisor = -divisor;
+ divu64((IU32 *)hr, (IU32 *)lr, (IU32)divisor, (IU32 *)rem);
+ neg64(hr, lr);
+ }
+ else
+ {
+ /* Positive Dividend :: Positive Divisor */
+ divu64((IU32 *)hr, (IU32 *)lr, (IU32)divisor, (IU32 *)rem);
+ }
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Do 64bit = 64bit / 32bit Divide (Unsigned). */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+divu64
+
+IFN4(
+ IU32 *, hr, /* High 32 bits of dividend/quotient */
+ IU32 *, lr, /* Low 32 bits of dividend/quotient */
+ IU32, divisor,
+ IU32 *, rem /* Remainder */
+ )
+
+
+ {
+ ISM32 count;
+ IU32 hd; /* High 32 bits of dividend/quotient */
+ IU32 ld; /* Low 32 bits of dividend/quotient */
+ IU32 par_div; /* partial dividend */
+ IU32 carry1;
+ IU32 carry2;
+ IU32 carry3;
+
+ /*
+ Classic restoring shift-and-subtract long division: 64
+ iterations, producing one quotient bit per loop. The quotient
+ replaces the dividend in <hr:lr>.
+ NOTE(review): a zero divisor is not trapped here - the caller
+ must raise the divide exception before calling.
+ */
+ hd = *hr; /* Get local copies */
+ ld = *lr;
+ count = 64; /* Initialise */
+ par_div = 0;
+
+ while ( count != 0 )
+ {
+ /* shift <par_div:dividend> left.
+ We have to watch out for carries from
+ ld<bit31> to hd<bit0> (carry1) and
+ hd<bit31> to par_div<bit0> (carry2) and
+ par_div<bit31> to 'carry' (carry3).
+ */
+ carry1 = carry2 = carry3 = 0;
+ if ( ld & BIT31_MASK )
+ carry1 = 1;
+ if ( hd & BIT31_MASK )
+ carry2 = 1;
+ if ( par_div & BIT31_MASK )
+ carry3 = 1;
+ ld = ld << 1;
+ hd = hd << 1 | carry1;
+ par_div = par_div << 1 | carry2;
+
+ /* check if divisor 'goes into' partial dividend */
+ if ( carry3 || divisor <= par_div )
+ {
+ /* Yes it does */
+ par_div = par_div - divisor;
+ ld = ld | 1; /* output a 1 bit */
+ }
+ count--;
+ }
+
+ *rem = par_div; /* Return results */
+ *hr = hd;
+ *lr = ld;
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_div64.h b/private/mvdm/softpc.new/base/ccpu386/c_div64.h
new file mode 100644
index 000000000..69d29237b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_div64.h
@@ -0,0 +1,29 @@
+/*
+ c_div64.h
+
+ Define all 64-bit Divide Functions.
+ */
+
+/*
+ static char SccsID[]="@(#)c_div64.h 1.4 02/09/94";
+ */
+
+IMPORT VOID divu64
+
+IPT4(
+ IU32 *, hr,
+ IU32 *, lr,
+ IU32, divisor,
+ IU32 *, rem
+
+ );
+
+IMPORT VOID div64
+
+IPT4(
+ IS32 *, hr,
+ IS32 *, lr,
+ IS32, divisor,
+ IS32 *, rem
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_getset.c b/private/mvdm/softpc.new/base/ccpu386/c_getset.c
new file mode 100644
index 000000000..299917388
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_getset.c
@@ -0,0 +1,290 @@
+#include "insignia.h"
+#include "host_def.h"
+
+/*[
+ * ============================================================================
+ *
+ * Name: c_getset.c
+ *
+ * Derived From: pig/getsetc.c
+ *
+ * Author: Andrew Ogle
+ *
+ * Created On: 9th February 1993
+ *
+ * Sccs ID: @(#)c_getset.c 1.25 12/06/94
+ *
+ * Purpose:
+ *
+ * Defines procedures for getting and setting the complete
+ * C CPU status required for instruction and application testing
+ * against the assembler CPU.
+ * These routines are used by both the instruction and application
+ * piggers.
+ *
+ * (c)Copyright Insignia Solutions Ltd., 1993. All rights reserved.
+ *
+ * ============================================================================
+]*/
+
+#if defined(PIG)
+
+
+/*
+ * Get access to C CPU's global defintions.
+ */
+#include <xt.h>
+#define CPU_PRIVATE
+#include CpuH
+#include <evidgen.h>
+
+/*
+ * Local structure definitions.
+ */
+#include "c_reg.h"
+#include <Fpu_c.h>
+#include <PigReg_c.h>
+#include <ccpupig.h>
+#include <ccpusas4.h>
+
+LOCAL cpustate_t *p_current_state; /* used to check if NPX regs valid */
+
+/*(
+============================ c_setCpuNpxRegisters =============================
+PURPOSE:
+ The NPX registers are only transfered on demand from the CPU
+ under test (EDL) to the CCPU. This is because the information
+ involved is large and costly to process since it must be stored
+ textually in the state structure.
+===============================================================================
+)*/
+GLOBAL void c_setCpuNpxRegisters IFN1(cpustate_t *, p_state)
+{
+ /* Copy the saved NPX (FPU) state into the C CPU's FPU model. */
+ setNpxControlReg(p_state->NPX_regs.NPX_control);
+ setNpxStatusReg(p_state->NPX_regs.NPX_status);
+ setNpxStackRegs(p_state->NPX_regs.NPX_ST);
+ setNpxTagwordReg(p_state->NPX_regs.NPX_tagword);
+}
+
+/*(
+============================ c_checkCpuNpxRegisters ===========================
+PURPOSE:
+ retrieves the NPX state from the assembler CPU and updates the Ccpu.
+===============================================================================
+)*/
+GLOBAL void c_checkCpuNpxRegisters IFN0()
+{
+ if (p_current_state->NPX_valid)
+ {
+ /* The CCPU already has the NPX registers */
+ return;
+ }
+ /* Lazily fetch the NPX state from the assembler CPU, load it
+ into the C CPU, and mark the cached state valid. */
+ GetAcpuNpxRegisters(p_current_state);
+ p_current_state->NPX_valid = TRUE;
+ c_setCpuNpxRegisters(p_current_state);
+}
+
+
+/*(
+============================ c_getCpuState =====================================
+PURPOSE:
+ Saves the complete current state of the C CPU in the passed
+ state structure.
+===============================================================================
+)*/
+
+GLOBAL void
+c_getCpuState IFN1(
+ cpustate_t *, p_state
+)
+{
+ /*
+ * Recover machine status word, privilege level and instruction
+ * pointer.
+ */
+ p_state->cpu_regs.CR0 = GET_CR(0);
+ p_state->cpu_regs.PFLA = GET_CR(2);
+ p_state->cpu_regs.PDBR = GET_CR(3);
+
+ p_state->cpu_regs.CPL = GET_CPL();
+ p_state->cpu_regs.EIP = GET_EIP();
+
+ /*
+ * Recover general registers
+ */
+ p_state->cpu_regs.EAX = GET_EAX();
+ p_state->cpu_regs.EBX = GET_EBX();
+ p_state->cpu_regs.ECX = GET_ECX();
+ p_state->cpu_regs.EDX = GET_EDX();
+ p_state->cpu_regs.ESP = GET_ESP();
+ p_state->cpu_regs.EBP = GET_EBP();
+ p_state->cpu_regs.ESI = GET_ESI();
+ p_state->cpu_regs.EDI = GET_EDI();
+
+ /*
+ * Recover processor status flags.
+ */
+ p_state->cpu_regs.EFLAGS = c_getEFLAGS();
+
+ /*
+ * Recover descriptor table registers.
+ */
+ p_state->cpu_regs.GDT_base = GET_GDT_BASE();
+ p_state->cpu_regs.GDT_limit = GET_GDT_LIMIT();
+
+ p_state->cpu_regs.IDT_base = GET_IDT_BASE();
+ p_state->cpu_regs.IDT_limit = GET_IDT_LIMIT();
+
+ p_state->cpu_regs.LDT_selector = GET_LDT_SELECTOR();
+ p_state->cpu_regs.LDT_base = GET_LDT_BASE();
+ p_state->cpu_regs.LDT_limit = GET_LDT_LIMIT();
+
+ p_state->cpu_regs.TR_selector = GET_TR_SELECTOR();
+ p_state->cpu_regs.TR_base = GET_TR_BASE();
+ p_state->cpu_regs.TR_limit = GET_TR_LIMIT();
+ p_state->cpu_regs.TR_ar = c_getTR_AR();
+
+ /*
+ * Recover segment register details
+ */
+ p_state->cpu_regs.DS_selector = GET_DS_SELECTOR();
+ p_state->cpu_regs.DS_base = GET_DS_BASE();
+ p_state->cpu_regs.DS_limit = GET_DS_LIMIT();
+ p_state->cpu_regs.DS_ar = c_getDS_AR();
+
+ p_state->cpu_regs.ES_selector = GET_ES_SELECTOR();
+ p_state->cpu_regs.ES_base = GET_ES_BASE();
+ p_state->cpu_regs.ES_limit = GET_ES_LIMIT();
+ p_state->cpu_regs.ES_ar = c_getES_AR();
+
+ p_state->cpu_regs.SS_selector = GET_SS_SELECTOR();
+ p_state->cpu_regs.SS_base = GET_SS_BASE();
+ p_state->cpu_regs.SS_limit = GET_SS_LIMIT();
+ p_state->cpu_regs.SS_ar = c_getSS_AR();
+
+ p_state->cpu_regs.CS_selector = GET_CS_SELECTOR();
+ p_state->cpu_regs.CS_base = GET_CS_BASE();
+ p_state->cpu_regs.CS_limit = GET_CS_LIMIT();
+ p_state->cpu_regs.CS_ar = c_getCS_AR();
+
+ p_state->cpu_regs.FS_selector = GET_FS_SELECTOR();
+ p_state->cpu_regs.FS_base = GET_FS_BASE();
+ p_state->cpu_regs.FS_limit = GET_FS_LIMIT();
+ p_state->cpu_regs.FS_ar = c_getFS_AR();
+
+ p_state->cpu_regs.GS_selector = GET_GS_SELECTOR();
+ p_state->cpu_regs.GS_base = GET_GS_BASE();
+ p_state->cpu_regs.GS_limit = GET_GS_LIMIT();
+ p_state->cpu_regs.GS_ar = c_getGS_AR();
+
+ p_state->video_latches = Cpu.Video->GetVideolatches();
+
+ /* NPX state is transferred lazily; only copy it out if it has
+ already been made valid (see c_checkCpuNpxRegisters). */
+ p_state->NPX_valid = FALSE;
+
+ if ((p_current_state != (cpustate_t *)0) && p_current_state->NPX_valid)
+ {
+ p_state->NPX_regs.NPX_control = getNpxControlReg();
+ p_state->NPX_regs.NPX_status = getNpxStatusReg();
+ p_state->NPX_regs.NPX_tagword = getNpxTagwordReg();
+ getNpxStackRegs(&p_state->NPX_regs.NPX_ST);
+ p_state->NPX_valid = TRUE;
+ }
+ p_state->twenty_bit_wrap = (SasWrapMask == 0xFFFFF);
+ p_state->synch_index = ccpu_synch_count;
+}
+
+/*(
+============================ c_setCpuState =====================================
+PURPOSE:
+ Takes the saved CPU state from the passed state structure and
+ uses it to set the current state of the C CPU.
+===============================================================================
+)*/
+
GLOBAL void
c_setCpuState IFN1(
	cpustate_t *, p_new_state	/* (I) saved CPU state to load into the C CPU */
)
{
	c_setCPL(0);	/* Allow manipulation of IO flags */
			/* (real CPL is restored below, after EFLAGS) */

	/*
	 * Setup machine status word, privilege level and instruction
	 * pointer.
	 */
	MOV_CR(0,(IU32)p_new_state->cpu_regs.CR0);
	MOV_CR(2,(IU32)p_new_state->cpu_regs.PFLA);	/* page fault linear address */
	MOV_CR(3,(IU32)p_new_state->cpu_regs.PDBR);	/* page directory base */

	SET_EIP(p_new_state->cpu_regs.EIP);

	/*
	 * Setup general registers
	 */
	SET_EAX(p_new_state->cpu_regs.EAX);
	SET_EBX(p_new_state->cpu_regs.EBX);
	SET_ECX(p_new_state->cpu_regs.ECX);
	SET_EDX(p_new_state->cpu_regs.EDX);
	SET_ESP(p_new_state->cpu_regs.ESP);
	SET_EBP(p_new_state->cpu_regs.EBP);
	SET_ESI(p_new_state->cpu_regs.ESI);
	SET_EDI(p_new_state->cpu_regs.EDI);

	/*
	 * Setup processor status flags.
	 * Done while CPL==0 so privileged flags (eg IOPL/IF) can be written.
	 */
	c_setEFLAGS(p_new_state->cpu_regs.EFLAGS);

	SET_CPL(p_new_state->cpu_regs.CPL);	/* now restore the true CPL */

	/*
	 * Setup descriptor table registers.
	 */
	c_setGDT_BASE_LIMIT(p_new_state->cpu_regs.GDT_base, p_new_state->cpu_regs.GDT_limit);

	c_setIDT_BASE_LIMIT(p_new_state->cpu_regs.IDT_base, p_new_state->cpu_regs.IDT_limit);

	SET_LDT_SELECTOR(p_new_state->cpu_regs.LDT_selector);
	c_setLDT_BASE_LIMIT(p_new_state->cpu_regs.LDT_base, p_new_state->cpu_regs.LDT_limit);

	SET_TR_SELECTOR(p_new_state->cpu_regs.TR_selector);
	c_setTR_BASE_LIMIT_AR(p_new_state->cpu_regs.TR_base, p_new_state->cpu_regs.TR_limit, p_new_state->cpu_regs.TR_ar);

	/*
	 * Setup segment register details.  Selector and cached
	 * base/limit/access-rights are restored independently.
	 */
	SET_DS_SELECTOR(p_new_state->cpu_regs.DS_selector);
	c_setDS_BASE_LIMIT_AR(p_new_state->cpu_regs.DS_base, p_new_state->cpu_regs.DS_limit, p_new_state->cpu_regs.DS_ar);

	SET_ES_SELECTOR(p_new_state->cpu_regs.ES_selector);
	c_setES_BASE_LIMIT_AR(p_new_state->cpu_regs.ES_base, p_new_state->cpu_regs.ES_limit, p_new_state->cpu_regs.ES_ar);

	SET_SS_SELECTOR(p_new_state->cpu_regs.SS_selector);
	c_setSS_BASE_LIMIT_AR(p_new_state->cpu_regs.SS_base, p_new_state->cpu_regs.SS_limit, p_new_state->cpu_regs.SS_ar);

	SET_CS_SELECTOR(p_new_state->cpu_regs.CS_selector);
	c_setCS_BASE_LIMIT_AR(p_new_state->cpu_regs.CS_base, p_new_state->cpu_regs.CS_limit, p_new_state->cpu_regs.CS_ar);

	SET_FS_SELECTOR(p_new_state->cpu_regs.FS_selector);
	c_setFS_BASE_LIMIT_AR(p_new_state->cpu_regs.FS_base, p_new_state->cpu_regs.FS_limit, p_new_state->cpu_regs.FS_ar);

	SET_GS_SELECTOR(p_new_state->cpu_regs.GS_selector);
	c_setGS_BASE_LIMIT_AR(p_new_state->cpu_regs.GS_base, p_new_state->cpu_regs.GS_limit, p_new_state->cpu_regs.GS_ar);

	Cpu.Video->SetVideolatches(p_new_state->video_latches);

	/* The NPX registers are not loaded here, since the extraction
	 * from the EDL Cpu is expensive. Instead we note that we have
	 * not loaded them yet, and will obtain them (if needed) when
	 * the first NPX instruction is encountered.
	 * N.B. we need a pointer to this state structure so that we can
	 * update the NPX registers when the CCPU does require them.
	 */
	p_new_state->NPX_valid = FALSE;
	p_current_state = p_new_state;
	/* Restore the address wrap behaviour: 0xFFFFF emulates the
	 * 20-bit (A20 masked) wrap; -1 converts to an all-ones mask,
	 * ie no wrapping.
	 */
	if (p_new_state->twenty_bit_wrap)
		SasWrapMask = 0xFFFFF;
	else
		SasWrapMask = -1;
}
+
+#endif /* PIG */
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_intr.c b/private/mvdm/softpc.new/base/ccpu386/c_intr.c
new file mode 100644
index 000000000..f4084cfcc
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_intr.c
@@ -0,0 +1,515 @@
+/*[
+
+c_intr.c
+
+LOCAL CHAR SccsID[]="@(#)c_intr.c 1.21 03/07/95";
+
+Interrupt Support.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_intr.h>
+#include <c_xfer.h>
+#include <c_tsksw.h>
+#include <c_page.h>
+#include <c_mem.h>
+#include <ccpusas4.h>
+#include <ccpupig.h>
+#include <fault.h>
+
+#ifdef PIG
+#include <gdpvar.h>
+#endif
+
+/*
+ Prototype our internal functions.
+ */
+LOCAL ISM32 validate_int_dest
+
+IPT6(
+ IU16, vector,
+ BOOL, do_priv,
+ IU16 *, cs,
+ IU32 *, ip,
+ IU32 *, descr_addr,
+ ISM32 *, dest_type
+
+ );
+
+
+/*
+ =====================================================================
+ INTERNAL FUNCTIONS STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Validate int destination. Essentially decode int instruction. */
+/* Take #GP_INT(vector) if invalid. */
+/* Take #NP_INT(vector) if not present. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Returns the descriptor super type of the gate found in the IDT. */
LOCAL ISM32
validate_int_dest

IFN6(
   IU16, vector,	/* (I) vector to be checked */
   BOOL, do_priv,	/* (I) if true do privilege check */
   IU16 *, cs,	/* (O) segment of target address */
   IU32 *, ip,	/* (O) offset of target address */
   IU32 *, descr_addr,	/* (O) related descriptor memory address */
   ISM32 *, dest_type	/* (O) destination type */
   )


   {
   IU16 offset;
   IU8 AR;	/* access rights byte of the gate descriptor */
   ISM32 super;

   /* calc address within IDT (each IDT descriptor is 8 bytes) */
   offset = vector * 8;

   /* check within IDT - the whole 8-byte descriptor must fit */
   if ( offset + 7 > GET_IDT_LIMIT() )
      GP_INT(vector, FAULT_INT_DEST_NOT_IN_IDT);

   *descr_addr = GET_IDT_BASE() + offset;

   /* access rights live in byte 5 of the descriptor */
   AR = spr_read_byte((*descr_addr)+5);

   /* check type; operand size is set here so that any error code
      pushed by a later fault has the gate's size */
   switch ( super = descriptor_super_type((IU16)AR) )
      {
   case INTERRUPT_GATE:
   case TRAP_GATE:
      SET_OPERAND_SIZE(USE16);	/* 286-style 16-bit gate */
      break;

   case XTND_INTERRUPT_GATE:
   case XTND_TRAP_GATE:
      SET_OPERAND_SIZE(USE32);	/* 386-style 32-bit gate */
      break;

   case TASK_GATE:
      break;   /* ok */

   default:
      GP_INT(vector, FAULT_INT_DEST_BAD_SEG_TYPE);
      }

   /* access check requires CPL <= DPL */
   if ( do_priv && (GET_CPL() > GET_AR_DPL(AR)) )
      GP_INT(vector, FAULT_INT_DEST_ACCESS);

   /* gate must be present */
   if ( GET_AR_P(AR) == NOT_PRESENT )
      NP_INT(vector, FAULT_INT_DEST_NOTPRESENT);

   /* ok, get real destination from gate (selector in bytes 2-3) */
   *cs = spr_read_word((*descr_addr)+2);

   /* action gate type */
   if ( super == TASK_GATE )
      {
      /* Need to set operand size here so that any
       * error code is pushed with correct size.
       */
      switch (validate_task_dest(*cs, descr_addr))
	 {
      case BUSY_TSS:
      case AVAILABLE_TSS:
	 SET_OPERAND_SIZE(USE16);
	 break;
      case XTND_BUSY_TSS:
      case XTND_AVAILABLE_TSS:
	 SET_OPERAND_SIZE(USE32);
	 break;
	 }
      *dest_type = NEW_TASK;
      }
   else
      {
      /* INTERRUPT or TRAP GATE */

      /* offset: low word in bytes 0-1, high word (386 gates) in bytes 6-7 */
      *ip = (IU32)spr_read_word(*descr_addr);
      if ( super == XTND_INTERRUPT_GATE || super == XTND_TRAP_GATE )
	 *ip = (IU32)spr_read_word((*descr_addr)+6) << 16 | *ip;

      validate_gate_dest(INT_ID, *cs, descr_addr, dest_type);
      }

   return super;
   }
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Process interrupt. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Deliver an interrupt/exception through 'vector': build the return
 * frame (with optional error code) on the appropriate stack and
 * transfer control to the handler, in Real or Protected Mode.
 */
GLOBAL VOID
do_intrupt

IFN4(
   IU16, vector,	/* (I) interrupt vector to call */
   BOOL, priv_check,	/* (I) if true access check is needed */
   BOOL, has_error_code,	/* (I) if true needs error code pushing
				       on stack */
   IU16, error_code	/* (I) error code to be pushed */
   )


   {
   /* GLOBALS USED */
   /*    doing_contributory  cleared on success of interrupt */
   /*    doing_page_fault    cleared on success of interrupt */
   /*    doing_double_fault  cleared on success of interrupt */
   /*    doing_fault         indicates RF should be set in pushed
				flags, cleared on success */

   IU32 flags;	/* temp store for FLAGS register */
   IU32 ivt_addr;	/* address of ivt entry */

   IU16 new_cs;	/* The destination */
   IU32 new_ip;

   IU32 cs_descr_addr;	/* code segment descriptor address */
   CPU_DESCR cs_entry;	/* code segment descriptor entry */

   ISM32 dest_type;	/* category for destination */
   ISM32 super;	/* super type of destination */
   IU32 dpl;	/* new privilege level (if used) */

   ISM32 stk_sz;	/* space (in bytes) reqd on stack */
			/* NOTE(review): counted in NR_ITEMS_* stack
			   items below, not bytes - confirm comment */
   IU16 new_ss;	/* The new stack */
   IU32 new_sp;

   IU32 ss_descr_addr;	/* stack segment descriptor address */
   CPU_DESCR ss_entry;	/* stack segment descriptor entry */

   IU32 old_ss;	/* Variables used while making stack */
   IU32 old_sp;

   if ( GET_PE() == 0 )
      {
      /* Real Mode */

      /* must be able to push FLAGS:CS:IP */
      validate_stack_space(USE_SP, (ISM32)NR_ITEMS_3);

      /* get new destination: real mode IVT entry is IP:CS (4 bytes) */
      ivt_addr = (IU32)vector * 4;
      new_ip = (IU32)phy_read_word(ivt_addr);
      new_cs = phy_read_word(ivt_addr+2);

#ifdef TAKE_REAL_MODE_LIMIT_FAULT
      /*
       * In real mode, there is still an IP limit check. The new IP is
       * compared against the last CS limit from when the program was last
       * in protected mode (or 64K if it never was). For us, this is stored
       * in the CS limit field. (cf i486PRM page 22-4)
       */

      if ( new_ip > GET_CS_LIMIT() )
	 GP((IU16)0, FAULT_INTR_RM_CS_LIMIT);

#else /* TAKE_REAL_MODE_LIMIT_FAULT */

      /* The Soft486 EDL CPU does not take Real Mode limit failures.
       * Since the Ccpu486 is used as a "reference" cpu we wish it
       * to behave as a C version of the EDL Cpu rather than as a C
       * version of a i486.
       */

#endif /* TAKE_REAL_MODE_LIMIT_FAULT */

      /* ALL SYSTEMS GO */

      flags = c_getEFLAGS();

      if ( doing_fault )
	 {
#ifdef PIG
	 if (GLOBAL_RF_OnXcptnWanted)
	     flags |= BIT16_MASK;	/* SET RF bit */
#else
	 flags |= BIT16_MASK;	/* SET RF bit */
#endif
	 }

#ifdef PIG
      /* NOTE(review): 'vector < 31' excludes vector 31 - confirm
       * the intended range matches NO_FLAGS_EXCEPTION_MASK (same
       * test appears three times in this routine). */
      if (vector < 31 && (((1 << vector) & NO_FLAGS_EXCEPTION_MASK) != 0))
	 spush_flags(flags);
      else
#endif /* PIG */
	 spush(flags);

      /* push return address CS:IP */
      spush16((IU32)GET_CS_SELECTOR());
      spush((IU32)GET_EIP());

      load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
      SET_EIP(new_ip);
      SET_IF(0);	/* real mode INT always clears IF and TF */
      SET_TF(0);
      }
   else
      {
      /* Protected Mode */

      super = validate_int_dest(vector, priv_check, &new_cs, &new_ip,
				&cs_descr_addr, &dest_type);

      /* check type of indirect target */
      switch ( dest_type )
	 {
      case NEW_TASK:
	 switch_tasks(NOT_RETURNING, NESTING, new_cs, cs_descr_addr, GET_EIP());

	 /* save error code on new stack */
	 if ( has_error_code )
	    {
	    validate_stack_space(USE_SP, (ISM32)NR_ITEMS_1);
	    spush((IU32)error_code);
	    }

	 /* limit check new IP (now in new task) */
	 if ( GET_EIP() > GET_CS_LIMIT() )
	    GP((IU16)0, FAULT_INTR_TASK_CS_LIMIT);

	 break;

      case SAME_LEVEL:
	 read_descriptor_linear(cs_descr_addr, &cs_entry);

	 /* stamp new selector with CPL */
	 SET_SELECTOR_RPL(new_cs, GET_CPL());

	 /* check room for return address CS:(E)IP:(E)FLAGS:(Error) */
	 if ( has_error_code )
	    stk_sz = NR_ITEMS_4;
	 else
	    stk_sz = NR_ITEMS_3;
	 validate_stack_space(USE_SP, stk_sz);

	 if ( new_ip > cs_entry.limit )
	    GP((IU16)0, FAULT_INTR_PM_CS_LIMIT_1);

	 /* ALL SYSTEMS GO */

	 /* push flags */
	 flags = c_getEFLAGS();

	 if ( doing_fault )
	    {
#ifdef PIG
	    if (GLOBAL_RF_OnXcptnWanted)
	        flags |= BIT16_MASK;	/* SET RF bit */
#else
	    flags |= BIT16_MASK;	/* SET RF bit */
#endif
	    }

#ifdef PIG
	 if (vector < 31 && (((1 << vector) & NO_FLAGS_EXCEPTION_MASK) != 0))
	    spush_flags(flags);
	 else
#endif /* PIG */
	    spush(flags);


	 /* push return address */
	 spush16((IU32)GET_CS_SELECTOR());
	 spush((IU32)GET_EIP());

	 /* finally push error code if required */
	 if ( has_error_code )
	    {
	    spush((IU32)error_code);
	    }

	 load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
	 SET_EIP(new_ip);

	 /* finally action IF, TF and NT flags
	    (only interrupt gates clear IF; trap gates leave it set) */
	 if ((super == INTERRUPT_GATE) || (super == XTND_INTERRUPT_GATE))
	    SET_IF(0);
	 SET_TF(0);
	 SET_NT(0);
	 break;

      default:   /* MORE PRIVILEGE(0|1|2) */
	 read_descriptor_linear(cs_descr_addr, &cs_entry);

	 /* dest_type encodes the new (more privileged) level here */
	 dpl = dest_type;

	 /* stamp new selector with new CPL */
	 SET_SELECTOR_RPL(new_cs, dpl);

	 /* find out about new stack */
	 get_stack_selector_from_TSS(dpl, &new_ss, &new_sp);

	 /* check new stack selector */
	 validate_SS_on_stack_change(dpl, new_ss,
				     &ss_descr_addr, &ss_entry);

	 /* check room for (GS:FS:DS:ES)
			   SS:(E)SP
			   (E)FLAGS
			   CS:(E)IP
			   (ERROR) */
	 if ( GET_VM() == 1 )
	    stk_sz = NR_ITEMS_9;	/* V86: segment regs saved too */
	 else
	    stk_sz = NR_ITEMS_5;

	 if ( has_error_code )
	    stk_sz = stk_sz + NR_ITEMS_1;

	 validate_new_stack_space(stk_sz, new_sp, &ss_entry, new_ss);

	 if ( new_ip > cs_entry.limit )
	    GP((IU16)0, FAULT_INTR_PM_CS_LIMIT_2);

	 /* ALL SYSTEMS GO */

	 flags = c_getEFLAGS();

	 if ( doing_fault )
	    {
#ifdef PIG
	    if (GLOBAL_RF_OnXcptnWanted)
	        flags |= BIT16_MASK;	/* SET RF bit */
#else
	    flags |= BIT16_MASK;	/* SET RF bit */
#endif
	    }

	 /* flags were captured above, so VM is cleared in the saved
	    copy's image only after the snapshot */
	 SET_CPL(dpl);
	 SET_VM(0);

	 /* update stack segment */
	 old_ss = (IU32)GET_SS_SELECTOR();
	 old_sp = GET_ESP();

	 load_SS_cache(new_ss, ss_descr_addr, &ss_entry);
	 set_current_SP(new_sp);

	 /*
	    FORM NEW STACK, VIZ

			   ==============
	    new SS:IP ->   | error code |
			   | old IP     |
			   | old CS     |
			   | FLAGS      |
			   | old SP     |
			   | old SS     |
			   ==============
			   | old ES     |
			   | old DS     |
			   | old FS     |
			   | old GS     |
			   ==============
	  */

	 if ( stk_sz >= NR_ITEMS_9 )
	    {
	    /* interrupt from V86 mode */
	    spush16((IU32)GET_GS_SELECTOR());
	    spush16((IU32)GET_FS_SELECTOR());
	    spush16((IU32)GET_DS_SELECTOR());
	    spush16((IU32)GET_ES_SELECTOR());

	    /* invalidate data segments */
	    load_data_seg(GS_REG, (IU16)0);
	    load_data_seg(FS_REG, (IU16)0);
	    load_data_seg(DS_REG, (IU16)0);
	    load_data_seg(ES_REG, (IU16)0);
	    }

	 /* push old stack values */
	 spush16(old_ss);
	 spush(old_sp);

	 /* push old flags */
#ifdef PIG
	 if (vector < 31 && (((1 << vector) & NO_FLAGS_EXCEPTION_MASK) != 0))
	    spush_flags(flags);
	 else
#endif /* PIG */
	    spush(flags);

	 /* push return address */
	 spush16((IU32)GET_CS_SELECTOR());
	 spush((IU32)GET_EIP());

	 /* finally push error code if required */
	 if ( has_error_code )
	    {
	    spush((IU32)error_code);
	    }

	 /* update code segment */
	 load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
	 SET_EIP(new_ip);

	 /* finally action IF, TF and NT flags */
	 if ((super == INTERRUPT_GATE) || (super == XTND_INTERRUPT_GATE))
	    SET_IF(0);
	 SET_TF(0);
	 SET_NT(0);
	 break;
	 }

      }
   EXT = INTERNAL;	/* subsequent faults are internally generated */
#ifdef PIG
   save_last_inst_details("do_intr");
   pig_cpu_action = CHECK_ALL;
   /* If the destination is going to page fault, or need
    * accessing, then the EDL CPU will do so before issuing
    * the pig synch. We use the dasm386 decode to prefetch
    * a single instruction which mimics the EDL Cpu's behaviour
    * when close to a page boundary.
    */
   prefetch_1_instruction();   /* Will PF if destination not present */
   ccpu_synch_count++;
#else /* !PIG */
#ifdef SYNCH_TIMERS
   if (doing_fault)
      {
      extern void SynchTick IPT0();
      SynchTick();
      }
#endif /* SYNCH_TIMERS */
#endif /* PIG */
   /* mark successful end to interrupt */
   doing_fault = FALSE;
   doing_contributory = FALSE;
   doing_page_fault = FALSE;
   doing_double_fault = FALSE;
#ifdef PIG
   c_cpu_unsimulate();
#endif /* PIG */
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_intr.h b/private/mvdm/softpc.new/base/ccpu386/c_intr.h
new file mode 100644
index 000000000..8a3310e8e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_intr.h
@@ -0,0 +1,20 @@
+/*[
+
+c_intr.h
+
+LOCAL CHAR SccsID[]="@(#)c_intr.h 1.4 02/09/94";
+
+Interrupt Support.
+------------------
+
+]*/
+
+IMPORT VOID do_intrupt
+
+IPT4(
+ IU16, vector,
+ BOOL, priv_check,
+ BOOL, has_error_code,
+ IU16, error_code
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_main.c b/private/mvdm/softpc.new/base/ccpu386/c_main.c
new file mode 100644
index 000000000..24b5582a5
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_main.c
@@ -0,0 +1,4940 @@
+/*[
+
+c_main.c
+
+LOCAL CHAR SccsID[]="@(#)c_main.c 1.96 04/11/95";
+
+Main routine for CPU emulator.
+------------------------------
+
+All instruction decoding and addressing is controlled here.
+Actual worker routines are spun off elsewhere.
+
+]*/
+
+#include <insignia.h>
+#include <host_def.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+
+#include <xt.h> /* needed by bios.h */
+#include <sas.h> /* need memory(M) */
+#include <ccpusas4.h> /* the cpu internal sas bits */
+#ifdef PIG
+#include <Cpu_c.h> /* Intel pointer manipulation macros */
+#endif /* PIG */
+#include CpuH
+/* #include "event.h" */ /* Event Manager */
+#include <bios.h> /* need access to bop */
+#include <debug.h>
+#include <config.h>
+#ifdef NTVDM
+#include <ntthread.h>
+#endif
+
+#include <c_main.h> /* C CPU definitions-interfaces */
+#include <c_page.h> /* Paging Interface */
+#include <c_mem.h> /* CPU - Memory Interface */
+#include <c_intr.h> /* Interrupt Interface */
+#include <c_debug.h> /* Debug Regs and Breakpoint Interface */
+#include <c_oprnd.h> /* Operand decoding functions(macros) */
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_page.h>
+#include <c_intr.h>
+#include <c_debug.h>
+#include <c_oprnd.h>
+#include <c_bsic.h>
+#include <ccpupig.h>
+#include <fault.h>
+
+#include <aaa.h> /* The workers */
+#include <aad.h> /* ... */
+#include <aam.h> /* ... */
+#include <aas.h> /* ... */
+#include <adc.h> /* ... */
+#include <add.h> /* ... */
+#include <and.h> /* ... */
+#include <arpl.h> /* ... */
+#include <bound.h> /* ... */
+#include <bsf.h> /* ... */
+#include <bsr.h> /* ... */
+#include <bt.h> /* ... */
+#include <btc.h> /* ... */
+#include <btr.h> /* ... */
+#include <bts.h> /* ... */
+#include <call.h> /* ... */
+#include <cbw.h> /* ... */
+#include <cdq.h> /* ... */
+#include <clc.h> /* ... */
+#include <cld.h> /* ... */
+#include <cli.h> /* ... */
+#include <clts.h> /* ... */
+#include <cmc.h> /* ... */
+#include <cmp.h> /* CMP, CMPS, SCAS */
+#include <cwd.h> /* ... */
+#include <cwde.h> /* ... */
+#include <daa.h> /* ... */
+#include <das.h> /* ... */
+#include <dec.h> /* ... */
+#include <div.h> /* ... */
+#include <enter.h> /* ... */
+#include <idiv.h> /* ... */
+#include <imul.h> /* ... */
+#include <in.h> /* ... */
+#include <inc.h> /* ... */
+#include <into.h> /* ... */
+#include <intx.h> /* INT, INT 3 */
+#include <iret.h> /* ... */
+#include <jcxz.h> /* JCXZ, JECXZ */
+#include <jmp.h> /* ... */
+#include <jxx.h> /* JB, JBE, JL, JLE, JNB, JNBE, JNL, JNLE, */
+ /* JNO, JNP, JNS, JNZ, JO, JP, JS, JZ */
+#include <lahf.h> /* ... */
+#include <lar.h> /* ... */
+#include <lea.h> /* ... */
+#include <leave.h> /* ... */
+#include <lgdt.h> /* ... */
+#include <lidt.h> /* ... */
+#include <lldt.h> /* ... */
+#include <lmsw.h> /* ... */
+#include <loopxx.h> /* LOOP, LOOPE, LOOPNE */
+#include <lsl.h> /* ... */
+#include <ltr.h> /* ... */
+#include <lxs.h> /* LDS, LES, LFS, LGS, LSS */
+#include <mov.h> /* LODS, MOV, MOVZX, MOVS, STOS */
+#include <movsx.h> /* ... */
+#include <mul.h> /* ... */
+#include <neg.h> /* ... */
+#include <nop.h> /* ... */
+#include <not.h> /* ... */
+#include <out.h> /* ... */
+#include <or.h> /* ... */
+#include <pop.h> /* ... */
+#include <popa.h> /* ... */
+#include <popf.h> /* ... */
+#include <push.h> /* ... */
+#include <pusha.h> /* ... */
+#include <pushf.h> /* ... */
+#include <rcl.h> /* ... */
+#include <rcr.h> /* ... */
+#include <ret.h> /* ... */
+#include <rol.h> /* ... */
+#include <ror.h> /* ... */
+#include <rsrvd.h> /* ... */
+#include <sahf.h> /* ... */
+#include <sar.h> /* ... */
+#include <sbb.h> /* ... */
+#include <setxx.h> /* SETB, SETBE, SETL, SETLE, SETNB, SETNBE, */
+ /* SETNL, SETNLE, SETNO, SETNP, SETNS, SETNZ, */
+ /* SETO, SETP, SETS, SETZ */
+#include <sgdt.h> /* ... */
+#include <shl.h> /* ... */
+#include <shld.h> /* ... */
+#include <shr.h> /* ... */
+#include <shrd.h> /* ... */
+#include <sidt.h> /* ... */
+#include <sldt.h> /* ... */
+#include <smsw.h> /* ... */
+#include <stc.h> /* ... */
+#include <std.h> /* ... */
+#include <sti.h> /* ... */
+#include <str.h> /* ... */
+#include <sub.h> /* ... */
+#include <test.h> /* ... */
+#include <verr.h> /* ... */
+#include <verw.h> /* ... */
+#include <wait.h> /* ... */
+#include <xchg.h> /* ... */
+#include <xlat.h> /* ... */
+#include <xor.h> /* ... */
+#include <zfrsrvd.h> /* ... */
+
+#ifdef CPU_486
+#include <bswap.h> /* ... */
+#include <cmpxchg.h> /* ... */
+#include <invd.h> /* ... */
+#include <invlpg.h> /* ... */
+#include <wbinvd.h> /* ... */
+#include <xadd.h> /* ... */
+#endif /* CPU_486 */
+
+#define FIX_BT_BUG /* Of course we want the bug fixed! */
+
+#define SYNCH_BOTH_WAYS /* Do a PIG_SYNCH() on not-taken conditionals as well */
+
+/*
+ Types and constants local to this module.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+typedef union
+ {
+ IU32 sng; /* Single Part Operand */
+ IU32 mlt[2]; /* Multiple (two) Part Operand */
+ DOUBLE flt; /* Floating Point Operand */
+ IU8 npxbuff[108];
+ } OPERAND;
+
+/*
+ The allowable types of repeat prefix.
+ */
+#define REP_CLR (IU8)0
+#define REP_NE (IU8)1
+#define REP_E (IU8)2
+
+/*
+ Offsets to Low byte, High byte and Word parts of Double Word Regs.
+ */
+#ifdef LITTLEND
+
+#define L_OFF 0
+#define H_OFF 1
+#define X_OFF 0
+
+#else /* BIGEND */
+
+#define L_OFF 3
+#define H_OFF 2
+#define X_OFF 2
+
+#endif /* LITTLEND */
+
+/* CPU hardware interrupt definitions */
+#define CPU_HW_INT_MASK (1 << 0)
+
+#ifdef SFELLOW
+ /* for raising NPX interrupt */
+#define IRQ5_SLAVE_PIC 5
+#endif /* SFELLOW */
+
+/* CPU hardware interrupt definitions */
+#define CPU_HW_INT_MASK (1 << 0)
+
+/* Masks for external CPU events. */
+#define CPU_SIGIO_EXCEPTION_MASK (1 << 12)
+#define CPU_SAD_EXCEPTION_MASK (1 << 13)
+#define CPU_RESET_EXCEPTION_MASK (1 << 14)
+#define CPU_SIGALRM_EXCEPTION_MASK (1 << 15)
+#ifdef SFELLOW
+#define CPU_HW_NPX_INT_MASK (1 << 16)
+#endif /* SFELLOW */
+
+LOCAL IU16 cpu_hw_interrupt_number;
+#if defined(SFELLOW)
+extern IU32 cpu_interrupt_map ;
+#else
+LOCAL IUM32 cpu_interrupt_map = 0;
+#endif /* SFELLOW */
+
+
+GLOBAL IBOOL took_relative_jump;
+extern IBOOL NpxIntrNeeded;
+GLOBAL IBOOL took_absolute_toc;
+LOCAL IBOOL single_instruction_delay ;
+LOCAL IBOOL single_instruction_delay_enable ;
+
+/*
   Define Maximum valid segment register in a 3-bit 'reg' field.
+ */
+#define MAX_VALID_SEG 5
+
+/*
+ Define lowest modRM for register (rather than memory) addressing.
+ */
+#define LOWEST_REG_MODRM 0xc0
+
+GLOBAL VOID clear_any_thingies IFN0()
+{
+ cpu_interrupt_map &= ~CPU_SIGALRM_EXCEPTION_MASK;
+}
+
+
+/*
+ Prototype our internal functions.
+ */
+LOCAL VOID ccpu
+
+IPT1(
+ ISM32, single_step
+
+ );
+
+LOCAL VOID check_io_permission_map IPT2(IU32, port_addr, IUM8, port_width);
+
+/*
+ FRIG for delayed interrupts to *not* occur when IO registers
+ are accessed from our non CPU C code.
+ */
+ISM32 in_C;
+
+LOCAL BOOL quick_mode = FALSE; /* true if no special processing (trap
+ flag, interrupts, yoda...) is needed
+ between instructions. All flow of
+ control insts. and I/O insts. force
+ an exit from quick mode. IE. linear
+ sequences of CPU functions should
+ normally run in quick mode. */
+
+
+#ifdef PIG
+
+/* We must delay the actual synch (i.e. the c_cpu_unsimulate)
+ * until after processing any trap/breakpoint stuff.
+ */
+#define PIG_SYNCH(action) \
+ SYNCH_TICK(); \
+ if (ccpu_pig_enabled) \
+ { \
+ pig_cpu_action = (action); \
+ quick_mode = FALSE; \
+ pig_synch_required = TRUE; \
+ CANCEL_HOST_IP(); \
+ }
+
+
+LOCAL IBOOL pig_synch_required = FALSE; /* This indicates that the current
+ * instruction needs a pig synch,
+ * and after trap/breakpoint
+ * processing we must return to
+ * the pigger.
+ */
+#else
+
+#define PIG_SYNCH(action) \
+ SYNCH_TICK(); \
+ /* No pig operations */
+
+#endif /* PIG */
+/*
+ Recursive CPU variables. Exception Handling.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#define FRAMES 9
+
+/* keep track of each CPU recursion */
+GLOBAL IS32 simulate_level = 0;
+LOCAL jmp_buf longjmp_env_stack[FRAMES];
+
+/* each level has somewhere for exception processing to bail out to */
+LOCAL jmp_buf next_inst[FRAMES];
+
+
+/* When Pigging we save each opcode byte in the last_inst record.
+ * We must check the prefix length so that we dont overflow
+ * our buffer.
+ */
+#ifdef PIG
+LOCAL int prefix_length = 0;
+#define CHECK_PREFIX_LENGTH() \
+ if (++prefix_length >= MAX_INTEL_PREFIX) \
+ Int6();
+#else /* !PIG */
+#define CHECK_PREFIX_LENGTH()
+#endif /* PIG */
+
+/*
+ The emulation register set.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+GLOBAL SEGMENT_REGISTER CCPU_SR[6]; /* Segment Registers */
+
+GLOBAL IU32 CCPU_TR[8]; /* Test Registers */
+
+GLOBAL IU32 CCPU_DR[8]; /* Debug Registers */
+
+GLOBAL IU32 CCPU_CR[4]; /* Control Registers */
+
+GLOBAL IU32 CCPU_GR[8]; /* Double Word General Registers */
+
+/*
+ * WARNING: in the initialisation below, (IU8 *) casts are used
+ * to satify dominatrix compilers that will not allow the use of
+ * IHPE casts for pointer types _in initialisation_.
+ */
+GLOBAL IU16 *CCPU_WR[8] = /* Pointers to the Word Registers */
+ {
+ (IU16 *)((IU8 *)&CCPU_GR[0] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[1] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[2] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[3] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[4] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[5] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[6] + X_OFF),
+ (IU16 *)((IU8 *)&CCPU_GR[7] + X_OFF)
+ };
+
+GLOBAL IU8 *CCPU_BR[8] = /* Pointers to the Byte Registers */
+ {
+ (IU8 *)((IU8 *)&CCPU_GR[0] + L_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[1] + L_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[2] + L_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[3] + L_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[0] + H_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[1] + H_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[2] + H_OFF),
+ (IU8 *)((IU8 *)&CCPU_GR[3] + H_OFF)
+ };
+
+GLOBAL IU32 CCPU_IP; /* The Instruction Pointer */
+GLOBAL SYSTEM_TABLE_ADDRESS_REGISTER CCPU_STAR[2]; /* GDTR and IDTR */
+
+GLOBAL SYSTEM_ADDRESS_REGISTER CCPU_SAR[2]; /* LDTR and TR */
+
+GLOBAL IU32 CCPU_CPL; /* Current Privilege Level */
+
+GLOBAL IU32 CCPU_FLAGS[32]; /* The flags. (EFLAGS) */
+
+ /* We allocate one integer per bit posn, multiple
+ bit fields are aligned to the least significant
+ posn. hence:-
+ CF = 0 PF = 2 AF = 4 ZF = 6
+ SF = 7 TF = 8 IF = 9 DF = 10
+ OF = 11 IOPL = 12 NT = 14 RF = 16
+ VM = 17 AC = 18 */
+
+
+GLOBAL IU32 CCPU_MODE[3]; /* Current Operating Mode */
+
+ /* We allocate one integer per modal condition, as follows:-
+ [0] = current operand size (0=16-bit, 1=32-bit)
+ [1] = current address size (0=16-bit, 1=32-bit)
+ [2] = 'POP' displacement. (0=None,
+ 2=Pop word,
+ 4=Pop double word)
+ Set by POP used by [ESP] addressing modes. */
+
+/*
+ Trap Flag Support.
+
+ Basically if the trap flag is set before an instruction, then when
+ the instruction has been executed a trap is taken. This is why
+ instructions which set the trap flag have a one instruction delay
+ (or apparent one instruction delay) before a trap is first taken.
+ However INT's will clear the trap flag and clear any pending trap
+ at the end of the INT.
+ */
+LOCAL IU32 start_trap;
+
+/*
+ Host Pointer to Instruction Start.
+ (Used in Host IP optimisation)
+ */
+LOCAL IU8 *p_start;
+
+/*
+ Host pointer to point to where host may safely read instruction
+ stream bytes.
+ (Used in Host IP optimisation)
+ */
+GLOBAL IU8 *pg_end;
+
+/*
+ Flag support.
+ */
+GLOBAL IU8 pf_table[] =
+ {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+ };
+
+/*
+ CPU Heart Beat. A counter is decremented if not zero, and if it becomes
+ zero, this means that an external routine requires an event to occur.
+ The event handling is done through the quick event manager, all we need
+ to do is count down and then call the manager when we get to zero. This
+ mechanism is used to simulate an accurate micro-second
+ timer.
+ */
+LOCAL IU32 cpu_heartbeat;
+GLOBAL IUH PigSynchCount = 0;
+
+IMPORT VOID dispatch_q_event();
+
+#ifndef SFELLOW
+#ifdef SYNCH_TIMERS
+
+#define SYNCH_TICK() \
+ { \
+ PigSynchCount += 1; \
+ if (cpu_heartbeat != 0) \
+ { \
+ if ((--cpu_heartbeat) == 0) \
+ { \
+ dispatch_q_event(); \
+ quick_mode = FALSE; \
+ } \
+ } \
+ }
+
+#define QUICK_EVENT_TICK() /* Nothing */
+
/* Force an exit from quick mode and account one synch tick
 * (SYNCH_TICK also drives the cpu_heartbeat countdown and may
 * dispatch a quick event).
 */
GLOBAL void SynchTick IFN0()
{
	quick_mode = FALSE;
	SYNCH_TICK();
}
+
+#else /* !SYNCH_TIMERS */
+
+#define SYNCH_TICK() /* Nothing */
+
+#define QUICK_EVENT_TICK() \
+ { \
+ if (cpu_heartbeat != 0) \
+ { \
+ if ((--cpu_heartbeat) == 0) { \
+ dispatch_q_event(); \
+ quick_mode = FALSE; \
+ } \
+ } \
+ }
+
+#endif /* SYNCH_TIMERS */
+#else /* SFELLOW */
+
+extern IBOOL qEventsToDo;
+extern IBOOL checkForQEvent IPT0();
+
+#define SYNCH_TICK()
+
+#define QUICK_EVENT_TICK() \
+ { \
+ if (qEventsToDo) \
+ { \
+ if (checkForQEvent()) \
+ { \
+ dispatch_q_event(); \
+ quick_mode = FALSE; \
+ } \
+ } \
+ }
+
+#ifdef host_timer_event
+#undef host_timer_event
+#endif
+
+#define host_timer_event()
+#endif /* SFELLOW */
+
+#ifdef SFELLOW
+extern int ica_intack IPT0();
+extern int VectorBase8259Slave IPT0();
+#if !defined(PROD)
+IMPORT IBOOL sf_debug_char_waiting();
+#endif /* !PROD */
+#endif /* SFELLOW */
+
+
+/* debugging stuff */
+IMPORT int do_condition_checks;
+IMPORT void check_I();
+
+/*
+ Define macros which allow Intel and host IP formats to be maintained
+ seperately. This is an 'unclean' implementation but does give a
+ significant performance boost. Specifically it means during the
+ decode of one Intel instruction we can use a host pointer into
+ memory and avoid incrementing the Intel IP on a byte by byte basis.
+ */
+
+/*
+ * SasWrapMask
+ */
+
+GLOBAL PHY_ADDR SasWrapMask = 0xfffff;
+
+/* update Intel format EIP from host format IP
+ * Note we only mask to 16 bits if the original EIP was 16bits so that
+ * pigger scripts that result in very large EIP values pig correctly.
+ */
+#define UPDATE_INTEL_IP(x) \
+ { int len = DIFF_INST_BYTE(x, p_start); \
+ IU32 mask = 0xFFFFFFFF; \
+ IU32 oldEIP = GET_EIP(); \
+ if ((oldEIP < 0x10000) && (GET_CS_AR_X() == USE16)) \
+ mask = 0xFFFF; \
+ SET_EIP((oldEIP + len) & mask); \
+ }
+
+/* update Intel format EIP from host format IP (mask if 16 operand) */
+#define UPDATE_INTEL_IP_USE_OP_SIZE(x) \
+ if ( GET_OPERAND_SIZE() == USE16 ) \
+ SET_EIP(GET_EIP() + DIFF_INST_BYTE(x, p_start) & WORD_MASK);\
+ else \
+ SET_EIP(GET_EIP() + DIFF_INST_BYTE(x, p_start));
+
+/* mark host format IP as inoperative */
+#define CANCEL_HOST_IP() \
+ quick_mode = FALSE; \
+ p_start = p = (IU8 *)0;
+
+/* setup host format IP from Intel format IP */
+/* and set up end of page marker */
+#ifdef PIG
+#define SETUP_HOST_IP(x) \
+ ip_phy_addr = usr_chk_byte(GET_CS_BASE() + GET_EIP(), PG_R) & SasWrapMask; \
+ x = Sas.SasPtrToPhysAddrByte(ip_phy_addr); \
+ pg_end = AddCpuPtrLS8(CeilingIntelPageLS8(x), 1);
+#else /* !PIG */
+GLOBAL UTINY *CCPU_M;
+#ifdef BACK_M
+#define SETUP_HOST_IP(x) \
+ ip_phy_addr = usr_chk_byte(GET_CS_BASE() + GET_EIP(), PG_R) & \
+ SasWrapMask; \
+ x = &CCPU_M[-ip_phy_addr]; \
+ ip_phy_addr = (ip_phy_addr & ~0xfff) + 0x1000; \
+ pg_end = &CCPU_M[-ip_phy_addr];
+#else
+#define SETUP_HOST_IP(x) \
+ ip_phy_addr = usr_chk_byte(GET_CS_BASE() + GET_EIP(), PG_R) & \
+ SasWrapMask; \
+ x = &CCPU_M[ip_phy_addr]; \
+ ip_phy_addr = (ip_phy_addr & ~0xfff) + 0x1000; \
+ pg_end = &CCPU_M[ip_phy_addr];
+#endif /* BACK_M */
+#endif /* PIG */
+
+GLOBAL INT m_seg[3]; /* Memory Operand segment reg. index. */
+GLOBAL ULONG m_off[3]; /* Memory Operand offset. */
+GLOBAL ULONG m_la[3]; /* Memory Operand Linear Addr. */
+GLOBAL ULONG m_pa[3]; /* Memory Operand Physical Addr. */
+GLOBAL UTINY modRM; /* The modRM byte. */
+GLOBAL UTINY segment_override; /* Segment Prefix for current inst. */
+GLOBAL UTINY *p; /* Pntr. to Intel Opcode Stream. */
+GLOBAL BOOL m_isreg[3]; /* Memory Operand Register(true)/
+ Memory(false) indicator */
+GLOBAL OPERAND ops[3]; /* Inst. Operands. */
+GLOBAL ULONG save_id[3]; /* Saved state for Inst. Operands. */
+GLOBAL ULONG m_la2[3]; /* Memory Operand(2) Linear Addr. */
+GLOBAL ULONG m_pa2[3]; /* Memory Operand(2) Physical Addr. */
+
+#if defined(PIG) && defined(SFELLOW)
+/*
+ * memory-mapped I/O information. Counts number of memory-mapped inputs and
+ * outputs since the last pig synch.
+ */
+GLOBAL struct pig_mmio_info pig_mmio_info;
+
+#endif /* PIG && SFELLOW */
+
+extern void InitNpx IPT1(IBOOL, disable);
+
+/*
+ =====================================================================
+ INTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+/*
+ * invalidFunction
+ *
+ * This function will get called if we try to call through the wrong instruction
+ * pointer.
+ */
+
+LOCAL VOID
+invalidFunction IFN0()
+{
+ always_trace0("Invalid Instruction Function Pointer");
+ force_yoda();
+}
+
+/*
+ * note_486_instruction
+ *
+ * This function will get called if we execute a 486-only instruction
+ */
+
+GLOBAL VOID
+note_486_instruction IFN1(char *, text)
+{
+ SAVED IBOOL first = TRUE;
+ SAVED IBOOL want_yoda;
+ SAVED IBOOL want_trace;
+
+ if (first)
+ {
+ char *env = getenv("NOTE_486_INSTRUCTION");
+
+ if (env)
+ {
+ want_yoda = FALSE;
+ want_trace = TRUE;
+ if (strcmp(env, "YODA") == 0)
+ {
+ want_yoda = TRUE;
+ want_trace = TRUE;
+ }
+ if (strcmp(env, "FALSE") == 0)
+ want_trace = FALSE;
+ if (strcmp(env, "TRUE") == 0)
+ want_trace = TRUE;
+ }
+ first = FALSE;
+ }
+ if (want_trace)
+ always_trace0(text);
+ if (want_yoda)
+ force_yoda();
+}
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Internal entry point to CPU. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+ccpu
+
+IFN1(
+ ISM32, single_step
+ )
+
+
+ {
+ /* Decoding variables */
+ IU8 opcode; /* Last Opcode Byte Read. */
+
+ /*
+ * A set of function pointers used for pointing to the instruction
+ * emmulation function for the current instruction. We have different
+ * ones because they need to be of different types.
+ *
+ * The name encoding uses 32, 16, or 8 for the size of the parameters,
+ * preceded by a p if it is a pointer. If the parameter string is preceded
+ * by a 2, then this is the second function required by some instructions.
+ *
+ * For safety, these are all set to point at invalidFunction() at the start
+ * of each instruction.
+ */
+
+ VOID (*inst32) IPT1(IU32, op1);
+ VOID (*instp32) IPT1(IU32 *, pop1);
+ VOID (*instp328) IPT2(IU32 *, pop1, IUM8, op_sz);
+ VOID (*instp3232) IPT2(IU32 *, pop1, IU32, op2);
+ VOID (*instp32p32) IPT2(IU32 *, pop1, IU32 *, pop2);
+ VOID (*inst32328) IPT3(IU32, op1, IU32, op2, IUM8, op_sz);
+ VOID (*instp32328) IPT3(IU32 *, pop1, IU32, op2, IUM8, op_sz);
+ VOID (*instp3232328) IPT4(IU32 *, pop1, IU32, op2, IU32, op3, IUM8, op_sz);
+
+ VOID (*inst232) IPT1(IU32, op1);
+ VOID (*inst2p32) IPT1(IU32 *, pop1);
+ VOID (*inst2p3232) IPT2(IU32 *, pop1, IU32, op2);
+
+ /* Operand State variables */
+
+ /* Prefix handling variables */
+ IU8 repeat; /* Repeat Prefix for current inst. */
+ IU32 rep_count; /* Repeat Count for string insts. */
+
+ /* General CPU variables */
+ IU32 ip_phy_addr; /* Used when setting up IP */
+
+ /* Working variables */
+ IU32 immed; /* For immediate generation. */
+
+ ISM32 i;
+ /*
+ Initialise. ----------------------------------------------------
+ */
+
+ single_instruction_delay = FALSE;
+ took_relative_jump = FALSE;
+ took_absolute_toc = FALSE;
+#ifdef PIG
+ pig_synch_required = FALSE;
+#if defined(SFELLOW)
+ pig_mmio_info.flags &= ~(MM_INPUT_OCCURRED | MM_OUTPUT_OCCURRED);
+#endif /* SFELLOW */
+#endif /* PIG */
+
+ /* somewhere for exceptions to return to */
+#ifdef NTVDM
+ setjmp(ccpu386ThrdExptnPtr());
+#else
+ setjmp(next_inst[simulate_level-1]);
+#endif
+
+#ifdef SYNCH_TIMERS
+ /* If we have taken a fault the EDL Cpu will have checked on
+ * the resulting transfer of control.
+ */
+ if (took_absolute_toc || took_relative_jump)
+ goto CHECK_INTERRUPT;
+ quick_mode = TRUE;
+#else /* SYNCH_TIMERS */
+ /* go slow until we are sure we can go fast */
+ quick_mode = FALSE;
+#endif /* SYNCH_TIMERS */
+
+ goto NEXT_INST;
+
+DO_INST:
+
+
+ /* INSIGNIA debugging */
+#ifdef PIG
+ /* We do not want to do check_I() in both CPUs */
+#else /* PIG */
+ if ( do_condition_checks )
+ {
+ check_I();
+ CCPU_save_EIP = GET_EIP(); /* in case yoda changed IP */
+ }
+#endif /* !PIG */
+
+#ifdef PIG
+ save_last_inst_details(NULL);
+ prefix_length = 0;
+#endif
+
+ QUICK_EVENT_TICK();
+
+ /* save beginning of the current instruction */
+
+ p_start = p;
+
+ /*
+ Decode instruction. --------------------------------------------
+ */
+
+ /* 'zero' all prefix byte indicators */
+ segment_override = SEG_CLR;
+ repeat = REP_CLR;
+
+ /*
+ Decode and Action instruction.
+ */
+DECODE:
+
+ opcode = GET_INST_BYTE(p); /* get next byte */
+ /*
+ NB. Each opcode is categorised by a type, instruction name
+ and operand names. The type and operand names are explained
+ further in c_oprnd.h.
+ */
+ switch ( opcode )
+ {
+ case 0x00: /* T5 ADD Eb Gb */
+ instp32328 = ADD;
+TYPE00:
+
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, RW0, PG_W);
+ D_Gb(1);
+ F_Eb(0);
+ F_Gb(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 8);
+ P_Eb(0);
+ break;
+
+ case 0x01: /* T5 ADD Ev Gv */
+ instp32328 = ADD;
+TYPE01:
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Gd(1);
+ F_Ed(0);
+ F_Gd(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0x02: /* T5 ADD Gb Eb */
+ instp32328 = ADD;
+TYPE02:
+
+ modRM = GET_INST_BYTE(p);
+ D_Gb(0);
+ D_Eb(1, RO1, PG_R);
+ F_Gb(0);
+ F_Eb(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 8);
+ P_Gb(0);
+ break;
+
+ case 0x03: /* T5 ADD Gv Ev */
+ instp32328 = ADD;
+TYPE03:
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Gw(0);
+ F_Ew(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ F_Gd(0);
+ F_Ed(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Gd(0);
+ }
+ break;
+
+ case 0x04: /* T5 ADD Fal Ib */
+ instp32328 = ADD;
+TYPE04:
+
+ D_Ib(1);
+ F_Fal(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 8);
+ P_Fal(0);
+ break;
+
+ case 0x05: /* T5 ADD F(e)ax Iv */
+ instp32328 = ADD;
+TYPE05:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Iw(1);
+ F_Fax(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Fax(0);
+ }
+ else /* USE32 */
+ {
+ D_Id(1);
+ F_Feax(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Feax(0);
+ }
+ break;
+
+ case 0x06: /* T2 PUSH Pw */
+ case 0x0e:
+ case 0x16:
+ case 0x1e:
+ D_Pw(0);
+ F_Pw(0);
+ PUSH_SR(ops[0].sng);
+ break;
+
+ case 0x07: /* T3 POP Pw */
+ case 0x17:
+ case 0x1f:
+ D_Pw(0);
+ POP_SR(ops[0].sng);
+ if ( ops[0].sng == SS_REG )
+ {
+ /* locally update IP - interrupts are supressed after POP SS */
+ UPDATE_INTEL_IP(p);
+
+ goto NEXT_INST;
+ }
+ break;
+
+ case 0x08: /* T5 OR Eb Gb */ instp32328 = OR; goto TYPE00;
+ case 0x09: /* T5 OR Ev Gv */ instp32328 = OR; goto TYPE01;
+ case 0x0a: /* T5 OR Gb Eb */ instp32328 = OR; goto TYPE02;
+ case 0x0b: /* T5 OR Gv Ev */ instp32328 = OR; goto TYPE03;
+ case 0x0c: /* T5 OR Fal Ib */ instp32328 = OR; goto TYPE04;
+ case 0x0d: /* T5 OR F(e)ax Iv */ instp32328 = OR; goto TYPE05;
+
+ case 0x0f:
+ opcode = GET_INST_BYTE(p); /* get next opcode byte */
+
+ /* Remove Empty Top of Table here. */
+ if ( opcode >= 0xd0 )
+ Int6();
+
+ switch ( opcode )
+ {
+ case 0x00:
+ if ( GET_PE() == 0 || GET_VM() == 1 )
+ Int6();
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T3 SLDT Ew */
+ instp32 = SLDT;
+TYPE0F00_0:
+
+ D_Ew(0, WO0, PG_W);
+ (*instp32)(&ops[0].sng);
+ P_Ew(0);
+ break;
+
+ case 1: /* T3 STR Ew */ instp32 = STR; goto TYPE0F00_0;
+
+ case 2: /* T2 LLDT Ew */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_LLDT_ACCESS);
+ inst32 = LLDT;
+TYPE0F00_2:
+
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ (*inst32)(ops[0].sng);
+ break;
+
+ case 3: /* T2 LTR Ew */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_LTR_ACCESS);
+ inst32 = LTR;
+ goto TYPE0F00_2;
+
+ case 4: /* T2 VERR Ew */ inst32 = VERR; goto TYPE0F00_2;
+ case 5: /* T2 VERW Ew */ inst32 = VERW; goto TYPE0F00_2;
+
+ case 6: case 7:
+
+ Int6();
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0x01:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T3 SGDT Ms */
+ instp32 = SGDT16;
+ inst2p32 = SGDT32;
+TYPE0F01_0:
+
+ if ( GET_MODE(modRM) == 3 )
+ Int6(); /* Register operand not allowed */
+
+ D_Ms(0, WO0, PG_W);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ (*instp32)(ops[0].mlt);
+ }
+ else /* USE32 */
+ {
+ (*inst2p32)(ops[0].mlt);
+ }
+ P_Ms(0);
+ break;
+
+ case 1: /* T3 SIDT Ms */
+ instp32 = SIDT16;
+ inst2p32 = SIDT32;
+ goto TYPE0F01_0;
+
+ case 2: /* T2 LGDT Ms */
+ instp32 = LGDT16;
+ inst2p32 = LGDT32;
+TYPE0F01_2:
+
+ if ( GET_MODE(modRM) == 3 )
+ Int6(); /* Register operand not allowed */
+
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_LGDT_ACCESS);
+
+ D_Ms(0, RO0, PG_R);
+ F_Ms(0);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ (*instp32)(ops[0].mlt);
+ }
+ else /* USE32 */
+ {
+ (*inst2p32)(ops[0].mlt);
+ }
+ break;
+
+ case 3: /* T2 LIDT Ms */
+ instp32 = LIDT16;
+ inst2p32 = LIDT32;
+ goto TYPE0F01_2;
+
+ case 4: /* T3 SMSW Ew */
+ instp32 = SMSW;
+ goto TYPE0F00_0;
+
+ case 5:
+ Int6();
+ break;
+
+ case 6: /* T2 LMSW Ew */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_LMSW_ACCESS);
+ inst32 = LMSW;
+ goto TYPE0F00_2;
+
+ case 7: /* T2 INVLPG Mm */
+
+#ifdef SPC486
+ note_486_instruction("INVLPG");
+
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_INVLPG_ACCESS);
+ D_Mm(0);
+ F_Mm(0);
+ INVLPG(ops[0].sng);
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0x02: /* T5 LAR Gv Ew */
+ instp3232 = LAR;
+TYPE0F02:
+
+ if ( GET_PE() == 0 || GET_VM() == 1 )
+ Int6();
+ modRM = GET_INST_BYTE(p);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Gw(0);
+ F_Ew(1);
+ (*instp3232)(&ops[0].sng, ops[1].sng);
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ew(1, RO1, PG_R);
+ F_Gd(0);
+ F_Ew(1);
+ (*instp3232)(&ops[0].sng, ops[1].sng);
+ P_Gd(0);
+ }
+ break;
+
+ case 0x03: /* T5 LSL Gv Ew */
+ instp3232 = LSL;
+ goto TYPE0F02;
+
+ case 0x04: case 0x05: case 0x0a: case 0x0b:
+ case 0x0c: case 0x0d: case 0x0e:
+
+ case 0x14: case 0x15: case 0x16: case 0x17:
+ case 0x18: case 0x19: case 0x1a: case 0x1b:
+ case 0x1c: case 0x1d: case 0x1e: case 0x1f:
+
+ case 0x25: case 0x27:
+ case 0x28: case 0x29: case 0x2a: case 0x2b:
+ case 0x2c: case 0x2d: case 0x2e: case 0x2f:
+
+ case 0x30: case 0x31: case 0x32: case 0x33:
+ case 0x34: case 0x35: case 0x36: case 0x37:
+ case 0x38: case 0x39: case 0x3a: case 0x3b:
+ case 0x3c: case 0x3d: case 0x3e: case 0x3f:
+
+ case 0x40: case 0x41: case 0x42: case 0x43:
+ case 0x44: case 0x45: case 0x46: case 0x47:
+ case 0x48: case 0x49: case 0x4a: case 0x4b:
+ case 0x4c: case 0x4d: case 0x4e: case 0x4f:
+
+ case 0x50: case 0x51: case 0x52: case 0x53:
+ case 0x54: case 0x55: case 0x56: case 0x57:
+ case 0x58: case 0x59: case 0x5a: case 0x5b:
+ case 0x5c: case 0x5d: case 0x5e: case 0x5f:
+
+ case 0x60: case 0x61: case 0x62: case 0x63:
+ case 0x64: case 0x65: case 0x66: case 0x67:
+ case 0x68: case 0x69: case 0x6a: case 0x6b:
+ case 0x6c: case 0x6d: case 0x6e: case 0x6f:
+
+ case 0x70: case 0x71: case 0x72: case 0x73:
+ case 0x74: case 0x75: case 0x76: case 0x77:
+ case 0x78: case 0x79: case 0x7a: case 0x7b:
+ case 0x7c: case 0x7d: case 0x7e: case 0x7f:
+
+ case 0xae: case 0xb8: case 0xb9:
+
+ case 0xc2: case 0xc3: case 0xc4: case 0xc5:
+ case 0xc6: case 0xc7:
+ Int6();
+ break;
+
+ case 0xa2:
+ /* Pentium CPUID instruction */
+ note_486_instruction("CPUID");
+ Int6();
+ break;
+
+ case 0xa6: case 0xa7:
+ /* 386, A-Step archaic instruction */
+ note_486_instruction("A-step CMPXCHG");
+ Int6();
+ break;
+
+ case 0xaa:
+ /* Pentium RSM instruction, used by Windows95 */
+ note_486_instruction("RSM");
+ RSRVD();
+ break;
+
+ case 0x06: /* T0 CLTS */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_CLTS_ACCESS);
+ CLTS();
+ break;
+
+ case 0x07: /* T0 "RESERVED" */
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ RSRVD();
+ break;
+
+ case 0x08: /* T0 INVD */
+
+#ifdef SPC486
+ note_486_instruction("INVD");
+
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_INVD_ACCESS);
+ INVD();
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+
+ case 0x09: /* T0 WBINVD */
+
+#ifdef SPC486
+ note_486_instruction("WBINVD");
+
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_WBIND_ACCESS);
+ WBINVD();
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+
+ case 0x0f:
+#ifdef PIG
+ SET_EIP(CCPU_save_EIP);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_NO_EXEC);
+#else
+ Int6();
+#endif /* PIG */
+ break;
+
+ case 0x20: /* T4 MOV Rd Cd */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_MOV_R_C_ACCESS);
+ modRM = GET_INST_BYTE(p);
+ D_Rd(0);
+ D_Cd(1);
+ F_Cd(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Rd(0);
+ break;
+
+ case 0x21: /* T4 MOV Rd Dd */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_MOV_R_D_ACCESS);
+ modRM = GET_INST_BYTE(p);
+ D_Rd(0);
+ D_Dd(1);
+ F_Dd(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Rd(0);
+ break;
+
+ case 0x22: /* T4 MOV Cd Rd */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_MOV_C_R_ACCESS);
+ modRM = GET_INST_BYTE(p);
+ D_Cd(0);
+ D_Rd(1);
+ F_Rd(1);
+ MOV_CR(ops[0].sng, ops[1].sng);
+ break;
+
+ case 0x23: /* T4 MOV Dd Rd */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_MOV_D_R_ACCESS);
+ modRM = GET_INST_BYTE(p);
+ D_Dd(0);
+ D_Rd(1);
+ F_Rd(1);
+ MOV_DR(ops[0].sng, ops[1].sng);
+ quick_mode = FALSE;
+ break;
+
+ case 0x24: /* T4 MOV Rd Td */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_MOV_R_T_ACCESS);
+ modRM = GET_INST_BYTE(p);
+ D_Rd(0);
+ D_Td(1);
+ F_Td(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Rd(0);
+ break;
+
+ case 0x26: /* T4 MOV Td Rd */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_MOV_T_R_ACCESS);
+ modRM = GET_INST_BYTE(p);
+ D_Td(0);
+ D_Rd(1);
+ F_Rd(1);
+ MOV_TR(ops[0].sng, ops[1].sng);
+ break;
+
+ case 0x80: /* T2 JO Jv */
+ inst32 = JO;
+TYPE0F80:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Jw(0);
+ }
+ else
+ {
+ D_Jd(0);
+ }
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*inst32)(ops[0].sng);
+ CANCEL_HOST_IP();
+#ifdef SYNCH_BOTH_WAYS
+ took_relative_jump = TRUE;
+#endif /* SYNCH_BOTH_WAYS */
+ if (took_relative_jump)
+ {
+ PIG_SYNCH(CHECK_ALL);
+ }
+ break;
+
+ case 0x81: /* T2 JNO Jv */ inst32 = JNO; goto TYPE0F80;
+ case 0x82: /* T2 JB Jv */ inst32 = JB; goto TYPE0F80;
+ case 0x83: /* T2 JNB Jv */ inst32 = JNB; goto TYPE0F80;
+ case 0x84: /* T2 JZ Jv */ inst32 = JZ; goto TYPE0F80;
+ case 0x85: /* T2 JNZ Jv */ inst32 = JNZ; goto TYPE0F80;
+ case 0x86: /* T2 JBE Jv */ inst32 = JBE; goto TYPE0F80;
+ case 0x87: /* T2 JNBE Jv */ inst32 = JNBE; goto TYPE0F80;
+ case 0x88: /* T2 JS Jv */ inst32 = JS; goto TYPE0F80;
+ case 0x89: /* T2 JNS Jv */ inst32 = JNS; goto TYPE0F80;
+ case 0x8a: /* T2 JP Jv */ inst32 = JP; goto TYPE0F80;
+ case 0x8b: /* T2 JNP Jv */ inst32 = JNP; goto TYPE0F80;
+ case 0x8c: /* T2 JL Jv */ inst32 = JL; goto TYPE0F80;
+ case 0x8d: /* T2 JNL Jv */ inst32 = JNL; goto TYPE0F80;
+ case 0x8e: /* T2 JLE Jv */ inst32 = JLE; goto TYPE0F80;
+ case 0x8f: /* T2 JNLE Jv */ inst32 = JNLE; goto TYPE0F80;
+
+ case 0x90: /* T3 SETO Eb */
+ instp32 = SETO;
+TYPE0F90:
+
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, WO0, PG_W);
+ (*instp32)(&ops[0].sng);
+ P_Eb(0);
+ break;
+
+ case 0x91: /* T3 SETNO Eb */ instp32 = SETNO; goto TYPE0F90;
+ case 0x92: /* T3 SETB Eb */ instp32 = SETB; goto TYPE0F90;
+ case 0x93: /* T3 SETNB Eb */ instp32 = SETNB; goto TYPE0F90;
+ case 0x94: /* T3 SETZ Eb */ instp32 = SETZ; goto TYPE0F90;
+ case 0x95: /* T3 SETNZ Eb */ instp32 = SETNZ; goto TYPE0F90;
+ case 0x96: /* T3 SETBE Eb */ instp32 = SETBE; goto TYPE0F90;
+ case 0x97: /* T3 SETNBE Eb */ instp32 = SETNBE; goto TYPE0F90;
+ case 0x98: /* T3 SETS Eb */ instp32 = SETS; goto TYPE0F90;
+ case 0x99: /* T3 SETNS Eb */ instp32 = SETNS; goto TYPE0F90;
+ case 0x9a: /* T3 SETP Eb */ instp32 = SETP; goto TYPE0F90;
+ case 0x9b: /* T3 SETNP Eb */ instp32 = SETNP; goto TYPE0F90;
+ case 0x9c: /* T3 SETL Eb */ instp32 = SETL; goto TYPE0F90;
+ case 0x9d: /* T3 SETNL Eb */ instp32 = SETNL; goto TYPE0F90;
+ case 0x9e: /* T3 SETLE Eb */ instp32 = SETLE; goto TYPE0F90;
+ case 0x9f: /* T3 SETNLE Eb */ instp32 = SETNLE; goto TYPE0F90;
+
+ case 0xa0: /* T2 PUSH Qw */
+ case 0xa8:
+ D_Qw(0);
+ F_Qw(0);
+ PUSH_SR(ops[0].sng);
+ break;
+
+ case 0xa1: /* T3 POP Qw */
+ case 0xa9:
+ D_Qw(0);
+ POP_SR(ops[0].sng);
+ break;
+
+ case 0xa3: /* T6 BT Ev Gv */
+ inst32328 = BT;
+#ifndef FIX_BT_BUG
+ goto TYPE39;
+#endif
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ BT_OPSw(RO0, PG_R);
+ (*inst32328)(ops[0].sng, ops[1].sng, 16);
+ }
+ else /* USE32 */
+ {
+ BT_OPSd(RO0, PG_R);
+ (*inst32328)(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+
+ case 0xa4: /* T9 SHLD Ev Gv Ib */
+ instp3232328 = SHLD;
+TYPE0FA4:
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ D_Ib(2);
+ F_Ew(0);
+ F_Gw(1);
+ (*instp3232328)(&ops[0].sng, ops[1].sng, ops[2].sng, 16);
+ P_Ew(0);
+ }
+ else
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Gd(1);
+ D_Ib(2);
+ F_Ed(0);
+ F_Gd(1);
+ (*instp3232328)(&ops[0].sng, ops[1].sng, ops[2].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0xa5: /* T9 SHLD Ev Gv Fcl */
+ instp3232328 = SHLD;
+TYPE0FA5:
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ F_Fcl(2);
+ (*instp3232328)(&ops[0].sng, ops[1].sng, ops[2].sng, 16);
+ P_Ew(0);
+ }
+ else
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Gd(1);
+ F_Ed(0);
+ F_Gd(1);
+ F_Fcl(2);
+ (*instp3232328)(&ops[0].sng, ops[1].sng, ops[2].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0xab: /* T5 BTS Ev Gv */
+ instp32328 = BTS;
+#ifndef FIX_BT_BUG
+ goto TYPE01;
+#endif
+TYPE0FAB:
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ BT_OPSw(RW0, PG_W);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ BT_OPSd(RW0, PG_W);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0xac: /* T9 SHRD Ev Gv Ib */
+ instp3232328 = SHRD;
+ goto TYPE0FA4;
+
+ case 0xad: /* T9 SHRD Ev Gv Fcl */
+ instp3232328 = SHRD;
+ goto TYPE0FA5;
+
+ case 0xaf: /* T5 IMUL Gv Ev */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Gw(0);
+ F_Ew(1);
+ IMUL16T(&ops[0].sng, ops[0].sng, ops[1].sng);
+ P_Gw(0);
+ }
+ else
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ F_Gd(0);
+ F_Ed(1);
+ IMUL32T(&ops[0].sng, ops[0].sng, ops[1].sng);
+ P_Gd(0);
+ }
+ break;
+
+ case 0xb0: /* T5 CMPXCHG Eb Gb */
+
+#ifdef SPC486
+ note_486_instruction("CMPXCHG Eb Gb");
+
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, RW0, PG_W);
+ D_Gb(1);
+ F_Eb(0);
+ F_Gb(1);
+ CMPXCHG8(&ops[0].sng, ops[1].sng);
+ P_Eb(0);
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+
+ case 0xb1: /* T5 CMPXCHG Ev Gv */
+
+#ifdef SPC486
+ note_486_instruction("CMPXCHG Ev Gv");
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ CMPXCHG16(&ops[0].sng, ops[1].sng);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Gd(1);
+ F_Ed(0);
+ F_Gd(1);
+ CMPXCHG32(&ops[0].sng, ops[1].sng);
+ P_Ed(0);
+ }
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+
+ case 0xb2: /* T4 LSS Gv Mp */ instp32p32 = LSS; goto TYPEC4;
+#ifndef FIX_BT_BUG
+ case 0xb3: /* T5 BTR Ev Gv */ instp32328 = BTR; goto TYPE01;
+#else
+ case 0xb3: /* T5 BTR Ev Gv */ instp32328 = BTR; goto TYPE0FAB;
+#endif
+ case 0xb4: /* T4 LFS Gv Mp */ instp32p32 = LFS; goto TYPEC4;
+ case 0xb5: /* T4 LGS Gv Mp */ instp32p32 = LGS; goto TYPEC4;
+
+ case 0xb6: /* T4 MOVZX Gv Eb */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Eb(1, RO1, PG_R);
+ F_Eb(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Gw(0);
+ }
+ else
+ {
+ D_Gd(0);
+ D_Eb(1, RO1, PG_R);
+ F_Eb(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Gd(0);
+ }
+ break;
+
+ case 0xb7: /* T4 MOVZX Gd Ew */
+ modRM = GET_INST_BYTE(p);
+ D_Gd(0);
+ D_Ew(1, RO1, PG_R);
+ F_Ew(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Gd(0);
+ break;
+
+ case 0xba:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: case 1: case 2: case 3:
+ Int6();
+ break;
+
+ case 4: /* T6 BT Ev Ib */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RO0, PG_R);
+ D_Ib(1);
+ F_Ew(0);
+ BT(ops[0].sng, ops[1].sng, 16);
+ }
+ else
+ {
+ D_Ed(0, RO0, PG_R);
+ D_Ib(1);
+ F_Ed(0);
+ BT(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+
+ case 5: /* T5 BTS Ev Ib */ instp32328 = BTS; goto TYPEC1;
+ case 6: /* T5 BTR Ev Ib */ instp32328 = BTR; goto TYPEC1;
+ case 7: /* T5 BTC Ev Ib */ instp32328 = BTC; goto TYPEC1;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0xbb: /* T5 BTC Ev Gv */
+ instp32328 = BTC;
+#ifndef FIX_BT_BUG
+ goto TYPE01;
+#else
+ goto TYPE0FAB;
+#endif
+ case 0xbc: /* T5 BSF Gv Ev */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Gw(0);
+ F_Ew(1);
+ BSF(&ops[0].sng, ops[1].sng);
+ P_Gw(0);
+ }
+ else
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ F_Gd(0);
+ F_Ed(1);
+ BSF(&ops[0].sng, ops[1].sng);
+ P_Gd(0);
+ }
+ break;
+
+ case 0xbd: /* T5 BSR Gv Ev */
+ instp32328 = BSR;
+ goto TYPE03;
+
+ case 0xbe: /* T4 MOVSX Gv Eb */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Eb(1, RO1, PG_R);
+ F_Eb(1);
+ MOVSX(&ops[0].sng, ops[1].sng, 8);
+ P_Gw(0);
+ }
+ else
+ {
+ D_Gd(0);
+ D_Eb(1, RO1, PG_R);
+ F_Eb(1);
+ MOVSX(&ops[0].sng, ops[1].sng, 8);
+ P_Gd(0);
+ }
+ break;
+
+ case 0xbf: /* T4 MOVSX Gd Ew */
+ modRM = GET_INST_BYTE(p);
+ D_Gd(0);
+ D_Ew(1, RO1, PG_R);
+ F_Ew(1);
+ MOVSX(&ops[0].sng, ops[1].sng, 16);
+ P_Gd(0);
+ break;
+
+ case 0xc0: /* T8 XADD Eb Gb */
+
+#ifdef SPC486
+ note_486_instruction("XADD Eb Gb");
+
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, RW0, PG_W);
+ D_Gb(1);
+ F_Eb(0);
+ F_Gb(1);
+ XADD(&ops[0].sng, &ops[1].sng, 8);
+ P_Eb(0);
+ P_Gb(1);
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+
+ case 0xc1: /* T8 XADD Ev Gv */
+
+#ifdef SPC486
+ note_486_instruction("XADD Ev Gv");
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ XADD(&ops[0].sng, &ops[1].sng, 16);
+ P_Ew(0);
+ P_Gw(1);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Gd(1);
+ F_Ed(0);
+ F_Gd(1);
+ XADD(&ops[0].sng, &ops[1].sng, 32);
+ P_Ed(0);
+ P_Gd(1);
+ }
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+
+ case 0xc8: /* T1 BSWAP Hv */
+ case 0xc9:
+ case 0xca:
+ case 0xcb:
+ case 0xcc:
+ case 0xcd:
+ case 0xce:
+ case 0xcf:
+
+#ifdef SPC486
+ note_486_instruction("BSWAP Hv");
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Hd(0); /* BSWAP 16 bit reads 32 bit & writes 16 */
+ F_Hd(0); /* so getting EAX -> EAX' -> AX */
+ BSWAP(&ops[0].sng);
+ P_Hw(0);
+ }
+ else /* USE32 */
+ {
+ D_Hd(0);
+ F_Hd(0);
+ BSWAP(&ops[0].sng);
+ P_Hd(0);
+ }
+#else
+ Int6();
+#endif /* SPC486 */
+
+ break;
+ } /* end switch ( opcode ) 0F */
+ break;
+
+ case 0x10: /* T5 ADC Eb Gb */ instp32328 = ADC; goto TYPE00;
+ case 0x11: /* T5 ADC Ev Gv */ instp32328 = ADC; goto TYPE01;
+ case 0x12: /* T5 ADC Gb Eb */ instp32328 = ADC; goto TYPE02;
+ case 0x13: /* T5 ADC Gv Ev */ instp32328 = ADC; goto TYPE03;
+ case 0x14: /* T5 ADC Fal Ib */ instp32328 = ADC; goto TYPE04;
+ case 0x15: /* T5 ADC F(e)ax Iv */ instp32328 = ADC; goto TYPE05;
+
+ case 0x18: /* T5 SBB Eb Gb */ instp32328 = SBB; goto TYPE00;
+ case 0x19: /* T5 SBB Ev Gv */ instp32328 = SBB; goto TYPE01;
+ case 0x1a: /* T5 SBB Gb Eb */ instp32328 = SBB; goto TYPE02;
+ case 0x1b: /* T5 SBB Gv Ev */ instp32328 = SBB; goto TYPE03;
+ case 0x1c: /* T5 SBB Fal Ib */ instp32328 = SBB; goto TYPE04;
+ case 0x1d: /* T5 SBB F(e)ax Iv */ instp32328 = SBB; goto TYPE05;
+
+ case 0x20: /* T5 AND Eb Gb */ instp32328 = AND; goto TYPE00;
+ case 0x21: /* T5 AND Ev Gv */ instp32328 = AND; goto TYPE01;
+ case 0x22: /* T5 AND Gb Eb */ instp32328 = AND; goto TYPE02;
+ case 0x23: /* T5 AND Gb Eb */ instp32328 = AND; goto TYPE03;
+ case 0x24: /* T5 AND Fal Ib */ instp32328 = AND; goto TYPE04;
+ case 0x25: /* T5 AND F(e)ax Iv */ instp32328 = AND; goto TYPE05;
+
+ case 0x26:
+ segment_override = ES_REG;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x27: /* T0 DAA */
+ DAA();
+ break;
+
+ case 0x28: /* T5 SUB Eb Gb */ instp32328 = SUB; goto TYPE00;
+ case 0x29: /* T5 SUB Ev Gv */ instp32328 = SUB; goto TYPE01;
+ case 0x2a: /* T5 SUB Gb Eb */ instp32328 = SUB; goto TYPE02;
+ case 0x2b: /* T5 SUB Gv Ev */ instp32328 = SUB; goto TYPE03;
+ case 0x2c: /* T5 SUB Fal Ib */ instp32328 = SUB; goto TYPE04;
+ case 0x2d: /* T5 SUB F(e)ax Iv */ instp32328 = SUB; goto TYPE05;
+
+ case 0x2e:
+ segment_override = CS_REG;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x2f: /* T0 DAS */
+ DAS();
+ break;
+
+ case 0x30: /* T5 XOR Eb Gb */ instp32328 = XOR; goto TYPE00;
+ case 0x31: /* T5 XOR Ev Gv */ instp32328 = XOR; goto TYPE01;
+ case 0x32: /* T5 XOR Gb Eb */ instp32328 = XOR; goto TYPE02;
+ case 0x33: /* T5 XOR Gv Ev */ instp32328 = XOR; goto TYPE03;
+ case 0x34: /* T5 XOR Fal Ib */ instp32328 = XOR; goto TYPE04;
+ case 0x35: /* T5 XOR F(e)ax Iv */ instp32328 = XOR; goto TYPE05;
+
+ case 0x36:
+ segment_override = SS_REG;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x37: /* T0 AAA */
+ AAA();
+ break;
+
+ case 0x38: /* T6 CMP Eb Gb */
+ inst32328 = CMP;
+TYPE38:
+
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, RO0, PG_R);
+ D_Gb(1);
+ F_Eb(0);
+ F_Gb(1);
+ (*inst32328)(ops[0].sng, ops[1].sng, 8);
+ break;
+
+ case 0x39: /* T6 CMP Ev Gv */
+ inst32328 = CMP;
+TYPE39:
+
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RO0, PG_R);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ (*inst32328)(ops[0].sng, ops[1].sng, 16);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RO0, PG_R);
+ D_Gd(1);
+ F_Ed(0);
+ F_Gd(1);
+ (*inst32328)(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+
+ case 0x3a: /* T6 CMP Gb Eb */
+ modRM = GET_INST_BYTE(p);
+ D_Gb(0);
+ D_Eb(1, RO1, PG_R);
+ F_Gb(0);
+ F_Eb(1);
+ CMP(ops[0].sng, ops[1].sng, 8);
+ break;
+
+ case 0x3b: /* T6 CMP Gv Ev */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Gw(0);
+ F_Ew(1);
+ CMP(ops[0].sng, ops[1].sng, 16);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ F_Gd(0);
+ F_Ed(1);
+ CMP(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+
+ case 0x3c: /* T6 CMP Fal Ib */
+ inst32328 = CMP;
+TYPE3C:
+
+ D_Ib(1);
+ F_Fal(0);
+ (*inst32328)(ops[0].sng, ops[1].sng, 8);
+ break;
+
+ case 0x3d: /* T6 CMP F(e)ax Iv */
+ inst32328 = CMP;
+TYPE3D:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Iw(1);
+ F_Fax(0);
+ (*inst32328)(ops[0].sng, ops[1].sng, 16);
+ }
+ else /* USE32 */
+ {
+ D_Id(1);
+ F_Feax(0);
+ (*inst32328)(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+
+ case 0x3e:
+ segment_override = DS_REG;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x3f: /* T0 AAS */
+ AAS();
+ break;
+
+ case 0x40: /* T1 INC Hv */
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ instp328 = INC;
+TYPE40:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Hw(0);
+ F_Hw(0);
+ (*instp328)(&ops[0].sng, 16);
+ P_Hw(0);
+ }
+ else /* USE32 */
+ {
+ D_Hd(0);
+ F_Hd(0);
+ (*instp328)(&ops[0].sng, 32);
+ P_Hd(0);
+ }
+ break;
+
+ case 0x48: /* T1 DEC Hv */
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ case 0x4e:
+ case 0x4f:
+ instp328 = DEC;
+ goto TYPE40;
+
+ case 0x50: /* T2 PUSH Hv */
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Hw(0);
+ F_Hw(0);
+ }
+ else /* USE32 */
+ {
+ D_Hd(0);
+ F_Hd(0);
+ }
+ PUSH(ops[0].sng);
+ break;
+
+ case 0x58: /* T3 POP Hv */
+ case 0x59:
+ case 0x5a:
+ case 0x5b:
+ case 0x5c:
+ case 0x5d:
+ case 0x5e:
+ case 0x5f:
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Hw(0);
+ POP(&ops[0].sng);
+ P_Hw(0);
+ }
+ else /* USE32 */
+ {
+ D_Hd(0);
+ POP(&ops[0].sng);
+ P_Hd(0);
+ }
+ break;
+
+ case 0x60: /* T0 PUSHA */
+ PUSHA();
+ break;
+
+ case 0x61: /* T0 POPA */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ POPA();
+ }
+ else /* USE32 */
+ {
+ POPAD();
+ }
+ break;
+
+ case 0x62: /* T6 BOUND Gv Ma */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ma16(1, RO1, PG_R);
+ F_Gw(0);
+ F_Ma16(1);
+ BOUND(ops[0].sng, ops[1].mlt, 16);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ma32(1, RO1, PG_R);
+ F_Gd(0);
+ F_Ma32(1);
+ BOUND(ops[0].sng, ops[1].mlt, 32);
+ }
+ break;
+
+ case 0x63: /* T5 ARPL Ew Gw */
+ if ( GET_PE() == 0 || GET_VM() == 1 )
+ Int6();
+ modRM = GET_INST_BYTE(p);
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ ARPL(&ops[0].sng, ops[1].sng);
+ P_Ew(0);
+ break;
+
+ case 0x64:
+ segment_override = FS_REG;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x65:
+ segment_override = GS_REG;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x66:
+ SET_OPERAND_SIZE(GET_CS_AR_X());
+ if ( GET_OPERAND_SIZE() == USE16 )
+ SET_OPERAND_SIZE(USE32);
+ else /* USE32 */
+ SET_OPERAND_SIZE(USE16);
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x67:
+ SET_ADDRESS_SIZE(GET_CS_AR_X());
+ if ( GET_ADDRESS_SIZE() == USE16 )
+ SET_ADDRESS_SIZE(USE32);
+ else /* USE32 */
+ SET_ADDRESS_SIZE(USE16);
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0x68: /* T2 PUSH Iv */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Iw(0);
+ }
+ else /* USE32 */
+ {
+ D_Id(0);
+ }
+ PUSH(ops[0].sng);
+ break;
+
+ case 0x69: /* T7 IMUL Gv Ev Iv */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ D_Iw(2);
+ F_Gw(0);
+ F_Ew(1);
+ IMUL16T(&ops[0].sng, ops[1].sng, ops[2].sng);
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ D_Id(2);
+ F_Gd(0);
+ F_Ed(1);
+ IMUL32T(&ops[0].sng, ops[1].sng, ops[2].sng);
+ P_Gd(0);
+ }
+ break;
+
+ case 0x6a: /* T2 PUSH Ib */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ix(0);
+ }
+ else /* USE32 */
+ {
+ D_Iy(0);
+ }
+ PUSH(ops[0].sng);
+ break;
+
+ case 0x6b: /* T7 IMUL Gv Ev Ib */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ D_Ix(2);
+ F_Gw(0);
+ F_Ew(1);
+ IMUL16T(&ops[0].sng, ops[1].sng, ops[2].sng);
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ D_Iy(2);
+ F_Gd(0);
+ F_Ed(1);
+ IMUL32T(&ops[0].sng, ops[1].sng, ops[2].sng);
+ P_Gd(0);
+ }
+ break;
+
+ case 0x6c: /* T4 INSB Yb Fdx */
+ STRING_COUNT;
+ F_Fdx(1);
+
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, BYTE_WIDTH);
+
+ while ( rep_count )
+ {
+ D_Yb(0, WO0, PG_W);
+ IN8(&ops[0].sng, ops[1].sng);
+ rep_count--;
+ C_Yb(0);
+ PIG_P_Yb(0);
+ /*
+ KNOWN BUG #1.
+ We should check for pending interrupts here, at least:-
+ Single step trap
+ Debug trap
+ */
+ }
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_SOME_MEM);
+ quick_mode = FALSE;
+ break;
+
+ case 0x6d: /* T4 INSW Yv Fdx */
+ STRING_COUNT;
+ F_Fdx(1);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, WORD_WIDTH);
+
+ while ( rep_count )
+ {
+ D_Yw(0, WO0, PG_W);
+ IN16(&ops[0].sng, ops[1].sng);
+ rep_count--;
+ C_Yw(0);
+ PIG_P_Yw(0);
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, DWORD_WIDTH);
+
+ while ( rep_count )
+ {
+ D_Yd(0, WO0, PG_W);
+ IN32(&ops[0].sng, ops[1].sng);
+ rep_count--;
+ C_Yd(0);
+ PIG_P_Yd(0);
+ /* KNOWN BUG #1. */
+ }
+ }
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_SOME_MEM);
+ quick_mode = FALSE;
+ break;
+
+ case 0x6e: /* T6 OUTSB Fdx Xb */
+ STRING_COUNT;
+ F_Fdx(0);
+
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, BYTE_WIDTH);
+
+ while ( rep_count )
+ {
+ D_Xb(1, RO1, PG_R);
+ F_Xb(1);
+ OUT8(ops[0].sng, ops[1].sng);
+ rep_count--;
+ C_Xb(1);
+ /* KNOWN BUG #1. */
+ }
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_ALL);
+ quick_mode = FALSE;
+ break;
+
+ case 0x6f: /* T6 OUTSW Fdx Xv */
+ STRING_COUNT;
+ F_Fdx(0);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, WORD_WIDTH);
+
+ while ( rep_count )
+ {
+ D_Xw(1, RO1, PG_R);
+ F_Xw(1);
+ OUT16(ops[0].sng, ops[1].sng);
+ rep_count--;
+ C_Xw(1);
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, DWORD_WIDTH);
+
+ while ( rep_count )
+ {
+ D_Xd(1, RO1, PG_R);
+ F_Xd(1);
+ OUT32(ops[0].sng, ops[1].sng);
+ rep_count--;
+ C_Xd(1);
+ /* KNOWN BUG #1. */
+ }
+ }
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_ALL);
+ quick_mode = FALSE;
+ break;
+
+ case 0x70: /* T2 JO Jb */
+ inst32 = JO;
+TYPE70:
+
+ D_Jb(0);
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+#ifdef PIG
+ if ((opcode != 0xeb) && (ops[0].sng == 3))
+ {
+ /* Convert the EDL cpu super-instructions
+ *
+ * Jcc .+03
+ * JMPN dst
+ *
+ * into
+ *
+ * Jcc' dest
+ */
+ int offset_in_page = DiffCpuPtrsLS8(FloorIntelPageLS8(p), p);
+
+ if ((GET_CS_AR_X() == 0)
+ && (offset_in_page != 0)
+ && (offset_in_page <= 0xFFD)
+ && (*p == 0xe9))
+ {
+ p_start = p;
+ (void)GET_INST_BYTE(p);
+ switch (opcode)
+ {
+ case 0x70: /* T2 JO Jb */ inst32 = JNO; goto TYPE0F80;
+ case 0x71: /* T2 JNO Jb */ inst32 = JO; goto TYPE0F80;
+ case 0x72: /* T2 JB Jb */ inst32 = JNB; goto TYPE0F80;
+ case 0x73: /* T2 JNB Jb */ inst32 = JB; goto TYPE0F80;
+ case 0x74: /* T2 JZ Jb */ inst32 = JNZ; goto TYPE0F80;
+ case 0x75: /* T2 JNZ Jb */ inst32 = JZ; goto TYPE0F80;
+ case 0x76: /* T2 JBE Jb */ inst32 = JNBE; goto TYPE0F80;
+ case 0x77: /* T2 JNBE Jb */ inst32 = JBE; goto TYPE0F80;
+ case 0x78: /* T2 JS Jb */ inst32 = JNS; goto TYPE0F80;
+ case 0x79: /* T2 JNS Jb */ inst32 = JS; goto TYPE0F80;
+ case 0x7a: /* T2 JP Jb */ inst32 = JNP; goto TYPE0F80;
+ case 0x7b: /* T2 JNP Jb */ inst32 = JP; goto TYPE0F80;
+ case 0x7c: /* T2 JL Jb */ inst32 = JNL; goto TYPE0F80;
+ case 0x7d: /* T2 JNL Jb */ inst32 = JL; goto TYPE0F80;
+ case 0x7e: /* T2 JLE Jb */ inst32 = JNLE; goto TYPE0F80;
+ case 0x7f: /* T2 JNLE Jb */ inst32 = JLE; goto TYPE0F80;
+ default:
+ break;
+ }
+ }
+ }
+#endif /* PIG */
+ (*inst32)(ops[0].sng);
+ CANCEL_HOST_IP();
+
+#ifdef PIG
+ if (single_instruction_delay && !took_relative_jump)
+ {
+ if (single_instruction_delay_enable)
+ {
+ save_last_xcptn_details("STI/POPF blindspot\n", 0, 0, 0, 0, 0);
+ PIG_SYNCH(CHECK_NO_EXEC);
+ }
+ else
+ {
+ save_last_xcptn_details("STI/POPF problem\n", 0, 0, 0, 0, 0);
+ }
+ break;
+ }
+#ifdef SYNCH_BOTH_WAYS
+ took_relative_jump = TRUE;
+#endif /* SYNCH_BOTH_WAYS */
+ if (took_relative_jump)
+ {
+ PIG_SYNCH(CHECK_ALL);
+ }
+#endif /* PIG */
+ break;
+
+ case 0x71: /* T2 JNO Jb */ inst32 = JNO; goto TYPE70;
+ case 0x72: /* T2 JB Jb */ inst32 = JB; goto TYPE70;
+ case 0x73: /* T2 JNB Jb */ inst32 = JNB; goto TYPE70;
+ case 0x74: /* T2 JZ Jb */ inst32 = JZ; goto TYPE70;
+ case 0x75: /* T2 JNZ Jb */ inst32 = JNZ; goto TYPE70;
+ case 0x76: /* T2 JBE Jb */ inst32 = JBE; goto TYPE70;
+ case 0x77: /* T2 JNBE Jb */ inst32 = JNBE; goto TYPE70;
+ case 0x78: /* T2 JS Jb */ inst32 = JS; goto TYPE70;
+ case 0x79: /* T2 JNS Jb */ inst32 = JNS; goto TYPE70;
+ case 0x7a: /* T2 JP Jb */ inst32 = JP; goto TYPE70;
+ case 0x7b: /* T2 JNP Jb */ inst32 = JNP; goto TYPE70;
+ case 0x7c: /* T2 JL Jb */ inst32 = JL; goto TYPE70;
+ case 0x7d: /* T2 JNL Jb */ inst32 = JNL; goto TYPE70;
+ case 0x7e: /* T2 JLE Jb */ inst32 = JLE; goto TYPE70;
+ case 0x7f: /* T2 JNLE Jb */ inst32 = JNLE; goto TYPE70;
+
+ case 0x80:
+ case 0x82:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ADD Eb Ib */
+ instp32328 = ADD;
+TYPE80_0:
+
+ D_Eb(0, RW0, PG_W);
+ D_Ib(1);
+ F_Eb(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 8);
+ P_Eb(0);
+ break;
+
+ case 1: /* T5 OR Eb Ib */ instp32328 = OR; goto TYPE80_0;
+ case 2: /* T5 ADC Eb Ib */ instp32328 = ADC; goto TYPE80_0;
+ case 3: /* T5 SBB Eb Ib */ instp32328 = SBB; goto TYPE80_0;
+ case 4: /* T5 AND Eb Ib */ instp32328 = AND; goto TYPE80_0;
+ case 5: /* T5 SUB Eb Ib */ instp32328 = SUB; goto TYPE80_0;
+ case 6: /* T5 XOR Eb Ib */ instp32328 = XOR; goto TYPE80_0;
+
+ case 7: /* T6 CMP Eb Ib */
+ inst32328 = CMP;
+TYPE80_7:
+
+ D_Eb(0, RO0, PG_R);
+ D_Ib(1);
+ F_Eb(0);
+ (*inst32328)(ops[0].sng, ops[1].sng, 8);
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0x81:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ADD Ev Iv */
+ instp32328 = ADD;
+TYPE81_0:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Iw(1);
+ F_Ew(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Id(1);
+ F_Ed(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 1: /* T5 OR Ev Iv */ instp32328 = OR; goto TYPE81_0;
+ case 2: /* T5 ADC Ev Iv */ instp32328 = ADC; goto TYPE81_0;
+ case 3: /* T5 SBB Ev Iv */ instp32328 = SBB; goto TYPE81_0;
+ case 4: /* T5 AND Ev Iv */ instp32328 = AND; goto TYPE81_0;
+ case 5: /* T5 SUB Ev Iv */ instp32328 = SUB; goto TYPE81_0;
+ case 6: /* T5 XOR Ev Iv */ instp32328 = XOR; goto TYPE81_0;
+
+ case 7: /* T6 CMP Ev Iv */
+ inst32328 = CMP;
+TYPE81_7:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RO0, PG_R);
+ D_Iw(1);
+ F_Ew(0);
+ (*inst32328)(ops[0].sng, ops[1].sng, 16);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RO0, PG_R);
+ D_Id(1);
+ F_Ed(0);
+ (*inst32328)(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0x83:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ADD Ev Ib */
+ instp32328 = ADD;
+TYPE83_0:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Ix(1);
+ F_Ew(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Iy(1);
+ F_Ed(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 1: /* T5 OR Ev Ib */ instp32328 = OR; goto TYPE83_0;
+ case 2: /* T5 ADC Ev Ib */ instp32328 = ADC; goto TYPE83_0;
+ case 3: /* T5 SBB Ev Ib */ instp32328 = SBB; goto TYPE83_0;
+ case 4: /* T5 AND Ev Ib */ instp32328 = AND; goto TYPE83_0;
+ case 5: /* T5 SUB Ev Ib */ instp32328 = SUB; goto TYPE83_0;
+ case 6: /* T5 XOR Ev Ib */ instp32328 = XOR; goto TYPE83_0;
+
+ case 7: /* T6 CMP Ev Ib */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RO0, PG_R);
+ D_Ix(1);
+ F_Ew(0);
+ CMP(ops[0].sng, ops[1].sng, 16);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RO0, PG_R);
+ D_Iy(1);
+ F_Ed(0);
+ CMP(ops[0].sng, ops[1].sng, 32);
+ }
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0x84: /* T6 TEST Eb Gb */ inst32328 = TEST; goto TYPE38;
+ case 0x85: /* T6 TEST Ev Gv */ inst32328 = TEST; goto TYPE39;
+
+ case 0x86: /* T8 XCHG Eb Gb */
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, RW0, PG_W);
+ D_Gb(1);
+ F_Eb(0);
+ F_Gb(1);
+ XCHG(&ops[0].sng, &ops[1].sng);
+ P_Eb(0);
+ P_Gb(1);
+ break;
+
+ case 0x87: /* T8 XCHG Ev Gv */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Gw(1);
+ F_Ew(0);
+ F_Gw(1);
+ XCHG(&ops[0].sng, &ops[1].sng);
+ P_Ew(0);
+ P_Gw(1);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Gd(1);
+ F_Ed(0);
+ F_Gd(1);
+ XCHG(&ops[0].sng, &ops[1].sng);
+ P_Ed(0);
+ P_Gd(1);
+ }
+ break;
+
+ case 0x88: /* T4 MOV Eb Gb */
+ modRM = GET_INST_BYTE(p);
+ D_Eb(0, WO0, PG_W);
+ D_Gb(1);
+ F_Gb(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Eb(0);
+ break;
+
+ case 0x89: /* T4 MOV Ev Gv */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, WO0, PG_W);
+ D_Gw(1);
+ F_Gw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, WO0, PG_W);
+ D_Gd(1);
+ F_Gd(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ed(0);
+ }
+ break;
+
+ case 0x8a: /* T4 MOV Gb Eb */
+ modRM = GET_INST_BYTE(p);
+ D_Gb(0);
+ D_Eb(1, RO1, PG_R);
+ F_Eb(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Gb(0);
+ break;
+
+ case 0x8b: /* T4 MOV Gv Ev */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Ew(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Ed(1, RO1, PG_R);
+ F_Ed(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Gd(0);
+ }
+ break;
+
+ case 0x8c: /* T4 MOV Ew Nw */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_SEG(modRM) > MAX_VALID_SEG )
+ Int6();
+
+ if ( GET_OPERAND_SIZE() == USE16 || modRM < LOWEST_REG_MODRM )
+ {
+ D_Ew(0, WO0, PG_W);
+ D_Nw(1);
+ F_Nw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ew(0);
+ }
+ else /* USE32 and REGISTER */
+ {
+ D_Rd(0);
+ D_Nw(1);
+ F_Nw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Rd(0);
+ }
+ break;
+
+ case 0x8d: /* T4 LEA Gv M */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_M(1);
+ F_M(1);
+ LEA(&ops[0].sng, ops[1].sng);
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_M(1);
+ F_M(1);
+ LEA(&ops[0].sng, ops[1].sng);
+ P_Gd(0);
+ }
+ break;
+
+ case 0x8e: /* T4 MOV Nw Ew */
+ modRM = GET_INST_BYTE(p);
+ if ( GET_SEG(modRM) > MAX_VALID_SEG || GET_SEG(modRM) == CS_REG )
+ Int6();
+ D_Nw(0);
+ D_Ew(1, RO1, PG_R);
+ F_Ew(1);
+ MOV_SR(ops[0].sng, ops[1].sng);
+ if ( GET_SEG(modRM) == SS_REG )
+ {
+ /* locally update IP - interrupts are supressed after MOV SS,xx */
+ UPDATE_INTEL_IP(p);
+
+ goto NEXT_INST;
+ }
+ break;
+
+ case 0x8f:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T3 POP Ev */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ SET_POP_DISP(2); /* in case they use [ESP] */
+ D_Ew(0, WO0, PG_W);
+ POP(&ops[0].sng);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ SET_POP_DISP(4); /* in case they use [ESP] */
+ D_Ed(0, WO0, PG_W);
+ POP(&ops[0].sng);
+ P_Ed(0);
+ }
+ SET_POP_DISP(0);
+ break;
+
+ case 1: case 2: case 3: case 4: case 5: case 6: case 7:
+ Int6();
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0x90: /* T0 NOP */
+ break;
+
+ case 0x91: /* T8 XCHG F(e)ax Hv */
+ case 0x92:
+ case 0x93:
+ case 0x94:
+ case 0x95:
+ case 0x96:
+ case 0x97:
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ F_Fax(0);
+ D_Hw(1);
+ F_Hw(1);
+ XCHG(&ops[0].sng, &ops[1].sng);
+ P_Fax(0);
+ P_Hw(1);
+ }
+ else /* USE32 */
+ {
+ F_Feax(0);
+ D_Hd(1);
+ F_Hd(1);
+ XCHG(&ops[0].sng, &ops[1].sng);
+ P_Feax(0);
+ P_Hd(1);
+ }
+ break;
+
+ case 0x98: /* T0 CBW */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ CBW();
+ }
+ else /* USE32 */
+ {
+ CWDE();
+ }
+ break;
+
+ case 0x99: /* T0 CWD */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ CWD();
+ }
+ else /* USE32 */
+ {
+ CDQ();
+ }
+ break;
+
+ case 0x9a: /* T2 CALL Ap */
+ instp32 = CALLF;
+ took_absolute_toc = TRUE;
+TYPE9A:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Aw(0);
+ }
+ else /* USE32 */
+ {
+ D_Ad(0);
+ }
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*instp32)(ops[0].mlt);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 0x9b: /* T0 WAIT */
+ if ( GET_MP() && GET_TS() )
+ Int7();
+ WAIT();
+ break;
+
+ case 0x9c: /* T0 PUSHF */
+ if ( GET_VM() == 1 && GET_CPL() > GET_IOPL() )
+ GP((IU16)0, FAULT_CCPU_PUSHF_ACCESS);
+ PUSHF();
+ break;
+
+ case 0x9d: /* T0 POPF */
+ {
+ int oldIF;
+
+ if ( GET_VM() == 1 && GET_CPL() > GET_IOPL() )
+ GP((IU16)0, FAULT_CCPU_POPF_ACCESS);
+
+ oldIF = getIF();
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ POPF();
+ }
+ else /* USE32 */
+ {
+ POPFD();
+ }
+#ifdef PIG
+ if (getIF()==1 && oldIF==0)
+ {
+ /* locally update IP - interrupts are supressed after POPF */
+ UPDATE_INTEL_IP(p);
+
+ /* We need to pig sync one instr *after* an POPF that enabled
+ * interrupts, because the A4CPU might need to take a H/W interrupt
+ */
+
+ single_instruction_delay = TRUE;
+ PIG_SYNCH(CHECK_ALL);
+
+ goto NEXT_INST;
+ }
+#endif /* PIG */
+
+ quick_mode = FALSE;
+ break;
+ }
+
+ case 0x9e: /* T0 SAHF */
+ SAHF();
+ break;
+
+ case 0x9f: /* T0 LAHF */
+ LAHF();
+ break;
+
+ case 0xa0: /* T4 MOV Fal Ob */
+ D_Ob(1, RO1, PG_R);
+ F_Ob(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Fal(0);
+ break;
+
+ case 0xa1: /* T4 MOV F(e)ax Ov */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ow(1, RO1, PG_R);
+ F_Ow(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Fax(0);
+ }
+ else /* USE32 */
+ {
+ D_Od(1, RO1, PG_R);
+ F_Od(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Feax(0);
+ }
+ break;
+
+ case 0xa2: /* T4 MOV Ob Fal */
+ D_Ob(0, WO0, PG_W);
+ F_Fal(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ob(0);
+ break;
+
+ case 0xa3: /* T4 MOV Ov F(e)ax */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ow(0, WO0, PG_W);
+ F_Fax(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ow(0);
+ }
+ else /* USE32 */
+ {
+ D_Od(0, WO0, PG_W);
+ F_Feax(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Od(0);
+ }
+ break;
+
+ case 0xa4: /* T4 MOVSB Yb Xb */
+ STRING_COUNT;
+
+ while ( rep_count )
+ {
+ D_Xb(1, RO1, PG_R);
+ D_Yb(0, WO0, PG_W);
+ F_Xb(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ C_Yb(0);
+ C_Xb(1);
+ P_Yb(0);
+ /* KNOWN BUG #1. */
+ }
+ break;
+
+ case 0xa5: /* T4 MOVSW Yv Xv */
+ STRING_COUNT;
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ while ( rep_count )
+ {
+ D_Xw(1, RO1, PG_R);
+ D_Yw(0, WO0, PG_W);
+ F_Xw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ C_Yw(0);
+ C_Xw(1);
+ P_Yw(0);
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ while ( rep_count )
+ {
+ D_Xd(1, RO1, PG_R);
+ D_Yd(0, WO0, PG_W);
+ F_Xd(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ C_Yd(0);
+ C_Xd(1);
+ P_Yd(0);
+ /* KNOWN BUG #1. */
+ }
+ }
+ break;
+
+ case 0xa6: /* T6 CMPSB Xb Yb */
+ STRING_COUNT;
+
+ while ( rep_count )
+ {
+ D_Xb(0, RO0, PG_R);
+ D_Yb(1, RO1, PG_R);
+ F_Xb(0);
+ F_Yb(1);
+ CMP(ops[0].sng, ops[1].sng, 8);
+ rep_count--;
+ C_Xb(0);
+ C_Yb(1);
+ if ( rep_count &&
+ ( repeat == REP_E && GET_ZF() == 0 ||
+ repeat == REP_NE && GET_ZF() == 1 )
+ )
+ break;
+ /* KNOWN BUG #1. */
+ }
+ break;
+
+ case 0xa7: /* T6 CMPSW Xv Yv */
+ STRING_COUNT;
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ while ( rep_count )
+ {
+ D_Xw(0, RO0, PG_R);
+ D_Yw(1, RO1, PG_R);
+ F_Xw(0);
+ F_Yw(1);
+ CMP(ops[0].sng, ops[1].sng, 16);
+ rep_count--;
+ C_Xw(0);
+ C_Yw(1);
+ if ( rep_count &&
+ ( repeat == REP_E && GET_ZF() == 0 ||
+ repeat == REP_NE && GET_ZF() == 1 )
+ )
+ break;
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ while ( rep_count )
+ {
+ D_Xd(0, RO0, PG_R);
+ D_Yd(1, RO1, PG_R);
+ F_Xd(0);
+ F_Yd(1);
+ CMP(ops[0].sng, ops[1].sng, 32);
+ rep_count--;
+ C_Xd(0);
+ C_Yd(1);
+ if ( rep_count &&
+ ( repeat == REP_E && GET_ZF() == 0 ||
+ repeat == REP_NE && GET_ZF() == 1 )
+ )
+ break;
+ /* KNOWN BUG #1. */
+ }
+ }
+ break;
+
+ case 0xa8: /* T6 TEST Fal Ib */ inst32328 = TEST; goto TYPE3C;
+ case 0xa9: /* T6 TEST F(e)ax Iv */ inst32328 = TEST; goto TYPE3D;
+
+ case 0xaa: /* T4 STOSB Yb Fal */
+ STRING_COUNT;
+
+ F_Fal(1);
+ while ( rep_count )
+ {
+ D_Yb(0, WO0, PG_W);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ C_Yb(0);
+ P_Yb(0);
+ /* KNOWN BUG #1. */
+ }
+ break;
+
+ case 0xab: /* T4 STOSW Yv F(e)ax */
+ STRING_COUNT;
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ F_Fax(1);
+ while ( rep_count )
+ {
+ D_Yw(0, WO0, PG_W);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ C_Yw(0);
+ P_Yw(0);
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ F_Feax(1);
+ while ( rep_count )
+ {
+ D_Yd(0, WO0, PG_W);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ C_Yd(0);
+ P_Yd(0);
+ /* KNOWN BUG #1. */
+ }
+ }
+ break;
+
+ case 0xac: /* T4 LODSB Fal Xb */
+ STRING_COUNT;
+
+ while ( rep_count )
+ {
+ D_Xb(1, RO1, PG_R);
+ F_Xb(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ P_Fal(0);
+ C_Xb(1);
+ /* KNOWN BUG #1. */
+ }
+ break;
+
+ case 0xad: /* T4 LODSW F(e)ax Xv */
+ STRING_COUNT;
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ while ( rep_count )
+ {
+ D_Xw(1, RO1, PG_R);
+ F_Xw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ P_Fax(0);
+ C_Xw(1);
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ while ( rep_count )
+ {
+ D_Xd(1, RO1, PG_R);
+ F_Xd(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ rep_count--;
+ P_Feax(0);
+ C_Xd(1);
+ /* KNOWN BUG #1. */
+ }
+ }
+ break;
+
+ case 0xae: /* T6 SCASB Fal Yb */
+ STRING_COUNT;
+
+ F_Fal(0);
+ while ( rep_count )
+ {
+ D_Yb(1, RO1, PG_R);
+ F_Yb(1);
+ CMP(ops[0].sng, ops[1].sng, 8);
+ rep_count--;
+ C_Yb(1);
+ if ( rep_count &&
+ ( repeat == REP_E && GET_ZF() == 0 ||
+ repeat == REP_NE && GET_ZF() == 1 )
+ )
+ break;
+ /* KNOWN BUG #1. */
+ }
+ break;
+
+ case 0xaf: /* T6 SCASW F(e)ax Yv */
+ STRING_COUNT;
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ F_Fax(0);
+ while ( rep_count )
+ {
+ D_Yw(1, RO1, PG_R);
+ F_Yw(1);
+ CMP(ops[0].sng, ops[1].sng, 16);
+ rep_count--;
+ C_Yw(1);
+ if ( rep_count &&
+ ( repeat == REP_E && GET_ZF() == 0 ||
+ repeat == REP_NE && GET_ZF() == 1 )
+ )
+ break;
+ /* KNOWN BUG #1. */
+ }
+ }
+ else /* USE32 */
+ {
+ F_Feax(0);
+ while ( rep_count )
+ {
+ D_Yd(1, RO1, PG_R);
+ F_Yd(1);
+ CMP(ops[0].sng, ops[1].sng, 32);
+ rep_count--;
+ C_Yd(1);
+ if ( rep_count &&
+ ( repeat == REP_E && GET_ZF() == 0 ||
+ repeat == REP_NE && GET_ZF() == 1 )
+ )
+ break;
+ /* KNOWN BUG #1. */
+ }
+ }
+ break;
+
+ case 0xb0: /* T4 MOV Hb Ib */
+ case 0xb1:
+ case 0xb2:
+ case 0xb3:
+ case 0xb4:
+ case 0xb5:
+ case 0xb6:
+ case 0xb7:
+ D_Hb(0);
+ D_Ib(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Hb(0);
+ break;
+
+ case 0xb8: /* T4 MOV Hv Iv */
+ case 0xb9:
+ case 0xba:
+ case 0xbb:
+ case 0xbc:
+ case 0xbd:
+ case 0xbe:
+ case 0xbf:
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Hw(0);
+ D_Iw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Hw(0);
+ }
+ else /* USE32 */
+ {
+ D_Hd(0);
+ D_Id(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Hd(0);
+ }
+ break;
+
+ case 0xc0:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ROL Eb Ib */ instp32328 = ROL; goto TYPE80_0;
+ case 1: /* T5 ROR Eb Ib */ instp32328 = ROR; goto TYPE80_0;
+ case 2: /* T5 RCL Eb Ib */ instp32328 = RCL; goto TYPE80_0;
+ case 3: /* T5 RCR Eb Ib */ instp32328 = RCR; goto TYPE80_0;
+ case 4: /* T5 SHL Eb Ib */ instp32328 = SHL; goto TYPE80_0;
+ case 5: /* T5 SHR Eb Ib */ instp32328 = SHR; goto TYPE80_0;
+ case 6: /* T5 SHL Eb Ib */ instp32328 = SHL; goto TYPE80_0;
+ case 7: /* T5 SAR Eb Ib */ instp32328 = SAR; goto TYPE80_0;
+ }
+
+ case 0xc1:
+ modRM = GET_INST_BYTE(p);
+
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ROL Ev Ib */ instp32328 = ROL; break;
+ case 1: /* T5 ROR Ev Ib */ instp32328 = ROR; break;
+ case 2: /* T5 RCL Ev Ib */ instp32328 = RCL; break;
+ case 3: /* T5 RCR Ev Ib */ instp32328 = RCR; break;
+ case 4: /* T5 SHL Ev Ib */ instp32328 = SHL; break;
+ case 5: /* T5 SHR Ev Ib */ instp32328 = SHR; break;
+ case 6: /* T5 SHL Ev Ib */ instp32328 = SHL; break;
+ case 7: /* T5 SAR Ev Ib */ instp32328 = SAR; break;
+ }
+
+TYPEC1:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ D_Ib(1);
+ F_Ew(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ D_Ib(1);
+ F_Ed(0);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0xc2: /* T2 RET Iw */
+ inst32 = RETN;
+ took_absolute_toc = TRUE;
+TYPEC2:
+
+ D_Iw(0);
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*inst32)(ops[0].sng);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 0xc3: /* T2 RET I0 */
+ inst32 = RETN;
+ took_absolute_toc = TRUE;
+TYPEC3:
+
+ F_I0(0);
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*inst32)(ops[0].sng);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 0xc4: /* T4 LES Gv Mp */
+ instp32p32 = LES;
+TYPEC4:
+
+ modRM = GET_INST_BYTE(p);
+ if (((modRM & 0xfc) == 0xc4) && (instp32p32 == LES)) {
+ /*
+ * It's a c4c? BOP.
+ * The bop routine itself will read the argument, but
+ * we read it here so that we get the next EIP correct.
+ */
+ int nField, i;
+
+ D_Ib(0);
+ nField = modRM & 3;
+ immed = 0;
+ for (i = 0; i < nField; i++)
+ {
+ immed |= (ULONG)GET_INST_BYTE(p);
+ immed <<= 8;
+ }
+ immed |= ops[0].sng;
+#ifdef PIG
+ if (immed == 0xfe)
+ SET_EIP(CCPU_save_EIP);
+ else
+ UPDATE_INTEL_IP(p);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_NO_EXEC); /* Everything checkable up to this point */
+#else /* PIG */
+ UPDATE_INTEL_IP(p);
+ if ((immed & 0xff) == 0xfe)
+ {
+ switch(immed)
+ {
+#if defined(SFELLOW)
+ case 0x03fe:
+ SfdelayUSecs();
+ break;
+ case 0x05fe:
+ SfsasTouchBop();
+ break;
+ case 0x06fe:
+ SfscatterGatherSasTouch();
+ break;
+#endif /* SFELLOW */
+ case 0xfe:
+ c_cpu_unsimulate();
+ /* Never returns (?) */
+ default:
+ EDL_fast_bop(immed);
+ break;
+ }
+ }
+ else
+ {
+ in_C = 1;
+ bop(ops[0].sng);
+ in_C = 0;
+ }
+ CANCEL_HOST_IP();
+ SYNCH_TICK();
+#endif /* PIG */
+ break;
+ }
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Gw(0);
+ D_Mp16(1, RO1, PG_R);
+ F_Mp16(1);
+ (*instp32p32)(&ops[0].sng, ops[1].mlt);
+ P_Gw(0);
+ }
+ else /* USE32 */
+ {
+ D_Gd(0);
+ D_Mp32(1, RO1, PG_R);
+ F_Mp32(1);
+ (*instp32p32)(&ops[0].sng, ops[1].mlt);
+ P_Gd(0);
+ }
+ break;
+
+ case 0xc5: /* T4 LDS Gv Mp */
+ instp32p32 = LDS;
+ goto TYPEC4;
+
+ case 0xc6:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T4 MOV Eb Ib */
+ D_Eb(0, WO0, PG_W);
+ D_Ib(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Eb(0);
+ break;
+
+ case 1: case 2: case 3: case 4: case 5: case 6: case 7:
+ Int6();
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0xc7:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T4 MOV Ev Iv */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, WO0, PG_W);
+ D_Iw(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, WO0, PG_W);
+ D_Id(1);
+ ops[0].sng = ops[1].sng; /*MOV(&ops[0].sng, ops[1].sng);*/
+ P_Ed(0);
+ }
+ break;
+
+ case 1: case 2: case 3: case 4: case 5: case 6: case 7:
+ Int6();
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0xc8: /* T6 ENTER Iw Ib */
+ D_Iw(0);
+ D_Ib(1);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ ENTER16(ops[0].sng, ops[1].sng);
+ }
+ else /* USE32 */
+ {
+ ENTER32(ops[0].sng, ops[1].sng);
+ }
+ break;
+
+ case 0xc9: /* T0 LEAVE */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ LEAVE16();
+ }
+ else /* USE32 */
+ {
+ LEAVE32();
+ }
+ break;
+
+ case 0xca: /* T2 RET Iw */
+ inst32 = RETF;
+ took_absolute_toc = TRUE;
+ goto TYPEC2;
+ case 0xcb: /* T2 RET I0 */
+ inst32 = RETF;
+ took_absolute_toc = TRUE;
+ goto TYPEC3;
+
+ case 0xcc: /* T2 INT I3 */
+ took_absolute_toc = TRUE;
+ F_I3(0);
+ UPDATE_INTEL_IP(p);
+ start_trap = 0; /* clear any pending TF exception */
+ INTx(ops[0].sng);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 0xcd: /* T2 INT Ib */
+ if ( GET_VM() == 1 && GET_CPL() > GET_IOPL() )
+ GP((IU16)0, FAULT_CCPU_INT_ACCESS);
+ took_absolute_toc = TRUE;
+ D_Ib(0);
+ UPDATE_INTEL_IP(p);
+ start_trap = 0; /* clear any pending TF exception */
+ INTx(ops[0].sng);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 0xce: /* T0 INTO */
+ if ( GET_OF() )
+ {
+ took_absolute_toc = TRUE;
+ UPDATE_INTEL_IP(p);
+ start_trap = 0; /* clear any pending TF exception */
+ INTO();
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ }
+ break;
+
+ case 0xcf: /* T0 IRET */
+ if ( GET_VM() == 1 && GET_CPL() > GET_IOPL() )
+ GP((IU16)0, FAULT_CCPU_IRET_ACCESS);
+ took_absolute_toc = TRUE;
+ UPDATE_INTEL_IP(p);
+ IRET();
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ /* Dont do interrupt checks etc after an IRET */
+#ifdef PIG
+ /* If the destination is going to page fault, or need
+ * accessing, then the EDL CPU will do so before issuing
+ * the pig synch. We use the dasm386 decode to prefetch
+ * a single instruction which mimics the EDL Cpu's behaviour
+ * when close to a page boundary.
+ */
+ prefetch_1_instruction(); /* Will PF if destination not present */
+ ccpu_synch_count++;
+ c_cpu_unsimulate();
+#endif /* PIG */
+
+ goto NEXT_INST;
+ break;
+
+ case 0xd0:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ROL Eb I1 */ instp32328 = ROL; break;
+ case 1: /* T5 ROR Eb I1 */ instp32328 = ROR; break;
+ case 2: /* T5 RCL Eb I1 */ instp32328 = RCL; break;
+ case 3: /* T5 RCR Eb I1 */ instp32328 = RCR; break;
+ case 4: /* T5 SHL Eb I1 */ instp32328 = SHL; break;
+ case 5: /* T5 SHR Eb I1 */ instp32328 = SHR; break;
+ case 6: /* T5 SHL Eb I1 */ instp32328 = SHL; break;
+ case 7: /* T5 SAR Eb I1 */ instp32328 = SAR; break;
+ }
+ D_Eb(0, RW0, PG_W);
+ F_Eb(0);
+ F_I1(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 8);
+ P_Eb(0);
+ break;
+
+ case 0xd1:
+ modRM = GET_INST_BYTE(p);
+
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ROL Ev I1 */ instp32328 = ROL; break;
+ case 1: /* T5 ROR Ev I1 */ instp32328 = ROR; break;
+ case 2: /* T5 RCL Ev I1 */ instp32328 = RCL; break;
+ case 3: /* T5 RCR Ev I1 */ instp32328 = RCR; break;
+ case 4: /* T5 SHL Ev I1 */ instp32328 = SHL; break;
+ case 5: /* T5 SHR Ev I1 */ instp32328 = SHR; break;
+ case 6: /* T5 SHL Ev I1 */ instp32328 = SHL; break;
+ case 7: /* T5 SAR Ev I1 */ instp32328 = SAR; break;
+ }
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ F_Ew(0);
+ F_I1(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ F_Ed(0);
+ F_I1(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0xd2:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ROL Eb Fcl */ instp32328 = ROL; break;
+ case 1: /* T5 ROR Eb Fcl */ instp32328 = ROR; break;
+ case 2: /* T5 RCL Eb Fcl */ instp32328 = RCL; break;
+ case 3: /* T5 RCR Eb Fcl */ instp32328 = RCR; break;
+ case 4: /* T5 SHL Eb Fcl */ instp32328 = SHL; break;
+ case 5: /* T5 SHR Eb Fcl */ instp32328 = SHR; break;
+ case 6: /* T5 SHL Eb Fcl */ instp32328 = SHL; break;
+ case 7: /* T5 SAR Eb Fcl */ instp32328 = SAR; break;
+ }
+ D_Eb(0, RW0, PG_W);
+ F_Eb(0);
+ F_Fcl(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 8);
+ P_Eb(0);
+ break;
+
+ case 0xd3:
+ modRM = GET_INST_BYTE(p);
+
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T5 ROL Ev Fcl */ instp32328 = ROL; break;
+ case 1: /* T5 ROR Ev Fcl */ instp32328 = ROR; break;
+ case 2: /* T5 RCL Ev Fcl */ instp32328 = RCL; break;
+ case 3: /* T5 RCR Ev Fcl */ instp32328 = RCR; break;
+ case 4: /* T5 SHL Ev Fcl */ instp32328 = SHL; break;
+ case 5: /* T5 SHR Ev Fcl */ instp32328 = SHR; break;
+ case 6: /* T5 SHL Ev Fcl */ instp32328 = SHL; break;
+ case 7: /* T5 SAR Ev Fcl */ instp32328 = SAR; break;
+ }
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ F_Ew(0);
+ F_Fcl(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ F_Ed(0);
+ F_Fcl(1);
+ (*instp32328)(&ops[0].sng, ops[1].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 0xd4: /* T2 AAM Ib */
+ inst32 = AAM;
+TYPED4:
+
+ D_Ib(0);
+ (*inst32)(ops[0].sng);
+ break;
+
+ case 0xd5: /* T2 AAD Ib */ inst32 = AAD; goto TYPED4;
+
+ case 0xd6: /* T2 BOP Ib */
+ D_Ib(0);
+ UPDATE_INTEL_IP(p);
+
+ PIG_SYNCH(CHECK_NO_EXEC);
+
+#ifndef PIG
+ if (ops[0].sng == 0xfe)
+ {
+ c_cpu_unsimulate();
+ }
+ in_C = 1;
+ bop(ops[0].sng);
+ in_C = 0;
+ CANCEL_HOST_IP();
+#endif /* PIG */
+ SYNCH_TICK();
+ break;
+
+ case 0xd7: /* T2 XLAT Z */
+ D_Z(0, RO0, PG_R);
+ F_Z(0);
+ XLAT(ops[0].sng);
+ break;
+
+ case 0xd8: /* T2 NPX ??? */
+ case 0xd9:
+ case 0xda:
+ case 0xdb:
+ case 0xdc:
+ case 0xdd:
+ case 0xde:
+ case 0xdf:
+ if ( GET_EM() || GET_TS() )
+ Int7();
+
+ if (NpxIntrNeeded)
+ {
+ TakeNpxExceptionInt(); /* should set up ISR */
+ goto DO_INST; /* run ISR */
+ }
+
+#ifdef PIG
+ /* Must get npx registers from test CPU
+ * This is lazily done for efficiency.
+ */
+ c_checkCpuNpxRegisters();
+#endif /* PIG */
+
+ modRM = GET_INST_BYTE(p);
+ ZFRSRVD(((opcode-0xd8)*0x100) + modRM);
+ break;
+
+ case 0xe0: /* T2 LOOPNE Jb */
+ inst32 = LOOPNE16;
+ inst232 = LOOPNE32;
+TYPEE0:
+
+ D_Jb(0);
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ if ( GET_ADDRESS_SIZE() == USE16 )
+ {
+ (*inst32)(ops[0].sng);
+ }
+ else /* USE32 */
+ {
+ (*inst232)(ops[0].sng);
+ }
+ CANCEL_HOST_IP();
+
+#ifdef PIG
+ if (single_instruction_delay && !took_relative_jump)
+ {
+ if (single_instruction_delay_enable)
+ {
+ save_last_xcptn_details("STI/POPF blindspot\n", 0, 0, 0, 0, 0);
+ PIG_SYNCH(CHECK_NO_EXEC);
+ }
+ else
+ {
+ save_last_xcptn_details("STI/POPF problem\n", 0, 0, 0, 0, 0);
+ }
+ break;
+ }
+#ifdef SYNCH_BOTH_WAYS
+ took_relative_jump = TRUE;
+#endif /* SYNCH_BOTH_WAYS */
+ if (took_relative_jump)
+ {
+ PIG_SYNCH(CHECK_ALL);
+ }
+#endif /* PIG */
+ break;
+
+ case 0xe1: /* T2 LOOPE Jb */
+ inst32 = LOOPE16;
+ inst232 = LOOPE32;
+ goto TYPEE0;
+
+ case 0xe2: /* T2 LOOP Jb */
+ inst32 = LOOP16;
+ inst232 = LOOP32;
+ goto TYPEE0;
+
+ case 0xe3: /* T2 JCXZ Jb */
+ inst32 = JCXZ;
+ inst232 = JECXZ;
+ goto TYPEE0;
+
+ case 0xe4: /* T4 INB Fal Ib */
+ D_Ib(1);
+
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, BYTE_WIDTH);
+
+ IN8(&ops[0].sng, ops[1].sng);
+ P_Fal(0);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_NO_AL);
+ quick_mode = FALSE;
+ break;
+
+ case 0xe5: /* T4 INW F(e)ax Ib */
+ D_Ib(1);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, WORD_WIDTH);
+
+ IN16(&ops[0].sng, ops[1].sng);
+ P_Fax(0);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_NO_AX);
+ }
+ else /* USE32 */
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, DWORD_WIDTH);
+
+ IN32(&ops[0].sng, ops[1].sng);
+ P_Feax(0);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_NO_EAX);
+ }
+ quick_mode = FALSE;
+ break;
+
+ case 0xe6: /* T6 OUTB Ib Fal */
+ D_Ib(0);
+
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, BYTE_WIDTH);
+
+ F_Fal(1);
+ OUT8(ops[0].sng, ops[1].sng);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+ if (ops[0].sng == 0x60)
+ {
+ /* This may be a change of A20 wrap status */
+ PIG_SYNCH(CHECK_NO_A20);
+ }
+ else
+ {
+ PIG_SYNCH(CHECK_ALL);
+ }
+#else
+ SYNCH_TICK();
+#endif
+ break;
+
+ case 0xe7: /* T6 OUTW Ib F(e)ax */
+ D_Ib(0);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, WORD_WIDTH);
+
+ F_Fax(1);
+ OUT16(ops[0].sng, ops[1].sng);
+ }
+ else /* USE32 */
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, DWORD_WIDTH);
+
+ F_Feax(1);
+ OUT32(ops[0].sng, ops[1].sng);
+ }
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_ALL);
+ quick_mode = FALSE;
+ break;
+
+ case 0xe8: /* T2 CALL Jv */
+ inst32 = CALLR;
+ took_absolute_toc = TRUE;
+TYPEE8:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Jw(0);
+ }
+ else /* USE32 */
+ {
+ D_Jd(0);
+ }
+
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*inst32)(ops[0].sng);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 0xe9: /* T2 JMP Jv */
+ inst32 = JMPR;
+ took_absolute_toc = TRUE;
+ goto TYPEE8;
+ case 0xea: /* T2 JMP Ap */
+ instp32 = JMPF;
+ took_absolute_toc = TRUE;
+ goto TYPE9A;
+ case 0xeb: /* T2 JMP Jb */
+ inst32 = JMPR;
+ took_absolute_toc = TRUE;
+ goto TYPE70;
+
+ case 0xec: /* T4 INB Fal Fdx */
+ F_Fdx(1);
+
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, BYTE_WIDTH);
+
+ IN8(&ops[0].sng, ops[1].sng);
+ P_Fal(0);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_NO_AL);
+ quick_mode = FALSE;
+ break;
+
+ case 0xed: /* T4 INW F(e)ax Fdx */
+ F_Fdx(1);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, WORD_WIDTH);
+
+ IN16(&ops[0].sng, ops[1].sng);
+ P_Fax(0);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_NO_AX);
+ }
+ else /* USE32 */
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[1].sng, DWORD_WIDTH);
+
+ IN32(&ops[0].sng, ops[1].sng);
+ P_Feax(0);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_NO_EAX);
+ }
+ quick_mode = FALSE;
+ break;
+
+ case 0xee: /* T6 OUTB Fdx Fal */
+ F_Fdx(0);
+
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, BYTE_WIDTH);
+
+ F_Fal(1);
+ OUT8(ops[0].sng, ops[1].sng);
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_ALL);
+ quick_mode = FALSE;
+ break;
+
+ case 0xef: /* T6 OUTW Fdx F(e)ax */
+ F_Fdx(0);
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, WORD_WIDTH);
+
+ F_Fax(1);
+ OUT16(ops[0].sng, ops[1].sng);
+ }
+ else /* USE32 */
+ {
+ if ( GET_CPL() > GET_IOPL() || GET_VM() )
+ check_io_permission_map(ops[0].sng, DWORD_WIDTH);
+
+ F_Feax(1);
+ OUT32(ops[0].sng, ops[1].sng);
+ }
+#ifdef PIG
+ UPDATE_INTEL_IP(p);
+#endif
+ PIG_SYNCH(CHECK_ALL);
+ quick_mode = FALSE;
+ break;
+
+ case 0xf0: /* T0 LOCK */
+ CHECK_PREFIX_LENGTH();
+ goto DECODE; /* NB. Incorrect Emulation! */
+
+ case 0xf1:
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0xf2:
+ repeat = REP_NE;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0xf3:
+ repeat = REP_E;
+ CHECK_PREFIX_LENGTH();
+ goto DECODE;
+
+ case 0xf4: /* T0 HLT */
+ if ( GET_CPL() != 0 )
+ GP((IU16)0, FAULT_CCPU_HLT_ACCESS);
+
+ /* Wait for an interrupt */
+
+ UPDATE_INTEL_IP(p);
+ PIG_SYNCH(CHECK_ALL);
+
+#ifndef PIG
+
+ while ( TRUE )
+ {
+ /* RESET ends the halt state. */
+ if ( cpu_interrupt_map & CPU_RESET_EXCEPTION_MASK )
+ break;
+
+ /* An enabled INTR ends the halt state. */
+ if ( GET_IF() && cpu_interrupt_map & CPU_HW_INT_MASK )
+ break;
+
+ /* As time goes by. */
+ if (cpu_interrupt_map & CPU_SIGALRM_EXCEPTION_MASK)
+ {
+ cpu_interrupt_map &= ~CPU_SIGALRM_EXCEPTION_MASK;
+ host_timer_event();
+ }
+
+#ifndef PROD
+ if (cpu_interrupt_map & CPU_SAD_EXCEPTION_MASK)
+ {
+ cpu_interrupt_map &= ~CPU_SAD_EXCEPTION_MASK;
+ force_yoda();
+ }
+#endif /* PROD */
+
+ SYNCH_TICK();
+ QUICK_EVENT_TICK();
+ }
+ quick_mode = FALSE;
+
+#endif /* PIG */
+
+ break;
+
+ case 0xf5: /* T0 CMC */
+ CMC();
+ break;
+
+ case 0xf6:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T6 TEST Eb Ib */
+ case 1:
+ inst32328 = TEST;
+ goto TYPE80_7;
+
+ case 2: /* T1 NOT Eb */
+ D_Eb(0, RW0, PG_W);
+ F_Eb(0);
+ NOT(&ops[0].sng);
+ P_Eb(0);
+ break;
+
+ case 3: /* T1 NEG Eb */
+ instp328 = NEG;
+TYPEF6_3:
+
+ D_Eb(0, RW0, PG_W);
+ F_Eb(0);
+ (*instp328)(&ops[0].sng, 8);
+ P_Eb(0);
+ break;
+
+ case 4: /* T5 MUL Fal Eb */
+ instp3232 = MUL8;
+TYPEF6_4:
+
+ D_Eb(1, RO1, PG_R);
+ F_Fal(0);
+ F_Eb(1);
+ (*instp3232)(&ops[0].sng, ops[1].sng);;
+ P_Fal(0);
+ break;
+
+ case 5: /* T5 IMUL Fal Eb */ instp3232 = IMUL8; goto TYPEF6_4;
+
+ case 6: /* T2 DIV Eb */
+ inst32 = DIV8;
+TYPEF6_6:
+
+ D_Eb(0, RO0, PG_R);
+ F_Eb(0);
+ (*inst32)(ops[0].sng);
+ break;
+
+ case 7: /* T2 IDIV Eb */ inst32 = IDIV8; goto TYPEF6_6;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0xf7:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T6 TEST Ev Iv */
+ case 1:
+ inst32328 = TEST;
+ goto TYPE81_7;
+
+ case 2: /* T1 NOT Ew */
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ F_Ew(0);
+ NOT(&ops[0].sng);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ F_Ed(0);
+ NOT(&ops[0].sng);
+ P_Ed(0);
+ }
+ break;
+
+ case 3: /* T1 NEG Ew */
+ instp328 = NEG;
+TYPEF7_3:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RW0, PG_W);
+ F_Ew(0);
+ (*instp328)(&ops[0].sng, 16);
+ P_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RW0, PG_W);
+ F_Ed(0);
+ (*instp328)(&ops[0].sng, 32);
+ P_Ed(0);
+ }
+ break;
+
+ case 4: /* T5 MUL F(e)ax Ev */
+ instp3232 = MUL16;
+ inst2p3232 = MUL32;
+TYPEF7_4:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(1, RO1, PG_R);
+ F_Fax(0);
+ F_Ew(1);
+ (*instp3232)(&ops[0].sng, ops[1].sng);;
+ P_Fax(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(1, RO1, PG_R);
+ F_Feax(0);
+ F_Ed(1);
+ (*inst2p3232)(&ops[0].sng, ops[1].sng);
+ P_Feax(0);
+ }
+ break;
+
+ case 5: /* T5 IMUL F(e)ax Ev */
+ instp3232 = IMUL16;
+ inst2p3232 = IMUL32;
+ goto TYPEF7_4;
+
+ case 6: /* T2 DIV Ev */
+ inst32 = DIV16;
+ inst232 = DIV32;
+TYPEF7_6:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ (*inst32)(ops[0].sng);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ (*inst232)(ops[0].sng);
+ }
+ break;
+
+ case 7: /* T5 IDIV Ev */
+ inst32 = IDIV16;
+ inst232 = IDIV32;
+ goto TYPEF7_6;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+
+ case 0xf8: /* T0 CLC */
+ CLC();
+ break;
+
+ case 0xf9: /* T0 STC */
+ STC();
+ break;
+
+ case 0xfa: /* T0 CLI */
+ if ( GET_CPL() > GET_IOPL() )
+ GP((IU16)0, FAULT_CCPU_CLI_ACCESS);
+ CLI();
+ break;
+
+ case 0xfb: /* T0 STI */
+ if ( GET_CPL() > GET_IOPL() )
+ GP((IU16)0, FAULT_CCPU_STI_ACCESS);
+ STI();
+
+ /* locally update IP - interrupts are supressed after STI */
+ UPDATE_INTEL_IP(p);
+
+#ifdef PIG
+ /* We need to pig sync one instr *after* an STI that enabled
+ * interrupts, because the A4CPU might need to take a H/W interrupt
+ */
+ single_instruction_delay = TRUE;
+ PIG_SYNCH(CHECK_ALL);
+#endif /* PIG */
+ goto NEXT_INST;
+
+ case 0xfc: /* T0 CLD */
+ CLD();
+ break;
+
+ case 0xfd: /* T0 STD */
+ STD();
+ break;
+
+ case 0xfe:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T1 INC Eb */ instp328 = INC; goto TYPEF6_3;
+ case 1: /* T1 DEC Eb */ instp328 = DEC; goto TYPEF6_3;
+
+ case 2: case 3: case 4: case 5: case 6: case 7:
+ Int6();
+ break;
+ }
+ break;
+
+ case 0xff:
+ modRM = GET_INST_BYTE(p);
+ switch ( GET_XXX(modRM) )
+ {
+ case 0: /* T1 INC Ev */ instp328 = INC; goto TYPEF7_3;
+ case 1: /* T1 DEC Ev */ instp328 = DEC; goto TYPEF7_3;
+
+ case 2: /* T2 CALL Ev */
+ inst32 = CALLN;
+ took_absolute_toc = TRUE;
+TYPEFF_2:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ }
+ else /* USE32 */
+ {
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ }
+
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*inst32)(ops[0].sng);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 3: /* T2 CALL Mp */
+ instp32 = CALLF;
+ took_absolute_toc = TRUE;
+TYPEFF_3:
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ {
+ D_Mp16(0, RO0, PG_R);
+ F_Mp16(0);
+ }
+ else /* USE32 */
+ {
+ D_Mp32(0, RO0, PG_R);
+ F_Mp32(0);
+ }
+
+ UPDATE_INTEL_IP_USE_OP_SIZE(p);
+ (*instp32)(ops[0].mlt);
+ CANCEL_HOST_IP();
+ PIG_SYNCH(CHECK_ALL);
+ break;
+
+ case 4: /* T2 JMP Ev */
+ inst32 = JMPN;
+ took_absolute_toc = TRUE;
+ goto TYPEFF_2;
+ case 5: /* T2 JMP Mp */
+ instp32 = JMPF;
+ took_absolute_toc = TRUE;
+ goto TYPEFF_3;
+ case 6: /* T2 PUSH Ev */
+ inst32 = PUSH;
+ inst232 = PUSH;
+ goto TYPEF7_6;
+
+ case 7:
+ Int6();
+ break;
+ } /* end switch ( GET_XXX(modRM) ) */
+ break;
+ } /* end switch ( opcode ) */
+
+ /* >>>>> Instruction Completed. <<<<< */
+
+ /* Reset default mode */
+ SET_OPERAND_SIZE(GET_CS_AR_X());
+ SET_ADDRESS_SIZE(GET_CS_AR_X());
+
+ /*
+ Increment instruction pointer.
+ NB. For most instructions we increment the IP after processing
+ the instruction, however all users of the IP (eg flow of control)
+ instructions are coded on the basis that IP has already been
+ updated, so where necessary we update IP before the instruction.
+ In those cases p_start is also updated so that this code can
+ tell that IP has already been updated.
+ */
+ if ( p != p_start )
+ UPDATE_INTEL_IP(p);
+
+ /*
+ Move start of inst to the next inst. We have successfully
+ completed instruction and are now going on to inter-instruction
+ checks.
+ */
+ CCPU_save_EIP = GET_EIP();
+
+ /*
+ Now check for interrupts/external events/breakpoints...
+ */
+
+ if ( quick_mode && GET_DR(DR_DSR) == 0 )
+ goto DO_INST;
+
+#ifdef SYNCH_TIMERS
+ CHECK_INTERRUPT:
+#endif /* SYNCH_TIMERS */
+ quick_mode = FALSE;
+
+ /* Action RESET first. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+#ifdef SYNCH_TIMERS
+ if (took_absolute_toc || took_relative_jump)
+#endif /* SYNCH_TIMERS */
+ if (cpu_interrupt_map & CPU_RESET_EXCEPTION_MASK)
+ {
+ cpu_interrupt_map &= ~CPU_RESET_EXCEPTION_MASK;
+ c_cpu_reset();
+ doing_contributory = FALSE;
+ doing_page_fault = FALSE;
+ doing_double_fault = FALSE;
+ doing_fault = FALSE;
+ EXT = INTERNAL;
+ SET_POP_DISP(0);
+ goto NEXT_INST;
+ }
+
+ /* Action Insignia (ie non Intel) Processing. <<<<<<<<<<<<<<< */
+
+#ifdef SYNCH_TIMERS
+ if (took_absolute_toc || took_relative_jump)
+#endif /* SYNCH_TIMERS */
+ if (cpu_interrupt_map & CPU_SIGALRM_EXCEPTION_MASK)
+ {
+ cpu_interrupt_map &= ~CPU_SIGALRM_EXCEPTION_MASK;
+ host_timer_event();
+ }
+
+ if (cpu_interrupt_map & CPU_SAD_EXCEPTION_MASK)
+ {
+ cpu_interrupt_map &= ~CPU_SAD_EXCEPTION_MASK;
+ force_yoda();
+ }
+
+ /* INTEL inter instruction processing. <<<<<<<<<<<<<<<<<<<<<<<*/
+
+ /* Reset default mode */
+ SET_OPERAND_SIZE(GET_CS_AR_X());
+ SET_ADDRESS_SIZE(GET_CS_AR_X());
+
+ /* Check for single step trap */
+ if ( start_trap )
+ {
+ SET_DR(DR_DSR, GET_DR(DR_DSR) | DSR_BS_MASK); /* set BS */
+ Int1_t(); /* take TF trap */
+ }
+
+ /* check for debug traps */
+ if ( GET_DR(DR_DSR) &
+ (DSR_BT_MASK | DSR_B3_MASK | DSR_B2_MASK | DSR_B1_MASK |
+ DSR_B0_MASK) )
+ {
+ Int1_t(); /* at least one breakpoint set from:-
+ T-bit or DATA Breakpoints */
+ }
+
+ if ( nr_inst_break && GET_RF() == 0 )
+ {
+ check_for_inst_exception(GET_CS_BASE() + GET_EIP());
+ if ( GET_DR(DR_DSR) )
+ {
+ Int1_f(); /* a CODE Breakpoint triggered */
+ }
+ }
+
+#ifdef SYNCH_TIMERS
+ if (took_absolute_toc || took_relative_jump)
+#endif /* SYNCH_TIMERS */
+#ifndef SFELLOW
+ if (GET_IF() && (cpu_interrupt_map & CPU_HW_INT_MASK))
+ {
+
+/*
+ * IRET hooks aren't yet used by the C CPU, but we might want to do in
+ * future.
+ */
+
+ IU32 hook_address;
+
+ cpu_hw_interrupt_number = ica_intack(&hook_address);
+ cpu_interrupt_map &= ~CPU_HW_INT_MASK;
+ EXT = EXTERNAL;
+ SYNCH_TICK();
+ do_intrupt(cpu_hw_interrupt_number, FALSE, FALSE, (IU16)0);
+ CCPU_save_EIP = GET_EIP(); /* to reflect IP change */
+ }
+#else /* SFELLOW */
+ if (GET_IF() && (cpu_interrupt_map & (CPU_HW_INT_MASK | CPU_HW_NPX_INT_MASK)))
+ {
+ /* service any pending real H/W interrupt first */
+ if (cpu_interrupt_map & CPU_HW_INT_MASK)
+ {
+ cpu_hw_interrupt_number = ica_intack();
+ cpu_interrupt_map &= ~CPU_HW_INT_MASK;
+ EXT = EXTERNAL;
+ do_intrupt(cpu_hw_interrupt_number, FALSE, FALSE, (IU16)0);
+ CCPU_save_EIP = GET_EIP(); /* to reflect IP change */
+ }
+ else
+ if (cpu_interrupt_map & CPU_HW_NPX_INT_MASK)
+ {
+ cpu_hw_interrupt_number = IRQ5_SLAVE_PIC + VectorBase8259Slave();
+ cpu_interrupt_map &= ~CPU_HW_NPX_INT_MASK;
+ EXT = EXTERNAL;
+ do_intrupt(cpu_hw_interrupt_number, FALSE, FALSE, (IU16)0);
+ CCPU_save_EIP = GET_EIP(); /* to reflect IP change */
+ }
+ }
+#endif /* SFELLOW */
+
+#ifdef PIG
+ if ( pig_synch_required )
+ {
+ if (IgnoringThisSynchPoint(GET_CS_SELECTOR(), GET_EIP()))
+ {
+ pig_synch_required = FALSE;
+ }
+ else
+ {
+ /* If the destination is going to page fault, or need
+ * accessing, then the EDL CPU will do so before issuing
+ * the pig synch. We use the dasm386 decode to prefetch
+ * a single instruction which mimics the EDL Cpu's behaviour
+ * when close to a page boundary.
+ */
+ prefetch_1_instruction(); /* Will PF if destination not present */
+#if defined(SFELLOW)
+ /*
+ * Check for occurrence of memory-mapped input.
+ * This initial crude implementation just leaves the entire synch
+ * section unchecked.
+ */
+ if ( pig_mmio_info.flags & MM_INPUT_OCCURRED )
+ {
+ pig_cpu_action = CHECK_NONE; /* cos' its effects are unknown */
+#if COLLECT_MMIO_STATS
+ if ( ++pig_mmio_info.mm_input_section_count == 0 )
+ pig_mmio_info.flags |= MM_INPUT_SECTION_COUNT_WRAPPED;
+#endif /* COLLECT_MMIO_STATS */
+ }
+ if ( pig_mmio_info.flags & MM_OUTPUT_OCCURRED )
+ {
+#if COLLECT_MMIO_STATS
+ if ( ++pig_mmio_info.mm_output_section_count == 0 )
+ pig_mmio_info.flags |= MM_OUTPUT_SECTION_COUNT_WRAPPED;
+#endif /* COLLECT_MMIO_STATS */
+ }
+#endif /* SFELLOW */
+ ccpu_synch_count++;
+ c_cpu_unsimulate();
+ }
+ }
+#endif /* PIG */
+
+NEXT_INST:
+
+ CCPU_save_EIP = GET_EIP(); /* to reflect IP change */
+
+#if defined(SFELLOW) && !defined(PROD)
+ if (sf_debug_char_waiting())
+ {
+ force_yoda();
+ }
+#endif /* SFELLOW && !PROD */
+
+ /* Reset default mode */
+ SET_OPERAND_SIZE(GET_CS_AR_X());
+ SET_ADDRESS_SIZE(GET_CS_AR_X());
+ took_relative_jump = FALSE;
+ took_absolute_toc = FALSE;
+
+ SETUP_HOST_IP(p);
+
+ /*
+ THIS IS A CHEAT.
+ The Intel documentation says RF is cleared AFTER all instructions
+ except (POPF, IRET or TASK SWITCH). To save clearing RF for each
+ and every instruction with a special test for the named exceptions
+ we clear RF before the instruction, we are assuming the
+ instruction will now be successful. As all the fault handlers set
+ RF in the pushed flags it will appear that RF was left set when
+ instructions don't run to completion from this point.
+ So although we cheat we intend to have the same effect as the
+ real thing.
+ */
+ SET_RF(0);
+
+ start_trap = GET_TF();
+
+ /* Determine if we can go into quick mode */
+ if ( cpu_interrupt_map == 0 &&
+ start_trap == 0 &&
+ nr_inst_break == 0
+#ifdef PIG
+ && !pig_synch_required
+#endif
+ )
+ {
+ quick_mode = TRUE;
+ }
+
+ goto DO_INST;
+ }
+
/* Offset within the TSS of the 16-bit I/O permission map base address. */
#define MAP_BASE_ADDR 0x66

/* Bit masks selecting the permission-map bits covered by a 1-, 2- or
   4-byte port access; indexed by (width - 1), so slot 2 (a 3-byte
   access, which never occurs) is unused and left 0. */
LOCAL IUM32 width_mask[4] = { 0x1, 0x3, 0, 0xf };
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check IO access against Permission Map in TSS. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
LOCAL VOID
   check_io_permission_map IFN2
   (
   IU32, port,    /* address of 1st port being accessed */
   IUM8, width    /* bytes (1|2|4) accessed */
   )
   {
   IU16 map_start_offset;   /* offset of the I/O bitmap within the TSS */
   IU16 map_word_offset;    /* offset of the bitmap word holding 'port' */
   IU16 map_word;           /* that word, shifted so port's bit is lsb */

   /* if invalid or 286 TSS, just take exception */
   /* (the GP fault macros presumably do not return - TODO confirm) */
   if ( GET_TR_SELECTOR() == 0 || GET_TR_AR_SUPER() == BUSY_TSS )
      GP((IU16)0, FAULT_CHKIOMAP_BAD_TSS);

   if ( MAP_BASE_ADDR >= GET_TR_LIMIT() )
      GP((IU16)0, FAULT_CHKIOMAP_BAD_MAP); /* No I/O Map Base Address. */

   /* Read bit map start address */
   map_start_offset = spr_read_word(GET_TR_BASE() + MAP_BASE_ADDR);

   /* Now try to read reqd word from bit map (8 ports per bitmap byte) */
   map_word_offset = map_start_offset + port/8;
   if ( map_word_offset >= GET_TR_LIMIT() )
      GP((IU16)0, FAULT_CHKIOMAP_BAD_TR); /* Map truncated before current port */

   /* Actually read word and check appropriate bits: shift so the first
      port's bit is at bit 0, then test 'width' consecutive bits. */
   map_word = spr_read_word(GET_TR_BASE() + map_word_offset);
   map_word = map_word >> port%8; /* bits to lsb's */
   if ( map_word & width_mask[width-1] )
      GP((IU16)0, FAULT_CHKIOMAP_ACCESS); /* Access dis-allowed */

   /* ACCESS OK - fall through, no fault raised */
   }
+
+ /*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+#ifndef SFELLOW
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Set the CPU heartbeat timer (for quick events). */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ GLOBAL VOID c_cpu_q_ev_set_count IFN1( IU32, countval )
+ {
+/* printf("setting q counter to %d\n", countval); */
+ cpu_heartbeat = countval;
+ }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Calculate (ie guess) the number of CPU heartbeat timer ticks to */
+ /* will have gone by for a given number of microseconds. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ GLOBAL IU32 c_cpu_calc_q_ev_inst_for_time IFN1( IU32, time )
+ {
+ return ( time );
+ }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Get the CPU heartbeat timer (for quick events). */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ GLOBAL IU32 c_cpu_q_ev_get_count()
+ {
+/* printf("returning q counter as %d\n", cpu_heartbeat); */
+ return cpu_heartbeat;
+ }
+
+#endif /* SFELLOW */
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Set up new page for fast Instruction Decoding. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL IU8
ccpu_new_code_page

   IFN1(
   IU8 **, q /* pntr. to host format IP pointer */
   )

   /* ANSI */
   {
   IU32 ip_phy_addr; /* Used when setting up IP (cf SETUP_HOST_IP) */

   /* update Intel IP up to end of the old page */
   SET_EIP(GET_EIP() + DIFF_INST_BYTE(*q, p_start));

   /* move onto new page in host format */
   /* NOTE(review): no ';' here - presumably SETUP_HOST_IP expands to a
      complete statement; confirm against its definition. */
   SETUP_HOST_IP(*q)
   p_start = *q;

   /* Return the first byte of the new page, stepping the pointer in
      whichever direction this build's instruction stream runs. */
#ifdef PIG
   return *IncCpuPtrLS8(*q);
#else /* PIG */
#ifdef BACK_M
   /* backwards-memory host: stream runs down through memory */
   return *(*q)--;
#else
   return *(*q)++;
#endif /* BACK_M */
#endif /* PIG */
   }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Initialise the CPU. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL VOID
c_cpu_init IFN0()
   {
   SAVED IBOOL first = TRUE;   /* one-shot guard for per-process setup */

#ifdef PIG
   SAVED char default_flags[] = "faults accessed";

   if (first)
      {
      /* FLAGS_IGNORE_DEFAULT overrides the default set of flag
         comparisons the pigger ignores. */
      char *s = getenv("FLAGS_IGNORE_DEFAULT");
      if (s)
         set_flags_ignore(s);
      else
         set_flags_ignore(default_flags);
      single_instruction_delay_enable = FALSE;
      /* SINGLE_INSTRUCTION_BLIND_SPOT=TRUE|FALSE selects whether a
         one-instruction synch blind spot follows STI/POPF; any other
         value is reported and the FALSE default kept. */
      s = getenv("SINGLE_INSTRUCTION_BLIND_SPOT");
      if (s)
         {
         if (strcmp(s, "TRUE") == 0)
            single_instruction_delay_enable = TRUE;
         else if (strcmp(s, "FALSE") == 0)
            single_instruction_delay_enable = FALSE;
         else
            printf("*** Ignoring getenv(\"SINGLE_INSTRUCTION_BLIND_SPOT\") value\n");
         printf("STI/POPF %s cause a blind spot after next conditional\n",
            single_instruction_delay_enable ? "will": "will not");
         }
      first = FALSE;
      }
#endif /* PIG */

#ifdef NTVDM
   ccpu386InitThreadStuff();
#endif

   /* Bring the CPU to its power-on state and clear all fault-nesting
      bookkeeping. */
   c_cpu_reset();
   SET_POP_DISP(0);
   doing_contributory = FALSE;
   doing_page_fault = FALSE;
   doing_double_fault = FALSE;
   doing_fault = FALSE;
   EXT = INTERNAL;
   }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Make CPU aware that external event is pending.
+ * Be careful about modifying this function, as much of the base and host
+ * in A2CPU will modify cpu_interrupt_map directly, rather than going through
+ * this function.
+ *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+ GLOBAL VOID
+ c_cpu_interrupt IFN2(CPU_INT_TYPE, type, IU16, number)
+ {
+ switch ( type )
+ {
+ case CPU_HW_RESET:
+ cpu_interrupt_map |= CPU_RESET_EXCEPTION_MASK;
+ break;
+ case CPU_TIMER_TICK:
+ cpu_interrupt_map |= CPU_SIGALRM_EXCEPTION_MASK;
+ break;
+ case CPU_SIGIO_EVENT:
+ cpu_interrupt_map |= CPU_SIGIO_EXCEPTION_MASK;
+ break;
+ case CPU_HW_INT:
+ cpu_interrupt_map |= CPU_HW_INT_MASK;
+ break;
+ case CPU_SAD_INT:
+ cpu_interrupt_map |= CPU_SAD_EXCEPTION_MASK;
+ break;
+ }
+ quick_mode = FALSE;
+ }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Act like CPU 'reset' line activated. (Well nearly) */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL VOID
c_cpu_reset IFN0()
   {
   IBOOL disableNpx = FALSE;   /* set when config says no coprocessor */

   /* All FLAGS are cleared */
   /* NB. VM MUST BE CLEARED BEFORE SEGMENT REGISTERS ARE LOADED. */
#ifdef SPC486
   SET_AC(0);
#endif /* SPC486 */
   SET_RF(0); SET_VM(0); SET_NT(0); SET_IOPL(0);
   SET_PF(0); SET_CF(0); SET_AF(0); SET_ZF(0); SET_SF(0); SET_OF(0);
   SET_TF(0); SET_IF(0); SET_DF(0);

   /* Power-on execution address and privilege level. */
   SET_EIP(0xFFF0);
   SET_CPL(0);

   SET_CS_SELECTOR(0xF000);
   SET_CS_BASE(0xf0000); /* Really 0xffff0000 */
   load_pseudo_descr(CS_REG);

   SET_SS_SELECTOR(0);
   SET_SS_BASE(0);
   load_pseudo_descr(SS_REG);

   SET_DS_SELECTOR(0);
   SET_DS_BASE(0);
   load_pseudo_descr(DS_REG);

   SET_ES_SELECTOR(0);
   SET_ES_BASE(0);
   load_pseudo_descr(ES_REG);

   SET_FS_SELECTOR(0);
   SET_FS_BASE(0);
   load_pseudo_descr(FS_REG);

   SET_GS_SELECTOR(0);
   SET_GS_BASE(0);
   load_pseudo_descr(GS_REG);

   /* Control, debug and test registers. */
   SET_CR(CR_STAT, 0);
#ifdef SPC486
   SET_CD(1);
   SET_NW(1);
#endif /* SPC486 */

   SET_DR(DR_DAR0, 0); /* Really Undefined */
   SET_DR(DR_DAR1, 0); /* Really Undefined */
   SET_DR(DR_DAR2, 0); /* Really Undefined */
   SET_DR(DR_DAR3, 0); /* Really Undefined */
   SET_DR(DR_DSR, 0);  /* Really Undefined */
   MOV_DR((IU32) DR_DCR, (IU32) 0); /* Disable Breakpoints */

   SET_TR(TR_TCR, 0);  /* Really Undefined */
   SET_TR(TR_TDR, 0);  /* Really Undefined */

   /* System tables: real-mode IVT covers the first 1K. */
   SET_IDT_BASE(0); SET_IDT_LIMIT(0x3ff);

   /* Really Undefined */
   SET_GDT_BASE(0); SET_GDT_LIMIT(0);

   SET_LDT_SELECTOR(0); SET_LDT_BASE(0); SET_LDT_LIMIT(0);

   SET_TR_SELECTOR(0); SET_TR_BASE(0); SET_TR_LIMIT(0);
   SET_TR_AR_SUPER(3);

   /* General registers. */
   SET_EAX(0);
   SET_ECX(0);          /* Really Undefined */
#ifdef SPC486
   SET_EDX(0x0000E401); /* Give component ID : revision ID */
#else
   SET_EDX(0x00000303); /* Give component ID : revision ID */
#endif
   SET_EBX(0);          /* Really Undefined */
   SET_ESP(0);          /* Really Undefined */
   SET_EBP(0);          /* Really Undefined */
   SET_ESI(0);          /* Really Undefined */
   SET_EDI(0);          /* Really Undefined */


   /* Decide whether the NPX (coprocessor) is present, then reflect
      that in the ET bit and (re)initialise the NPX state. */
#if defined(SWITCHNPX)
   if (!config_inquire(C_SWITCHNPX, NULL))
      disableNpx = TRUE;
#endif /* SWITCHNPX */

   if ( disableNpx )
      SET_ET(0);
   else
      SET_ET(1);

   InitNpx(disableNpx);
   }
+
+
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Entry point to CPU. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL VOID
c_cpu_simulate IFN0()
   {
   SYNCH_TICK();
   /* NOTE(review): overflow of the setjmp stack is only reported, not
      prevented - the setjmp below will still index past the array.
      Presumably FRAMES is generous enough in practice; confirm. */
   if (simulate_level >= FRAMES)
      fprintf(stderr, "Stack overflow in host_simulate()!\n");

   /* Save current context and invoke a new CPU level; the matching
      c_cpu_unsimulate() longjmps back here with a non-zero value. */
#ifdef NTVDM
   if ( setjmp(ccpu386SimulatePtr()) == 0)
#else
   if ( setjmp(longjmp_env_stack[simulate_level++]) == 0 )
#endif
      {
      in_C = 0;
      ccpu(FALSE);
      }
   }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Restart (Continue) point for CPU. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL VOID
c_cpu_continue IFN0()
   {
#ifdef NTVDM
   ccpu386GotoThrdExptnPt();
#else
   /* NOTE(review): this jumps via next_inst[], not longjmp_env_stack[]
      as c_cpu_simulate/c_cpu_unsimulate do - presumably next_inst is
      armed at the top of the decode loop; confirm against ccpu(). */
   longjmp(next_inst[simulate_level-1], 1);
#endif
   }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* Exit point from CPU. */
+ /* Called from CPU via 'BOP FE' to exit the current CPU invocation */
+ /* Or from CPU via '0F 0F' for the PIG_TESTER. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL VOID
c_cpu_unsimulate IFN0()
   {
#ifdef NTVDM
   ccpu386Unsimulate();
#else
   if (simulate_level == 0)
      {
      /* Nothing to return to: report it (and drop into the debugger in
         non-production builds) rather than underflow the jump stack. */
      fprintf(stderr, "host_unsimulate() - already at base of stack!\n");
#ifndef PROD
      force_yoda();
#endif /* PROD */
      }
   else
      {
      /* Return to previous context (the setjmp in c_cpu_simulate). */
      in_C = 1;
      longjmp(longjmp_env_stack[--simulate_level], 1);
      }
#endif
   }
+
+#ifdef PIG
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* To push an interrupt frame in response to an external interrupt. */
+ /* Called from CPU under test, just before it processes the interrupt */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
GLOBAL VOID
c_pig_interrupt
   IFN1(
   IU8, vector
   )

   {
   /* NOTE(review): as in c_cpu_simulate, overflow is reported but the
      setjmp below still indexes past the array - confirm FRAMES. */
   if (simulate_level >= FRAMES)
      fprintf(stderr, "Stack overflow in c_do_interrupt()!\n");

   /* Save current context and invoke a new CPU level */
#ifdef NTVDM
   if ( setjmp(ccpu386SimulatePtr()) == 0)
#else
   if ( setjmp(longjmp_env_stack[simulate_level++]) == 0 )
#endif
      {
      in_C = 0;
      EXT = EXTERNAL;   /* frame is pushed on behalf of external H/W */

      /* Reset default mode */
      SET_OPERAND_SIZE(GET_CS_AR_X());
      SET_ADDRESS_SIZE(GET_CS_AR_X());

      /* Push the interrupt frame for 'vector' (no error code). */
      do_intrupt((IU16)vector, FALSE, FALSE, (IU16)0);
      }
   }
+#endif /* PIG */
+
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* End of application hook. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ GLOBAL VOID
+ c_cpu_EOA_hook IFN0()
+ {
+ /* Do nothing */
+ }
+
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ /* SoftPC termination hook. */
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+ GLOBAL VOID
+ c_cpu_terminate IFN0()
+ {
+ /* Do nothing */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_main.h b/private/mvdm/softpc.new/base/ccpu386/c_main.h
new file mode 100644
index 000000000..77d245ad7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_main.h
@@ -0,0 +1,226 @@
+/*[
+
+c_main.h
+
+LOCAL CHAR SccsID[]="@(#)c_main.h 1.11 09/02/94";
+
+C CPU definitions and interfaces.
+---------------------------------
+
+]*/
+
+
+/*
+ Define major CPU varients here.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+/* Indicator that multiple shifts or rotates (ie count n != 1) should
+ treat the Overflow Flag as undefined. */
+#define SHIFTROT_N_OF_UNDEFINED
+
+/* Indicator to set MUL undefined flags to a specific value (else they
+ are left unchanged). */
+#define SET_UNDEFINED_MUL_FLAG
+
+/* Indicator to set DIV undefined flags to a specific value (else they
+ are left unchanged). */
+#define SET_UNDEFINED_DIV_FLAG
+
+/* Indicator to set SHRD/SHLD undefined flags (i.e. OF with shift > 1)
+ to a specific value (else they are left unchanged). */
+#define SET_UNDEFINED_SHxD_FLAG
+
+/* Indicator to set all other undefined flags to a specific value (else they
+ are left unchanged). */
+#define SET_UNDEFINED_FLAG
+
+/* Value to set undefined flags to (if they are not left unchanged). */
+#define UNDEFINED_FLAG 0
+
+
+/*
+ Rational definition of TRUE/FALSE.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Leads to more efficient tests than other definitions.
+typedef int BOOL;
+#define FALSE ((BOOL)0)
+#define TRUE ((BOOL)1)
+ */
+
+
+/*
+ Allowable types of segment prefixs.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Actually we define here only a type for no segment prefix,
+ otherwise the segment register names (CS_REG,DS_REG,...) are used.
+ */
+#define SEG_CLR 6
+
+
+/*
+ Frequently used constants.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* Masks for bits 0 - 32. */
+#define BIT0_MASK 0x1
+#define BIT1_MASK 0x2
+#define BIT2_MASK 0x4
+#define BIT3_MASK 0x8
+#define BIT4_MASK 0x10
+#define BIT5_MASK 0x20
+#define BIT6_MASK 0x40
+#define BIT7_MASK 0x80
+#define BIT8_MASK 0x100
+#define BIT9_MASK 0x200
+#define BIT10_MASK 0x400
+#define BIT11_MASK 0x800
+#define BIT12_MASK 0x1000
+#define BIT13_MASK 0x2000
+#define BIT14_MASK 0x4000
+#define BIT15_MASK 0x8000
+#define BIT16_MASK 0x10000
+#define BIT17_MASK 0x20000
+#define BIT18_MASK 0x40000
+#define BIT19_MASK 0x80000
+#define BIT20_MASK 0x100000
+#define BIT21_MASK 0x200000
+#define BIT22_MASK 0x400000
+#define BIT23_MASK 0x800000
+#define BIT24_MASK 0x1000000
+#define BIT25_MASK 0x2000000
+#define BIT26_MASK 0x4000000
+#define BIT27_MASK 0x8000000
+#define BIT28_MASK 0x10000000
+#define BIT29_MASK 0x20000000
+#define BIT30_MASK 0x40000000
+#define BIT31_MASK 0x80000000
+
+/* Various Intel component masks */
+#define BYTE_MASK 0xff
+#define WORD_MASK 0xffff
+
+/* Widths for IO permission map checks */
+#define BYTE_WIDTH ((IUM8)1)
+#define WORD_WIDTH ((IUM8)2)
+#define DWORD_WIDTH ((IUM8)4)
+
+/*
+ Data structures.
+ ~~~~~~~~~~~~~~~~
+ */
+
+/* Our model for the data extracted from a decriptor entry. */
+typedef struct
+ {
+ IU32 base; /* 32-bit base address */
+ IU32 limit; /* 32-bit offset limit */
+ IU16 AR; /* 16-bit attributes/access rights */
+ } CPU_DESCR;
+
+
+/*
+ Table for converting byte quantity to Parity Flag.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+IMPORT IU8 pf_table[];
+
+#ifdef PIG
+IMPORT IBOOL took_relative_jump;
+#endif /* PIG */
+
+
+/*
+ External interface provided to outside world.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+typedef IU16 IO_ADDR;
+
+#ifndef DOUBLE_CPU
+/*
+ Note we can't include "cpu.h" as this would overwrite our macro
+ names, hence we must redefine the external subroutines here.
+ */
+IMPORT IU32 effective_addr IPT2(
+ IU16, selector,
+ IU32, offset
+ );
+
+IMPORT VOID c_cpu_enable_a20 IPT0();
+
+IMPORT VOID c_cpu_force_a20_low IPT0();
+
+IMPORT VOID c_cpu_init IPT0();
+
+IMPORT VOID c_cpu_reset IPT0();
+
+IMPORT VOID c_cpu_continue IPT0();
+
+IMPORT VOID c_cpu_simulate IPT0();
+
+IMPORT VOID c_pig_interrupt IPT1(IU8, vector);
+
+IMPORT VOID c_cpu_unsimulate IPT0();
+
+
+#if 0 /* ROG */
+IMPORT VOID read_descriptor IPT2(
+ IU32, addr,
+ CPU_DESCR *, descr
+ );
+
+IMPORT ISM32 selector_outside_table IPT2(
+ IU16, selector,
+ IU32 *, descr_addr
+ );
+
+#endif /* 0 ROG */
+
+#endif /* !DOUBLE_CPU */
+
+/*
+ Useful mini functions (macros).
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/*
+ Macros for access to MODRM bit fields.
+
+ 7 6 5 4 3 2 1 0
+ =================
+ MODRM | | | |
+ =================
+ MODE REG R_M
+ XXX LOW3
+ SEG
+ EEE
+ SEG3
+ SEG2
+
+ */
/* MODRM field extractors - each argument and sub-expression is fully
   parenthesised so expression arguments expand correctly. */
#define GET_MODE(x) (((x) >> 6) & 0x3)
#define GET_R_M(x)  ((x) & 0x7)
#define GET_REG(x)  (((x) >> 3) & 0x7)
#define GET_XXX(x)  (((x) >> 3) & 0x7)
#define GET_SEG(x)  (((x) >> 3) & 0x7)
#define GET_EEE(x)  (((x) >> 3) & 0x7)
#define GET_SEG3(x) (((x) >> 3) & 0x7)
#define GET_SEG2(x) (((x) >> 3) & 0x7)
#define GET_LOW3(x) ((x) & 0x7)

/* Turn operand size into mask for Most Significant Bit.
   NB. '(32 - (x))' must be parenthesised: the original '32 - x' form
   mis-expanded for expression arguments, e.g. SZ2MSB(a + b) became
   '... >> 32 - a + b', i.e. a shift by (32 - a) + b. */
#define SZ2MSB(x) ((IU32)0x80000000 >> (32 - (x)))

/* Turn operand size into mask for Operand (same parenthesisation fix). */
#define SZ2MASK(x) ((IU32)0xffffffff >> (32 - (x)))
+
+#ifdef DOUBLE_CPU
+
+#define HARD_CPU 0
+#define SOFT_CPU 1
+
+IMPORT VOID double_switch_to IPT1(IU8, cpu_type);
+
+#endif
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_mem.h b/private/mvdm/softpc.new/base/ccpu386/c_mem.h
new file mode 100644
index 000000000..e32c38826
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_mem.h
@@ -0,0 +1,75 @@
+/*[
+
+c_mem.h
+
+LOCAL CHAR SccsID[]="@(#)c_mem.h 1.8 02/28/95";
+
+CPU to Memory interface.
+------------------------
+
+]*/
+
+/* >>>>>>>>>>>>>>>> NON KOSHER FORM <<<<<<<<<<<<<<<< */
+
+/* Use only for decoding the Intel Opcode Stream. */
+/* DIRECT ACCESS to memory! */
+
+IMPORT UTINY ccpu_new_code_page IPT1(UTINY **, q );
+
+IMPORT UTINY *pg_end; /* point up to which host may safely read
+ instruction stream bytes */
+
+/* To return difference between two points in the inst. stream.
+ n = new posn, o = old posn. */
+
+#ifdef PIG
+#include <Cpu_c.h>
+#define DIFF_INST_BYTE(n, o) DiffCpuPtrsLS8((o), (n))
+#else /* !PIG */
+#ifdef BACK_M
+#define DIFF_INST_BYTE(n, o) ((o) - (n))
+#else
+#define DIFF_INST_BYTE(n, o) ((n) - (o))
+#endif /* BACK_M */
+#endif /* PIG */
+
+/* To get next inst. byte and move pointer to next inst. byte. */
+#ifdef PIG
+#define GET_INST_BYTE(x) \
+ save_instruction_byte( DiffCpuPtrsLS8((x), pg_end) <= 0 ? ccpu_new_code_page(&(x)) : *IncCpuPtrLS8(x) )
+#else /* !PIG */
+#ifdef BACK_M
+#define GET_INST_BYTE(x) \
+ ( (x) <= pg_end ? ccpu_new_code_page(&(x)) : *(x)-- )
+#else
+#define GET_INST_BYTE(x) \
+ ( (x) >= pg_end ? ccpu_new_code_page(&(x)) : *(x)++ )
+#endif /* BACK_M */
+#endif /* PIG */
+
+
+/* >>>>>>>>>>>>>>>> KOSHER FORM <<<<<<<<<<<<<<<< */
+
+#ifdef PIG
+
+IMPORT IU8 phy_read_byte IPT1(LIN_ADDR, address );
+IMPORT IU16 phy_read_word IPT1(LIN_ADDR, address );
+IMPORT IU32 phy_read_dword IPT1(LIN_ADDR, address );
+IMPORT VOID phy_write_byte IPT2(LIN_ADDR, address, IU8, data);
+IMPORT VOID phy_write_word IPT2(LIN_ADDR, address, IU16, data);
+IMPORT VOID phy_write_dword IPT2(LIN_ADDR, address, IU32, data);
+
+IMPORT VOID cannot_phy_write_byte IPT2(LIN_ADDR, address, IU8, valid_mask);
+IMPORT VOID cannot_phy_write_word IPT2(LIN_ADDR, address, IU16, valid_mask);
+IMPORT VOID cannot_phy_write_dword IPT2(LIN_ADDR, address, IU32, valid_mask);
+#else
+
/* Direct physical-memory accessors (non-PIG build): thin wrappers
   around the phy_r8/16/32 and phy_w8/16/32 primitives.  The macro
   arguments are parenthesized so that a compound expression such as
   phy_read_byte(addr + off) is cast as a whole; with the original
   '(PHY_ADDR)x' the cast would bind to 'addr' alone. */
#define phy_read_byte(x) ((IU8)(phy_r8((PHY_ADDR)(x))))
#define phy_read_word(x) ((IU16)(phy_r16((PHY_ADDR)(x))))
#define phy_read_dword(x) ((IU32)(phy_r32((PHY_ADDR)(x))))

#define phy_write_byte(x, y) phy_w8((PHY_ADDR)(x), (IU8)(y))
#define phy_write_word(x, y) phy_w16((PHY_ADDR)(x), (IU16)(y))
#define phy_write_dword(x, y) phy_w32((PHY_ADDR)(x), (IU32)(y))
+
+#endif /* PIG */
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_mul64.c b/private/mvdm/softpc.new/base/ccpu386/c_mul64.c
new file mode 100644
index 000000000..514498f19
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_mul64.c
@@ -0,0 +1,185 @@
+/*[
+
+c_mul64.c
+
+LOCAL CHAR SccsID[]="@(#)c_mul64.c 1.5 02/09/94";
+
+64-bit Multiplication Functions.
+--------------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_mul64.h>
+#include <c_neg64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Do 64bit = 32bit X 32bit Signed Multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+mul64
+
+IFN4(
+ IS32 *, hr, /* Pntr to high 32 bits of result */
+ IS32 *, lr, /* Pntr to low 32 bits of result */
+ IS32, mcand, /* multiplicand */
+ IS32, mpy /* multiplier */
+ )
+
+
+ {
+ if ( mcand & BIT31_MASK )
+ {
+ if ( mpy & BIT31_MASK )
+ {
+ /* Negative Multiplicand :: Negative Multiplier */
+ mcand = -mcand;
+ mpy = -mpy;
+ mulu64((IU32 *)hr, (IU32 *)lr, (IU32)mcand, (IU32)mpy);
+ }
+ else
+ {
+ /* Negative Multiplicand :: Positive Multiplier */
+ mcand = -mcand;
+ mulu64((IU32 *)hr, (IU32 *)lr, (IU32)mcand, (IU32)mpy);
+ neg64(hr, lr);
+ }
+ }
+ else
+ {
+ if ( mpy & BIT31_MASK )
+ {
+ /* Positive Multiplicand :: Negative Multiplier */
+ mpy = -mpy;
+ mulu64((IU32 *)hr, (IU32 *)lr, (IU32)mcand, (IU32)mpy);
+ neg64(hr, lr);
+ }
+ else
+ {
+ /* Positive Multiplicand :: Positive Multiplier */
+ mulu64((IU32 *)hr, (IU32 *)lr, (IU32)mcand, (IU32)mpy);
+ }
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Do 64bit = 32bit X 32bit Unsigned Multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+mulu64
+
+IFN4(
+ IU32 *, hr, /* Pntr to high 32 bits of result */
+ IU32 *, lr, /* Pntr to low 32 bits of result */
+ IU32, mcand, /* multiplicand */
+ IU32, mpy /* multiplier */
+ )
+
+
+ {
+ IU32 ha, la, hb, lb;
+ IU32 res1, res2, res3, res4;
+ IU32 temp;
+
+ /* Our algorithm:-
+
+ a) Split the operands up into two 16 bit parts,
+
+ 3 1 1
+ 1 6 5 0
+ ===================
+ mcand = | ha | la |
+ ===================
+
+ ===================
+ mpy = | hb | lb |
+ ===================
+
+ b) Form four partial results,
+
+ res1 = la * lb
+ res2 = ha * lb
+ res3 = la * hb
+ res4 = ha * hb
+
+ c) Shift results to correct posn and sum. The tricky bit is
+ allowing for the carry between bits 31 and 32.
+
+ 6 3 3
+ 3 2 1 0
+ =====================================
+ | hr | lr |
+ =====================================
+ <------res1------->
+ <------res2------->
+ <------res3------->
+ <------res4------->
+ */
+
+ /* a) */
+
+ la = mcand & WORD_MASK;
+ ha = mcand >> 16 & WORD_MASK;
+ lb = mpy & WORD_MASK;
+ hb = mpy >> 16 & WORD_MASK;
+
+ /* b) */
+
+ res1 = la * lb;
+ res2 = ha * lb;
+ res3 = la * hb;
+ res4 = ha * hb;
+
+ /* c) */
+
+ /* Form:-
+ <------res1------->
+ <------res2------->
+ */
+ *hr = res2 >> 16;
+ *lr = res1 + (res2 << 16);
+ /* determine carry for res1 + res2 */
+ if ( (res1 & BIT31_MASK) && (res2 & BIT15_MASK) ||
+ ( !(*lr & BIT31_MASK) &&
+ ((res1 & BIT31_MASK) | (res2 & BIT15_MASK)) )
+ )
+ *hr = *hr + 1;
+
+ /* Add in:-
+ <------res3------->
+ */
+ *hr = *hr + (res3 >> 16);
+ temp = *lr + (res3 << 16);
+ /* determine carry for ... + res3 */
+ if ( (*lr & BIT31_MASK) && (res3 & BIT15_MASK) ||
+ ( !(temp & BIT31_MASK) &&
+ ((*lr & BIT31_MASK) | (res3 & BIT15_MASK)) )
+ )
+ *hr = *hr + 1;
+ *lr = temp;
+
+ /* Add in:-
+ <------res4------->
+ */
+ *hr = *hr + res4;
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_mul64.h b/private/mvdm/softpc.new/base/ccpu386/c_mul64.h
new file mode 100644
index 000000000..892d1e508
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_mul64.h
@@ -0,0 +1,29 @@
+/*
+ c_mul64.h
+
+ Define all 64-bit Multiplication Functions.
+ */
+
+/*
+ static char SccsID[]="@(#)c_mul64.h 1.4 02/09/94";
+ */
+
+IMPORT VOID mul64
+
+IPT4(
+ IS32 *, hr,
+ IS32 *, lr,
+ IS32, mcand,
+ IS32, mpy
+
+ );
+
+IMPORT VOID mulu64
+
+IPT4(
+ IU32 *, hr,
+ IU32 *, lr,
+ IU32, mcand,
+ IU32, mpy
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_neg64.c b/private/mvdm/softpc.new/base/ccpu386/c_neg64.c
new file mode 100644
index 000000000..be36685cf
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_neg64.c
@@ -0,0 +1,66 @@
+/*[
+
+c_neg64.c
+
+LOCAL CHAR SccsID[]="@(#)c_neg64.c 1.5 02/09/94";
+
+64-bit Negate Functions.
+------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_neg64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Do 64bit Negate. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+neg64
+
+IFN2(
+ IS32 *, hr, /* Pntr to high 32 bits of operand */
+ IS32 *, lr /* Pntr to low 32 bits of operand */
+ )
+
+
+ {
+ *hr = ~(*hr); /* 1's complement */
+ *lr = ~(*lr);
+
+ /* +1 ==> 2's complement */
+ /*
+ The only tricky case is when the addition causes a carry from
+ the low to high 32-bits, but this only happens when all low
+ bits are set.
+ */
+ if ( *lr == 0xffffffff )
+ {
+ *lr = 0;
+ *hr = *hr + 1;
+ }
+ else
+ {
+ *lr = *lr + 1;
+ }
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_neg64.h b/private/mvdm/softpc.new/base/ccpu386/c_neg64.h
new file mode 100644
index 000000000..e97e2aabe
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_neg64.h
@@ -0,0 +1,17 @@
+/*
+ c_neg64.h
+
+ Define all 64-bit Negate Functions.
+ */
+
+/*
+ static char SccsID[]="@(#)c_neg64.h 1.4 02/09/94";
+ */
+
+IMPORT VOID neg64
+
+IPT2(
+ IS32 *, hr,
+ IS32 *, lr
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_oprnd.h b/private/mvdm/softpc.new/base/ccpu386/c_oprnd.h
new file mode 100644
index 000000000..191b07629
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_oprnd.h
@@ -0,0 +1,1135 @@
+/*[
+
+c_oprnd.h
+
+LOCAL CHAR SccsID[]="@(#)c_oprnd.h 1.12 03/07/95";
+
+Operand Decoding Functions (Macros).
+------------------------------------
+
+]*/
+
+
+/*[
+
+ There exist 51 different Intel argument types; for each type a
+ Decode (D_), Fetch (F_), Commit (C_) and Put (P_) 'function' may
+ be written. (In fact 'null' functions aren't actually defined.)
+
+ The Decode (D_) 'function' decodes and validates the argument and
+ stores information in an easy to handle form (host variables). For
+ example memory addressing is resolved to a segment identifier and
+ offset, access to the memory location is checked at this point.
+
+ The Fetch (F_) 'function' uses the easy to handle host variables to
+ actually retrieve the operand.
+
+ The Commit (C_) 'function' handles any post instruction operand
+ functions. At present only string operands actually use this function
+ to update SI, DI and CX. This update can only be 'committed' after we
+ are sure no exception can be generated, which is why the Fetch macro
+ can not handle this update.
+
+ The Put (P_) 'function' stores the operand, it may reference the easy
+ to handle host variables when deciding where the operand is stored.
+
+ These 'functions' are invoked as follows for the 3 operand cases:-
+
+ -------------------------------
+ | src | dst | dst/src |
+ | r- | -w | rw |
+ |-----------------------------|
+ | D_ | D_ | D_ |
+ | F_ | | F_ |
+ | <<Instruction Processing>> |
+ | C_ | C_ | C_ |
+ | | P_ | P_ |
+ -------------------------------
+
+ ie: Decode and Commit (if they exist) are called for all arguments;
+ Fetch (if it exists) is only called for source arguments; Put is only
+ called for destination arguments.
+
+ Operand type naming conventions are broadly based on "Appendix A -
+ Opcode Map in 80386 Programmer's Reference Manual" A brief one line
+ description of each type is given below before the actual 'function'
+ definitions.
+
+ The 51 types are composed of those available on the 286,386 and 486:-
+
+ Aw Eb Ew Fal Fax Fcl
+ Fdx Gb Gw Hb Hw I0
+ I1 I3 Ib Iw Ix Jb
+ Jw M Ma16 Mp16 Ms Nw
+ Ob Ow Pw Xb Xw Yb
+ Yw Z
+
+ those available on the 386 and 486:-
+
+ Ad Cd Dd Ed Feax Gd
+ Hd Id Iy Jd Ma32 Mp32
+ Od Qw Rd Td Xd Yd
+
+ and those available on the 486:-
+
+ Mm
+
+ The following table indicates which functions actually exist. A
+ dot(.) indicates a 'null' or undefined function.
+
+ ===================================================
+ D F C P| D F C P| D F C P| D F C P
+ ------------|------------|------------|------------
+ Aw D . . .|Ib D . . .|Xw D F C .|Ma32 D F . .
+ Eb D F . P|Iw D . . .|Yb D F C P|Mm D F . .
+ Ew D F . P|Ix D . . .|Yw D F C P|Mp32 D F . .
+ Fal . F . P|Jb D . . .|Z D F . .|Od D F . P
+ Fax . F . P|Jw D . . .|Ad D . . .|Qw D F . .
+ Fcl . F . P|M D F . .|Cd D F . .|Rd D F . P
+ Fdx . F . P|Ma16 D F . .|Dd D F . .|Td D F . .
+ Gb D F . P|Mp16 D F . .|Ed D F . P|Xd D F C .
+ Gw D F . P|Ms D F . P|Feax . F . P|Yd D F C P
+ Hb D F . P|Nw D F . P|Gd D F . P|
+ Hw D F . P|Ob D F . P|Hd D F . P|
+ I0 . F . .|Ow D F . P|Id D . . .|
+ I1 . F . .|Pw D F . P|Iy D . . .|
+ I3 . F . .|Xb D F C .|Jd D . . .|
+ ===================================================
+
+ Each Intel combination of source and destination is categorised by
+ a numeric instruction type as follows:-
+
+ --------------------------------------------------
+ | Id | Intel assembler | arg1 | arg2 | arg3 |
+ |----|----------------------|------|------|------|
+ | 0 | INST | -- | -- | -- |
+ | 1 | INST dst/src | rw | -- | -- |
+ | 2 | INST src | r- | -- | -- |
+ | 3 | INST dst | -w | -- | -- |
+ | 4 | INST dst,src | -w | r- | -- |
+ | 5 | INST dst/src,src | rw | r- | -- |
+ | 6 | INST src,src | r- | r- | -- |
+ | 7 | INST dst,src,src | -w | r- | r- |
+ | 8 | INST dst/src,dst/src | rw | rw | -- |
+ | 9 | INST dst/src,src,src | rw | r- | r- |
+ --------------------------------------------------
+
+ Each instruction type defines the calling sequences for the
+ pre-instruction (Leading) 'functions' (D_, F_) and the post-
+ instruction (Trailing) 'functions' (C_, P_).
+
+
+ BUT (Mike says)
+ ---
+
+This is all OK, until we get to the BT (bit test) family of instructions,
+where unfortunately the manual is a little economic with the truth. If the
+bit offset parameter is specified by a register, part of the value in
+the register will actually be used as a (d)word offset if the other operand
+is in memory.
+
+This means that the bit offset operand must be fetched before the other
+operand can be decoded. Yuck.
+
+So for these instructions we're not going to use separate fetch and decode
+stages. Maybe there's a better way of doing this, but I don't know it.
+(Note that this doesn't apply to the BTx instructions with an immediate
+operand)
+]*/
+
+
+/* Segment access checking functions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+/* RO = Read Only */
+/* WO = Write Only */
+/* RW = Read and Write */
+
+#define RO0 \
+ if ( !GET_SR_AR_R(m_seg[0]) ) \
+ GP((USHORT)0, FAULT_OP0_SEG_NOT_READABLE);
+
+#define WO0 \
+ if ( !GET_SR_AR_W(m_seg[0]) ) \
+ GP((USHORT)0, FAULT_OP0_SEG_NOT_WRITABLE);
+
+#define RW0 \
+ if ( !GET_SR_AR_R(m_seg[0]) || !GET_SR_AR_W(m_seg[0]) ) \
+ GP((USHORT)0, FAULT_OP0_SEG_NO_READ_OR_WRITE);
+
+#define RO1 \
+ if ( !GET_SR_AR_R(m_seg[1]) ) \
+ GP((USHORT)0, FAULT_OP1_SEG_NOT_READABLE);
+
+#define WO1 \
+ if ( !GET_SR_AR_W(m_seg[1]) ) \
+ GP((USHORT)0, FAULT_OP1_SEG_NOT_WRITABLE);
+
+#define RW1 \
+ if ( !GET_SR_AR_R(m_seg[1]) || !GET_SR_AR_W(m_seg[1]) ) \
+ GP((USHORT)0, FAULT_OP1_SEG_NO_READ_OR_WRITE);
+
+/* String Count access function <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define STRING_COUNT \
+ if ( repeat == REP_CLR ) \
+ { \
+ rep_count = 1; \
+ } \
+ else \
+ { \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ rep_count = GET_CX(); \
+ else /* USE32 */ \
+ rep_count = GET_ECX(); \
+ }
+
+
+/* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+/* 286,386 and 486 */
+/* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+
+
+/* Aw == direct address <off16><seg> in instruction stream <<<<<<<<<< */
+
+#define D_Aw(ARG) \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ ops[ARG].mlt[0] = immed; \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ ops[ARG].mlt[1] = immed;
+
+#define D_E08(ARG, TYPE, PAGE) \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)8); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_E08(ARG) \
+ vir_read_bytes(&ops[ARG].npxbuff[0], m_la[ARG], m_pa[ARG], 0x08);
+
+#define P_E08(ARG) \
+ vir_write_bytes(m_la[ARG], m_pa[ARG], &ops[ARG].npxbuff[0], 0x08);
+
+#define D_E0a(ARG, TYPE, PAGE) \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)10); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_E0a(ARG) \
+ vir_read_bytes(&ops[ARG].npxbuff[0], m_la[ARG], m_pa[ARG], 0x0a);
+
+#define P_E0a(ARG) \
+ vir_write_bytes(m_la[ARG], m_pa[ARG], &ops[ARG].npxbuff[0], 0x0a);
+
+#define D_E0e(ARG, TYPE, PAGE) \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ if (NPX_ADDRESS_SIZE_32) { \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)28); \
+ } else { \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)14); \
+ } \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_E0e(ARG) \
+ if (NPX_ADDRESS_SIZE_32) { \
+ vir_read_bytes(&ops[ARG].npxbuff[0], m_la[ARG], m_pa[ARG], 0x1c); \
+ } else { \
+ vir_read_bytes(&ops[ARG].npxbuff[0], m_la[ARG], m_pa[ARG], 0x0e); \
+ }
+
+#define P_E0e(ARG) \
+ if (NPX_ADDRESS_SIZE_32) { \
+ vir_write_bytes(m_la[ARG], m_pa[ARG], &ops[ARG].npxbuff[0], 0x1c); \
+ } else { \
+ vir_write_bytes(m_la[ARG], m_pa[ARG], &ops[ARG].npxbuff[0], 0x0e); \
+ }
+
+#define D_E5e(ARG, TYPE, PAGE) \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ if (NPX_ADDRESS_SIZE_32) { \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)94); \
+ } else { \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)106); \
+ } \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_E5e(ARG) \
+ if (NPX_ADDRESS_SIZE_32) { \
+ vir_read_bytes(&ops[ARG].npxbuff[0], m_la[ARG], m_pa[ARG], 0x6c); \
+ } else { \
+ vir_read_bytes(&ops[ARG].npxbuff[0], m_la[ARG], m_pa[ARG], 0x5e); \
+ }
+
+#define P_E5e(ARG) \
+ if (NPX_ADDRESS_SIZE_32) { \
+ vir_write_bytes(m_la[ARG], m_pa[ARG], &ops[ARG].npxbuff[0], 0x6c); \
+ } else { \
+ vir_write_bytes(m_la[ARG], m_pa[ARG], &ops[ARG].npxbuff[0], 0x5e); \
+ }
+
+
+/* Eb == 'mode'+'r/m' fields refer to byte register/memory <<<<<<<<<< */
+
+#define D_Eb(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ save_id[ARG] = GET_R_M(modRM); \
+ m_isreg[ARG] = TRUE; \
+ } \
+ else \
+ { \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)1); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_byte(m_la[ARG], PAGE); \
+ }
+
+#define F_Eb(ARG) \
+ if ( m_isreg[ARG] ) \
+ ops[ARG].sng = GET_BR(save_id[ARG]); \
+ else \
+ ops[ARG].sng = vir_read_byte(m_la[ARG], m_pa[ARG]);
+
+#define P_Eb(ARG) \
+ if ( m_isreg[ARG] ) \
+ SET_BR(save_id[ARG], ops[ARG].sng); \
+ else \
+ vir_write_byte(m_la[ARG], m_pa[ARG], (UTINY)ops[ARG].sng);
+
+/* Ew == 'mode'+'r/m' fields refer to word register/memory <<<<<<<<<< */
+
+#define D_Ew(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ save_id[ARG] = GET_R_M(modRM); \
+ m_isreg[ARG] = TRUE; \
+ } \
+ else \
+ { \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE); \
+ }
+
+#define F_Ew(ARG) \
+ if ( m_isreg[ARG] ) \
+ ops[ARG].sng = GET_WR(save_id[ARG]); \
+ else \
+ ops[ARG].sng = vir_read_word(m_la[ARG], m_pa[ARG]);
+
+#define P_Ew(ARG) \
+ if ( m_isreg[ARG] ) \
+ SET_WR(save_id[ARG], ops[ARG].sng); \
+ else \
+ vir_write_word(m_la[ARG], m_pa[ARG], (USHORT)ops[ARG].sng);
+
+/* Fal == fixed register, AL <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define F_Fal(ARG) ops[ARG].sng = GET_BR(A_AL);
+
+#define P_Fal(ARG) SET_BR(A_AL, ops[ARG].sng);
+
+/* Fax == fixed register, AX <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define F_Fax(ARG) ops[ARG].sng = GET_WR(A_AX);
+
+#define P_Fax(ARG) SET_WR(A_AX, ops[ARG].sng);
+
+/* Fcl == fixed register, CL <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define F_Fcl(ARG) ops[ARG].sng = GET_BR(A_CL);
+
+#define P_Fcl(ARG) SET_BR(A_CL, ops[ARG].sng);
+
+/* Fdx == fixed register, DX <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define F_Fdx(ARG) ops[ARG].sng = GET_WR(A_DX);
+
+#define P_Fdx(ARG) SET_WR(A_DX, ops[ARG].sng);
+
+/* Gb == 'reg' field of modR/M byte denotes byte reg <<<<<<<<<<<<<<<< */
+
+#define D_Gb(ARG) save_id[ARG] = GET_REG(modRM);
+
+#define F_Gb(ARG) ops[ARG].sng = GET_BR(save_id[ARG]);
+
+#define P_Gb(ARG) SET_BR(save_id[ARG], ops[ARG].sng);
+
+/* Gw == 'reg' field of modR/M byte denotes word reg <<<<<<<<<<<<<<<< */
+
+#define D_Gw(ARG) save_id[ARG] = GET_REG(modRM);
+
+#define F_Gw(ARG) ops[ARG].sng = GET_WR(save_id[ARG]);
+
+#define P_Gw(ARG) SET_WR(save_id[ARG], ops[ARG].sng);
+
+/* Hb == low 3 bits of opcode denote byte register <<<<<<<<<<<<<<<<<< */
+
+#define D_Hb(ARG) save_id[ARG] = GET_LOW3(opcode);
+
+#define F_Hb(ARG) ops[ARG].sng = GET_BR(save_id[ARG]);
+
+#define P_Hb(ARG) SET_BR(save_id[ARG], ops[ARG].sng);
+
+/* Hw == low 3 bits of opcode denote word register <<<<<<<<<<<<<<<<<< */
+
+#define D_Hw(ARG) save_id[ARG] = GET_LOW3(opcode);
+
+#define F_Hw(ARG) ops[ARG].sng = GET_WR(save_id[ARG]);
+
+#define P_Hw(ARG) SET_WR(save_id[ARG], ops[ARG].sng);
+
+/* I0 == immediate(0) implied within instruction <<<<<<<<<<<<<<<<<<<< */
+
+#define F_I0(ARG) ops[ARG].sng = 0;
+
+/* I1 == immediate(1) implied within instruction <<<<<<<<<<<<<<<<<<<< */
+
+#define F_I1(ARG) ops[ARG].sng = 1;
+
+/* I3 == immediate(3) implied within instruction <<<<<<<<<<<<<<<<<<<< */
+
+#define F_I3(ARG) ops[ARG].sng = 3;
+
+/* Ib == immediate byte <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Ib(ARG) ops[ARG].sng = GET_INST_BYTE(p);
+
+/* Iw == immediate word <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Iw(ARG) \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ ops[ARG].sng = immed;
+
+/* Ix == immediate byte sign extended to word <<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Ix(ARG) \
+ immed = GET_INST_BYTE(p); \
+ if ( immed & 0x80 ) \
+ immed |= 0xff00; \
+ ops[ARG].sng = immed;
+
+/* Jb == relative offset byte sign extended to double word <<<<<<<<<< */
+
+#define D_Jb(ARG) \
+ immed = GET_INST_BYTE(p); \
+ if ( immed & 0x80 ) \
+ immed |= 0xffffff00; \
+ ops[ARG].sng = immed;
+
+/* Jw == relative offset word sign extended to double word <<<<<<<<<< */
+
+#define D_Jw(ARG) \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ if ( immed & 0x8000 ) \
+ immed |= 0xffff0000; \
+ ops[ARG].sng = immed;
+
+/* M == address (ie offset) of memory operand <<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_M(ARG) \
+ if ( GET_MODE(modRM) == 3 ) \
+ Int6(); /* Register operand not allowed */ \
+ else \
+ { \
+ d_mem(modRM, &p, segment_override, &m_seg[ARG], \
+ &m_off[ARG]); \
+ }
+
+#define F_M(ARG) ops[ARG].sng = m_off[ARG];
+
+/* Ma16 == word operand pair, as used by BOUND <<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Ma16(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ Int6(); /* Register operand not allowed */ \
+ } \
+ else \
+ { \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)2, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE); \
+ m_off[ARG] = address_add(m_off[ARG], (LONG)2); \
+ m_la2[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa2[ARG] = usr_chk_word(m_la2[ARG], PAGE); \
+ }
+
+#define F_Ma16(ARG) \
+ ops[ARG].mlt[0] = vir_read_word(m_la[ARG], m_pa[ARG]); \
+ ops[ARG].mlt[1] = vir_read_word(m_la2[ARG], m_pa2[ARG]);
+
+/* Mp16 == 32-bit far pointer:- <word><word> (16:16) <<<<<<<<<<<<<<<< */
+
+#define D_Mp16(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ Int6(); /* Register operand not allowed */ \
+ } \
+ else \
+ { \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)2, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE); \
+ m_off[ARG] = address_add(m_off[ARG], (LONG)2); \
+ m_la2[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa2[ARG] = usr_chk_word(m_la2[ARG], PAGE); \
+ }
+
+#define F_Mp16(ARG) \
+ ops[ARG].mlt[0] = vir_read_word(m_la[ARG], m_pa[ARG]); \
+ ops[ARG].mlt[1] = vir_read_word(m_la2[ARG], m_pa2[ARG]);
+
+/* Ms == six byte pseudo descriptor <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Ms(ARG, TYPE, PAGE) \
+ d_mem(modRM, &p, segment_override, &m_seg[ARG], &m_off[ARG]);\
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)3, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE); \
+ m_off[ARG] = address_add(m_off[ARG], (LONG)2); \
+ m_la2[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa2[ARG] = usr_chk_dword(m_la2[ARG], PAGE);
+
+#define F_Ms(ARG) \
+ ops[ARG].mlt[0] = vir_read_word(m_la[ARG], m_pa[ARG]); \
+ ops[ARG].mlt[1] = vir_read_dword(m_la2[ARG], m_pa2[ARG]);
+
+#define P_Ms(ARG) \
+ vir_write_word(m_la[ARG], m_pa[ARG], (USHORT)ops[ARG].mlt[0]); \
+ vir_write_dword(m_la2[ARG], m_pa2[ARG], (ULONG)ops[ARG].mlt[1]);
+
+/* Nw == 'reg' field of modR/M byte denotes segment register <<<<<<<< */
+
+#define D_Nw(ARG) ops[ARG].sng = GET_SEG(modRM);
+
+#define F_Nw(ARG) ops[ARG].sng = GET_SR_SELECTOR(ops[ARG].sng);
+
+/* Ob == offset to byte encoded in instruction stream <<<<<<<<<<<<<<< */
+
+#define D_Ob(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ if ( GET_ADDRESS_SIZE() == USE32 ) \
+ { \
+ immed |= (ULONG)GET_INST_BYTE(p) << 16; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 24; \
+ } \
+ m_off[ARG] = immed; \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)1); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_byte(m_la[ARG], PAGE);
+
+#define F_Ob(ARG) ops[ARG].sng = vir_read_byte(m_la[ARG], m_pa[ARG]);
+
+#define P_Ob(ARG) \
+ vir_write_byte(m_la[ARG], m_pa[ARG], (UTINY)ops[ARG].sng);
+
+/* Ow == offset to word encoded in instruction stream <<<<<<<<<<<<<<< */
+
+#define D_Ow(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ if ( GET_ADDRESS_SIZE() == USE32 ) \
+ { \
+ immed |= (ULONG)GET_INST_BYTE(p) << 16; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 24; \
+ } \
+ m_off[ARG] = immed; \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_Ow(ARG) \
+ ops[ARG].sng = vir_read_word(m_la[ARG], m_pa[ARG]);
+
+#define P_Ow(ARG) \
+ vir_write_word(m_la[ARG], m_pa[ARG], (USHORT)ops[ARG].sng);
+
+/* Pw == 2 bits(4-3) of opcode byte denote segment register <<<<<<<<< */
+
+#define D_Pw(ARG) ops[ARG].sng = GET_SEG2(opcode);
+
+#define F_Pw(ARG) ops[ARG].sng = GET_SR_SELECTOR(ops[ARG].sng);
+
+/* Xb == byte string source addressing <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Xb(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_SI(); \
+ else /* USE32 */ \
+ m_off[ARG] = GET_ESI(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)1); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_byte(m_la[ARG], PAGE);
+
+#define F_Xb(ARG) ops[ARG].sng = vir_read_byte(m_la[ARG], m_pa[ARG]);
+
+#define C_Xb(ARG) \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ { \
+ if ( GET_DF() ) \
+ SET_SI(GET_SI() - 1); \
+ else \
+ SET_SI(GET_SI() + 1); \
+ if ( repeat != REP_CLR ) \
+ SET_CX(rep_count); \
+ } \
+ else /* USE32 */ \
+ { \
+ if ( GET_DF() ) \
+ SET_ESI(GET_ESI() - 1); \
+ else \
+ SET_ESI(GET_ESI() + 1); \
+ if ( repeat != REP_CLR ) \
+ SET_ECX(rep_count); \
+ }
+
+/* Xw == word string source addressing <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Xw(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_SI(); \
+ else /* USE32 */ \
+ m_off[ARG] = GET_ESI(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_Xw(ARG) \
+ ops[ARG].sng = vir_read_word(m_la[ARG], m_pa[ARG]);
+
+#define C_Xw(ARG) \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ { \
+ if ( GET_DF() ) \
+ SET_SI(GET_SI() - 2); \
+ else \
+ SET_SI(GET_SI() + 2); \
+ if ( repeat != REP_CLR ) \
+ SET_CX(rep_count); \
+ } \
+ else /* USE32 */ \
+ { \
+ if ( GET_DF() ) \
+ SET_ESI(GET_ESI() - 2); \
+ else \
+ SET_ESI(GET_ESI() + 2); \
+ if ( repeat != REP_CLR ) \
+ SET_ECX(rep_count); \
+ }
+
+/* Yb == byte string 'destination' addressing <<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Yb(ARG, TYPE, PAGE) \
+ m_seg[ARG] = ES_REG; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_DI(); \
+ else /* USE32 */ \
+ m_off[ARG] = GET_EDI(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)1); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_byte(m_la[ARG], PAGE);
+
+#define F_Yb(ARG) ops[ARG].sng = vir_read_byte(m_la[ARG], m_pa[ARG]);
+
+#define C_Yb(ARG) \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ { \
+ if ( GET_DF() ) \
+ SET_DI(GET_DI() - 1); \
+ else \
+ SET_DI(GET_DI() + 1); \
+ if ( repeat != REP_CLR ) \
+ SET_CX(rep_count); \
+ } \
+ else /* USE32 */ \
+ { \
+ if ( GET_DF() ) \
+ SET_EDI(GET_EDI() - 1); \
+ else \
+ SET_EDI(GET_EDI() + 1); \
+ if ( repeat != REP_CLR ) \
+ SET_ECX(rep_count); \
+ }
+
+#define P_Yb(ARG) \
+ vir_write_byte(m_la[ARG], m_pa[ARG], (IU8)ops[ARG].sng);
+
+#ifdef PIG
+#define PIG_P_Yb(ARG) \
+ cannot_vir_write_byte(m_la[ARG], m_pa[ARG], 0x00);
+#else
+#define PIG_P_Yb(ARG) \
+ vir_write_byte(m_la[ARG], m_pa[ARG], (IU8)ops[ARG].sng);
+#endif /* PIG */
+
+
+/* Yw == word string 'destination' addressing <<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Yw(ARG, TYPE, PAGE) \
+ m_seg[ARG] = ES_REG; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_DI(); \
+ else /* USE32 */ \
+ m_off[ARG] = GET_EDI(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)2); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_word(m_la[ARG], PAGE);
+
+#define F_Yw(ARG) \
+ ops[ARG].sng = vir_read_word(m_la[ARG], m_pa[ARG]);
+
+#define C_Yw(ARG) \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ { \
+ if ( GET_DF() ) \
+ SET_DI(GET_DI() - 2); \
+ else \
+ SET_DI(GET_DI() + 2); \
+ if ( repeat != REP_CLR ) \
+ SET_CX(rep_count); \
+ } \
+ else /* USE32 */ \
+ { \
+ if ( GET_DF() ) \
+ SET_EDI(GET_EDI() - 2); \
+ else \
+ SET_EDI(GET_EDI() + 2); \
+ if ( repeat != REP_CLR ) \
+ SET_ECX(rep_count); \
+ }
+
+#define P_Yw(ARG) \
+ vir_write_word(m_la[ARG], m_pa[ARG], (IU16)ops[ARG].sng);
+
+#ifdef PIG
+#define PIG_P_Yw(ARG) \
+ cannot_vir_write_word(m_la[ARG], m_pa[ARG], 0x0000);
+#else
+#define PIG_P_Yw(ARG) \
+ vir_write_word(m_la[ARG], m_pa[ARG], (IU16)ops[ARG].sng);
+#endif /* PIG */
+
+/* Z == 'xlat' addressing form <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+#define D_Z(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_BX() + GET_AL() & WORD_MASK; \
+ else /* USE32 */ \
+ m_off[ARG] = GET_EBX() + GET_AL(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)1); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_byte(m_la[ARG], PAGE);
+
+#define F_Z(ARG) ops[ARG].sng = vir_read_byte(m_la[ARG], m_pa[ARG]);
+
+
+/* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+/* 386 and 486 */
+/* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+
+/* Ad == direct address <off32><seg> in instruction stream <<<<<<<<<< */
+
+/* Decode a direct far pointer from the instruction stream:           */
+/* a little-endian 32-bit offset into ops[ARG].mlt[0], then a         */
+/* 16-bit segment selector into ops[ARG].mlt[1].                      */
+#define D_Ad(ARG) \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 16; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 24; \
+ ops[ARG].mlt[0] = immed; \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ ops[ARG].mlt[1] = immed;
+
+/* Cd == 'reg' field of modR/M byte denotes control register <<<<<<<< */
+
+/* Decode/fetch a control register: D_Cd records the register number  */
+/* from the modRM 'reg' (EEE) field, F_Cd replaces it with the CR     */
+/* contents.                                                          */
+#define D_Cd(ARG) ops[ARG].sng = GET_EEE(modRM);
+
+#define F_Cd(ARG) ops[ARG].sng = GET_CR(ops[ARG].sng);
+
+/* Decode/fetch a debug register - same scheme as Cd but for DRn.     */
+#define D_Dd(ARG) ops[ARG].sng = GET_EEE(modRM);
+
+#define F_Dd(ARG) ops[ARG].sng = GET_DR(ops[ARG].sng);
+
+/* Ed == 'mode'+'r/m' fields refer to double word register/memory <<< */
+
+/* Decode a dword r/m operand.  Mode 3 names a general register       */
+/* (id kept in save_id, m_isreg set); otherwise d_mem() resolves      */
+/* the effective address, the limit is checked for 4 bytes and the    */
+/* page mapping is obtained via usr_chk_dword.                        */
+#define D_Ed(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ save_id[ARG] = GET_R_M(modRM); \
+ m_isreg[ARG] = TRUE; \
+ } \
+ else \
+ { \
+ m_isreg[ARG] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)4); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_dword(m_la[ARG], PAGE); \
+ }
+
+/* Fetch the dword from the register or mapped memory location.       */
+#define F_Ed(ARG) \
+ if ( m_isreg[ARG] ) \
+ ops[ARG].sng = GET_GR(save_id[ARG]); \
+ else \
+ ops[ARG].sng = vir_read_dword(m_la[ARG], m_pa[ARG]);
+
+/* Put the dword back to the register or mapped memory location.      */
+#define P_Ed(ARG) \
+ if ( m_isreg[ARG] ) \
+ SET_GR(save_id[ARG], ops[ARG].sng); \
+ else \
+ vir_write_dword(m_la[ARG], m_pa[ARG], (ULONG)ops[ARG].sng);
+
+/* Feax == fixed register, EAX <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+/* Fetch/put the fixed EAX operand (no decode step needed).           */
+#define F_Feax(ARG) ops[ARG].sng = GET_GR(A_EAX);
+
+#define P_Feax(ARG) SET_GR(A_EAX, ops[ARG].sng);
+
+/* Gd: dword register named by the modRM 'reg' field.                 */
+#define D_Gd(ARG) save_id[ARG] = GET_REG(modRM);
+
+#define F_Gd(ARG) ops[ARG].sng = GET_GR(save_id[ARG]);
+
+#define P_Gd(ARG) SET_GR(save_id[ARG], ops[ARG].sng);
+
+/* Hd: dword register encoded in the low 3 bits of the opcode byte.   */
+#define D_Hd(ARG) save_id[ARG] = GET_LOW3(opcode);
+
+#define F_Hd(ARG) ops[ARG].sng = GET_GR(save_id[ARG]);
+
+#define P_Hd(ARG) SET_GR(save_id[ARG], ops[ARG].sng);
+
+/* Id == immediate double word <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+/* Immediate dword, assembled little-endian from the inst stream.     */
+#define D_Id(ARG) \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 16; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 24; \
+ ops[ARG].sng = immed;
+
+/* Immediate byte, manually sign-extended to 32 bits (bit 7 copied    */
+/* into bits 8-31).                                                   */
+#define D_Iy(ARG) \
+ immed = GET_INST_BYTE(p); \
+ if ( immed & 0x80 ) \
+ immed |= 0xffffff00; \
+ ops[ARG].sng = immed;
+
+/* Relative dword branch displacement - decoded exactly like Id.      */
+#define D_Jd(ARG) \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 16; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 24; \
+ ops[ARG].sng = immed;
+
+/* Ma32 == double word operand pair, as used by BOUND <<<<<<<<<<<<<<< */
+
+/* Decode the BOUND operand pair: two consecutive dwords in memory.   */
+/* A register encoding (mode 3) is illegal and raises #UD via Int6.   */
+/* Both dwords are limit-checked together (2 units of 4 bytes) and    */
+/* each gets its own linear/physical mapping (la/pa and la2/pa2).     */
+#define D_Ma32(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ Int6(); /* Register operand not allowed */ \
+ } \
+ else \
+ { \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)2, (INT)4); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_dword(m_la[ARG], PAGE); \
+ m_off[ARG] = address_add(m_off[ARG], (LONG)4); \
+ m_la2[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa2[ARG] = usr_chk_dword(m_la2[ARG], PAGE); \
+ }
+
+/* Fetch both bounds: lower into mlt[0], upper into mlt[1].           */
+#define F_Ma32(ARG) \
+ ops[ARG].mlt[0] = vir_read_dword(m_la[ARG], m_pa[ARG]); \
+ ops[ARG].mlt[1] = vir_read_dword(m_la2[ARG], m_pa2[ARG]);
+
+/* Mp32 == 48-bit far pointer:- <double word><word> (32:16) <<<<<<<<< */
+
+/* Decode a 48-bit far pointer in memory: a dword offset followed     */
+/* by a word selector.  Register encodings raise #UD.  Each part is   */
+/* limit-checked separately (4 bytes at off, 2 bytes at off+4).       */
+#define D_Mp32(ARG, TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) \
+ { \
+ Int6(); /* Register operand not allowed */ \
+ } \
+ else \
+ { \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[ARG], &m_off[ARG]); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)4); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_off[ARG] = address_add(m_off[ARG], (LONG)4); \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)2); \
+ m_la2[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_dword(m_la[ARG], PAGE); \
+ m_pa2[ARG] = usr_chk_word(m_la2[ARG], PAGE); \
+ }
+
+/* Fetch: offset into mlt[0], selector into mlt[1].                   */
+#define F_Mp32(ARG) \
+ ops[ARG].mlt[0] = vir_read_dword(m_la[ARG], m_pa[ARG]); \
+ ops[ARG].mlt[1] = vir_read_word(m_la2[ARG], m_pa2[ARG]);
+
+/* Od == offset to double word encoded in instruction stream <<<<<<<< */
+
+/* Decode a moffs-style dword operand: the offset itself is encoded   */
+/* in the instruction stream - 16 bits always, with two further       */
+/* bytes consumed only under a 32-bit address size.  DS is used       */
+/* unless overridden.                                                 */
+#define D_Od(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ immed = GET_INST_BYTE(p); \
+ immed |= (ULONG)GET_INST_BYTE(p) << 8; \
+ if ( GET_ADDRESS_SIZE() == USE32 ) \
+ { \
+ immed |= (ULONG)GET_INST_BYTE(p) << 16; \
+ immed |= (ULONG)GET_INST_BYTE(p) << 24; \
+ } \
+ m_off[ARG] = immed; \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)4); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_dword(m_la[ARG], PAGE);
+
+/* Fetch/put the dword at the mapped moffs location.                  */
+#define F_Od(ARG) \
+ ops[ARG].sng = vir_read_dword(m_la[ARG], m_pa[ARG]);
+
+#define P_Od(ARG) \
+ vir_write_dword(m_la[ARG], m_pa[ARG], (ULONG)ops[ARG].sng);
+
+/* Qw == 3 bits(5-3) of opcode byte denote segment register <<<<<<<<< */
+
+/* Qw: segment register encoded in opcode bits 5-3; fetch yields      */
+/* its selector value.                                                */
+#define D_Qw(ARG) ops[ARG].sng = GET_SEG3(opcode);
+
+#define F_Qw(ARG) ops[ARG].sng = GET_SR_SELECTOR(ops[ARG].sng);
+
+/* Rd: dword general register named by the modRM 'r/m' field (mode    */
+/* is known/required to be 3 for these encodings).                    */
+#define D_Rd(ARG) save_id[ARG] = GET_R_M(modRM);
+
+#define F_Rd(ARG) ops[ARG].sng = GET_GR(save_id[ARG]);
+
+#define P_Rd(ARG) SET_GR(save_id[ARG], ops[ARG].sng);
+
+/* Td: test register named by the modRM 'reg' (EEE) field.            */
+#define D_Td(ARG) ops[ARG].sng = GET_EEE(modRM);
+
+#define F_Td(ARG) ops[ARG].sng = GET_TR(ops[ARG].sng);
+
+/* Xd == double word string source addressing <<<<<<<<<<<<<<<<<<<<<<< */
+
+/* Decode: dword string source at DS:(E)SI (override honoured),       */
+/* limit-checked for 4 bytes and mapped via usr_chk_dword.            */
+#define D_Xd(ARG, TYPE, PAGE) \
+ m_seg[ARG] = (segment_override == SEG_CLR) ? \
+ DS_REG : segment_override; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_SI(); \
+ else /* USE32 */ \
+ m_off[ARG] = GET_ESI(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)4); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_dword(m_la[ARG], PAGE);
+
+/* Fetch the dword mapped by D_Xd.                                    */
+#define F_Xd(ARG) \
+ ops[ARG].sng = vir_read_dword(m_la[ARG], m_pa[ARG]);
+
+/* Complete: step (E)SI by +/-4 per DF; restore rep count to (E)CX    */
+/* when a REP prefix was active.                                      */
+#define C_Xd(ARG) \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ { \
+ if ( GET_DF() ) \
+ SET_SI(GET_SI() - 4); \
+ else \
+ SET_SI(GET_SI() + 4); \
+ if ( repeat != REP_CLR ) \
+ SET_CX(rep_count); \
+ } \
+ else /* USE32 */ \
+ { \
+ if ( GET_DF() ) \
+ SET_ESI(GET_ESI() - 4); \
+ else \
+ SET_ESI(GET_ESI() + 4); \
+ if ( repeat != REP_CLR ) \
+ SET_ECX(rep_count); \
+ }
+
+/* Yd == double word string 'destination' addressing <<<<<<<<<<<<<<<< */
+
+/* Decode: dword string destination at ES:(E)DI - no override, as     */
+/* for all string destinations.                                       */
+#define D_Yd(ARG, TYPE, PAGE) \
+ m_seg[ARG] = ES_REG; \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ m_off[ARG] = GET_DI(); \
+ else /* USE32 */ \
+ m_off[ARG] = GET_EDI(); \
+ TYPE \
+ limit_check(m_seg[ARG], m_off[ARG], (INT)1, (INT)4); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ m_pa[ARG] = usr_chk_dword(m_la[ARG], PAGE);
+
+/* Fetch the dword mapped by D_Yd.                                    */
+#define F_Yd(ARG) \
+ ops[ARG].sng = vir_read_dword(m_la[ARG], m_pa[ARG]);
+
+/* Complete: step (E)DI by +/-4 per DF; restore rep count to (E)CX    */
+/* when a REP prefix was active.                                      */
+#define C_Yd(ARG) \
+ if ( GET_ADDRESS_SIZE() == USE16 ) \
+ { \
+ if ( GET_DF() ) \
+ SET_DI(GET_DI() - 4); \
+ else \
+ SET_DI(GET_DI() + 4); \
+ if ( repeat != REP_CLR ) \
+ SET_CX(rep_count); \
+ } \
+ else /* USE32 */ \
+ { \
+ if ( GET_DF() ) \
+ SET_EDI(GET_EDI() - 4); \
+ else \
+ SET_EDI(GET_EDI() + 4); \
+ if ( repeat != REP_CLR ) \
+ SET_ECX(rep_count); \
+ }
+
+/* Put the dword back to the mapped destination.                      */
+#define P_Yd(ARG) \
+ vir_write_dword(m_la[ARG], m_pa[ARG], (IU32)ops[ARG].sng);
+
+/* Pigging put: no data available under PIG - flag the address via    */
+/* cannot_vir_write_dword instead of writing.                         */
+#ifdef PIG
+#define PIG_P_Yd(ARG) \
+ cannot_vir_write_dword(m_la[ARG], m_pa[ARG], 0x00000000);
+#else
+#define PIG_P_Yd(ARG) \
+ vir_write_dword(m_la[ARG], m_pa[ARG], (IU32)ops[ARG].sng);
+#endif /* PIG */
+
+
+/*
+ * The macros for decoding and fetching the operands for a BTx instruction.
+ * See the file header for a description of why these are required.
+ */
+
+/* BTx word operands: for a memory operand the bit offset held in     */
+/* the Gw register selects which word is actually accessed, so the    */
+/* effective address is biased by (bitoff >> 3) & ~1 (the byte        */
+/* offset of the containing word) before the limit/page checks.       */
+/* NOTE(review): ops[1].sng is used unsigned here; negative bit       */
+/* offsets (sign-extended) would bias the address forward - confirm   */
+/* against the callers' operand decoding.                             */
+#define BT_OPSw(TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) { \
+ /* \
+ * Register operand, no frigging required. \
+ */ \
+ \
+ save_id[0] = GET_R_M(modRM); \
+ m_isreg[0] = TRUE; \
+ D_Gw(1) \
+ F_Ew(0) \
+ F_Gw(1) \
+ } else { \
+ D_Gw(1) \
+ F_Gw(1) \
+ m_isreg[0] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[0], &m_off[0]); \
+ m_off[0] += (ops[1].sng >> 3) & ~1; \
+ TYPE \
+ limit_check(m_seg[0], m_off[0], (INT)1, (INT)2); \
+ m_la[0] = GET_SR_BASE(m_seg[0]) + m_off[0]; \
+ m_pa[0] = usr_chk_word(m_la[0], PAGE); \
+ F_Ew(0) \
+ } \
+
+/* BTx dword operands: same scheme with a dword granule - address     */
+/* biased by (bitoff >> 3) & ~3.                                      */
+#define BT_OPSd(TYPE, PAGE) \
+ if ( GET_MODE(modRM) == 3 ) { \
+ /* \
+ * Register operand, no frigging required. \
+ */ \
+ \
+ save_id[0] = GET_R_M(modRM); \
+ m_isreg[0] = TRUE; \
+ D_Gd(1) \
+ F_Ed(0) \
+ F_Gd(1) \
+ } else { \
+ D_Gd(1) \
+ F_Gd(1) \
+ m_isreg[0] = FALSE; \
+ d_mem(modRM, &p, segment_override, \
+ &m_seg[0], &m_off[0]); \
+ m_off[0] += (ops[1].sng >> 3) & ~3; \
+ TYPE \
+ limit_check(m_seg[0], m_off[0], (INT)1, (INT)4); \
+ m_la[0] = GET_SR_BASE(m_seg[0]) + m_off[0]; \
+ m_pa[0] = usr_chk_dword(m_la[0], PAGE); \
+ F_Ed(0) \
+ } \
+
+/* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+/* 486 only */
+/* <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+
+/* Mm == address of memory operand <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+/* Decode a memory-address-only operand (486): register encodings     */
+/* raise #UD; no limit or page check is performed - only the linear   */
+/* address is computed.  F_Mm yields that linear address itself.      */
+#define D_Mm(ARG) \
+ if ( GET_MODE(modRM) == 3 ) \
+ Int6(); /* Register operand not allowed */ \
+ else \
+ { \
+ d_mem(modRM, &p, segment_override, &m_seg[ARG], \
+ &m_off[ARG]); \
+ m_la[ARG] = GET_SR_BASE(m_seg[ARG]) + m_off[ARG]; \
+ } \
+
+#define F_Mm(ARG) ops[ARG].sng = m_la[ARG];
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_page.c b/private/mvdm/softpc.new/base/ccpu386/c_page.c
new file mode 100644
index 000000000..540e554f0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_page.c
@@ -0,0 +1,857 @@
+/*[
+
+c_page.c
+
+LOCAL CHAR SccsID[]="@(#)c_page.c 1.10 02/28/95";
+
+Paging Support.
+---------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h> /* SoftPC types */
+#include <c_main.h> /* C CPU definitions-interfaces */
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_page.h> /* our interface */
+#include <c_mem.h> /* CPU - Physical Memory interface */
+#include <c_tlb.h> /* Translation Lookaside Buffer interface */
+#include <ccpusas4.h> /* CPU <-> sas interface */
+#include <c_debug.h> /* Debugging Regs and Breakpoint interface */
+
+
+/*[
+
+ Various levels of interface are provided to the paging system (to
+ allow fairly optimal emulation) these levels are:-
+
+ spr_chk_ Checks Supervisor Access to given data item, caller
+ aware that #PF may occur. 'A/D' bits will be set. No
+ other action is taken.
+
+ usr_chk_ Checks User Access to given data item, caller aware
+ that #PF may occur. 'A/D' bits will be set. No other
+ action is taken.
+
+ spr_ Perform Supervisor Access, caller aware that #PF may
+ occur. Action (Read/Write) is performed immediately.
+ Will update A/D bits.
+
+ vir_ Perform Virtual Memory Operation (Read/Write). No checks
+ are made and no fault will be generated, only call after
+ a spr_chk or usr_chk function.
+
+ NB. At present no super optimal vir_ implementation
+ exists. If a spr_chk or usr_chk function is not
+ called before a vir_ function, then the vir_
+ function may cause #PF, this condition will become
+ a fatal error in an optimised implementation.
+ For the moment we assume that after a 'chk' call it
+ is virtually 100% certain that the 'vir' call will
+ get a cache hit.
+
+]*/
+
+#define LAST_DWORD_ON_PAGE 0xffc
+#define LAST_WORD_ON_PAGE 0xffe
+
+#define OFFSET_MASK 0xfff
+
+#ifdef PIG
+LOCAL VOID cannot_spr_write_byte IPT2( LIN_ADDR, lin_addr, IU8, valid_mask);
+LOCAL VOID cannot_spr_write_word IPT2( LIN_ADDR, lin_addr, IU16, valid_mask);
+LOCAL VOID cannot_spr_write_dword IPT2( LIN_ADDR, lin_addr, IU32, valid_mask);
+#endif /* PIG */
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check Supervisor Byte access. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL PHY_ADDR
+spr_chk_byte
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ ISM32, access /* Read(PG_R) or Write(PG_W) */
+ )
+
+
+ {
+ /* With paging enabled, translate as a supervisor access so A/D
+ bits get set and #PF is raised if needed; with paging off the
+ linear address is already the physical address. */
+ if ( GET_PG() == 1 )
+ {
+ access |= PG_S;
+ lin_addr = lin2phy(lin_addr, access);
+ }
+
+ return lin_addr;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check Supervisor Double Word access. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spr_chk_dword
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ ISM32, access /* Read(PG_R) or Write(PG_W) */
+ )
+
+
+ {
+ /* Translate the first byte; if the dword straddles a page
+ boundary (offset > 0xffc) also translate the last byte so both
+ pages are validated and have their A/D bits set. */
+ if ( GET_PG() == 1 )
+ {
+ access |= PG_S;
+ (VOID)lin2phy(lin_addr, access);
+ if ( (lin_addr & OFFSET_MASK) > LAST_DWORD_ON_PAGE )
+ (VOID)lin2phy(lin_addr + 3, access);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check Supervisor Word access. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spr_chk_word
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ ISM32, access /* Read(PG_R) or Write(PG_W) */
+ )
+
+
+ {
+ /* As spr_chk_dword but for a word: second translation only if
+ the word crosses a page boundary (offset > 0xffe). */
+ if ( GET_PG() == 1 )
+ {
+ access |= PG_S;
+ (VOID)lin2phy(lin_addr, access);
+ if ( (lin_addr & OFFSET_MASK) > LAST_WORD_ON_PAGE )
+ (VOID)lin2phy(lin_addr + 1, access);
+ }
+ }
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check User Byte access. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+usr_chk_byte
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ ISM32, access /* Read(PG_R) or Write(PG_W) */
+ )
+
+
+ {
+ PHY_ADDR phy_addr;
+
+ /* Default: paging off => physical == linear. */
+ phy_addr = lin_addr;
+
+ /* Access level depends on CPL: user-level checks only at CPL 3,
+ otherwise supervisor semantics apply. */
+ if ( GET_PG() == 1 )
+ {
+ if ( GET_CPL() == 3 )
+ access |= PG_U;
+ else
+ access |= PG_S;
+
+ phy_addr = lin2phy(lin_addr, access);
+ }
+
+ return phy_addr;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check User Double Word access. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+usr_chk_dword
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ ISM32, access /* Read(PG_R) or Write(PG_W) */
+ )
+
+
+ {
+ PHY_ADDR phy_addr;
+
+ phy_addr = lin_addr;
+
+ if ( GET_PG() == 1 )
+ {
+ if ( GET_CPL() == 3 )
+ access |= PG_U;
+ else
+ access |= PG_S;
+
+ phy_addr = lin2phy(lin_addr, access);
+
+ /* Dword straddles a page boundary: validate the second page
+ too, then return NO_PHYSICAL_MAPPING so the subsequent vir_
+ accessor re-maps via the linear address (no single physical
+ address covers both halves). */
+ if ( (lin_addr & OFFSET_MASK) > LAST_DWORD_ON_PAGE )
+ {
+ (VOID)lin2phy(lin_addr + 3, access);
+ phy_addr = NO_PHYSICAL_MAPPING;
+ }
+ }
+
+ return phy_addr;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check User Word access. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+usr_chk_word
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ ISM32, access /* Read(PG_R) or Write(PG_W) */
+ )
+
+
+ {
+ PHY_ADDR phy_addr;
+
+ phy_addr = lin_addr;
+
+ if ( GET_PG() == 1 )
+ {
+ if ( GET_CPL() == 3 )
+ access |= PG_U;
+ else
+ access |= PG_S;
+
+ phy_addr = lin2phy(lin_addr, access);
+ /* Word spans two pages - as usr_chk_dword, check both pages
+ and force later re-mapping via the linear address. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_WORD_ON_PAGE )
+ {
+ (VOID)lin2phy(lin_addr + 1, access);
+ phy_addr = NO_PHYSICAL_MAPPING;
+ }
+ }
+
+ return phy_addr;
+ }
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Read a Byte from memory. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU8
+spr_read_byte
+
+IFN1(
+ LIN_ADDR, lin_addr /* Linear Address */
+ )
+
+
+ {
+ /* Map (supervisor read) if paging is on, then read physically. */
+ if ( GET_PG() == 1 )
+ {
+ lin_addr = lin2phy(lin_addr, PG_R | PG_S);
+ }
+
+ return phy_read_byte(lin_addr);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Read a Double Word from memory. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+spr_read_dword
+
+IFN1(
+ LIN_ADDR, lin_addr /* Linear Address */
+ )
+
+
+ {
+ IU16 low_word;
+ IU16 high_word;
+
+ if ( GET_PG() == 1 )
+ {
+ /* A page-straddling dword is split into two word reads so
+ each half is translated against its own page. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_DWORD_ON_PAGE )
+ {
+ /* Spans two pages */
+ low_word = spr_read_word(lin_addr);
+ high_word = spr_read_word(lin_addr + 2);
+ return (IU32)high_word << 16 | low_word;
+ }
+ else
+ {
+ lin_addr = lin2phy(lin_addr, PG_R | PG_S);
+ }
+ }
+
+ return phy_read_dword(lin_addr);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Read a Word from memory. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU16
+spr_read_word
+
+IFN1(
+ LIN_ADDR, lin_addr /* Linear Address */
+ )
+
+
+ {
+ IU8 low_byte;
+ IU8 high_byte;
+
+ if ( GET_PG() == 1 )
+ {
+ /* A page-straddling word is split into two byte reads so each
+ byte is translated against its own page. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_WORD_ON_PAGE )
+ {
+ /* Spans two pages */
+ low_byte = spr_read_byte(lin_addr);
+ high_byte = spr_read_byte(lin_addr + 1);
+ return (IU16)high_byte << 8 | low_byte;
+ }
+ else
+ {
+ lin_addr = lin2phy(lin_addr, PG_R | PG_S);
+ }
+ }
+
+ return phy_read_word(lin_addr);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Write a Byte to memory. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spr_write_byte
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ IU8, data
+ )
+
+
+ {
+ /* Map (supervisor write) if paging is on, then write physically. */
+ if ( GET_PG() == 1 )
+ {
+ lin_addr = lin2phy(lin_addr, PG_W | PG_S);
+ }
+
+ phy_write_byte(lin_addr, data);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Write a Double Word to memory. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spr_write_dword
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ IU32, data
+ )
+
+
+ {
+ if ( GET_PG() == 1 )
+ {
+ /* A page-straddling dword is written as two words so each
+ half is translated against its own page. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_DWORD_ON_PAGE )
+ {
+ /* Spans two pages */
+ spr_write_word(lin_addr, (IU16)data);
+ spr_write_word(lin_addr + 2, (IU16)(data >> 16));
+ return;
+ }
+ else
+ {
+ lin_addr = lin2phy(lin_addr, PG_W | PG_S);
+ }
+ }
+
+ phy_write_dword(lin_addr, data);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Write a Word to memory. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spr_write_word
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ IU16, data
+ )
+
+
+ {
+ if ( GET_PG() == 1 )
+ {
+ /* A page-straddling word is written as two bytes so each
+ byte is translated against its own page. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_WORD_ON_PAGE )
+ {
+ /* Spans two pages */
+ spr_write_byte(lin_addr, (IU8)data);
+ spr_write_byte(lin_addr + 1, (IU8)(data >> 8));
+ return;
+ }
+ else
+ {
+ lin_addr = lin2phy(lin_addr, PG_W | PG_S);
+ }
+ }
+
+ phy_write_word(lin_addr, data);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Read Bytes from memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL void
+vir_read_bytes
+
+IFN4(
+ IU8 *, destbuff, /* Where the data goes */
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU32, num_bytes
+ )
+ {
+ /* Data breakpoint check is byte-granular on the start address. */
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_R, D_BYTE);
+ }
+ /* NOTE(review): bytes are read from the highest address downward
+ while destbuff fills forward, i.e. the buffer receives the
+ bytes in reverse memory order.  vir_write_bytes mirrors this,
+ so it appears intentional - confirm callers expect it.  Also
+ assumes num_bytes > 0; num_bytes == 0 wraps the address
+ arithmetic harmlessly (loop body never runs). */
+ if ( phy_addr ) {
+ phy_addr += (num_bytes-1);
+ while (num_bytes--) {
+ *destbuff++ = phy_read_byte(phy_addr);
+ phy_addr--;
+ }
+ } else {
+ lin_addr += (num_bytes-1);
+ while (num_bytes--) {
+ *destbuff++ = spr_read_byte(lin_addr);
+ lin_addr--;
+ }
+ }
+}
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Read a Byte from memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU8
+vir_read_byte
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr /* Physical Address, if non zero */
+ )
+
+
+ {
+ /* Debug-register data breakpoints are checked here, the common
+ point for all virtual accesses. */
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_R, D_BYTE);
+ }
+
+ /* phy_addr == 0 (NO_PHYSICAL_MAPPING) means no cached mapping:
+ re-map via the supervisor path using the linear address. */
+ if ( phy_addr )
+ {
+ return phy_read_byte(phy_addr);
+ }
+ else
+ {
+ return spr_read_byte(lin_addr);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Read a Double Word from memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+vir_read_dword
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr /* Physical Address, if non zero */
+ )
+
+
+ {
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_R, D_DWORD);
+ }
+
+ /* Zero phy_addr => no (single-page) mapping; fall back to the
+ supervisor read which handles page-straddling dwords. */
+ if ( phy_addr )
+ {
+ return phy_read_dword(phy_addr);
+ }
+ else
+ {
+ return spr_read_dword(lin_addr);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Read a Word from memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU16
+vir_read_word
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr /* Physical Address, if non zero */
+ )
+
+
+ {
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_R, D_WORD);
+ }
+
+ /* Zero phy_addr => no (single-page) mapping; fall back to the
+ supervisor read which handles page-straddling words. */
+ if ( phy_addr )
+ {
+ return phy_read_word(phy_addr);
+ }
+ else
+ {
+ return spr_read_word(lin_addr);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write Bytes to memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+vir_write_bytes
+
+IFN4(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU8 *, data, /* Pointer to data to be written */
+ IU32, num_bytes /* Number of bytes to act on */
+ )
+ {
+ IU8 data_byte;
+
+ /* check_D covers the whole span; the data-breakpoint check is
+ byte-granular on the start address. */
+ check_D(lin_addr, num_bytes);
+ if ( nr_data_break ) {
+ check_for_data_exception(lin_addr, D_W, D_BYTE);
+ }
+ /* NOTE(review): source bytes are consumed forward but stored to
+ descending addresses - the mirror of vir_read_bytes, so a
+ read/write round trip preserves data.  Assumes num_bytes > 0
+ (zero wraps the address arithmetic but writes nothing). */
+ if ( phy_addr ) {
+ phy_addr += (num_bytes - 1);
+ while (num_bytes--) {
+ data_byte = *data++;
+ phy_write_byte(phy_addr, data_byte);
+ phy_addr--;
+ }
+ } else {
+ lin_addr += (num_bytes - 1);
+ while (num_bytes--) {
+ data_byte = *data++;
+ spr_write_byte(lin_addr, data_byte);
+ lin_addr--;
+ }
+ }
+}
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write a Byte to memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+vir_write_byte
+
+IFN3(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU8, data
+ )
+
+
+ {
+ /* Descriptor-dirty / debug checks precede the actual store.      */
+ check_D(lin_addr, 1);
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_W, D_BYTE);
+ }
+
+ /* Zero phy_addr (NO_PHYSICAL_MAPPING) => re-map via the
+ supervisor write path using the linear address. */
+ if ( phy_addr )
+ {
+ phy_write_byte(phy_addr, data);
+ }
+ else
+ {
+ spr_write_byte(lin_addr, data);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write a Double Word to memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+vir_write_dword
+
+IFN3(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU32, data
+ )
+
+
+ {
+ check_D(lin_addr, 4);
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_W, D_DWORD);
+ }
+
+ /* Zero phy_addr => no single-page mapping; spr_write_dword
+ handles the page-straddling case. */
+ if ( phy_addr )
+ {
+ phy_write_dword(phy_addr, data);
+ }
+ else
+ {
+ spr_write_dword(lin_addr, data);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write a Word to memory. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+vir_write_word
+
+IFN3(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU16, data
+ )
+
+
+ {
+ check_D(lin_addr, 2);
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_W, D_WORD);
+ }
+
+ /* Zero phy_addr => no single-page mapping; spr_write_word
+ handles the page-straddling case. */
+ if ( phy_addr )
+ {
+ phy_write_word(phy_addr, data);
+ }
+ else
+ {
+ spr_write_word(lin_addr, data);
+ }
+ }
+
+
+
+#ifdef PIG
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Write a Byte to memory */
+/* But when Pigging INSD we have no data to write. Just flag address. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+cannot_spr_write_byte
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ IU8, valid_mask
+ )
+
+
+ {
+ /* Same translation as spr_write_byte; only the final store is
+ replaced by flagging the byte as unverifiable. */
+ if ( GET_PG() == 1 )
+ {
+ lin_addr = lin2phy(lin_addr, PG_W | PG_S);
+ }
+
+ cannot_phy_write_byte(lin_addr, valid_mask);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Write a Double Word to memory */
+/* But when Pigging INSD we have no data to write. Just flag address. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+cannot_spr_write_dword
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ IU32, valid_mask
+ )
+
+
+ {
+ if ( GET_PG() == 1 )
+ {
+ /* Split the mask like the data in spr_write_dword: low word
+ first, then the high word on the following page. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_DWORD_ON_PAGE )
+ {
+ /* Spans two pages */
+ cannot_spr_write_word(lin_addr, valid_mask & 0xffff);
+ cannot_spr_write_word(lin_addr + 2, valid_mask >> 16);
+ return;
+ }
+ else
+ {
+ lin_addr = lin2phy(lin_addr, PG_W | PG_S);
+ }
+ }
+
+ cannot_phy_write_dword(lin_addr, valid_mask);
+ }
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Supervisor Write a Word to memory. */
+/* But when Pigging INSW we have no data to write. Just flag address. */
+/* May cause #PF. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+cannot_spr_write_word
+
+IFN2(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ IU16, valid_mask
+ )
+
+
+ {
+ if ( GET_PG() == 1 )
+ {
+ /* Mask split mirrors the byte split in spr_write_word. */
+ if ( (lin_addr & OFFSET_MASK) > LAST_WORD_ON_PAGE )
+ {
+ /* Spans two pages */
+ cannot_spr_write_byte(lin_addr, valid_mask & 0xff);
+ cannot_spr_write_byte(lin_addr + 1, valid_mask >> 8);
+ return;
+ }
+ else
+ {
+ lin_addr = lin2phy(lin_addr, PG_W | PG_S);
+ }
+ }
+
+ cannot_phy_write_word(lin_addr, valid_mask);
+ }
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write a Byte to memory. */
+/* Pigging variant: performs the same checks as vir_write_byte but    */
+/* flags the target instead of storing (no data available under PIG). */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+cannot_vir_write_byte
+
+IFN3(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ LIN_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU8, valid_mask
+ )
+
+
+ {
+ check_D(lin_addr, 1);
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_W, D_BYTE);
+ }
+
+ /* Zero phy_addr => re-map via the supervisor path. */
+ if ( phy_addr )
+ {
+ cannot_phy_write_byte(phy_addr, valid_mask);
+ }
+ else
+ {
+ cannot_spr_write_byte(lin_addr, valid_mask);
+ }
+ }
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write a Double Word to memory. */
+/* Pigging variant of vir_write_dword - flags rather than stores.     */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+cannot_vir_write_dword
+
+IFN3(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU32, valid_mask
+ )
+
+
+ {
+ check_D(lin_addr, 4);
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_W, D_DWORD);
+ }
+
+ if ( phy_addr )
+ {
+ cannot_phy_write_dword(phy_addr, valid_mask);
+ }
+ else
+ {
+ cannot_spr_write_dword(lin_addr, valid_mask);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Virtual Write a Word to memory. */
+/* Pigging variant of vir_write_word - flags rather than stores.      */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+cannot_vir_write_word
+
+IFN3(
+ LIN_ADDR, lin_addr, /* Linear Address */
+ PHY_ADDR, phy_addr, /* Physical Address, if non zero */
+ IU16, valid_mask
+ )
+
+
+ {
+ check_D(lin_addr, 2);
+ if ( nr_data_break )
+ {
+ check_for_data_exception(lin_addr, D_W, D_WORD);
+ }
+
+ if ( phy_addr )
+ {
+ cannot_phy_write_word(phy_addr, valid_mask);
+ }
+ else
+ {
+ cannot_spr_write_word(lin_addr, valid_mask);
+ }
+ }
+#endif /* PIG */
+
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_page.h b/private/mvdm/softpc.new/base/ccpu386/c_page.h
new file mode 100644
index 000000000..1fa17eefb
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_page.h
@@ -0,0 +1,226 @@
+/*[
+
+c_page.h
+
+Paging Support.
+---------------
+
+LOCAL CHAR SccsID[]="@(#)c_page.h 1.7 02/28/95";
+
+]*/
+
+
+/*
+ Page Access Types.
+ */
+#define PG_R 0x0 /* Read */
+#define PG_W 0x1 /* Write */
+
+/*
+ Supervisor Memory Access Check Functions.
+
+ Will Check Access as per Supervisor (taking #PF if reqd.), 'A/D' bits
+ will be set in the Page Entries, no other action occurs.
+ Normally these routines will be followed by vir_.. calls.
+ */
+IMPORT IU32 spr_chk_byte
+
+IPT2(
+ IU32, lin_addr,
+ ISM32, access_type
+
+ );
+
+IMPORT VOID spr_chk_dword
+
+IPT2(
+ IU32, lin_addr,
+ ISM32, access_type
+
+ );
+
+IMPORT VOID spr_chk_word
+
+IPT2(
+ IU32, lin_addr,
+ ISM32, access_type
+
+ );
+
+
+/*
+ User Memory Access Check Functions.
+
+ Will Check Access as per User (taking #PF if reqd.), 'A/D' bits will
+ be set in the Page Entries, no other action occurs.
+ Normally these routines will be followed by vir_.. calls.
+ */
+IMPORT IU32 usr_chk_byte
+
+IPT2(
+ IU32, lin_addr,
+ ISM32, access_type
+
+ );
+
+IMPORT IU32 usr_chk_dword
+
+IPT2(
+ IU32, lin_addr,
+ ISM32, access_type
+
+ );
+
+IMPORT IU32 usr_chk_word
+
+IPT2(
+ IU32, lin_addr,
+ ISM32, access_type
+
+ );
+
+
+/*
+ Supervisor Memory Access Functions.
+
+ Will Check Access as per Supervisor (taking #PF if reqd.), 'A/D'
+ bits will be set in the Page Entries, Map Address and Perform
+ Read or Write.
+ */
+IMPORT IU8 spr_read_byte
+
+IPT1(
+ IU32, lin_addr
+
+ );
+
+IMPORT IU32 spr_read_dword
+
+IPT1(
+ IU32, lin_addr
+
+ );
+
+IMPORT IU16 spr_read_word
+
+IPT1(
+ IU32, lin_addr
+
+ );
+
+IMPORT VOID spr_write_byte
+
+IPT2(
+ IU32, lin_addr,
+ IU8, data
+
+ );
+
+IMPORT VOID spr_write_dword
+
+IPT2(
+ IU32, lin_addr,
+ IU32, data
+
+ );
+
+IMPORT VOID spr_write_word
+
+IPT2(
+ IU32, lin_addr,
+ IU16, data
+
+ );
+
+
+/*
+ Virtual Memory Access Functions.
+
+ No Checks are made (assumed already done), just Perform Read or
+ Write.
+ This is also the point at which data breakpoints are checked.
+ */
+
+#define NO_PHYSICAL_MAPPING 0 /* Indicates no physical address is
+ available, the linear address will be
+ re-mapped. */
+
+IMPORT IU8 vir_read_byte
+
+IPT2(
+ IU32, lin_addr,
+ IU32, phy_addr
+
+ );
+
+IMPORT IU32 vir_read_dword
+
+IPT2(
+ IU32, lin_addr,
+ IU32, phy_addr
+
+ );
+
+IMPORT IU16 vir_read_word
+
+IPT2(
+ IU32, lin_addr,
+ IU32, phy_addr
+
+ );
+
+IMPORT VOID vir_write_byte
+
+IPT3(
+ IU32, lin_addr,
+ IU32, phy_addr,
+ IU8, data
+
+ );
+
+IMPORT VOID vir_write_dword
+
+IPT3(
+ IU32, lin_addr,
+ IU32, phy_addr,
+ IU32, data
+
+ );
+
+IMPORT VOID vir_write_word
+
+IPT3(
+ IU32, lin_addr,
+ IU32, phy_addr,
+ IU16, data
+
+ );
+
+#ifdef PIG
+IMPORT VOID cannot_vir_write_byte
+
+IPT3(
+ IU32, lin_addr,
+ IU32, phy_addr,
+ IU8, valid_mask
+ );
+
+IMPORT VOID cannot_vir_write_dword
+
+IPT3(
+ IU32, lin_addr,
+ IU32, phy_addr,
+ IU32, valid_mask
+ );
+
+IMPORT VOID cannot_vir_write_word
+
+IPT3(
+ IU32, lin_addr,
+ IU32, phy_addr,
+ IU16, valid_mask
+ );
+#endif /* PIG */
+
+extern void vir_write_bytes IPT4(LIN_ADDR, lin_addr,PHY_ADDR, phy_addr, IU8 *, data, IU32, num_bytes);
+extern void vir_read_bytes IPT4(IU8 *, destbuff, LIN_ADDR, lin_addr, PHY_ADDR, phy_addr, IU32, num_bytes);
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_prot.c b/private/mvdm/softpc.new/base/ccpu386/c_prot.c
new file mode 100644
index 000000000..074ed5ce3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_prot.c
@@ -0,0 +1,287 @@
+/*[
+
+c_prot.c
+
+LOCAL CHAR SccsID[]="@(#)c_prot.c 1.7 01/19/95";
+
+Protected Mode Support (Misc).
+------------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_page.h>
+#include <fault.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check selector is valid for load into SS register. */
+/* Only invoked in protected mode. */
+/* Take GP if segment not valid. */
+/* Take SF if segment not present. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+check_SS
+
+IFN4(
+ IU16, selector, /* (I) 16-bit selector to stack segment */
+ ISM32, privilege, /* (I) privilege level to check against */
+ IU32 *, descr_addr, /* (O) address of stack segment descriptor */
+ CPU_DESCR *, entry /* (O) the decoded descriptor */
+ )
+
+
+ {
+ /* must be within GDT or LDT */
+ if ( selector_outside_GDT_LDT(selector, descr_addr) )
+ GP(selector, FAULT_CHECKSS_SELECTOR); /* NOTE(review): GP() is assumed to raise #GP and not return */
+
+ /* decode the descriptor the selector refers to */
+ read_descriptor_linear(*descr_addr, entry);
+
+ /* must be writable data */
+ switch ( descriptor_super_type(entry->AR) )
+ {
+ case EXPANDUP_WRITEABLE_DATA:
+ case EXPANDDOWN_WRITEABLE_DATA:
+ break; /* good type */
+
+ default:
+ GP(selector, FAULT_CHECKSS_BAD_SEG_TYPE); /* bad type */
+ }
+
+ /* access check requires RPL == DPL == privilege */
+ if ( GET_SELECTOR_RPL(selector) != privilege ||
+ GET_AR_DPL(entry->AR) != privilege )
+ GP(selector, FAULT_CHECKSS_ACCESS);
+
+ /* finally it must be present (stack fault, not #GP, if absent) */
+ if ( GET_AR_P(entry->AR) == NOT_PRESENT )
+ SF(selector, FAULT_CHECKSS_NOTPRESENT);
+
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Get SS:(E)SP for a given privilege from the TSS */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+get_stack_selector_from_TSS
+
+IFN3(
+ IU32, priv, /* (I) priv level for which stack is reqd */
+ IU16 *, new_ss, /* (O) SS as retrieved from TSS */
+ IU32 *, new_sp /* (O) (E)SP as retrieved from TSS */
+ )
+
+
+ {
+ IU32 address;
+
+ /* Only privilege levels 0..2 have inner stack pointers stored in the
+ TSS. The switches below originally had no default case, so any
+ other value of priv left 'address' uninitialised (undefined read).
+ Default defensively to the ring 0 slot. */
+ if ( GET_TR_AR_SUPER() == BUSY_TSS )
+ {
+ /* 286 TSS: 16-bit SP/SS pairs at offsets 2, 6, 0xa */
+ switch ( priv )
+ {
+ case 0: address = 0x02; break;
+ case 1: address = 0x06; break;
+ case 2: address = 0x0a; break;
+ default: address = 0x02; break; /* defensive - priv must be 0..2 */
+ }
+
+ address += GET_TR_BASE();
+
+ *new_sp = (IU32)spr_read_word(address);
+ *new_ss = spr_read_word(address+2);
+ }
+ else
+ {
+ /* 386 TSS: 32-bit ESP + SS pairs at offsets 4, 0xc, 0x14 */
+ switch ( priv )
+ {
+ case 0: address = 0x04; break;
+ case 1: address = 0x0c; break;
+ case 2: address = 0x14; break;
+ default: address = 0x04; break; /* defensive - priv must be 0..2 */
+ }
+
+ address += GET_TR_BASE();
+
+ *new_sp = spr_read_dword(address);
+ *new_ss = spr_read_word(address+4);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check a Data Segment Register (DS, ES, FS, GS) during */
+/* a Privilege Change. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+load_data_seg_new_privilege
+
+IFN1(
+ ISM32, indx /* Segment Register identifier */
+ )
+
+
+ {
+ IU16 selector; /* selector to be examined */
+ IU32 descr; /* ... its associated descriptor location */
+ ISM32 dpl; /* ... its associated DPL */
+ BOOL valid; /* selector validity */
+
+ selector = GET_SR_SELECTOR(indx); /* take local copy */
+
+ if ( !selector_outside_GDT_LDT(selector, &descr) )
+ {
+ valid = TRUE; /* at least its in table */
+
+ /* Type must be ok, else it would not have been loaded. */
+
+ /* for data and non-conforming code the access check applies */
+ if ( GET_SR_AR_C(indx) == 0 )
+ {
+ /* The access check is:- DPL >= CPL and DPL >= RPL */
+ dpl = GET_SR_AR_DPL(indx);
+ if ( dpl >= GET_CPL() && dpl >= GET_SELECTOR_RPL(selector) )
+ ; /* ok */
+ else
+ valid = FALSE; /* fails privilege check */
+ }
+ }
+ else
+ {
+ valid = FALSE; /* not in table */
+ }
+
+ if ( !valid )
+ {
+ /* segment can't be seen at new privilege - null it out */
+ SET_SR_SELECTOR(indx, 0);
+ SET_SR_AR_W(indx, 0); /* deny write */
+ SET_SR_AR_R(indx, 0); /* deny read */
+
+ /* the following lines were added to make the C-CPU act like the Soft-486
+ * in this respect... an investigation is under way to see how the real
+ * i486 behaves - this code may need to be changed in the future
+ */
+ SET_SR_BASE(indx, 0);
+ SET_SR_LIMIT(indx, 0);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Validate a stack segment selector, during a stack change */
+/* Take #TS(selector) if not valid stack selector */
+/* Take #SF(selector) if segment not present */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+validate_SS_on_stack_change
+
+IFN4(
+ IU32, priv, /* (I) privilege level to check against */
+ IU16, selector, /* (I) selector to be checked */
+ IU32 *, descr, /* (O) address of related descriptor */
+ CPU_DESCR *, entry /* (O) the decoded descriptor */
+ )
+
+
+ {
+ /* must be within GDT or LDT */
+ if ( selector_outside_GDT_LDT(selector, descr) )
+ TS(selector, FAULT_VALSS_CHG_SELECTOR); /* NOTE(review): TS() assumed to raise #TS and not return */
+
+ read_descriptor_linear(*descr, entry);
+
+ /* do access check: stack segment needs RPL == DPL == priv */
+ if ( GET_SELECTOR_RPL(selector) != priv ||
+ GET_AR_DPL(entry->AR) != priv )
+ TS(selector, FAULT_VALSS_CHG_ACCESS);
+
+ /* do type check: must be writable data */
+ switch ( descriptor_super_type(entry->AR) )
+ {
+ case EXPANDUP_WRITEABLE_DATA:
+ case EXPANDDOWN_WRITEABLE_DATA:
+ break; /* ok */
+
+ default:
+ TS(selector, FAULT_VALSS_CHG_BAD_SEG_TYPE); /* wrong type */
+ }
+
+ /* finally check it is present (stack fault, not #TS, if absent) */
+ if ( GET_AR_P(entry->AR) == NOT_PRESENT )
+ SF(selector, FAULT_VALSS_CHG_NOTPRESENT);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Validate TSS selector. */
+/* Take #GP(selector) or #TS(selector) if not valid TSS. */
+/* Take #NP(selector) if TSS not present */
+/* Return super type of TSS descriptor. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL ISM32
+validate_TSS
+
+IFN3(
+ IU16, selector, /* (I) selector to be checked */
+ IU32 *, descr_addr, /* (O) address of related descriptor */
+ BOOL, is_switch /* (I) if true we are in task switch */
+ )
+
+
+ {
+ BOOL is_ok = TRUE;
+ IU8 AR = 0; /* initialised: on the invalid-selector path below AR and
+ super were previously read without ever being written */
+ ISM32 super = 0;
+
+ /* must be in GDT */
+ if ( selector_outside_GDT(selector, descr_addr) )
+ {
+ is_ok = FALSE;
+ }
+ else
+ {
+ /* is it really an available TSS segment (is_switch false) or
+ is it really a busy TSS segment (is_switch true) */
+ AR = spr_read_byte((*descr_addr)+5);
+ super = descriptor_super_type((IU16)AR);
+ if ( ( !is_switch &&
+ (super == AVAILABLE_TSS || super == XTND_AVAILABLE_TSS) )
+ ||
+ ( is_switch &&
+ (super == BUSY_TSS || super == XTND_BUSY_TSS) ) )
+ ; /* ok */
+ else
+ is_ok = FALSE;
+ }
+
+ /* handle invalid TSS */
+ if ( !is_ok )
+ {
+ if ( is_switch )
+ TS(selector, FAULT_VALTSS_SELECTOR);
+ else
+ GP(selector, FAULT_VALTSS_SELECTOR);
+ }
+
+ /* must be present. NOTE(review): assumes TS()/GP() above raise a fault
+ and do not return; AR/super are now zero (not garbage) if they do */
+ if ( GET_AR_P(AR) == NOT_PRESENT )
+ NP(selector, FAULT_VALTSS_NP);
+
+ return super;
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_prot.h b/private/mvdm/softpc.new/base/ccpu386/c_prot.h
new file mode 100644
index 000000000..fe61a1317
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_prot.h
@@ -0,0 +1,56 @@
+/*[
+
+c_prot.h
+
+Protected Mode Support (Misc).
+------------------------------
+
+LOCAL CHAR SccsID[]="@(#)c_prot.h 1.4 02/09/94";
+
+]*/
+
+
+IMPORT VOID check_SS
+
+IPT4(
+ IU16, selector,
+ ISM32, privilege,
+ IU32 *, descr_addr,
+ CPU_DESCR *, entry
+
+ );
+
+IMPORT VOID get_stack_selector_from_TSS
+
+IPT3(
+ IU32, priv,
+ IU16 *, new_ss,
+ IU32 *, new_sp
+
+ );
+
+IMPORT VOID load_data_seg_new_privilege
+
+IPT1(
+ ISM32, indx
+
+ );
+
+IMPORT VOID validate_SS_on_stack_change
+
+IPT4(
+ IU32, priv,
+ IU16, selector,
+ IU32 *, descr,
+ CPU_DESCR *, entry
+
+ );
+
+IMPORT ISM32 validate_TSS
+
+IPT3(
+ IU16, selector,
+ IU32 *, descr_addr,
+ BOOL, is_switch
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_reg.c b/private/mvdm/softpc.new/base/ccpu386/c_reg.c
new file mode 100644
index 000000000..d6541729a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_reg.c
@@ -0,0 +1,1879 @@
+/*[
+
+c_reg.c
+
+LOCAL CHAR SccsID[]="@(#)c_reg.c 1.24 02/13/95 Copyright Insignia Solutions Ltd.";
+
+Provide External Interface to CPU Registers.
+--------------------------------------------
+
+]*/
+
+#include <insignia.h>
+
+#include <host_def.h>
+
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+ /* NB our own interface is actually defined
+ in 'cpu.h', however we can not include it
+ here as it would redefine all the internal
+ macros. This is the only file where the
+ internal register definitions meet the
+ external definitions. Obviously the external
+ definitions here and in 'cpu.h' must agree
+ with each other. We just can't get the
+ compiler to prove it for us! */
+#include <c_xtrn.h>
+#include <mov.h>
+
+#include <Fpu_c.h>
+#include <Pigger_c.h>
+#ifdef PIG
+#include <gdpvar.h>
+/* AR_FIXUP: when AR fixups are requested, make the c_get*_AR() accessors
+ report canonical access rights in real mode (0x93) and V86 mode (0xF3).
+ NOTE(review): the macro expands to bare 'return' statements - it must
+ only appear at the top of a function returning an access-rights value. */
+#define AR_FIXUP \
+{ \
+ if (GLOBAL_AR_FixupWanted) \
+ { \
+ if (GET_PE()==0) \
+ return 0x93; \
+ if (GET_VM()!=0) \
+ return 0xF3; \
+ } \
+}
+#else /* PIG */
+#define AR_FIXUP
+#endif /* PIG */
+
+/*
+ Prototype our internal functions.
+ */
+LOCAL IU16 get_seg_ar IPT1(ISM32, indx);
+
+LOCAL VOID set_seg_ar IPT2(ISM32, indx, IU16, val);
+
+LOCAL IU32 get_seg_limit IPT1(ISM32, indx);
+
+LOCAL VOID set_seg_limit IPT2(ISM32, indx, IU32, val);
+
+/*
+ =====================================================================
+ INTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Get segment register access rights. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL IU16
+get_seg_ar
+
+IFN1(
+ ISM32, indx /* index to segment register */
+ )
+
+
+ {
+ IU16 ar;
+
+ /*
+ Note we return only the essentials that describe the current
+ semantics we are applying to the segment, not necessarily the
+ access rights actually loaded. However the value provided may be
+ used to restore the segment register via the associated 'set'
+ function.
+
+ We don't provide G or AVL. P and A are assumed to be set.
+
+ We do provide DPL, X(B), E, W for DATA(SS,DS,ES,FS,GS) segments.
+ We do provide DPL, X(D), C, R for CODE(CS) segments.
+ */
+
+ /* neither writable nor readable (and not CS) => invalidated register */
+ if ( GET_SR_AR_W(indx) == 0 && GET_SR_AR_R(indx) == 0 && indx != CS_REG )
+ return (IU16)0; /* Invalid */
+
+ /* Conforming attribute or CS_REG with non writeable segment
+ means CODE segment */
+ if ( GET_SR_AR_C(indx) || (indx == CS_REG && !GET_SR_AR_W(indx)) )
+ {
+ /* Set Bits 4 and 3 and output C and R attributes */
+ ar = BIT4_MASK | BIT3_MASK |
+ GET_SR_AR_C(indx) << 2 |
+ GET_SR_AR_R(indx) << 1;
+ }
+ else /* DATA segment */
+ {
+ /* Set Bit 4 and output E and W attributes */
+ ar = BIT4_MASK |
+ GET_SR_AR_E(indx) << 2 |
+ GET_SR_AR_W(indx) << 1;
+ }
+
+ /* Add in DPL (bits 5-6) and X (bit 14) attributes */
+ ar = ar | GET_SR_AR_DPL(indx) << 5 | GET_SR_AR_X(indx) << 14;
+
+ /* Add in P and A (always set) */
+ ar = ar | BIT7_MASK | BIT0_MASK;
+
+ return ar;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set segment register access rights. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+set_seg_ar
+
+IFN2(
+ ISM32, indx, /* index to segment register */
+ IU16, val /* access rights in get_seg_ar() format */
+ )
+
+
+ {
+ /*
+ Note we expect to be given an access rights similar to the one
+ provided by the get_seg_ar() function. We extract the essential
+ information from it into our internal variables.
+
+ We ignore P and A, and Bit 4.
+
+ We use DPL, X(B), E, W for DATA(SS,DS,ES,FS,GS) segments.
+ We use DPL, X(D), C, R for CODE(CS) segments.
+ */
+
+ if ( val == 0x0 )
+ {
+ /* Invalid - mark register unusable (see get_seg_ar) */
+ SET_SR_AR_R(indx, 0); /* !read */
+ SET_SR_AR_W(indx, 0); /* !write */
+ return;
+ }
+
+ SET_SR_AR_X(indx, GET_AR_X(val));
+ SET_SR_AR_DPL(indx, GET_AR_DPL(val));
+
+ /* Bit 3 distinguishes CODE (set) from DATA (clear) */
+ if ( val & BIT3_MASK )
+ {
+ /* CODE segment */
+ SET_SR_AR_W(indx, 0); /* !write */
+ SET_SR_AR_E(indx, 0); /* expand up */
+ SET_SR_AR_R(indx, GET_AR_R(val));
+ SET_SR_AR_C(indx, GET_AR_C(val));
+ }
+ else
+ {
+ /* DATA segment */
+ SET_SR_AR_R(indx, 1); /* readable */
+ SET_SR_AR_C(indx, 0); /* !conform */
+ SET_SR_AR_W(indx, GET_AR_W(val));
+ SET_SR_AR_E(indx, GET_AR_E(val));
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Get segment register limit. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL IU32
+get_seg_limit
+
+IFN1(
+ ISM32, indx /* index to segment register */
+ )
+
+
+ {
+ /* Return the byte-granular limit for segment register 'indx'.
+ Note limit already expanded to take account of G bit */
+ return GET_SR_LIMIT(indx);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set segment register limit. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+set_seg_limit
+
+IFN2(
+ ISM32, indx, /* index to segment register */
+ IU32, val /* new value for limit */
+ )
+
+
+ {
+ /* Store a byte-granular limit for segment register 'indx'.
+ Note limit assumed already expanded to take account of G bit */
+ SET_SR_LIMIT(indx, val);
+ }
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Byte Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL IU8
+c_getAL()
+ {
+ return (IU8)GET_AL();
+ }
+
+GLOBAL IU8
+c_getCL()
+ {
+ return (IU8)GET_CL();
+ }
+
+GLOBAL IU8
+c_getDL()
+ {
+ return (IU8)GET_DL();
+ }
+
+GLOBAL IU8
+c_getBL()
+ {
+ return (IU8)GET_BL();
+ }
+
+GLOBAL IU8
+c_getAH()
+ {
+ return (IU8)GET_AH();
+ }
+
+GLOBAL IU8
+c_getCH()
+ {
+ return (IU8)GET_CH();
+ }
+
+GLOBAL IU8
+c_getDH()
+ {
+ return (IU8)GET_DH();
+ }
+
+GLOBAL IU8
+c_getBH()
+ {
+ return (IU8)GET_BH();
+ }
+
+GLOBAL VOID
+c_setAL
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_AL(val);
+ }
+
+GLOBAL VOID
+c_setCL
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_CL(val);
+ }
+
+GLOBAL VOID
+c_setDL
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_DL(val);
+ }
+
+GLOBAL VOID
+c_setBL
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_BL(val);
+ }
+
+GLOBAL VOID
+c_setAH
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_AH(val);
+ }
+
+GLOBAL VOID
+c_setCH
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_CH(val);
+ }
+
+GLOBAL VOID
+c_setDH
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_DH(val);
+ }
+
+GLOBAL VOID
+c_setBH
+
+IFN1(
+ IU8, val
+ )
+
+
+ {
+ SET_BH(val);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Word Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL IU16
+c_getAX()
+ {
+ return (IU16)GET_AX();
+ }
+
+GLOBAL IU16
+c_getCX()
+ {
+ return (IU16)GET_CX();
+ }
+
+GLOBAL IU16
+c_getDX()
+ {
+ return (IU16)GET_DX();
+ }
+
+GLOBAL IU16
+c_getBX()
+ {
+ return (IU16)GET_BX();
+ }
+
+GLOBAL IU16
+c_getSP()
+ {
+ return (IU16)GET_SP();
+ }
+
+GLOBAL IU16
+c_getBP()
+ {
+ return (IU16)GET_BP();
+ }
+
+GLOBAL IU16
+c_getSI()
+ {
+ return (IU16)GET_SI();
+ }
+
+GLOBAL IU16
+c_getDI()
+ {
+ return (IU16)GET_DI();
+ }
+
+GLOBAL IU32
+c_getEIP()
+ {
+ return (IU32)GET_EIP();
+ }
+
+GLOBAL IU16
+c_getIP()
+ {
+ return (IU16)GET_EIP();
+ }
+
+GLOBAL VOID
+c_setAX
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_AX(val);
+ }
+
+GLOBAL VOID
+c_setCX
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_CX(val);
+ }
+
+GLOBAL VOID
+c_setDX
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_DX(val);
+ }
+
+GLOBAL VOID
+c_setBX
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_BX(val);
+ }
+
+GLOBAL VOID
+c_setSP
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_SP(val);
+ }
+
+GLOBAL VOID
+c_setBP
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_BP(val);
+ }
+
+GLOBAL VOID
+c_setSI
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_SI(val);
+ }
+
+GLOBAL VOID
+c_setDI
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_DI(val);
+ }
+
+GLOBAL VOID
+c_setEIP
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_EIP(val);
+ }
+
+GLOBAL VOID
+c_setIP
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_EIP(val);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Double Word Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL IU32
+c_getEAX()
+ {
+ return (IU32)GET_EAX();
+ }
+
+GLOBAL IU32
+c_getECX()
+ {
+ return (IU32)GET_ECX();
+ }
+
+GLOBAL IU32
+c_getEDX()
+ {
+ return (IU32)GET_EDX();
+ }
+
+GLOBAL IU32
+c_getEBX()
+ {
+ return (IU32)GET_EBX();
+ }
+
+GLOBAL IU32
+c_getESP()
+ {
+ return (IU32)GET_ESP();
+ }
+
+GLOBAL IU32
+c_getEBP()
+ {
+ return (IU32)GET_EBP();
+ }
+
+GLOBAL IU32
+c_getESI()
+ {
+ return (IU32)GET_ESI();
+ }
+
+GLOBAL IU32
+c_getEDI()
+ {
+ return (IU32)GET_EDI();
+ }
+
+GLOBAL VOID
+c_setEAX
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_EAX(val);
+ }
+
+GLOBAL VOID
+c_setECX
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_ECX(val);
+ }
+
+GLOBAL VOID
+c_setEDX
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_EDX(val);
+ }
+
+GLOBAL VOID
+c_setEBX
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_EBX(val);
+ }
+
+GLOBAL VOID
+c_setESP
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_ESP(val);
+ }
+
+GLOBAL VOID
+c_setEBP
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_EBP(val);
+ }
+
+GLOBAL VOID
+c_setESI
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_ESI(val);
+ }
+
+GLOBAL VOID
+c_setEDI
+
+IFN1(
+ IU32, val
+ )
+
+
+ {
+ SET_EDI(val);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Segment Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL IU16
+c_getES()
+ {
+ return (IU16)GET_ES_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getCS()
+ {
+ return (IU16)GET_CS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getSS()
+ {
+ return (IU16)GET_SS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getDS()
+ {
+ return (IU16)GET_DS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getFS()
+ {
+ return (IU16)GET_FS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getGS()
+ {
+ return (IU16)GET_GS_SELECTOR();
+ }
+
+GLOBAL ISM32
+c_setES
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ return call_cpu_function((CALL_CPU *)load_data_seg, TYPE_I_W, ES_REG, val);
+ }
+
+GLOBAL ISM32
+c_setCS
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ return call_cpu_function((CALL_CPU *)load_code_seg, TYPE_W, CS_REG, val);
+ }
+
+GLOBAL ISM32
+c_setSS
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ return call_cpu_function((CALL_CPU *)load_stack_seg, TYPE_W, SS_REG, val);
+ }
+
+GLOBAL ISM32
+c_setDS
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ return call_cpu_function((CALL_CPU *)load_data_seg, TYPE_I_W, DS_REG, val);
+ }
+
+GLOBAL ISM32
+c_setFS
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ return call_cpu_function((CALL_CPU *)load_data_seg, TYPE_I_W, FS_REG, val);
+ }
+
+GLOBAL ISM32
+c_setGS
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ return call_cpu_function((CALL_CPU *)load_data_seg, TYPE_I_W, GS_REG, val);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Full(Private) Segment Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL IU16
+c_getES_SELECTOR()
+ {
+ return (IU16)GET_ES_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getCS_SELECTOR()
+ {
+ return (IU16)GET_CS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getSS_SELECTOR()
+ {
+ return (IU16)GET_SS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getDS_SELECTOR()
+ {
+ return (IU16)GET_DS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getFS_SELECTOR()
+ {
+ return (IU16)GET_FS_SELECTOR();
+ }
+
+GLOBAL IU16
+c_getGS_SELECTOR()
+ {
+ return (IU16)GET_GS_SELECTOR();
+ }
+
+GLOBAL IU32
+c_getES_BASE()
+ {
+ return (IU32)GET_ES_BASE();
+ }
+
+GLOBAL IU32
+c_getCS_BASE()
+ {
+ return (IU32)GET_CS_BASE();
+ }
+
+GLOBAL IU32
+c_getSS_BASE()
+ {
+ return (IU32)GET_SS_BASE();
+ }
+
+GLOBAL IU32
+c_getDS_BASE()
+ {
+ return (IU32)GET_DS_BASE();
+ }
+
+GLOBAL IU32
+c_getFS_BASE()
+ {
+ return (IU32)GET_FS_BASE();
+ }
+
+GLOBAL IU32
+c_getGS_BASE()
+ {
+ return (IU32)GET_GS_BASE();
+ }
+
+GLOBAL IU32
+c_getES_LIMIT()
+ {
+ return (IU32)get_seg_limit(ES_REG);
+ }
+
+GLOBAL IU32
+c_getCS_LIMIT()
+ {
+ return (IU32)get_seg_limit(CS_REG);
+ }
+
+GLOBAL IU32
+c_getSS_LIMIT()
+ {
+ return (IU32)get_seg_limit(SS_REG);
+ }
+
+GLOBAL IU32
+c_getDS_LIMIT()
+ {
+ return (IU32)get_seg_limit(DS_REG);
+ }
+
+GLOBAL IU32
+c_getFS_LIMIT()
+ {
+ return (IU32)get_seg_limit(FS_REG);
+ }
+
+GLOBAL IU32
+c_getGS_LIMIT()
+ {
+ return (IU32)get_seg_limit(GS_REG);
+ }
+
+GLOBAL IU16
+c_getES_AR()
+ {
+ AR_FIXUP;
+ return (IU16)get_seg_ar(ES_REG);
+ }
+
+GLOBAL IU16
+c_getCS_AR()
+ {
+ AR_FIXUP;
+ return (IU16)get_seg_ar(CS_REG);
+ }
+
+GLOBAL IU16
+c_getSS_AR()
+ {
+ AR_FIXUP;
+ return (IU16)get_seg_ar(SS_REG);
+ }
+
+GLOBAL IU16
+c_getDS_AR()
+ {
+ AR_FIXUP;
+ return (IU16)get_seg_ar(DS_REG);
+ }
+
+GLOBAL IU16
+c_getFS_AR()
+ {
+ AR_FIXUP;
+ return (IU16)get_seg_ar(FS_REG);
+ }
+
+GLOBAL IU16
+c_getGS_AR()
+ {
+ AR_FIXUP;
+ return (IU16)get_seg_ar(GS_REG);
+ }
+
+GLOBAL VOID
+c_setES_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_ES_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setCS_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_CS_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setSS_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_SS_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setDS_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_DS_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setFS_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_FS_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setGS_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_GS_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setES_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base,
+ IU32, limit,
+ IU16, ar
+ )
+ {
+ SET_ES_BASE(base);
+ set_seg_limit(ES_REG, limit);
+ set_seg_ar(ES_REG, ar);
+ }
+
+GLOBAL VOID
+c_setCS_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base,
+ IU32, limit,
+ IU16, ar
+ )
+ {
+ SET_CS_BASE(base);
+ set_seg_limit(CS_REG, limit);
+ set_seg_ar(CS_REG, ar);
+ }
+
+GLOBAL VOID
+c_setSS_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base,
+ IU32, limit,
+ IU16, ar
+ )
+ {
+ SET_SS_BASE(base);
+ set_seg_limit(SS_REG, limit);
+ set_seg_ar(SS_REG, ar);
+ }
+
+GLOBAL VOID
+c_setDS_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base,
+ IU32, limit,
+ IU16, ar
+ )
+ {
+ SET_DS_BASE(base);
+ set_seg_limit(DS_REG, limit);
+ set_seg_ar(DS_REG, ar);
+ }
+
+GLOBAL VOID
+c_setFS_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base,
+ IU32, limit,
+ IU16, ar
+ )
+ {
+ SET_FS_BASE(base);
+ set_seg_limit(FS_REG, limit);
+ set_seg_ar(FS_REG, ar);
+ }
+
+GLOBAL VOID
+c_setGS_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base,
+ IU32, limit,
+ IU16, ar
+ )
+ {
+ SET_GS_BASE(base);
+ set_seg_limit(GS_REG, limit);
+ set_seg_ar(GS_REG, ar);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Flags. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL ISM32
+c_getAF()
+ {
+ return (ISM32)GET_AF();
+ }
+
+GLOBAL ISM32
+c_getCF()
+ {
+ return (ISM32)GET_CF();
+ }
+
+GLOBAL ISM32
+c_getDF()
+ {
+ return (ISM32)GET_DF();
+ }
+
+GLOBAL ISM32
+c_getIF()
+ {
+ return (ISM32)GET_IF();
+ }
+
+GLOBAL ISM32
+c_getOF()
+ {
+ return (ISM32)GET_OF();
+ }
+
+GLOBAL ISM32
+c_getPF()
+ {
+ return (ISM32)GET_PF();
+ }
+
+GLOBAL ISM32
+c_getSF()
+ {
+ return (ISM32)GET_SF();
+ }
+
+GLOBAL ISM32
+c_getTF()
+ {
+ return (ISM32)GET_TF();
+ }
+
+GLOBAL ISM32
+c_getZF()
+ {
+ return (ISM32)GET_ZF();
+ }
+
+GLOBAL ISM32
+c_getIOPL()
+ {
+ return (ISM32)GET_IOPL();
+ }
+
+GLOBAL ISM32
+c_getNT()
+ {
+ return (ISM32)GET_NT();
+ }
+
+GLOBAL ISM32
+c_getRF()
+ {
+ return (ISM32)GET_RF();
+ }
+
+GLOBAL ISM32
+c_getVM()
+ {
+ return (ISM32)GET_VM();
+ }
+
+#ifdef SPC486
+GLOBAL ISM32
+c_getAC()
+ {
+ return (ISM32)GET_AC();
+ }
+
+GLOBAL ISM32
+c_getET()
+ {
+ return (ISM32)GET_ET();
+ }
+
+GLOBAL ISM32
+c_getNE()
+ {
+ return (ISM32)GET_NE();
+ }
+
+GLOBAL ISM32
+c_getWP()
+ {
+ return (ISM32)GET_WP();
+ }
+
+GLOBAL ISM32
+c_getAM()
+ {
+ return (ISM32)GET_AM();
+ }
+
+GLOBAL ISM32
+c_getNW()
+ {
+ return (ISM32)GET_NW();
+ }
+
+GLOBAL ISM32
+c_getCD()
+ {
+ return (ISM32)GET_CD();
+ }
+#endif /* SPC486 */
+
+GLOBAL IU16
+c_getSTATUS()
+ {
+ return (IU16)getFLAGS();
+ }
+
+GLOBAL VOID
+c_setAF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_AF(val);
+ }
+
+GLOBAL VOID
+c_setCF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_CF(val);
+ }
+
+GLOBAL VOID
+c_setDF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_DF(val);
+ }
+
+GLOBAL VOID
+c_setIF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_IF(val);
+ }
+
+GLOBAL VOID
+c_setOF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_OF(val);
+ }
+
+GLOBAL VOID
+c_setPF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_PF(val);
+ }
+
+GLOBAL VOID
+c_setSF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_SF(val);
+ }
+
+GLOBAL VOID
+c_setTF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_TF(val);
+ }
+
+GLOBAL VOID
+c_setZF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_ZF(val);
+ }
+
+GLOBAL VOID
+c_setIOPL
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_IOPL(val);
+ }
+
+GLOBAL VOID
+c_setNT
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_NT(val);
+ }
+
+GLOBAL VOID
+c_setRF
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_RF(val);
+ }
+
+GLOBAL VOID
+c_setVM
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_VM(val);
+ }
+
+
+
+#ifdef SPC486
+GLOBAL VOID
+c_setAC
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_AC(val);
+ }
+#endif /* SPC486 */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Control Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL ISM32
+c_getPE()
+ {
+ return (ISM32)GET_PE();
+ }
+
+GLOBAL ISM32
+c_getMP()
+ {
+ return (ISM32)GET_MP();
+ }
+
+GLOBAL ISM32
+c_getEM()
+ {
+ return (ISM32)GET_EM();
+ }
+
+GLOBAL ISM32
+c_getTS()
+ {
+ return (ISM32)GET_TS();
+ }
+
+GLOBAL ISM32
+c_getPG()
+ {
+ return (ISM32)GET_PG();
+ }
+
+GLOBAL IU16
+c_getMSW()
+ {
+ return (IU16)GET_MSW();
+ }
+
+GLOBAL IU32
+c_getCR0
+IFN0()
+ {
+ return (IU32)GET_CR(0);
+ }
+
+GLOBAL IU32
+c_getCR2
+IFN0()
+ {
+ return (IU32)GET_CR(2);
+ }
+
+GLOBAL IU32
+c_getCR3
+IFN0()
+ {
+ return (IU32)GET_CR(3);
+ }
+
+GLOBAL VOID
+c_setPE
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_PE(val);
+ }
+
+GLOBAL VOID
+c_setMP
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_MP(val);
+ }
+
+GLOBAL VOID
+c_setEM
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_EM(val);
+ }
+
+GLOBAL VOID
+c_setTS
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_TS(val);
+ }
+
+GLOBAL VOID
+c_setET
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ /* Quietly ignore outside interference. Once set at RESET
+ time ET remains unchanged. */
+ UNUSED(val);
+ }
+
+#ifdef SPC486
+GLOBAL VOID
+c_setNE
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_NE(val);
+ }
+
+GLOBAL VOID
+c_setWP
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_WP(val);
+ }
+
+GLOBAL VOID
+c_setAM
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_AM(val);
+ }
+
+GLOBAL VOID
+c_setNW
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_NW(val);
+ }
+
+GLOBAL VOID
+c_setCD
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_CD(val);
+ }
+#endif /* SPC486 */
+
+GLOBAL VOID
+c_setPG
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_PG(val);
+ }
+
+GLOBAL VOID
+c_setMSW
+
+IFN1(
+ IU16, val /* new Machine Status Word (low 16 bits of CR0) */
+ )
+
+
+ {
+ IU32 keep_et;
+ /*
+ Does not allow ET to be changed!
+ Ideally this external interface should be removed.
+ */
+ keep_et = GET_ET(); /* preserve Extension Type bit across the update */
+ SET_MSW(val);
+ SET_ET(keep_et);
+ /* propagate the new low word into CR0 via the normal MOV CR path,
+ keeping the upper 16 bits of CR0 intact */
+ MOV_CR(0, (IU32)((GET_CR(0)&0xFFFF0000) | ((IU32)val)));
+ }
+
+GLOBAL VOID
+c_setCR0
+IFN1(
+ IU32, val
+ )
+ {
+ MOV_CR(0, (IU32)val);
+ }
+
+/* CR1 is reserved on the 486 */
+
+GLOBAL VOID
+c_setCR2
+IFN1(
+ IU32, val
+ )
+ {
+ MOV_CR(2, (IU32)val);
+ }
+
+GLOBAL VOID
+c_setCR3
+IFN1(
+ IU32, val
+ )
+ {
+ MOV_CR(3, (IU32)val);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Descriptor Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL IU32
+c_getGDT_BASE()
+ {
+ return (IU32)GET_GDT_BASE();
+ }
+
+GLOBAL IU16
+c_getGDT_LIMIT()
+ {
+ return (IU16)GET_GDT_LIMIT();
+ }
+
+GLOBAL IU32
+c_getIDT_BASE()
+ {
+ return (IU32)GET_IDT_BASE();
+ }
+
+GLOBAL IU16
+c_getIDT_LIMIT()
+ {
+ return (IU16)GET_IDT_LIMIT();
+ }
+
+GLOBAL IU16
+c_getLDT_SELECTOR()
+ {
+ return (IU16)GET_LDT_SELECTOR();
+ }
+
+GLOBAL IU32
+c_getLDT_BASE()
+ {
+ return (IU32)GET_LDT_BASE();
+ }
+
+GLOBAL IU32
+c_getLDT_LIMIT()
+ {
+ return (IU32)GET_LDT_LIMIT();
+ }
+
+GLOBAL IU16
+c_getTR_SELECTOR()
+ {
+ return (IU16)GET_TR_SELECTOR();
+ }
+
+GLOBAL IU32
+c_getTR_BASE()
+ {
+ return (IU32)GET_TR_BASE();
+ }
+
+GLOBAL IU32
+c_getTR_LIMIT()
+ {
+ return (IU32)GET_TR_LIMIT();
+ }
+
+GLOBAL IU16
+c_getTR_AR()
+ {
+ return (IU16)GET_TR_AR_SUPER();
+ }
+
+GLOBAL VOID
+c_setGDT_BASE_LIMIT
+
+IFN2(
+ IU32, base,
+ IU16, limit
+ )
+
+
+ {
+ SET_GDT_BASE(base);
+ SET_GDT_LIMIT(limit);
+ }
+
+GLOBAL VOID
+c_setIDT_BASE_LIMIT
+
+IFN2(
+ IU32, base,
+ IU16, limit
+ )
+
+
+ {
+ SET_IDT_BASE(base);
+ SET_IDT_LIMIT(limit);
+ }
+
+GLOBAL VOID
+c_setLDT_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_LDT_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setLDT_BASE_LIMIT
+
+IFN2(
+ IU32, base,
+ IU32, limit
+ )
+
+
+ {
+ SET_LDT_BASE(base);
+ SET_LDT_LIMIT(limit);
+ }
+
+GLOBAL VOID
+c_setTR_SELECTOR
+
+IFN1(
+ IU16, val
+ )
+
+
+ {
+ SET_TR_SELECTOR(val);
+ }
+
+GLOBAL VOID
+c_setTR_BASE_LIMIT
+
+IFN2(
+ IU32, base,
+ IU32, limit
+ )
+
+
+ {
+ SET_TR_BASE(base);
+ SET_TR_LIMIT(limit);
+ SET_TR_AR_SUPER(BUSY_TSS);
+ }
+
+
+GLOBAL VOID
+c_setTR_BASE_LIMIT_AR
+
+IFN3(
+ IU32, base, /* TSS base address */
+ IU32, limit, /* TSS limit */
+ IU16, ar /* access rights byte; must describe a busy TSS */
+ )
+
+
+ {
+ if (GET_AR_SUPER(ar) == BUSY_TSS || GET_AR_SUPER(ar) == XTND_BUSY_TSS)
+ {
+ SET_TR_BASE(base);
+ SET_TR_LIMIT(limit);
+ SET_TR_AR_SUPER(GET_AR_SUPER(ar));
+ }
+ else
+ {
+ /* NOTE(review): diagnostic goes to stdout via raw printf -
+ confirm this is the intended error channel for this build */
+ if (ar != 0)
+ printf ("%s:%d Invalid Task Reg AR byte %02x supplied.\n",
+ __FILE__, __LINE__, ar);
+ /* fall back to a 286 busy TSS type */
+ SET_TR_BASE(base);
+ SET_TR_LIMIT(limit);
+ SET_TR_AR_SUPER(BUSY_TSS);
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Provide Access to Current Privilege Level. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+
+GLOBAL ISM32
+c_getCPL()
+ {
+ return (ISM32)GET_CPL();
+ }
+
+GLOBAL VOID
+c_setCPL
+
+IFN1(
+ ISM32, val
+ )
+
+
+ {
+ SET_CPL(val);
+ }
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_reg.h b/private/mvdm/softpc.new/base/ccpu386/c_reg.h
new file mode 100644
index 000000000..705b73030
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_reg.h
@@ -0,0 +1,508 @@
+/*[
+
+c_reg.h
+
+LOCAL CHAR SccsID[]="@(#)c_reg.h 1.8 08/26/94";
+
+Access to CPU Registers.
+------------------------
+
+NB. This file is *NOT* a description of the 'c_reg.c' functions. Those
+ are given in 'cpu.h'. However it is analogous in that 'c_reg.c'
+ defines the CPU Registers for the outside world. This file defines
+ the CPU Registers for access internal to the C CPU.
+
+]*/
+
+
+/* CS, SS, DS, ES, FS and GS */
+/* Visible selector plus the hidden descriptor cache; the access
+   rights byte is stored unpacked, one field per attribute. */
+typedef struct
+   {
+   IU16 selector;          /* 16-bit selector */
+   IU32 ar_dpl;            /* privilege */
+   IU32 ar_e;              /* expansion direction */
+   IU32 ar_r;              /* readable */
+   IU32 ar_w;              /* writable */
+   IU32 ar_c;              /* conforming */
+   IU32 ar_x;              /* default (CS) big (SS,DS,ES,FS,GS) */
+   IU32 base;              /* 32-bit base address (286 = 24-bit) */
+   IU32 limit;             /* 32-bit offset limit (286 = 16-bit) */
+   } SEGMENT_REGISTER;
+
+/* LDTR and TR */
+typedef struct
+   {
+   IU16 selector;          /* 16-bit selector */
+   ISM32 ar_super;         /* descriptor type (only used for TR) */
+   IU32 base;              /* 32-bit base address (286 = 24-bit) */
+   IU32 limit;             /* 32-bit offset limit (286 = 16-bit) */
+   } SYSTEM_ADDRESS_REGISTER;
+
+/* GDTR and IDTR */
+typedef struct
+   {
+   IU32 base;              /* 32-bit base (286 = 24-bit) */
+   IU16 limit;             /* 16-bit limit */
+   } SYSTEM_TABLE_ADDRESS_REGISTER;
+
+
+/*
+ C CPU Registers. (See c_main.c for full description.)
+ */
+IMPORT IU32 CCPU_TR[];
+IMPORT IU32 CCPU_DR[];
+IMPORT IU32 CCPU_CR[];
+IMPORT IU32 CCPU_GR[];
+IMPORT IU16 *CCPU_WR[];
+IMPORT IU8 *CCPU_BR[];
+IMPORT IU32 CCPU_IP;
+IMPORT IU32 CCPU_FLAGS[];
+IMPORT IU32 CCPU_CPL;
+IMPORT IU32 CCPU_MODE[];
+IMPORT SEGMENT_REGISTER CCPU_SR[];
+IMPORT SYSTEM_ADDRESS_REGISTER CCPU_SAR[];
+IMPORT SYSTEM_TABLE_ADDRESS_REGISTER CCPU_STAR[];
+
+
+/*
+ Access to the emulation register set.
+ */
+
+
+/* Double Word General Registers */
+#define GET_GR(i) CCPU_GR[(i)]
+#define SET_GR(i, x) CCPU_GR[(i)] = (x)
+
+#define A_EAX 0
+#define A_ECX 1
+#define A_EDX 2
+#define A_EBX 3
+#define A_ESP 4
+#define A_EBP 5
+#define A_ESI 6
+#define A_EDI 7
+
+#define GET_EAX() GET_GR(A_EAX)
+#define GET_ECX() GET_GR(A_ECX)
+#define GET_EDX() GET_GR(A_EDX)
+#define GET_EBX() GET_GR(A_EBX)
+#define GET_ESP() GET_GR(A_ESP)
+#define GET_EBP() GET_GR(A_EBP)
+#define GET_ESI() GET_GR(A_ESI)
+#define GET_EDI() GET_GR(A_EDI)
+
+#define SET_EAX(x) SET_GR(A_EAX, (x))
+#define SET_ECX(x) SET_GR(A_ECX, (x))
+#define SET_EDX(x) SET_GR(A_EDX, (x))
+#define SET_EBX(x) SET_GR(A_EBX, (x))
+#define SET_ESP(x) SET_GR(A_ESP, (x))
+#define SET_EBP(x) SET_GR(A_EBP, (x))
+#define SET_ESI(x) SET_GR(A_ESI, (x))
+#define SET_EDI(x) SET_GR(A_EDI, (x))
+
+/* Word Registers */
+#define GET_WR(i) (*CCPU_WR[(i)])
+#define SET_WR(i, x) *CCPU_WR[(i)] = (x)
+
+#define A_AX 0
+#define A_CX 1
+#define A_DX 2
+#define A_BX 3
+#define A_SP 4
+#define A_BP 5
+#define A_SI 6
+#define A_DI 7
+
+#define GET_AX() GET_WR(A_AX)
+#define GET_CX() GET_WR(A_CX)
+#define GET_DX() GET_WR(A_DX)
+#define GET_BX() GET_WR(A_BX)
+#define GET_SP() GET_WR(A_SP)
+#define GET_BP() GET_WR(A_BP)
+#define GET_SI() GET_WR(A_SI)
+#define GET_DI() GET_WR(A_DI)
+
+#define SET_AX(x) SET_WR(A_AX, (x))
+#define SET_CX(x) SET_WR(A_CX, (x))
+#define SET_DX(x) SET_WR(A_DX, (x))
+#define SET_BX(x) SET_WR(A_BX, (x))
+#define SET_SP(x) SET_WR(A_SP, (x))
+#define SET_BP(x) SET_WR(A_BP, (x))
+#define SET_SI(x) SET_WR(A_SI, (x))
+#define SET_DI(x) SET_WR(A_DI, (x))
+
+#define GET_EIP() CCPU_IP
+#define SET_EIP(x) CCPU_IP = (x)
+
+/* Byte Registers */
+#define GET_BR(i) (*CCPU_BR[(i)])
+#define SET_BR(i, x) *CCPU_BR[(i)] = (x)
+
+#define A_AL 0
+#define A_CL 1
+#define A_DL 2
+#define A_BL 3
+#define A_AH 4
+#define A_CH 5
+#define A_DH 6
+#define A_BH 7
+
+#define GET_AL() GET_BR(A_AL)
+#define GET_CL() GET_BR(A_CL)
+#define GET_DL() GET_BR(A_DL)
+#define GET_BL() GET_BR(A_BL)
+#define GET_AH() GET_BR(A_AH)
+#define GET_CH() GET_BR(A_CH)
+#define GET_DH() GET_BR(A_DH)
+#define GET_BH() GET_BR(A_BH)
+
+#define SET_AL(x) SET_BR(A_AL, (x))
+#define SET_CL(x) SET_BR(A_CL, (x))
+#define SET_DL(x) SET_BR(A_DL, (x))
+#define SET_BL(x) SET_BR(A_BL, (x))
+#define SET_AH(x) SET_BR(A_AH, (x))
+#define SET_CH(x) SET_BR(A_CH, (x))
+#define SET_DH(x) SET_BR(A_DH, (x))
+#define SET_BH(x) SET_BR(A_BH, (x))
+
+/* Segment Registers */
+#define GET_SR_SELECTOR(i) CCPU_SR[(i)].selector
+#define GET_SR_BASE(i) CCPU_SR[(i)].base
+#define GET_SR_LIMIT(i) CCPU_SR[(i)].limit
+#define GET_SR_AR_DPL(i) CCPU_SR[(i)].ar_dpl
+#define GET_SR_AR_E(i) CCPU_SR[(i)].ar_e
+#define GET_SR_AR_W(i) CCPU_SR[(i)].ar_w
+#define GET_SR_AR_R(i) CCPU_SR[(i)].ar_r
+#define GET_SR_AR_C(i) CCPU_SR[(i)].ar_c
+#define GET_SR_AR_X(i) CCPU_SR[(i)].ar_x
+
+#define SET_SR_SELECTOR(i, x) CCPU_SR[(i)].selector = (x)
+#define SET_SR_BASE(i, x) CCPU_SR[(i)].base = (x)
+#define SET_SR_LIMIT(i, x) CCPU_SR[(i)].limit = (x)
+#define SET_SR_AR_DPL(i, x) CCPU_SR[(i)].ar_dpl = (x)
+#define SET_SR_AR_E(i, x) CCPU_SR[(i)].ar_e = (x)
+#define SET_SR_AR_W(i, x) CCPU_SR[(i)].ar_w = (x)
+#define SET_SR_AR_R(i, x) CCPU_SR[(i)].ar_r = (x)
+#define SET_SR_AR_C(i, x) CCPU_SR[(i)].ar_c = (x)
+#define SET_SR_AR_X(i, x) CCPU_SR[(i)].ar_x = (x)
+
+#define ES_REG 0
+#define CS_REG 1
+#define SS_REG 2
+#define DS_REG 3
+#define FS_REG 4
+#define GS_REG 5
+
+#define GET_ES_SELECTOR() GET_SR_SELECTOR(ES_REG)
+#define GET_ES_BASE() GET_SR_BASE(ES_REG)
+#define GET_ES_LIMIT() GET_SR_LIMIT(ES_REG)
+#define GET_ES_AR_DPL() GET_SR_AR_DPL(ES_REG)
+#define GET_ES_AR_E() GET_SR_AR_E(ES_REG)
+#define GET_ES_AR_W() GET_SR_AR_W(ES_REG)
+#define GET_ES_AR_R() GET_SR_AR_R(ES_REG)
+#define GET_ES_AR_C() GET_SR_AR_C(ES_REG)
+#define GET_ES_AR_X() GET_SR_AR_X(ES_REG)
+
+#define SET_ES_SELECTOR(x) SET_SR_SELECTOR(ES_REG, (x))
+#define SET_ES_BASE(x) SET_SR_BASE(ES_REG, (x))
+#define SET_ES_LIMIT(x) SET_SR_LIMIT(ES_REG, (x))
+#define SET_ES_AR_DPL(x) SET_SR_AR_DPL(ES_REG, (x))
+#define SET_ES_AR_E(x) SET_SR_AR_E(ES_REG, (x))
+#define SET_ES_AR_W(x) SET_SR_AR_W(ES_REG, (x))
+#define SET_ES_AR_R(x) SET_SR_AR_R(ES_REG, (x))
+#define SET_ES_AR_C(x) SET_SR_AR_C(ES_REG, (x))
+#define SET_ES_AR_X(x) SET_SR_AR_X(ES_REG, (x))
+
+#define GET_CS_SELECTOR() GET_SR_SELECTOR(CS_REG)
+#define GET_CS_BASE() GET_SR_BASE(CS_REG)
+#define GET_CS_LIMIT() GET_SR_LIMIT(CS_REG)
+#define GET_CS_AR_DPL() GET_SR_AR_DPL(CS_REG)
+#define GET_CS_AR_E() GET_SR_AR_E(CS_REG)
+#define GET_CS_AR_W() GET_SR_AR_W(CS_REG)
+#define GET_CS_AR_R() GET_SR_AR_R(CS_REG)
+#define GET_CS_AR_C() GET_SR_AR_C(CS_REG)
+#define GET_CS_AR_X() GET_SR_AR_X(CS_REG)
+
+#define SET_CS_SELECTOR(x) SET_SR_SELECTOR(CS_REG, (x))
+#define SET_CS_BASE(x) SET_SR_BASE(CS_REG, (x))
+#define SET_CS_LIMIT(x) SET_SR_LIMIT(CS_REG, (x))
+#define SET_CS_AR_DPL(x) SET_SR_AR_DPL(CS_REG, (x))
+#define SET_CS_AR_E(x) SET_SR_AR_E(CS_REG, (x))
+#define SET_CS_AR_W(x) SET_SR_AR_W(CS_REG, (x))
+#define SET_CS_AR_R(x) SET_SR_AR_R(CS_REG, (x))
+#define SET_CS_AR_C(x) SET_SR_AR_C(CS_REG, (x))
+#define SET_CS_AR_X(x) SET_SR_AR_X(CS_REG, (x))
+
+#define GET_SS_SELECTOR() GET_SR_SELECTOR(SS_REG)
+#define GET_SS_BASE() GET_SR_BASE(SS_REG)
+#define GET_SS_LIMIT() GET_SR_LIMIT(SS_REG)
+#define GET_SS_AR_DPL() GET_SR_AR_DPL(SS_REG)
+#define GET_SS_AR_E() GET_SR_AR_E(SS_REG)
+#define GET_SS_AR_W() GET_SR_AR_W(SS_REG)
+#define GET_SS_AR_R() GET_SR_AR_R(SS_REG)
+#define GET_SS_AR_C() GET_SR_AR_C(SS_REG)
+#define GET_SS_AR_X() GET_SR_AR_X(SS_REG)
+
+#define SET_SS_SELECTOR(x) SET_SR_SELECTOR(SS_REG, (x))
+#define SET_SS_BASE(x) SET_SR_BASE(SS_REG, (x))
+#define SET_SS_LIMIT(x) SET_SR_LIMIT(SS_REG, (x))
+#define SET_SS_AR_DPL(x) SET_SR_AR_DPL(SS_REG, (x))
+#define SET_SS_AR_E(x) SET_SR_AR_E(SS_REG, (x))
+#define SET_SS_AR_W(x) SET_SR_AR_W(SS_REG, (x))
+#define SET_SS_AR_R(x) SET_SR_AR_R(SS_REG, (x))
+#define SET_SS_AR_C(x) SET_SR_AR_C(SS_REG, (x))
+#define SET_SS_AR_X(x) SET_SR_AR_X(SS_REG, (x))
+
+#define GET_DS_SELECTOR() GET_SR_SELECTOR(DS_REG)
+#define GET_DS_BASE() GET_SR_BASE(DS_REG)
+#define GET_DS_LIMIT() GET_SR_LIMIT(DS_REG)
+#define GET_DS_AR_DPL() GET_SR_AR_DPL(DS_REG)
+#define GET_DS_AR_E() GET_SR_AR_E(DS_REG)
+#define GET_DS_AR_W() GET_SR_AR_W(DS_REG)
+#define GET_DS_AR_R() GET_SR_AR_R(DS_REG)
+#define GET_DS_AR_C() GET_SR_AR_C(DS_REG)
+#define GET_DS_AR_X() GET_SR_AR_X(DS_REG)
+
+#define SET_DS_SELECTOR(x) SET_SR_SELECTOR(DS_REG, (x))
+#define SET_DS_BASE(x) SET_SR_BASE(DS_REG, (x))
+#define SET_DS_LIMIT(x) SET_SR_LIMIT(DS_REG, (x))
+#define SET_DS_AR_DPL(x) SET_SR_AR_DPL(DS_REG, (x))
+#define SET_DS_AR_E(x) SET_SR_AR_E(DS_REG, (x))
+#define SET_DS_AR_W(x) SET_SR_AR_W(DS_REG, (x))
+#define SET_DS_AR_R(x) SET_SR_AR_R(DS_REG, (x))
+#define SET_DS_AR_C(x) SET_SR_AR_C(DS_REG, (x))
+#define SET_DS_AR_X(x) SET_SR_AR_X(DS_REG, (x))
+
+#define GET_FS_SELECTOR() GET_SR_SELECTOR(FS_REG)
+#define GET_FS_BASE() GET_SR_BASE(FS_REG)
+#define GET_FS_LIMIT() GET_SR_LIMIT(FS_REG)
+#define GET_FS_AR_DPL() GET_SR_AR_DPL(FS_REG)
+#define GET_FS_AR_E() GET_SR_AR_E(FS_REG)
+#define GET_FS_AR_W() GET_SR_AR_W(FS_REG)
+#define GET_FS_AR_R() GET_SR_AR_R(FS_REG)
+#define GET_FS_AR_C() GET_SR_AR_C(FS_REG)
+#define GET_FS_AR_X() GET_SR_AR_X(FS_REG)
+
+#define SET_FS_SELECTOR(x) SET_SR_SELECTOR(FS_REG, (x))
+#define SET_FS_BASE(x) SET_SR_BASE(FS_REG, (x))
+#define SET_FS_LIMIT(x) SET_SR_LIMIT(FS_REG, (x))
+#define SET_FS_AR_DPL(x) SET_SR_AR_DPL(FS_REG, (x))
+#define SET_FS_AR_E(x) SET_SR_AR_E(FS_REG, (x))
+#define SET_FS_AR_W(x) SET_SR_AR_W(FS_REG, (x))
+#define SET_FS_AR_R(x) SET_SR_AR_R(FS_REG, (x))
+#define SET_FS_AR_C(x) SET_SR_AR_C(FS_REG, (x))
+#define SET_FS_AR_X(x) SET_SR_AR_X(FS_REG, (x))
+
+#define GET_GS_SELECTOR() GET_SR_SELECTOR(GS_REG)
+#define GET_GS_BASE() GET_SR_BASE(GS_REG)
+#define GET_GS_LIMIT() GET_SR_LIMIT(GS_REG)
+#define GET_GS_AR_DPL() GET_SR_AR_DPL(GS_REG)
+#define GET_GS_AR_E() GET_SR_AR_E(GS_REG)
+#define GET_GS_AR_W() GET_SR_AR_W(GS_REG)
+#define GET_GS_AR_R() GET_SR_AR_R(GS_REG)
+#define GET_GS_AR_C() GET_SR_AR_C(GS_REG)
+#define GET_GS_AR_X() GET_SR_AR_X(GS_REG)
+
+#define SET_GS_SELECTOR(x) SET_SR_SELECTOR(GS_REG, (x))
+#define SET_GS_BASE(x) SET_SR_BASE(GS_REG, (x))
+#define SET_GS_LIMIT(x) SET_SR_LIMIT(GS_REG, (x))
+#define SET_GS_AR_DPL(x) SET_SR_AR_DPL(GS_REG, (x))
+#define SET_GS_AR_E(x) SET_SR_AR_E(GS_REG, (x))
+#define SET_GS_AR_W(x) SET_SR_AR_W(GS_REG, (x))
+#define SET_GS_AR_R(x) SET_SR_AR_R(GS_REG, (x))
+#define SET_GS_AR_C(x) SET_SR_AR_C(GS_REG, (x))
+#define SET_GS_AR_X(x) SET_SR_AR_X(GS_REG, (x))
+
+/* System Table Address Registers */
+#define GET_STAR_BASE(i) CCPU_STAR[(i)].base
+#define GET_STAR_LIMIT(i) CCPU_STAR[(i)].limit
+
+#define SET_STAR_BASE(i, x) CCPU_STAR[(i)].base = (x)
+#define SET_STAR_LIMIT(i, x) CCPU_STAR[(i)].limit = (x)
+
+#define GDT_REG 0
+#define IDT_REG 1
+
+#define GET_GDT_BASE() GET_STAR_BASE(GDT_REG)
+#define GET_GDT_LIMIT() GET_STAR_LIMIT(GDT_REG)
+#define GET_IDT_BASE() GET_STAR_BASE(IDT_REG)
+#define GET_IDT_LIMIT() GET_STAR_LIMIT(IDT_REG)
+
+#define SET_GDT_BASE(x) SET_STAR_BASE(GDT_REG, (x))
+#define SET_GDT_LIMIT(x) SET_STAR_LIMIT(GDT_REG, (x))
+#define SET_IDT_BASE(x) SET_STAR_BASE(IDT_REG, (x))
+#define SET_IDT_LIMIT(x) SET_STAR_LIMIT(IDT_REG, (x))
+
+/* System Address Registers */
+#define GET_SAR_SELECTOR(i) CCPU_SAR[(i)].selector
+#define GET_SAR_AR_SUPER(i) CCPU_SAR[(i)].ar_super
+#define GET_SAR_BASE(i) CCPU_SAR[(i)].base
+#define GET_SAR_LIMIT(i) CCPU_SAR[(i)].limit
+
+#define SET_SAR_SELECTOR(i, x) CCPU_SAR[(i)].selector = (x)
+#define SET_SAR_AR_SUPER(i, x) CCPU_SAR[(i)].ar_super = (x)
+#define SET_SAR_BASE(i, x) CCPU_SAR[(i)].base = (x)
+#define SET_SAR_LIMIT(i, x) CCPU_SAR[(i)].limit = (x)
+
+#define LDT_REG 0
+#define TR_REG 1
+
+#define GET_LDT_SELECTOR() GET_SAR_SELECTOR(LDT_REG)
+#define GET_LDT_BASE() GET_SAR_BASE(LDT_REG)
+#define GET_LDT_LIMIT() GET_SAR_LIMIT(LDT_REG)
+#define GET_TR_SELECTOR() GET_SAR_SELECTOR(TR_REG)
+#define GET_TR_AR_SUPER() GET_SAR_AR_SUPER(TR_REG)
+#define GET_TR_BASE() GET_SAR_BASE(TR_REG)
+#define GET_TR_LIMIT() GET_SAR_LIMIT(TR_REG)
+
+#define SET_LDT_SELECTOR(x) SET_SAR_SELECTOR(LDT_REG, (x))
+#define SET_LDT_BASE(x) SET_SAR_BASE(LDT_REG, (x))
+#define SET_LDT_LIMIT(x) SET_SAR_LIMIT(LDT_REG, (x))
+#define SET_TR_SELECTOR(x) SET_SAR_SELECTOR(TR_REG, (x))
+#define SET_TR_AR_SUPER(x) SET_SAR_AR_SUPER(TR_REG, (x))
+#define SET_TR_BASE(x) SET_SAR_BASE(TR_REG, (x))
+#define SET_TR_LIMIT(x) SET_SAR_LIMIT(TR_REG, (x))
+
+/* Flag Register */
+#define GET_CF() CCPU_FLAGS[0]
+#define GET_PF() CCPU_FLAGS[2]
+#define GET_AF() CCPU_FLAGS[4]
+#define GET_ZF() CCPU_FLAGS[6]
+#define GET_SF() CCPU_FLAGS[7]
+#define GET_TF() CCPU_FLAGS[8]
+#define GET_IF() CCPU_FLAGS[9]
+#define GET_DF() CCPU_FLAGS[10]
+#define GET_OF() CCPU_FLAGS[11]
+#define GET_IOPL() CCPU_FLAGS[12]
+#define GET_NT() CCPU_FLAGS[14]
+#define GET_RF() CCPU_FLAGS[16]
+#define GET_VM() CCPU_FLAGS[17]
+#define GET_AC() CCPU_FLAGS[18]
+
+#define SET_CF(x) CCPU_FLAGS[0] = (x)
+#define SET_PF(x) CCPU_FLAGS[2] = (x)
+#define SET_AF(x) CCPU_FLAGS[4] = (x)
+#define SET_ZF(x) CCPU_FLAGS[6] = (x)
+#define SET_SF(x) CCPU_FLAGS[7] = (x)
+#define SET_TF(x) CCPU_FLAGS[8] = (x)
+#ifdef SFELLOW
+extern IU32 DisableEE IPT0();
+extern void EnableEE IPT0();
+#define SET_IF(x) \
+{ \
+ if (x) \
+ { \
+ EnableEE(); \
+ } \
+ else \
+ { \
+ DisableEE(); \
+ } \
+ CCPU_FLAGS[9] = (x); \
+}
+#else
+#define SET_IF(x) CCPU_FLAGS[9] = (x)
+#endif /* SFELLOW */
+#define SET_DF(x) CCPU_FLAGS[10] = (x)
+#define SET_OF(x) CCPU_FLAGS[11] = (x)
+#define SET_IOPL(x) CCPU_FLAGS[12] = (x)
+#define SET_NT(x) CCPU_FLAGS[14] = (x)
+#define SET_RF(x) CCPU_FLAGS[16] = (x)
+#define SET_VM(x) CCPU_FLAGS[17] = (x)
+#define SET_AC(x) CCPU_FLAGS[18] = (x)
+
+/* Test Registers */
+#define TR_TDR 7
+#define TR_TCR 6
+#define TR_CCR 5
+#define TR_CSR 4
+#define TR_CDR 3
+
+#define GET_TR(i) CCPU_TR[(i)]
+#define SET_TR(i, x) CCPU_TR[(i)] = (x)
+
+/* Debug Registers */
+#define DR_DCR 7
+#define DR_DSR 6
+#define DR_DAR3 3
+#define DR_DAR2 2
+#define DR_DAR1 1
+#define DR_DAR0 0
+
+#define DSR_BT_MASK BIT15_MASK
+#define DSR_BS_MASK BIT14_MASK
+#define DSR_B3_MASK BIT3_MASK
+#define DSR_B2_MASK BIT2_MASK
+#define DSR_B1_MASK BIT1_MASK
+#define DSR_B0_MASK BIT0_MASK
+
+#define GET_DR(i) CCPU_DR[(i)]
+#define SET_DR(i, x) CCPU_DR[(i)] = (x)
+
+/* Control Registers */
+#define CR_PDBR 3
+#define CR_PFLA 2
+#define CR_RSVD 1
+#define CR_STAT 0
+
+#define GET_CR(i) CCPU_CR[(i)]
+#define SET_CR(i, x) CCPU_CR[(i)] = (x)
+
+#define GET_PE() ( CCPU_CR[CR_STAT] & BIT0_MASK)
+#define GET_MP() ((CCPU_CR[CR_STAT] & BIT1_MASK) != 0)
+#define GET_EM() ((CCPU_CR[CR_STAT] & BIT2_MASK) != 0)
+#define GET_TS() ((CCPU_CR[CR_STAT] & BIT3_MASK) != 0)
+#define GET_ET() ((CCPU_CR[CR_STAT] & BIT4_MASK) != 0)
+#define GET_PG() ((CCPU_CR[CR_STAT] & BIT31_MASK) != 0)
+
+/* 486 only */
+#define GET_NE() ((CCPU_CR[CR_STAT] & BIT5_MASK) != 0)
+#define GET_WP() ((CCPU_CR[CR_STAT] & BIT16_MASK) != 0)
+#define GET_AM() ((CCPU_CR[CR_STAT] & BIT18_MASK) != 0)
+#define GET_NW() ((CCPU_CR[CR_STAT] & BIT29_MASK) != 0)
+#define GET_CD() ((CCPU_CR[CR_STAT] & BIT30_MASK) != 0)
+
+#define SET_PE(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT0_MASK | ((x) & 1))
+#define SET_MP(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT1_MASK | ((x) & 1) << 1)
+#define SET_EM(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT2_MASK | ((x) & 1) << 2)
+#define SET_TS(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT3_MASK | ((x) & 1) << 3)
+#define SET_ET(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT4_MASK | ((x) & 1) << 4)
+#define SET_PG(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT31_MASK | ((x) & 1) << 31)
+
+/* 486 only */
+#define SET_NE(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT5_MASK | ((x) & 1) << 5)
+#define SET_WP(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT16_MASK | ((x) & 1) << 16)
+#define SET_AM(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT18_MASK | ((x) & 1) << 18)
+#define SET_NW(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT29_MASK | ((x) & 1) << 29)
+#define SET_CD(x)\
+ (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~BIT30_MASK | ((x) & 1) << 30)
+/* Machine Status Word == low 16 bits of CR0. The GET_MSW expansion is
+ * parenthesised: without the parentheses an expression such as
+ * (GET_MSW() == x) would parse as CCPU_CR[CR_STAT] & (WORD_MASK == x),
+ * because '&' binds more loosely than the comparison operators.
+ */
+#define GET_MSW() (CCPU_CR[CR_STAT] & WORD_MASK)
+#define SET_MSW(x)\
+   (CCPU_CR[CR_STAT] = CCPU_CR[CR_STAT] & ~WORD_MASK | ((x) & WORD_MASK))
+
+/* Current Privilege Level */
+#define GET_CPL() CCPU_CPL
+#define SET_CPL(x) CCPU_CPL = (x)
+
+/* Current Operating Mode */
+#define USE16 0
+#define USE32 1
+
+#define GET_OPERAND_SIZE() CCPU_MODE[0]
+#define GET_ADDRESS_SIZE() CCPU_MODE[1]
+
+#define SET_OPERAND_SIZE(x) CCPU_MODE[0] = (x)
+#define SET_ADDRESS_SIZE(x) CCPU_MODE[1] = (x)
+
+#define GET_POP_DISP() CCPU_MODE[2]
+#define SET_POP_DISP(x) CCPU_MODE[2] = (x)
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_seg.c b/private/mvdm/softpc.new/base/ccpu386/c_seg.c
new file mode 100644
index 000000000..f98d4336e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_seg.c
@@ -0,0 +1,396 @@
+/*[
+
+c_seg.c
+
+LOCAL CHAR SccsID[]="@(#)c_seg.c 1.10 03/02/95";
+
+Segment Register Support.
+-------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_page.h>
+#include <fault.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load CS, both selector and hidden cache. Selector must be valid. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+load_CS_cache
+
+IFN3(
+   IU16, selector, /* (I) 16-bit selector to code segment */
+   IU32, descr_addr, /* (I) address of code segment descriptor */
+   CPU_DESCR *, entry /* (I) the decoded descriptor */
+   )
+
+
+   {
+   if ( GET_PE() == 0 || GET_VM() == 1 )
+      {
+      /* Real Mode or V86 Mode.
+         descr_addr and entry are not used on this path; callers may
+         pass 0/NULL (cf load_code_seg). */
+      SET_CS_SELECTOR(selector);
+      SET_CS_BASE((IU32)selector << 4);
+
+      /* LIMIT is untouched. (cf 80386 PRM Pg14-4) */
+      /* (cf i486 PRM Pg22-4) */
+
+      /* But access rights are updated */
+      SET_CS_AR_W(1); /* allow write access */
+      SET_CS_AR_R(1); /* allow read access */
+      SET_CS_AR_E(0); /* expand up */
+      SET_CS_AR_C(0); /* not conforming */
+      SET_CS_AR_X(0); /* not big (16-bit) */
+
+      /* V86 code runs at privilege 3, real mode at 0 */
+      if ( GET_VM() == 1 )
+         SET_CS_AR_DPL(3);
+      else
+         SET_CS_AR_DPL(0);
+      }
+   else
+      {
+      /* Protected Mode */
+
+      /* show segment has been accessed (i486 only writes if changed) */
+      /* NOTE: the #ifdef guards only the spr_write_byte call - on
+         SPC486 builds the descriptor byte is written back only when
+         the accessed bit was clear; otherwise it is written always. */
+#ifdef SPC486
+      if ((entry->AR & ACCESSED) == 0)
+#endif /* SPC486 */
+         spr_write_byte(descr_addr+5, (IU8)entry->AR | ACCESSED);
+      entry->AR |= ACCESSED;
+
+      /* the visible bit */
+      SET_CS_SELECTOR(selector);
+
+      /* load hidden cache */
+      SET_CS_BASE(entry->base);
+      SET_CS_LIMIT(entry->limit);
+      /* load attributes from descriptor */
+      SET_CS_AR_DPL(GET_AR_DPL(entry->AR));
+      SET_CS_AR_R(GET_AR_R(entry->AR));
+      SET_CS_AR_C(GET_AR_C(entry->AR));
+      SET_CS_AR_X(GET_AR_X(entry->AR));
+
+      SET_CS_AR_E(0); /* expand up */
+      SET_CS_AR_W(0); /* deny write */
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load SS, both selector and hidden cache. Selector must be valid. */
+/* Only invoked in protected mode. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+load_SS_cache
+
+IFN3(
+   IU16, selector, /* (I) 16-bit selector to stack segment */
+   IU32, descr_addr, /* (I) address of stack segment descriptor */
+   CPU_DESCR *, entry /* (I) the decoded descriptor */
+   )
+
+
+   {
+   /* show segment has been accessed (i486 only writes if changed) */
+   /* NOTE: the #ifdef guards only the spr_write_byte call - on
+      SPC486 builds the descriptor byte is written back only when the
+      accessed bit was clear; otherwise it is written always. */
+#ifdef SPC486
+   if ((entry->AR & ACCESSED) == 0)
+#endif /* SPC486 */
+      spr_write_byte(descr_addr+5, (IU8)entry->AR | ACCESSED);
+   entry->AR |= ACCESSED;
+
+   /* the visible bit */
+   SET_SS_SELECTOR(selector);
+
+   /* load hidden cache */
+   SET_SS_BASE(entry->base);
+   SET_SS_LIMIT(entry->limit);
+   /* load attributes from descriptor */
+   SET_SS_AR_DPL(GET_AR_DPL(entry->AR));
+   SET_SS_AR_E(GET_AR_E(entry->AR));
+   SET_SS_AR_X(GET_AR_X(entry->AR));
+
+   /* stack segments always behave as writeable, readable data */
+   SET_SS_AR_W(1); /* must be writeable */
+   SET_SS_AR_R(1); /* must be readable */
+   SET_SS_AR_C(0); /* not conforming */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load CS selector. */
+/* Take #GP if segment not valid */
+/* Take #NP if segment not present */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+load_code_seg
+
+IFN1(
+   IU16, new_cs /* (I) 16-bit selector for the new code segment */
+   )
+
+
+   {
+   IU32 cs_descr_addr;   /* code segment descriptor address */
+   CPU_DESCR cs_entry;   /* code segment descriptor entry */
+
+   /*
+      Given that the CPU should be started from a valid state, we
+      check CS selectors as if a far call to the same privilege
+      level was being generated. This is in effect saying yes the
+      CS could have been loaded by a valid Intel instruction.
+      This logic may have to be revised if strange LOADALL usage is
+      found.
+    */
+
+   if ( GET_PE() == 0 || GET_VM() == 1 )
+      {
+      /* Real Mode or V86 Mode - no descriptor, cache loaded directly */
+      load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
+      }
+   else
+      {
+      /* Protected Mode */
+
+      /* NOTE(review): GP()/NP() appear to raise the fault and not
+         return to this code - confirm against their definitions. */
+      if ( selector_outside_GDT_LDT(new_cs, &cs_descr_addr) )
+         GP(new_cs, FAULT_LOADCS_SELECTOR);
+
+      /* load descriptor */
+      read_descriptor_linear(cs_descr_addr, &cs_entry);
+
+      /* validate possible types of target */
+      switch ( descriptor_super_type(cs_entry.AR) )
+         {
+      case CONFORM_NOREAD_CODE:
+      case CONFORM_READABLE_CODE:
+         /* access check requires DPL <= CPL */
+         if ( GET_AR_DPL(cs_entry.AR) > GET_CPL() )
+            GP(new_cs, FAULT_LOADCS_ACCESS_1);
+
+         /* it must be present */
+         if ( GET_AR_P(cs_entry.AR) == NOT_PRESENT )
+            NP(new_cs, FAULT_LOADCS_NOTPRESENT_1);
+         break;
+
+      case NONCONFORM_NOREAD_CODE:
+      case NONCONFORM_READABLE_CODE:
+         /* access check requires RPL <= CPL and DPL == CPL */
+         if ( GET_SELECTOR_RPL(new_cs) > GET_CPL() ||
+              GET_AR_DPL(cs_entry.AR) != GET_CPL() )
+            GP(new_cs, FAULT_LOADCS_ACCESS_2);
+
+         /* it must be present */
+         if ( GET_AR_P(cs_entry.AR) == NOT_PRESENT )
+            NP(new_cs, FAULT_LOADCS_NOTPRESENT_2);
+         break;
+
+      default:
+         /* not a code segment at all */
+         GP(new_cs, FAULT_LOADCS_BAD_SEG_TYPE);
+         }
+
+      /* stamp new selector with CPL */
+      SET_SELECTOR_RPL(new_cs, GET_CPL());
+
+      load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load A Data Segment Register. (DS, ES, FS, GS) */
+/* Take #GP if segment not valid */
+/* Take #NP if segment not present */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+load_data_seg
+
+IFN2(
+   ISM32, indx, /* (I) segment register index (ES_REG..GS_REG) */
+   IU16, selector /* (I) 16-bit selector to load */
+   )
+
+
+   {
+   IU32 descr_addr;
+   CPU_DESCR entry;
+   ISM32 super;
+   ISM32 dpl;
+   BOOL is_data;
+
+   if ( GET_PE() == 0 || GET_VM() == 1 )
+      {
+      /* Real Mode or V86 Mode - only selector and base change;
+         limit and access rights are left as they were */
+      SET_SR_SELECTOR(indx, selector);
+      SET_SR_BASE(indx, (IU32)selector << 4);
+      }
+   else
+      {
+      /* Protected Mode */
+      if ( selector_is_null(selector) )
+         {
+         /* load is allowed - but later access will fail
+          * Since the program can not see the internal changes
+          * performed to achieve this, we make the behaviour
+          * match the easiest implementation in the A4CPU
+          */
+         SET_SR_SELECTOR(indx, selector);
+
+         /* the following lines were added to make the C-CPU behave like
+          * the Soft 486 CPU - an investigation is being made to see if this
+          * behaviour corresponds with the real i486 - this code may have to
+          * change.
+          */
+         SET_SR_BASE(indx, 0);
+         SET_SR_LIMIT(indx, 0);
+         SET_SR_AR_W(indx, 0);
+         SET_SR_AR_R(indx, 0);
+         }
+      else
+         {
+         if ( selector_outside_GDT_LDT(selector, &descr_addr) )
+            GP(selector, FAULT_LOADDS_SELECTOR);
+
+         read_descriptor_linear(descr_addr, &entry);
+
+         /* check type: readable code or any data segment is legal */
+         switch ( super = descriptor_super_type(entry.AR) )
+            {
+         case CONFORM_READABLE_CODE:
+         case NONCONFORM_READABLE_CODE:
+            is_data = FALSE;
+            break;
+
+         case EXPANDUP_READONLY_DATA:
+         case EXPANDUP_WRITEABLE_DATA:
+         case EXPANDDOWN_READONLY_DATA:
+         case EXPANDDOWN_WRITEABLE_DATA:
+            is_data = TRUE;
+            break;
+
+         default:
+            GP(selector, FAULT_LOADDS_BAD_SEG_TYPE); /* bad type */
+            }
+
+         /* for data and non-conforming code the access check applies */
+         if ( super != CONFORM_READABLE_CODE )
+            {
+            /* access check requires CPL <= DPL and RPL <= DPL */
+            dpl = GET_AR_DPL(entry.AR);
+            if ( GET_CPL() > dpl || GET_SELECTOR_RPL(selector) > dpl )
+               GP(selector, FAULT_LOADDS_ACCESS);
+            }
+
+         /* must be present */
+         if ( GET_AR_P(entry.AR) == NOT_PRESENT )
+            NP(selector, FAULT_LOADDS_NOTPRESENT);
+
+         /* show segment has been accessed (i486 only writes if changed) */
+         /* NOTE: the #ifdef guards only the spr_write_byte call */
+#ifdef SPC486
+         if ((entry.AR & ACCESSED) == 0)
+#endif /* SPC486 */
+            spr_write_byte(descr_addr+5, (IU8)entry.AR | ACCESSED);
+         entry.AR |= ACCESSED;
+
+         /* OK - load up */
+
+         /* the visible bit */
+         SET_SR_SELECTOR(indx, selector);
+
+         /* load hidden cache */
+         SET_SR_BASE(indx, entry.base);
+         SET_SR_LIMIT(indx, entry.limit);
+         /* load attributes from descriptor */
+         SET_SR_AR_DPL(indx, GET_AR_DPL(entry.AR));
+
+         if ( is_data )
+            {
+            SET_SR_AR_W(indx, GET_AR_W(entry.AR));
+            SET_SR_AR_E(indx, GET_AR_E(entry.AR));
+            SET_SR_AR_C(indx, 0); /* not conforming */
+            }
+         else
+            {
+            /* code segment loaded as data: readable but never writeable */
+            SET_SR_AR_C(indx, GET_AR_C(entry.AR));
+            SET_SR_AR_W(indx, 0); /* deny write access */
+            SET_SR_AR_E(indx, 0); /* expand up */
+            }
+
+         SET_SR_AR_X(indx, GET_AR_X(entry.AR));
+
+         SET_SR_AR_R(indx, 1); /* must be readable */
+         }
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Pseudo Descriptor Semantics for Real Mode or V86 Mode. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Force a segment register's hidden cache to real-mode/V86 defaults:
+   64K limit, read/write, expand-up, non-conforming, 16-bit, with DPL
+   3 in V86 mode and 0 otherwise. The selector and base fields are
+   deliberately left untouched. */
+GLOBAL VOID
+load_pseudo_descr
+
+IFN1(
+   ISM32, index /* index to segment register */
+   )
+
+
+   {
+   SET_SR_LIMIT(index, 0xffff);
+   SET_SR_AR_W(index, 1); /* allow write access */
+   SET_SR_AR_R(index, 1); /* allow read access */
+   SET_SR_AR_E(index, 0); /* expand up */
+   SET_SR_AR_C(index, 0); /* not conforming */
+   SET_SR_AR_X(index, 0); /* not big (16-bit) */
+
+   if ( GET_VM() == 1 )
+      SET_SR_AR_DPL(index, 3);
+   else
+      SET_SR_AR_DPL(index, 0);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Stack Segment Register. (SS) */
+/* Take #GP if segment not valid */
+/* Take #SS (stack fault) if segment not present */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+load_stack_seg
+
+IFN1(
+   IU16, selector /* (I) 16-bit selector for the new stack segment */
+   )
+
+
+   {
+   IU32 descr_addr;
+   CPU_DESCR entry;
+
+   if ( GET_PE() == 0 || GET_VM() == 1 )
+      {
+      /* Real Mode or V86 Mode - only selector and base change */
+      SET_SS_SELECTOR(selector);
+      SET_SS_BASE((IU32)selector << 4);
+      }
+   else
+      {
+      /* Protected Mode - validate against CPL, then load cache */
+      check_SS(selector, (ISM32)GET_CPL(), &descr_addr, &entry);
+      load_SS_cache(selector, descr_addr, &entry);
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_seg.h b/private/mvdm/softpc.new/base/ccpu386/c_seg.h
new file mode 100644
index 000000000..f560d169d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_seg.h
@@ -0,0 +1,58 @@
+/*[
+
+c_seg.h
+
+LOCAL CHAR SccsID[]="@(#)c_seg.h 1.4 02/09/94";
+
+Segment Register Support.
+-------------------------
+
+]*/
+
+
+IMPORT VOID load_CS_cache
+
+IPT3(
+ IU16, selector,
+ IU32, descr_addr,
+ CPU_DESCR *, entry
+
+ );
+
+IMPORT VOID load_SS_cache
+
+IPT3(
+ IU16, selector,
+ IU32, descr_addr,
+ CPU_DESCR *, entry
+
+ );
+
+IMPORT VOID load_code_seg
+
+IPT1(
+ IU16, selector
+
+ );
+
+IMPORT VOID load_data_seg
+
+IPT2(
+ ISM32, index,
+ IU16, selector
+
+ );
+
+IMPORT VOID load_pseudo_descr
+
+IPT1(
+ ISM32, index
+
+ );
+
+IMPORT VOID load_stack_seg
+
+IPT1(
+ IU16, selector
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_stack.c b/private/mvdm/softpc.new/base/ccpu386/c_stack.c
new file mode 100644
index 000000000..1e6d95e08
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_stack.c
@@ -0,0 +1,679 @@
+/*[
+
+c_stack.c
+
+LOCAL CHAR SccsID[]="@(#)c_stack.c 1.14 03/03/95";
+
+Stack (and related SP/BP access) Support.
+-----------------------------------------
+
+]*/
+
+
+#include <insignia.h>
+#include <host_def.h>
+
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_page.h>
+#include <c_tlb.h>
+#include <fault.h>
+#include <ccpupig.h>
+
+#if defined(PIG) && !defined(PROD)
+/* The Soft486 CPU may (when not constrained SAFE_PUSH) corrupt the unwritten
+ * parts of, say, an interrupt frame which contains 16-bit items pushed into
+ * 32-bit allocated slots. This define makes the Pigger blind to these locations.
+ */
+#define PIG_DONT_CHECK_OTHER_WORD_ON_STACK
+#endif /* PIG && !PROD */
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Semi-intelligent support for Incrementing/Decrementing the Stack */
+/* Pointer(E)SP. */
+/* Alters ESP or SP depending on StackAddrSize. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+byte_change_SP
+
+IFN1(
+	IS32, delta
+   )
+
+
+   {
+   /* Adjust the stack pointer by 'delta' bytes (negative for pushes,
+      positive for pops). A 'big' SS (B bit set) means the full 32-bit
+      ESP moves; otherwise only the 16-bit SP is altered. */
+   if ( GET_SS_AR_X() != USE32 )	/* look at SS 'B' bit */
+      SET_SP(GET_SP() + delta);
+   else
+      SET_ESP(GET_ESP() + delta);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Intelligent support for Incrementing/Decrementing the Stack */
+/* Pointer(E)SP by either words or double words items depending on */
+/* OperandSize. */
+/* Alters ESP or SP depending on StackAddrSize. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+change_SP
+
+IFN1(
+	IS32, items
+   )
+
+
+   {
+   IS32 nr_bytes;
+
+   /* Each stack item occupies 2 bytes under a 16-bit operand size,
+      4 bytes under a 32-bit operand size. */
+   nr_bytes = items * ( GET_OPERAND_SIZE() == USE16 ? 2 : 4 );
+
+   /* The SS 'B' bit selects which stack pointer is updated. */
+   if ( GET_SS_AR_X() == USE32 )
+      SET_ESP(GET_ESP() + nr_bytes);
+   else
+      SET_SP(GET_SP() + nr_bytes);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Intelligent support for Reading the Frame Pointer(E)BP. */
+/* Returns either EBP or BP depending on StackAddrSize. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+get_current_BP()
+   {
+   /* Frame pointer is EBP for a 32-bit ('big') stack segment, BP otherwise. */
+   return ( GET_SS_AR_X() == USE32 ) ? GET_EBP() : GET_BP();
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Intelligent support for Reading the Stack Pointer(E)SP. */
+/* Returns either ESP or SP depending on StackAddrSize. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+GetStackPointer()
+   {
+   /* Stack pointer is ESP for a 32-bit ('big') stack segment, SP otherwise. */
+   return ( GET_SS_AR_X() == USE32 ) ? GET_ESP() : GET_SP();
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Intelligent support for Writing the Frame Pointer (E)BP. */
+/* Writes EBP or BP depending on StackAddrSize */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+set_current_BP
+
+IFN1(
+	IU32, new_bp
+   )
+
+
+   {
+   /* Write the frame pointer sized according to the SS 'B' bit. */
+   if ( GET_SS_AR_X() != USE32 )	/* USE16 */
+      SET_BP(new_bp);
+   else
+      SET_EBP(new_bp);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Intelligent support for Writing the Stack Pointer (E)SP. */
+/* Writes ESP or SP depending on StackAddrSize */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+set_current_SP
+
+IFN1(
+	IU32, new_sp
+   )
+
+
+   {
+   /* Write the stack pointer sized according to the SS 'B' bit. */
+   if ( GET_SS_AR_X() != USE32 )	/* USE16 */
+      SET_SP(new_sp);
+   else
+      SET_ESP(new_sp);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Pop word or double word from the stack. */
+/* Used by instructions which implicitly reference the stack. */
+/* Stack Checking MUST have been completed earlier. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+spop()
+   {
+   IU32 addr;	/* stack address */
+   IU32 val;
+
+   /*
+      Intel describe the algorithm as:-
+
+      if ( StackAddrSize == 16 )
+	 if ( OperandSize == 16 )
+	    val <- SS:[SP]	// 2-byte
+	    SP = SP + 2
+	 else	// OperandSize == 32
+	    val <- SS:[SP]	// 4-byte
+	    SP = SP + 4
+      else	// StackAddrSize == 32
+	 if ( OperandSize == 16 )
+	    val <- SS:[ESP]	// 2-byte
+	    ESP = ESP + 2
+	 else	// OperandSize == 32
+	    val <- SS:[ESP]	// 4-byte
+	    ESP = ESP + 4
+
+      We achieve the same effect by calling 'intelligent' SP functions
+      which take account of the StackAddrSize, and concentrate here on
+      the OperandSize.
+   */
+
+   /* linear address of the current stack top (read happens before the
+      pointer is advanced) */
+   addr = GET_SS_BASE() + GetStackPointer();
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      {
+      /* a popped word is zero-extended into the IU32 return value */
+      val = (IU32)vir_read_word(addr, NO_PHYSICAL_MAPPING);
+      byte_change_SP((IS32)2);
+      }
+   else	/* USE32 */
+      {
+      val = (IU32)vir_read_dword(addr, NO_PHYSICAL_MAPPING);
+      byte_change_SP((IS32)4);
+      }
+
+   return val;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Push word or double word onto the stack. */
+/* Used by instructions which implicitly reference the stack. */
+/* Stack Checking MUST have been completed earlier. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spush
+
+IFN1(
+	IU32, data	/* value to be pushed */
+   )
+
+
+   {
+   IU32 addr;	/* stack address */
+
+   /*
+      Intel describe the algorithm as:-
+
+      if ( StackAddrSize == 16 )
+	 if ( OperandSize == 16 )
+	    SP = SP - 2
+	    SS:[SP] <- val	// 2-byte
+	 else	// OperandSize == 32
+	    SP = SP - 4
+	    SS:[SP] <- val	// 4-byte
+      else	// StackAddrSize == 32
+	 if ( OperandSize == 16 )
+	    ESP = ESP - 2
+	    SS:[ESP] <- val	// 2-byte
+	 else	// OperandSize == 32
+	    ESP = ESP - 4
+	    SS:[ESP] <- val	// 4-byte
+
+      We achieve the same effect by calling 'intelligent' SP functions
+      which take account of the StackAddrSize, and concentrate here on
+      the OperandSize.
+
+      Note the order: the pointer is decremented first, then the store
+      is made at the new stack top.
+   */
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      {
+      /* push word */
+      byte_change_SP((IS32)-2);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_word(addr, NO_PHYSICAL_MAPPING, (IU16)data);
+      }
+   else	/* USE32 */
+      {
+      /* push double word */
+      byte_change_SP((IS32)-4);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_dword(addr, NO_PHYSICAL_MAPPING, (IU32)data);
+      }
+   }
+
+#ifdef PIG
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Push word or double word onto the stack. */
+/* Used by instructions which implicitly reference the stack. */
+/* Stack Checking MUST have been completed earlier. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spush_flags
+
+IFN1(
+	IU32, data	/* value to be pushed */
+   )
+
+
+   {
+   IU32 addr;	/* stack address */
+
+   /*
+    * see comment for spush().
+    *
+    * This variant additionally records the stack address of the pushed
+    * flags image so the pigger can later mask out flag bits whose
+    * values may legitimately differ between CPUs.
+    */
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      {
+      /* push word */
+      byte_change_SP((IS32)-2);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_word(addr, NO_PHYSICAL_MAPPING, (IU16)data);
+
+      /*
+       * record the address at which we may not know the flags value
+       * -- we will examine PigIgnoreFlags when the EDL CPU runs to check
+       * if there's a problem.
+       */
+      record_flags_addr(addr);
+      }
+   else	/* USE32 */
+      {
+      /* push double word */
+      byte_change_SP((IS32)-4);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_dword(addr, NO_PHYSICAL_MAPPING, (IU32)data);
+
+      /*
+       * no need to record word at addr+2 as the flags are always known for this word
+       */
+      record_flags_addr(addr);
+      }
+   }
+#endif /* PIG */
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Push word onto the stack. */
+/* Operand size of 32 will still push 16 bits of data, but the stack */
+/* pointer is adjusted by 4. */
+/* Used by PUSH segment-register */
+/* Stack Checking MUST have been completed earlier. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spush16
+
+IFN1(
+	IU32, data	/* value to be pushed */
+   )
+
+
+   {
+   IU32 addr;	/* stack address */
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      {
+      /* stack item size is word */
+      byte_change_SP((IS32)-2);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_word(addr, NO_PHYSICAL_MAPPING, (IU16)data);
+      }
+   else	/* USE32 */
+      {
+      /* stack item size is double word, but only the low word is
+         written -- the high word of the slot is left untouched
+         (the real CPU behaves this way for PUSH seg-reg). */
+      byte_change_SP((IS32)-4);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_word(addr, NO_PHYSICAL_MAPPING, (IU16)data);
+#ifdef PIG_DONT_CHECK_OTHER_WORD_ON_STACK
+      /* tell the pigger not to compare the unwritten high word */
+      cannot_vir_write_word(addr+2, NO_PHYSICAL_MAPPING, 0x0000);
+#endif /* PIG_DONT_CHECK_OTHER_WORD_ON_STACK */
+      }
+   }
+
+#ifdef PIG
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Push word onto the stack. */
+/* Operand size of 32 will still push 16 bits of data, but the stack */
+/* pointer is adjusted by 4. */
+/* Used by PUSH segment-register */
+/* Stack Checking MUST have been completed earlier. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+spush16_flags
+
+IFN1(
+	IU32, data	/* value to be pushed */
+   )
+
+
+   {
+   IU32 addr;	/* stack address */
+
+   /* As spush16(), but additionally records the address of the pushed
+      flags word for the pigger (see record_flags_addr()). */
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      {
+      /* stack item size is word */
+      byte_change_SP((IS32)-2);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_word(addr, NO_PHYSICAL_MAPPING, (IU16)data);
+      record_flags_addr(addr);
+      }
+   else	/* USE32 */
+      {
+      /* stack item size is double word; only the low word is written */
+      byte_change_SP((IS32)-4);
+      addr = GET_SS_BASE() + GetStackPointer();
+      vir_write_word(addr, NO_PHYSICAL_MAPPING, (IU16)data);
+      record_flags_addr(addr);
+#ifdef PIG_DONT_CHECK_OTHER_WORD_ON_STACK
+      cannot_vir_write_word(addr+2, NO_PHYSICAL_MAPPING, 0x0000);
+#endif /* PIG_DONT_CHECK_OTHER_WORD_ON_STACK */
+      }
+   }
+#endif /* PIG */
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Temporary Pop word or double word from the stack. */
+/* (E)SP is not changed by this instruction. */
+/* Used by instructions which implicitly reference the stack. */
+/* Stack Checking MUST have been completed earlier. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+tpop
+
+IFN2(
+	LIN_ADDR, item_offset,	/* item offset(from stack top) to required item */
+	LIN_ADDR, byte_offset	/* byte offset (additional to item_offset) */
+   )
+
+
+   {
+   IU32 addr;	/* stack address */
+   IS32 offset;	/* total offset from stack top */
+   IU32 val;
+
+   /* NB: (E)SP is read but never written here - this is a 'peek'. */
+
+   /* item_offset is scaled by the current stack item size */
+   if ( GET_OPERAND_SIZE() == USE16 )
+      offset = item_offset * 2 + byte_offset;
+   else	/* USE32 */
+      offset = item_offset * 4 + byte_offset;
+
+   /* calculate offset address in 32/16bit arithmetic */
+   addr = GetStackPointer() + offset;
+   if ( GET_SS_AR_X() == 0 )	/* look at SS 'B' bit */
+      addr &= WORD_MASK;	/* 16-bit stacks wrap at 64K */
+
+   /* then add segment address */
+   addr += GET_SS_BASE();
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      val = (IU32)vir_read_word(addr, NO_PHYSICAL_MAPPING);
+   else	/* USE32 */
+      val = (IU32)vir_read_dword(addr, NO_PHYSICAL_MAPPING);
+
+   return val;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check new stack has space for a given number of bytes. */
+/* Take #SF(0) if insufficient room on stack (as in 80386 manual) */
+/* Take #SF(sel) if insufficient room on stack (as in i486 manual) */
+/* Take #PF if page fault. */
+/* Stack wrapping is not supported by this routine. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+validate_new_stack_space
+
+IFN4(
+	LIN_ADDR, nr_items,	/* (I) number of items which must exist */
+	LIN_ADDR, stack_top,	/* (I) stack pointer */
+	CPU_DESCR *, entry,	/* (I) pntr to descriptor cache entry for
+				       stack */
+	IU16, stack_sel	/* (I) selector of new stack */
+   )
+
+
+   {
+   ISM32 bytes;
+   IU32 upper;
+   IU32 offset;
+   ISM32 i;
+
+/* The 80386 & i486 PRMs disagree on this matter... the EDL i486 CPU matches
+   the i486 manual - which seems to do the more sensible thing - until an
+   experiment is done to show which is the correct behaviour, we'll do what
+   the book says...
+
+   (The disagreement is over the error code pushed with the stack fault:
+   0 per the 80386 manual, the new stack selector per the i486 manual.)
+ */
+
+/* NOTE(review): XX_error_code is never #undef'd, so it stays defined for
+   the remainder of this translation unit - harmless today as nothing
+   below uses the name, but worth confirming. */
+#ifdef SPC486
+#define XX_error_code stack_sel
+#else
+#define XX_error_code 0
+#endif
+
+   /* item count -> byte count at the current operand size */
+   if ( GET_OPERAND_SIZE() == USE16 )
+      bytes = nr_items * 2;
+   else	/* USE32 */
+      bytes = nr_items * 4;
+
+   if ( GET_AR_E(entry->AR) == 0 )
+      {
+      /* expand up: valid offsets are [0 .. limit] */
+      if ( stack_top < bytes || (stack_top - 1) > entry->limit )
+	 SF(XX_error_code, FAULT_VALNEWSPC_SS_LIMIT_16);	/* limit check fails */
+      }
+   else
+      {
+      /* expand down: valid offsets are (limit .. upper] where 'upper'
+         depends on the segment's 'B' bit */
+      if ( GET_AR_X(entry->AR) == USE16 )
+	 upper = 0xffff;
+      else	/* USE32 */
+	 upper = 0xffffffff;
+
+      if ( stack_top <= (entry->limit + bytes) ||
+	   (stack_top - 1) > upper )
+	 SF(XX_error_code, FAULT_VALNEWSPC_SS_LIMIT_32);	/* limit check fails */
+      }
+
+   /* finally do paging unit checks (supervisor write probe per item) */
+   offset = stack_top - bytes;
+
+   for ( i = 0; i < nr_items; i++ )
+      {
+      if ( GET_OPERAND_SIZE() == USE16 )
+	 {
+	 spr_chk_word(entry->base + offset, PG_W);
+	 offset += 2;
+	 }
+      else
+	 {
+	 spr_chk_dword(entry->base + offset, PG_W);
+	 offset += 4;
+	 }
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check stack holds a given number of operands. */
+/* Take #GP(0) or #SF(0) if insufficient data on stack. */
+/* Take #PF if page fault. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+validate_stack_exists
+
+IFN2(
+	BOOL, use_bp,	/* (I) if true use (E)BP not (E)SP to address
+			       stack */
+	LIN_ADDR, nr_items	/* (I) number of operands which must exist on
+				       stack */
+   )
+
+
+   {
+   IU32 offset;
+   ISM32 operand_size;
+   ISM32 i;
+
+   /* starting offset is the current stack (or frame) pointer */
+   offset = use_bp ? get_current_BP() : GetStackPointer();
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      operand_size = 2;	/* word */
+   else	/* USE32 */
+      operand_size = 4;	/* double word */
+
+   /* do access check: SS must be readable */
+   if ( GET_SS_AR_R() == 0 )
+      {
+      /* raise exception - something wrong with stack access */
+      /* fault class depends on mode: #GP outside protected mode, #SF within */
+      if ( GET_PE() == 0 || GET_VM() == 1 )
+	 GP((IU16)0, FAULT_VALSTACKEX_ACCESS);
+      else
+	 SF((IU16)0, FAULT_VALSTACKEX_ACCESS);
+      }
+
+   /* do limit check */
+   limit_check(SS_REG, offset, nr_items, operand_size);
+
+   /* finally do paging unit checks (user-mode read probe per operand) */
+   for ( i = 0; i < nr_items; i++ )
+      {
+      if ( operand_size == 2 )
+	 {
+	 (VOID)usr_chk_word(GET_SS_BASE() + offset, PG_R);
+	 }
+      else
+	 {
+	 (VOID)usr_chk_dword(GET_SS_BASE() + offset, PG_R);
+	 }
+
+      offset += operand_size;
+      if ( GET_SS_AR_X() == 0 )	/* look at SS 'B' bit */
+	 offset &= WORD_MASK;	/* apply 16-bit arith if reqd */
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check stack has space for a given number of operands. */
+/* Take #GP(0) or #SF(0) if insufficient room on stack. */
+/* Take #PF if page fault. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+validate_stack_space
+
+IFN2(
+	BOOL, use_bp,	/* (I) if true use (E)BP not (E)SP to address
+			       stack */
+	LIN_ADDR, nr_items	/* (I) number of items which must exist on
+				       stack */
+   )
+
+
+   {
+   IU32 offset;
+   ISM32 operand_size;
+   IS32 size;
+   ISM32 i;
+
+   if ( GET_OPERAND_SIZE() == USE16 )
+      operand_size = 2;	/* word */
+   else	/* USE32 */
+      operand_size = 4;	/* double word */
+
+   /* calculate (-ve) total data size */
+   size = nr_items * -operand_size;
+
+   /* get current stack base */
+   offset = use_bp ? get_current_BP() : GetStackPointer();
+
+   /* hence form lowest memory address of new data to be pushed */
+   /* in 32/16bit arithmetic */
+   offset = offset + size;
+   if ( GET_SS_AR_X() == 0 )	/* look at SS 'B' bit */
+      offset &= WORD_MASK;	/* 16-bit stacks wrap at 64K */
+
+   /* do access check: SS must be writable */
+   if ( GET_SS_AR_W() == 0 )
+      {
+      /* raise exception - something wrong with stack access */
+      /* fault class depends on mode: #GP outside protected mode, #SF within */
+      if ( GET_PE() == 0 || GET_VM() == 1 )
+	 GP((IU16)0, FAULT_VALSTKSPACE_ACCESS);
+      else
+	 SF((IU16)0, FAULT_VALSTKSPACE_ACCESS);
+      }
+
+   /* do limit check */
+   limit_check(SS_REG, offset, nr_items, operand_size);
+
+   /* finally do paging unit checks (user-mode write probe per item) */
+   for ( i = 0; i < nr_items; i++ )
+      {
+      if ( operand_size == 2 )
+	 {
+	 (VOID)usr_chk_word(GET_SS_BASE() + offset, PG_W);
+	 }
+      else
+	 {
+	 (VOID)usr_chk_dword(GET_SS_BASE() + offset, PG_W);
+	 }
+
+      offset += operand_size;
+      if ( GET_SS_AR_X() == 0 )	/* look at SS 'B' bit */
+	 offset &= WORD_MASK;	/* apply 16-bit arith if reqd */
+      }
+   }
+
+#ifdef PIG
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Record a (physical) ESP value for later use if PigIgnoreFlags is */
+/* set by the EDL CPU after the pigger has run. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL void
+record_flags_addr IFN1(LIN_ADDR, lin_addr)
+{
+	IU32 phy_addr;
+
+	if ( GET_PG() == 1 )
+	{
+		/*
+		 * we ask for supervisor access because the access has
+		 * already been checked by the push. We dont know the
+		 * U/S used then but asking for PG_S will always work.
+		 */
+		phy_addr = lin2phy(lin_addr, PG_S | PG_W);
+	}
+	else
+		phy_addr = lin_addr;
+
+	/* printf("recording stack flags push @ lin: %x, phy %x\n", lin_addr, phy_addr); */
+
+	/* mark the low byte: only the non-arithmetic flag bits are comparable */
+	pig_fault_write(phy_addr, (~ARITH_FLAGS_BITS) & 0xff);
+
+	/*
+	 * short-cut - if bottom bits not 0xfff then we can just add 1
+	 * to the physical addr for byte 2. Otherwise we have to recalculate
+	 * the whole address (the flags word may straddle a page boundary).
+	 */
+	if (((phy_addr & 0xfff) != 0xfff) || (GET_PG() == 0))
+		pig_fault_write(phy_addr + 1, ((~ARITH_FLAGS_BITS) >> 8) & 0xff);
+	else
+	{
+		phy_addr = lin2phy(lin_addr + 1, PG_S | PG_W);
+		pig_fault_write(phy_addr, ((~ARITH_FLAGS_BITS) >> 8) & 0xff);
+	}
+}
+
+#endif
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_stack.h b/private/mvdm/softpc.new/base/ccpu386/c_stack.h
new file mode 100644
index 000000000..e1722d75d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_stack.h
@@ -0,0 +1,149 @@
+/*[
+
+c_stack.h
+
+LOCAL CHAR SccsID[]="@(#)c_stack.h 1.11 03/03/95";
+
+Stack (and related SP/BP access) Support.
+-----------------------------------------
+
+]*/
+
+
+/*
+ SP/BP indicator for stack checking operations.
+ */
+#define USE_SP FALSE
+#define USE_BP TRUE
+
+/*
+ Useful defines for tpop(),validate_stack_exists(),
+ validate_stack_space() and change_SP() parameters.
+ */
+#define NR_ITEMS_1 1
+#define NR_ITEMS_2 2
+#define NR_ITEMS_3 3
+#define NR_ITEMS_4 4
+#define NR_ITEMS_5 5
+#define NR_ITEMS_6 6
+#define NR_ITEMS_8 8
+#define NR_ITEMS_9 9
+
+#define STACK_ITEM_1 (IUM32)0
+#define STACK_ITEM_2 (IUM32)1
+#define STACK_ITEM_3 (IUM32)2
+#define STACK_ITEM_4 (IUM32)3
+#define STACK_ITEM_5 (IUM32)4
+#define STACK_ITEM_6 (IUM32)5
+#define STACK_ITEM_7 (IUM32)6
+#define STACK_ITEM_8 (IUM32)7
+#define STACK_ITEM_9 (IUM32)8
+
+#define NULL_BYTE_OFFSET (IUM32)0
+
+
+IMPORT VOID byte_change_SP
+
+IPT1(
+ IS32, delta
+
+ );
+
+IMPORT VOID change_SP
+
+IPT1(
+ IS32, items
+
+ );
+
+IMPORT IU32 get_current_BP IPT0();
+
+IMPORT IU32 GetStackPointer IPT0();
+
+IMPORT VOID set_current_BP
+
+IPT1(
+ IU32, new_bp
+
+ );
+
+IMPORT VOID set_current_SP
+
+IPT1(
+ IU32, new_sp
+
+ );
+
+IMPORT IU32 spop IPT0();
+
+IMPORT VOID spush
+
+IPT1(
+ IU32, data
+
+ );
+
+#ifdef PIG
+IMPORT VOID spush_flags
+
+IPT1(
+ IU32, data
+
+ );
+#endif /* PIG */
+
+IMPORT VOID spush16
+
+IPT1(
+ IU32, data
+
+ );
+
+#ifdef PIG
+IMPORT VOID spush16_flags
+
+IPT1(
+ IU32, data
+
+ );
+#endif /* PIG */
+
+
+IMPORT IU32 tpop
+
+IPT2(
+ LIN_ADDR, item_offset,
+ LIN_ADDR, byte_offset
+
+ );
+
+IMPORT VOID validate_new_stack_space
+
+IPT4(
+	LIN_ADDR, nr_items,	/* number of stack items (not bytes) -
+				   matches the definition in c_stack.c */
+	LIN_ADDR, stack_top,
+	CPU_DESCR *, entry,
+	IU16, stack_sel
+
+   );
+
+IMPORT VOID validate_stack_exists
+
+IPT2(
+ BOOL, use_bp,
+ LIN_ADDR, nr_items
+
+ );
+
+IMPORT VOID validate_stack_space
+
+IPT2(
+ BOOL, use_bp,
+ LIN_ADDR, nr_items
+
+ );
+
+IMPORT void touch_flags_memory IPT0();
+IMPORT void init_flags_esp_list IPT0();
+IMPORT void record_flags_addr IPT1(LIN_ADDR, addr);
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_tlb.c b/private/mvdm/softpc.new/base/ccpu386/c_tlb.c
new file mode 100644
index 000000000..bbf9cabe5
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_tlb.c
@@ -0,0 +1,953 @@
+/*[
+
+c_tlb.c
+
+LOCAL CHAR SccsID[]="@(#)c_tlb.c 1.17 03/15/95";
+
+Translation Lookaside Buffer Emulation.
+---------------------------------------
+
+]*/
+
+
+/*
+ Indicator for 'optimised lookup' format TLB.
+ */
+#define FAST_TLB
+
+#include <stdio.h>
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_tlb.h>
+#include <c_page.h>
+#include <c_mem.h>
+#include <ccpusas4.h>
+#include <ccpupig.h>
+#include <fault.h>
+
+
+/*
+ The 386 TLB is an 8 entry 4 way set associative cache. It is known
+ that cache sets are not allocated on an LRU basis, we assume simple
+ round robin allocation per entry.
+ */
+
+typedef struct
+ {
+ IU32 la; /* Bits 32-12 => 20-bit Linear Address */
+ IU32 pa; /* Bits 32-12 => 20-bit Physical Address */
+ BOOL v; /* Validity indicator, true means valid */
+ BOOL d; /* Dirty indicator, true means dirty */
+ IU32 mode; /* 2-bit Mode indicator
+ Bit 0 => R/W
+ Bit 1 => U/S */
+ } TLB_ENTRY;
+
+#define NR_TLB_SETS 4
+#define NR_TLB_ENTRIES 8
+
+/*
+ The Intel format TLB data structures.
+ */
+LOCAL TLB_ENTRY tlb[NR_TLB_SETS][NR_TLB_ENTRIES];
+LOCAL IU32 next_set[NR_TLB_ENTRIES] =
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+#ifdef FAST_TLB
+
+/*
+ We allocate one byte per Intel page; this 'page_index' allows us
+ to tell quickly if a page translation is held in the TLB and where
+ we can find the translated address. The format is arranged for
+ minimal access checks. Each byte has the format:-
+
+ 1) 7 0
+ ==========
+ |00000000| Page Not Mapped.
+ ==========
+
+ 2) 7 6 5 4 2 10
+ ================
+ |1|Set|Entry|00| Page Mapped in given set and entry of TLB.
+ ================
+ */
+
+#define NR_PAGES 1048576 /* 2^20 */
+
+LOCAL IU8 page_index[NR_PAGES];
+
+#define PI_NOT_VALID 0
+#define PI_VALID 0x80
+#define PI_SET_ENTRY_MASK 0x7c
+#define PI_SET_SHIFT 5
+#define PI_ENTRY_SHIFT 2
+
+/*
+ We also allocate an array of translated (ie physical) addresses,
+ indexed by the Set:Entry combination in the page_index. For each
+ combination four sequential addresses are allocated for the various
+ access modes:-
+
+ Supervisor Read
+ Supervisor Write
+ User Read
+ User Write
+
+ A translation address of zero is taken to mean that no translation
+ is held (It is easy to check for zero). This has the slight side
+ effect that though we may enter address translations for zero (ie
+ the first page of physical memory) we never get a 'hit' for them, so
+ access to the first page of physical memory is always through the
+ slow Intel format TLB.
+ */
+
+#define NR_ACCESS_MODES 4
+
+#define NO_MAPPED_ADDRESS 0
+
+LOCAL IU32 page_address[NR_TLB_SETS * NR_TLB_ENTRIES * NR_ACCESS_MODES];
+
+#endif /* FAST_TLB */
+
+/*
+ Linear Addresses are composed as follows:-
+
+ 3 2 2 1 1
+ 1 2 1 2 1 0
+ ====================================
+ |directory | table | offset |
+ ====================================
+ */
+
+#define OFFSET_MASK 0xfff
+#define TBL_MASK 0x3ff
+#define DIR_MASK 0x3ff
+#define TBL_SHIFT 12
+#define DIR_SHIFT 22
+#define DIR_TBL_SHIFT 12
+
+/*
+ Page Directory Entries (PDE) or
+ Page Table Entries (PTE) are composed as follows:-
+
+ 3 2
+ 1 2 6 5 2 1 0
+ =========================================
+ | | | | | |U|R| |
+ | page frame address | |D|A| |/|/|P|
+ | | | | | |S|W| |
+ =========================================
+ */
+
+#define PE_PFA_MASK 0xfffff000
+#define PE_P_MASK 0x1
+#define PE_U_S_MASK 0x4
+#define PE_R_W_MASK 0x2
+
+#define PE_DIRTY 0x40
+#define PE_ACCESSED 0x20
+
+/*
+ TR7 = Test Data Register:-
+
+ 3 1
+ 1 2 4 32
+ ======================================
+ | | |H| R| |
+ | Physical Address | |T| E| |
+ | | | | P| |
+ ======================================
+
+ TR6 = Test Command Register:-
+
+ 3 1 1 1
+ 1 2 1 0 9 8 7 6 5 0
+ ===========================================
+ | Linear Address |V|D|D|U|U|W|W| |C|
+ | | | |#| |#| |#| | |
+ ===========================================
+ */
+
+#define TCR_LA_MASK 0xfffff000
+#define TCR_V_MASK 0x800
+#define TCR_D_MASK 0x400
+#define TCR_ND_MASK 0x200
+#define TCR_U_MASK 0x100
+#define TCR_NU_MASK 0x80
+#define TCR_W_MASK 0x40
+#define TCR_NW_MASK 0x20
+#define TCR_C_MASK 0x1
+#define TCR_ATTR_MASK 0x7e0
+
+#define TDR_PA_MASK 0xfffff000
+#define TDR_HT_MASK 0x10
+#define TDR_REP_MASK 0xc
+
+#define TDR_REP_SHIFT 2
+
+/*
+ Encoded access check matrix, true indicates access failure.
+ */
+
+#ifdef SPC486
+
+/* WP reqd avail */
+LOCAL BOOL access_check[2] [4] [4] =
+ {
+ { /* WP = 0 */
+ /* S_R S_W U_R U_W */
+ { FALSE, FALSE, FALSE, FALSE }, /* S_R */
+ { FALSE, FALSE, FALSE, FALSE }, /* S_W */
+ { TRUE , TRUE , FALSE, FALSE }, /* U_R */
+ { TRUE , TRUE , TRUE , FALSE } /* U_W */
+ },
+ { /* WP = 1 */
+ /* S_R S_W U_R U_W */
+ { FALSE, FALSE, FALSE, FALSE }, /* S_R */
+ { FALSE, FALSE, TRUE , FALSE }, /* S_W */
+ { TRUE , TRUE , FALSE, FALSE }, /* U_R */
+ { TRUE , TRUE , TRUE , FALSE } /* U_W */
+ }
+ };
+
+#else
+
+/* reqd avail */
+LOCAL BOOL access_check[4] [4] =
+ {
+ /* S_R S_W U_R U_W */
+ { FALSE, FALSE, FALSE, FALSE }, /* S_R */
+ { FALSE, FALSE, FALSE, FALSE }, /* S_W */
+ { TRUE , TRUE , FALSE, FALSE }, /* U_R */
+ { TRUE , TRUE , TRUE , FALSE } /* U_W */
+ };
+
+#endif /* SPC486 */
+
+LOCAL void deal_with_pte_cache_hit IPT1(IU32, linearAddress);
+GLOBAL void Pig_NotePDECacheAccess IPT2(IU32, linearAddress, IU32, accessBits);
+GLOBAL void Pig_NotePTECacheAccess IPT2(IU32, linearAddress, IU32, accessBits);
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Flush TLB. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+flush_tlb()
+   {
+   ISM32 set, entry;
+   TLB_ENTRY *e;
+
+   /* Invalidate every entry in every set; for the FAST_TLB variant the
+      per-page lookup byte must be cleared too, or stale translations
+      would still 'hit' in the optimised path. */
+   for ( set = 0; set < NR_TLB_SETS; set++ )
+      for ( entry = 0; entry < NR_TLB_ENTRIES; entry++ )
+	 {
+	 e = &tlb[set][entry];
+#ifdef FAST_TLB
+	 if ( e->v )
+	    {
+	    /* Remove associated page_index entry */
+	    page_index[e->la >> DIR_TBL_SHIFT] = PI_NOT_VALID;
+	    }
+#endif /* FAST_TLB */
+	 e->v = FALSE;
+	 }
+   }
+
+#ifdef SPC486
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Invalidate TLB entry. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+invalidate_tlb_entry
+
+IFN1(
+	IU32, lin	/* Linear Address */
+   )
+
+
+   {
+   ISM32 set, entry;
+   TLB_ENTRY *e;	/* current tlb entry */
+
+   entry = lin >> DIR_TBL_SHIFT & 0x07;	/* isolate bits 14-12 */
+   lin = lin & ~OFFSET_MASK;	/* drop any offset */
+
+   /* only the one entry slot (in each set) can hold this address,
+      so just search the sets for a valid matching entry */
+   for ( set = 0; set < NR_TLB_SETS; set++ )
+      {
+      e = &tlb[set][entry];
+
+      if ( e->v && e->la == lin )
+	 {
+	 /* Valid entry for given address: Flush it. */
+#ifdef FAST_TLB
+	 /* Remove associated page_index entry */
+	 page_index[e->la >> DIR_TBL_SHIFT] = PI_NOT_VALID;
+#endif /* FAST_TLB */
+	 e->v = FALSE;
+	 }
+      }
+   }
+
+#endif /* SPC486 */
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Map linear address to physical address. */
+/* May take #PF. Used by all internal C CPU functions. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IU32
+lin2phy
+
+IFN2(
+	IU32, lin,	/* Linear Address */
+	ISM32, access	/* access mode Bit 0 => R/W
+			   Bit 1 => U/S */
+   )
+
+
+   {
+   IU8 pi;	/* page_index entry */
+   IU32 ma;	/* mapped address */
+
+   IU32 pde_addr;	/* Address of Page Directory Entry */
+   IU32 pte_addr;	/* Address of Page Table Entry */
+   IU32 pde;	/* Page Directory Entry */
+   IU32 pte;	/* Page Table Entry */
+   IU32 new_pde;	/* Page Directory Entry (to write back) */
+   IU32 new_pte;	/* Page Table Entry (to write back) */
+
+   ISM32 set, entry;
+   IU32 lookup;	/* Linear address minus offset */
+   BOOL read_op;	/* true if read operation */
+   IU32 comb;	/* Combined protection of pde and pte */
+   TLB_ENTRY *e;	/* current tlb entry */
+
+#ifdef FAST_TLB
+
+   /* Search optimised format TLB (assignments in conditions are
+      intentional here - hit tests double as loads) */
+   if ( pi = page_index[lin >> DIR_TBL_SHIFT] )
+      {
+      /* we have hit for the page, get mapped address */
+      if ( ma = page_address[(pi & PI_SET_ENTRY_MASK) + access] )
+	 {
+	 /* we have hit for access type */
+	 return ma | lin & OFFSET_MASK;
+	 }
+      }
+
+   /* Otherwise do things the Intel way. */
+
+#endif /* FAST_TLB */
+
+   /* Check for entry in TLB <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
+
+   entry = lin >> DIR_TBL_SHIFT & 0x07;	/* isolate bits 14-12 */
+   lookup = lin & ~OFFSET_MASK;
+   read_op = (access & PG_W) ? FALSE : TRUE;
+
+   for ( set = 0; set < NR_TLB_SETS; set++ )
+      {
+      e = &tlb[set][entry];
+      /*
+	 The TLB may have a READ miss (address not in TLB) or a WRITE
+	 miss (address not in TLB or address in TLB but dirty bit not
+	 set). For either case a new cache entry is created.
+      */
+      if ( e->v && e->la == lookup && (read_op || e->d) )
+	 {
+	 /* Cache Hit <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+	 /* check access validity */
+#ifdef SPC486
+	 if ( access_check[GET_WP()][access][e->mode] )
+#else
+	 if ( access_check[access][e->mode] )
+#endif /* SPC486 */
+	    {
+	    /* Protection Failure - error code has P bit (bit 0) set */
+	    SET_CR(CR_PFLA, lin);
+	    PF((IU16)(access << 1 | 1), FAULT_LIN2PHY_ACCESS);
+	    }
+
+	 /* return cached physical address */
+	 return e->pa | lin & OFFSET_MASK;
+	 }
+      }
+
+   /* Cache Miss <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
+
+   /* check that pde is present */
+   pde_addr = (GET_CR(CR_PDBR) & PE_PFA_MASK) +
+	      ((lin >> DIR_SHIFT & DIR_MASK) << 2);
+   pde = phy_read_dword(pde_addr);
+
+   if ( (pde & PE_P_MASK) == 0 )
+      {
+      /* pde not present - error code has P bit clear */
+      SET_CR(CR_PFLA, lin);
+      PF((IU16)(access << 1), FAULT_LIN2PHY_PDE_NOTPRESENT);
+      }
+
+   /* check that pte is present */
+   pte_addr = (pde & PE_PFA_MASK) +
+	      ((lin >> TBL_SHIFT & TBL_MASK) << 2);
+   pte = phy_read_dword(pte_addr);
+
+   if ( (pte & PE_P_MASK) == 0 )
+      {
+      /* pte not present - error code has P bit clear */
+      SET_CR(CR_PFLA, lin);
+      PF((IU16)(access << 1), FAULT_LIN2PHY_PTE_NOTPRESENT);
+      }
+
+   /* combine pde and pte protection (and convert into our format)
+    *
+    * The i486 HARDWARE manual says take the numerically lower of
+    * the combined bits.
+    */
+
+   if ( (pde & ( PE_U_S_MASK|PE_R_W_MASK )) < (pte & ( PE_U_S_MASK|PE_R_W_MASK )))
+      {
+      /* The pde defines protection */
+      comb = PG_R | PG_S;
+      if ( pde & PE_U_S_MASK )
+	 comb |= PG_U;
+      if ( pde & PE_R_W_MASK )
+	 comb |= PG_W;
+      }
+   else
+      {
+      /* The pte defines protection */
+      comb = PG_R | PG_S;
+      if ( pte & PE_U_S_MASK )
+	 comb |= PG_U;
+      if ( pte & PE_R_W_MASK )
+	 comb |= PG_W;
+      }
+
+
+   /* check access validity */
+#ifdef SPC486
+   if ( access_check[GET_WP()][access][comb] )
+#else
+   if ( access_check[access][comb] )
+#endif /* SPC486 */
+      {
+      /* Protection Failure */
+      SET_CR(CR_PFLA, lin);
+      PF((IU16)(access << 1 | 1), FAULT_LIN2PHY_PROTECT_FAIL);
+      }
+
+   /* OK - allocate cache entry (simple round-robin over the sets) */
+   set = next_set[entry];
+   next_set[entry] += 1;
+   next_set[entry] &= 0x3;	/* 0,1,2,3,0,1,2.... */
+
+   e = &tlb[set][entry];
+
+#ifdef FAST_TLB
+
+   /* Clear any page_index for old entry */
+   if ( e->v )
+      {
+      page_index[e->la >> DIR_TBL_SHIFT] = PI_NOT_VALID;
+      }
+
+#endif /* FAST_TLB */
+
+   e->la = lookup;
+   e->v = TRUE;
+   e->mode = comb;
+   e->pa = pte & PE_PFA_MASK;
+   e->d = !read_op;	/* entry is dirty iff created by a write */
+
+#ifdef FAST_TLB
+
+   /* Set up page_index and associated addresses */
+   pi = set << PI_SET_SHIFT | entry << PI_ENTRY_SHIFT;
+   page_index[e->la >> DIR_TBL_SHIFT] = PI_VALID | pi;
+
+   /* minimal mappings (supervisor read is always permitted) */
+   page_address[pi | PG_S | PG_R] = e->pa;
+   page_address[pi | PG_S | PG_W] = NO_MAPPED_ADDRESS;
+   page_address[pi | PG_U | PG_R] = NO_MAPPED_ADDRESS;
+   page_address[pi | PG_U | PG_W] = NO_MAPPED_ADDRESS;
+
+   /* now augment mappings if possible */
+   if ( e->d )
+      {
+      page_address[pi | PG_S | PG_W] = e->pa;
+      }
+
+   if ( e->mode >= PG_U )
+      {
+      page_address[pi | PG_U | PG_R] = e->pa;
+
+      if ( e->mode & PG_W && e->d )
+	 {
+	 page_address[pi | PG_U | PG_W] = e->pa;
+	 }
+      }
+
+#endif /* FAST_TLB */
+
+   /* update in memory page entries (set Accessed, and Dirty on write) */
+   new_pde = pde | PE_ACCESSED;
+   new_pte = pte | PE_ACCESSED;
+
+   if ( e->d )
+      {
+      new_pte |= PE_DIRTY;
+      }
+
+   if (new_pte != pte)
+      {
+      phy_write_dword(pte_addr, new_pte);
+#ifdef PIG
+      save_last_xcptn_details("PTE %08x: %03x => %03x", pte_addr, pte & 0xFFF, new_pte & 0xFFF, 0, 0);
+      if (((new_pte ^ pte) == PE_ACCESSED) && ignore_page_accessed())
+	 cannot_phy_write_byte(pte_addr, ~PE_ACCESSED);
+#endif
+      }
+
+   if (new_pde != pde)
+      {
+      phy_write_dword(pde_addr, new_pde);
+#ifdef PIG
+      save_last_xcptn_details("PDE %08x: %03x => %03x", pde_addr, pde & 0xFFF, new_pde & 0xFFF, 0, 0);
+      if ((new_pde ^ pde) == PE_ACCESSED)
+	 cannot_phy_write_byte(pde_addr, ~PE_ACCESSED);
+#endif
+      }
+
+   /* return newly cached physical address */
+   return e->pa | lin & OFFSET_MASK;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* TLB Test Operation ie writes to Test Registers. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*
+   Emulate writes to the i486 test registers (TR6 = command, TR7 = data)
+   used to exercise the TLB.  TCR_C_MASK selects the operation:
+   C = 1 performs a lookup of the addressed TLB entry set, C = 0 writes
+   a TLB entry directly from the test register contents.
+ */
+GLOBAL VOID
+test_tlb()
+   {
+   ISM32 set, entry;
+   TLB_ENTRY *e;	/* current TLB entry */
+   IU32 tcr;	/* local copy of test command register */
+   IU32 tdr;	/* local copy of test data register */
+   IU32 lookup;	/* linear address to be looked up */
+   BOOL reqd_v;	/* validity required in lookup mode */
+   IU32 temp_u;	/* U/S to be set in write mode */
+
+   /* NOTE(review): unconditional debug trace on stderr - confirm it is
+      intended to remain in production builds. */
+   fprintf(stderr, "Testing TLB.\n");
+
+   tcr = GET_TR(TR_TCR);
+   tdr = GET_TR(TR_TDR);
+   entry = tcr >> DIR_TBL_SHIFT & 0x7;	/* Take bits 14-12 */
+
+   if ( tcr & TCR_C_MASK )
+      {
+      /* C = 1 => lookup TLB entry */
+      lookup = tcr & TCR_LA_MASK;
+      reqd_v = (tcr & TCR_V_MASK) != 0;
+
+      /* Search every set at the selected entry index. */
+      for ( set = 0; set < NR_TLB_SETS; set++ )
+	 {
+	 /* Note search in test mode includes the validity bit */
+	 e = &tlb[set][entry];
+	 if ( e->v == reqd_v && e->la == lookup )
+	    {
+	    /* HIT */
+
+	    /* Report physical address, hit flag and replacement set
+	       back through the test data register. */
+	    tdr = e->pa;	/* write phys addr */
+	    tdr = tdr | TDR_HT_MASK;	/* HT = 1 */
+	    tdr = tdr | set << TDR_REP_SHIFT;	/* REP = set */
+	    SET_TR(TR_TDR, tdr);
+
+	    tcr = tcr & ~TCR_ATTR_MASK;	/* clear all attributes */
+
+	    /* set attributes from cached values */
+	    if ( e->d )
+	       tcr = tcr | TCR_D_MASK;
+	    else
+	       tcr = tcr | TCR_ND_MASK;
+
+	    if ( e->mode & PG_U )
+	       tcr = tcr | TCR_U_MASK;
+	    else
+	       tcr = tcr | TCR_NU_MASK;
+
+	    if ( e->mode & PG_W )
+	       tcr = tcr | TCR_W_MASK;
+	    else
+	       tcr = tcr | TCR_NW_MASK;
+
+	    SET_TR(TR_TCR, tcr);
+	    return;
+	    }
+	 }
+
+      /* lookup MISS */
+      tdr = tdr & ~TDR_HT_MASK;	/* HT = 0 */
+      SET_TR(TR_TDR, tdr);
+      }
+   else
+      {
+      /* C = 0 => write TLB entry */
+
+      if ( tdr & TDR_HT_MASK )
+	 {
+	 /* REP field gives set */
+	 set = (tdr & TDR_REP_MASK) >> TDR_REP_SHIFT;
+	 }
+      else
+	 {
+	 /* choose set ourselves */
+	 set = next_set[entry];
+	 next_set[entry] += 1;
+	 next_set[entry] &= 0x3;	/* 0,1,2,3,0,1,2.... */
+	 }
+
+      e = &tlb[set][entry];
+
+#ifdef FAST_TLB
+
+      /* Clear any page_index for old entry */
+      if ( e->v )
+	 {
+	 page_index[e->la >> DIR_TBL_SHIFT] = PI_NOT_VALID;
+	 }
+
+#endif /* FAST_TLB */
+
+      /* set up cache info. */
+      e->pa = tdr & TDR_PA_MASK;
+      e->la = tcr & TCR_LA_MASK;
+      e->v = (tcr & TCR_V_MASK) != 0;
+      e->d = (tcr & TCR_D_MASK) != 0;
+      /* Build mode as W in bit 0 and U in bit 1 (matches PG_U = 0x2). */
+      e->mode = (tcr & TCR_W_MASK) != 0;
+      temp_u = (tcr & TCR_U_MASK) != 0;
+      e->mode = e->mode | temp_u << 1;
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Map external linear address to physical address. */
+/* Used only by functions external to the C CPU. */
+/* Does not take #PF and does not alter contents of TLB. */
+/* Returns TRUE if mapping done, else FALSE. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL IBOOL
+xtrn2phy IFN3
+   (
+   LIN_ADDR, lin,	/* Linear Address */
+   IUM8, access_request,	/* access mode request */
+			/* Bit 0 => R/W (as per 486) */
+			/* Bit 1 => U/S (as per 486) */
+			/* Bit 2 => if set only return mapping
+			   if accessed and dirty bits are set
+			   for the required address translation.
+			 */
+   PHY_ADDR *, phy	/* pntr to Physical Address */
+   )
+   {
+   IU32 pde_addr;	/* Address of PDE */
+   IU32 pte_addr;	/* Address of PTE */
+   IU32 pde;	/* Page Directory Entry */
+   IU32 pte;	/* Page Table Entry */
+   IU32 new_pde;	/* Page Directory Entry (to write back) */
+   IU32 new_pte;	/* Page Table Entry (to write back) */
+
+   ISM32 set, entry;
+   IUM8 access;	/* 486 access mode */
+   BOOL read_op;	/* true if read operation */
+   IU32 comb;	/* Combined protection of pde and pte */
+   IU32 lookup;	/* Linear address minus offset */
+   IU8 pi;	/* page_index entry */
+   IU32 ma;	/* mapped address */
+   TLB_ENTRY *e;	/* current tlb entry */
+
+   access = access_request & 0x3;	/* isolate 486 part of access mode */
+
+#ifdef FAST_TLB
+
+   /* Search optimised format TLB */
+   if ( pi = page_index[lin >> DIR_TBL_SHIFT] )
+      {
+      /* we have hit for the page, get mapped address */
+      if ( ma = page_address[(pi & PI_SET_ENTRY_MASK) + access] )
+	 {
+	 /* we have hit for access type */
+	 *phy = ma | lin & OFFSET_MASK;
+	 return TRUE;
+	 }
+      }
+
+   /* Otherwise do things the Intel way. */
+
+#endif /* FAST_TLB */
+
+   /* Check for entry in TLB */
+
+   entry = lin >> DIR_TBL_SHIFT & 0x07;	/* isolate bits 14-12 */
+   lookup = lin & ~OFFSET_MASK;
+   read_op = (access & PG_W) ? FALSE : TRUE;
+
+   for ( set = 0; set < NR_TLB_SETS; set++ )
+      {
+      e = &tlb[set][entry];
+      /* A cached entry can only satisfy a write if its dirty bit is
+         already set. */
+      if ( e->v && e->la == lookup && (read_op || e->d) )
+	 {
+	 /* Cache Hit */
+
+	 /* check access validity */
+#ifdef SPC486
+	 if ( access_check[GET_WP()][access][e->mode] )
+#else
+	 if ( access_check[access][e->mode] )
+#endif /* SPC486 */
+	    {
+	    return FALSE;
+	    }
+
+	 *phy = e->pa | lin & OFFSET_MASK;
+	 return TRUE;
+	 }
+      }
+
+   /* Cache Miss - walk the page tables directly; unlike lin2phy this
+      routine never raises #PF and never loads the TLB. */
+
+   /* check that pde is present */
+   pde_addr = (GET_CR(CR_PDBR) & PE_PFA_MASK) +
+	      ((lin >> DIR_SHIFT & DIR_MASK) << 2);
+   pde = phy_read_dword(pde_addr);
+
+   if ( (pde & PE_P_MASK) == 0 )
+      return FALSE;	/* pde not present */
+
+   /* check that pte is present */
+   pte_addr = (pde & PE_PFA_MASK) +
+	      ((lin >> TBL_SHIFT & TBL_MASK) << 2);
+   pte = phy_read_dword(pte_addr);
+
+   if ( (pte & PE_P_MASK) == 0 )
+      return FALSE;	/* pte not present */
+
+   /* combine pde and pte protection */
+   comb = PG_R | PG_S;
+   if ( (pde | pte) & PE_U_S_MASK )
+      comb |= PG_U;	/* at least one table says user */
+   if ( (pde & pte) & PE_R_W_MASK )
+      comb |= PG_W;	/* both tables allow write */
+
+   /* check access validity */
+#ifdef SPC486
+   if ( access_check[GET_WP()][access][comb] )
+#else
+   if ( access_check[access][comb] )
+#endif /* SPC486 */
+      {
+      return FALSE;
+      }
+
+   /* Finally check that A and D bits reflect the requested
+      translation. */
+   if ( access_request & 0x4 )	/* Bit 2 == 1 */
+      {
+      /*
+	 This check may be made in two ways.
+
+	 Firstly we might simply return FALSE, thus causing a new
+	 invocation of host_simulate() to run, so that assembler
+	 routines may load the TLB and set the accessed and dirty bits.
+
+	 Secondly we may just ensure that the accessed and dirty bits
+	 are set directly here. Providing we don't require that the
+	 TLB is faithfully emulated, this is a more efficient method.
+       */
+
+      /* Check current settings */
+      if ( ((pde & PE_ACCESSED) == 0) ||
+	   ((pte & PE_ACCESSED) == 0) ||
+	   (!read_op && ((pte & PE_DIRTY) == 0)) )
+	 {
+	 /* update in memory page entries */
+	 new_pde = pde | PE_ACCESSED;
+	 new_pte = pte | PE_ACCESSED;
+
+	 if ( !read_op )
+	    {
+	    new_pte |= PE_DIRTY;
+	    }
+
+	 if (new_pte != pte)
+	    {
+	    phy_write_dword(pte_addr, new_pte);
+#ifdef PIG
+	    save_last_xcptn_details("PTE %08x: %03x -> %03x", pte_addr, pte & 0xFFF, new_pte & 0xFFF, 0, 0);
+	    /* NOTE(review): unlike the lin2phy PTE path this does not
+	       test ignore_page_accessed() before suppressing the
+	       accessed-bit comparison - confirm the asymmetry. */
+	    if ((new_pte ^ pte) == PE_ACCESSED)
+	       cannot_phy_write_byte(pte_addr, ~PE_ACCESSED);
+#endif
+	    }
+	 if (new_pde != pde)
+	    {
+	    phy_write_dword(pde_addr, new_pde);
+#ifdef PIG
+	    save_last_xcptn_details("PDE %08x: %03x -> %03x", pde_addr, pde & 0xFFF, new_pde & 0xFFF, 0, 0);
+	    if (((new_pde ^ pde) == PE_ACCESSED) && ignore_page_accessed())
+	       cannot_phy_write_byte(pde_addr, ~PE_ACCESSED);
+#endif
+	    }
+	 }
+      }
+
+   *phy = (pte & PE_PFA_MASK) | lin & OFFSET_MASK;
+   return TRUE;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* DEBUGGING. Dump tlb information. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+zdumptlb
+
+IFN1(
+	FILE *, out
+    )
+
+
+   {
+   ISM32 s;	/* set being dumped */
+   ISM32 i;	/* entry index within the set */
+   TLB_ENTRY *t;	/* entry being printed */
+
+   /* One header line, then one line per cached translation. */
+   fprintf(out, "set entry lin_addr V phy_addr D U W\n");
+
+   for ( s = 0; s < NR_TLB_SETS; s++ )
+      for ( i = 0; i < NR_TLB_ENTRIES; i++ )
+	 {
+	 t = &tlb[s][i];
+	 /* U lives in mode bit 1, W in mode bit 0. */
+	 fprintf(out, " %d %d %08x %d %08x %d %d %d\n",
+	    s, i, t->la, t->v, t->pa, t->d,
+	    (t->mode & BIT1_MASK) != 0,
+	    t->mode & BIT0_MASK);
+	 }
+   }
+
+#ifdef PIG
+
+GLOBAL void Pig_NotePDECacheAccess IFN2(IU32, linearAddress, IU32, accessBits)
+{
+	/* Deliberately empty: PDE cache accesses need no pigger action. */
+}
+
+/*
+   Pigger hook: note that the assembler CPU used a cached PTE for
+   linearAddress with the given access bits.  If our TLB would also
+   satisfy the access (without faulting), forward to
+   deal_with_pte_cache_hit so the in-memory PTE's accessed bit is
+   excluded from comparison.  A miss needs no action.
+ */
+GLOBAL void Pig_NotePTECacheAccess IFN2(IU32, linearAddress, IU32, accessBits)
+{
+	IU8 pi;	/* page_index entry */
+	IU32 ma;	/* mapped address */
+
+	ISM32 set, entry;
+	IU32 lookup;	/* Linear address minus offset */
+	BOOL read_op;	/* true if read operation */
+	TLB_ENTRY *e;	/* current tlb entry */
+
+#ifdef FAST_TLB
+
+	/* Search optimised format TLB */
+	if ( pi = page_index[linearAddress >> DIR_TBL_SHIFT] )
+	{
+		/* we have hit for the page, get mapped address */
+		if ( ma = page_address[(pi & PI_SET_ENTRY_MASK) + accessBits] )
+		{
+			deal_with_pte_cache_hit(linearAddress & OFFSET_MASK);
+			return;
+		}
+	}
+
+	/* Otherwise do things the Intel way. */
+
+#endif /* FAST_TLB */
+
+	/* Check for entry in TLB <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
+
+	entry = linearAddress >> DIR_TBL_SHIFT & 0x07;	/* isolate bits 14-12 */
+	lookup = linearAddress & ~OFFSET_MASK;
+	read_op = (accessBits & PG_W) ? FALSE : TRUE;
+
+	for ( set = 0; set < NR_TLB_SETS; set++ )
+	{
+		e = &tlb[set][entry];
+		/* Writes only hit if the cached dirty bit is set. */
+		if ( e->v && e->la == lookup && (read_op || e->d) )
+		{
+			/* Cache Hit <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
+
+			/* check access validity */
+#ifdef SPC486
+			if ( access_check[GET_WP()][accessBits][e->mode] )
+#else
+			if ( access_check[accessBits][e->mode] )
+#endif /* SPC486 */
+			{
+				/* would page fault. Ignore it */
+				return;
+			}
+
+			/* NOTE(review): only the page offset is passed on, as in
+			   the FAST_TLB path above - confirm that
+			   deal_with_pte_cache_hit really wants offset-only and not
+			   the full linear address. */
+			deal_with_pte_cache_hit(linearAddress & OFFSET_MASK);
+			return;
+		}
+	}
+	/* not in cache - no need to do anything */
+}
+
+/*
+   Walk the page tables for linearAddress; if both the PDE and PTE are
+   present, tell the pigger (via cannot_phy_write_byte with mask
+   ~PE_ACCESSED) not to compare the PTE's accessed bit.  Absent
+   entries are silently ignored.
+ */
+LOCAL void
+deal_with_pte_cache_hit IFN1(IU32, linearAddress)
+{
+	IU32 pde_addr;	/* Address of Page Directory Entry */
+	IU32 pde;	/* Page Directory Entry */
+	IU32 pte_addr;	/* Address of Page Table Entry */
+	IU32 pte;	/* Page Table Entry */
+
+	/* check that pde is present */
+	pde_addr = (GET_CR(CR_PDBR) & PE_PFA_MASK) +
+		   ((linearAddress >> DIR_SHIFT & DIR_MASK) << 2);
+	pde = phy_read_dword(pde_addr);
+
+	/* check pde present */
+	if ( (pde & PE_P_MASK) == 0 )
+		return;
+
+	/* check that pte is present */
+	pte_addr = (pde & PE_PFA_MASK) + ((linearAddress >> TBL_SHIFT & TBL_MASK) << 2);
+	pte = phy_read_dword(pte_addr);
+
+	if ( (pte & PE_P_MASK) == 0 )
+		return;
+
+	/* fprintf(trace_file, "deal_with_pte_cache_hit: addr %08lx, pte=%08lx @ %08lx\n",
+	 *	linearAddress, pte, pte_addr);
+	 */
+	/* Exclude the accessed bit of this PTE from pig comparison. */
+	cannot_phy_write_byte(pte_addr, ~PE_ACCESSED);
+}
+
+#endif
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_tlb.h b/private/mvdm/softpc.new/base/ccpu386/c_tlb.h
new file mode 100644
index 000000000..d1d0d4f32
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_tlb.h
@@ -0,0 +1,39 @@
+/*[
+
+c_tlb.h
+
+Translation Lookaside Buffer Emulation.
+---------------------------------------
+
+LOCAL CHAR SccsID[]="@(#)c_tlb.h 1.5 02/25/94";
+
+]*/
+
+
+/*
+ Page Accessor Types.
+ */
+#define PG_S 0x0 /* Supervisor */
+#define PG_U 0x2 /* User */
+
+IMPORT VOID flush_tlb IPT0();
+
+IMPORT VOID invalidate_tlb_entry IPT1
+ (
+ IU32, lin
+ );
+
+IMPORT IU32 lin2phy IPT2
+ (
+ IU32, lin,
+ ISM32, access
+ );
+
+IMPORT VOID test_tlb IPT0();
+
+extern IBOOL xtrn2phy IPT3
+ (
+ LIN_ADDR, lin,
+ IUM8, access_request,
+ PHY_ADDR *, phy
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_tsksw.c b/private/mvdm/softpc.new/base/ccpu386/c_tsksw.c
new file mode 100644
index 000000000..9247748ad
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_tsksw.c
@@ -0,0 +1,589 @@
+/*[
+
+c_tsksw.c
+
+LOCAL CHAR SccsID[]="@(#)c_tsksw.c 1.11 03/03/95";
+
+Task Switch Support.
+--------------------
+
+]*/
+
+
+#include <stdio.h>
+#include <insignia.h>
+
+#include <host_def.h>
+
+#include <xt.h>
+#include CpuH
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_tsksw.h>
+#include <c_page.h>
+#include <mov.h>
+#include <fault.h>
+
+/*[
+
+ The 286 TSS is laid out as follows:-
+
+ =============================
+ | Back Link to TSS Selector | +00 =
+ | SP for CPL 0 | +02 *
+ | SS for CPL 0 | +04 *
+ | SP for CPL 1 | +06 * Initial Stacks (STATIC)
+ | SS for CPL 1 | +08 *
+ | SP for CPL 2 | +0a *
+ | SS for CPL 2 | +0c *
+ | IP | +0e =
+ | FLAG Register | +10 =
+ | AX | +12 =
+ | CX | +14 =
+ | DX | +16 =
+ | BX | +18 =
+ | SP | +1a = Current State (DYNAMIC)
+ | BP | +1c =
+ | SI | +1e =
+ | DI | +20 =
+ | ES | +22 =
+ | CS | +24 =
+ | SS | +26 =
+ | DS | +28 =
+ | Task LDT Selector | +2a *
+ =============================
+
+ The 386 TSS is laid out as follows:-
+
+ ===========================================
+ | 0 | Back Link | +00 =
+ | ESP for CPL 0 | +04 *
+ | 0 | SS for CPL 0 | +08 *
+ | ESP for CPL 1 | +0c *
+ | 0 | SS for CPL 1 | +10 *
+ | ESP for CPL 2 | +14 *
+ | 0 | SS for CPL 2 | +18 *
+ | CR3 | +1c *
+ | EIP | +20 =
+ | EFLAG | +24 =
+ | EAX | +28 =
+ | ECX | +2c =
+ | EDX | +30 =
+ | EBX | +34 =
+ | ESP | +38 =
+ | EBP | +3c =
+ | ESI | +40 =
+ | EDI | +44 =
+ | 0 | ES | +48 =
+ | 0 | CS | +4c =
+ | 0 | SS | +50 =
+ | 0 | DS | +54 =
+ | 0 | FS | +58 =
+ | 0 | GS | +5c =
+ | 0 | LDT Selector | +60 *
+ | I/O Map Base Addr. | 0 |T| +64 *
+ |-----------------------------------------|
+ | ... |
+ |-----------------------------------------|
+ | I/O Permission Bit Map | +I/O Map Base Addr.
+ | |
+ |11111111| |
+ ===========================================
+
+]*/
+
+/*
+ Prototype our internal functions.
+ */
+LOCAL VOID load_LDT_in_task_switch
+
+IPT1(
+ IU16, tss_selector
+
+ );
+
+LOCAL VOID load_data_seg_new_task
+
+IPT2(
+ ISM32, indx,
+ IU16, selector
+
+ );
+
+
+#define IP_OFFSET_IN_286_TSS 0x0e
+#define IP_OFFSET_IN_386_TSS 0x20
+
+#define CR3_OFFSET_IN_386_TSS 0x1c
+
+#define LOCAL_BRK_ENABLE 0x155 /* LE,L3,L2,L1 and L0 bits of DCR */
+
+/*
+ =====================================================================
+ INTERNAL FUNCTIONS STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load LDT selector during a task switch. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+load_LDT_in_task_switch
+
+IFN1(
+	IU16, tss_selector	/* selector of the incoming TSS, used as the
+				   error code if a fault must be raised */
+    )
+
+
+   {
+   IU16 selector;
+   IU32 descr_addr;
+   CPU_DESCR entry;
+
+   /* The selector is already loaded into LDTR */
+   selector = GET_LDT_SELECTOR();
+
+   /* A null selector can be left alone */
+   if ( !selector_is_null(selector) )
+      {
+      /* must be in GDT */
+      if ( selector_outside_GDT(selector, &descr_addr) )
+	 {
+	 /* On each failure LDTR is nulled first so no stale LDT can be
+	    used; TS presumably raises #TS and does not return -
+	    confirm. */
+	 SET_LDT_SELECTOR(0);	/* invalidate selector */
+	 TS(tss_selector, FAULT_LOADLDT_SELECTOR);
+	 }
+
+      read_descriptor_linear(descr_addr, &entry);
+
+      /* is it really a LDT segment */
+      if ( descriptor_super_type(entry.AR) != LDT_SEGMENT )
+	 {
+	 SET_LDT_SELECTOR(0);	/* invalidate selector */
+	 TS(tss_selector, FAULT_LOADLDT_NOT_AN_LDT);
+	 }
+
+      /* must be present */
+      if ( GET_AR_P(entry.AR) == NOT_PRESENT )
+	 {
+	 SET_LDT_SELECTOR(0);	/* invalidate selector */
+	 TS(tss_selector, FAULT_LOADLDT_NOTPRESENT);
+	 }
+
+      /* ok, good selector, load register */
+      SET_LDT_BASE(entry.base);
+      SET_LDT_LIMIT(entry.limit);
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load a Data Segment Register (DS, ES, FS, GS) during */
+/* a Task Switch . */
+/* Take #GP(selector) if segment not valid */
+/* Take #NP(selector) if segment not present */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+load_data_seg_new_task
+
+IFN2(
+	ISM32, indx,	/* Segment Register identifier */
+	IU16, selector	/* value to be loaded */
+    )
+
+
+   {
+   /* Normal protected-mode data segment load first... */
+   load_data_seg(indx, selector);
+
+   if ( GET_VM() != 1 )
+      return;
+
+   /* ...then, in V86 mode only, rebuild the pseudo descriptor. */
+   load_pseudo_descr(indx);
+   }
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Switch tasks */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*
+   Perform a full 286/386 task switch: save outgoing machine state
+   into the current TSS, load TR with the incoming TSS, extract the
+   new machine state, then validate and load LDT, CS, SS and the data
+   segment registers.  The TSS flavour (286 vs 386) is chosen by the
+   busy-TSS descriptor type on each side.
+ */
+GLOBAL VOID
+switch_tasks
+
+IFN5(
+	BOOL, returning,	/* (I) if true doing return from task */
+	BOOL, nesting,	/* (I) if true switch with nesting */
+	IU16, TSS_selector,	/* (I) selector for new task */
+	IU32, descr,	/* (I) memory address of new task descriptor */
+	IU32, return_ip	/* (I) offset to restart old task at */
+    )
+
+
+   {
+   IU16 old_tss;	/* components of old descriptor */
+   IU8 old_AR;
+   IU32 old_descr;
+
+   CPU_DESCR new_tss;	/* components of new descriptor */
+
+   IU32 tss_addr;	/* variables used to put/get TSS state */
+   IU32 next_addr;
+   IU32 flags;
+   ISM32 save_cpl;
+   IU8 T_byte;	/* Byte holding the T bit */
+
+   IU32 ss_descr;	/* variables defining new SS and CS values */
+   CPU_DESCR ss_entry;
+   IU16 new_cs;
+   IU32 cs_descr;
+   CPU_DESCR cs_entry;
+
+   IU32 pdbr;	/* New value for PDBR */
+
+   if ( GET_TR_SELECTOR() == 0 )
+      TS(TSS_selector, FAULT_SWTASK_NULL_TR_SEL);
+
+   /* get new TSS info. */
+   read_descriptor_linear(descr, &new_tss);
+
+   /* calc address of descriptor related to old TSS */
+   old_tss = GET_TR_SELECTOR();
+   old_descr = GET_GDT_BASE() + GET_SELECTOR_INDEX_TIMES8(old_tss);
+   old_AR = spr_read_byte(old_descr+5);
+
+   /* SAVE OUTGOING STATE */
+
+   if ( GET_TR_AR_SUPER() == XTND_BUSY_TSS )
+      {
+      /* 386 TSS: save the 32-bit dynamic state (CR3 through GS). */
+
+      /* check outgoing TSS is large enough to save current state */
+      if ( GET_TR_LIMIT() < 0x67 )
+	 {
+	 TS(TSS_selector, FAULT_SWTASK_BAD_TSS_SIZE_1);
+	 }
+
+      tss_addr = GET_TR_BASE();
+      next_addr = tss_addr + CR3_OFFSET_IN_386_TSS;
+
+      /* NOTE(review): literal 3 here; the load path below uses
+	 CR_PDBR - confirm they denote the same register. */
+      spr_write_dword(next_addr, GET_CR(3));
+      next_addr += 4;
+
+      spr_write_dword(next_addr, return_ip);
+      next_addr += 4;
+
+      flags = c_getEFLAGS();
+      if ( returning )
+	 flags = flags & ~BIT14_MASK;	/* clear NT */
+      spr_write_dword(next_addr, (IU32)flags);
+#ifdef PIG
+      /* Note the possibility of unknown flags "pushed" */
+      record_flags_addr(next_addr);
+#endif /* PIG */
+      next_addr += 4;
+
+      spr_write_dword(next_addr, GET_EAX());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_ECX());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_EDX());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_EBX());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_ESP());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_EBP());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_ESI());
+      next_addr += 4;
+      spr_write_dword(next_addr, GET_EDI());
+      next_addr += 4;
+      /* Selectors occupy the low word of each dword slot; the high
+	 word is left untouched, hence word writes with +4 strides. */
+      spr_write_word(next_addr, GET_ES_SELECTOR());
+      next_addr += 4;
+      spr_write_word(next_addr, GET_CS_SELECTOR());
+      next_addr += 4;
+      spr_write_word(next_addr, GET_SS_SELECTOR());
+      next_addr += 4;
+      spr_write_word(next_addr, GET_DS_SELECTOR());
+      next_addr += 4;
+      spr_write_word(next_addr, GET_FS_SELECTOR());
+      next_addr += 4;
+      spr_write_word(next_addr, GET_GS_SELECTOR());
+      }
+   else   /* 286 TSS */
+      {
+      /* check outgoing TSS is large enough to save current state */
+      if ( GET_TR_LIMIT() < 0x29 )
+	 {
+	 TS(TSS_selector, FAULT_SWTASK_BAD_TSS_SIZE_2);
+	 }
+
+      tss_addr = GET_TR_BASE();
+      next_addr = tss_addr + IP_OFFSET_IN_286_TSS;
+
+      spr_write_word(next_addr, (IU16)return_ip);
+      next_addr += 2;
+
+      flags = getFLAGS();
+      if ( returning )
+	 flags = flags & ~BIT14_MASK;	/* clear NT */
+      spr_write_word(next_addr, (IU16)flags);
+#ifdef PIG
+      /* Note the possibility of unknown flags "pushed" */
+      record_flags_addr(next_addr);
+#endif /* PIG */
+      next_addr += 2;
+
+      spr_write_word(next_addr, GET_AX());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_CX());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_DX());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_BX());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_SP());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_BP());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_SI());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_DI());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_ES_SELECTOR());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_CS_SELECTOR());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_SS_SELECTOR());
+      next_addr += 2;
+      spr_write_word(next_addr, GET_DS_SELECTOR());
+      }
+
+   /* LOAD TASK REGISTER */
+
+   /* mark incoming TSS as busy */
+   new_tss.AR |= BIT1_MASK;
+   spr_write_byte(descr+5, (IU8)new_tss.AR);
+
+   /* update task register */
+   SET_TR_SELECTOR(TSS_selector);
+   SET_TR_BASE(new_tss.base);
+   SET_TR_LIMIT(new_tss.limit);
+   SET_TR_AR_SUPER(descriptor_super_type(new_tss.AR));
+   tss_addr = GET_TR_BASE();
+
+   /* save back link if nesting, else make outgoing TSS available */
+   if ( nesting )
+      {
+      spr_write_word(tss_addr, old_tss);
+      }
+   else
+      {
+      /* mark old TSS as available */
+      old_AR = old_AR & ~BIT1_MASK;
+      spr_write_byte(old_descr+5, old_AR);
+      }
+
+   /* Note: Exceptions now happen in the incoming task */
+
+   /* EXTRACT NEW STATE */
+
+   if ( GET_TR_AR_SUPER() == XTND_BUSY_TSS )
+      {
+      /* check new TSS is large enough to extract new state from */
+      if ( GET_TR_LIMIT() < 0x67 )
+	 TS(TSS_selector, FAULT_SWTASK_BAD_TSS_SIZE_3);
+
+      next_addr = tss_addr + CR3_OFFSET_IN_386_TSS;
+      pdbr = (IU32)spr_read_dword(next_addr);
+      if ( pdbr != GET_CR(CR_PDBR) )
+	 {
+	 /* Only reload PDBR if different */
+	 MOV_CR(CR_PDBR, pdbr);
+	 }
+
+      next_addr = tss_addr + IP_OFFSET_IN_386_TSS;
+
+      SET_EIP(spr_read_dword(next_addr)); next_addr += 4;
+
+      flags = (IU32)spr_read_dword(next_addr); next_addr += 4;
+      save_cpl = GET_CPL();
+      SET_CPL(0);	/* act like highest privilege to set all flags */
+      c_setEFLAGS(flags);
+      SET_CPL(save_cpl);
+
+      /* BIT17 of EFLAGS is VM */
+      if ( flags & BIT17_MASK )
+	 fprintf(stderr, "(Task Switch)Entering V86 Mode.\n");
+
+      SET_EAX(spr_read_dword(next_addr)); next_addr += 4;
+      SET_ECX(spr_read_dword(next_addr)); next_addr += 4;
+      SET_EDX(spr_read_dword(next_addr)); next_addr += 4;
+      SET_EBX(spr_read_dword(next_addr)); next_addr += 4;
+      SET_ESP(spr_read_dword(next_addr)); next_addr += 4;
+      SET_EBP(spr_read_dword(next_addr)); next_addr += 4;
+      SET_ESI(spr_read_dword(next_addr)); next_addr += 4;
+      SET_EDI(spr_read_dword(next_addr)); next_addr += 4;
+
+      SET_ES_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+      SET_CS_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+      SET_SS_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+      SET_DS_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+      SET_FS_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+      SET_GS_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+
+      SET_LDT_SELECTOR(spr_read_word(next_addr)); next_addr += 4;
+      T_byte = spr_read_byte(next_addr);
+      }
+   else   /* 286 TSS */
+      {
+      /* check new TSS is large enough to extract new state from */
+      if ( GET_TR_LIMIT() < 0x2b )
+	 TS(TSS_selector, FAULT_SWTASK_BAD_TSS_SIZE_4);
+
+      next_addr = tss_addr + IP_OFFSET_IN_286_TSS;
+
+      SET_EIP(spr_read_word(next_addr)); next_addr += 2;
+
+      flags = (IU32)spr_read_word(next_addr); next_addr += 2;
+      save_cpl = GET_CPL();
+      SET_CPL(0);	/* act like highest privilege to set all flags */
+      setFLAGS(flags);
+      SET_VM(0);	/* a 286 task can never be V86 */
+      SET_CPL(save_cpl);
+
+      SET_AX(spr_read_word(next_addr)); next_addr += 2;
+      SET_CX(spr_read_word(next_addr)); next_addr += 2;
+      SET_DX(spr_read_word(next_addr)); next_addr += 2;
+      SET_BX(spr_read_word(next_addr)); next_addr += 2;
+      SET_SP(spr_read_word(next_addr)); next_addr += 2;
+      SET_BP(spr_read_word(next_addr)); next_addr += 2;
+      SET_SI(spr_read_word(next_addr)); next_addr += 2;
+      SET_DI(spr_read_word(next_addr)); next_addr += 2;
+
+      SET_ES_SELECTOR(spr_read_word(next_addr)); next_addr += 2;
+      SET_CS_SELECTOR(spr_read_word(next_addr)); next_addr += 2;
+      SET_SS_SELECTOR(spr_read_word(next_addr)); next_addr += 2;
+      SET_DS_SELECTOR(spr_read_word(next_addr)); next_addr += 2;
+      /* 286 TSS has no FS/GS; force them null. */
+      SET_FS_SELECTOR(0);
+      SET_GS_SELECTOR(0);
+
+      SET_LDT_SELECTOR(spr_read_word(next_addr));
+      T_byte = 0;
+      }
+
+   /* invalidate cache entries for segment registers */
+   SET_CS_AR_R(0); SET_CS_AR_W(0);
+   SET_DS_AR_R(0); SET_DS_AR_W(0);
+   SET_ES_AR_R(0); SET_ES_AR_W(0);
+   SET_SS_AR_R(0); SET_SS_AR_W(0);
+   SET_FS_AR_R(0); SET_FS_AR_W(0);
+   SET_GS_AR_R(0); SET_GS_AR_W(0);
+
+   /* update NT bit */
+   if ( nesting )
+      SET_NT(1);
+   else
+      if ( !returning )
+	 SET_NT(0);
+
+   /* update TS */
+   SET_CR(CR_STAT, GET_CR(CR_STAT) | BIT3_MASK);
+
+   /* kill local breakpoints */
+   SET_DR(DR_DCR, GET_DR(DR_DCR) & ~LOCAL_BRK_ENABLE);
+
+   /* set up trap on T-bit */
+   if ( T_byte & BIT0_MASK )
+      {
+      SET_DR(DR_DSR, GET_DR(DR_DSR) | DSR_BT_MASK);
+      }
+
+   /* ERROR CHECKING */
+
+   /* check new LDT and load hidden cache if ok */
+   load_LDT_in_task_switch(TSS_selector);
+
+   if ( GET_VM() == 1 )
+      {
+      SET_CPL(3);	/* set V86 privilege level */
+      /* CS selector requires no checks */
+      }
+   else
+      {
+      /* change CPL to that of incoming code segment */
+      SET_CPL(GET_SELECTOR_RPL(GET_CS_SELECTOR()));
+
+      /* check new code selector... */
+      new_cs = GET_CS_SELECTOR();
+      if ( selector_outside_GDT_LDT(new_cs, &cs_descr) )
+	 TS(new_cs, FAULT_SWTASK_BAD_CS_SELECTOR);
+
+      read_descriptor_linear(cs_descr, &cs_entry);
+
+      /* check type and privilege of new cs selector */
+      switch ( descriptor_super_type(cs_entry.AR) )
+	 {
+      case CONFORM_NOREAD_CODE:
+      case CONFORM_READABLE_CODE:
+	 /* check code is present */
+	 if ( GET_AR_P(cs_entry.AR) == NOT_PRESENT )
+	    NP(new_cs, FAULT_SWTASK_CONFORM_CS_NP);
+
+	 /* privilege check requires DPL <= CPL */
+	 if ( GET_AR_DPL(cs_entry.AR) > GET_CPL() )
+	    TS(new_cs, FAULT_SWTASK_ACCESS_1);
+	 break;
+
+      case NONCONFORM_NOREAD_CODE:
+      case NONCONFORM_READABLE_CODE:
+	 /* check code is present */
+	 if ( GET_AR_P(cs_entry.AR) == NOT_PRESENT )
+	    NP(new_cs, FAULT_SWTASK_NOCONFORM_CS_NP);
+
+	 /* privilege check requires DPL == CPL */
+	 if ( GET_AR_DPL(cs_entry.AR) != GET_CPL() )
+	    TS(new_cs, FAULT_SWTASK_ACCESS_2);
+	 break;
+
+      default:
+	 TS(new_cs, FAULT_SWTASK_BAD_SEG_TYPE);
+	 }
+      }
+
+   /* code ok, load hidden cache */
+   /* NOTE(review): in the V86 path above new_cs, cs_descr and cs_entry
+      are never assigned, so this call reads uninitialized locals when
+      GET_VM() == 1 - confirm the intended V86 CS handling (the SS case
+      below has an explicit V86 branch). */
+   load_CS_cache(new_cs, cs_descr, &cs_entry);
+#if 0
+   /* retain operand size from gate until first instruction fetch */
+   if ( GET_CS_AR_X() == USE16 )
+      SET_OPERAND_SIZE(USE16);
+   else   /* USE32 */
+      SET_OPERAND_SIZE(USE32);
+#endif
+
+   /* check new SS and load if ok */
+   if ( GET_VM() == 1 )
+      {
+      /* SS selector requires no checks */
+      load_stack_seg(GET_SS_SELECTOR());
+      load_pseudo_descr(SS_REG);
+      }
+   else
+      {
+      validate_SS_on_stack_change(GET_CPL(), GET_SS_SELECTOR(),
+				  &ss_descr, &ss_entry);
+      load_SS_cache(GET_SS_SELECTOR(), ss_descr, &ss_entry);
+      }
+
+   /* finally check new DS, ES, FS and GS */
+   load_data_seg_new_task(DS_REG, GET_DS_SELECTOR());
+   load_data_seg_new_task(ES_REG, GET_ES_SELECTOR());
+   load_data_seg_new_task(FS_REG, GET_FS_SELECTOR());
+   load_data_seg_new_task(GS_REG, GET_GS_SELECTOR());
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_tsksw.h b/private/mvdm/softpc.new/base/ccpu386/c_tsksw.h
new file mode 100644
index 000000000..26f1239ac
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_tsksw.h
@@ -0,0 +1,31 @@
+/*[
+
+c_tsksw.h
+
+LOCAL CHAR SccsID[]="@(#)c_tsksw.h 1.5 02/09/94";
+
+Task Switch Support.
+--------------------
+
+]*/
+
+
+/*
+ Switch Task: Control Options.
+ */
+#define NESTING 1
+#define RETURNING 1
+#define NOT_NESTING 0
+#define NOT_RETURNING 0
+
+
+IMPORT VOID switch_tasks
+
+IPT5(
+ BOOL, returning,
+ BOOL, nesting,
+ IU16, TSS_selector,
+ IU32, descr,
+ IU32, return_ip
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_xcptn.c b/private/mvdm/softpc.new/base/ccpu386/c_xcptn.c
new file mode 100644
index 000000000..69fe8c767
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_xcptn.c
@@ -0,0 +1,641 @@
+/*[
+
+c_xcptn.c
+
+LOCAL CHAR SccsID[]="@(#)c_xcptn.c 1.14 01/31/95";
+
+Exception Handling Support.
+---------------------------
+
+]*/
+
+
+#include <stdio.h>
+#include <insignia.h>
+
+#include <host_def.h>
+#include StringH
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_intr.h>
+#include <c_xtrn.h>
+#include <ccpupig.h>
+#include <fault.h>
+
+/*
+ Allow print out of exceptions or disallow it.
+ */
+GLOBAL BOOL show_exceptions = FALSE;
+GLOBAL BOOL trap_exceptions = FALSE;
+LOCAL BOOL first_exception = TRUE;
+
+#define check_exception_env() \
+{ \
+ IMPORT char *host_getenv IPT1 (char *, name); \
+ if (first_exception) \
+ { \
+ char *env = host_getenv ("CCPU_SHOW_EXCEPTIONS"); \
+ if (env != NULL) \
+ { \
+ show_exceptions = TRUE; \
+ if (strcasecmp(env, "TRAP") == 0) \
+ trap_exceptions = TRUE; \
+ } \
+ } \
+ first_exception = FALSE; \
+}
+
+IMPORT FILE *trace_file;
+IMPORT IBOOL took_absolute_toc;
+
+/*
+ Intel interrupt(exception) numbers.
+ */
+#define I0_INT_NR 0
+#define I1_INT_NR 1
+#define I5_INT_NR 5
+#define I6_INT_NR 6
+#define I7_INT_NR 7
+#define I16_INT_NR 16
+#define DF_INT_NR 8
+#define GP_INT_NR 13
+#define NP_INT_NR 11
+#define PF_INT_NR 14
+#define SF_INT_NR 12
+#define TS_INT_NR 10
+
+#define NULL_ERROR_CODE (IU16)0
+
+/*
+ Intel IDT Error Code format.
+ */
+#define IDT_VECTOR_MASK 0xff
+#define IDT_VECTOR_SHIFT 3
+#define IDT_INDICATOR_BIT 2
+
+/*
+ Interrupt/Fault Status.
+ */
+GLOBAL BOOL doing_contributory;
+GLOBAL BOOL doing_page_fault;
+GLOBAL BOOL doing_double_fault;
+GLOBAL BOOL doing_fault; /* true: FAULT, false: TRAP or ABORT */
+GLOBAL ISM32 EXT; /* external/internal source */
+GLOBAL IU32 CCPU_save_EIP; /* IP at start of instruction */
+
+/*
+ Prototype our internal functions.
+ */
+LOCAL VOID check_for_double_fault IPT1(IU16, xcode);
+
+LOCAL VOID check_for_shutdown IPT1(IU16, xcode);
+
+LOCAL VOID benign_exception IPT3( ISM32, nmbr, ISM32, source, IU16, xcode);
+
+LOCAL VOID contributory_exception IPT3( IU16, selector, ISM32, nmbr, IU16, xcode);
+
+LOCAL VOID contributory_idt_exception IPT3( IU16, vector, ISM32, nmbr, IU16, xcode);
+
+LOCAL char *faultstr IPT1(ISM32, nmbr );
+
+
+/*
+ =====================================================================
+ INTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+/* Map an Intel exception number to a short mnemonic for trace output.
+   Out-of-range numbers are rendered as decimal digits in a static
+   buffer, so such a returned pointer is only valid until the next call. */
+LOCAL char *faultstr IFN1(ISM32, nmbr )
+{
+   char *faulttable[] =
+   {
+      "DIV", "DBG", "NMI", "BPT",
+      "OVF", "BND", "OPC", "NAV",
+      "DF", "9", "TSS", "NP", "SF",
+      "GP", "PF", "15", "FPE", "ALN"
+   };
+   /* big enough for any ISM32 printed in decimal (sign + 10 digits +
+      NUL); the original buf[4] overflowed for numbers of more than
+      three digits */
+   SAVED char buf[16];
+
+   /* table has 18 entries (0..17); the original 'nmbr > 16' test made
+      the "ALN" (17) entry unreachable and did not guard negatives */
+   if (nmbr < 0 || nmbr > 17)
+   {
+      sprintf(buf, "%d", nmbr);
+      return buf;
+   }
+   else
+      return faulttable[nmbr];
+}
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Determine if things are so bad we need a double fault. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+check_for_double_fault IFN1( IU16, xcode)
+   {
+   /* A contributory or page fault raised while another such fault is
+      still being serviced escalates to Double Fault. */
+   if ( !doing_contributory && !doing_page_fault )
+      return;
+   DF(xcode);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Determine if things are so bad we need to close down. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID check_for_shutdown IFN1(IU16, xcode)
+   {
+   /* A fault raised while a double fault is already being serviced is
+      the triple-fault case: the processor shuts down.  On a PC the
+      motherboard wiring turns shutdown into a reset. */
+   if ( doing_double_fault )
+      {
+      /* clear all outstanding fault state before restarting */
+      doing_contributory = FALSE;
+      doing_page_fault = FALSE;
+      doing_double_fault = FALSE;
+      EXT = INTERNAL;
+
+      /* force a reset - see schematic for AT motherboard */
+      c_cpu_reset();
+
+#ifdef PIG
+      save_last_xcptn_details("Exception:- Shutdown @%2d\n", xcode, 0, 0, 0, 0);
+      ccpu_synch_count++;
+      pig_cpu_action = CHECK_ALL;
+      c_cpu_unsimulate();
+#endif /* PIG */
+
+      /* then carry on */
+      c_cpu_continue(); /* DOES NOT RETURN */
+      }
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Handle Benign Exception */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+benign_exception
+
+IFN3(
+   ISM32, nmbr,   /* exception number */
+   ISM32, source, /* internal/external interrupt cause */
+   IU16, xcode    /* insignia exception code */
+   )
+
+
+   {
+   /* Deliver an exception that pushes no error code and does not
+      escalate to double fault.  Rewinds EIP to the start of the
+      faulting instruction, optionally lets the NTVDM host swallow the
+      exception, then raises the interrupt and resumes the simulator
+      main loop.  Never returns to the caller. */
+   SET_EIP(CCPU_save_EIP);
+
+#ifdef NTVDM
+   {
+   extern BOOL host_exint_hook IPT2(IS32, exp_no, IS32, error_code);
+
+   /* host gets first refusal (protected mode only) */
+   if(GET_PE() && host_exint_hook((IS32) nmbr, NULL_ERROR_CODE))
+      c_cpu_continue(); /* DOES NOT RETURN */
+   }
+#endif
+
+   /* Set default mode up */
+   SET_OPERAND_SIZE(GET_SR_AR_X(CS_REG));
+   SET_ADDRESS_SIZE(GET_SR_AR_X(CS_REG));
+   SET_POP_DISP(0);
+
+   EXT = source;
+   check_exception_env();
+#ifdef PIG
+   save_last_xcptn_details("Exception:- #%s-%d @%2d \n", (IUH)faultstr(nmbr), nmbr, xcode, 0, 0);
+#endif /* PIG */
+   if (show_exceptions){
+      fprintf(trace_file, "(%04x:%08x)Exception:- %d.\n",
+         GET_CS_SELECTOR(), GET_EIP(), nmbr);
+      if (trap_exceptions) force_yoda();
+   }
+   took_absolute_toc = TRUE;
+   do_intrupt((IU16)nmbr, FALSE, FALSE, NULL_ERROR_CODE);
+
+   c_cpu_continue(); /* DOES NOT RETURN */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Handle Contributory Exception */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+contributory_exception
+
+IFN3(
+   IU16, selector, /* failing selector */
+   ISM32, nmbr,    /* exception number */
+   IU16, xcode     /* insignia exception code */
+   )
+
+
+   {
+   /* Deliver an exception in the 'contributory' class: its error code
+      is the failing selector (RPL bits replaced by the EXT flag), and
+      a second contributory/page fault raised while this one is being
+      serviced escalates to #DF.  Never returns to the caller. */
+   IU16 error_code;
+
+
+   /* check if exception caused by external caller */
+   check_interface_active(nmbr);
+
+   check_for_shutdown(xcode);
+   check_for_double_fault(xcode);
+
+   doing_contributory = TRUE;
+
+   /* selector with TI/RPL bits masked, plus external-event bit */
+   error_code = (selector & 0xfffc) | EXT;
+
+   /* report the address of the faulting instruction */
+   SET_EIP(CCPU_save_EIP);
+
+#ifdef NTVDM
+   {
+   extern BOOL host_exint_hook IPT2(IS32, exp_no, IS32, error_code);
+
+   /* NOTE(review): braces added - the original unbraced 'if' guarded
+      only the flag clear, so c_cpu_continue() executed unconditionally
+      and the do_intrupt() delivery below was never reached */
+   if(GET_PE() && host_exint_hook((IS32) nmbr, (IS32)error_code))
+      {
+      doing_contributory = FALSE;
+      c_cpu_continue(); /* DOES NOT RETURN */
+      }
+   }
+#endif
+
+   /* Set default mode up */
+   SET_OPERAND_SIZE(GET_SR_AR_X(CS_REG));
+   SET_ADDRESS_SIZE(GET_SR_AR_X(CS_REG));
+   SET_POP_DISP(0);
+
+   EXT = INTERNAL;
+   check_exception_env();
+#ifdef PIG
+   save_last_xcptn_details("Exception:- #%s-%d(%04x) @%2d\n", (IUH)faultstr(nmbr), nmbr, error_code, xcode, 0);
+#endif /* PIG */
+   if (show_exceptions){
+      fprintf(trace_file, "(%04x:%08x)Exception:- %d(%04x).\n",
+         GET_CS_SELECTOR(), GET_EIP(), nmbr, error_code);
+      if (trap_exceptions) force_yoda();
+   }
+   took_absolute_toc = TRUE;
+   do_intrupt((IU16)nmbr, FALSE, TRUE, error_code);
+
+   c_cpu_continue(); /* DOES NOT RETURN */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Handle Contributory Exception (Via IDT). */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+contributory_idt_exception
+
+IFN3(
+   IU16, vector, /* failing interrupt vector */
+   ISM32, nmbr,  /* exception number */
+   IU16, xcode   /* insignia exception code */
+   )
+
+
+   {
+   /* As contributory_exception(), but the error code identifies a
+      faulting IDT vector (vector << 3, IDT indicator bit set) rather
+      than a segment selector.  Never returns to the caller. */
+   IU16 error_code;
+
+   /* check if exception caused by external caller */
+   check_interface_active(nmbr);
+
+   check_for_shutdown(xcode);
+   check_for_double_fault(xcode);
+
+   doing_contributory = TRUE;
+   error_code = ((vector & IDT_VECTOR_MASK) << IDT_VECTOR_SHIFT)
+      | IDT_INDICATOR_BIT
+      | EXT;
+
+   SET_EIP(CCPU_save_EIP);
+
+#ifdef NTVDM
+   {
+   extern BOOL host_exint_hook IPT2(IS32, exp_no, IS32, error_code);
+
+   /* NOTE(review): braces added - the original unbraced 'if' guarded
+      only the flag clear, so c_cpu_continue() executed unconditionally
+      and the do_intrupt() delivery below was never reached */
+   if(GET_PE() && host_exint_hook((IS32) nmbr, (IS32)error_code))
+      {
+      doing_contributory = FALSE;
+      c_cpu_continue(); /* DOES NOT RETURN */
+      }
+   }
+#endif
+
+   /* Set default mode up */
+   SET_OPERAND_SIZE(GET_SR_AR_X(CS_REG));
+   SET_ADDRESS_SIZE(GET_SR_AR_X(CS_REG));
+   SET_POP_DISP(0);
+
+   EXT = INTERNAL;
+   check_exception_env();
+#ifdef PIG
+   save_last_xcptn_details("Exception:- %s-%d(%04x) @%2d\n", (IUH)faultstr(nmbr), nmbr, error_code, xcode, 0);
+#endif /* PIG */
+   if (show_exceptions){
+      if ( GET_IDT_LIMIT() != 0 ){
+         fprintf(trace_file, "(%04x:%08x)Exception:- %d(%04x).\n",
+            GET_CS_SELECTOR(), GET_EIP(), nmbr, error_code);
+         if (trap_exceptions) force_yoda();
+      }
+   }
+   took_absolute_toc = TRUE;
+   do_intrupt((IU16)nmbr, FALSE, TRUE, error_code);
+
+   c_cpu_continue(); /* DOES NOT RETURN */
+   }
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Interrupt Table Too Small/Double Fault Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+DF
+
+IFN1(
+   IU16, xcode /* insignia exception code */
+   )
+   {
+   /* Double Fault (#DF, vector 8).  Not restartable, so doing_fault is
+      cleared.  A further fault while doing_double_fault is set shuts
+      the CPU down (see check_for_shutdown).  Always pushes a zero
+      error code.  Never returns to the caller. */
+   doing_fault = FALSE;
+
+   if ( GET_PE() == 1 )
+      {
+      check_for_shutdown(xcode);
+      doing_double_fault = TRUE;
+      }
+
+   SET_EIP(CCPU_save_EIP);
+
+#ifdef NTVDM
+   {
+   extern BOOL host_exint_hook IPT2(IS32, exp_no, IS32, error_code);
+
+   /* NOTE(review): braces added - the original unbraced 'if' guarded
+      only the flag clear, making c_cpu_continue() unconditional and
+      skipping the delivery below */
+   if(GET_PE() && host_exint_hook((IS32) DF_INT_NR, (IS32)NULL_ERROR_CODE))
+      {
+      doing_double_fault = FALSE;
+      c_cpu_continue(); /* DOES NOT RETURN */
+      }
+   }
+#endif
+
+   /* Set default mode up */
+   SET_OPERAND_SIZE(GET_SR_AR_X(CS_REG));
+   SET_ADDRESS_SIZE(GET_SR_AR_X(CS_REG));
+   SET_POP_DISP(0);
+
+   EXT = INTERNAL;
+   check_exception_env();
+#ifdef PIG
+   save_last_xcptn_details("Exception:- #DF-8 @%2d\n", xcode, 0, 0, 0, 0);
+#endif /* PIG */
+   if (show_exceptions){
+      if ( GET_IDT_LIMIT() != 0 ){
+         fprintf(trace_file, "(%04x:%08x)Exception:- %d.\n",
+            GET_CS_SELECTOR(), GET_EIP(), DF_INT_NR);
+         if (trap_exceptions) force_yoda();
+      }
+   }
+   took_absolute_toc = TRUE;
+   do_intrupt((IU16)DF_INT_NR, FALSE, TRUE, NULL_ERROR_CODE);
+
+   c_cpu_continue(); /* DOES NOT RETURN */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* General Protection Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #GP (13): restartable fault; error code carries the failing selector. */
+GLOBAL VOID GP IFN2( IU16, selector, IU16, xcode)
+   {
+   doing_fault = TRUE;
+   contributory_exception(selector, GP_INT_NR, xcode);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* General Protection Exception. (Via IDT) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #GP (13) against an IDT vector: error code marks the IDT entry. */
+GLOBAL VOID GP_INT IFN2( IU16, vector, IU16, xcode)
+   {
+   doing_fault = TRUE;
+   contributory_idt_exception(vector, GP_INT_NR, xcode);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Divide Error Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID Int0 IFN0 ()
+   {
+   /* Divide Error (#DE, vector 0).  Restartable fault; in protected
+      mode it joins the contributory class for double-fault tracking.
+      No error code is pushed.  Never returns to the caller. */
+   doing_fault = TRUE;
+   if ( GET_PE() == 1 )
+      {
+      doing_contributory = TRUE;
+      }
+
+   SET_EIP(CCPU_save_EIP);
+
+#ifdef NTVDM
+   {
+   extern BOOL host_exint_hook IPT2(IS32, exp_no, IS32, error_code);
+
+   /* NOTE(review): braces added - the original unbraced 'if' guarded
+      only the first statement; the remaining flag clears and
+      c_cpu_continue() executed unconditionally, so the delivery below
+      was never reached */
+   if(GET_PE() && host_exint_hook((IS32) I0_INT_NR, (IS32)NULL_ERROR_CODE))
+      {
+      doing_fault = FALSE;
+      doing_contributory = FALSE;
+      c_cpu_continue(); /* DOES NOT RETURN */
+      }
+   }
+#endif
+
+   /* Set default mode up */
+   SET_OPERAND_SIZE(GET_SR_AR_X(CS_REG));
+   SET_ADDRESS_SIZE(GET_SR_AR_X(CS_REG));
+   SET_POP_DISP(0);
+
+   EXT = INTERNAL;
+
+   check_exception_env();
+#ifdef PIG
+   save_last_xcptn_details("Exception:- #DIV-0\n", 0, 0, 0, 0, 0);
+#endif /* PIG */
+   if (show_exceptions){
+      fprintf(trace_file, "(%04x:%08x)Exception:- %d.\n",
+         GET_CS_SELECTOR(), GET_EIP(), I0_INT_NR);
+      if (trap_exceptions) force_yoda();
+   }
+   took_absolute_toc = TRUE;
+   do_intrupt((IU16)I0_INT_NR, FALSE, FALSE, NULL_ERROR_CODE);
+
+   c_cpu_continue(); /* DOES NOT RETURN */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Single Step Exception. (FAULT) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #DB (1) delivered as a fault (e.g. instruction breakpoint). */
+GLOBAL VOID
+Int1_f()
+   {
+   doing_fault = TRUE;
+   benign_exception(I1_INT_NR, EXTERNAL, -1);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Single Step Exception. (TRAP) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #DB (1) delivered as a trap (e.g. single step after the instruction). */
+GLOBAL VOID
+Int1_t()
+   {
+   doing_fault = FALSE;
+   benign_exception(I1_INT_NR, EXTERNAL, -1);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Bounds Check Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #BR (5): BOUND range exceeded; restartable fault, no error code. */
+GLOBAL VOID
+Int5()
+   {
+   doing_fault = TRUE;
+   benign_exception(I5_INT_NR, INTERNAL, -1);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Invalid Opcode Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #UD (6): invalid opcode; restartable fault, no error code. */
+GLOBAL VOID
+Int6()
+   {
+   doing_fault = TRUE;
+   benign_exception(I6_INT_NR, INTERNAL, -1);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* NPX Not Available Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #NM (7): coprocessor not available; restartable fault, no error code. */
+GLOBAL VOID
+Int7()
+   {
+   doing_fault = TRUE;
+   benign_exception(I7_INT_NR, INTERNAL, -1);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* NPX Error Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #MF (16): coprocessor (FPU) error; treated as external here. */
+GLOBAL VOID
+Int16()
+   {
+   doing_fault = TRUE;
+   benign_exception(I16_INT_NR, EXTERNAL, -1);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Not Present Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* #NP (11): segment not present; error code carries the failing selector. */
+GLOBAL VOID NP IFN2( IU16, selector, IU16, xcode)
+   {
+   doing_fault = TRUE;
+   contributory_exception(selector, NP_INT_NR, xcode);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Not Present Exception. (Via IDT) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+NP_INT
+
+IFN2(
+ IU16, vector,
+ IU16, xcode
+ )
+
+ {
+ doing_fault = TRUE;
+ contributory_idt_exception(vector, NP_INT_NR, xcode);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Page Fault Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+PF
+
+IFN2(
+   IU16, page_error, /* correctly formatted page fault error code */
+   IU16, xcode
+   )
+
+
+   {
+   /* Page Fault (#PF, vector 14).  Restartable; a page fault during a
+      page fault escalates straight to #DF, and one during a double
+      fault shuts the CPU down.  The faulting linear address is in CR2
+      (CR_PFLA), set by the paging unit before this is called.
+      Never returns to the caller. */
+
+   /* check if exception caused by external caller */
+   check_interface_active(PF_INT_NR);
+
+   doing_fault = TRUE;
+
+   check_for_shutdown(xcode);
+
+   /* Check for double page fault */
+   if ( doing_page_fault )
+      DF(xcode);
+
+   doing_page_fault = TRUE;
+
+   SET_EIP(CCPU_save_EIP);
+
+#ifdef NTVDM
+   {
+   extern BOOL host_exint_hook IPT2(IS32, exp_no, IS32, error_code);
+
+   /* NOTE(review): braces added - the original unbraced 'if' guarded
+      only the first flag clear; doing_page_fault reset and
+      c_cpu_continue() ran unconditionally, so the delivery below was
+      never reached */
+   if(GET_PE() && host_exint_hook((IS32) PF_INT_NR, (IS32)page_error))
+      {
+      doing_fault = FALSE;
+      doing_page_fault = FALSE;
+      c_cpu_continue(); /* DOES NOT RETURN */
+      }
+   }
+#endif
+
+   /* Set default mode up */
+   SET_OPERAND_SIZE(GET_SR_AR_X(CS_REG));
+   SET_ADDRESS_SIZE(GET_SR_AR_X(CS_REG));
+   SET_POP_DISP(0);
+
+   check_exception_env();
+#ifdef PIG
+   save_last_xcptn_details("Exception:- #PF-14(%04x) CR2=%08x @%2d\n", page_error, GET_CR(CR_PFLA), xcode, 0, 0);
+#endif /* PIG */
+   if (show_exceptions){
+      fprintf(trace_file, "(%04x:%08x)Exception:- %d(%04x) CR2=%08x.\n",
+         GET_CS_SELECTOR(), GET_EIP(), PF_INT_NR, page_error, GET_CR(CR_PFLA));
+      if (trap_exceptions) force_yoda();
+   }
+   took_absolute_toc = TRUE;
+   do_intrupt((IU16)PF_INT_NR, FALSE, TRUE, page_error);
+
+   c_cpu_continue(); /* DOES NOT RETURN */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Stack Fault Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SF
+
+IFN2(
+ IU16, selector,
+ IU16, xcode
+ )
+
+ {
+ doing_fault = TRUE;
+ contributory_exception(selector, SF_INT_NR, xcode);
+ }
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Task Switch Exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+TS
+
+IFN2(
+ IU16, selector,
+ IU16, xcode
+ )
+
+ {
+ doing_fault = TRUE;
+ contributory_exception(selector, TS_INT_NR, xcode);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_xcptn.h b/private/mvdm/softpc.new/base/ccpu386/c_xcptn.h
new file mode 100644
index 000000000..2c891bdd0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_xcptn.h
@@ -0,0 +1,59 @@
+/*[
+
+c_xcptn.h
+
+LOCAL CHAR SccsID[]="@(#)c_xcptn.h 1.6 01/19/95";
+
+Exception Handling Support.
+---------------------------
+
+]*/
+
+
+/*
+ Intel exception types.
+ */
+#define INTERNAL 0
+#define EXTERNAL 1
+
+
+/*
+ Interrupt Controls.
+ */
+IMPORT BOOL doing_contributory;
+IMPORT BOOL doing_double_fault;
+IMPORT BOOL doing_page_fault;
+IMPORT BOOL doing_fault;
+IMPORT ISM32 EXT;
+IMPORT IU32 CCPU_save_EIP;
+
+
+IMPORT VOID Int0 IPT0();
+
+IMPORT VOID Int1_f IPT0(); /* fault */
+
+IMPORT VOID Int1_t IPT0(); /* trap */
+
+IMPORT VOID Int5 IPT0();
+
+IMPORT VOID Int6 IPT0();
+
+IMPORT VOID Int7 IPT0();
+
+IMPORT VOID Int16 IPT0();
+
+IMPORT VOID DF IPT1( IU16, xcode);
+
+IMPORT VOID TS IPT2( IU16, selector, IU16, xcode );
+
+IMPORT VOID NP IPT2( IU16, selector, IU16, xcode );
+
+IMPORT VOID SF IPT2( IU16, selector, IU16, xcode );
+
+IMPORT VOID GP IPT2( IU16, selector, IU16, xcode );
+
+IMPORT VOID PF IPT2( IU16, page_error, IU16, xcode );
+
+IMPORT VOID NP_INT IPT2( IU16, vector, IU16, xcode );
+
+IMPORT VOID GP_INT IPT2( IU16, vector, IU16, xcode );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_xfer.c b/private/mvdm/softpc.new/base/ccpu386/c_xfer.c
new file mode 100644
index 000000000..4d5980331
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_xfer.c
@@ -0,0 +1,416 @@
+/*[
+
+c_xfer.c
+
+LOCAL CHAR SccsID[]="@(#)c_xfer.c 1.14 02/17/95";
+
+Transfer of Control Support.
+----------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_xfer.h>
+#include <c_page.h>
+#include <fault.h>
+
+/*
+ Prototype our internal functions.
+ */
+LOCAL VOID read_call_gate
+
+IPT5(
+ IU32, descr_addr,
+ ISM32, super,
+ IU16 *, selector,
+ IU32 *, offset,
+ IU8 *, count
+
+ );
+
+IMPORT IBOOL took_relative_jump;
+
+
+
+/*
+ =====================================================================
+ INTERNAL FUNCTIONS STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Read call gate descriptor. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+LOCAL VOID
+read_call_gate
+
+IFN5(
+   IU32, descr_addr, /* (I) memory address of call gate descriptor */
+   ISM32, super,     /* (I) descriptor type
+                            (CALL_GATE|XTND_CALL_GATE) */
+   IU16 *, selector, /* (O) selector retrieved from descriptor */
+   IU32 *, offset,   /* (O) offset retrieved from descriptor */
+   IU8 *, count      /* (O) count retrieved from descriptor */
+   )
+
+
+   {
+   /*
+      Unpack a (286 or 386) call gate descriptor and set the operand
+      size the gate mandates.  The format of a gate descriptor is:-
+
+      ===========================
+   +1 |       LIMIT 15-0        | +0
+      ===========================
+   +3 |        SELECTOR         | +2
+      ===========================
+   +5 |   AR     |    COUNT     | +4
+      ===========================
+   +7 |       LIMIT 31-16       | +6
+      ===========================
+    */
+
+   IU32 first_dword;
+   IU32 second_dword;
+
+   /* read in descriptor with minimum interaction with memory */
+   first_dword = spr_read_dword(descr_addr);
+   second_dword = spr_read_dword(descr_addr+4);
+
+   /* unpack selector */
+   *selector = first_dword >> 16;
+
+   /* unpack lower bits of offset */
+   *offset = first_dword & WORD_MASK;
+
+   /* unpack count */
+   *count = second_dword & BYTE_MASK;
+
+   if ( super == XTND_CALL_GATE )
+      {
+      /* unpack higher bits of offset; parentheses added around the
+         '&' term - identical semantics ('&' binds tighter than '|')
+         but now explicit and free of -Wparentheses ambiguity */
+      *offset = (second_dword & ~WORD_MASK) | *offset;
+
+      *count &= 0x0f; /* 4-bit double word count */
+      SET_OPERAND_SIZE(USE32); /* Gate Overrides all else. */
+      }
+   else
+      {
+      *count &= 0x1f; /* 5-bit word count */
+      SET_OPERAND_SIZE(USE16); /* Gate Overrides all else. */
+      }
+   }
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Update IP with relative offset. Check new IP is valid. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+update_relative_ip
+
+IFN1(
+   IU32, rel_offset /* sign extended relative offset */
+   )
+
+
+   {
+   /* Add a relative displacement to (E)IP, truncating to 16 bits when
+      the operand size is USE16, then record that a relative transfer
+      of control happened.  Limit checking is normally compiled out to
+      match the EDL CPU's behaviour (see comments below). */
+   IU32 new_dest;
+
+   new_dest = GET_EIP() + rel_offset;
+
+   /* unsigned wrap-around implements 16-bit IP arithmetic */
+   if ( GET_OPERAND_SIZE() == USE16 )
+      new_dest &= WORD_MASK;
+
+#ifdef TAKE_REAL_MODE_LIMIT_FAULT
+
+   if ( new_dest > GET_CS_LIMIT() )
+      GP((IU16)0, FAULT_RM_REL_IP_CS_LIMIT);
+
+#else /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+   /* The Soft486 EDL CPU does not take Real Mode limit failures.
+    * Since the Ccpu486 is used as a "reference" cpu we wish it
+    * to behave as a C version of the EDL Cpu rather than as a C
+    * version of a i486.
+    */
+
+#ifdef TAKE_PROT_MODE_LIMIT_FAILURE
+
+   /* The Soft486 EDL CPU does not take Protected Mode limit failures
+    * for the instructions with relative offsets, Jxx, LOOPxx, JCXZ,
+    * JMP rel and CALL rel, or instructions with near offsets,
+    * JMP near and CALL near.
+    * Since the Ccpu486 is used as a "reference" cpu we wish it
+    * to behave as a C version of the EDL Cpu rather than as a C
+    * version of a i486.
+    */
+
+   if ( GET_PE() == 1 && GET_VM() == 0 )
+      {
+      if ( new_dest > GET_CS_LIMIT() )
+         GP((IU16)0, FAULT_PM_REL_IP_CS_LIMIT);
+      }
+
+#endif /* TAKE_PROT_MODE_LIMIT_FAILURE */
+
+#endif /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+   SET_EIP(new_dest);
+   took_relative_jump = TRUE;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Validate far call or far jump destination */
+/* Take #GP if invalid or access check fail. */
+/* Take #NP if not present. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+validate_far_dest
+
+IFN6(
+   IU16 *, cs,         /* (I/O) segment of target address */
+   IU32 *, ip,         /* (I/O) offset of target address */
+   IU32 *, descr_addr, /* (O) related descriptor memory address */
+   IU8 *, count,       /* (O) call gate count(valid if CALL_GATE) */
+   ISM32 *, dest_type, /* (O) destination type */
+   ISM32, caller_id    /* (I) bit mapped caller identifier */
+   )
+
+
+   {
+   /* Validate the destination of a far CALL/JMP in protected mode and
+      classify it (SAME_LEVEL, MORE_PRIVILEGE*, NEW_TASK).  For gate
+      descriptors the real destination is extracted from the gate and
+      *cs/*ip are rewritten.  Faults with #GP/#NP on any failed check
+      (those never return here).  Fault-check ordering below follows
+      the descriptor-type cases and must not be rearranged. */
+   IU16 new_cs;
+   IU32 new_ip;
+   IU32 cs_descr_addr;
+   IU8 AR;
+   ISM32 super;
+
+   new_cs = *cs; /* take local copies */
+   new_ip = *ip;
+
+   *dest_type = SAME_LEVEL; /* default to commonest type */
+
+   if ( selector_outside_GDT_LDT(new_cs, &cs_descr_addr) )
+      GP(new_cs, FAULT_FAR_DEST_SELECTOR);
+
+   /* load access rights (byte +5 of the descriptor) */
+   AR = spr_read_byte(cs_descr_addr+5);
+
+   /* validate possible types of target */
+   switch ( super = descriptor_super_type((IU16)AR) )
+      {
+   case CONFORM_NOREAD_CODE:
+   case CONFORM_READABLE_CODE:
+      /* access check requires DPL <= CPL */
+      if ( GET_AR_DPL(AR) > GET_CPL() )
+         GP(new_cs, FAULT_FAR_DEST_ACCESS_1);
+
+      /* it must be present */
+      if ( GET_AR_P(AR) == NOT_PRESENT )
+         NP(new_cs, FAULT_FAR_DEST_NP_CONFORM);
+      break;
+
+   case NONCONFORM_NOREAD_CODE:
+   case NONCONFORM_READABLE_CODE:
+      /* access check requires RPL <= CPL and DPL == CPL */
+      if ( GET_SELECTOR_RPL(new_cs) > GET_CPL() ||
+           GET_AR_DPL(AR) != GET_CPL() )
+         GP(new_cs, FAULT_FAR_DEST_ACCESS_2);
+
+      /* it must be present */
+      if ( GET_AR_P(AR) == NOT_PRESENT )
+         NP(new_cs, FAULT_FAR_DEST_NP_NONCONFORM);
+      break;
+
+   case CALL_GATE:
+   case XTND_CALL_GATE:
+      /* Check gate present and access allowed */
+
+      /* access check requires DPL >= RPL and DPL >= CPL */
+      if ( GET_SELECTOR_RPL(new_cs) > GET_AR_DPL(AR) ||
+           GET_CPL() > GET_AR_DPL(AR) )
+         GP(new_cs, FAULT_FAR_DEST_ACCESS_3);
+
+      if ( GET_AR_P(AR) == NOT_PRESENT )
+         NP(new_cs, FAULT_FAR_DEST_NP_CALLG);
+
+      /* OK, get real destination from gate; this also forces the
+         operand size the gate dictates */
+      read_call_gate(cs_descr_addr, super, &new_cs, &new_ip, count);
+
+      /* then vet the code segment the gate points at */
+      validate_gate_dest(caller_id, new_cs, &cs_descr_addr, dest_type);
+      break;
+
+   case TASK_GATE:
+      /* Check gate present and access allowed */
+
+      /* access check requires DPL >= RPL and DPL >= CPL */
+      if ( GET_SELECTOR_RPL(new_cs) > GET_AR_DPL(AR) ||
+           GET_CPL() > GET_AR_DPL(AR) )
+         GP(new_cs, FAULT_FAR_DEST_ACCESS_4);
+
+      if ( GET_AR_P(AR) == NOT_PRESENT )
+         NP(new_cs, FAULT_FAR_DEST_NP_TASKG);
+
+      /* OK, get real destination from gate (TSS selector at +2) */
+      new_cs = spr_read_word(cs_descr_addr+2);
+
+      /* Check out new destination */
+      (void)validate_task_dest(new_cs, &cs_descr_addr);
+
+      *dest_type = NEW_TASK;
+      break;
+
+   case AVAILABLE_TSS:
+   case XTND_AVAILABLE_TSS:
+      /* TSS must be in GDT */
+      if ( GET_SELECTOR_TI(new_cs) == 1 )
+         GP(new_cs, FAULT_FAR_DEST_TSS_IN_LDT);
+
+      /* access check requires DPL >= RPL and DPL >= CPL */
+      if ( GET_SELECTOR_RPL(new_cs) > GET_AR_DPL(AR) ||
+           GET_CPL() > GET_AR_DPL(AR) )
+         GP(new_cs, FAULT_FAR_DEST_ACCESS_5);
+
+      /* it must be present */
+      if ( GET_AR_P(AR) == NOT_PRESENT )
+         NP(new_cs, FAULT_FAR_DEST_NP_TSS);
+
+      *dest_type = NEW_TASK;
+      break;
+
+   default:
+      GP(new_cs, FAULT_FAR_DEST_BAD_SEG_TYPE); /* bad type for far destination */
+      }
+
+   *cs = new_cs; /* Return final values */
+   *ip = new_ip;
+   *descr_addr = cs_descr_addr;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Validate transfer of control to a call gate destination. */
+/* Take #GP if invalid or access check fail. */
+/* Take #NP if not present. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+validate_gate_dest
+
+IFN4(
+   ISM32, caller_id,   /* (I) bit mapped caller identifier */
+   IU16, new_cs,       /* (I) segment of target address */
+   IU32 *, descr_addr, /* (O) related descriptor memory address */
+   ISM32 *, dest_type  /* (O) destination type */
+   )
+
+
+   {
+   /* Validate the code segment a call/interrupt gate points at and
+      classify the transfer (SAME_LEVEL or MORE_PRIVILEGE0..2).
+      Faults with #GP/#NP on any failed check (those never return). */
+   IU8 AR;
+
+   *dest_type = SAME_LEVEL; /* default */
+
+   /* Check out new destination */
+   if ( selector_outside_GDT_LDT(new_cs, descr_addr) )
+      GP(new_cs, FAULT_GATE_DEST_SELECTOR);
+
+   /* load access rights (byte +5 of the descriptor) */
+   AR = spr_read_byte((*descr_addr)+5);
+
+   /* must be a code segment */
+   switch ( descriptor_super_type((IU16)AR) )
+      {
+   case CONFORM_NOREAD_CODE:
+   case CONFORM_READABLE_CODE:
+      /* access check requires DPL <= CPL */
+      if ( GET_AR_DPL(AR) > GET_CPL() )
+         GP(new_cs, FAULT_GATE_DEST_ACCESS_1);
+      break;
+
+   case NONCONFORM_NOREAD_CODE:
+   case NONCONFORM_READABLE_CODE:
+      /* access check requires DPL <= CPL */
+      if ( GET_AR_DPL(AR) > GET_CPL() )
+         GP(new_cs, FAULT_GATE_DEST_ACCESS_2);
+
+      /* but jumps must have DPL == CPL */
+      if ( (caller_id & JMP_ID) && (GET_AR_DPL(AR) != GET_CPL()) )
+         GP(new_cs, FAULT_GATE_DEST_ACCESS_3);
+
+      /* set MORE_PRIVILEGE(0|1|2) - target DPL is the new CPL */
+      if ( GET_AR_DPL(AR) < GET_CPL() )
+         *dest_type = GET_AR_DPL(AR);
+      break;
+
+   default:
+      GP(new_cs, FAULT_GATE_DEST_BAD_SEG_TYPE);
+      }
+
+   if ( GET_VM() == 1 )
+      {
+      /*
+         We must be called by ISM32, so ensure we go to CPL 0 via
+         a 32-bit gate.
+         NOTE(review): "ISM32" here presumably means an interrupt
+         taken from V86 mode - confirm against callers before
+         rewording.
+       */
+      if ( *dest_type != MORE_PRIVILEGE0 || GET_OPERAND_SIZE() != USE32 )
+         GP(new_cs, FAULT_GATE_DEST_GATE_SIZE);
+      }
+
+   /* it must be present */
+   if ( GET_AR_P(AR) == NOT_PRESENT )
+      NP(new_cs, FAULT_GATE_DEST_NP);
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Validate transfer of control to a task gate destination. */
+/* Take #GP if invalid or access check fail. */
+/* Take #NP if not present. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* NOTE(review): dropped the bogus 'IMPORT' storage-class macro from
+   this definition - GLOBAL alone is correct for a definition; the
+   IMPORT declaration lives in c_xfer.h. */
+GLOBAL ISM32
+validate_task_dest
+
+IFN2(
+   IU16, selector,    /* (I) segment of target address */
+   IU32 *, descr_addr /* (O) related descriptor memory address */
+   )
+
+
+   {
+   /* Validate a task-switch destination TSS selector.  Faults with
+      #GP if the selector is outside the GDT or not an available TSS,
+      #NP if the TSS segment is not present (neither returns here).
+      Returns the descriptor super type (AVAILABLE_TSS or
+      XTND_AVAILABLE_TSS) so the caller can tell 286 from 386 TSS. */
+   IU8 AR;
+   ISM32 super;
+
+   /* must be in GDT */
+   if ( selector_outside_GDT(selector, descr_addr) )
+      GP(selector, FAULT_TASK_DEST_SELECTOR);
+
+   /* load access rights (byte +5 of the descriptor) */
+   AR = spr_read_byte((*descr_addr)+5);
+
+   /* is it really an available TSS segment */
+   super = descriptor_super_type((IU16)AR);
+   if ( super != AVAILABLE_TSS && super != XTND_AVAILABLE_TSS )
+      GP(selector, FAULT_TASK_DEST_NOT_TSS);
+
+   /* it must be present */
+   if ( GET_AR_P(AR) == NOT_PRESENT )
+      NP(selector, FAULT_TASK_DEST_NP);
+   return super;
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_xfer.h b/private/mvdm/softpc.new/base/ccpu386/c_xfer.h
new file mode 100644
index 000000000..2c065ef1a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_xfer.h
@@ -0,0 +1,70 @@
+/*[
+
+c_xfer.h
+
+Transfer of Control Support.
+----------------------------
+
+LOCAL CHAR SccsID[]="@(#)c_xfer.h 1.5 02/17/95";
+
+]*/
+
+
+/*
+ Bit mapped identities (caller_id) for the invokers of far
+ transfers of control.
+ */
+#define CALL_ID 0
+#define JMP_ID 1
+#define INT_ID 0
+
+/*
+ Legal far destinations (dest_type).
+ */
+
+/* greater privilege is mapped directly to the Intel privilege */
+#define MORE_PRIVILEGE0 0
+#define MORE_PRIVILEGE1 1
+#define MORE_PRIVILEGE2 2
+/* our own (arbitrary) mappings */
+#define SAME_LEVEL 3
+#define LOWER_PRIVILEGE 4
+#define NEW_TASK 5
+
+
+IMPORT VOID update_relative_ip
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID validate_far_dest
+
+IPT6(
+ IU16 *, cs,
+ IU32 *, ip,
+ IU32 *, descr_addr,
+ IU8 *, count,
+ ISM32 *, dest_type,
+ ISM32, caller_id
+
+ );
+
+IMPORT VOID validate_gate_dest
+
+IPT4(
+ ISM32, caller_id,
+ IU16, new_cs,
+ IU32 *, descr_addr,
+ ISM32 *, dest_type
+
+ );
+
+IMPORT ISM32 validate_task_dest
+
+IPT2(
+ IU16, selector,
+ IU32 *, descr_addr
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_xtrn.c b/private/mvdm/softpc.new/base/ccpu386/c_xtrn.c
new file mode 100644
index 000000000..64a5cf70e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_xtrn.c
@@ -0,0 +1,135 @@
+/*[
+
+c_xtrn.c
+
+LOCAL CHAR SccsID[]="@(#)c_xtrn.c 1.9 04/22/94";
+
+Interface routines used by BIOS code.
+-------------------------------------
+
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+
+#include <stdio.h>
+#include <setjmp.h>
+#include <xt.h>
+
+#if 0
+#include <sas.h>
+#endif
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <c_xtrn.h>
+#include <c_mem.h>
+
+
+LOCAL jmp_buf interface_abort;
+LOCAL BOOL interface_active;
+LOCAL ISM32 interface_error;
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Call CPU Function and catch any resulting exception. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL ISM32
+call_cpu_function IFN4(CALL_CPU *, func, ISM32, type, ISM32, arg1, IU16, arg2)
+   {
+   /* Invoke a CPU service on behalf of BIOS/host code, catching any
+      exception it raises: check_interface_active() longjmps back here
+      with the exception number stashed in interface_error.
+      Returns 0 on success, else that exception number. */
+   if ( setjmp(interface_abort) == 0 )
+      {
+      interface_active = TRUE;
+
+      /* Do the CPU Function - dispatch on the declared signature.
+         Magic numbers 1/2 replaced with the TYPE_* names that
+         c_xtrn.h defines for exactly these values. */
+      switch ( type )
+         {
+      case TYPE_I_W: /* (ISM32, IU16) */
+         (*(CALL_CPU_2 *)func)(arg1, arg2);
+         break;
+
+      case TYPE_W:   /* (IU16) */
+         (*(CALL_CPU_1 *)func)(arg2);
+         break;
+
+      default:       /* unknown type: do nothing, report success */
+         break;
+         }
+
+      interface_error = 0; /* All went OK */
+      }
+   /* on longjmp we resume here; interface_error already holds the
+      exception number saved by check_interface_active() */
+
+   interface_active = FALSE;
+
+   return interface_error;
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Check if external interface is active. */
+/* And Bail Out if it is! */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+check_interface_active
+
+IFN1(
+   ISM32, except_nmbr /* Intel exception number to report */
+   )
+
+
+   {
+   /* If the current CPU work was started via call_cpu_function(),
+      abort it by longjmp-ing back there with the exception number;
+      otherwise do nothing and let normal exception delivery proceed. */
+   if ( interface_active )
+      {
+      /* YES CPU Function was called by an interface routine. */
+      interface_error = except_nmbr; /* save error */
+      longjmp(interface_abort, 1); /* Bail Out */
+      }
+   }
+
+/*(
+ *========================= Cpu_find_dcache_entry ==============================
+ * Cpu_find_dcache_entry
+ *
+ * Purpose
+ * In an assembler CPU, this function allows non-CPU code to try and look
+ * up a selector in the dcache, rather than constructing it from memory.
+ * We don't have a dcache, but it gives us a chance to intercept
+ * CS selector calls, as the CS descriptor may not be available.
+ *
+ * Input
+ * selector, The selector to look-up
+ *
+ * Outputs
+ * returns TRUE if selector found (i.e. CS in our case)
+ * base The linear address of the base of the segment.
+ *
+ * Description
+ * Just look out for CS, and return the stored base if we get it.
+)*/
+
+GLOBAL IBOOL
+Cpu_find_dcache_entry IFN2(IU16, seg, LIN_ADDR *, base)
+{
+   /* This C CPU keeps no descriptor cache; the only selector we can
+      answer for is the current CS, whose base is held in the segment
+      register cache. */
+   if (GET_CS_SELECTOR() != seg)
+      return(FALSE);
+
+   *base = GET_CS_BASE();
+   return(TRUE);
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/c_xtrn.h b/private/mvdm/softpc.new/base/ccpu386/c_xtrn.h
new file mode 100644
index 000000000..e4ba99d0d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/c_xtrn.h
@@ -0,0 +1,38 @@
+/*[
+
+c_xtrn.h
+
+External Interface Support.
+---------------------------
+
+LOCAL CHAR SccsID[]="@(#)c_xtrn.h 1.4 02/09/94";
+
+]*/
+
+
+/*
+ Supported Interface Types.
+ */
+#define TYPE_I_W 1 /* (ISM32 , IU16) */
+#define TYPE_W 2 /* (IU16) */
+
+IMPORT VOID check_interface_active
+
+IPT1(
+ ISM32, except_nmbr
+
+ );
+
+typedef void CALL_CPU IPT0();
+typedef void CALL_CPU_1 IPT1(ISM32, p1);
+typedef void CALL_CPU_2 IPT2(ISM32, p1, IU16, p2);
+
+IMPORT ISM32 call_cpu_function
+
+IPT4(
+ CALL_CPU *,func,
+ ISM32, type,
+ ISM32, arg1,
+ IU16, arg2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/call.c b/private/mvdm/softpc.new/base/ccpu386/call.c
new file mode 100644
index 000000000..961f4be68
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/call.c
@@ -0,0 +1,357 @@
+/*[
+
+call.c
+
+LOCAL CHAR SccsID[]="@(#)call.c 1.15 02/27/95";
+
+CALL CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+#include <host_def.h>
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <call.h>
+#include <c_xfer.h>
+#include <c_tsksw.h>
+#include <fault.h>
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Process far calls. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+CALLF
+#ifdef ANSI
+ (
+ IU32 op1[2] /* offset:segment pointer */
+ )
+#else
+ (op1)
+ IU32 op1[2];
+#endif
+ {
+ /* Far call.  Handles Real/V86 mode directly; in protected mode
+  * the destination may be a code segment at the same privilege, a
+  * call gate leading to a more privileged level (with a stack
+  * switch and parameter copy), or a task gate/TSS (task switch).
+  */
+ IU16 new_cs; /* The destination */
+ IU32 new_ip;
+
+ IU32 cs_descr_addr; /* code segment descriptor address */
+ CPU_DESCR cs_entry; /* code segment descriptor entry */
+
+ ISM32 dest_type; /* category for destination */
+
+ IU8 count; /* call gate count (if used) */
+ IU32 dpl; /* new privilege level (if used) */
+
+ IU16 new_ss; /* The new stack */
+ IU32 new_sp;
+ ISM32 new_stk_sz; /* Size in bytes of new stack */
+
+ IU32 ss_descr_addr; /* stack segment descriptor address */
+ CPU_DESCR ss_entry; /* stack segment descriptor entry */
+
+ /* Variables used on stack transfers */
+ IU32 old_cs;
+ IU32 old_ip;
+ IU32 old_ss;
+ IU32 old_sp;
+ IU32 params[31]; /* call gate count field is 5 bits, so <= 31 params */
+ ISM32 i;
+
+ /* get destination (correctly typed) */
+ new_cs = op1[1];
+ new_ip = op1[0];
+
+ if ( GET_PE() == 0 || GET_VM() == 1 )
+ {
+ /* Real Mode or V86 Mode */
+
+ /* must be able to push CS:(E)IP */
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_2);
+
+#ifdef TAKE_REAL_MODE_LIMIT_FAULT
+
+ /* do ip limit checking */
+ if ( new_ip > GET_CS_LIMIT() )
+ GP((IU16)0, FAULT_CALLF_RM_CS_LIMIT);
+
+#else /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+ /* The Soft486 EDL CPU does not take Real Mode limit failures.
+ * Since the Ccpu486 is used as a "reference" cpu we wish it
+ * to behave as a C version of the EDL Cpu rather than as a C
+ * version of a i486.
+ */
+
+#endif /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+
+ /* ALL SYSTEMS GO */
+
+ /* push return address */
+ spush16((IU32)GET_CS_SELECTOR());
+ spush((IU32)GET_EIP());
+
+ /* real mode load: base derived from selector, no descriptor */
+ load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
+ SET_EIP(new_ip);
+ }
+ else
+ {
+ /* protected mode */
+
+ /* decode and check final destination (follows any call gate;
+ sets 'count' to the gate's parameter count) */
+ validate_far_dest(&new_cs, &new_ip, &cs_descr_addr, &count,
+ &dest_type, CALL_ID);
+
+ /* action possible types of target */
+ switch ( dest_type )
+ {
+ case NEW_TASK:
+ /* task gate or TSS: nested task switch, old task's EIP saved */
+ switch_tasks(NOT_RETURNING, NESTING, new_cs, cs_descr_addr, GET_EIP());
+
+ /* limit check new IP (now in new task) */
+ if ( GET_EIP() > GET_CS_LIMIT() )
+ GP((IU16)0, FAULT_CALLF_TASK_CS_LIMIT);
+ break;
+
+ case SAME_LEVEL:
+ read_descriptor_linear(cs_descr_addr, &cs_entry);
+
+ /* stamp new selector with CPL */
+ SET_SELECTOR_RPL(new_cs, GET_CPL());
+
+ /* check room for return address CS:(E)IP */
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_2);
+
+ /* do ip limit check */
+ if ( new_ip > cs_entry.limit )
+ GP((IU16)0, FAULT_CALLF_PM_CS_LIMIT_1);
+
+ /* ALL SYSTEMS GO */
+
+ /* push return address */
+ spush16((IU32)GET_CS_SELECTOR());
+ spush((IU32)GET_EIP());
+
+ load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
+ SET_EIP(new_ip);
+ break;
+
+ default: /* MORE_PRIVILEGE(0|1|2) */
+ /* inward call through a call gate: dest_type encodes the
+ new (numerically lower) privilege level */
+ read_descriptor_linear(cs_descr_addr, &cs_entry);
+
+ dpl = dest_type;
+
+ /* stamp new selector with new CPL */
+ SET_SELECTOR_RPL(new_cs, dpl);
+
+ /* find out about new stack */
+ get_stack_selector_from_TSS(dpl, &new_ss, &new_sp);
+
+ /* check new stack selector */
+ validate_SS_on_stack_change(dpl, new_ss,
+ &ss_descr_addr, &ss_entry);
+
+ /* check room for SS:(E)SP
+ parameters
+ CS:(E)IP */
+ new_stk_sz = count + NR_ITEMS_4;
+ validate_new_stack_space(new_stk_sz, new_sp, &ss_entry, new_ss);
+
+ /* do ip limit check */
+ if ( new_ip > cs_entry.limit )
+ GP((IU16)0, FAULT_CALLF_PM_CS_LIMIT_2);
+
+ /* ALL SYSTEMS GO */
+
+ SET_CPL(dpl);
+
+ /* update code segment */
+ old_cs = (IU32)GET_CS_SELECTOR();
+ old_ip = GET_EIP();
+ load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
+ SET_EIP(new_ip);
+
+ /* 'pop' params from old stack */
+ old_ss = (IU32)GET_SS_SELECTOR();
+ old_sp = GET_ESP();
+
+ for ( i = 0; i < count; i++ )
+ params[i] = spop();
+
+ /* update stack segment */
+ load_SS_cache(new_ss, ss_descr_addr, &ss_entry);
+ if ( GET_OPERAND_SIZE() == USE16 )
+ SET_SP(new_sp);
+ else
+ SET_ESP(new_sp);
+
+ /*
+ FORM NEW STACK, VIZ
+
+ ========== ==========
+ old SS:SP -> | parm 1 | new SS:SP -> | old IP |
+ | parm 2 | | old CS |
+ | parm 3 | | parm 1 |
+ ========== | parm 2 |
+ | parm 3 |
+ | old SP |
+ | old SS |
+ ==========
+ */
+
+ /* push old stack values */
+ spush16(old_ss);
+ spush(old_sp);
+
+ /* push back params onto new stack */
+ for ( i = count-1; i >= 0; i-- )
+ spush(params[i]);
+
+ /* push return address */
+ spush16(old_cs);
+ spush(old_ip);
+ break;
+ }
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* call near indirect */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+CALLN
+
+IFN1(
+ IU32, offset
+ )
+
+
+ {
+ /* Near indirect call: push the return EIP and jump to the
+  * absolute offset within the current code segment.
+  */
+
+ /* check push to stack ok */
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_1);
+
+ /*
+ Although the 386 book says a 16-bit operand should be AND'ed
+ with 0x0000ffff, a 16-bit operand is never fetched with the
+ top bits dirty anyway, so we don't AND here.
+ */
+
+ /* do ip limit check */
+#ifdef TAKE_REAL_MODE_LIMIT_FAULT
+
+ if ( offset > GET_CS_LIMIT() )
+ GP((IU16)0, FAULT_CALLN_RM_CS_LIMIT);
+
+#else /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+ /* The Soft486 EDL CPU does not take Real Mode limit failures.
+ * Since the Ccpu486 is used as a "reference" cpu we wish it
+ * to behave as a C version of the EDL Cpu rather than as a C
+ * version of a i486.
+ */
+
+#ifdef TAKE_PROT_MODE_LIMIT_FAULT
+
+ if ( GET_PE() == 1 && GET_VM() == 0 )
+ {
+ if ( offset > GET_CS_LIMIT() )
+ GP((IU16)0, FAULT_CALLN_PM_CS_LIMIT);
+ }
+
+#endif /* TAKE_PROT_MODE_LIMIT_FAULT */
+
+ /* The Soft486 EDL CPU does not take Protected Mode limit failures
+ * for the instructions with relative offsets, Jxx, LOOPxx, JCXZ,
+ * JMP rel and CALL rel, or instructions with near offsets,
+ * JMP near and CALL near.
+ * Since the Ccpu486 is used as a "reference" cpu we wish it
+ * to behave as a C version of the EDL Cpu rather than as a C
+ * version of a i486.
+ */
+
+#endif /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+ /* all systems go */
+ spush((IU32)GET_EIP());
+ SET_EIP(offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* call near relative */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+CALLR
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ /* Near relative call: push the return EIP and transfer to
+  * EIP + rel_offset, truncated to 16 bits for a 16-bit operand
+  * size.
+  */
+ IU32 new_dest;
+
+ /* check push to stack ok */
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_1);
+
+ /* calculate and check new destination */
+ new_dest = GET_EIP() + rel_offset;
+
+ if ( GET_OPERAND_SIZE() == USE16 )
+ new_dest &= WORD_MASK;
+
+ /* do ip limit check */
+#ifdef TAKE_REAL_MODE_LIMIT_FAULT
+
+ if ( new_dest > GET_CS_LIMIT() )
+ GP((IU16)0, FAULT_CALLR_RM_CS_LIMIT);
+
+#else /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+ /* The Soft486 EDL CPU does not take Real Mode limit failures.
+ * Since the Ccpu486 is used as a "reference" cpu we wish it
+ * to behave as a C version of the EDL Cpu rather than as a C
+ * version of a i486.
+ */
+
+#ifdef TAKE_PROT_MODE_LIMIT_FAULT
+
+ if ( GET_PE() == 1 && GET_VM() == 0 )
+ {
+ if ( new_dest > GET_CS_LIMIT() )
+ GP((IU16)0, FAULT_CALLR_PM_CS_LIMIT);
+ }
+
+#endif /* TAKE_PROT_MODE_LIMIT_FAULT */
+
+ /* The Soft486 EDL CPU does not take Protected Mode limit failures
+ * for the instructions with relative offsets, Jxx, LOOPxx, JCXZ,
+ * JMP rel and CALL rel, or instructions with near offsets,
+ * JMP near and CALL near.
+ * Since the Ccpu486 is used as a "reference" cpu we wish it
+ * to behave as a C version of the EDL Cpu rather than as a C
+ * version of a i486.
+ */
+
+#endif /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+ /* all systems go */
+ spush((IU32)GET_EIP());
+ SET_EIP(new_dest);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/call.h b/private/mvdm/softpc.new/base/ccpu386/call.h
new file mode 100644
index 000000000..488419e36
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/call.h
@@ -0,0 +1,30 @@
+/*
+ call.h
+
+ Define all CALL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)call.h 1.4 02/09/94";
+ */
+
+IMPORT VOID CALLF
+
+IPT1(
+ IU32, op1[2]
+
+ );
+
+IMPORT VOID CALLN
+
+IPT1(
+ IU32, offset
+
+ );
+
+IMPORT VOID CALLR
+
+IPT1(
+ IU32, rel_offset
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/cbw.c b/private/mvdm/softpc.new/base/ccpu386/cbw.c
new file mode 100644
index 000000000..75a2f70b1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cbw.c
@@ -0,0 +1,42 @@
+/*[
+
+cbw.c
+
+LOCAL CHAR SccsID[]="@(#)cbw.c 1.5 02/09/94";
+
+CBW CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cbw.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CBW()
+ {
+ /* Convert Byte to Word: sign-extend AL into AH. */
+ SET_AH((GET_AL() & BIT7_MASK) ? 0xff : 0);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cbw.h b/private/mvdm/softpc.new/base/ccpu386/cbw.h
new file mode 100644
index 000000000..99d17143e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cbw.h
@@ -0,0 +1,11 @@
+/*
+ cbw.h
+
+ Define all CBW CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cbw.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CBW IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/ccpudefs.inc b/private/mvdm/softpc.new/base/ccpu386/ccpudefs.inc
new file mode 100644
index 000000000..74f2098fa
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ccpudefs.inc
@@ -0,0 +1 @@
+C_DEFINES= -DCPU_30_STYLE -DCPU_40_STYLE -DNEW_CPU -DCCPU -DSPC386 -DANSI -DNTVDM
diff --git a/private/mvdm/softpc.new/base/ccpu386/ccpupig.c b/private/mvdm/softpc.new/base/ccpu386/ccpupig.c
new file mode 100644
index 000000000..5ab94513d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ccpupig.c
@@ -0,0 +1,320 @@
+/*[
+
+ccpupig.c
+
+LOCAL CHAR SccsID[]="@(#)ccpupig.c 1.22 04/11/95"
+
+C CPU <-> Pigger Interface
+--------------------------
+
+]*/
+
+#include <insignia.h>
+#include <host_def.h>
+
+#ifdef PIG
+
+#include <xt.h>
+#define CPU_PRIVATE
+#include CpuH
+#include <ccpupig.h>
+#include <sas.h> /* need memory(M) */
+#include <ccpusas4.h> /* the cpu internal sas bits */
+#include <Cpu_c.h> /* Intel memory access macros */
+
+#include <c_reg.h>
+#include <c_xcptn.h>
+#include <c_page.h>
+
+#define DASM_PRIVATE
+#include <dasm.h>
+#include <decode.h>
+
+#include <assert.h>
+/*
+ * Interface between this cpu and other one being pigged
+ */
+GLOBAL enum pig_actions pig_cpu_action;
+GLOBAL IBOOL ccpu_pig_enabled = FALSE;
+
+/*
+ * Last Instruction memorizing...
+ */
+
+GLOBAL IU32 ccpu_synch_count = 1;
+
+LOCAL struct ccpu_last_inst *inst_buffer;
+LOCAL struct ccpu_last_inst *inst_ptr;
+LOCAL struct ccpu_last_inst *inst_ptr_wrap;
+LOCAL struct ccpu_last_inst *next_inst_ptr;
+LOCAL struct ccpu_last_inst *inst_bytes_ptr;
+LOCAL char prefetch_inst_buffer[200];
+
+/*(
+ * Keep these last inst vars up to date...
+)*/
+
+GLOBAL VOID save_last_inst_details IFN1(char *, text)
+{
+ /* Record CS:EIP, code-size attribute and the caller's disassembly
+  * text for the instruction about to execute in the circular
+  * history buffer, then advance (and wrap) the write pointer.
+  * 'text' is stored by pointer, not copied, so it must outlive the
+  * history entry.
+  */
+ inst_ptr->cs = GET_CS_SELECTOR();
+ inst_ptr->big_cs = GET_CS_AR_X() != 0;
+ inst_ptr->text = text;
+ /*
+ * getEIP() should be getInstructionPointer() but they
+ * are equivalent for the current CCPU.
+ */
+ inst_ptr->eip = GET_EIP();
+ /* save_instruction_byte() will append opcode bytes to this entry */
+ inst_bytes_ptr = inst_ptr;
+ inst_bytes_ptr->inst_len = 0;
+
+ inst_ptr->synch_count = ccpu_synch_count;
+
+ if (++inst_ptr >= inst_ptr_wrap)
+ inst_ptr = inst_buffer;
+
+ /* Invalidate the previous prefetch disassembly buffer */
+ prefetch_inst_buffer[0] = '\0';
+}
+
+/* This is called by the CCPU as it processes each instruction byte.
+ * The CCPU has already checked that the Intel instruction is not just
+ * an infinite sequence of prefixes, so we know it will fit.
+ */
+GLOBAL IU8 save_instruction_byte IFN1(IU8, byte)
+{
+ /* Append one opcode byte to the current history entry and hand
+  * the byte straight back to the instruction decoder.
+  */
+ inst_bytes_ptr->bytes[inst_bytes_ptr->inst_len++] = byte;
+ return (byte);
+}
+
+/* When an exception occurs, the CCPU will save the details in the last instruction
+ * history buffer. This requires a sprintf, and we use the code-bytes data area
+ * to keep this information.
+ * Up to 3 parameters can be in the format.
+ */
+GLOBAL VOID save_last_xcptn_details IFN6(char *, fmt, IUH, a1, IUH, a2, IUH, a3, IUH, a4, IUH, a5 )
+{
+ /* Log an exception into the instruction history ring.  The
+  * formatted message is stored in the entry's code-bytes area
+  * (bytes[]) and 'text' is pointed at it; truncation is reported
+  * but non-fatal.  Takes a fixed five format arguments rather
+  * than varargs.
+  */
+ char buffer[128];
+
+ inst_ptr->cs = getCS_SELECTOR();
+ inst_ptr->eip = getEIP();
+ inst_ptr->big_cs = 0;
+ inst_ptr->synch_count = ccpu_synch_count;
+
+ /* The default message is too long sometimes.
+ * We replace any leading "Exception:-" with "XCPT"
+ */
+
+ if (strncmp(fmt, "Exception:-", 11) == 0)
+ {
+ strcpy(buffer, "XCPT");
+ sprintf(buffer + 4, fmt + 11, a1, a2, a3, a4, a5);
+ }
+ else
+ {
+ sprintf(buffer, fmt, a1, a2, a3, a4, a5);
+ }
+
+ if (strlen(buffer) >= sizeof(inst_ptr->bytes))
+ printf("warning: CCPU XCPTN text message below longer than buffer; truncating:\n -- %s\n", buffer);
+
+ strncpy(&inst_ptr->bytes[0], buffer, sizeof(inst_ptr->bytes) - 2);
+
+ /* explicit terminators cover the case where strncpy truncated
+ without writing a NUL */
+ inst_ptr->bytes[sizeof(inst_ptr->bytes) - 2] = '\n';
+ inst_ptr->bytes[sizeof(inst_ptr->bytes) - 1] = '\0';
+
+ inst_ptr->text = (char *)&inst_ptr->bytes[0];
+
+ if (++inst_ptr >= inst_ptr_wrap)
+ inst_ptr = inst_buffer;
+
+ /* Invalidate the previous prefetch disassembly buffer */
+ prefetch_inst_buffer[0] = '\0';
+}
+
+GLOBAL struct ccpu_last_inst *get_synch_inst_details IFN1(IU32, synch_point)
+{
+ /* Position next_inst_ptr at the first (oldest) history entry
+  * whose synch_count is >= synch_point and return it.  Scans
+  * backwards from the newest entry, wrapping around the ring;
+  * if the whole ring belongs to the synch point, the oldest
+  * entry overall is returned.
+  */
+ /* scan backwards through the buffer until the start of the relevant
+ * synch point block is found.
+ */
+ IS32 n_entries = inst_ptr_wrap - inst_buffer;
+
+ /* inst_ptr is the next write slot; newest entry is one before it */
+ next_inst_ptr = inst_ptr - 1;
+
+ if (next_inst_ptr < inst_buffer)
+ next_inst_ptr = inst_ptr_wrap - 1;
+
+ while (synch_point <= next_inst_ptr->synch_count)
+ {
+ if (--n_entries <= 0)
+ return (next_inst_ptr);
+
+ if (--next_inst_ptr < inst_buffer)
+ next_inst_ptr = inst_ptr_wrap - 1;
+ }
+
+ /* stepped one entry too far back; move forward onto the block */
+ if (++next_inst_ptr >= inst_ptr_wrap)
+ next_inst_ptr = inst_buffer;
+
+ return (next_inst_ptr);
+}
+
+
+/* After a previous call to get_synch_inst_details(), get the next
+ * inst details. This call should be repeated until NULL is returned.
+ */
+GLOBAL struct ccpu_last_inst *get_next_inst_details IFN1(IU32, finish_synch_point)
+{
+ /* Step the history iterator started by get_synch_inst_details()
+  * forward one entry, wrapping around the ring.  Returns NULL when
+  * iteration is exhausted: an unused entry, the write pointer, or
+  * an entry beyond finish_synch_point.
+  */
+ if (next_inst_ptr == (struct ccpu_last_inst *)0)
+ return next_inst_ptr;
+
+ if (++next_inst_ptr >= inst_ptr_wrap)
+ next_inst_ptr = inst_buffer;
+
+ if ((next_inst_ptr->synch_count == 0)
+ || (next_inst_ptr == inst_ptr)
+ || (next_inst_ptr->synch_count > finish_synch_point))
+ {
+ next_inst_ptr = (struct ccpu_last_inst *)0;
+ }
+
+ return next_inst_ptr;
+}
+
+
+GLOBAL VOID init_last_inst_details IFN0()
+{
+ /* Allocate (once) and clear the instruction history ring buffer.
+  * On subsequent calls only the clearing is repeated.
+  */
+ SAVED IBOOL first = TRUE;
+
+ if (first)
+ {
+ struct ccpu_last_inst *ptr;
+ /* ISM32getenv presumably reads CCPU_HISTORY_SIZE from the
+ environment with a default of 256 -- host-provided helper;
+ TODO confirm its semantics */
+ ISM32 size = ISM32getenv("CCPU_HISTORY_SIZE", 256);
+
+ if (size < 100)
+ {
+ sprintf(prefetch_inst_buffer,
+ "CCPU_HISTORY_SIZE of %d is too small",
+ size);
+ FatalError(prefetch_inst_buffer);
+ }
+ ptr = (struct ccpu_last_inst *)host_malloc(size * sizeof(*ptr));
+ if (ptr == (struct ccpu_last_inst *)0)
+ {
+ sprintf(prefetch_inst_buffer,
+ "Unable to malloc memory for CCPU_HISTORY_SIZE of %d",
+ size);
+ FatalError(prefetch_inst_buffer);
+ }
+ inst_buffer = ptr;
+ inst_ptr_wrap = &inst_buffer[size]; /* one past the last entry */
+ first = FALSE;
+ }
+
+ /* reset ring to empty: all entries zeroed, write pointer at start */
+ memset(inst_buffer, 0, ((IHPE)inst_ptr_wrap - (IHPE)inst_buffer));
+ next_inst_ptr = (struct ccpu_last_inst *)0;
+ inst_ptr = inst_buffer;
+}
+
+
+/* When about to pig an interrupt we may need to mark the last
+ * basic block as "invalid" even though it has been executed by
+ * the CCPU.
+ */
+GLOBAL VOID save_last_interrupt_details IFN2(IU8, number, IBOOL, invalidateLastBlock)
+{
+ /* Log interrupt 'number' in the history ring.  If requested,
+  * first mark every entry of the previous synch block as
+  * invalidated, since the other CPU will not have executed it.
+  */
+ if (invalidateLastBlock)
+ {
+ struct ccpu_last_inst *ptr;
+ IU32 synch_count = ccpu_synch_count - 1;
+
+ ptr = get_synch_inst_details(synch_count);
+
+ while (ptr != (struct ccpu_last_inst *)0)
+ {
+ ptr->text = "Intr: invalidated";
+ ptr = get_next_inst_details(synch_count);
+ }
+ }
+ save_last_xcptn_details("Intr: vector %02x", number, 0, 0, 0, 0);
+}
+
+
+LOCAL IBOOL reset_prefetch;
+
+LOCAL IS32 prefetch_byte IFN1(LIN_ADDR, eip)
+{
+ /* Byte-fetch callback handed to the disassembler.  Caches a host
+  * pointer into the current code page (static state below) and
+  * only re-translates when the fetch is non-sequential, a new
+  * prefetch was requested via reset_prefetch, or the cached
+  * pointer would cross the page ceiling.
+  */
+ SAVED IU8 *ip_ptr;
+ SAVED IU8 *ip_ceiling;
+ SAVED LIN_ADDR last_eip;
+ IU8 b;
+
+ /* NOTE: ++last_eip in the condition both tests for sequential
+ access and is harmless otherwise, as last_eip is rewritten
+ below on every call */
+ if (reset_prefetch
+ || (eip != ++last_eip)
+ || !BelowOrEqualCpuPtrsLS8(ip_ptr, ip_ceiling))
+ {
+ IU32 ip_phys_addr;
+
+ /* Ensure that we fault first on the first
+ * byte within a new page -- dasm386 sometimes
+ * looks ahead a couple of bytes.
+ */
+ if (GET_EIP() != eip)
+ {
+ (void)usr_chk_byte((GET_CS_BASE() + eip) & 0xFFFFF000, PG_R);
+ }
+ ip_phys_addr = usr_chk_byte(GET_CS_BASE() + eip, PG_R);
+ ip_ptr = Sas.SasPtrToPhysAddrByte(ip_phys_addr);
+ ip_ceiling = CeilingIntelPageLS8(ip_ptr);
+ reset_prefetch = FALSE;
+ }
+ b = *IncCpuPtrLS8(ip_ptr);
+ last_eip = eip;
+ return ((IS32) b);
+}
+
+/* Use the decoder from dasm386 to read the bytes in a single instruction */
+/* Use the decoder from dasm386 to read the bytes in a single instruction */
+GLOBAL void prefetch_1_instruction IFN0()
+{
+ /* Disassemble the instruction at the current CS:EIP into
+  * prefetch_inst_buffer (retrieved later via
+  * get_prefetched_instruction()), choosing 16- or 32-bit decode
+  * and output format from the CS default-size attribute.
+  */
+ IBOOL bigCode = GET_CS_AR_X() != 0;
+ IU32 eip = GET_EIP();
+ char *fmt, *newline;
+
+ /* force prefetch_byte() to re-translate its code pointer */
+ reset_prefetch = TRUE;
+
+ /* If we take a fault, the EIP pushed will be the
+ * value at the start of the "instruction"
+ * We must update this incase we fault.
+ */
+ CCPU_save_EIP = eip;
+
+ if ( bigCode )
+ {
+ fmt = " %04x:%08x ";
+ newline = "\n ";
+ }
+ else
+ {
+ fmt = " %04x:%04x ";
+ newline = "\n ";
+ }
+ (void)dasm_internal(prefetch_inst_buffer,
+ GET_CS_SELECTOR(),
+ eip,
+ bigCode ? THIRTY_TWO_BIT: SIXTEEN_BIT,
+ eip,
+ prefetch_byte,
+ fmt,
+ newline);
+ assert(strlen(prefetch_inst_buffer) < sizeof(prefetch_inst_buffer));
+}
+
+/* Return to the show_code() routine in the pigger the instruction
+ * we prefetched.
+ */
+GLOBAL char *get_prefetched_instruction IFN0()
+{
+ /* Hand back the disassembly built by prefetch_1_instruction(). */
+ return prefetch_inst_buffer;
+}
+#endif /* PIG */
diff --git a/private/mvdm/softpc.new/base/ccpu386/ccpupig.h b/private/mvdm/softpc.new/base/ccpu386/ccpupig.h
new file mode 100644
index 000000000..1b44a77c8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ccpupig.h
@@ -0,0 +1,167 @@
+/*[
+
+ccpupig.h
+
+LOCAL CHAR SccsID[]="@(#)ccpupig.h 1.26 04/11/95"
+
+C CPU <-> Pigger definitions and interfaces.
+-------------------------------------------
+
+]*/
+
+#ifdef PIG
+
+enum pig_actions
+{
+ CHECK_NONE, /* Check nothing (not yet executed) and carry on */
+ CHECK_ALL, /* Check all and carry on */
+ CHECK_NO_EXEC, /* Check all, but dont carry on */
+ CHECK_SOME_MEM, /* Check memory (other than marked not written) */
+ CHECK_NO_AL, /* Don't check AL */
+ CHECK_NO_AX, /* Don't check AX */
+ CHECK_NO_EAX, /* Don't check EAX */
+ CHECK_NO_A20 /* Don't check A20 wrap (just done OUT 60) */
+};
+
+typedef struct CpuStateREC cpustate_t;
+
+/*
+ * Interface between this cpu and other one being pigged
+ */
+IMPORT enum pig_actions pig_cpu_action;
+IMPORT enum pig_actions last_pig_action;
+IMPORT IBOOL ccpu_pig_enabled;
+
+/*
+ * Mask for arithmetic flags bits not known if PigIgnoreFlags is TRUE
+ * == ( CF | PF | AF | SF | ZF | OV ) == BIT0 | BIT2 | BIT4 | BIT6 | BIT7 | BIT11 )
+ */
+#define ARITH_FLAGS_BITS ( 0x1 | 0x4 | 0x10 | 0x40 | 0x80 | 0x800 )
+
+/*
+ * Mask for interrupts for which the EDL *may* not have correct flags
+ * information.
+ */
+#define NO_FLAGS_EXCEPTION_MASK ( ( 1 << 1 ) | \
+ ( 1 << 3 ) | \
+ ( 1 << 8 ) | \
+ ( 1 << 10 ) | \
+ ( 1 << 11 ) | \
+ ( 1 << 12 ) | \
+ ( 1 << 13 ) | \
+ ( 1 << 14 ) | \
+ ( 1 << 15 ) )
+
+/*
+ * Last Instruction memorizing...
+ */
+
+#define MAX_INTEL_PREFIX (15-1)
+#define MAX_INTEL_BODY 15
+#define MAX_INTEL_BYTES (MAX_INTEL_PREFIX+MAX_INTEL_BODY) /* max size of single intel instruction */
+#define MAX_EXCEPTION_BYTES 40 /* size of buffer used for exception logging */
+
+#define CCPUINST_BUFFER_SIZE ((MAX_INTEL_BYTES > MAX_EXCEPTION_BYTES) ? MAX_INTEL_BYTES : MAX_EXCEPTION_BYTES)
+
+struct ccpu_last_inst {
+ IU16 cs;
+ IU8 inst_len;
+ IBOOL big_cs;
+ IU32 eip;
+ IU32 synch_count;
+ char *text;
+ IU8 bytes[CCPUINST_BUFFER_SIZE];
+};
+
+IMPORT IU32 ccpu_synch_count;
+
+IMPORT VOID save_last_inst_details IPT1(char *, text);
+IMPORT IU8 save_instruction_byte IPT1(IU8, byte);
+IMPORT VOID save_last_xcptn_details IPT6(char *, fmt, IUH, a1, IUH, a2, IUH, a3, IUH, a4, IUH, a5);
+IMPORT VOID init_last_inst_details IPT0();
+IMPORT VOID save_last_interrupt_details IPT2(IU8, number, IBOOL, invalidateLastBlock);
+
+/* Routines to get last instruction information from the CCPU ring buffer */
+
+IMPORT struct ccpu_last_inst *get_synch_inst_details IPT1(IU32, synch_point);
+IMPORT struct ccpu_last_inst *get_next_inst_details IPT1(IU32, synch_point);
+
+/* Routine to return a disassembled form of the last instruction prefetched by the CCPU */
+
+IMPORT char *get_prefetched_instruction IPT0();
+
+/*
+ * Get/Set state of C CCPU (getsetc.c)
+ */
+IMPORT void c_getCpuState IPT1(cpustate_t *, p_state);
+IMPORT void c_setCpuState IPT1(cpustate_t *, p_new_state);
+
+/*
+ * Get NPX regs from A Cpu and set C Cpu (only if necessary)
+ */
+IMPORT void c_checkCpuNpxRegisters IPT0();
+/*
+ * Set NPX regs from given state.
+ */
+IMPORT void c_setCpuNpxRegisters IPT1(cpustate_t *, p_new_state);
+/*
+ *
+ */
+IMPORT void prefetch_1_instruction IPT0();
+
+#if defined(SFELLOW)
+/*
+ * memory-mapped I/O information. Counts number of memory-mapped inputs and
+ * outputs since the last pig synch.
+ */
+#define COLLECT_MMIO_STATS 1
+
+#define LAST_FEW 32 /* must be power of 2 */
+#define LAST_FEW_MASK (LAST_FEW - 1) /* see above */
+
+struct pig_mmio_info \
+{
+#if COLLECT_MMIO_STATS
+ IU32 mm_input_count; /* since last Pig error */
+ IU32 mm_output_count; /* since last Pig error */
+ IU32 mm_input_section_count; /* no. of synch sections unchecked due
+ to M-M input since last Pig error */
+ IU32 mm_output_section_count;/* no. of synch sections containing
+ M-M output since last Pig error */
+ IU32 start_synch_count; /* at last pig error/enabling */
+ struct last_few_inputs
+ {
+ IU32 addr; /* address of memory-mapped input */
+ IU32 synch_count; /* ccpu_synch_count at that input */
+ } last_few_inputs[LAST_FEW];
+ struct last_few_outputs
+ {
+ IU32 addr; /* address of memory-mapped output */
+ IU32 synch_count; /* ccpu_synch_count at that output */
+ } last_few_outputs[LAST_FEW];
+#endif /* COLLECT_MMIO_STATS */
+ IUM16 flags;
+};
+
+/*
+ * flags element definitions
+ */
+#define MM_INPUT_OCCURRED 0x1 /* in current synch section */
+#define MM_OUTPUT_OCCURRED 0x2 /* in current synch section */
+#define MM_INPUT_COUNT_WRAPPED 0x4
+#define MM_OUTPUT_COUNT_WRAPPED 0x8
+#define MM_INPUT_SECTION_COUNT_WRAPPED 0x10
+#define MM_OUTPUT_SECTION_COUNT_WRAPPED 0x20
+
+extern struct pig_mmio_info pig_mmio_info;
+
+#if COLLECT_MMIO_STATS
+extern void clear_mmio_stats IPT0();
+extern void show_mmio_stats IPT0();
+#endif /* COLLECT_MMIO_STATS */
+
+#endif /* SFELLOW */
+
+extern IBOOL IgnoringThisSynchPoint IPT2(IU16, cs, IU32, eip);
+extern IBOOL ignore_page_accessed IPT0();
+#endif /* PIG */
diff --git a/private/mvdm/softpc.new/base/ccpu386/ccpusas4.c b/private/mvdm/softpc.new/base/ccpu386/ccpusas4.c
new file mode 100644
index 000000000..57aa76d5e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ccpusas4.c
@@ -0,0 +1,1960 @@
+/*[
+ * File Name : ccpu_sas4.c
+ *
+ * Derived From : ccpu_sas.c
+ *
+ * Author : Mike Moreton
+ *
+ * Creation Date : Oct 93
+ *
+ * SCCS Version : @(#)ccpusas4.c 1.45 08/31/94
+ *
+ * Purpose
+ * This module contains the SAS functions for a C CPU using the
+ * CPU_40_STYLE interface.
+ *
+ *! (c)Copyright Insignia Solutions Ltd., 1990-3. All rights reserved.
+]*/
+
+
+#include "insignia.h"
+#include "host_def.h"
+
+#ifdef CCPU
+
+#ifdef SEGMENTATION
+
+/*
+ * The following #include specifies the code segment into which this module
+ * will be placed by the MPW C compiler on the Mac II running MultiFinder.
+ */
+#include <SOFTPC_SUPPORT.seg>
+#endif
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include MemoryH
+#include StringH
+#include <xt.h>
+#include <trace.h>
+#include <sas.h>
+#include <sasp.h>
+#include <ccpusas4.h>
+#include <gmi.h>
+#include CpuH
+#include <cpu_vid.h>
+#include <debug.h>
+#include <ckmalloc.h>
+#include <rom.h>
+#include <trace.h>
+#include <ckmalloc.h>
+#include <c_tlb.h>
+#include <c_page.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <timer.h>
+#include <yoda.h>
+
+/********************************************************/
+#define SIXTY_FOUR_K 1024*64
+
+/* global functions & variables */
+
+IU8 *memory_type = NULL;
+
+LOCAL BOOL selectors_set = FALSE;
+LOCAL IU16 code_sel, data_sel;
+
+#define INTEL_SRC 0
+#define HOST_SRC 1
+
+/*
+ * We're going to declare all the functions that we'll need for SAS function
+ * pointers so that we can put them all into the function pointers structure.
+ * This structure will then be passed to SasSetPointers from the sas_init
+ * function in this module.
+ */
+
+GLOBAL TYPE_sas_memory_size c_sas_memory_size;
+GLOBAL TYPE_sas_connect_memory c_sas_connect_memory;
+GLOBAL TYPE_sas_enable_20_bit_wrapping c_sas_enable_20_bit_wrapping;
+GLOBAL TYPE_sas_disable_20_bit_wrapping c_sas_disable_20_bit_wrapping;
+GLOBAL TYPE_sas_twenty_bit_wrapping_enabled c_sas_twenty_bit_wrapping_enabled;
+GLOBAL TYPE_sas_memory_type c_sas_memory_type;
+GLOBAL TYPE_sas_hw_at c_sas_hw_at;
+GLOBAL TYPE_sas_w_at c_sas_w_at;
+GLOBAL TYPE_sas_dw_at c_sas_dw_at;
+GLOBAL TYPE_sas_store c_sas_store;
+GLOBAL TYPE_sas_storew c_sas_storew;
+GLOBAL TYPE_sas_storedw c_sas_storedw;
+GLOBAL TYPE_sas_loads c_sas_loads;
+GLOBAL TYPE_sas_stores c_sas_stores;
+GLOBAL TYPE_sas_loads_no_check c_sas_loads_no_check;
+GLOBAL TYPE_sas_stores_no_check c_sas_stores_no_check;
+GLOBAL TYPE_sas_move_bytes_forward c_sas_move_bytes_forward;
+GLOBAL TYPE_sas_move_words_forward c_sas_move_words_forward;
+GLOBAL TYPE_sas_move_doubles_forward c_sas_move_doubles_forward;
+GLOBAL TYPE_sas_fills c_sas_fills;
+GLOBAL TYPE_sas_fillsw c_sas_fillsw;
+GLOBAL TYPE_sas_fillsdw c_sas_fillsdw;
+GLOBAL TYPE_sas_scratch_address c_sas_scratch_address;
+GLOBAL TYPE_sas_transbuf_address c_sas_transbuf_address;
+GLOBAL TYPE_sas_overwrite_memory c_sas_overwrite_memory;
+GLOBAL TYPE_sas_PWS c_sas_PWS;
+GLOBAL TYPE_sas_PRS c_sas_PRS;
+GLOBAL TYPE_sas_PWS_no_check c_sas_PWS_no_check;
+GLOBAL TYPE_sas_PRS_no_check c_sas_PRS_no_check;
+GLOBAL TYPE_getPtrToLinAddrByte c_GetLinAdd;
+GLOBAL TYPE_getPtrToPhysAddrByte c_GetPhyAdd;
+GLOBAL TYPE_sas_init_pm_selectors c_SasRegisterVirtualSelectors;
+GLOBAL TYPE_sas_PigCmpPage c_sas_PigCmpPage;
+
+LOCAL void c_sas_not_used IPT0();
+
+extern struct SasVector cSasPtrs;
+GLOBAL struct SasVector Sas;
+
+/* local functions */
+LOCAL void write_word IPT2(sys_addr, addr, IU16, wrd);
+LOCAL word read_word IPT1(sys_addr, addr);
+LOCAL IU8 bios_read_byte IPT1(LIN_ADDR, linAddr);
+LOCAL IU16 bios_read_word IPT1(LIN_ADDR, linAddr);
+LOCAL IU32 bios_read_double IPT1(LIN_ADDR, linAddr);
+LOCAL void bios_write_byte IPT2(LIN_ADDR, linAddr, IU8, value);
+LOCAL void bios_write_word IPT2(LIN_ADDR, linAddr, IU16, value);
+LOCAL void bios_write_double IPT2(LIN_ADDR, linAddr, IU32, value);
+
+GLOBAL IU8 *Start_of_M_area = NULL;
+GLOBAL PHY_ADDR Length_of_M_area = 0;
+#ifdef BACK_M
+GLOBAL IU8 *end_of_M = NULL;
+#endif
+
+void (*temp_func) ();
+
+#ifndef EGATEST
+#define READ_SELF_MOD(addr) (SAS_MEM_TYPE)( memory_type[(addr)>>12] )
+#define write_self_mod(addr, type) (memory_type[(addr)>>12] = (IU8)(type))
+
+/*********** 'GMI' CCPU ONLY ***********/
+
+/*
+ * types are : SAS_RAM SAS_VIDEO SAS_ROM SAS_WRAP SAS_INACCESSIBLE
+ */
+#define TYPE_RANGE ((int)SAS_INACCESSIBLE)
+
+#define ROM_byte ((IU8)SAS_ROM)
+#define RAM_byte ((IU8)SAS_RAM)
+
+#define write_b_write_ptrs( offset, func ) ( b_write_ptrs[(offset)] = (func) )
+#define write_w_write_ptrs( offset, func ) ( w_write_ptrs[(offset)] = (func) )
+#define write_b_page_ptrs( offset, func ) ( b_move_ptrs[(offset)] = b_fill_ptrs[(offset)] = (func) )
+#define write_w_page_ptrs( offset, func ) ( w_move_ptrs[(offset)] = w_fill_ptrs[(offset)] = (func) )
+#define init_b_write_ptrs( offset, func ) ( b_write_ptrs[(offset)] = (func) )
+#define init_w_write_ptrs( offset, func ) ( w_write_ptrs[(offset)] = (func) )
+#define init_b_page_ptrs( offset, func ) ( b_move_ptrs[(offset)] = b_fill_ptrs[(offset)] = (func) )
+#define init_w_page_ptrs( offset, func ) ( w_move_ptrs[(offset)] = w_fill_ptrs[(offset)] = (func) )
+#define read_b_write_ptrs( offset ) ( b_write_ptrs[(offset)] )
+#define read_w_write_ptrs( offset ) ( w_write_ptrs[(offset)] )
+#define read_b_page_ptrs( offset ) ( b_move_ptrs[(offset)] )
+#define read_w_page_ptrs( offset ) ( w_move_ptrs[(offset)] )
+#define read_b_move_ptrs( offset ) ( b_move_ptrs[(offset)] )
+#define read_w_move_ptrs( offset ) ( w_move_ptrs[(offset)] )
+#define read_b_fill_ptrs( offset ) ( b_fill_ptrs[(offset)] )
+#define read_w_fill_ptrs( offset ) ( w_fill_ptrs[(offset)] )
+
+/*
+ * The main gmi data structures are defined here
+ */
+void (*(b_write_ptrs[TYPE_RANGE])) (); /* byte write function */
+void (*(w_write_ptrs[TYPE_RANGE])) (); /* word write function */
+void (*(b_fill_ptrs[TYPE_RANGE])) (); /* byte str fill func */
+void (*(w_fill_ptrs[TYPE_RANGE])) (); /* word str fill func */
+void (*(b_move_ptrs[TYPE_RANGE])) (); /* byte str write func */
+void (*(w_move_ptrs[TYPE_RANGE])) (); /* word str write func */
+
+#endif /* EGATEST */
+
+
+/*(
+ *======================= c_SasRegisterVirtualSelectors =========================
+ *
+ * Purpose
+ * The Sas virtualisation handler requires a code+data selector which
+ * are available in protected mode (when called from say the Insignia
+ * host windows driver.
+ * Our current experimental implementation does not worry about how
+ * long these live.
+ *
+ * It is expected that this functionality should be moved from the
+ * windows driver itself, to the Insignia VxD so that correct
+ * initialisation/termination can be handled.
+)*/
+
+/*
+ * Build two LDT descriptors for BIOS virtualisation when not in V86 mode:
+ * a flat writable data segment for 'sel1' and a code segment based at
+ * 0xF0000 for 'sel2'.  The selectors are remembered in data_sel/code_sel
+ * and selectors_set is raised so later callers know they are valid.
+ *
+ * Returns TRUE (this implementation cannot fail).
+ * Fix: the original was declared IBOOL but fell off the end without a
+ * return statement, which is undefined behaviour - callers received an
+ * indeterminate value.
+ */
+GLOBAL IBOOL c_SasRegisterVirtualSelectors IFN2(IU16, sel1, IU16, sel2)
+{
+ IU32 addr;
+
+ always_trace0 ("c_SasRegisterVirtualSelectors called\n");
+
+ /* Descriptor slot: LDT base + selector index (RPL/TI bits masked off) */
+ addr = c_getLDT_BASE() + ((data_sel = sel1) & ~7);
+
+ /* Construct a flat writable data segment */
+
+ sas_storedw (addr, 0x0000FFFF);
+ sas_storedw (addr+4, 0x008ff300);
+
+ addr = c_getLDT_BASE() + ((code_sel = sel2) & ~7);
+
+ /* Construct a code segment with base 0xf0000 and large limits */
+
+ sas_storedw (addr, 0x0000FFFF);
+ sas_storedw (addr+4, 0x008f9f0f);
+
+ selectors_set = TRUE;
+
+ always_trace2 ("Set code_sel = %x, data_sel = %x\n",
+ code_sel, data_sel);
+
+ return (TRUE); /* fix: previously no value was returned */
+}
+
+/*(
+ *========================== checkAccess ===================================
+ * checkAccess
+ *
+ * Purpose
+ * This function is used in debugging to spot writes to an area
+ * of memory. Note that it is controlled by global variables that
+ * must be set elsewhere, or by a debugger.
+ *
+ * Input
+ * addr The physical intel address to write to
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * Actually a macro that is nothing if CHECK_ACCESS isn't defined.
+)*/
+
+#ifndef CHECK_ACCESS
+#define checkAccess(addr)
+#else
+/*
+ * Debug aid: trace any physical write landing in the half-open range
+ * [lowCheckAccess, highCheckAccess).  Both bounds default to 0 (i.e.
+ * disabled) and are intended to be set from a debugger.
+ * Fix: corrected the misspelt trace message ("addres" -> "address").
+ */
+GLOBAL PHY_ADDR lowCheckAccess = 0;
+GLOBAL PHY_ADDR highCheckAccess = 0;
+#define checkAccess(addr) \
+ if ((addr < highCheckAccess) && (addr >= lowCheckAccess)) { \
+ always_trace1("Write access break point - address 0x%.8x", \
+ addr); \
+ }
+#endif /* !CHECK_ACCESS else */
+
+#ifndef PROD
+/*
+ * This function is useful for calling from a debugger!
+ */
+
+/*
+ * Debugger aid (non-PROD only): dump the 4K-page memory-type map to
+ * stderr, printing one line per run of pages with identical type
+ * (start address followed by the type name), then an end-of-memory line.
+ */
+GLOBAL void
+DumpMemType()
+{
+ SAS_MEM_TYPE currentType;
+ PHY_ADDR numEntries; /* number of entries in the table */
+ PHY_ADDR currEntry;
+
+ currentType = SAS_DANGEROUS; /* memory_type should never have this */
+ numEntries = c_sas_memory_size() >> 12;
+
+ for (currEntry = 0; currEntry < numEntries; currEntry++) {
+ if (memory_type[currEntry] != currentType) {
+ fprintf(stderr,"0x%.8x %s\n", currEntry << 12,
+ SAS_TYPE_TO_STRING(memory_type[currEntry]));
+ currentType = memory_type[currEntry];
+ }
+ }
+ fprintf(stderr,"0x%.8x End of Memory\n", c_sas_memory_size());
+
+}
+#endif /* ndef PROD */
+
+
+/*********** INIT & ADMIN FUNCS ***********/
+/*(
+ *========================== readSelfMod ===================================
+ * readSelfMod
+ *
+ * Purpose
+ * This function reads the self modify table, and returns the
+ * memory type. It will also indicate whether there is a type
+ * boundary within the length specified.
+ *
+ * Input
+ * addr The physical intel address to read from
+ * typeSize The size in bytes of the item to be read
+ *
+ * Outputs
+ * Memory type.
+ *
+ * Description
+ * We check that the memory type for both ends of the type is the same.
+)*/
+
+/*
+ * Look up the memory type of an access of 'typeSize' bytes at 'addr'.
+ * If the first and last byte fall in 4K pages of differing types the
+ * access straddles a type boundary and SAS_DANGEROUS is returned so the
+ * caller splits it into byte operations.
+ */
+LOCAL SAS_MEM_TYPE
+readSelfMod IFN2(PHY_ADDR, addr, IUM8, typeSize)
+{
+ SAS_MEM_TYPE startType;
+
+ startType = READ_SELF_MOD(addr);
+
+ if (startType == READ_SELF_MOD(addr + typeSize - 1))
+ return(startType);
+ else
+ return(SAS_DANGEROUS);
+}
+
+/*(
+ *========================== SasSetPointers ===================================
+ * SasSetPointers
+ *
+ * Purpose
+ * This function is used to install a set of function pointers.
+ *
+ * Input
+ * An array of pointers to use.
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * Just do a memory copy of the pointers.
+)*/
+
+/* Install a complete set of SAS function pointers by structure copy. */
+GLOBAL void
+SasSetPointers IFN1(struct SasVector *, newPointers)
+{
+ memcpy(&Sas, newPointers, sizeof(struct SasVector));
+}
+
+
+/* Init the sas system - malloc the memory & load the roms */
+
+
+/* need to put some of this in the ROMs! */
+
+/*
+ * Initialise the SAS subsystem for 'size' bytes of Intel memory:
+ * install the C-CPU SAS vector, pre-grow the scratch buffer to 64K,
+ * obtain (or malloc) the M area, optionally pre-fill it from the
+ * CPU_INITIALISE_MEMORY environment variable, allocate the per-4K-page
+ * memory-type table, mark everything RAM, then load the ROMs (which
+ * re-mark their own pages).
+ */
+GLOBAL void
+sas_init IFN1(PHY_ADDR, size)
+{
+ IU32 required_mem;
+ IUM16 ipatch;
+ IU8 *ptr;
+ char *env;
+
+ /*
+ * Set the SAS pointers to point to the functions in this
+ * module, and initialise the scratch buffer to 64K
+ */
+
+ SasSetPointers(&cSasPtrs);
+ (void)c_sas_scratch_address(SIXTY_FOUR_K);
+
+ /* do the host sas */
+
+ required_mem = size + NOWRAP_PROTECTION;
+ /* Fall back to a plain malloc if the host has no special M support. */
+ Start_of_M_area = (IU8 *) host_sas_init(size);
+ if (Start_of_M_area == NULL) {
+ check_malloc(Start_of_M_area, required_mem, IU8);
+ }
+ env = getenv("CPU_INITIALISE_MEMORY");
+ if (env != NULL)
+ {
+ int zap = strtol(env, (char **)0, 16);
+ memset(Start_of_M_area, zap, size); /* Fill with user supplied byte */
+ }
+ /* One type byte per 4K page, covering the wrap-protection pages too. */
+ if (!memory_type)
+ check_malloc(memory_type, ((size + NOWRAP_PROTECTION) >> 12), IU8);
+
+ {
+ IMPORT IU8 *CCPU_M;
+
+#ifdef BACK_M
+ CCPU_M = Start_of_M_area + size - 1;
+#else
+ CCPU_M = Start_of_M_area;
+#endif /* BACK_M */
+ }
+
+ /*
+ * Make the entire memory space RAM. The ROM load routines
+ * will change some of this to being ROM.
+ */
+
+ c_sas_connect_memory(0, size - 1, SAS_RAM);
+
+ Length_of_M_area = size;
+#ifdef BACK_M
+ end_of_M = Start_of_M_area + Length_of_M_area -1;
+#endif
+
+ /* init the ROM (load the bios roms etc) */
+
+#ifndef EGATEST
+ rom_init();
+#endif /* EGATEST */
+
+ copyROM();
+}
+
+/* finish the sas system -basically free up the M space prior to reallocing it */
+/*
+ * Shut down SAS: if the host layer says memory was ours to free
+ * (host_sas_term() non-NULL), release the M area and type table so a
+ * later sas_init can reallocate them.
+ */
+GLOBAL void
+sas_term IFN0()
+{
+ if (host_sas_term() != NULL) {
+ if (Start_of_M_area)
+ free(Start_of_M_area);
+ if (memory_type)
+ free(memory_type);
+ memory_type = NULL;
+ }
+
+ Start_of_M_area = NULL;
+}
+
+/* return the size of the sas */
+/* Return the size in bytes of the emulated Intel memory (M) area. */
+GLOBAL PHY_ADDR
+c_sas_memory_size IFN0()
+{
+ return (Length_of_M_area);
+}
+
+/*********** GMI TYPE FUNCS ***********/
+/*
+ * Sets all intel addresses in give range to the specified memory type
+ * for the ccpu this writes to memory_type.
+ * Callers of this can be a bit confused about the meaning of the
+ * high parameter. e.g. for a range of 1000 - 2fff inclusive, they're
+ * not sure if high should be 2fff or 3000. It should be 2fff, but we
+ * watch out for people who've got it wrong, and put them right, poor
+ * little dears.
+ */
+/*
+ * Mark the inclusive physical range [low, high] with the given memory
+ * type, at 4K-page granularity.  A 'high' on an exact page boundary is
+ * assumed to be the common off-by-one (caller meant high-1) and is
+ * pulled back one byte, per the commentary above.
+ */
+GLOBAL void
+c_sas_connect_memory IFN3(PHY_ADDR, low, PHY_ADDR, high, SAS_MEM_TYPE, type)
+{
+ if ((high & 0xfff) == 0) {
+ if (high)
+ high--;
+ }
+ sub_note_trace3(SAS_VERBOSE, "Connect %s from 0x%lx to 0x%lx",
+ SAS_TYPE_TO_STRING(type), low, high);
+ memset(&memory_type[low >> 12], type, (high >> 12) - (low >> 12) + 1);
+}
+
+/* returns memory type for specified addr */
+/* Return the memory type of the 4K page containing 'addr'. */
+GLOBAL SAS_MEM_TYPE
+c_sas_memory_type IFN1(PHY_ADDR, addr)
+{
+ return(memory_type[ addr >> 12 ]);
+}
+
+/* clears any compiled code from the given range */
+/* for the ccpu this doesn't do anything - the C CPU never compiles code */
+GLOBAL void
+c_sas_overwrite_memory IFN2(PHY_ADDR, addr, PHY_ADDR, length)
+{
+ UNUSED(addr);
+ UNUSED(length);
+}
+
+/*********** WRAPPING ***********/
+/* enable 20 bit wrapping */
+/* Restrict physical addresses to 20 bits (8086-style A20 wrap). */
+GLOBAL void
+c_sas_enable_20_bit_wrapping IFN0()
+{
+ SasWrapMask = 0xfffff;
+}
+
+/* disable 20 bit wrapping - full 32-bit physical addressing */
+GLOBAL void
+c_sas_disable_20_bit_wrapping IFN0()
+{
+ SasWrapMask = 0xffffffff;
+}
+
+/* TRUE when the A20 wrap mask is currently in force. */
+GLOBAL IBOOL
+c_sas_twenty_bit_wrapping_enabled IFN0()
+{
+ return (SasWrapMask == 0xfffff);
+}
+
+/*(
+ *========================== phyR ===================================
+ * phyR
+ *
+ * Purpose
+ * This is the generic physical read function and takes parameters
+ * of any size (well up to an IU32 that is).
+ *
+ * Input
+ * addr The physical intel address to read from
+ * typeSize The size in bytes of the item to be read
+ * vidFP A video read function pointer of the appropriate size.
+ * name "byte" for byte, etc.
+ *
+ * Outputs
+ * An IU32 that should be masked to get the right bits.
+ *
+ * Description
+ * We check for out of memory references, VIDEO and inaccessible references
+ * and also split reads that span a memory type boundary.
+)*/
+typedef IU32 (*VID_READ_FP) IPT1(PHY_ADDR, offset);
+
+/*
+ * Generic physical read of 'typeSize' bytes (1..4) at 'addr', returned
+ * little-endian in an IU32 that the caller masks to size.  Handles A20
+ * wrapping, reads beyond the end of M (returns a junk pattern,
+ * overridable via BEYOND_MEMORY_VALUE), video memory via 'vidFP',
+ * inaccessible pages, and accesses straddling a type boundary
+ * (SAS_DANGEROUS: recurse one byte at a time).  'name' is only used in
+ * trace messages.
+ */
+LOCAL IU32
+phyR IFN4(PHY_ADDR, addr, IUM8, typeSize, VID_READ_FP, vidFP, char *, name)
+{
+ IUM8 byte;
+ IUM32 retVal;
+
+ addr &= SasWrapMask;
+
+ /* NOTE(review): last byte read is addr+typeSize-1, so this bound is
+ * conservative by two bytes - confirm before tightening. */
+ if ((addr + typeSize + 1) >= Length_of_M_area) {
+ SAVED IBOOL first = TRUE;
+ SAVED IU32 junk_value = 0xfefefefe;
+ if (first)
+ {
+ char *env = getenv("BEYOND_MEMORY_VALUE");
+ if (env != NULL)
+ {
+ junk_value = strtol(env, (char **)0, 16);
+ always_trace1("phyR: using %08x as value to read from outside physical M", junk_value)
+ }
+ first = FALSE;
+ }
+ always_trace2("phyR - %s read from outside physical M - address 0x%0x", name, addr)
+ return(junk_value);
+ }
+
+ switch (readSelfMod(addr, typeSize)) {
+ case SAS_DANGEROUS:
+ /* Access spans a type boundary: re-read byte by byte and
+ * assemble the result little-endian. */
+ retVal = 0;
+ for (byte = 0; byte < typeSize; byte++) {
+ retVal = retVal
+ +((IUM32)phyR(addr, 1, read_pointers.b_read,
+ "byte") << (byte * 8));
+ addr++;
+ }
+ return(retVal);
+#ifdef EGG
+ case SAS_VIDEO:
+ return ((*vidFP)(addr));
+ break;
+#endif /* EGG */
+
+ case SAS_INACCESSIBLE:
+ return (0xffffffff);
+
+ case SAS_ROM:
+ case SAS_RAM:
+ default:
+ /*
+ * Pick-up the bytes. This could be optimised, but
+ * we have to take account of BACK_M, endianness,
+ * and misaligned accesses on RISC hosts. Just
+ * keep it simple for the moment!
+ */
+
+ addr = addr + typeSize - 1; /* move to last byte */
+ retVal = 0;
+
+ while (typeSize > 0) {
+ retVal = retVal << 8;
+ retVal += *(c_GetPhyAdd(addr));
+ addr -= 1;
+ typeSize -= 1;
+ }
+ return(retVal);
+ }
+}
+/*(
+ *========================== phy_rX ===================================
+ * phy_rX
+ *
+ * Purpose
+ * These are the physical read functions.
+ *
+ * Input
+ * addr The physical intel address to read from
+ *
+ * Outputs
+ * The value read
+ *
+ * Description
+ * Simply call the generic function with the right bits.
+)*/
+
+/* Physical byte read: delegate to phyR with byte-sized parameters. */
+GLOBAL IU8
+phy_r8 IFN1(PHY_ADDR, addr)
+{
+ IU8 retVal;
+
+ retVal = (IU8)phyR(addr, sizeof(IU8), read_pointers.b_read, "byte");
+ sub_note_trace2(SAS_VERBOSE, "phy_r8 addr=%x, val=%x\n", addr, retVal);
+ return(retVal);
+}
+
+
+/* Physical word read: delegate to phyR with word-sized parameters. */
+GLOBAL IU16
+phy_r16 IFN1(PHY_ADDR, addr)
+{
+ IU16 retVal;
+
+ retVal = (IU16)phyR(addr, sizeof(IU16), read_pointers.w_read, "word");
+ sub_note_trace2(SAS_VERBOSE, "phy_r16 addr=%x, val=%x\n", addr, retVal);
+ return(retVal);
+}
+
+
+/*
+ * Physical dword read, composed from two word reads (low word first)
+ * because there is no dword interface to the video layer yet.
+ */
+GLOBAL IU32
+phy_r32 IFN1(PHY_ADDR, addr)
+{
+ /*
+ * MIKE! This needs changing when we have a dword interface to the
+ * video.
+ */
+
+ IU16 low, high;
+ low = (IU16)phyR(addr, sizeof(IU16), read_pointers.w_read, "word");
+ high = (IU16)phyR(addr + 2, sizeof(IU16), read_pointers.w_read, "word");
+
+ return(((IU32)high << 16) + low);
+}
+
+/*(
+ *======================= c_sas_PWS ================================
+ * c_sas_PWS
+ *
+ * Purpose
+ * This function writes a block of memory from into Intel memory
+ * from host memory. It is the physical address equivalent of
+ * sas_stores.
+ *
+ * Input
+ * dest Intel physical address
+ * src host address
+ * length number of IU8s to transfer
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * Just call phy_w8 lots of times.
+)*/
+
+/* Physical-address block store: copy 'length' bytes from host memory
+ * at 'src' into Intel memory at 'dest', one phy_w8 at a time. */
+GLOBAL void
+c_sas_PWS IFN3(PHY_ADDR, dest, IU8 *, src, PHY_ADDR, length)
+{
+ while (length--) {
+ phy_w8(dest, *src);
+ dest++;
+ src++;
+ }
+}
+
+/*(
+ *======================= c_sas_PWS_no_check =========================
+ * c_sas_PWS_no_check
+ *
+ * Purpose
+ * This function writes a block of memory from into Intel memory
+ * from host memory. It is the physical address equivalent of
+ * sas_stores_no_check.
+ *
+ * Input
+ * dest Intel physical address
+ * src host address
+ * length number of IU8s to transfer
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * Just call c_sas_PWS()
+)*/
+/* Unchecked variant of c_sas_PWS - the C CPU does no extra checking,
+ * so simply forward. */
+GLOBAL void
+c_sas_PWS_no_check IFN3(PHY_ADDR, dest, IU8 *, src, PHY_ADDR, length)
+{
+ c_sas_PWS(dest, src, length);
+}
+
+
+/*(
+ *======================= c_sas_PRS ================================
+ * c_sas_PRS
+ *
+ * Purpose
+ * This function reads a block of memory from Intel memory
+ * into host memory. It is the physical address equivalent of
+ * sas_loads.
+ *
+ * Input
+ * src Intel physical address
+ * dest host address
+ * length number of IU8s to transfer
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * Just call phy_r8 lots of times.
+)*/
+
+/* Physical-address block load: copy 'length' bytes from Intel memory
+ * at 'src' into host memory at 'dest', one phy_r8 at a time. */
+GLOBAL void
+c_sas_PRS IFN3(PHY_ADDR, src, IU8 *, dest, PHY_ADDR, length)
+{
+ while (length--) {
+ *dest = phy_r8(src);
+ dest++;
+ src++;
+ }
+}
+
+
+/*(
+ *======================= c_sas_PRS_no_check ==========================
+ * c_sas_PRS_no_check
+ *
+ * Purpose
+ * This function reads a block of memory from Intel memory
+ * into host memory. It is the physical address equivalent of
+ * sas_loads_no_check.
+ *
+ * Input
+ * src Intel physical address
+ * dest host address
+ * length number of IU8s to transfer
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * Just call c_sas_PRS.
+)*/
+
+/* Unchecked variant of c_sas_PRS - simply forward. */
+GLOBAL void
+c_sas_PRS_no_check IFN3(PHY_ADDR, src, IU8 *, dest, PHY_ADDR, length)
+{
+ c_sas_PRS(src, dest, length);
+}
+
+
+/* Linear-address byte read, routed through the BIOS-virtualising path. */
+GLOBAL IU8
+c_sas_hw_at IFN1(LIN_ADDR, addr)
+{
+ return (bios_read_byte(addr));
+}
+
+
+/* return the word (short) at the specified address.
+ * A word whose two bytes straddle a 4K page boundary is split into two
+ * byte reads, since each byte may translate to a different page. */
+GLOBAL IU16
+c_sas_w_at IFN1(LIN_ADDR, addr)
+{
+ if ((addr & 0xFFF) <= 0xFFE)
+ return (bios_read_word(addr));
+ else
+ {
+ return (bios_read_byte(addr) | ((IU16)bios_read_byte(addr+1) << 8));
+ }
+}
+
+/* return the double word (long) at the address passed.
+ * Split into two word reads when it straddles a 4K page boundary. */
+GLOBAL IU32
+c_sas_dw_at IFN1(LIN_ADDR, addr)
+{
+ if ((addr & 0xFFF) <= 0xFFC)
+ return (bios_read_double(addr));
+ else
+ {
+ return (bios_read_word(addr) | ((IU32)bios_read_word(addr+2) << 16));
+ }
+}
+
+/* store a byte at the given address */
+
+/* store a byte at the given address */
+
+/*
+ * Physical byte write: apply the A20 wrap mask, honour the optional
+ * checkAccess debug break, then dispatch on the page's memory type -
+ * plain store for RAM (and LIM, with page notification), silently
+ * ignored for ROM/inaccessible, and routed through the registered
+ * write handler for video.
+ */
+GLOBAL void phy_w8
+IFN2(PHY_ADDR, addr, IU8, val)
+{
+ sys_addr temp_val;
+
+ sub_note_trace2(SAS_VERBOSE, "c_sas_store addr=%x, val=%x\n", addr, val);
+
+ addr &= SasWrapMask;
+ checkAccess(addr);
+
+ if (addr < Length_of_M_area) {
+ temp_val = readSelfMod(addr, sizeof(IU8));
+
+ switch (temp_val) {
+ case SAS_RAM:
+ (*(IU8 *) c_GetPhyAdd(addr)) = val;
+ break;
+
+#ifdef LIM
+ case SAS_MM_LIM:
+ (*(IU8 *) c_GetPhyAdd(addr)) = val;
+ LIM_b_write(addr);
+ break;
+#endif
+
+ case SAS_INACCESSIBLE:
+ case SAS_ROM:
+ /* No ROM_fix_sets !!! Yeh !!! */
+ break;
+
+ default:
+ printf("Unknown SAS type\n");
+ force_yoda();
+ /* deliberate fallthrough: treat unknown type as video */
+
+ case SAS_VIDEO:
+ temp_func = read_b_write_ptrs(temp_val);
+ (*temp_func) (addr, val);
+ break;
+ }
+
+ } else
+ printf("Byte written outside M %x\n", addr);
+}
+
+/* Unchecked variant of phy_w8 - simply forward. */
+GLOBAL void phy_w8_no_check
+IFN2(PHY_ADDR, addr, IU8, val)
+{
+ phy_w8( addr, val );
+}
+
+/* Linear-address byte store, routed through the BIOS-virtualising path. */
+GLOBAL void c_sas_store
+IFN2(LIN_ADDR, addr, IU8, val)
+{
+ sub_note_trace2(SAS_VERBOSE, "c_sas_store addr=%x, val=%x\n", addr, val);
+ bios_write_byte(addr, val);
+}
+
+/* store a word at the given address */
+/*
+ * Physical word write: same type dispatch as phy_w8 but storing both
+ * bytes via write_word for RAM/LIM and using the word-sized video
+ * handler.  The bounds check covers both bytes of the word.
+ */
+GLOBAL void
+phy_w16 IFN2(PHY_ADDR, addr, IU16, val)
+{
+ sys_addr temp_val;
+
+ sub_note_trace2(SAS_VERBOSE, "c_sas_storew addr=%x, val=%x\n", addr, val);
+
+ addr &= SasWrapMask;
+ checkAccess(addr);
+
+ if ((addr + 1) < Length_of_M_area) {
+ temp_val = readSelfMod(addr, sizeof(IU16));
+
+ switch (temp_val) {
+ case SAS_RAM:
+ write_word(addr, val);
+ break;
+
+#ifdef LIM
+ case SAS_MM_LIM:
+ write_word(addr, val);
+ LIM_w_write(addr);
+ break;
+#endif
+
+ case SAS_INACCESSIBLE:
+ case SAS_ROM:
+ /* No ROM_fix_sets !!! Yeh !!! */
+ break;
+
+ default:
+ printf("Unknown Sas type\n");
+ force_yoda();
+ /* deliberate fallthrough: treat unknown type as video */
+
+ case SAS_VIDEO:
+ temp_func = read_w_write_ptrs(temp_val);
+ (*temp_func) (addr, val);
+ break;
+ }
+
+ } else
+ printf("Word written outside M %x\n", addr);
+}
+
+/* Unchecked variant of phy_w16 - simply forward. */
+GLOBAL void phy_w16_no_check
+IFN2(PHY_ADDR, addr, IU16, val)
+{
+ phy_w16( addr, val );
+}
+
+/* Physical dword write, composed from two word writes (low word first). */
+GLOBAL void
+phy_w32 IFN2(PHY_ADDR, addr, IU32, val)
+{
+ phy_w16(addr, (IU16)val);
+ phy_w16(addr + 2, (IU16)(val >> 16));
+}
+
+
+/* Unchecked variant of phy_w32 - simply forward. */
+GLOBAL void phy_w32_no_check
+IFN2(PHY_ADDR, addr, IU32, val)
+{
+ phy_w32( addr, val );
+}
+
+
+/* store a word at the given address */
+/* store a word at the given address.
+ * Words straddling a 4K page boundary are split into byte stores, since
+ * each byte may translate to a different page. */
+GLOBAL void
+c_sas_storew IFN2(LIN_ADDR, addr, IU16, val)
+{
+ sub_note_trace2(SAS_VERBOSE, "c_sas_storew addr=%x, val=%x\n", addr, val);
+ if ((addr & 0xFFF) <= 0xFFE)
+ bios_write_word(addr, val);
+ else
+ {
+ bios_write_byte(addr+1, val >> 8);
+ bios_write_byte(addr, val & 0xFF);
+ }
+}
+
+/* store a double word at the given address.
+ * Split into word stores when it straddles a 4K page boundary. */
+GLOBAL void c_sas_storedw
+IFN2(LIN_ADDR, addr, IU32, val)
+{
+ sub_note_trace2(SAS_VERBOSE, "c_sas_storedw addr=%x, val=%x\n", addr, val);
+
+ if ((addr & 0xFFF) <= 0xFFC)
+ bios_write_double(addr, val);
+ else
+ {
+ bios_write_word(addr+2, val >> 16);
+ bios_write_word(addr, val & 0xFFFF);
+ }
+}
+
+/*********** STRING OPS ***********/
+/* load a string from M */
+/* Load 'len' bytes from Intel linear address 'src' into host buffer
+ * 'dest', byte by byte (each byte may cross a page boundary). */
+GLOBAL void c_sas_loads
+IFN3(LIN_ADDR, src, IU8 *, dest, LIN_ADDR, len)
+{
+ /*
+ * This is a linear address op, so we have to call the byte operation
+ * lots of times.
+ */
+
+ IU8 *destP;
+
+ for (destP = dest; destP < (dest + len); destP++) {
+ *destP = c_sas_hw_at(src);
+ src++;
+ }
+}
+
+/* Unchecked variant - simply forward. */
+GLOBAL void c_sas_loads_no_check
+IFN3(LIN_ADDR, src, IU8 *, dest, LIN_ADDR, len)
+{
+ c_sas_loads(src, dest, len);
+}
+
+/* write a string into M */
+/* Store 'len' bytes from host buffer 'src' to Intel linear address
+ * 'dest', byte by byte (each byte may cross a page boundary). */
+GLOBAL void c_sas_stores
+IFN3(LIN_ADDR, dest, IU8 *, src, LIN_ADDR, len)
+{
+ /*
+ * This is a linear address op, so we have to call the byte operation
+ * lots of times.
+ */
+
+ IU8 *srcP;
+ LIN_ADDR savedDest; /* NOTE(review): set but never read below */
+
+ sub_note_trace3(SAS_VERBOSE, "c_sas_stores dest=%x, src=%x, len=%d\n", dest, src, len);
+
+ savedDest = dest;
+ for (srcP = src; srcP < (src + len); srcP++) {
+ c_sas_store(dest, *srcP);
+ dest++;
+ }
+}
+
+/* Unchecked variant - simply forward. */
+GLOBAL void c_sas_stores_no_check
+IFN3(LIN_ADDR, dest, IU8 *, src, LIN_ADDR, len)
+{
+ c_sas_stores(dest, src, len);
+}
+
+/*********** MOVE OPS ***********/
+/* move bytes from src to dest where src & dest are the low intel addresses */
+/* of the affected areas */
+
+/*
+ * we can use straight memcpys here because we know that M is either all
+ * forwards or
+ */
+/* backwards */
+/* Copy 'len' bytes forward within Intel memory, low address first,
+ * via linear-address byte reads and writes. */
+GLOBAL void c_sas_move_bytes_forward
+IFN3(sys_addr, src, sys_addr, dest,
+ sys_addr, len)
+{
+ LIN_ADDR offset;
+
+ for (offset = 0; offset < len; offset++) {
+ c_sas_store(dest + offset, c_sas_hw_at(src + offset));
+ }
+}
+
+/* move words from src to dest as above */
+/* Copy 'len' WORDS forward within Intel memory ('len' is a count of
+ * words, converted to bytes below), low address first. */
+GLOBAL void c_sas_move_words_forward
+IFN3(LIN_ADDR, src, LIN_ADDR, dest,
+ LIN_ADDR, len)
+{
+ LIN_ADDR offset;
+
+ len = len * 2; /* convert to bytes */
+ for (offset = 0; offset < len; offset += 2) {
+ c_sas_storew(dest + offset, c_sas_w_at(src + offset));
+ }
+}
+
+/* Copy 'len' DWORDS forward within Intel memory ('len' is a count of
+ * doublewords, converted to bytes below), low address first. */
+GLOBAL void c_sas_move_doubles_forward
+IFN3(LIN_ADDR, src, LIN_ADDR, dest,
+ LIN_ADDR, len)
+{
+ LIN_ADDR offset;
+
+ len = len * 4; /* convert to bytes */
+ for (offset = 0; offset < len; offset += 4) {
+ c_sas_storedw(dest + offset, c_sas_dw_at(src + offset));
+ }
+}
+
+/* backwards versions not used */
+/* backwards versions not used - each traps via c_sas_not_used if called */
+GLOBAL void c_sas_move_bytes_backward IFN3(sys_addr, src, sys_addr, dest, sys_addr, len)
+{
+ UNUSED(src);
+ UNUSED(dest);
+ UNUSED(len);
+ c_sas_not_used();
+}
+
+GLOBAL void c_sas_move_words_backward IFN3(LIN_ADDR, src, LIN_ADDR, dest, LIN_ADDR, len)
+{
+ UNUSED(src);
+ UNUSED(dest);
+ UNUSED(len);
+ c_sas_not_used();
+}
+
+GLOBAL void c_sas_move_doubles_backward IFN3(LIN_ADDR, src, LIN_ADDR, dest, LIN_ADDR, len)
+{
+ UNUSED(src);
+ UNUSED(dest);
+ UNUSED(len);
+ c_sas_not_used();
+}
+
+
+/*********** FILL OPS ***********/
+/*
+ * Fill an area with bytes (IU8s) of the passed value.
+ */
+/* Fill 'len' bytes of Intel memory at linear address 'dest' with 'val'. */
+GLOBAL void c_sas_fills
+IFN3(LIN_ADDR, dest, IU8 , val, LIN_ADDR, len)
+ {
+ /*
+ * This is a linear address op, so just call the byte operation
+ * lots of times.
+ */
+
+ LIN_ADDR i;
+
+ sub_note_trace3(SAS_VERBOSE, "c_sas_fills dest=%x, val=%x, len=%d\n", dest, val, len);
+
+ for (i = 0; i < len; i++)
+ {
+ c_sas_store(dest, val);
+ dest++;
+ }
+ }
+
+/* fill an area with words (IU16s) of the passed value */
+
+/* Fill 'len' WORDS of Intel memory at linear address 'dest' with 'val'. */
+GLOBAL void c_sas_fillsw
+IFN3(LIN_ADDR, dest, IU16, val, LIN_ADDR, len)
+ {
+ /*
+ * This is a linear address op, so just call the word operation
+ * lots of times.
+ */
+
+ LIN_ADDR i;
+
+ sub_note_trace3(SAS_VERBOSE, "c_sas_fillsw dest=%x, val=%x, len=%d\n", dest, val, len);
+
+ for (i = 0; i < len; i++)
+ {
+ c_sas_storew(dest, val);
+ dest += 2;
+ }
+ }
+
+/* Fill Intel memory with 32 bit values */
+
+/* Fill 'len' DWORDS of Intel memory at linear address 'dest' with 'val'. */
+GLOBAL void c_sas_fillsdw
+IFN3(LIN_ADDR, dest, IU32, val, LIN_ADDR, len)
+ {
+ /*
+ * This is a linear address op, so just call the double word operation
+ * lots of times.
+ */
+
+ LIN_ADDR i;
+
+ sub_note_trace3(SAS_VERBOSE, "c_sas_fillsdw dest=%x, val=%x, len=%d\n", dest, val, len);
+
+ for (i = 0; i < len; i++)
+ {
+ c_sas_storedw(dest, val);
+ dest += 4;
+ }
+ }
+
+/*(
+ *======================= c_sas_scratch_address ================================
+ * c_sas_scratch_address
+ *
+ * Purpose
+ * This function returns a pointer to a scratch area for use by
+ * other functions. There is only one such buffer!
+ *
+ * Input
+ * length (no restrictions)
+ *
+ * Outputs
+ * A pointer to the buffer.
+ *
+ * Description
+ * The buffer is grown each time a new request for a larger buffer is
+ * made. Note that there is an initial call from sas_init for
+ * 64K, so this will be the minimum size we ever have.
+)*/
+
+LOCAL IU8 *scratch = (IU8 *) NULL; /* keep a copy of the pointer */
+LOCAL LIN_ADDR currentLength = 0; /* how much we've allocated */
+
+/*
+ * Return the single shared scratch buffer, growing it if 'length'
+ * exceeds the current allocation (minimum 64K via sas_init).  Growing
+ * frees the old buffer, which invalidates pointers other code may hold
+ * - hence the loud warning and force_yoda() below.
+ */
+GLOBAL IU8 *
+c_sas_scratch_address IFN1(sys_addr, length)
+{
+ if (length > currentLength) {
+ if (scratch) {
+ host_free(scratch);
+ printf("Freeing old scratch buffer - VGA will be broken!\n");
+ force_yoda();
+ }
+
+ check_malloc(scratch, length, IU8);
+ currentLength = length;
+ }
+ return (scratch);
+}
+
+
+/*(
+ *======================= sas_transbuf_address ================================
+ * sas_transbuf_address
+ *
+ * Purpose
+ * This function returns a pointer to a host buffer that the base/host
+ * can read data into, and then load into/from Intel space using the two
+ * special functions that follow. This allows optimisations
+ * on forwards M builds that we haven't implemented on the C CPU. Hence
+ * note that sas_loads_to_transbuff is mapped directly onto sas_loads
+ * by sas_init, and similarly for sas_stores_to_transbuff.
+ *
+ * Input
+ * destination address The intel address that this buffer will be
+ * loaded from, stored to.
+ * length (no restrictions)
+ *
+ * Outputs
+ * A pointer to the buffer.
+ *
+ * Description
+ * Just pass them the scratch buffer!.
+)*/
+
+/* The C CPU has no separate transfer buffer - hand back the scratch
+ * buffer regardless of the intended Intel destination. */
+GLOBAL IU8 *
+c_sas_transbuf_address IFN2(LIN_ADDR, dest_intel_addr, PHY_ADDR, length)
+{
+ UNUSED (dest_intel_addr);
+ return (c_sas_scratch_address(length));
+}
+
+
+/********************************************************/
+/* local functions */
+
+/*********** WORD OPS ***********/
+/* store a word in M */
+/* Store a word into M little-endian, one byte at a time so BACK_M
+ * layouts and misaligned addresses are handled by c_GetPhyAdd. */
+LOCAL void write_word
+IFN2(sys_addr, addr, IU16, wrd)
+{
+ IU8 hi, lo;
+
+ /* split the word */
+ hi = (IU8) ((wrd >> 8) & 0xff);
+ lo = (IU8) (wrd & 0xff);
+
+
+
+ *(c_GetPhyAdd(addr + 1)) = hi;
+ *(c_GetPhyAdd(addr)) = lo;
+}
+
+/* Read a little-endian word from M, one byte at a time (see above). */
+LOCAL word read_word
+IFN1(sys_addr, addr)
+{
+ IU8 hi, lo;
+
+
+ hi = *(c_GetPhyAdd(addr + 1));
+ lo = *(c_GetPhyAdd(addr));
+
+
+ /* build the word */
+ return (((IU16)hi << 8) + (IU16) lo);
+}
+
+#ifndef EGATEST
+/* Register the byte/word write, move and fill handlers for one memory
+ * type in the GMI dispatch tables used by phy_w8/phy_w16. */
+void gmi_define_mem
+IFN2(mem_type, type, MEM_HANDLERS *, handlers)
+{
+ int int_type = (int) (type);
+
+ init_b_write_ptrs(int_type, (void (*) ()) (handlers->b_write));
+ init_w_write_ptrs(int_type, (void (*) ()) (handlers->w_write));
+ b_move_ptrs[int_type] = (void (*) ()) (handlers->b_move);
+ w_move_ptrs[int_type] = (void (*) ()) (handlers->w_move);
+ b_fill_ptrs[int_type] = (void (*) ()) (handlers->b_fill);
+ w_fill_ptrs[int_type] = (void (*) ()) (handlers->w_fill);
+}
+
+
+#endif /* EGATEST */
+
+/*(
+ *========================== c_GetLinAdd ===================================
+ * c_GetLinAdd
+ *
+ * Purpose
+ * Returns a host pointer to the byte specified by an Intel linear
+ * address.
+ *
+ * Input
+ * addr The Intel linear address
+ *
+ * Outputs
+ * The host pointer
+ *
+ * Description
+ * Translate it. If it's not a physical address, scream.
+)*/
+
+/*
+ * Translate an Intel linear address to a host pointer.  With paging off
+ * the linear address is the physical address; with paging on it goes
+ * through xtrn2phy.  An unmapped page is reported (non-PROD) and the
+ * pointer for physical address 0 is returned as a harmless fallback.
+ */
+GLOBAL IU8 *
+c_GetLinAdd IFN1(PHY_ADDR, linAddr)
+{
+ PHY_ADDR phyAddr;
+
+ if (!c_getPG())
+ return(c_GetPhyAdd((PHY_ADDR)linAddr));
+ else if (xtrn2phy(linAddr, (IUM8)0, &phyAddr))
+ return(c_GetPhyAdd(phyAddr));
+ else {
+#ifndef PROD
+ if (!AlreadyInYoda) {
+ always_trace1("get_byte_addr for linear address 0x%x which is unmapped", linAddr);
+ force_yoda();
+ }
+#endif
+ return(c_GetPhyAdd(0)); /* as good as anything! */
+ }
+}
+
+/*(
+ *========================== c_GetPhyAdd ===================================
+ * c_GetPhyAdd
+ *
+ * Purpose
+ * Returns a host pointer to the byte specified by an Intel physical
+ * address.
+ *
+ * Input
+ * addr The Intel physical address
+ *
+ * Outputs
+ * The host pointer
+ *
+ * Description
+ * This is the #ifdef BACK_M bit! Just a simple calculation.
+)*/
+
+LOCAL IBOOL firstDubious = TRUE; /* NOTE(review): unreferenced in the
+ * visible code - confirm before removing */
+/*
+ * Translate an Intel physical address to a host pointer: subtract from
+ * end_of_M for backwards (BACK_M) memory layouts, otherwise add to the
+ * start of the M area.
+ */
+GLOBAL IU8 *
+c_GetPhyAdd IFN1(PHY_ADDR, addr)
+{
+ IU8 *retVal;
+
+#ifdef BACK_M
+ retVal = (IU8 *)((IHPE)end_of_M - (IHPE)addr);
+ return(retVal);
+#else
+ return((IU8 *)((IHPE)Start_of_M_area + (IHPE)addr));
+#endif
+}
+
+/*
+ * Support for V86 Mode.
+ *
+ * The basic idea here is that some of our BIOS C code will be trying to do
+ * things, like changing the interrupt flag, and doing IO which the OS
+ * (e.g. Windows) might prevent us doing on a real PC by running the
+ * BIOS code in V86 mode. Hence what we do is check whether executing
+ * the relevant instruction would have caused an exception if the processor
+ * at its current protection level had executed it. If not, it's OK
+ * for us to just go ahead and do it. However, if it would have caused
+ * an exception, we need to actually execute an appropriate instruction
+ * with the CPU.
+ *
+ * This has two advantages - one it makes the code layout simpler(!), and
+ * secondly it means that Windows can have a look at what sort of instruction
+ * caused the exception.
+ *
+ * Note that this only works for V86 mode because we need to patch-up the
+ * CS to point at the ROM. Basically any OS that tries to execute our
+ * BIOS in VM mode and expects to be able to catch exceptions is in for a nasty
+ * shock. Hence the macro that follows:
+ *
+ * When not in V86 mode, at least one of the Insignia drivers must have
+ * allocated and registered two segments for us to use. We use these to
+ * construct a flat-writeable data segment and a small code segment that
+ * points at the rom -- we use the same code as the V86.
+ */
+
+
+#define BIOS_VIRTUALISE_SEGMENT 0xf000
+/*(
+ *========================== biosDoInst ===================================
+ * biosDoInst
+ *
+ * Purpose
+ * This function executes the instruction at the requested offset,
+ * saving CS and IP across it.
+ *
+ * Input
+ * vCS, vEIP, vEAX, vDS, vEDX The values to used for the
+ * virtualised instruction.
+ *
+ * Outputs
+ * The value returned in EAX after virtualisation.
+ *
+ * Description
+ * Use host_simulate to execute an instruction in the bios1.rom
+)*/
+
+LOCAL IU32
+biosDoInst IFN5(IU16, vCS, LIN_ADDR, vEIP, IU32, vEAX, IU16, vDS, IU32, vEDX)
+{
+	/* One-shot initialisation state - see the 'first' block below. */
+	SAVED IBOOL first = TRUE;
+	SAVED IBOOL trace_bios_inst = FALSE;
+	SAVED int bodgeAdjustment = 0;	/* ROM entry-point offset fix-up */
+
+	IMPORT IS32 simulate_level;
+
+	/* The caller's register context, restored before returning. */
+	IU16 savedCS;
+	IU32 savedEIP;
+	IU32 savedEAX;
+	IU16 savedDS;
+	IU32 savedEDX;
+	IU32 savedEBP;
+	IU32 result;
+
+	if (first)
+	{
+		/* 0x9066 at physical F3030 identifies the old roms; their
+		 * entry points begin with two garbage bytes we must skip.
+		 */
+		if (Sas.Sas_w_at(0xF3030) == 0x9066)
+		{
+			/* These are still Keith's roms with garbage as
+			 * first two bytes of each entry point
+			 */
+			bodgeAdjustment = 2;
+			fprintf(stderr, "**** Warning: The bios1.rom is out of date. This Ccpu486 will not run Win/E\n");
+		}
+		if (getenv("biosDoInst") != NULL)
+			trace_bios_inst = TRUE;
+		first = FALSE;
+	}
+
+	/* Save the caller's context. */
+	savedCS = getCS();
+	savedEIP = getEIP();	//GetInstructionPointer();
+	savedEAX = getEAX();
+	savedDS = getDS();
+	savedEDX = getEDX();
+	savedEBP = getEBP();
+
+	/* Load the context for the virtualised instruction.  EBP carries
+	 * simulate_level as a sequencing cookie, checked after the call.
+	 */
+	setCS (vCS );
+	setEIP(vEIP + bodgeAdjustment);
+	setEAX(vEAX);
+	setDS (vDS );
+	setEDX(vEDX);
+	setEBP(simulate_level);
+
+	/*
+	 * Call the CPU.
+	 */
+
+	if (trace_bios_inst)
+	{
+		always_trace3("biosDoInst: @ %04x, EAX %08x, EDX %08X", vEIP, vEAX, vEDX);
+	}
+
+	host_simulate();
+
+	/* If EBP no longer matches simulate_level, the ROM sequence did
+	 * not come back the way it went in.
+	 */
+	if (getEBP() != simulate_level)
+	{
+#ifdef PROD
+		host_error(EG_OWNUP, ERR_QUIT, "biosDoInst: Virtualisation sequencing failure");
+#else
+		always_trace0("biosDoInst: Virtualisation sequencing failure");
+		force_yoda();
+#endif
+	}
+
+	result = getEAX();	/* the virtualised instruction's result */
+
+	/* Restore the registers to the original values */
+
+	setCS (savedCS );
+	setEIP(savedEIP);
+	setEAX(savedEAX);
+	setDS (savedDS );
+	setEDX(savedEDX);
+	setEBP(savedEBP);
+
+	return (result);
+}
+
+/*(
+ *============================ BiosSti & BiosCli ===============================
+ * BiosSti & BiosCli
+ *
+ * Purpose
+ * These functions are used to check if executing a CLI or STI
+ * would cause an exception. If so, we execute it from the ROMs
+ * so that Windows has a chance to virtualise it.
+ *
+ * Input
+ * None.
+ *
+ * Outputs
+ * None.
+ *
+ * Description
+ * If protection is OK, just do it, otherwise do the instruction in ROM.
+)*/
+
+/* Do STI if legal, else go back to CPU to do STI. */
+GLOBAL void
+BiosSti IFN0()
+{
+	/* Privileged enough?  Then simply set IF ourselves; otherwise run
+	 * the STI from the ROM so the OS can catch the exception.
+	 */
+	if ( c_getCPL() <= getIOPL() ) {
+		SET_IF(1);
+	} else {
+		(void)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_STI_OFFSET, 0, 0, 0);
+	}
+}
+
+/* Do CLI if legal, else go back to CPU to do CLI. */
+GLOBAL void
+BiosCli IFN0()
+{
+	/* Privileged enough?  Then simply clear IF ourselves; otherwise run
+	 * the CLI from the ROM so the OS can catch the exception.
+	 */
+	if ( c_getCPL() <= getIOPL() ) {
+		SET_IF(0);
+	} else {
+		(void)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_CLI_OFFSET, 0, 0, 0);
+	}
+}
+
+/*(
+ *============================ c_IOVirtualised =================================
+ * c_IOVirtualised
+ *
+ * Purpose
+ * This function checks whether executing an IO instruction
+ * of the indicated width would cause an exception to go off.
+ *
+ * If so, it executes the indicated identical instruction in ROM.
+ * This will allow the exception to go off correctly, and allow the
+ * Intel OS (e.g. Windows) to catch and virtualise it if it wishes.
+ *
+ * Otherwise it will be up to the caller to execute the actual IO.
+ *
+ * Input
+ * port The port to use
+ * value Where output values are taken from, and input values
+ * written to. NOTE: THIS MUST BE AN IU32*, WHATEVER THE WIDTH.
+ * offset The offset in the ROM of the equivalent instruction.
+ * width byte, word, dword
+ *
+ * Outputs
+ * True if the operation went to ROM, false if the caller needs to do it.
+ *
+ * Description
+ * If this is an illegal IO operation, we need to save CS, IP, EAX, EDX
+ * and call host_simulate to execute the equivalent instruction in ROM.
+)*/
+
+GLOBAL IBOOL
+c_IOVirtualised IFN4(io_addr, port, IU32 *, value, LIN_ADDR, offset, IU8, width)
+{
+	/* NOTE(review): 'width' is not referenced in this body; the caller
+	 * appears to select the equivalent ROM instruction via 'offset'.
+	 */
+	if (getVM())
+	{
+		/* V86 mode: always execute the IO from the ROM so the OS can
+		 * catch and virtualise it.
+		 */
+		*value = biosDoInst(BIOS_VIRTUALISE_SEGMENT, offset, *value, 0, port);
+		return(TRUE);
+	} else if ( c_getCPL() > getIOPL()) {
+
+		/* Not IO-privileged in protected mode.  Mouse and ICA ports
+		 * are routine; anything else is worth a trace.
+		 */
+		switch (port)
+		{
+			case 0x23c: /* mouse */
+			case 0x23d: /* mouse */
+			case 0xa0: /* ica */
+			case 0x20: /* ica */
+				break;
+			default:
+				always_trace1("Virtualising PM I/O code called, port =0x%x\n",
+						port);
+		}
+
+		if (!selectors_set) {
+			sub_note_trace0(SAS_VERBOSE,
+					"Exiting as selectors not set !\n");
+			return FALSE;	/* caller must do the IO itself */
+		}
+		*value = biosDoInst(code_sel, offset, *value, 0, port);
+		return(TRUE);
+	}
+	return FALSE;	/* legal IO - caller performs it directly */
+}
+
+/* Read byte from memory, if V86 Mode let CPU do it. */
+LOCAL IU8
+bios_read_byte IFN1(LIN_ADDR, linAddr)
+{
+	PHY_ADDR phyAddr;
+	IUM8 access_request = 0;	/* BIT 0 = R/W */
+					/* BIT 1 = U/S */
+					/* BIT 2 = Ensure A and D are valid */
+
+	/* If no paging on, then no problem */
+
+	if (!c_getPG())
+	{
+		return(phy_r8((PHY_ADDR)linAddr));
+	}
+
+	/* Note default access_request (0) is Supervisor Read */
+
+	/* We don't specifically disallow Protected Mode calls, they
+	   are not designed to happen, but the Video at least has a habit
+	   of reading BIOS variables on host timer ticks. We treat such
+	   requests more leniently than V86 Mode requests, by not insisting
+	   that the access and dirty bits are kosher.
+	 */
+
+	if ( getCPL() != 3 )
+	{
+		access_request = access_request | PG_U;
+	}
+
+	/* Beware V86 Mode, be strict about access and dirty bits */
+
+	if ( getVM() )
+	{
+		access_request = access_request | 0x4;	/* BIT 2 above */
+	}
+
+	/* Go translate the address. */
+
+	if (xtrn2phy(linAddr, access_request, &phyAddr))
+	{
+		return(phy_r8(phyAddr));
+	}
+
+	/* Handle Address Mapping Failure... */
+
+	if(getPE() && !getVM())
+	{
+		always_trace1("Virtualising PM byte read, lin address 0x%x", linAddr);
+
+		/* Fix: this was a bare 'return' in a value-returning
+		 * function (undefined behaviour).  Without the registered
+		 * selectors we cannot reach the ROM helper - fail soft.
+		 */
+		if (!selectors_set)
+			return 0;
+
+		/* Replay the read from the ROM so the OS can field the fault. */
+		return ((IU8)biosDoInst(code_sel, BIOS_RDB_OFFSET, 0, data_sel, linAddr));
+	}
+	else
+	{
+		sub_note_trace1(SAS_VERBOSE, "Page read VM virtualisation at 0x%x", linAddr);
+
+		return ((IU8)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_RDB_OFFSET, 0, data_sel, linAddr));
+	}
+}
+
+
+
+/* Read word from memory, if V86 Mode let CPU do it. */
+LOCAL IU16
+bios_read_word IFN1(LIN_ADDR, linAddr)
+{
+	PHY_ADDR phyAddr;
+	IUM8 access_request = 0;	/* BIT 0 = R/W */
+					/* BIT 1 = U/S */
+					/* BIT 2 = Ensure A and D are valid */
+
+	/* If no paging on, then no problem */
+
+	if (!c_getPG())
+	{
+		return(phy_r16((PHY_ADDR)linAddr));
+	}
+
+	/* Note default access_request (0) is Supervisor Read */
+
+	/* We don't specifically disallow Protected Mode calls, they
+	   are not designed to happen, but the Video at least has a habit
+	   of reading BIOS variables on host timer ticks. We treat such
+	   requests more leniently than V86 Mode requests, by not insisting
+	   that the access and dirty bits are kosher.
+	 */
+
+	if ( getCPL() != 3 )
+	{
+		access_request = access_request | PG_U;
+	}
+
+	/* Beware V86 Mode, be strict about access and dirty bits */
+
+	if ( getVM() )
+	{
+		access_request = access_request | 0x4;	/* BIT 2 above */
+	}
+
+	/* Go translate the address. Never called crossing a page boundary */
+
+	if (xtrn2phy(linAddr, access_request, &phyAddr))
+	{
+		return(phy_r16(phyAddr));
+	}
+
+	/* Handle Address Mapping Failure... */
+
+	if(getPE() && !getVM())
+	{
+		always_trace1("Virtualising PM word read, lin address 0x%x", linAddr);
+
+		/* Fix: this was a bare 'return' in a value-returning
+		 * function (undefined behaviour).  Without the registered
+		 * selectors we cannot reach the ROM helper - fail soft.
+		 */
+		if (!selectors_set)
+			return 0;
+
+		/* Fix: the result was cast to IU8, truncating the 16-bit
+		 * virtualised read to its low byte.
+		 */
+		return ((IU16)biosDoInst(code_sel, BIOS_RDW_OFFSET, 0, data_sel, linAddr));
+	}
+	else
+	{
+		sub_note_trace1(SAS_VERBOSE, "Page read word VM virtualisation at 0x%x", linAddr);
+
+		return ((IU16)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_RDW_OFFSET, 0, data_sel, linAddr));
+	}
+}
+
+
+/* Read double from memory, if V86 Mode let CPU do it. */
+LOCAL IU32
+bios_read_double IFN1(LIN_ADDR, linAddr)
+{
+	PHY_ADDR phyAddr;
+	IUM8 access_request = 0;	/* BIT 0 = R/W */
+					/* BIT 1 = U/S */
+					/* BIT 2 = Ensure A and D are valid */
+
+	/* If no paging on, then no problem */
+
+	if (!c_getPG())
+	{
+		return(phy_r32((PHY_ADDR)linAddr));
+	}
+
+	/* Note default access_request (0) is Supervisor Read */
+
+	/* We don't specifically disallow Protected Mode calls, they
+	   are not designed to happen, but the Video at least has a habit
+	   of reading BIOS variables on host timer ticks. We treat such
+	   requests more leniently than V86 Mode requests, by not insisting
+	   that the access and dirty bits are kosher.
+	 */
+
+	if ( getCPL() != 3 )
+	{
+		access_request = access_request | PG_U;
+	}
+
+	/* Beware V86 Mode, be strict about access and dirty bits */
+
+	if ( getVM() )
+	{
+		access_request = access_request | 0x4;	/* BIT 2 above */
+	}
+
+	/* Go translate the address. Never called crossing a page boundary */
+
+	if (xtrn2phy(linAddr, access_request, &phyAddr))
+	{
+		return(phy_r32(phyAddr));
+	}
+
+	/* Handle Address Mapping Failure... */
+
+	if(getPE() && !getVM())
+	{
+		always_trace1("Virtualising PM double read, lin address 0x%x", linAddr);
+
+		/* Fix: this was a bare 'return' in a value-returning
+		 * function (undefined behaviour).  Without the registered
+		 * selectors we cannot reach the ROM helper - fail soft.
+		 */
+		if (!selectors_set)
+			return 0;
+
+		/* Fix: the result was cast to IU8, truncating the 32-bit
+		 * virtualised read to its low byte.
+		 */
+		return ((IU32)biosDoInst(code_sel, BIOS_RDD_OFFSET, 0, data_sel, linAddr));
+	}
+	else
+	{
+		sub_note_trace1(SAS_VERBOSE, "Page read double VM virtualisation at 0x%x", linAddr);
+
+		return ((IU32)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_RDD_OFFSET, 0, data_sel, linAddr));
+	}
+}
+
+
+/* Write byte to memory, if V86 Mode let CPU do it. */
+LOCAL void
+bios_write_byte IFN2(LIN_ADDR, linAddr, IU8, value)
+{
+	PHY_ADDR addr;
+	IUM8 access_request = 0;	/* BIT 0 = R/W */
+					/* BIT 1 = U/S */
+					/* BIT 2 = Ensure A and D are valid */
+
+	/* If no paging on, then no problem */
+
+	if (!c_getPG())
+	{
+		phy_w8((PHY_ADDR)linAddr, value);
+		return;
+	}
+
+	/* Note default access_request (0) is Supervisor Read */
+	access_request = access_request | PG_W;	/* So make it Right :-) */
+
+	/* We don't specifically disallow Protected Mode calls, they
+	   are not designed to happen, but who knows. We treat such
+	   requests more leniently than V86 Mode requests, by not insisting
+	   that the access and dirty bits are kosher.
+	 */
+
+	/* NOTE(review): PG_U is set when CPL != 3, which looks inverted
+	 * relative to the usual user/supervisor encoding - presumably this
+	 * follows xtrn2phy's convention (all six bios_* accessors agree);
+	 * verify against xtrn2phy before changing.
+	 */
+	if ( getCPL() != 3 )
+	{
+		access_request = access_request | PG_U;
+	}
+
+	/* Beware V86 Mode, be strict about access and dirty bits */
+	if ( getVM() )
+	{
+		access_request = access_request | 0x4;	/* BIT 2 above */
+	}
+
+	/* Go translate the address. */
+	if (xtrn2phy(linAddr, access_request, &addr))
+	{
+		phy_w8(addr, value);
+		return;
+	}
+
+	/* Handle Address Mapping Failure... */
+
+	if(getPE() && !getVM())
+	{
+		always_trace1("Virtualising PM byte write, lin address 0x%x", linAddr);
+
+		/* Without the registered selectors we cannot reach the ROM
+		 * helper; drop the write.
+		 */
+		if (!selectors_set)
+			return;
+
+		/* Replay the write from the ROM so the OS can field the fault. */
+		(void)biosDoInst(code_sel, BIOS_WRTB_OFFSET, (IU32)value, data_sel, linAddr);
+	}
+	else
+	{
+		sub_note_trace1(SAS_VERBOSE, "Page write VM virtualisation at 0x%x", linAddr);
+
+		(void)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_WRTB_OFFSET, (IU32)value, data_sel, linAddr);
+	}
+}
+
+
+/* Write word to memory, if V86 Mode let CPU do it. */
+LOCAL void
+bios_write_word IFN2(LIN_ADDR, linAddr, IU16, value)
+{
+	PHY_ADDR addr;
+	IUM8 access_request = 0;	/* BIT 0 = R/W */
+					/* BIT 1 = U/S */
+					/* BIT 2 = Ensure A and D are valid */
+
+	/* If no paging on, then no problem */
+
+	if (!c_getPG())
+	{
+		phy_w16((PHY_ADDR)linAddr, value);
+		return;
+	}
+
+	/* Note default access_request (0) is Supervisor Read */
+	access_request = access_request | PG_W;	/* So make it Right :-) */
+
+	/* We don't specifically disallow Protected Mode calls, they
+	   are not designed to happen, but who knows. We treat such
+	   requests more leniently than V86 Mode requests, by not insisting
+	   that the access and dirty bits are kosher.
+	 */
+
+	if ( getCPL() != 3 )
+	{
+		access_request = access_request | PG_U;
+	}
+
+	/* Beware V86 Mode, be strict about access and dirty bits */
+	if ( getVM() )
+	{
+		access_request = access_request | 0x4;	/* BIT 2 above */
+	}
+
+	/* Go translate the address. Never called crossing a page boundary */
+	if (xtrn2phy(linAddr, access_request, &addr))
+	{
+		phy_w16(addr, value);
+		return;
+	}
+
+	/* Handle Address Mapping Failure... */
+
+	if(getPE() && !getVM())
+	{
+		always_trace1("Virtualising PM word write, lin address 0x%x", linAddr);
+
+		/* Without the registered selectors we cannot reach the ROM
+		 * helper; drop the write.
+		 */
+		if (!selectors_set)
+			return;
+
+		/* Replay the write from the ROM so the OS can field the fault. */
+		(void)biosDoInst(code_sel, BIOS_WRTW_OFFSET, (IU32)value, data_sel, linAddr);
+	}
+	else
+	{
+		sub_note_trace1(SAS_VERBOSE, "Page word write VM virtualisation at 0x%x", linAddr);
+
+		(void)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_WRTW_OFFSET, (IU32)value, data_sel, linAddr);
+	}
+}
+
+
+/* Write double to memory, if V86 Mode let CPU do it. */
+LOCAL void
+bios_write_double IFN2(LIN_ADDR, linAddr, IU32, value)
+{
+	PHY_ADDR addr;
+	IUM8 access_request = 0;	/* BIT 0 = R/W */
+					/* BIT 1 = U/S */
+					/* BIT 2 = Ensure A and D are valid */
+
+	/* If no paging on, then no problem */
+
+	if (!c_getPG())
+	{
+		phy_w32((PHY_ADDR)linAddr, value);
+		return;
+	}
+
+	/* Note default access_request (0) is Supervisor Read */
+	access_request = access_request | PG_W;	/* So make it Right :-) */
+
+	/* We don't specifically disallow Protected Mode calls, they
+	   are not designed to happen, but who knows. We treat such
+	   requests more leniently than V86 Mode requests, by not insisting
+	   that the access and dirty bits are kosher.
+	 */
+
+	if ( getCPL() != 3 )
+	{
+		access_request = access_request | PG_U;
+	}
+
+	/* Beware V86 Mode, be strict about access and dirty bits */
+	if ( getVM() )
+	{
+		access_request = access_request | 0x4;	/* BIT 2 above */
+	}
+
+	/* Go translate the address. Never called crossing a page boundary */
+	if (xtrn2phy(linAddr, access_request, &addr))
+	{
+		phy_w32(addr, value);
+		return;
+	}
+
+	/* Handle Address Mapping Failure... */
+
+	if(getPE() && !getVM())
+	{
+		always_trace1("Virtualising PM double write, lin address 0x%x", linAddr);
+
+		/* Without the registered selectors we cannot reach the ROM
+		 * helper; drop the write.
+		 */
+		if (!selectors_set)
+			return;
+
+		/* Replay the write from the ROM so the OS can field the fault. */
+		(void)biosDoInst(code_sel, BIOS_WRTD_OFFSET, (IU32)value, data_sel, linAddr);
+	}
+	else
+	{
+		sub_note_trace1(SAS_VERBOSE, "Page double write VM virtualisation at 0x%x", linAddr);
+
+		(void)biosDoInst(BIOS_VIRTUALISE_SEGMENT, BIOS_WRTD_OFFSET, (IU32)value, data_sel, linAddr);
+	}
+}
+
+
+/* Guard entry for SAS vector slots that must never be called. */
+LOCAL void c_sas_not_used IFN0()
+{
+	always_trace0("c_sas_not_used called");
+#ifndef PROD
+	force_yoda();	/* drop into the debugger on development builds */
+#endif
+}
+
+
+/* Compatibility with SoftPC2.0 access name (used in video) */
+GLOBAL IU8* c_get_byte_addr IFN1(PHY_ADDR, addr)
+{
+	/* Physical-address variant only: no paging translation here. */
+	return (c_GetPhyAdd(addr));
+}
+
+/* stub needed for standalone Ccpu */
+GLOBAL IBOOL c_sas_PigCmpPage IFN3(IU32, src, IU8 *, dest, IU32, len)
+{
+	/* Pigging is not supported in the standalone Ccpu: always report
+	 * "no difference".  All arguments are ignored.
+	 */
+	return(FALSE);
+}
+#endif /* CCPU */
diff --git a/private/mvdm/softpc.new/base/ccpu386/ccpusas4.h b/private/mvdm/softpc.new/base/ccpu386/ccpusas4.h
new file mode 100644
index 000000000..8593a92a6
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ccpusas4.h
@@ -0,0 +1,34 @@
+/*[
+ * File Name : ccpu_sas4.h
+ *
+ * Derived From : Template
+ *
+ * Author : Mike
+ *
+ * Creation Date : October 1993
+ *
+ * SCCS Version : @(#)ccpusas4.h 1.5 11/15/94
+ *!
+ * Purpose
+ *		This include file contains the interface provided by ccpu_sas4.c
+ * to the rest of the ccpu.
+ *
+ *! (c)Copyright Insignia Solutions Ltd., 1993. All rights reserved.
+]*/
+
+extern IU8 phy_r8 IPT1(PHY_ADDR, addr);
+extern IU16 phy_r16 IPT1(PHY_ADDR, addr);
+extern IU32 phy_r32 IPT1(PHY_ADDR, addr);
+extern void phy_w8 IPT2(PHY_ADDR, addr, IU8, value);
+extern void phy_w16 IPT2(PHY_ADDR, addr, IU16, value);
+extern void phy_w32 IPT2(PHY_ADDR, addr, IU32, value);
+
+extern PHY_ADDR SasWrapMask;
+
+#if !defined(PIG)
+#ifdef BACK_M
+#define IncCpuPtrLS8(ptr) (ptr)--
+#else /* BACK_M */
+#define IncCpuPtrLS8(ptr) (ptr)++
+#endif /* BACK_M */
+#endif /* PIG */
diff --git a/private/mvdm/softpc.new/base/ccpu386/cdq.c b/private/mvdm/softpc.new/base/ccpu386/cdq.c
new file mode 100644
index 000000000..1de3fac50
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cdq.c
@@ -0,0 +1,42 @@
+/*[
+
+cdq.c
+
+LOCAL CHAR SccsID[]="@(#)cdq.c 1.5 02/09/94";
+
+CDQ CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cdq.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CDQ()
+   {
+   /* Sign-extend EAX into EDX:EAX - EDX becomes all ones when EAX is
+    * negative (bit 31 set), all zeros otherwise.
+    */
+   SET_EDX((GET_EAX() & BIT31_MASK) ? 0xffffffff : 0);
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cdq.h b/private/mvdm/softpc.new/base/ccpu386/cdq.h
new file mode 100644
index 000000000..d4064912c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cdq.h
@@ -0,0 +1,11 @@
+/*
+ cdq.h
+
+ CDQ CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cdq.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CDQ IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/clc.c b/private/mvdm/softpc.new/base/ccpu386/clc.c
new file mode 100644
index 000000000..b8c2b6783
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/clc.c
@@ -0,0 +1,39 @@
+/*[
+
+clc.c
+
+LOCAL CHAR SccsID[]="@(#)clc.c 1.5 02/09/94";
+
+CLC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <clc.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CLC()
+   {
+   SET_CF(0);	/* clear the carry flag */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/clc.h b/private/mvdm/softpc.new/base/ccpu386/clc.h
new file mode 100644
index 000000000..e74a9b19f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/clc.h
@@ -0,0 +1,11 @@
+/*
+ clc.h
+
+ Define all CLC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)clc.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CLC IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/cld.c b/private/mvdm/softpc.new/base/ccpu386/cld.c
new file mode 100644
index 000000000..b760e2b7b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cld.c
@@ -0,0 +1,39 @@
+/*[
+
+cld.c
+
+LOCAL CHAR SccsID[]="@(#)cld.c 1.5 02/09/94";
+
+CLD CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cld.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CLD()
+   {
+   SET_DF(0);	/* clear the direction flag */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cld.h b/private/mvdm/softpc.new/base/ccpu386/cld.h
new file mode 100644
index 000000000..73d5077e0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cld.h
@@ -0,0 +1,11 @@
+/*
+ cld.h
+
+ Define all CLD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cld.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CLD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/cli.c b/private/mvdm/softpc.new/base/ccpu386/cli.c
new file mode 100644
index 000000000..d1365c883
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cli.c
@@ -0,0 +1,39 @@
+/*[
+
+cli.c
+
+LOCAL CHAR SccsID[]="@(#)cli.c 1.5 02/09/94";
+
+CLI CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cli.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CLI()
+   {
+   SET_IF(0);	/* clear the interrupt enable flag */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cli.h b/private/mvdm/softpc.new/base/ccpu386/cli.h
new file mode 100644
index 000000000..116537d32
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cli.h
@@ -0,0 +1,11 @@
+/*
+ cli.h
+
+ Define all CLI CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cli.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CLI IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/clts.c b/private/mvdm/softpc.new/base/ccpu386/clts.c
new file mode 100644
index 000000000..30665d98c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/clts.c
@@ -0,0 +1,39 @@
+/*[
+
+clts.c
+
+LOCAL CHAR SccsID[]="@(#)clts.c 1.5 02/09/94";
+
+CLTS CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <clts.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CLTS()
+   {
+   /* Clear the Task Switched bit (bit 3) of the status control register. */
+   SET_CR(CR_STAT, GET_CR(CR_STAT) & ~BIT3_MASK);
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/clts.h b/private/mvdm/softpc.new/base/ccpu386/clts.h
new file mode 100644
index 000000000..46bd21836
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/clts.h
@@ -0,0 +1,11 @@
+/*
+ clts.h
+
+ Define all CLTS CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)clts.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CLTS IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/cmc.c b/private/mvdm/softpc.new/base/ccpu386/cmc.c
new file mode 100644
index 000000000..ae0fc801d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cmc.c
@@ -0,0 +1,39 @@
+/*[
+
+cmc.c
+
+LOCAL CHAR SccsID[]="@(#)cmc.c 1.5 02/09/94";
+
+CMC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cmc.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+CMC()
+   {
+   /* Toggle the carry flag.  NOTE(review): relies on GET_CF() yielding
+    * exactly 0 or 1.
+    */
+   SET_CF(1 - GET_CF());
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cmc.h b/private/mvdm/softpc.new/base/ccpu386/cmc.h
new file mode 100644
index 000000000..e8f24d7ae
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cmc.h
@@ -0,0 +1,11 @@
+/*
+ cmc.h
+
+ Define all CMC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cmc.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CMC IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/cmp.c b/private/mvdm/softpc.new/base/ccpu386/cmp.c
new file mode 100644
index 000000000..5f8db59f3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cmp.c
@@ -0,0 +1,81 @@
+/*[
+
+cmp.c
+
+LOCAL CHAR SccsID[]="@(#)cmp.c 1.5 02/09/94";
+
+CMP CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cmp.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'cmp'. */
+/* Generic - one size fits all 'cmps'. */
+/* Generic - one size fits all 'scas'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+CMP
+
+IFN3(
+	IU32, op1,	/* lsrc operand */
+	IU32, op2,	/* rsrc operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+   IU32 carry;
+   IU32 msb;		/* mask selecting the sign bit for op_sz */
+   IU32 op1_msb;
+   IU32 op2_msb;
+   IU32 res_msb;
+
+   msb = SZ2MSB(op_sz);
+
+   /* '-' binds tighter than '&': this is (op1 - op2) masked to size. */
+   result = op1 - op2 & SZ2MASK(op_sz);		/* Do operation */
+   op1_msb = (op1 & msb) != 0;	/* Isolate all msb's */
+   op2_msb = (op2 & msb) != 0;
+   res_msb = (result & msb) != 0;
+   carry = op1 ^ op2 ^ result;	/* Isolate carries */
+		/* Determine flags */
+   /*
+      OF set when the operand signs differ and the result sign differs
+      from the original destination (op1):
+      OF = (sign(op1) != sign(op2)) & (sign(op1) ^ sign(res))
+    */
+   SET_OF((op1_msb != op2_msb) & (op1_msb ^ res_msb));
+   /*
+      Formally:-	 CF = !op1 & op2 | res & !op1 | res & op2
+      Equivalently:- CF = OF ^ op1 ^ op2 ^ res	 (each taken at the msb)
+    */
+   SET_CF(((carry & msb) != 0) ^ GET_OF());
+   SET_PF(pf_table[result & BYTE_MASK]);	/* parity of low byte */
+   SET_ZF(result == 0);
+   SET_SF((result & msb) != 0);		/* SF = MSB */
+   SET_AF((carry & BIT4_MASK) != 0);	/* AF = Bit 4 carry */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cmp.h b/private/mvdm/softpc.new/base/ccpu386/cmp.h
new file mode 100644
index 000000000..864459ed3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cmp.h
@@ -0,0 +1,18 @@
+/*
+ cmp.h
+
+ Define all CMP CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cmp.h 1.4 02/09/94";
+ */
+
+IMPORT VOID CMP
+
+IPT3(
+ IU32, op1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/cmpxchg.c b/private/mvdm/softpc.new/base/ccpu386/cmpxchg.c
new file mode 100644
index 000000000..0ea141894
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cmpxchg.c
@@ -0,0 +1,122 @@
+/*[
+
+cmpxchg.c
+
+LOCAL CHAR SccsID[]="@(#)cmpxchg.c 1.5 02/09/94";
+
+CMPXCHG CPU functions.
+----------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cmpxchg.h>
+#include <cmp.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+#ifdef SPC486
+
+GLOBAL VOID
+CMPXCHG8
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32, op2	/* rsrc operand */
+   )
+
+
+   {
+   /*
+      First do comparison (AL vs. destination) and generate flags.
+    */
+   CMP((IU32)GET_AL(), *pop1, 8);
+
+   /*
+      Then swap data as required.
+    */
+   if ( GET_ZF() )	/* ie iff AL == op1 */
+      {
+      *pop1 = op2;	/* equal: store source into destination */
+      }
+   else
+      {
+      SET_AL(*pop1);	/* unequal: load destination into AL */
+      }
+   }
+
+GLOBAL VOID
+CMPXCHG16
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32, op2	/* rsrc operand */
+   )
+
+
+   {
+   /*
+      First do comparison (AX vs. destination) and generate flags.
+    */
+   CMP((IU32)GET_AX(), *pop1, 16);
+
+   /*
+      Then swap data as required.
+    */
+   if ( GET_ZF() )	/* ie iff AX == op1 */
+      {
+      *pop1 = op2;	/* equal: store source into destination */
+      }
+   else
+      {
+      SET_AX(*pop1);	/* unequal: load destination into AX */
+      }
+   }
+
+GLOBAL VOID
+CMPXCHG32
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32, op2	/* rsrc operand */
+   )
+
+
+   {
+   /*
+      First do comparison (EAX vs. destination) and generate flags.
+    */
+   CMP((IU32)GET_EAX(), *pop1, 32);
+
+   /*
+      Then swap data as required.
+    */
+   if ( GET_ZF() )	/* ie iff EAX == op1 */
+      {
+      *pop1 = op2;	/* equal: store source into destination */
+      }
+   else
+      {
+      SET_EAX(*pop1);	/* unequal: load destination into EAX */
+      }
+   }
+
+#endif /* SPC486 */
diff --git a/private/mvdm/softpc.new/base/ccpu386/cmpxchg.h b/private/mvdm/softpc.new/base/ccpu386/cmpxchg.h
new file mode 100644
index 000000000..ed25ee81d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cmpxchg.h
@@ -0,0 +1,33 @@
+/*
+ cmpxchg.h
+
+ CMPXCHG CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cmpxchg.h 1.4 02/09/94";
+ */
+
+IMPORT VOID CMPXCHG8
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID CMPXCHG16
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID CMPXCHG32
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/cpu4gen.h b/private/mvdm/softpc.new/base/ccpu386/cpu4gen.h
new file mode 100644
index 000000000..9bf724ac3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cpu4gen.h
@@ -0,0 +1,3086 @@
+/*[
+ * Generated File: cpu4gen.h
+ *
+]*/
+
+#ifndef _CPU4GEN_H_
+#define _CPU4GEN_H_
+
+#include <gdpvar.h> /* For direct access getAX() etc. */
+
+/*
+   Dispatch table through which the rest of the emulator reaches the
+   currently-installed CPU implementation.  Generated header: keep any
+   hand edit mirrored in the generator.
+ */
+struct CpuVector {
+/* Sub-vectors: typed when the CPU internals are visible, opaque otherwise. */
+#ifdef CPU_PRIVATE
+ struct CpuPrivateVector *Private;
+#else /* !CPU_PRIVATE */
+ IHP Private;
+#endif /* CPU_PRIVATE */
+#ifdef CPU_PRIVATE
+ struct SasVector *Sas;
+#else /* !CPU_PRIVATE */
+ IHP Sas;
+#endif /* CPU_PRIVATE */
+#ifdef CPU_PRIVATE
+ struct VideoVector *Video;
+#else /* !CPU_PRIVATE */
+ IHP Video;
+#endif /* CPU_PRIVATE */
+/* Execution control and lifecycle entry points. */
+ void (*Simulate) IPT0();
+ void (*Interrupt) IPT2(CPU_INT_TYPE, intType, IU16, intNum);
+ void (*ClearHwInt) IPT0();
+ void (*EndOfApplication) IPT0();
+ void (*Terminate) IPT0();
+ void (*Initialise) IPT0();
+ IU32 (*EffectiveAddr) IPT2(IU16, seg, IU32, offset);
+ void (*SetQuickEventCount) IPT1(IU32, val);
+ IU32 (*GetQuickEventCount) IPT0();
+/* I/O space table registration. */
+ void (*InitIOS) IPT4(IHP, InTables, IHP, OutTables, IUH, maxAdaptor, IU16, portMask);
+ void (*DefineInb) IPT2(IUH, adaptor, IHP, func);
+ void (*DefineInw) IPT2(IUH, adaptor, IHP, func);
+ void (*DefineInd) IPT2(IUH, adaptor, IHP, func);
+ void (*DefineOutb) IPT2(IUH, adaptor, IHP, func);
+ void (*DefineOutw) IPT2(IUH, adaptor, IHP, func);
+ void (*DefineOutd) IPT2(IUH, adaptor, IHP, func);
+/* General register write accessors. */
+ void (*SetAL) IPT1(IU8, val);
+ void (*SetAH) IPT1(IU8, val);
+ void (*SetAX) IPT1(IU16, val);
+ void (*SetEAX) IPT1(IU32, val);
+ void (*SetBL) IPT1(IU8, val);
+ void (*SetBH) IPT1(IU8, val);
+ void (*SetBX) IPT1(IU16, val);
+ void (*SetEBX) IPT1(IU32, val);
+ void (*SetCL) IPT1(IU8, val);
+ void (*SetCH) IPT1(IU8, val);
+ void (*SetCX) IPT1(IU16, val);
+ void (*SetECX) IPT1(IU32, val);
+ void (*SetDL) IPT1(IU8, val);
+ void (*SetDH) IPT1(IU8, val);
+ void (*SetDX) IPT1(IU16, val);
+ void (*SetEDX) IPT1(IU32, val);
+ void (*SetSI) IPT1(IU16, val);
+ void (*SetESI) IPT1(IU32, val);
+ void (*SetDI) IPT1(IU16, val);
+ void (*SetEDI) IPT1(IU32, val);
+ void (*SetSP) IPT1(IU16, val);
+ void (*SetESP) IPT1(IU32, val);
+ void (*SetBP) IPT1(IU16, val);
+ void (*SetEBP) IPT1(IU32, val);
+ void (*SetIP) IPT1(IU16, val);
+ void (*SetEIP) IPT1(IU32, val);
+/* Segment register writes return a status (IUH), unlike other setters. */
+ IUH (*SetCS) IPT1(IU16, val);
+ IUH (*SetSS) IPT1(IU16, val);
+ IUH (*SetDS) IPT1(IU16, val);
+ IUH (*SetES) IPT1(IU16, val);
+ IUH (*SetFS) IPT1(IU16, val);
+ IUH (*SetGS) IPT1(IU16, val);
+/* Flags, control register and system register write accessors. */
+ void (*SetEFLAGS) IPT1(IU32, val);
+ void (*SetSTATUS) IPT1(IU16, val);
+ void (*SetIOPL) IPT1(IU8, val);
+ void (*SetMSW) IPT1(IU16, val);
+ void (*SetCR0) IPT1(IU32, val);
+ void (*SetCR2) IPT1(IU32, val);
+ void (*SetCR3) IPT1(IU32, val);
+ void (*SetCF) IPT1(IBOOL, val);
+ void (*SetPF) IPT1(IBOOL, val);
+ void (*SetAF) IPT1(IBOOL, val);
+ void (*SetZF) IPT1(IBOOL, val);
+ void (*SetSF) IPT1(IBOOL, val);
+ void (*SetTF) IPT1(IBOOL, val);
+ void (*SetIF) IPT1(IBOOL, val);
+ void (*SetDF) IPT1(IBOOL, val);
+ void (*SetOF) IPT1(IBOOL, val);
+ void (*SetNT) IPT1(IBOOL, val);
+ void (*SetRF) IPT1(IBOOL, val);
+ void (*SetVM) IPT1(IBOOL, val);
+ void (*SetAC) IPT1(IBOOL, val);
+ void (*SetPE) IPT1(IBOOL, val);
+ void (*SetMP) IPT1(IBOOL, val);
+ void (*SetEM) IPT1(IBOOL, val);
+ void (*SetTS) IPT1(IBOOL, val);
+ void (*SetPG) IPT1(IBOOL, val);
+ void (*SetLDT_SELECTOR) IPT1(IU16, val);
+ void (*SetTR_SELECTOR) IPT1(IU16, val);
+/* General register read accessors. */
+ IU8 (*GetAL) IPT0();
+ IU8 (*GetAH) IPT0();
+ IU16 (*GetAX) IPT0();
+ IU32 (*GetEAX) IPT0();
+ IU8 (*GetBL) IPT0();
+ IU8 (*GetBH) IPT0();
+ IU16 (*GetBX) IPT0();
+ IU32 (*GetEBX) IPT0();
+ IU8 (*GetCL) IPT0();
+ IU8 (*GetCH) IPT0();
+ IU16 (*GetCX) IPT0();
+ IU32 (*GetECX) IPT0();
+ IU8 (*GetDL) IPT0();
+ IU8 (*GetDH) IPT0();
+ IU16 (*GetDX) IPT0();
+ IU32 (*GetEDX) IPT0();
+ IU16 (*GetSI) IPT0();
+ IU32 (*GetESI) IPT0();
+ IU16 (*GetDI) IPT0();
+ IU32 (*GetEDI) IPT0();
+ IU16 (*GetSP) IPT0();
+ IU32 (*GetESP) IPT0();
+ IU16 (*GetBP) IPT0();
+ IU32 (*GetEBP) IPT0();
+ IU16 (*GetIP) IPT0();
+ IU32 (*GetEIP) IPT0();
+ IU16 (*GetCS) IPT0();
+ IU16 (*GetSS) IPT0();
+ IU16 (*GetDS) IPT0();
+ IU16 (*GetES) IPT0();
+ IU16 (*GetFS) IPT0();
+ IU16 (*GetGS) IPT0();
+/* Flags, control register and descriptor-table read accessors. */
+ IU32 (*GetEFLAGS) IPT0();
+ IU16 (*GetSTATUS) IPT0();
+ IU8 (*GetIOPL) IPT0();
+ IU16 (*GetMSW) IPT0();
+ IU32 (*GetCR0) IPT0();
+ IU32 (*GetCR2) IPT0();
+ IU32 (*GetCR3) IPT0();
+ IBOOL (*GetCF) IPT0();
+ IBOOL (*GetPF) IPT0();
+ IBOOL (*GetAF) IPT0();
+ IBOOL (*GetZF) IPT0();
+ IBOOL (*GetSF) IPT0();
+ IBOOL (*GetTF) IPT0();
+ IBOOL (*GetIF) IPT0();
+ IBOOL (*GetDF) IPT0();
+ IBOOL (*GetOF) IPT0();
+ IBOOL (*GetNT) IPT0();
+ IBOOL (*GetRF) IPT0();
+ IBOOL (*GetVM) IPT0();
+ IBOOL (*GetAC) IPT0();
+ IBOOL (*GetPE) IPT0();
+ IBOOL (*GetMP) IPT0();
+ IBOOL (*GetEM) IPT0();
+ IBOOL (*GetTS) IPT0();
+ IBOOL (*GetET) IPT0();
+ IBOOL (*GetNE) IPT0();
+ IBOOL (*GetWP) IPT0();
+ IBOOL (*GetPG) IPT0();
+ IU32 (*GetGDT_BASE) IPT0();
+ IU16 (*GetGDT_LIMIT) IPT0();
+ IU32 (*GetIDT_BASE) IPT0();
+ IU16 (*GetIDT_LIMIT) IPT0();
+ IU16 (*GetLDT_SELECTOR) IPT0();
+ IU32 (*GetLDT_BASE) IPT0();
+ IU32 (*GetLDT_LIMIT) IPT0();
+ IU16 (*GetTR_SELECTOR) IPT0();
+ IU32 (*GetTR_BASE) IPT0();
+ IU32 (*GetTR_LIMIT) IPT0();
+ IU16 (*GetTR_AR) IPT0();
+/* Host hooks: jump calibration, EOI, profiling, iret hooks, patching. */
+ IUH (*GetJumpCalibrateVal) IPT0();
+ IUH (*GetJumpInitialVal) IPT0();
+ void (*SetJumpInitialVal) IPT1(IUH, initialVal);
+ void (*SetEOIEnable) IPT1(IU8 *, initialVal);
+ void (*SetAddProfileData) IPT1(IHP, initialVal);
+ void (*SetMaxProfileData) IPT1(IHP, initialVal);
+ IHP (*GetAddProfileDataAddr) IPT0();
+ void (*PurgeLostIretHookLine) IPT2(IU16, lineNum, IU32, depth);
+ void (*ActivityCheckAfterTimeSlice) IPT0();
+ IBOOL (*CheckCsSelectorAndEipForCallFarPatching) IPT3(IU16, csSel, IU32, eip, IU32 *, pCsBase);
+};
+
+extern struct CpuVector Cpu;
+
+#ifdef CCPU
+IMPORT void c_cpu_simulate IPT0();
+#define cpu_simulate c_cpu_simulate
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_simulate (*(Cpu.Simulate))
+#else /* PROD */
+IMPORT void cpu_simulate IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_interrupt IPT2(CPU_INT_TYPE, intType, IU16, intNum);
+#define cpu_interrupt c_cpu_interrupt
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_interrupt (*(Cpu.Interrupt))
+#else /* PROD */
+IMPORT void cpu_interrupt IPT2(CPU_INT_TYPE, intType, IU16, intNum);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_clearHwInt IPT0();
+#define cpu_clearHwInt c_cpu_clearHwInt
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_clearHwInt (*(Cpu.ClearHwInt))
+#else /* PROD */
+IMPORT void cpu_clearHwInt IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_EOA_hook IPT0();
+#define cpu_EOA_hook c_cpu_EOA_hook
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_EOA_hook (*(Cpu.EndOfApplication))
+#else /* PROD */
+IMPORT void cpu_EOA_hook IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_terminate IPT0();
+#define cpu_terminate() c_cpu_terminate()
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_terminate() { if (Cpu.Terminate) (*(Cpu.Terminate))(); }
+#else /* PROD */
+IMPORT void cpu_terminate IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_init IPT0();
+#define cpu_init c_cpu_init
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_init (*(Cpu.Initialise))
+#else /* PROD */
+IMPORT void cpu_init IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_effective_addr IPT2(IU16, seg, IU32, offset);
+#define effective_addr(seg, offset) c_effective_addr(seg, offset)
+#else /* CCPU */
+
+#ifdef PROD
+#define effective_addr(seg, offset) (*(Cpu.EffectiveAddr))(seg, offset)
+#else /* PROD */
+IMPORT IU32 effective_addr IPT2(IU16, seg, IU32, offset);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_q_ev_set_count IPT1(IU32, val);
+#define host_q_ev_set_count c_cpu_q_ev_set_count
+#else /* CCPU */
+
+#ifdef PROD
+#define host_q_ev_set_count (*(Cpu.SetQuickEventCount))
+#else /* PROD */
+IMPORT void host_q_ev_set_count IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_cpu_q_ev_get_count IPT0();
+#define host_q_ev_get_count c_cpu_q_ev_get_count
+#else /* CCPU */
+
+#ifdef PROD
+#define host_q_ev_get_count (*(Cpu.GetQuickEventCount))
+#else /* PROD */
+IMPORT IU32 host_q_ev_get_count IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_init_ios_in IPT4(IHP, InTables, IHP, OutTables, IUH, maxAdaptor, IU16, portMask);
+#define cpu_init_ios_in c_cpu_init_ios_in
+#else /* CCPU */
+
+#ifdef PROD
+#define cpu_init_ios_in (*(Cpu.InitIOS))
+#else /* PROD */
+IMPORT void cpu_init_ios_in IPT4(IHP, InTables, IHP, OutTables, IUH, maxAdaptor, IU16, portMask);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_define_inb IPT2(IUH, adaptor, IHP, func);
+#define ios_define_inb c_cpu_define_inb
+#else /* CCPU */
+
+#ifdef PROD
+#define ios_define_inb (*(Cpu.DefineInb))
+#else /* PROD */
+IMPORT void ios_define_inb IPT2(IUH, adaptor, IHP, func);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_define_inw IPT2(IUH, adaptor, IHP, func);
+#define ios_define_inw c_cpu_define_inw
+#else /* CCPU */
+
+#ifdef PROD
+#define ios_define_inw (*(Cpu.DefineInw))
+#else /* PROD */
+IMPORT void ios_define_inw IPT2(IUH, adaptor, IHP, func);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_define_ind IPT2(IUH, adaptor, IHP, func);
+#define ios_define_ind c_cpu_define_ind
+#else /* CCPU */
+
+#ifdef PROD
+#define ios_define_ind (*(Cpu.DefineInd))
+#else /* PROD */
+IMPORT void ios_define_ind IPT2(IUH, adaptor, IHP, func);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_define_outb IPT2(IUH, adaptor, IHP, func);
+#define ios_define_outb c_cpu_define_outb
+#else /* CCPU */
+
+#ifdef PROD
+#define ios_define_outb (*(Cpu.DefineOutb))
+#else /* PROD */
+IMPORT void ios_define_outb IPT2(IUH, adaptor, IHP, func);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_define_outw IPT2(IUH, adaptor, IHP, func);
+#define ios_define_outw c_cpu_define_outw
+#else /* CCPU */
+
+#ifdef PROD
+#define ios_define_outw (*(Cpu.DefineOutw))
+#else /* PROD */
+IMPORT void ios_define_outw IPT2(IUH, adaptor, IHP, func);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_cpu_define_outd IPT2(IUH, adaptor, IHP, func);
+#define ios_define_outd c_cpu_define_outd
+#else /* CCPU */
+
+#ifdef PROD
+#define ios_define_outd (*(Cpu.DefineOutd))
+#else /* PROD */
+IMPORT void ios_define_outd IPT2(IUH, adaptor, IHP, func);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setAL IPT1(IU8, val);
+#define setAL(val) c_setAL(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setAL(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEax((GLOBAL_nanoEax & 0xFFFFFF00) | ((val) & 0x000000FF)): \
+ SET_GLOBAL_R_EAX((GLOBAL_R_EAX & 0xFFFFFF00) | ((val) & 0x000000FF)))
+
+#else /* PROD */
+IMPORT void setAL IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setAH IPT1(IU8, val);
+#define setAH(val) c_setAH(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setAH(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEax((GLOBAL_nanoEax & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)): \
+ SET_GLOBAL_R_EAX((GLOBAL_R_EAX & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)))
+
+#else /* PROD */
+IMPORT void setAH IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setAX IPT1(IU16, val);
+#define setAX(val) c_setAX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setAX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEax((GLOBAL_nanoEax & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_EAX((GLOBAL_R_EAX & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setAX IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEAX IPT1(IU32, val);
+#define setEAX(val) c_setEAX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEAX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEax(val): \
+ SET_GLOBAL_R_EAX(val))
+
+#else /* PROD */
+IMPORT void setEAX IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setBL IPT1(IU8, val);
+#define setBL(val) c_setBL(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setBL(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEbx((GLOBAL_nanoEbx & 0xFFFFFF00) | ((val) & 0x000000FF)): \
+ SET_GLOBAL_R_EBX((GLOBAL_R_EBX & 0xFFFFFF00) | ((val) & 0x000000FF)))
+
+#else /* PROD */
+IMPORT void setBL IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setBH IPT1(IU8, val);
+#define setBH(val) c_setBH(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setBH(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEbx((GLOBAL_nanoEbx & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)): \
+ SET_GLOBAL_R_EBX((GLOBAL_R_EBX & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)))
+
+#else /* PROD */
+IMPORT void setBH IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setBX IPT1(IU16, val);
+#define setBX(val) c_setBX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setBX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEbx((GLOBAL_nanoEbx & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_EBX((GLOBAL_R_EBX & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setBX IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEBX IPT1(IU32, val);
+#define setEBX(val) c_setEBX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEBX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEbx(val): \
+ SET_GLOBAL_R_EBX(val))
+
+#else /* PROD */
+IMPORT void setEBX IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCL IPT1(IU8, val);
+#define setCL(val) c_setCL(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCL(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEcx((GLOBAL_nanoEcx & 0xFFFFFF00) | ((val) & 0x000000FF)): \
+ SET_GLOBAL_R_ECX((GLOBAL_R_ECX & 0xFFFFFF00) | ((val) & 0x000000FF)))
+
+#else /* PROD */
+IMPORT void setCL IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCH IPT1(IU8, val);
+#define setCH(val) c_setCH(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCH(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEcx((GLOBAL_nanoEcx & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)): \
+ SET_GLOBAL_R_ECX((GLOBAL_R_ECX & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)))
+
+#else /* PROD */
+IMPORT void setCH IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCX IPT1(IU16, val);
+#define setCX(val) c_setCX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEcx((GLOBAL_nanoEcx & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_ECX((GLOBAL_R_ECX & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setCX IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setECX IPT1(IU32, val);
+#define setECX(val) c_setECX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setECX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEcx(val): \
+ SET_GLOBAL_R_ECX(val))
+
+#else /* PROD */
+IMPORT void setECX IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setDL IPT1(IU8, val);
+#define setDL(val) c_setDL(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDL(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEdx((GLOBAL_nanoEdx & 0xFFFFFF00) | ((val) & 0x000000FF)): \
+ SET_GLOBAL_R_EDX((GLOBAL_R_EDX & 0xFFFFFF00) | ((val) & 0x000000FF)))
+
+#else /* PROD */
+IMPORT void setDL IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setDH IPT1(IU8, val);
+#define setDH(val) c_setDH(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDH(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEdx((GLOBAL_nanoEdx & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)): \
+ SET_GLOBAL_R_EDX((GLOBAL_R_EDX & 0xFFFF00FF) | (((val) & 0x000000FF) << 8)))
+
+#else /* PROD */
+IMPORT void setDH IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setDX IPT1(IU16, val);
+#define setDX(val) c_setDX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEdx((GLOBAL_nanoEdx & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_EDX((GLOBAL_R_EDX & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setDX IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEDX IPT1(IU32, val);
+#define setEDX(val) c_setEDX(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEDX(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEdx(val): \
+ SET_GLOBAL_R_EDX(val))
+
+#else /* PROD */
+IMPORT void setEDX IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setSI IPT1(IU16, val);
+#define setSI(val) c_setSI(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSI(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEsi((GLOBAL_nanoEsi & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_ESI((GLOBAL_R_ESI & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setSI IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setESI IPT1(IU32, val);
+#define setESI(val) c_setESI(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setESI(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEsi(val): \
+ SET_GLOBAL_R_ESI(val))
+
+#else /* PROD */
+IMPORT void setESI IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setDI IPT1(IU16, val);
+#define setDI(val) c_setDI(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDI(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEdi((GLOBAL_nanoEdi & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_EDI((GLOBAL_R_EDI & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setDI IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEDI IPT1(IU32, val);
+#define setEDI(val) c_setEDI(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEDI(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEdi(val): \
+ SET_GLOBAL_R_EDI(val))
+
+#else /* PROD */
+IMPORT void setEDI IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setSP IPT1(IU16, val);
+#define setSP(val) c_setSP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSP(val) (*(Cpu.SetSP))(val)
+#else /* PROD */
+IMPORT void setSP IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setESP IPT1(IU32, val);
+#define setESP(val) c_setESP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setESP(val) (*(Cpu.SetESP))(val)
+#else /* PROD */
+IMPORT void setESP IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setBP IPT1(IU16, val);
+#define setBP(val) c_setBP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setBP(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEbp((GLOBAL_nanoEbp & 0xFFFF0000) | ((val) & 0x0000FFFF)): \
+ SET_GLOBAL_R_EBP((GLOBAL_R_EBP & 0xFFFF0000) | ((val) & 0x0000FFFF)))
+
+#else /* PROD */
+IMPORT void setBP IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEBP IPT1(IU32, val);
+#define setEBP(val) c_setEBP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEBP(val) (GLOBAL_InNanoCpu ? SET_GLOBAL_nanoEbp(val): \
+ SET_GLOBAL_R_EBP(val))
+
+#else /* PROD */
+IMPORT void setEBP IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setIP IPT1(IU16, val);
+#define setIP(val) c_setIP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setIP(val) (*(Cpu.SetIP))(val)
+#else /* PROD */
+IMPORT void setIP IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEIP IPT1(IU32, val);
+#define setEIP(val) c_setEIP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEIP(val) (*(Cpu.SetEIP))(val)
+#else /* PROD */
+IMPORT void setEIP IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_setCS IPT1(IU16, val);
+#define setCS(val) c_setCS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCS(val) (*(Cpu.SetCS))(val)
+#else /* PROD */
+IMPORT IUH setCS IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_setSS IPT1(IU16, val);
+#define setSS(val) c_setSS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSS(val) (*(Cpu.SetSS))(val)
+#else /* PROD */
+IMPORT IUH setSS IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_setDS IPT1(IU16, val);
+#define setDS(val) c_setDS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDS(val) (*(Cpu.SetDS))(val)
+#else /* PROD */
+IMPORT IUH setDS IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_setES IPT1(IU16, val);
+#define setES(val) c_setES(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setES(val) (*(Cpu.SetES))(val)
+#else /* PROD */
+IMPORT IUH setES IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_setFS IPT1(IU16, val);
+#define setFS(val) c_setFS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setFS(val) (*(Cpu.SetFS))(val)
+#else /* PROD */
+IMPORT IUH setFS IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_setGS IPT1(IU16, val);
+#define setGS(val) c_setGS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setGS(val) (*(Cpu.SetGS))(val)
+#else /* PROD */
+IMPORT IUH setGS IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEFLAGS IPT1(IU32, val);
+#define setEFLAGS(val) c_setEFLAGS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEFLAGS(val) (*(Cpu.SetEFLAGS))(val)
+#else /* PROD */
+IMPORT void setEFLAGS IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setSTATUS IPT1(IU16, val);
+#define setSTATUS(val) c_setSTATUS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSTATUS(val) (*(Cpu.SetSTATUS))(val)
+#else /* PROD */
+IMPORT void setSTATUS IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setIOPL IPT1(IU8, val);
+#define setIOPL(val) c_setIOPL(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setIOPL(val) (*(Cpu.SetIOPL))(val)
+#else /* PROD */
+IMPORT void setIOPL IPT1(IU8, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setMSW IPT1(IU16, val);
+#define setMSW(val) c_setMSW(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setMSW(val) (*(Cpu.SetMSW))(val)
+#else /* PROD */
+IMPORT void setMSW IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCR0 IPT1(IU32, val);
+#define setCR0(val) c_setCR0(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCR0(val) (*(Cpu.SetCR0))(val)
+#else /* PROD */
+IMPORT void setCR0 IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCR2 IPT1(IU32, val);
+#define setCR2(val) c_setCR2(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCR2(val) (*(Cpu.SetCR2))(val)
+#else /* PROD */
+IMPORT void setCR2 IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCR3 IPT1(IU32, val);
+#define setCR3(val) c_setCR3(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCR3(val) (*(Cpu.SetCR3))(val)
+#else /* PROD */
+IMPORT void setCR3 IPT1(IU32, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCF IPT1(IBOOL, val);
+#define setCF(val) c_setCF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCF(val) (*(Cpu.SetCF))(val)
+#else /* PROD */
+IMPORT void setCF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setPF IPT1(IBOOL, val);
+#define setPF(val) c_setPF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setPF(val) (*(Cpu.SetPF))(val)
+#else /* PROD */
+IMPORT void setPF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setAF IPT1(IBOOL, val);
+#define setAF(val) c_setAF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setAF(val) (*(Cpu.SetAF))(val)
+#else /* PROD */
+IMPORT void setAF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setZF IPT1(IBOOL, val);
+#define setZF(val) c_setZF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setZF(val) (*(Cpu.SetZF))(val)
+#else /* PROD */
+IMPORT void setZF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setSF IPT1(IBOOL, val);
+#define setSF(val) c_setSF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSF(val) (*(Cpu.SetSF))(val)
+#else /* PROD */
+IMPORT void setSF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setTF IPT1(IBOOL, val);
+#define setTF(val) c_setTF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setTF(val) (*(Cpu.SetTF))(val)
+#else /* PROD */
+IMPORT void setTF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setIF IPT1(IBOOL, val);
+#define setIF(val) c_setIF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setIF(val) (*(Cpu.SetIF))(val)
+#else /* PROD */
+IMPORT void setIF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setDF IPT1(IBOOL, val);
+#define setDF(val) c_setDF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDF(val) (*(Cpu.SetDF))(val)
+#else /* PROD */
+IMPORT void setDF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setOF IPT1(IBOOL, val);
+#define setOF(val) c_setOF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setOF(val) (*(Cpu.SetOF))(val)
+#else /* PROD */
+IMPORT void setOF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setNT IPT1(IBOOL, val);
+#define setNT(val) c_setNT(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setNT(val) (*(Cpu.SetNT))(val)
+#else /* PROD */
+IMPORT void setNT IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setRF IPT1(IBOOL, val);
+#define setRF(val) c_setRF(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setRF(val) (*(Cpu.SetRF))(val)
+#else /* PROD */
+IMPORT void setRF IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setVM IPT1(IBOOL, val);
+#define setVM(val) c_setVM(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setVM(val) (*(Cpu.SetVM))(val)
+#else /* PROD */
+IMPORT void setVM IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setAC IPT1(IBOOL, val);
+#define setAC(val) c_setAC(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setAC(val) (*(Cpu.SetAC))(val)
+#else /* PROD */
+IMPORT void setAC IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setPE IPT1(IBOOL, val);
+#define setPE(val) c_setPE(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setPE(val) (*(Cpu.SetPE))(val)
+#else /* PROD */
+IMPORT void setPE IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setMP IPT1(IBOOL, val);
+#define setMP(val) c_setMP(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setMP(val) (*(Cpu.SetMP))(val)
+#else /* PROD */
+IMPORT void setMP IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setEM IPT1(IBOOL, val);
+#define setEM(val) c_setEM(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEM(val) (*(Cpu.SetEM))(val)
+#else /* PROD */
+IMPORT void setEM IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setTS IPT1(IBOOL, val);
+#define setTS(val) c_setTS(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setTS(val) (*(Cpu.SetTS))(val)
+#else /* PROD */
+IMPORT void setTS IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setPG IPT1(IBOOL, val);
+#define setPG(val) c_setPG(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setPG(val) (*(Cpu.SetPG))(val)
+#else /* PROD */
+IMPORT void setPG IPT1(IBOOL, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setLDT_SELECTOR IPT1(IU16, val);
+#define setLDT_SELECTOR(val) c_setLDT_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setLDT_SELECTOR(val) (*(Cpu.SetLDT_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setLDT_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setTR_SELECTOR IPT1(IU16, val);
+#define setTR_SELECTOR(val) c_setTR_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setTR_SELECTOR(val) (*(Cpu.SetTR_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setTR_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getAL IPT0();
+#define getAL() c_getAL()
+#else /* CCPU */
+
+#ifdef PROD
+#define getAL() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEax: GLOBAL_R_EAX)) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getAL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getAH IPT0();
+#define getAH() c_getAH()
+#else /* CCPU */
+
+#ifdef PROD
+#define getAH() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEax: GLOBAL_R_EAX) >> 8) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getAH IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getAX IPT0();
+#define getAX() c_getAX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getAX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEax: GLOBAL_R_EAX)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getAX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEAX IPT0();
+#define getEAX() c_getEAX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEAX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEax: GLOBAL_R_EAX)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getEAX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getBL IPT0();
+#define getBL() c_getBL()
+#else /* CCPU */
+
+#ifdef PROD
+#define getBL() (*(Cpu.GetBL))()
+#else /* PROD */
+IMPORT IU8 getBL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getBL IPT0();
+#define getBL() c_getBL()
+#else /* CCPU */
+
+#ifdef PROD
+/* NOTE(review): was (*(Cpu.GetBL))(), the only low-byte getter routed
+   through the vector table; made consistent with getAL/getCL/getDL and
+   the rest of the BX family (direct global access).  If this header is
+   regenerated, mirror the fix in the generator. */
+#define getBL() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEbx: GLOBAL_R_EBX)) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getBL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getBX IPT0();
+#define getBX() c_getBX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getBX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEbx: GLOBAL_R_EBX)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getBX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEBX IPT0();
+#define getEBX() c_getEBX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEBX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEbx: GLOBAL_R_EBX)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getEBX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getCL IPT0();
+#define getCL() c_getCL()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCL() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEcx: GLOBAL_R_ECX)) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getCL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getCH IPT0();
+#define getCH() c_getCH()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCH() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEcx: GLOBAL_R_ECX) >> 8) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getCH IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getCX IPT0();
+#define getCX() c_getCX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEcx: GLOBAL_R_ECX)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getCX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getECX IPT0();
+#define getECX() c_getECX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getECX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEcx: GLOBAL_R_ECX)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getECX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getDL IPT0();
+#define getDL() c_getDL()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDL() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEdx: GLOBAL_R_EDX)) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getDL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getDH IPT0();
+#define getDH() c_getDH()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDH() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEdx: GLOBAL_R_EDX) >> 8) & 0x000000FF)
+#else /* PROD */
+IMPORT IU8 getDH IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getDX IPT0();
+#define getDX() c_getDX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEdx: GLOBAL_R_EDX)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getDX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEDX IPT0();
+#define getEDX() c_getEDX()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEDX() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEdx: GLOBAL_R_EDX)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getEDX IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getSI IPT0();
+#define getSI() c_getSI()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSI() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEsi: GLOBAL_R_ESI)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getSI IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getESI IPT0();
+#define getESI() c_getESI()
+#else /* CCPU */
+
+#ifdef PROD
+#define getESI() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEsi: GLOBAL_R_ESI)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getESI IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getDI IPT0();
+#define getDI() c_getDI()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDI() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEdi: GLOBAL_R_EDI)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getDI IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEDI IPT0();
+#define getEDI() c_getEDI()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEDI() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEdi: GLOBAL_R_EDI)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getEDI IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getSP IPT0();
+#define getSP() c_getSP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSP() (*(Cpu.GetSP))()
+#else /* PROD */
+IMPORT IU16 getSP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getESP IPT0();
+#define getESP() c_getESP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getESP() (*(Cpu.GetESP))()
+#else /* PROD */
+IMPORT IU32 getESP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getBP IPT0();
+#define getBP() c_getBP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getBP() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEbp: GLOBAL_R_EBP)) & 0x0000FFFF)
+#else /* PROD */
+IMPORT IU16 getBP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEBP IPT0();
+#define getEBP() c_getEBP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEBP() (((GLOBAL_InNanoCpu ? GLOBAL_nanoEbp: GLOBAL_R_EBP)) & 0xFFFFFFFF)
+#else /* PROD */
+IMPORT IU32 getEBP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getIP IPT0();
+#define getIP() c_getIP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getIP() (*(Cpu.GetIP))()
+#else /* PROD */
+IMPORT IU16 getIP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEIP IPT0();
+#define getEIP() c_getEIP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEIP() (*(Cpu.GetEIP))()
+#else /* PROD */
+IMPORT IU32 getEIP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getCS IPT0();
+#define getCS() c_getCS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCS() (*(Cpu.GetCS))()
+#else /* PROD */
+IMPORT IU16 getCS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getSS IPT0();
+#define getSS() c_getSS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSS() (*(Cpu.GetSS))()
+#else /* PROD */
+IMPORT IU16 getSS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getDS IPT0();
+#define getDS() c_getDS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDS() (*(Cpu.GetDS))()
+#else /* PROD */
+IMPORT IU16 getDS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getES IPT0();
+#define getES() c_getES()
+#else /* CCPU */
+
+#ifdef PROD
+#define getES() (*(Cpu.GetES))()
+#else /* PROD */
+IMPORT IU16 getES IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getFS IPT0();
+#define getFS() c_getFS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getFS() (*(Cpu.GetFS))()
+#else /* PROD */
+IMPORT IU16 getFS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getGS IPT0();
+#define getGS() c_getGS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGS() (*(Cpu.GetGS))()
+#else /* PROD */
+IMPORT IU16 getGS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getEFLAGS IPT0();
+#define getEFLAGS() c_getEFLAGS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEFLAGS() (*(Cpu.GetEFLAGS))()
+#else /* PROD */
+IMPORT IU32 getEFLAGS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getSTATUS IPT0();
+#define getSTATUS() c_getSTATUS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSTATUS() (*(Cpu.GetSTATUS))()
+#else /* PROD */
+IMPORT IU16 getSTATUS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU8 c_getIOPL IPT0();
+#define getIOPL() c_getIOPL()
+#else /* CCPU */
+
+#ifdef PROD
+#define getIOPL() (*(Cpu.GetIOPL))()
+#else /* PROD */
+IMPORT IU8 getIOPL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getMSW IPT0();
+#define getMSW() c_getMSW()
+#else /* CCPU */
+
+#ifdef PROD
+#define getMSW() (*(Cpu.GetMSW))()
+#else /* PROD */
+IMPORT IU16 getMSW IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getCR0 IPT0();
+#define getCR0() c_getCR0()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCR0() (*(Cpu.GetCR0))()
+#else /* PROD */
+IMPORT IU32 getCR0 IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getCR2 IPT0();
+#define getCR2() c_getCR2()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCR2() (*(Cpu.GetCR2))()
+#else /* PROD */
+IMPORT IU32 getCR2 IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getCR3 IPT0();
+#define getCR3() c_getCR3()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCR3() (*(Cpu.GetCR3))()
+#else /* PROD */
+IMPORT IU32 getCR3 IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getCF IPT0();
+#define getCF() c_getCF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCF() (*(Cpu.GetCF))()
+#else /* PROD */
+IMPORT IBOOL getCF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getPF IPT0();
+#define getPF() c_getPF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getPF() (*(Cpu.GetPF))()
+#else /* PROD */
+IMPORT IBOOL getPF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getAF IPT0();
+#define getAF() c_getAF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getAF() (*(Cpu.GetAF))()
+#else /* PROD */
+IMPORT IBOOL getAF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getZF IPT0();
+#define getZF() c_getZF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getZF() (*(Cpu.GetZF))()
+#else /* PROD */
+IMPORT IBOOL getZF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getSF IPT0();
+#define getSF() c_getSF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSF() (*(Cpu.GetSF))()
+#else /* PROD */
+IMPORT IBOOL getSF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getTF IPT0();
+#define getTF() c_getTF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getTF() (*(Cpu.GetTF))()
+#else /* PROD */
+IMPORT IBOOL getTF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getIF IPT0();
+#define getIF() c_getIF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getIF() (*(Cpu.GetIF))()
+#else /* PROD */
+IMPORT IBOOL getIF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getDF IPT0();
+#define getDF() c_getDF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDF() (*(Cpu.GetDF))()
+#else /* PROD */
+IMPORT IBOOL getDF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getOF IPT0();
+#define getOF() c_getOF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getOF() (*(Cpu.GetOF))()
+#else /* PROD */
+IMPORT IBOOL getOF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getNT IPT0();
+#define getNT() c_getNT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getNT() (*(Cpu.GetNT))()
+#else /* PROD */
+IMPORT IBOOL getNT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getRF IPT0();
+#define getRF() c_getRF()
+#else /* CCPU */
+
+#ifdef PROD
+#define getRF() (*(Cpu.GetRF))()
+#else /* PROD */
+IMPORT IBOOL getRF IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getVM IPT0();
+#define getVM() c_getVM()
+#else /* CCPU */
+
+#ifdef PROD
+#define getVM() (*(Cpu.GetVM))()
+#else /* PROD */
+IMPORT IBOOL getVM IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getAC IPT0();
+#define getAC() c_getAC()
+#else /* CCPU */
+
+#ifdef PROD
+#define getAC() (*(Cpu.GetAC))()
+#else /* PROD */
+IMPORT IBOOL getAC IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getPE IPT0();
+#define getPE() c_getPE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getPE() (*(Cpu.GetPE))()
+#else /* PROD */
+IMPORT IBOOL getPE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getMP IPT0();
+#define getMP() c_getMP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getMP() (*(Cpu.GetMP))()
+#else /* PROD */
+IMPORT IBOOL getMP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getEM IPT0();
+#define getEM() c_getEM()
+#else /* CCPU */
+
+#ifdef PROD
+#define getEM() (*(Cpu.GetEM))()
+#else /* PROD */
+IMPORT IBOOL getEM IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getTS IPT0();
+#define getTS() c_getTS()
+#else /* CCPU */
+
+#ifdef PROD
+#define getTS() (*(Cpu.GetTS))()
+#else /* PROD */
+IMPORT IBOOL getTS IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getET IPT0();
+#define getET() c_getET()
+#else /* CCPU */
+
+#ifdef PROD
+#define getET() (*(Cpu.GetET))()
+#else /* PROD */
+IMPORT IBOOL getET IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getNE IPT0();
+#define getNE() c_getNE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getNE() (*(Cpu.GetNE))()
+#else /* PROD */
+IMPORT IBOOL getNE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getWP IPT0();
+#define getWP() c_getWP()
+#else /* CCPU */
+
+#ifdef PROD
+#define getWP() (*(Cpu.GetWP))()
+#else /* PROD */
+IMPORT IBOOL getWP IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_getPG IPT0();
+#define getPG() c_getPG()
+#else /* CCPU */
+
+#ifdef PROD
+#define getPG() (*(Cpu.GetPG))()
+#else /* PROD */
+IMPORT IBOOL getPG IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getGDT_BASE IPT0();
+#define getGDT_BASE() c_getGDT_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGDT_BASE() (*(Cpu.GetGDT_BASE))()
+#else /* PROD */
+IMPORT IU32 getGDT_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getGDT_LIMIT IPT0();
+#define getGDT_LIMIT() c_getGDT_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGDT_LIMIT() (*(Cpu.GetGDT_LIMIT))()
+#else /* PROD */
+IMPORT IU16 getGDT_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getIDT_BASE IPT0();
+#define getIDT_BASE() c_getIDT_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getIDT_BASE() (*(Cpu.GetIDT_BASE))()
+#else /* PROD */
+IMPORT IU32 getIDT_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getIDT_LIMIT IPT0();
+#define getIDT_LIMIT() c_getIDT_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getIDT_LIMIT() (*(Cpu.GetIDT_LIMIT))()
+#else /* PROD */
+IMPORT IU16 getIDT_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getLDT_SELECTOR IPT0();
+#define getLDT_SELECTOR() c_getLDT_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getLDT_SELECTOR() (*(Cpu.GetLDT_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getLDT_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getLDT_BASE IPT0();
+#define getLDT_BASE() c_getLDT_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getLDT_BASE() (*(Cpu.GetLDT_BASE))()
+#else /* PROD */
+IMPORT IU32 getLDT_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getLDT_LIMIT IPT0();
+#define getLDT_LIMIT() c_getLDT_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getLDT_LIMIT() (*(Cpu.GetLDT_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getLDT_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getTR_SELECTOR IPT0();
+#define getTR_SELECTOR() c_getTR_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getTR_SELECTOR() (*(Cpu.GetTR_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getTR_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getTR_BASE IPT0();
+#define getTR_BASE() c_getTR_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getTR_BASE() (*(Cpu.GetTR_BASE))()
+#else /* PROD */
+IMPORT IU32 getTR_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getTR_LIMIT IPT0();
+#define getTR_LIMIT() c_getTR_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getTR_LIMIT() (*(Cpu.GetTR_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getTR_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getTR_AR IPT0();
+#define getTR_AR() c_getTR_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getTR_AR() (*(Cpu.GetTR_AR))()
+#else /* PROD */
+IMPORT IU16 getTR_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH host_get_q_calib_val IPT0();
+/* Deliberate self-referential define: the preprocessor never re-expands a
+ * macro inside its own expansion, so this is safe.  It exists only so the
+ * CCPU build accepts the same spelling the PROD build maps onto the Cpu
+ * vector below. */
+#define host_get_q_calib_val host_get_q_calib_val
+#else /* CCPU */
+
+#ifdef PROD
+/* PROD build: route through the Cpu vector's function pointer. */
+#define host_get_q_calib_val (*(Cpu.GetJumpCalibrateVal))
+#else /* PROD */
+IMPORT IUH host_get_q_calib_val IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH host_get_jump_restart IPT0();
+#define host_get_jump_restart host_get_jump_restart
+#else /* CCPU */
+
+#ifdef PROD
+#define host_get_jump_restart (*(Cpu.GetJumpInitialVal))
+#else /* PROD */
+IMPORT IUH host_get_jump_restart IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void host_set_jump_restart IPT1(IUH, initialVal);
+#define host_set_jump_restart(initialVal) host_set_jump_restart(initialVal)
+#else /* CCPU */
+
+#ifdef PROD
+#define host_set_jump_restart(initialVal) (*(Cpu.SetJumpInitialVal))(initialVal)
+#else /* PROD */
+IMPORT void host_set_jump_restart IPT1(IUH, initialVal);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void setEOIEnableAddr IPT1(IU8 *, initialVal);
+#define setEOIEnableAddr(initialVal) setEOIEnableAddr(initialVal)
+#else /* CCPU */
+
+#ifdef PROD
+#define setEOIEnableAddr(initialVal) (*(Cpu.SetEOIEnable))(initialVal)
+#else /* PROD */
+IMPORT void setEOIEnableAddr IPT1(IU8 *, initialVal);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void setAddProfileDataPtr IPT1(IHP, initialVal);
+#define setAddProfileDataPtr(initialVal) setAddProfileDataPtr(initialVal)
+#else /* CCPU */
+
+#ifdef PROD
+#define setAddProfileDataPtr(initialVal) (*(Cpu.SetAddProfileData))(initialVal)
+#else /* PROD */
+IMPORT void setAddProfileDataPtr IPT1(IHP, initialVal);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void setMaxProfileDataAddr IPT1(IHP, initialVal);
+#define setMaxProfileDataAddr(initialVal) setMaxProfileDataAddr(initialVal)
+#else /* CCPU */
+
+#ifdef PROD
+#define setMaxProfileDataAddr(initialVal) (*(Cpu.SetMaxProfileData))(initialVal)
+#else /* PROD */
+IMPORT void setMaxProfileDataAddr IPT1(IHP, initialVal);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IHP getAddProfileDataAddr IPT0();
+#define getAddProfileDataAddr() getAddProfileDataAddr()
+#else /* CCPU */
+
+#ifdef PROD
+#define getAddProfileDataAddr() (*(Cpu.GetAddProfileDataAddr))()
+#else /* PROD */
+IMPORT IHP getAddProfileDataAddr IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void PurgeLostIretHookLine IPT2(IU16, lineNum, IU32, depth);
+#define PurgeLostIretHookLine(lineNum, depth) PurgeLostIretHookLine(lineNum, depth)
+#else /* CCPU */
+
+#ifdef PROD
+#define PurgeLostIretHookLine(lineNum, depth) (*(Cpu.PurgeLostIretHookLine))(lineNum, depth)
+#else /* PROD */
+IMPORT void PurgeLostIretHookLine IPT2(IU16, lineNum, IU32, depth);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void ActivityCheckAfterTimeSlice IPT0();
+#define ActivityCheckAfterTimeSlice() ActivityCheckAfterTimeSlice()
+#else /* CCPU */
+
+#ifdef PROD
+#define ActivityCheckAfterTimeSlice() (*(Cpu.ActivityCheckAfterTimeSlice))()
+#else /* PROD */
+IMPORT void ActivityCheckAfterTimeSlice IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL CheckCsSelectorAndEipForCallFarPatching IPT3(IU16, csSel, IU32, eip, IU32 *, pCsBase);
+#define CheckCsSelectorAndEipForCallFarPatching(csSel, eip, pCsBase) CheckCsSelectorAndEipForCallFarPatching(csSel, eip, pCsBase)
+#else /* CCPU */
+
+#ifdef PROD
+#define CheckCsSelectorAndEipForCallFarPatching(csSel, eip, pCsBase) (*(Cpu.CheckCsSelectorAndEipForCallFarPatching))(csSel, eip, pCsBase)
+#else /* PROD */
+IMPORT IBOOL CheckCsSelectorAndEipForCallFarPatching IPT3(IU16, csSel, IU32, eip, IU32 *, pCsBase);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+/* Opaque record pointers; the struct definitions live elsewhere. */
+typedef struct CpuStateREC * TypeCpuStateRECptr;
+typedef struct ConstraintBitMapREC * TypeConstraintBitMapRECptr;
+typedef struct EntryPointCacheREC * TypeEntryPointCacheRECptr;
+
+/*
+ * Private CPU entry points, reached in the PROD build through
+ * (*(Cpu.Private)).Member by the accessor macros that follow this struct.
+ * Covers descriptor-table and segment-register state plus nano-CPU,
+ * compile-cache and Sf* hooks not exposed by the public Cpu vector.
+ */
+struct CpuPrivateVector {
+	IHP (*GetSadInfoTable) IPT0();
+	/* Descriptor-table base/limit (and access-rights) setters. */
+	IBOOL (*SetGDT_BASE_LIMIT) IPT2(IU32, base, IU16, limit);
+	IBOOL (*SetIDT_BASE_LIMIT) IPT2(IU32, base, IU16, limit);
+	IBOOL (*SetLDT_BASE_LIMIT) IPT2(IU32, base, IU32, limit);
+	IBOOL (*SetTR_BASE_LIMIT) IPT2(IU32, base, IU32, limit);
+	IBOOL (*SetTR_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	/* Segment-register cache setters: base, limit and access rights. */
+	IBOOL (*SetCS_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	IBOOL (*SetSS_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	IBOOL (*SetDS_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	IBOOL (*SetES_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	IBOOL (*SetFS_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	IBOOL (*SetGS_BASE_LIMIT_AR) IPT3(IU32, base, IU32, limit, IU16, ar);
+	/* Segment selector setters. */
+	void (*SetCS_SELECTOR) IPT1(IU16, val);
+	void (*SetSS_SELECTOR) IPT1(IU16, val);
+	void (*SetDS_SELECTOR) IPT1(IU16, val);
+	void (*SetES_SELECTOR) IPT1(IU16, val);
+	void (*SetFS_SELECTOR) IPT1(IU16, val);
+	void (*SetGS_SELECTOR) IPT1(IU16, val);
+	/* Segment selector getters. */
+	IU16 (*GetCS_SELECTOR) IPT0();
+	IU16 (*GetSS_SELECTOR) IPT0();
+	IU16 (*GetDS_SELECTOR) IPT0();
+	IU16 (*GetES_SELECTOR) IPT0();
+	IU16 (*GetFS_SELECTOR) IPT0();
+	IU16 (*GetGS_SELECTOR) IPT0();
+	/* Segment base getters. */
+	IU32 (*GetCS_BASE) IPT0();
+	IU32 (*GetSS_BASE) IPT0();
+	IU32 (*GetDS_BASE) IPT0();
+	IU32 (*GetES_BASE) IPT0();
+	IU32 (*GetFS_BASE) IPT0();
+	IU32 (*GetGS_BASE) IPT0();
+	/* Segment limit getters. */
+	IU32 (*GetCS_LIMIT) IPT0();
+	IU32 (*GetSS_LIMIT) IPT0();
+	IU32 (*GetDS_LIMIT) IPT0();
+	IU32 (*GetES_LIMIT) IPT0();
+	IU32 (*GetFS_LIMIT) IPT0();
+	IU32 (*GetGS_LIMIT) IPT0();
+	/* Segment access-rights getters. */
+	IU16 (*GetCS_AR) IPT0();
+	IU16 (*GetSS_AR) IPT0();
+	IU16 (*GetDS_AR) IPT0();
+	IU16 (*GetES_AR) IPT0();
+	IU16 (*GetFS_AR) IPT0();
+	IU16 (*GetGS_AR) IPT0();
+	/* Current privilege level. */
+	IUH (*GetCPL) IPT0();
+	void (*SetCPL) IPT1(IUH, prot);
+	/* Whole-CPU state snapshot in/out. */
+	void (*GetCpuState) IPT1(TypeCpuStateRECptr, state);
+	void (*SetCpuState) IPT1(TypeCpuStateRECptr, state);
+	/* Nano-CPU and block-compilation cache control. */
+	void (*InitNanoCpu) IPT1(IU32, variety);
+	void (*PrepareBlocksToCompile) IPT1(IU32, variety);
+	void (*InitRdWrCacheAndCookies) IPT1(IU32, variety);
+	void (*ResetRdWrCacheAndCookies) IPT1(IU32, variety);
+	void (*SetRegConstraint) IPT2(IU32, regId, IU8, constraintType);
+	void (*BpiCompileBPI) IPT1(char *, instructions);
+	void (*TrashIntelRegisters) IPT0();
+	void (*FmDeleteAllStructures) IPT1(IU32, newCR0);
+	/* Sf (video/page) hooks. */
+	void (*SfForceVideoOff) IPT0();
+	void (*SfRestoreVideoState) IPT0();
+	void (*SfMarkPageAsParsed) IPT1(IU32, intelPpn);
+	void (*SfMarkPageAsNotParsed) IPT1(IU32, intelPpn);
+	void (*SfRemovePciMappings) IPT0();
+};
+
+#ifdef CCPU
+IMPORT IHP c_getSadInfoTable IPT0();
+#define getSadInfoTable() c_getSadInfoTable()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSadInfoTable() (*((*(Cpu.Private)).GetSadInfoTable))()
+#else /* PROD */
+IMPORT IHP getSadInfoTable IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setGDT_BASE_LIMIT IPT2(IU32, base, IU16, limit);
+#define setGDT_BASE_LIMIT(base, limit) c_setGDT_BASE_LIMIT(base, limit)
+#else /* CCPU */
+
+#ifdef PROD
+#define setGDT_BASE_LIMIT(base, limit) (*((*(Cpu.Private)).SetGDT_BASE_LIMIT))(base, limit)
+#else /* PROD */
+IMPORT IBOOL setGDT_BASE_LIMIT IPT2(IU32, base, IU16, limit);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setIDT_BASE_LIMIT IPT2(IU32, base, IU16, limit);
+#define setIDT_BASE_LIMIT(base, limit) c_setIDT_BASE_LIMIT(base, limit)
+#else /* CCPU */
+
+#ifdef PROD
+#define setIDT_BASE_LIMIT(base, limit) (*((*(Cpu.Private)).SetIDT_BASE_LIMIT))(base, limit)
+#else /* PROD */
+IMPORT IBOOL setIDT_BASE_LIMIT IPT2(IU32, base, IU16, limit);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setLDT_BASE_LIMIT IPT2(IU32, base, IU32, limit);
+#define setLDT_BASE_LIMIT(base, limit) c_setLDT_BASE_LIMIT(base, limit)
+#else /* CCPU */
+
+#ifdef PROD
+#define setLDT_BASE_LIMIT(base, limit) (*((*(Cpu.Private)).SetLDT_BASE_LIMIT))(base, limit)
+#else /* PROD */
+IMPORT IBOOL setLDT_BASE_LIMIT IPT2(IU32, base, IU32, limit);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setTR_BASE_LIMIT IPT2(IU32, base, IU32, limit);
+#define setTR_BASE_LIMIT(base, limit) c_setTR_BASE_LIMIT(base, limit)
+#else /* CCPU */
+
+#ifdef PROD
+#define setTR_BASE_LIMIT(base, limit) (*((*(Cpu.Private)).SetTR_BASE_LIMIT))(base, limit)
+#else /* PROD */
+IMPORT IBOOL setTR_BASE_LIMIT IPT2(IU32, base, IU32, limit);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setTR_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setTR_BASE_LIMIT_AR(base, limit, ar) c_setTR_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setTR_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetTR_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setTR_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setCS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setCS_BASE_LIMIT_AR(base, limit, ar) c_setCS_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCS_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetCS_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setCS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setSS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setSS_BASE_LIMIT_AR(base, limit, ar) c_setSS_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSS_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetSS_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setSS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setDS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setDS_BASE_LIMIT_AR(base, limit, ar) c_setDS_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDS_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetDS_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setDS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setES_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setES_BASE_LIMIT_AR(base, limit, ar) c_setES_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setES_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetES_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setES_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setFS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setFS_BASE_LIMIT_AR(base, limit, ar) c_setFS_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setFS_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetFS_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setFS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IBOOL c_setGS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#define setGS_BASE_LIMIT_AR(base, limit, ar) c_setGS_BASE_LIMIT_AR(base, limit, ar)
+#else /* CCPU */
+
+#ifdef PROD
+#define setGS_BASE_LIMIT_AR(base, limit, ar) (*((*(Cpu.Private)).SetGS_BASE_LIMIT_AR))(base, limit, ar)
+#else /* PROD */
+IMPORT IBOOL setGS_BASE_LIMIT_AR IPT3(IU32, base, IU32, limit, IU16, ar);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCS_SELECTOR IPT1(IU16, val);
+#define setCS_SELECTOR(val) c_setCS_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCS_SELECTOR(val) (*((*(Cpu.Private)).SetCS_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setCS_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setSS_SELECTOR IPT1(IU16, val);
+#define setSS_SELECTOR(val) c_setSS_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setSS_SELECTOR(val) (*((*(Cpu.Private)).SetSS_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setSS_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setDS_SELECTOR IPT1(IU16, val);
+#define setDS_SELECTOR(val) c_setDS_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setDS_SELECTOR(val) (*((*(Cpu.Private)).SetDS_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setDS_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setES_SELECTOR IPT1(IU16, val);
+#define setES_SELECTOR(val) c_setES_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setES_SELECTOR(val) (*((*(Cpu.Private)).SetES_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setES_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setFS_SELECTOR IPT1(IU16, val);
+#define setFS_SELECTOR(val) c_setFS_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setFS_SELECTOR(val) (*((*(Cpu.Private)).SetFS_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setFS_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setGS_SELECTOR IPT1(IU16, val);
+#define setGS_SELECTOR(val) c_setGS_SELECTOR(val)
+#else /* CCPU */
+
+#ifdef PROD
+#define setGS_SELECTOR(val) (*((*(Cpu.Private)).SetGS_SELECTOR))(val)
+#else /* PROD */
+IMPORT void setGS_SELECTOR IPT1(IU16, val);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getCS_SELECTOR IPT0();
+#define getCS_SELECTOR() c_getCS_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCS_SELECTOR() (*((*(Cpu.Private)).GetCS_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getCS_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getSS_SELECTOR IPT0();
+#define getSS_SELECTOR() c_getSS_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSS_SELECTOR() (*((*(Cpu.Private)).GetSS_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getSS_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getDS_SELECTOR IPT0();
+#define getDS_SELECTOR() c_getDS_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDS_SELECTOR() (*((*(Cpu.Private)).GetDS_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getDS_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getES_SELECTOR IPT0();
+#define getES_SELECTOR() c_getES_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getES_SELECTOR() (*((*(Cpu.Private)).GetES_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getES_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getFS_SELECTOR IPT0();
+#define getFS_SELECTOR() c_getFS_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getFS_SELECTOR() (*((*(Cpu.Private)).GetFS_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getFS_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getGS_SELECTOR IPT0();
+#define getGS_SELECTOR() c_getGS_SELECTOR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGS_SELECTOR() (*((*(Cpu.Private)).GetGS_SELECTOR))()
+#else /* PROD */
+IMPORT IU16 getGS_SELECTOR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getCS_BASE IPT0();
+#define getCS_BASE() c_getCS_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCS_BASE() (*((*(Cpu.Private)).GetCS_BASE))()
+#else /* PROD */
+IMPORT IU32 getCS_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getSS_BASE IPT0();
+#define getSS_BASE() c_getSS_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSS_BASE() (*((*(Cpu.Private)).GetSS_BASE))()
+#else /* PROD */
+IMPORT IU32 getSS_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getDS_BASE IPT0();
+#define getDS_BASE() c_getDS_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDS_BASE() (*((*(Cpu.Private)).GetDS_BASE))()
+#else /* PROD */
+IMPORT IU32 getDS_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getES_BASE IPT0();
+#define getES_BASE() c_getES_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getES_BASE() (*((*(Cpu.Private)).GetES_BASE))()
+#else /* PROD */
+IMPORT IU32 getES_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getFS_BASE IPT0();
+#define getFS_BASE() c_getFS_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getFS_BASE() (*((*(Cpu.Private)).GetFS_BASE))()
+#else /* PROD */
+IMPORT IU32 getFS_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getGS_BASE IPT0();
+#define getGS_BASE() c_getGS_BASE()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGS_BASE() (*((*(Cpu.Private)).GetGS_BASE))()
+#else /* PROD */
+IMPORT IU32 getGS_BASE IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getCS_LIMIT IPT0();
+#define getCS_LIMIT() c_getCS_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCS_LIMIT() (*((*(Cpu.Private)).GetCS_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getCS_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getSS_LIMIT IPT0();
+#define getSS_LIMIT() c_getSS_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSS_LIMIT() (*((*(Cpu.Private)).GetSS_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getSS_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getDS_LIMIT IPT0();
+#define getDS_LIMIT() c_getDS_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDS_LIMIT() (*((*(Cpu.Private)).GetDS_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getDS_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getES_LIMIT IPT0();
+#define getES_LIMIT() c_getES_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getES_LIMIT() (*((*(Cpu.Private)).GetES_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getES_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getFS_LIMIT IPT0();
+#define getFS_LIMIT() c_getFS_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getFS_LIMIT() (*((*(Cpu.Private)).GetFS_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getFS_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU32 c_getGS_LIMIT IPT0();
+#define getGS_LIMIT() c_getGS_LIMIT()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGS_LIMIT() (*((*(Cpu.Private)).GetGS_LIMIT))()
+#else /* PROD */
+IMPORT IU32 getGS_LIMIT IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getCS_AR IPT0();
+#define getCS_AR() c_getCS_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCS_AR() (*((*(Cpu.Private)).GetCS_AR))()
+#else /* PROD */
+IMPORT IU16 getCS_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getSS_AR IPT0();
+#define getSS_AR() c_getSS_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getSS_AR() (*((*(Cpu.Private)).GetSS_AR))()
+#else /* PROD */
+IMPORT IU16 getSS_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getDS_AR IPT0();
+#define getDS_AR() c_getDS_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getDS_AR() (*((*(Cpu.Private)).GetDS_AR))()
+#else /* PROD */
+IMPORT IU16 getDS_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getES_AR IPT0();
+#define getES_AR() c_getES_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getES_AR() (*((*(Cpu.Private)).GetES_AR))()
+#else /* PROD */
+IMPORT IU16 getES_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getFS_AR IPT0();
+#define getFS_AR() c_getFS_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getFS_AR() (*((*(Cpu.Private)).GetFS_AR))()
+#else /* PROD */
+IMPORT IU16 getFS_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IU16 c_getGS_AR IPT0();
+#define getGS_AR() c_getGS_AR()
+#else /* CCPU */
+
+#ifdef PROD
+#define getGS_AR() (*((*(Cpu.Private)).GetGS_AR))()
+#else /* PROD */
+IMPORT IU16 getGS_AR IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT IUH c_getCPL IPT0();
+#define getCPL() c_getCPL()
+#else /* CCPU */
+
+#ifdef PROD
+#define getCPL() (*((*(Cpu.Private)).GetCPL))()
+#else /* PROD */
+IMPORT IUH getCPL IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCPL IPT1(IUH, prot);
+#define setCPL(prot) c_setCPL(prot)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCPL(prot) (*((*(Cpu.Private)).SetCPL))(prot)
+#else /* PROD */
+IMPORT void setCPL IPT1(IUH, prot);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_getCpuState IPT1(TypeCpuStateRECptr, state);
+#define getCpuState(state) c_getCpuState(state)
+#else /* CCPU */
+
+#ifdef PROD
+#define getCpuState(state) (*((*(Cpu.Private)).GetCpuState))(state)
+#else /* PROD */
+IMPORT void getCpuState IPT1(TypeCpuStateRECptr, state);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setCpuState IPT1(TypeCpuStateRECptr, state);
+#define setCpuState(state) c_setCpuState(state)
+#else /* CCPU */
+
+#ifdef PROD
+#define setCpuState(state) (*((*(Cpu.Private)).SetCpuState))(state)
+#else /* PROD */
+IMPORT void setCpuState IPT1(TypeCpuStateRECptr, state);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_InitNanoCpu IPT1(IU32, variety);
+#define initNanoCpu(variety) c_InitNanoCpu(variety)
+#else /* CCPU */
+
+#ifdef PROD
+#define initNanoCpu(variety) (*((*(Cpu.Private)).InitNanoCpu))(variety)
+#else /* PROD */
+IMPORT void initNanoCpu IPT1(IU32, variety);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_PrepareBlocksToCompile IPT1(IU32, variety);
+#define prepareBlocksToCompile(variety) c_PrepareBlocksToCompile(variety)
+#else /* CCPU */
+
+#ifdef PROD
+#define prepareBlocksToCompile(variety) (*((*(Cpu.Private)).PrepareBlocksToCompile))(variety)
+#else /* PROD */
+IMPORT void prepareBlocksToCompile IPT1(IU32, variety);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_InitRdWrCacheAndCookies IPT1(IU32, variety);
+#define initRdWrCacheAndCookies(variety) c_InitRdWrCacheAndCookies(variety)
+#else /* CCPU */
+
+#ifdef PROD
+#define initRdWrCacheAndCookies(variety) (*((*(Cpu.Private)).InitRdWrCacheAndCookies))(variety)
+#else /* PROD */
+IMPORT void initRdWrCacheAndCookies IPT1(IU32, variety);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_ResetRdWrCacheAndCookies IPT1(IU32, variety);
+#define resetRdWrCacheAndCookies(variety) c_ResetRdWrCacheAndCookies(variety)
+#else /* CCPU */
+
+#ifdef PROD
+#define resetRdWrCacheAndCookies(variety) (*((*(Cpu.Private)).ResetRdWrCacheAndCookies))(variety)
+#else /* PROD */
+IMPORT void resetRdWrCacheAndCookies IPT1(IU32, variety);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_setRegConstraint IPT2(IU32, regId, IU8, constraintType);
+#define setRegConstraint(regId, constraintType) c_setRegConstraint(regId, constraintType)
+#else /* CCPU */
+
+#ifdef PROD
+#define setRegConstraint(regId, constraintType) (*((*(Cpu.Private)).SetRegConstraint))(regId, constraintType)
+#else /* PROD */
+IMPORT void setRegConstraint IPT2(IU32, regId, IU8, constraintType);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define BpiCompileBPI(instructions) (*((*(Cpu.Private)).BpiCompileBPI))(instructions)
+#else /* PROD */
+IMPORT void BpiCompileBPI IPT1(char *, instructions);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+IMPORT void c_trashIntelRegisters IPT0();
+#define trashIntelregisters c_trashIntelRegisters
+#else /* CCPU */
+
+#ifdef PROD
+#define trashIntelregisters (*((*(Cpu.Private)).TrashIntelRegisters))
+#else /* PROD */
+IMPORT void trashIntelregisters IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define FmDeleteAllStructures(newCR0) (*((*(Cpu.Private)).FmDeleteAllStructures))(newCR0)
+#else /* PROD */
+IMPORT void FmDeleteAllStructures IPT1(IU32, newCR0);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define sfForceVideoOff (*((*(Cpu.Private)).SfForceVideoOff))
+#else /* PROD */
+IMPORT void sfForceVideoOff IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define sfRestoreVideoState (*((*(Cpu.Private)).SfRestoreVideoState))
+#else /* PROD */
+IMPORT void sfRestoreVideoState IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define sfMarkPageAsParsed (*((*(Cpu.Private)).SfMarkPageAsParsed))
+#else /* PROD */
+IMPORT void sfMarkPageAsParsed IPT1(IU32, intelPpn);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define sfMarkPageAsNotParsed (*((*(Cpu.Private)).SfMarkPageAsNotParsed))
+#else /* PROD */
+IMPORT void sfMarkPageAsNotParsed IPT1(IU32, intelPpn);
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#ifdef CCPU
+
+#else /* CCPU */
+
+#ifdef PROD
+#define sfRemovePciMappings (*((*(Cpu.Private)).SfRemovePciMappings))
+#else /* PROD */
+IMPORT void sfRemovePciMappings IPT0();
+#endif /*PROD*/
+
+#endif /* CCPU */
+
+#endif /* _CPU4GEN_H_ */
+/*======================================== END ========================================*/
diff --git a/private/mvdm/softpc.new/base/ccpu386/cpuint_c.h b/private/mvdm/softpc.new/base/ccpu386/cpuint_c.h
new file mode 100644
index 000000000..7046a8326
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cpuint_c.h
@@ -0,0 +1,42 @@
+#ifndef _CpuInt_c_h
+#define _CpuInt_c_h
+#define ChipType (228)
+#define WaferRevision (1)
+#define nQuickTickerThreads (4)
+/* Pending-interrupt flags polled by the CPU main loop.
+   NOTE(review): field semantics inferred from names only — confirm
+   against the code that raises/clears them. */
+struct InterruptREC
+{
+ IBOOL Activity;
+ IBOOL Reset;
+ IBOOL PoolsGettingTooBig;
+ IBOOL Hardware;
+ IBOOL Interval;
+ IBOOL AsynchIO;
+ IBOOL QuickTickerScan;
+ IBOOL SRCI;
+ IBOOL Disabled;
+};
+/* One periodic-callback thread of the quick ticker (nQuickTickerThreads of these). */
+struct QuickTickerThreadREC
+{
+ IBOOL Activity;
+ IUH triggerPoint;
+ IUH elapsed;
+};
+/* Shared quick-ticker state plus the per-thread array above. */
+struct QuickTickerREC
+{
+ IUH triggerPoint;
+ IUH elapsed;
+ IUH perTickDelta;
+ IUH averageRate;
+ IUH averageError;
+ struct QuickTickerThreadREC *threads;
+};
+/* Discriminator for externally raised CPU events. */
+enum CPU_INT_TYPE
+{
+ CPU_HW_RESET = 0,
+ CPU_TIMER_TICK = 1,
+ CPU_HW_INT = 2,
+ CPU_SAD_INT = 3,
+ CPU_SIGIO_EVENT = 4,
+ CPU_NPX_INT = 5
+};
+#endif /* ! _CpuInt_c_h */
diff --git a/private/mvdm/softpc.new/base/ccpu386/cwd.c b/private/mvdm/softpc.new/base/ccpu386/cwd.c
new file mode 100644
index 000000000..dc37ebba9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cwd.c
@@ -0,0 +1,42 @@
+/*[
+
+cwd.c
+
+LOCAL CHAR SccsID[]="@(#)cwd.c 1.5 02/09/94";
+
+CWD CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cwd.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ CWD - Convert Word to Doubleword.
+ Sign extend AX into DX:AX: DX becomes 0xffff when AX is negative
+ (bit 15 set), otherwise 0. AX itself and all flags are unchanged.
+ */
+GLOBAL VOID
+CWD()
+ {
+ if ( GET_AX() & BIT15_MASK ) /* sign bit set? */
+ SET_DX(0xffff); /* replicate the sign bit through DX */
+ else
+ SET_DX(0);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cwd.h b/private/mvdm/softpc.new/base/ccpu386/cwd.h
new file mode 100644
index 000000000..0de4ed754
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cwd.h
@@ -0,0 +1,11 @@
+/*
+ cwd.h
+
+ Define all CWD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cwd.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CWD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/cwde.c b/private/mvdm/softpc.new/base/ccpu386/cwde.c
new file mode 100644
index 000000000..cc607653a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cwde.c
@@ -0,0 +1,43 @@
+/*[
+
+cwde.c
+
+LOCAL CHAR SccsID[]="@(#)cwde.c 1.5 02/09/94";
+
+CWDE CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <cwde.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ CWDE - Convert Word to Doubleword Extended (386+ form).
+ Sign extend AX in place into EAX (contrast CWD, which extends
+ into DX). No flags are affected.
+ */
+GLOBAL VOID
+CWDE()
+ {
+ IU32 temp;
+
+ if ( (temp = GET_AX()) & BIT15_MASK ) /* sign bit set? */
+ temp |= 0xffff0000; /* propagate sign into high word */
+ SET_EAX(temp);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/cwde.h b/private/mvdm/softpc.new/base/ccpu386/cwde.h
new file mode 100644
index 000000000..93c580fd3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/cwde.h
@@ -0,0 +1,11 @@
+/*
+ cwde.h
+
+ CWDE CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)cwde.h 1.5 09/01/94";
+ */
+
+IMPORT VOID CWDE IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/daa.c b/private/mvdm/softpc.new/base/ccpu386/daa.c
new file mode 100644
index 000000000..be1111bc9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/daa.c
@@ -0,0 +1,65 @@
+/*[
+
+daa.c
+
+LOCAL CHAR SccsID[]="@(#)daa.c 1.5 02/09/94";
+
+DAA CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <daa.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ DAA - Decimal Adjust AL after Addition.
+ Adjusts the binary sum in AL to a valid packed-BCD byte.
+ CF and AF act as both inputs and outputs; ZF, SF and PF are set
+ from the adjusted result; OF is architecturally undefined.
+ */
+GLOBAL VOID
+DAA()
+ {
+ IU8 temp_al;
+
+ temp_al = GET_AL();
+
+ /* low-digit adjust: units BCD digit overflowed, or half-carry in */
+ if ( (temp_al & 0xf) > 9 || GET_AF() )
+ {
+ temp_al += 6;
+ SET_AF(1);
+ }
+
+ /* high-digit adjust. Deliberately tests GET_AL() - the ORIGINAL,
+ pre-adjustment AL - not temp_al; Intel defines the 0x99
+ comparison on the old AL value, so this is not a bug. */
+ if ( GET_AL() > 0x99 || GET_CF() )
+ {
+ temp_al += 0x60;
+ SET_CF(1);
+ }
+
+ SET_AL(temp_al);
+
+ /* set ZF,SF,PF according to result */
+ SET_ZF(temp_al == 0);
+ SET_SF((temp_al & BIT7_MASK) != 0);
+ SET_PF(pf_table[temp_al]);
+
+ /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+ SET_OF(UNDEFINED_FLAG);
+#endif
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/daa.h b/private/mvdm/softpc.new/base/ccpu386/daa.h
new file mode 100644
index 000000000..5e181f18c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/daa.h
@@ -0,0 +1,11 @@
+/*
+ daa.h
+
+ Define all DAA CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)daa.h 1.5 09/01/94";
+ */
+
+IMPORT VOID DAA IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/das.c b/private/mvdm/softpc.new/base/ccpu386/das.c
new file mode 100644
index 000000000..57e16bf56
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/das.c
@@ -0,0 +1,69 @@
+/*[
+
+das.c
+
+LOCAL CHAR SccsID[]="@(#)das.c 1.5 02/09/94";
+
+DAS CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <das.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ DAS - Decimal Adjust AL after Subtraction.
+ Adjusts the binary difference in AL to a valid packed-BCD byte.
+ CF and AF act as both inputs and outputs; ZF, SF and PF are set
+ from the adjusted result; OF is architecturally undefined.
+ */
+GLOBAL VOID
+DAS()
+ {
+ IU8 temp_al;
+
+ temp_al = GET_AL();
+
+ /* low-digit adjust: units BCD digit invalid, or half-borrow in */
+ if ( (temp_al & 0xf) > 9 || GET_AF() )
+ {
+ temp_al -= 6;
+ SET_AF(1);
+ }
+
+ /* high-digit adjust: tests the ORIGINAL AL (GET_AL()), not the
+ value already adjusted above */
+ if ( GET_AL() > 0x99 || GET_CF() )
+ {
+ temp_al -= 0x60;
+ SET_CF(1);
+ }
+ else if ( temp_al > 0x9f )
+ {
+ /* the low-digit subtraction above wrapped AL past the top of
+ the BCD range - report the borrow in CF without a further
+ 0x60 adjustment. NOTE(review): extra path vs. DAA; presumed
+ to model observed silicon behaviour - confirm vs. Intel SDM */
+ SET_CF(1);
+ }
+
+ SET_AL(temp_al);
+
+ /* set ZF,SF,PF according to result */
+ SET_ZF(temp_al == 0);
+ SET_SF((temp_al & BIT7_MASK) != 0);
+ SET_PF(pf_table[temp_al]);
+
+ /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+ SET_OF(UNDEFINED_FLAG);
+#endif
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/das.h b/private/mvdm/softpc.new/base/ccpu386/das.h
new file mode 100644
index 000000000..3349cefea
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/das.h
@@ -0,0 +1,11 @@
+/*
+ das.h
+
+ Define all DAS CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)das.h 1.5 09/01/94";
+ */
+
+IMPORT VOID DAS IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/dec.c b/private/mvdm/softpc.new/base/ccpu386/dec.c
new file mode 100644
index 000000000..b9a5932a4
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/dec.c
@@ -0,0 +1,66 @@
+/*[
+
+dec.c
+
+LOCAL CHAR SccsID[]="@(#)dec.c 1.5 02/09/94";
+
+DEC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <dec.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'dec'. */
+/* Decrements *pop1 by one, masked to op_sz bits, and sets */
+/* OF/PF/ZF/SF/AF from the result. Per the x86 definition, DEC */
+/* leaves CF unchanged (unlike SUB x,1). */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+DEC
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst/src operand */
+ IUM8, op_sz /* 8, 16 or 32-bit */
+ )
+
+
+ {
+ IU32 result;
+ IU32 msb;
+ IU32 op1_msb;
+ IU32 res_msb;
+
+ msb = SZ2MSB(op_sz);
+
+ /* '-' binds tighter than '&', so this is (*pop1 - 1) & mask,
+ as intended - parentheses omitted by the original author */
+ result = *pop1 - 1 & SZ2MASK(op_sz); /* Do operation */
+ op1_msb = (*pop1 & msb) != 0; /* Isolate all msb's */
+ res_msb = (result & msb) != 0;
+ /* Determine flags */
+ /* overflow only on the MSB-value -> MSB-value-1 transition,
+ e.g. 0x80 -> 0x7f for 8-bit */
+ SET_OF(op1_msb & !res_msb); /* OF = op1 & !res */
+ /* CF left unchanged */
+ SET_PF(pf_table[result & BYTE_MASK]);
+ SET_ZF(result == 0);
+ SET_SF((result & msb) != 0); /* SF = MSB */
+ SET_AF(((*pop1 ^ result) & BIT4_MASK) != 0); /* AF = Bit 4 carry */
+ *pop1 = result; /* Return answer */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/dec.h b/private/mvdm/softpc.new/base/ccpu386/dec.h
new file mode 100644
index 000000000..3070822a6
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/dec.h
@@ -0,0 +1,17 @@
+/*
+ dec.h
+
+ Define all DEC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)dec.h 1.4 02/09/94";
+ */
+
+IMPORT VOID DEC
+
+IPT2(
+ IU32 *, pop1,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/div.c b/private/mvdm/softpc.new/base/ccpu386/div.c
new file mode 100644
index 000000000..773c2bde8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/div.c
@@ -0,0 +1,167 @@
+/*[
+
+div.c
+
+LOCAL CHAR SccsID[]="@(#)div.c 1.8 02/12/95";
+
+DIV CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <div.h>
+#include <c_div64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Unsigned Divide. */
+/* AX / op2 -> quotient in AL, remainder in AH. */
+/* Raises #DE (via Int0) on divide-by-zero or quotient overflow; */
+/* NOTE(review): the code relies on Int0() not returning here - */
+/* presumably it longjmps to the exception dispatcher; confirm. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+DIV8
+
+IFN1(
+ IU32, op2 /* divisor operand */
+ )
+
+
+ {
+ IU32 result;
+ IU32 op1;
+
+ if ( op2 == 0 )
+ Int0(); /* Divide by Zero Exception */
+
+ op1 = GET_AX();
+ result = op1 / op2; /* Do operation */
+
+ if ( result & 0xff00 )
+ Int0(); /* Result doesn't fit in destination */
+
+ SET_AL(result); /* Store Quotient */
+ SET_AH(op1 % op2); /* Store Remainder */
+
+ /*
+ * PCBench attempts to distinguish between processors by checking for
+ * the DIV8 instruction leaving all flags unchanged or clear. It is
+ * important we behave through this test in the same way as the 'real'
+ * 486 otherwise the app asks us to perform some unsupported ops.
+ *
+ * The real 486 has the following ('undefined') behaviour:
+ * CF set
+ * PF = pf_table[op2 - 1]
+ * AF = !( (op2 & 0xf) == 0 )
+ * ZF clear
+ * SF = (op2 <= 0x80)
+ * OF = some function of the actual division
+ *
+ * Given that the PCBench test is for a simple all-zero case, and that
+ * implementing the above is a needless overhead on the assembler CPU,
+ * we take the simplified form of ZF clear, CF set.
+ */
+#ifdef SET_UNDEFINED_DIV_FLAG
+ SET_CF(1);
+ SET_ZF(0);
+ SET_SF(UNDEFINED_FLAG);
+ SET_OF(UNDEFINED_FLAG);
+ SET_PF(UNDEFINED_FLAG);
+ SET_AF(UNDEFINED_FLAG);
+#endif
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Unsigned Divide. */
+/* DX:AX / op2 -> quotient in AX, remainder in DX. */
+/* Raises #DE (via Int0, assumed non-returning) on divide-by-zero */
+/* or when the quotient does not fit in 16 bits. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+DIV16
+
+IFN1(
+ IU32, op2 /* divisor operand */
+ )
+
+
+ {
+ IU32 result;
+ IU32 op1;
+
+ if ( op2 == 0 )
+ Int0(); /* Divide by Zero Exception */
+
+ /* assemble the 32-bit dividend from DX (high) and AX (low) */
+ op1 = (IU32)GET_DX() << 16 | GET_AX();
+ result = op1 / op2; /* Do operation */
+
+ if ( result & 0xffff0000 )
+ Int0(); /* Result doesn't fit in destination */
+
+ SET_AX(result); /* Store Quotient */
+ SET_DX(op1 % op2); /* Store Remainder */
+
+ /* Set all undefined flag(s) */
+#ifdef SET_UNDEFINED_DIV_FLAG
+ SET_CF(1); /* see DIV8 for flag choice reasoning */
+ SET_ZF(0);
+ SET_OF(UNDEFINED_FLAG);
+ SET_SF(UNDEFINED_FLAG);
+ SET_PF(UNDEFINED_FLAG);
+ SET_AF(UNDEFINED_FLAG);
+#endif
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Unsigned Divide. */
+/* EDX:EAX / op2 -> quotient in EAX, remainder in EDX, using the */
+/* 64/32-bit helper divu64(). Raises #DE (via Int0, assumed */
+/* non-returning) on divide-by-zero or 32-bit quotient overflow. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+DIV32
+
+IFN1(
+ IU32, op2 /* divisor operand */
+ )
+
+
+ {
+ IU32 lr; /* low result */
+ IU32 hr; /* high result */
+ IU32 rem; /* remainder */
+
+ if ( op2 == 0 )
+ Int0(); /* Divide by Zero Exception */
+
+ hr = GET_EDX();
+ lr = GET_EAX();
+ /* divu64 divides hr:lr in place, leaving the 64-bit quotient in
+ hr:lr and the remainder in rem */
+ divu64(&hr, &lr, op2, &rem); /* Do operation */
+
+ if ( hr )
+ Int0(); /* Result doesn't fit in destination */
+
+ SET_EAX(lr); /* Store Quotient */
+ SET_EDX(rem); /* Store Remainder */
+
+ /* Set all undefined flag(s) */
+#ifdef SET_UNDEFINED_DIV_FLAG
+ SET_CF(1); /* see DIV8 for flag choice reasoning */
+ SET_ZF(0);
+ SET_OF(UNDEFINED_FLAG);
+ SET_SF(UNDEFINED_FLAG);
+ SET_PF(UNDEFINED_FLAG);
+ SET_AF(UNDEFINED_FLAG);
+#endif
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/div.h b/private/mvdm/softpc.new/base/ccpu386/div.h
new file mode 100644
index 000000000..85113bb41
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/div.h
@@ -0,0 +1,30 @@
+/*
+ div.h
+
+ Define all DIV CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)div.h 1.4 02/09/94";
+ */
+
+IMPORT VOID DIV8
+
+IPT1(
+ IU32, op2
+
+ );
+
+IMPORT VOID DIV16
+
+IPT1(
+ IU32, op2
+
+ );
+
+IMPORT VOID DIV32
+
+IPT1(
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/enter.c b/private/mvdm/softpc.new/base/ccpu386/enter.c
new file mode 100644
index 000000000..8d6feac59
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/enter.c
@@ -0,0 +1,199 @@
+/*[
+
+enter.c
+
+LOCAL CHAR SccsID[]="@(#)enter.c 1.7 01/19/95";
+
+ENTER CPU functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <enter.h>
+#include <c_page.h>
+#include <fault.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*
+ ENTER16 - 16-bit operand-size ENTER.
+ Builds a stack frame: pushes BP, optionally copies (op2-1) display
+ words from the enclosing frame, pushes the new frame pointer for
+ levels >= 1, points (E)BP at the new frame and reserves op1 bytes
+ of local space. All access/limit checks are performed up front so
+ the instruction either faults before, or completes all of, its
+ stack writes.
+ */
+GLOBAL VOID
+ENTER16
+
+IFN2(
+ IU32, op1, /* immediate data space required */
+ IU32, op2 /* level (indicates parameters which must be copied) */
+ )
+
+
+ {
+ IU32 frame_ptr;
+
+ IS32 p_delta = 0; /* posn of parameter relative to BP */
+ IU32 p_addr; /* memory address of parameter */
+ IU32 param; /* parameter read via BP */
+
+ op2 &= 0x1f; /* take level MOD 32 */
+
+ /* check room on stack for new data */
+ validate_stack_space(USE_SP, (ISM32)op2+1);
+
+ /* check old parameters exist */
+ if ( op2 > 1 )
+ {
+ /*
+ BP is pointing to the old stack before the parameters
+ were actually pushed, we therefore test for the presence
+ of the parameters by seeing if they could have been pushed,
+ if so they exist now.
+
+ We have to take care of the READ/WRITE stack addressability
+ ourselves. Because we have checked the new data can be
+ written we know the next call can not fail because of access
+ problems, however we don't yet know if the stack is readable.
+
+ Note we have been a bit severe on the paging unit because we
+ are asking if the old parameters could be written, if so they
+ can certainly be read from the point of view of the paging
+ unit!
+ */
+ /* do access check */
+ if ( GET_SS_AR_R() == 0 )
+ SF((IU16)0, FAULT_ENTER16_ACCESS);
+
+ /* now we know 'frigged' limit check is ok */
+ validate_stack_space(USE_BP, (ISM32)op2-1);
+ }
+
+ /* all ok - process instruction */
+
+ spush((IU32)GET_BP()); /* push BP */
+ frame_ptr = GetStackPointer(); /* save (E)SP */
+
+ if ( op2 > 0 )
+ {
+ /* level is >=1, copy stack parameters if they exist */
+ while ( --op2 > 0 )
+ {
+ /* copy parameter */
+ p_delta -= 2; /* decrement to next parameter */
+
+ /* calculate parameter address in 32/16bit arithmetic */
+ p_addr = get_current_BP() + p_delta;
+ if ( GET_SS_AR_X() == 0 ) /* look at SS 'B' bit */
+ p_addr &= WORD_MASK;
+
+ p_addr += GET_SS_BASE();
+
+ param = (IU32)vir_read_word(p_addr, NO_PHYSICAL_MAPPING);
+ spush(param);
+ }
+ spush((IU32)frame_ptr); /* save old (E)SP */
+ }
+
+ /* update (E)BP */
+ set_current_BP(frame_ptr);
+
+ /* finally allocate immediate data space on stack */
+ if ( op1 )
+ byte_change_SP((IS32)-op1);
+ }
+
+/*
+ ENTER32 - 32-bit operand-size ENTER.
+ Identical structure to ENTER16 but pushes EBP, copies 4-byte
+ display slots (p_delta steps by 4) and reads them with
+ vir_read_dword. See ENTER16 in this file for the reasoning
+ behind the up-front access and limit checks.
+ */
+GLOBAL VOID
+ENTER32
+
+IFN2(
+ IU32, op1, /* immediate data space required */
+ IU32, op2 /* level (indicates parameters which must be copied) */
+ )
+
+
+ {
+ IU32 frame_ptr;
+
+ IS32 p_delta = 0; /* posn of parameter relative to EBP */
+ IU32 p_addr; /* memory address of parameter */
+ IU32 param; /* parameter read via EBP */
+
+ op2 &= 0x1f; /* take level MOD 32 */
+
+ /* check room on stack for new data */
+ validate_stack_space(USE_SP, (ISM32)op2+1);
+
+ /* check old parameters exist */
+ if ( op2 > 1 )
+ {
+ /*
+ EBP is pointing to the old stack before the parameters
+ were actually pushed, we therefore test for the presence
+ of the parameters by seeing if they could have been pushed,
+ if so they exist now.
+
+ We have to take care of the READ/WRITE stack addressability
+ ourselves. Because we have checked the new data can be
+ written we know the next call can not fail because of access
+ problems, however we don't yet know if the stack is readable.
+
+ Note we have been a bit severe on the paging unit because we
+ are asking if the old parameters could be written, if so they
+ can certainly be read from the point of view of the paging
+ unit!
+ */
+ /* do access check */
+ if ( GET_SS_AR_R() == 0 )
+ SF((IU16)0, FAULT_ENTER32_ACCESS);
+
+ /* now we know 'frigged' limit check is ok */
+ validate_stack_space(USE_BP, (ISM32)op2-1);
+ }
+
+ /* all ok - process instruction */
+
+ spush((IU32)GET_EBP()); /* push EBP */
+ frame_ptr = GetStackPointer(); /* save (E)SP */
+
+ if ( op2 > 0 )
+ {
+ /* level is >=1, copy stack parameters if they exist */
+ while ( --op2 > 0 )
+ {
+ /* copy parameter */
+ p_delta -= 4; /* decrement to next parameter */
+
+ /* calculate parameter address in 32/16bit arithmetic */
+ p_addr = get_current_BP() + p_delta;
+ if ( GET_SS_AR_X() == 0 ) /* look at SS 'B' bit */
+ p_addr &= WORD_MASK;
+
+ p_addr += GET_SS_BASE();
+
+ param = (IU32)vir_read_dword(p_addr, NO_PHYSICAL_MAPPING);
+ spush(param);
+ }
+ spush((IU32)frame_ptr); /* save old (E)SP */
+ }
+
+ /* update (E)BP */
+ set_current_BP(frame_ptr);
+
+ /* finally allocate immediate data space on stack */
+ if ( op1 )
+ byte_change_SP((IS32)-op1);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/enter.h b/private/mvdm/softpc.new/base/ccpu386/enter.h
new file mode 100644
index 000000000..9b885b23a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/enter.h
@@ -0,0 +1,25 @@
+/*
+ enter.h
+
+ Define all ENTER CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)enter.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ENTER16
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
+
+IMPORT VOID ENTER32
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/evid_c.h b/private/mvdm/softpc.new/base/ccpu386/evid_c.h
new file mode 100644
index 000000000..7d19ba4f1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/evid_c.h
@@ -0,0 +1,146 @@
+#ifndef _Evid_c_h
+#define _Evid_c_h
+#define MODE_0 (0)
+#define MODE_1 (1)
+#define MODE_2 (2)
+#define MODE_3 (3)
+#define COPY_MODE (4)
+#define VGA_SRC (0)
+#define RAM_SRC (1)
+#define FORWARDS (0)
+#define BACKWARDS (1)
+#define FWD_BYTE (0)
+#define BWD_BYTE (1)
+#define FWD_WORD (2)
+#define BWD_WORD (3)
+#define UNCHAINED (0)
+#define CHAIN_2 (1)
+#define CHAIN_4 (2)
+#define SIMPLE_WRITES (3)
+#define FUNC_COPY (0)
+#define FUNC_AND (1)
+#define FUNC_OR (2)
+#define FUNC_XOR (3)
+#define FUNC_SHIFT (1)
+#define PLANE_ENABLE (1)
+#define FUNC_CODE (6)
+#define BIT_PROT (8)
+#define SET_RESET (16)
+#define PROT_OR_FUNC (14)
+#define READ_MODE_0 (0)
+#define READ_MODE_1 (1)
+#define DISABLED_RAM (2)
+#define SIMPLE_READ (3)
+#define SIMPLE_MARK (0)
+#define CGA_MARK (1)
+#define UNCHAINED_MARK (2)
+#define CHAIN_4_MARK (3)
+#define BYTE_SIZE (0)
+#define WORD_SIZE (1)
+#define DWORD_SIZE (2)
+#define STRING_SIZE (3)
+#define WRITE_RTN (0)
+#define FILL_RTN (1)
+#define MOVE_RTN (2)
+#define READ_RTN (3)
+#define EGA_INDEX (0)
+#define VGA_INDEX (1)
+#define GC_MASK (2)
+#define GC_MASK_FF (3)
+#define NUM_UNCHAINED_WRITES (21)
+#define NUM_CHAIN4_WRITES (21)
+#define NUM_CHAIN2_WRITES (5)
+#define NUM_DITHER_WRITES (4)
+#define NUM_M0_WRITES (12)
+#define NUM_M1_WRITES (1)
+#define NUM_M23_WRITES (4)
+#define NUM_READ_M0_READS (3)
+#define NUM_READ_M1_READS (3)
+struct VGAGLOBALSETTINGS
+{
+ IU32 latches;
+ IU8 *VGA_rplane;
+ IU8 *VGA_wplane;
+ IU8 *scratch;
+ IU32 sr_masked_val;
+ IU32 sr_nmask;
+ IU32 data_and_mask;
+ IU32 data_xor_mask;
+ IU32 latch_xor_mask;
+ IU32 bit_prot_mask;
+ IU32 plane_enable;
+ IU32 plane_enable_mask;
+ IUH *sr_lookup;
+ IU32*fwd_str_read_addr;
+ IU32*bwd_str_read_addr;
+ IU32 dirty_total;
+ IS32 dirty_low;
+ IS32 dirty_high;
+ IU8 *video_copy;
+ IU32*mark_byte;
+ IU32*mark_word;
+ IU32*mark_string;
+ IU32 read_shift_count;
+ IU32 read_mapped_plane;
+ IU32 colour_comp;
+ IU32 dont_care;
+ IU32 v7_bank_vid_copy_off;
+ void *video_base_ls0;
+ IU8 *route_reg1;
+ IU8 *route_reg2;
+ IU8 *screen_ptr;
+ IU32 rotate;
+ IU32 calc_data_xor;
+ IU32 calc_latch_xor;
+ IU32*read_byte_addr;
+ IU32 v7_fg_latches;
+ IUH **GCRegs;
+ IU8 lastGCindex;
+ IU8 dither;
+ IU8 wrmode;
+ IU8 chain;
+ IU8 wrstate;
+};
+struct EVIDWRITES
+{
+ IU32*byte_write;
+ IU32*word_write;
+ IU32*dword_write;
+ IU32*byte_fill;
+ IU32*word_fill;
+ IU32*dword_fill;
+ IU32*byte_fwd_move;
+ IU32*byte_bwd_move;
+ IU32*word_fwd_move;
+ IU32*word_bwd_move;
+ IU32*dword_fwd_move;
+ IU32*dword_bwd_move;
+};
+struct EVIDREADS
+{
+ IU32*byte_read;
+ IU32*word_read;
+ IU32*dword_read;
+ IU32*str_fwd_read;
+ IU32*str_bwd_read;
+};
+struct EVIDMARKS
+{
+ IU32*byte_mark;
+ IU32*word_mark;
+ IU32*dword_mark;
+ IU32*str_mark;
+};
+enum VidSections
+{
+ READ_FUNC = 0,
+ MARK_FUNC = 1,
+ SIMPLE_FUNC = 2,
+ DITHER_FUNC = 3,
+ PORT_FUNC = 4,
+ GENERIC_WRITES = 5,
+ UNCHAINED_WRITES = 6,
+ CHAIN4_WRITES = 7,
+ CHAIN2_WRITES = 8
+};
+#endif /* ! _Evid_c_h */
diff --git a/private/mvdm/softpc.new/base/ccpu386/evidfunc.h b/private/mvdm/softpc.new/base/ccpu386/evidfunc.h
new file mode 100644
index 000000000..9d62cbf62
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/evidfunc.h
@@ -0,0 +1,715 @@
+/*
+ * Machine-generated prototypes for the Evid VGA memory-access routines.
+ * Naming appears to be S_<id>_<Class><Size><Op>[_<mode bits>][_Fwd|_Bwd],
+ * where the numeric suffixes encode write mode / register state --
+ * NOTE(review): inferred from the names; confirm against the generator.
+ * This first group is the "Simple" (flat, untranslated) handlers.
+ */
+extern void S_2451_SimpleByteWrite IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2454_SimpleWordWrite IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2457_SimpleDwordWrite IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2452_SimpleByteFill IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2455_SimpleWordFill IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2458_SimpleDwordFill IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2453_SimpleByteMove_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2456_SimpleWordMove_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2459_SimpleDwordMove_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2460_SimpleByteMove_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2461_SimpleWordMove_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2462_SimpleDwordMove_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2550_GenericByteWrite IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2553_GenericWordWrite IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2556_GenericDwordWrite IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2551_GenericByteFill IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2554_GenericWordFill IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2557_GenericDwordFill IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2552_GenericByteMove_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2555_GenericWordMove_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2558_GenericDwordMove_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2559_GenericByteMove_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2560_GenericWordMove_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2561_GenericDwordMove_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2463_UnchainedByteWrite_00_0e_01 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2469_UnchainedWordWrite_00_0e_01 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2475_UnchainedDwordWrite_00_0e_01 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2464_UnchainedByteFill_00_0e_01 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2470_UnchainedWordFill_00_0e_01 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2476_UnchainedDwordFill_00_0e_01 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2466_UnchainedByteMove_00_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2472_UnchainedWordMove_00_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2477_UnchainedDwordMove_00_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2517_UnchainedByteMove_00_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2520_UnchainedWordMove_00_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2523_UnchainedDwordMove_00_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2478_UnchainedByteWrite_01_0e_01 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2483_UnchainedWordWrite_01_0e_01 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2488_UnchainedDwordWrite_01_0e_01 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2479_UnchainedByteFill_01_0e_01 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2484_UnchainedWordFill_01_0e_01 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2489_UnchainedDwordFill_01_0e_01 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2480_UnchainedByteMove_01_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2485_UnchainedWordMove_01_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2490_UnchainedDwordMove_01_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2524_UnchainedByteMove_01_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2527_UnchainedWordMove_01_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2530_UnchainedDwordMove_01_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2491_UnchainedByteWrite_02_0e_01 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2496_UnchainedWordWrite_02_0e_01 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2501_UnchainedDwordWrite_02_0e_01 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2492_UnchainedByteFill_02_0e_01 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2497_UnchainedWordFill_02_0e_01 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2502_UnchainedDwordFill_02_0e_01 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2493_UnchainedByteMove_02_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2498_UnchainedWordMove_02_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2503_UnchainedDwordMove_02_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2531_UnchainedByteMove_02_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2534_UnchainedWordMove_02_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2537_UnchainedDwordMove_02_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2504_UnchainedByteWrite_03_0e_01 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2509_UnchainedWordWrite_03_0e_01 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2514_UnchainedDwordWrite_03_0e_01 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2505_UnchainedByteFill_03_0e_01 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2510_UnchainedWordFill_03_0e_01 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2515_UnchainedDwordFill_03_0e_01 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2506_UnchainedByteMove_03_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2511_UnchainedWordMove_03_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2516_UnchainedDwordMove_03_0e_01_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2538_UnchainedByteMove_03_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2541_UnchainedWordMove_03_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2544_UnchainedDwordMove_03_0e_01_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2936_Chain2ByteWrite_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2942_Chain2WordWrite_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2948_Chain2DwordWrite_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2938_Chain2ByteFill_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2944_Chain2WordFill_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2950_Chain2DwordFill_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2940_Chain2ByteMove_00_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2946_Chain2WordMove_00_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2952_Chain2DwordMove_00_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3017_Chain2ByteMove_00_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3019_Chain2WordMove_00_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3021_Chain2DwordMove_00_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2954_Chain2ByteWrite_01 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2960_Chain2WordWrite_01 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2966_Chain2DwordWrite_01 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2956_Chain2ByteFill_01 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2962_Chain2WordFill_01 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2968_Chain2DwordFill_01 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2958_Chain2ByteMove_01_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2964_Chain2WordMove_01_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2970_Chain2DwordMove_01_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3023_Chain2ByteMove_01_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3025_Chain2WordMove_01_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3027_Chain2DwordMove_01_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2972_Chain2ByteWrite_02 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2978_Chain2WordWrite_02 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2984_Chain2DwordWrite_02 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2974_Chain2ByteFill_02 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2980_Chain2WordFill_02 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2986_Chain2DwordFill_02 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2976_Chain2ByteMove_02_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2982_Chain2WordMove_02_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2988_Chain2DwordMove_02_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3029_Chain2ByteMove_02_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3031_Chain2WordMove_02_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3033_Chain2DwordMove_02_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2990_Chain2ByteWrite_03 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2996_Chain2WordWrite_03 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3002_Chain2DwordWrite_03 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2992_Chain2ByteFill_03 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2998_Chain2WordFill_03 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3004_Chain2DwordFill_03 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2994_Chain2ByteMove_03_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3000_Chain2WordMove_03_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3006_Chain2DwordMove_03_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3035_Chain2ByteMove_03_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3037_Chain2WordMove_03_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3039_Chain2DwordMove_03_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3008_Chain2ByteWrite_Copy IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3011_Chain2WordWrite_Copy IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3014_Chain2DwordWrite_Copy IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3009_Chain2ByteFill_Copy IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3012_Chain2WordFill_Copy IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3015_Chain2DwordFill_Copy IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3010_Chain2ByteMove_Copy_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3013_Chain2WordMove_Copy_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3016_Chain2DwordMove_Copy_Fwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3041_Chain2ByteMove_Copy_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3042_Chain2WordMove_Copy_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3043_Chain2DwordMove_Copy_Bwd IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2562_UnchainedByteWrite_00_08_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2617_UnchainedWordWrite_00_08_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2672_UnchainedDwordWrite_00_08_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2574_UnchainedByteFill_00_08_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2629_UnchainedWordFill_00_08_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2684_UnchainedDwordFill_00_08_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2587_UnchainedByteMove_00_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2642_UnchainedWordMove_00_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2696_UnchainedDwordMove_00_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2813_UnchainedByteMove_00_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2843_UnchainedWordMove_00_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2873_UnchainedDwordMove_00_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2563_UnchainedByteWrite_00_09_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2618_UnchainedWordWrite_00_09_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2673_UnchainedDwordWrite_00_09_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2575_UnchainedByteFill_00_09_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2630_UnchainedWordFill_00_09_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2685_UnchainedDwordFill_00_09_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2590_UnchainedByteMove_00_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2645_UnchainedWordMove_00_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2697_UnchainedDwordMove_00_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2816_UnchainedByteMove_00_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2846_UnchainedWordMove_00_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2874_UnchainedDwordMove_00_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2564_UnchainedByteWrite_00_0e_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2619_UnchainedWordWrite_00_0e_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2674_UnchainedDwordWrite_00_0e_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2577_UnchainedByteFill_00_0e_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2632_UnchainedWordFill_00_0e_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2686_UnchainedDwordFill_00_0e_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2592_UnchainedByteMove_00_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2647_UnchainedWordMove_00_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2698_UnchainedDwordMove_00_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2818_UnchainedByteMove_00_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2848_UnchainedWordMove_00_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2875_UnchainedDwordMove_00_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2565_UnchainedByteWrite_00_0f_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2620_UnchainedWordWrite_00_0f_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2675_UnchainedDwordWrite_00_0f_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2578_UnchainedByteFill_00_0f_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2633_UnchainedWordFill_00_0f_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2687_UnchainedDwordFill_00_0f_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2595_UnchainedByteMove_00_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2650_UnchainedWordMove_00_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2699_UnchainedDwordMove_00_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2821_UnchainedByteMove_00_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2851_UnchainedWordMove_00_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2876_UnchainedDwordMove_00_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2566_UnchainedByteWrite_00_10_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2621_UnchainedWordWrite_00_10_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2676_UnchainedDwordWrite_00_10_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2579_UnchainedByteFill_00_10_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2634_UnchainedWordFill_00_10_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2688_UnchainedDwordFill_00_10_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2597_UnchainedByteMove_00_10_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2652_UnchainedWordMove_00_10_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2700_UnchainedDwordMove_00_10_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2823_UnchainedByteMove_00_10_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2853_UnchainedWordMove_00_10_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2877_UnchainedDwordMove_00_10_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2567_UnchainedByteWrite_00_11_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2622_UnchainedWordWrite_00_11_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2677_UnchainedDwordWrite_00_11_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2580_UnchainedByteFill_00_11_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2635_UnchainedWordFill_00_11_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2689_UnchainedDwordFill_00_11_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2600_UnchainedByteMove_00_11_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2655_UnchainedWordMove_00_11_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2701_UnchainedDwordMove_00_11_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2826_UnchainedByteMove_00_11_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2856_UnchainedWordMove_00_11_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2878_UnchainedDwordMove_00_11_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2568_UnchainedByteWrite_00_16_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2623_UnchainedWordWrite_00_16_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2678_UnchainedDwordWrite_00_16_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2581_UnchainedByteFill_00_16_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2636_UnchainedWordFill_00_16_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2690_UnchainedDwordFill_00_16_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2602_UnchainedByteMove_00_16_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2657_UnchainedWordMove_00_16_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2702_UnchainedDwordMove_00_16_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2828_UnchainedByteMove_00_16_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2858_UnchainedWordMove_00_16_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2879_UnchainedDwordMove_00_16_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2569_UnchainedByteWrite_00_17_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2624_UnchainedWordWrite_00_17_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2679_UnchainedDwordWrite_00_17_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2582_UnchainedByteFill_00_17_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2637_UnchainedWordFill_00_17_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2691_UnchainedDwordFill_00_17_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2605_UnchainedByteMove_00_17_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2660_UnchainedWordMove_00_17_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2703_UnchainedDwordMove_00_17_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2831_UnchainedByteMove_00_17_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2861_UnchainedWordMove_00_17_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2880_UnchainedDwordMove_00_17_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2570_UnchainedByteWrite_00_18_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2625_UnchainedWordWrite_00_18_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2680_UnchainedDwordWrite_00_18_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2583_UnchainedByteFill_00_18_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2638_UnchainedWordFill_00_18_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2692_UnchainedDwordFill_00_18_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2607_UnchainedByteMove_00_18_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2662_UnchainedWordMove_00_18_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2704_UnchainedDwordMove_00_18_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2833_UnchainedByteMove_00_18_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2863_UnchainedWordMove_00_18_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2881_UnchainedDwordMove_00_18_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2571_UnchainedByteWrite_00_19_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2626_UnchainedWordWrite_00_19_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2681_UnchainedDwordWrite_00_19_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2584_UnchainedByteFill_00_19_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2639_UnchainedWordFill_00_19_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2693_UnchainedDwordFill_00_19_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2610_UnchainedByteMove_00_19_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2665_UnchainedWordMove_00_19_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2705_UnchainedDwordMove_00_19_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2836_UnchainedByteMove_00_19_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2866_UnchainedWordMove_00_19_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2882_UnchainedDwordMove_00_19_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2572_UnchainedByteWrite_00_1e_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2627_UnchainedWordWrite_00_1e_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2682_UnchainedDwordWrite_00_1e_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2585_UnchainedByteFill_00_1e_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2640_UnchainedWordFill_00_1e_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2694_UnchainedDwordFill_00_1e_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2612_UnchainedByteMove_00_1e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2667_UnchainedWordMove_00_1e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2706_UnchainedDwordMove_00_1e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2838_UnchainedByteMove_00_1e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2868_UnchainedWordMove_00_1e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2883_UnchainedDwordMove_00_1e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2573_UnchainedByteWrite_00_1f_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2628_UnchainedWordWrite_00_1f_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2683_UnchainedDwordWrite_00_1f_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2586_UnchainedByteFill_00_1f_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2641_UnchainedWordFill_00_1f_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2695_UnchainedDwordFill_00_1f_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2615_UnchainedByteMove_00_1f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2670_UnchainedWordMove_00_1f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2707_UnchainedDwordMove_00_1f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2841_UnchainedByteMove_00_1f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2871_UnchainedWordMove_00_1f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2884_UnchainedDwordMove_00_1f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2708_UnchainedByteWrite_01_00_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2711_UnchainedWordWrite_01_00_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2714_UnchainedDwordWrite_01_00_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2709_UnchainedByteFill_01_00_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2712_UnchainedWordFill_01_00_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2715_UnchainedDwordFill_01_00_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2710_UnchainedByteMove_01_00_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2713_UnchainedWordMove_01_00_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2716_UnchainedDwordMove_01_00_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2885_UnchainedByteMove_01_00_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2886_UnchainedWordMove_01_00_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2887_UnchainedDwordMove_01_00_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2717_UnchainedByteWrite_02_08_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2735_UnchainedWordWrite_02_08_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2753_UnchainedDwordWrite_02_08_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2721_UnchainedByteFill_02_08_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2739_UnchainedWordFill_02_08_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2757_UnchainedDwordFill_02_08_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2725_UnchainedByteMove_02_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2743_UnchainedWordMove_02_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2761_UnchainedDwordMove_02_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2888_UnchainedByteMove_02_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2898_UnchainedWordMove_02_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2908_UnchainedDwordMove_02_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2718_UnchainedByteWrite_02_09_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2736_UnchainedWordWrite_02_09_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2754_UnchainedDwordWrite_02_09_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2722_UnchainedByteFill_02_09_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2740_UnchainedWordFill_02_09_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2758_UnchainedDwordFill_02_09_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2728_UnchainedByteMove_02_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2746_UnchainedWordMove_02_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2762_UnchainedDwordMove_02_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2891_UnchainedByteMove_02_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2901_UnchainedWordMove_02_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2909_UnchainedDwordMove_02_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2719_UnchainedByteWrite_02_0e_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2737_UnchainedWordWrite_02_0e_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2755_UnchainedDwordWrite_02_0e_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2723_UnchainedByteFill_02_0e_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2741_UnchainedWordFill_02_0e_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2759_UnchainedDwordFill_02_0e_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2730_UnchainedByteMove_02_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2748_UnchainedWordMove_02_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2763_UnchainedDwordMove_02_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2893_UnchainedByteMove_02_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2903_UnchainedWordMove_02_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2910_UnchainedDwordMove_02_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2720_UnchainedByteWrite_02_0f_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2738_UnchainedWordWrite_02_0f_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2756_UnchainedDwordWrite_02_0f_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2724_UnchainedByteFill_02_0f_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2742_UnchainedWordFill_02_0f_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2760_UnchainedDwordFill_02_0f_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2733_UnchainedByteMove_02_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2751_UnchainedWordMove_02_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2764_UnchainedDwordMove_02_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2896_UnchainedByteMove_02_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2906_UnchainedWordMove_02_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2911_UnchainedDwordMove_02_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2765_UnchainedByteWrite_03_08_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2783_UnchainedWordWrite_03_08_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2801_UnchainedDwordWrite_03_08_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2769_UnchainedByteFill_03_08_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2787_UnchainedWordFill_03_08_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2805_UnchainedDwordFill_03_08_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2773_UnchainedByteMove_03_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2791_UnchainedWordMove_03_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2809_UnchainedDwordMove_03_08_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2912_UnchainedByteMove_03_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2922_UnchainedWordMove_03_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2932_UnchainedDwordMove_03_08_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2766_UnchainedByteWrite_03_09_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2784_UnchainedWordWrite_03_09_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2802_UnchainedDwordWrite_03_09_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2770_UnchainedByteFill_03_09_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2788_UnchainedWordFill_03_09_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2806_UnchainedDwordFill_03_09_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2776_UnchainedByteMove_03_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2794_UnchainedWordMove_03_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2810_UnchainedDwordMove_03_09_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2915_UnchainedByteMove_03_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2925_UnchainedWordMove_03_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2933_UnchainedDwordMove_03_09_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2767_UnchainedByteWrite_03_0e_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2785_UnchainedWordWrite_03_0e_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2803_UnchainedDwordWrite_03_0e_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2771_UnchainedByteFill_03_0e_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2789_UnchainedWordFill_03_0e_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2807_UnchainedDwordFill_03_0e_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2778_UnchainedByteMove_03_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2796_UnchainedWordMove_03_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2811_UnchainedDwordMove_03_0e_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2917_UnchainedByteMove_03_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2927_UnchainedWordMove_03_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2934_UnchainedDwordMove_03_0e_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2768_UnchainedByteWrite_03_0f_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_2786_UnchainedWordWrite_03_0f_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_2804_UnchainedDwordWrite_03_0f_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_2772_UnchainedByteFill_03_0f_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_2790_UnchainedWordFill_03_0f_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_2808_UnchainedDwordFill_03_0f_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_2781_UnchainedByteMove_03_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2799_UnchainedWordMove_03_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2812_UnchainedDwordMove_03_0f_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2920_UnchainedByteMove_03_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2930_UnchainedWordMove_03_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_2935_UnchainedDwordMove_03_0f_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3044_Chain4ByteWrite_00_08 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3094_Chain4WordWrite_00_08 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3143_Chain4DwordWrite_00_08 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3056_Chain4ByteFill_00_08 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3106_Chain4WordFill_00_08 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3155_Chain4DwordFill_00_08 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3070_Chain4ByteMove_00_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3119_Chain4WordMove_00_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3167_Chain4DwordMove_00_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3277_Chain4ByteMove_00_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3301_Chain4WordMove_00_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3325_Chain4DwordMove_00_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3045_Chain4ByteWrite_00_09 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3095_Chain4WordWrite_00_09 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3144_Chain4DwordWrite_00_09 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3058_Chain4ByteFill_00_09 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3107_Chain4WordFill_00_09 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3156_Chain4DwordFill_00_09 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3072_Chain4ByteMove_00_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3121_Chain4WordMove_00_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3168_Chain4DwordMove_00_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3279_Chain4ByteMove_00_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3303_Chain4WordMove_00_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3326_Chain4DwordMove_00_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3046_Chain4ByteWrite_00_0e IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3096_Chain4WordWrite_00_0e IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3145_Chain4DwordWrite_00_0e IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3060_Chain4ByteFill_00_0e IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3109_Chain4WordFill_00_0e IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3157_Chain4DwordFill_00_0e IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3074_Chain4ByteMove_00_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3123_Chain4WordMove_00_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3169_Chain4DwordMove_00_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3281_Chain4ByteMove_00_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3305_Chain4WordMove_00_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3327_Chain4DwordMove_00_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3047_Chain4ByteWrite_00_0f IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3097_Chain4WordWrite_00_0f IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3146_Chain4DwordWrite_00_0f IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3061_Chain4ByteFill_00_0f IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3110_Chain4WordFill_00_0f IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3158_Chain4DwordFill_00_0f IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3076_Chain4ByteMove_00_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3125_Chain4WordMove_00_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3170_Chain4DwordMove_00_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3283_Chain4ByteMove_00_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3307_Chain4WordMove_00_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3328_Chain4DwordMove_00_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3048_Chain4ByteWrite_00_10 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3098_Chain4WordWrite_00_10 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3147_Chain4DwordWrite_00_10 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3062_Chain4ByteFill_00_10 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3111_Chain4WordFill_00_10 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3159_Chain4DwordFill_00_10 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3078_Chain4ByteMove_00_10_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3127_Chain4WordMove_00_10_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3171_Chain4DwordMove_00_10_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3285_Chain4ByteMove_00_10_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3309_Chain4WordMove_00_10_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3329_Chain4DwordMove_00_10_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3049_Chain4ByteWrite_00_11 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3099_Chain4WordWrite_00_11 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3148_Chain4DwordWrite_00_11 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3063_Chain4ByteFill_00_11 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3112_Chain4WordFill_00_11 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3160_Chain4DwordFill_00_11 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3080_Chain4ByteMove_00_11_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3129_Chain4WordMove_00_11_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3172_Chain4DwordMove_00_11_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3287_Chain4ByteMove_00_11_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3311_Chain4WordMove_00_11_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3330_Chain4DwordMove_00_11_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3050_Chain4ByteWrite_00_16 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3100_Chain4WordWrite_00_16 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3149_Chain4DwordWrite_00_16 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3064_Chain4ByteFill_00_16 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3113_Chain4WordFill_00_16 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3161_Chain4DwordFill_00_16 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3082_Chain4ByteMove_00_16_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3131_Chain4WordMove_00_16_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3173_Chain4DwordMove_00_16_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3289_Chain4ByteMove_00_16_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3313_Chain4WordMove_00_16_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3331_Chain4DwordMove_00_16_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3051_Chain4ByteWrite_00_17 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3101_Chain4WordWrite_00_17 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3150_Chain4DwordWrite_00_17 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3065_Chain4ByteFill_00_17 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3114_Chain4WordFill_00_17 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3162_Chain4DwordFill_00_17 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3084_Chain4ByteMove_00_17_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3133_Chain4WordMove_00_17_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3174_Chain4DwordMove_00_17_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3291_Chain4ByteMove_00_17_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3315_Chain4WordMove_00_17_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3332_Chain4DwordMove_00_17_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3052_Chain4ByteWrite_00_18 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3102_Chain4WordWrite_00_18 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3151_Chain4DwordWrite_00_18 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3066_Chain4ByteFill_00_18 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3115_Chain4WordFill_00_18 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3163_Chain4DwordFill_00_18 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3086_Chain4ByteMove_00_18_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3135_Chain4WordMove_00_18_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3175_Chain4DwordMove_00_18_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3293_Chain4ByteMove_00_18_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3317_Chain4WordMove_00_18_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3333_Chain4DwordMove_00_18_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3053_Chain4ByteWrite_00_19 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3103_Chain4WordWrite_00_19 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3152_Chain4DwordWrite_00_19 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3067_Chain4ByteFill_00_19 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3116_Chain4WordFill_00_19 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3164_Chain4DwordFill_00_19 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3088_Chain4ByteMove_00_19_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3137_Chain4WordMove_00_19_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3176_Chain4DwordMove_00_19_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3295_Chain4ByteMove_00_19_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3319_Chain4WordMove_00_19_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3334_Chain4DwordMove_00_19_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3054_Chain4ByteWrite_00_1e IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3104_Chain4WordWrite_00_1e IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3153_Chain4DwordWrite_00_1e IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3068_Chain4ByteFill_00_1e IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3117_Chain4WordFill_00_1e IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3165_Chain4DwordFill_00_1e IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3090_Chain4ByteMove_00_1e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3139_Chain4WordMove_00_1e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3177_Chain4DwordMove_00_1e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3297_Chain4ByteMove_00_1e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3321_Chain4WordMove_00_1e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3335_Chain4DwordMove_00_1e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3055_Chain4ByteWrite_00_1f IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3105_Chain4WordWrite_00_1f IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3154_Chain4DwordWrite_00_1f IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3069_Chain4ByteFill_00_1f IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3118_Chain4WordFill_00_1f IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3166_Chain4DwordFill_00_1f IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3092_Chain4ByteMove_00_1f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3141_Chain4WordMove_00_1f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3178_Chain4DwordMove_00_1f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3299_Chain4ByteMove_00_1f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3323_Chain4WordMove_00_1f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3336_Chain4DwordMove_00_1f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3179_Chain4ByteWrite_01_00 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3183_Chain4WordWrite_01_00 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3186_Chain4DwordWrite_01_00 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3180_Chain4ByteFill_01_00 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3184_Chain4WordFill_01_00 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3187_Chain4DwordFill_01_00 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3181_Chain4ByteMove_01_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3185_Chain4WordMove_01_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3188_Chain4DwordMove_01_00_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3337_Chain4ByteMove_01_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3339_Chain4WordMove_01_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3340_Chain4DwordMove_01_00_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3189_Chain4ByteWrite_02_08 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3205_Chain4WordWrite_02_08 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3221_Chain4DwordWrite_02_08 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3193_Chain4ByteFill_02_08 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3209_Chain4WordFill_02_08 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3225_Chain4DwordFill_02_08 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3197_Chain4ByteMove_02_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3213_Chain4WordMove_02_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3229_Chain4DwordMove_02_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3341_Chain4ByteMove_02_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3349_Chain4WordMove_02_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3357_Chain4DwordMove_02_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3190_Chain4ByteWrite_02_09 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3206_Chain4WordWrite_02_09 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3222_Chain4DwordWrite_02_09 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3194_Chain4ByteFill_02_09 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3210_Chain4WordFill_02_09 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3226_Chain4DwordFill_02_09 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3199_Chain4ByteMove_02_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3215_Chain4WordMove_02_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3230_Chain4DwordMove_02_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3343_Chain4ByteMove_02_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3351_Chain4WordMove_02_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3358_Chain4DwordMove_02_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3191_Chain4ByteWrite_02_0e IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3207_Chain4WordWrite_02_0e IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3223_Chain4DwordWrite_02_0e IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3195_Chain4ByteFill_02_0e IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3211_Chain4WordFill_02_0e IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3227_Chain4DwordFill_02_0e IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3201_Chain4ByteMove_02_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3217_Chain4WordMove_02_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3231_Chain4DwordMove_02_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3345_Chain4ByteMove_02_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3353_Chain4WordMove_02_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3359_Chain4DwordMove_02_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3192_Chain4ByteWrite_02_0f IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3208_Chain4WordWrite_02_0f IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3224_Chain4DwordWrite_02_0f IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3196_Chain4ByteFill_02_0f IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3212_Chain4WordFill_02_0f IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3228_Chain4DwordFill_02_0f IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3203_Chain4ByteMove_02_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3219_Chain4WordMove_02_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3232_Chain4DwordMove_02_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3347_Chain4ByteMove_02_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3355_Chain4WordMove_02_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3360_Chain4DwordMove_02_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3233_Chain4ByteWrite_03_08 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3249_Chain4WordWrite_03_08 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3265_Chain4DwordWrite_03_08 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3237_Chain4ByteFill_03_08 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3253_Chain4WordFill_03_08 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3269_Chain4DwordFill_03_08 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3241_Chain4ByteMove_03_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3257_Chain4WordMove_03_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3273_Chain4DwordMove_03_08_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3361_Chain4ByteMove_03_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3369_Chain4WordMove_03_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3377_Chain4DwordMove_03_08_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3234_Chain4ByteWrite_03_09 IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3250_Chain4WordWrite_03_09 IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3266_Chain4DwordWrite_03_09 IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3238_Chain4ByteFill_03_09 IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3254_Chain4WordFill_03_09 IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3270_Chain4DwordFill_03_09 IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3243_Chain4ByteMove_03_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3259_Chain4WordMove_03_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3274_Chain4DwordMove_03_09_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3363_Chain4ByteMove_03_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3371_Chain4WordMove_03_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3378_Chain4DwordMove_03_09_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3235_Chain4ByteWrite_03_0e IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3251_Chain4WordWrite_03_0e IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3267_Chain4DwordWrite_03_0e IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3239_Chain4ByteFill_03_0e IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3255_Chain4WordFill_03_0e IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3271_Chain4DwordFill_03_0e IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3245_Chain4ByteMove_03_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3261_Chain4WordMove_03_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3275_Chain4DwordMove_03_0e_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3365_Chain4ByteMove_03_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3373_Chain4WordMove_03_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3379_Chain4DwordMove_03_0e_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3236_Chain4ByteWrite_03_0f IPT2(IU32, eaOff, IU8, eaVal);
+extern void S_3252_Chain4WordWrite_03_0f IPT2(IU32, eaOff, IU16, eaVal);
+extern void S_3268_Chain4DwordWrite_03_0f IPT2(IU32, eaOff, IU32, eaVal);
+extern void S_3240_Chain4ByteFill_03_0f IPT3(IU32, eaOff, IU8, eaVal, IU32, count);
+extern void S_3256_Chain4WordFill_03_0f IPT3(IU32, eaOff, IU16, eaVal, IU32, count);
+extern void S_3272_Chain4DwordFill_03_0f IPT3(IU32, eaOff, IU32, eaVal, IU32, count);
+extern void S_3247_Chain4ByteMove_03_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3263_Chain4WordMove_03_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3276_Chain4DwordMove_03_0f_00 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3367_Chain4ByteMove_03_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3375_Chain4WordMove_03_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+extern void S_3380_Chain4DwordMove_03_0f_01 IPT4(IU32, eaOff, IHPE, fromOff, IU32, count, IBOOL, srcInRAM);
+
+
+/*========= Mark Functions ============ */
+
+extern S_2438_SimpleMark IPT0();
+extern S_2439_CGAMarkByte IPT1(IU32, eaOff);
+extern S_2440_CGAMarkWord IPT1(IU32, eaOff);
+extern S_2441_CGAMarkDword IPT1(IU32, eaOff);
+extern S_2442_CGAMarkString IPT2(IU32, eaOff, IU32, count);
+extern S_2443_UnchainedMarkByte IPT1(IU32, eaOff);
+extern S_2444_UnchainedMarkWord IPT1(IU32, eaOff);
+extern S_2445_UnchainedMarkDword IPT1(IU32, eaOff);
+extern S_2446_UnchainedMarkString IPT2(IU32, eaOff, IU32, count);
+extern S_2447_Chain4MarkByte IPT1(IU32, eaOff);
+extern S_2448_Chain4MarkWord IPT1(IU32, eaOff);
+extern S_2449_Chain4MarkDword IPT1(IU32, eaOff);
+extern S_2450_Chain4MarkString IPT2(IU32, eaOff, IU32, count);
+extern S_2447_Chain4MarkByte IPT1(IU32, eaOff);
+extern S_2448_Chain4MarkWord IPT1(IU32, eaOff);
+extern S_2449_Chain4MarkDword IPT1(IU32, eaOff);
+extern S_2450_Chain4MarkString IPT2(IU32, eaOff, IU32, count);
+
+
+/*========= Read Functions ============ */
+
+extern IU32 S_2427_SimpleByteRead IPT1(IU32, eaOff);
+extern IU32 S_2428_SimpleWordRead IPT1(IU32, eaOff);
+extern IU32 S_2429_SimpleDwordRead IPT1(IU32, eaOff);
+extern void S_2430_SimpleStringRead IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2430_SimpleStringRead IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2423_DisabledRAMByteRead IPT1(IU32, eaOff);
+extern IU32 S_2424_DisabledRAMWordRead IPT1(IU32, eaOff);
+extern IU32 S_2425_DisabledRAMDwordRead IPT1(IU32, eaOff);
+extern void S_2426_DisabledRAMStringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2437_DisabledRAMStringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2407_RdMode0UnchainedByteRead IPT1(IU32, eaOff);
+extern IU32 S_2408_RdMode0UnchainedWordRead IPT1(IU32, eaOff);
+extern IU32 S_2409_RdMode0UnchainedDwordRead IPT1(IU32, eaOff);
+extern void S_2410_RdMode0UnchainedStringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2433_RdMode0UnchainedStringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2419_RdMode1UnchainedByteRead IPT1(IU32, eaOff);
+extern IU32 S_2420_RdMode1UnchainedWordRead IPT1(IU32, eaOff);
+extern IU32 S_2421_RdMode1UnchainedDwordRead IPT1(IU32, eaOff);
+extern void S_2422_RdMode1UnchainedStringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2436_RdMode1UnchainedStringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2399_RdMode0Chain2ByteRead IPT1(IU32, eaOff);
+extern IU32 S_2400_RdMode0Chain2WordRead IPT1(IU32, eaOff);
+extern IU32 S_2401_RdMode0Chain2DwordRead IPT1(IU32, eaOff);
+extern void S_2402_RdMode0Chain2StringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2431_RdMode0Chain2StringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2411_RdMode1Chain2ByteRead IPT1(IU32, eaOff);
+extern IU32 S_2412_RdMode1Chain2WordRead IPT1(IU32, eaOff);
+extern IU32 S_2413_RdMode1Chain2DwordRead IPT1(IU32, eaOff);
+extern void S_2414_RdMode1Chain2StringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2434_RdMode1Chain2StringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2403_RdMode0Chain4ByteRead IPT1(IU32, eaOff);
+extern IU32 S_2404_RdMode0Chain4WordRead IPT1(IU32, eaOff);
+extern IU32 S_2405_RdMode0Chain4DwordRead IPT1(IU32, eaOff);
+extern void S_2406_RdMode0Chain4StringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2432_RdMode0Chain4StringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern IU32 S_2415_RdMode1Chain4ByteRead IPT1(IU32, eaOff);
+extern IU32 S_2416_RdMode1Chain4WordRead IPT1(IU32, eaOff);
+extern IU32 S_2417_RdMode1Chain4DwordRead IPT1(IU32, eaOff);
+extern void S_2418_RdMode1Chain4StringReadFwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern void S_2435_RdMode1Chain4StringReadBwd IPT3(IU8 *, dest, IU32, eaOff, IU32, count);
+extern EVID_WRT_POINTERS simple_evid;
+extern EVID_WRT_POINTERS gricvid_evid;
+extern EVID_WRT_POINTERS dith_evid[];
+extern EVID_WRT_POINTERS chain2_evid[];
+extern EVID_WRT_POINTERS unchained_evid[];
+extern EVID_WRT_POINTERS chain4_evid[];
+extern EVID_READ_POINTERS simple_read_evid;
+extern EVID_READ_POINTERS ram_dsbld_read_evid;
+extern EVID_READ_POINTERS read_mode0_evid[];
+extern EVID_READ_POINTERS read_mode1_evid[];
+extern EVID_MARK_POINTERS simple_mark_evid;
+extern EVID_MARK_POINTERS cga_mark_evid;
+extern EVID_MARK_POINTERS unchained_mark_evid;
+extern EVID_MARK_POINTERS chain4_mark_evid;
diff --git a/private/mvdm/softpc.new/base/ccpu386/evidgen.h b/private/mvdm/softpc.new/base/ccpu386/evidgen.h
new file mode 100644
index 000000000..dca9cf720
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/evidgen.h
@@ -0,0 +1,25 @@
+/*[
+ * Generated File: evidgen.h
+ *
+]*/
+
+#ifndef _EVIDGEN_H_
+#define _EVIDGEN_H_
+
+struct VideoVector {
+ IU32 (*GetVideolatches) IPT0();
+ void (*SetVideolatches) IPT1(IU32, value);
+ void (*setWritePointers) IPT0();
+ void (*setReadPointers) IPT1(IUH, readset);
+ void (*setMarkPointers) IPT1(IUH, markset);
+};
+
+extern struct VideoVector Video;
+
+#define getVideolatches() (*(Video.GetVideolatches))()
+#define setVideolatches(value) (*(Video.SetVideolatches))(value)
+#define SetWritePointers() (*(Video.setWritePointers))()
+#define SetReadPointers(readset) (*(Video.setReadPointers))(readset)
+#define SetMarkPointers(markset) (*(Video.setMarkPointers))(markset)
+#endif /* _EVIDGEN_H_ */
+/*======================================== END ========================================*/
diff --git a/private/mvdm/softpc.new/base/ccpu386/fault.h b/private/mvdm/softpc.new/base/ccpu386/fault.h
new file mode 100644
index 000000000..a37bebf64
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/fault.h
@@ -0,0 +1,231 @@
+/*[
+
+fault.h
+
+"@(#)fault.h 1.2 01/19/95"
+
+Fault codes for exceptions; one per call instance of
+any of the exception routines to enable tracking of the
+original cause of an exception in the CCPU.
+
+Currently the Int0, Int1 etc routines are not counted
+as exceptions for this purpose.
+
+]*/
+
+
+/*
+ * Fault codes for: c_addr.c
+ */
+#define FAULT_LIMITCHK_SEG_LIMIT 1
+
+/*
+ * Fault codes for: c_intr.c
+ */
+#define FAULT_INT_DEST_NOT_IN_IDT 2
+#define FAULT_INT_DEST_BAD_SEG_TYPE 3
+#define FAULT_INT_DEST_ACCESS 4
+#define FAULT_INT_DEST_NOTPRESENT 5
+#define FAULT_INTR_RM_CS_LIMIT 6
+#define FAULT_INTR_TASK_CS_LIMIT 7
+#define FAULT_INTR_PM_CS_LIMIT_1 8
+#define FAULT_INTR_PM_CS_LIMIT_2 9
+
+/*
+ * Fault codes for: c_main.c
+ */
+#define FAULT_CCPU_LLDT_ACCESS 10
+#define FAULT_CCPU_LTR_ACCESS 11
+#define FAULT_CCPU_LGDT_ACCESS 12
+#define FAULT_CCPU_LMSW_ACCESS 13
+#define FAULT_CCPU_INVLPG_ACCESS 14
+#define FAULT_CCPU_CLTS_ACCESS 15
+#define FAULT_CCPU_INVD_ACCESS 16
+#define FAULT_CCPU_WBIND_ACCESS 17
+#define FAULT_CCPU_MOV_R_C_ACCESS 18
+#define FAULT_CCPU_MOV_R_D_ACCESS 19
+#define FAULT_CCPU_MOV_C_R_ACCESS 20
+#define FAULT_CCPU_MOV_D_R_ACCESS 21
+#define FAULT_CCPU_MOV_R_T_ACCESS 22
+#define FAULT_CCPU_MOV_T_R_ACCESS 23
+#define FAULT_CCPU_PUSHF_ACCESS 24
+#define FAULT_CCPU_POPF_ACCESS 25
+#define FAULT_CCPU_INT_ACCESS 26
+#define FAULT_CCPU_IRET_ACCESS 27
+#define FAULT_CCPU_HLT_ACCESS 28
+#define FAULT_CCPU_CLI_ACCESS 29
+#define FAULT_CCPU_STI_ACCESS 30
+#define FAULT_CHKIOMAP_BAD_TSS 31
+#define FAULT_CHKIOMAP_BAD_MAP 32
+#define FAULT_CHKIOMAP_BAD_TR 33
+#define FAULT_CHKIOMAP_ACCESS 34
+
+/*
+ * Fault codes for: c_oprnd.h
+ */
+#define FAULT_OP0_SEG_NOT_READABLE 35
+#define FAULT_OP0_SEG_NOT_WRITABLE 36
+#define FAULT_OP0_SEG_NO_READ_OR_WRITE 37
+#define FAULT_OP1_SEG_NOT_READABLE 38
+#define FAULT_OP1_SEG_NOT_WRITABLE 39
+#define FAULT_OP1_SEG_NO_READ_OR_WRITE 40
+
+/*
+ * Fault codes for: c_prot.c
+ */
+#define FAULT_CHECKSS_SELECTOR 41
+#define FAULT_CHECKSS_BAD_SEG_TYPE 42
+#define FAULT_CHECKSS_ACCESS 43
+#define FAULT_CHECKSS_NOTPRESENT 44
+#define FAULT_VALSS_CHG_SELECTOR 45
+#define FAULT_VALSS_CHG_ACCESS 46
+#define FAULT_VALSS_CHG_BAD_SEG_TYPE 47
+#define FAULT_VALSS_CHG_NOTPRESENT 48
+#define FAULT_VALTSS_SELECTOR 49
+#define FAULT_VALTSS_NP 50
+
+/*
+ * Fault codes for: c_seg.c
+ */
+#define FAULT_LOADCS_SELECTOR 51
+#define FAULT_LOADCS_ACCESS_1 52
+#define FAULT_LOADCS_NOTPRESENT_1 53
+#define FAULT_LOADCS_ACCESS_2 54
+#define FAULT_LOADCS_NOTPRESENT_2 55
+#define FAULT_LOADCS_BAD_SEG_TYPE 56
+#define FAULT_LOADDS_SELECTOR 57
+#define FAULT_LOADDS_BAD_SEG_TYPE 58
+#define FAULT_LOADDS_ACCESS 59
+#define FAULT_LOADDS_NOTPRESENT 60
+
+/*
+ * Fault codes for: c_stack.c
+ */
+#define FAULT_VALNEWSPC_SS_LIMIT_16 61
+#define FAULT_VALNEWSPC_SS_LIMIT_32 62
+#define FAULT_VALSTACKEX_ACCESS 63
+#define FAULT_VALSTKSPACE_ACCESS 64
+
+/*
+ * Fault codes for: c_tlb.c
+ */
+#define FAULT_LIN2PHY_ACCESS 65
+#define FAULT_LIN2PHY_PDE_NOTPRESENT 66
+#define FAULT_LIN2PHY_PTE_NOTPRESENT 67
+#define FAULT_LIN2PHY_PROTECT_FAIL 68
+
+/*
+ * Fault codes for: c_tsksw.c
+ */
+#define FAULT_LOADLDT_SELECTOR 69
+#define FAULT_LOADLDT_NOT_AN_LDT 70
+#define FAULT_LOADLDT_NOTPRESENT 71
+#define FAULT_SWTASK_NULL_TR_SEL 72
+#define FAULT_SWTASK_BAD_TSS_SIZE_1 73
+#define FAULT_SWTASK_BAD_TSS_SIZE_2 74
+#define FAULT_SWTASK_BAD_TSS_SIZE_3 75
+#define FAULT_SWTASK_BAD_TSS_SIZE_4 76
+#define FAULT_SWTASK_BAD_CS_SELECTOR 77
+#define FAULT_SWTASK_CONFORM_CS_NP 78
+#define FAULT_SWTASK_ACCESS_1 79
+#define FAULT_SWTASK_NOCONFORM_CS_NP 80
+#define FAULT_SWTASK_ACCESS_2 81
+#define FAULT_SWTASK_BAD_SEG_TYPE 82
+
+/*
+ * Fault codes for: c_xfer.c
+ */
+#define FAULT_RM_REL_IP_CS_LIMIT 83
+#define FAULT_PM_REL_IP_CS_LIMIT 84
+#define FAULT_FAR_DEST_SELECTOR 85
+#define FAULT_FAR_DEST_ACCESS_1 86
+#define FAULT_FAR_DEST_NP_CONFORM 87
+#define FAULT_FAR_DEST_ACCESS_2 88
+#define FAULT_FAR_DEST_NP_NONCONFORM 89
+#define FAULT_FAR_DEST_ACCESS_3 90
+#define FAULT_FAR_DEST_NP_CALLG 91
+#define FAULT_FAR_DEST_ACCESS_4 92
+#define FAULT_FAR_DEST_NP_TASKG 93
+#define FAULT_FAR_DEST_TSS_IN_LDT 94
+#define FAULT_FAR_DEST_ACCESS_5 95
+#define FAULT_FAR_DEST_NP_TSS 96
+#define FAULT_FAR_DEST_BAD_SEG_TYPE 97
+#define FAULT_GATE_DEST_SELECTOR 98
+#define FAULT_GATE_DEST_ACCESS_1 99
+#define FAULT_GATE_DEST_ACCESS_2 100
+#define FAULT_GATE_DEST_ACCESS_3 101
+#define FAULT_GATE_DEST_BAD_SEG_TYPE 102
+#define FAULT_GATE_DEST_GATE_SIZE 103
+#define FAULT_GATE_DEST_NP 104
+#define FAULT_TASK_DEST_SELECTOR 105
+#define FAULT_TASK_DEST_NOT_TSS 106
+#define FAULT_TASK_DEST_NP 107
+
+/*
+ * Fault codes for: call.c
+ */
+#define FAULT_CALLF_RM_CS_LIMIT 108
+#define FAULT_CALLF_TASK_CS_LIMIT 109
+#define FAULT_CALLF_PM_CS_LIMIT_1 110
+#define FAULT_CALLF_PM_CS_LIMIT_2 111
+#define FAULT_CALLN_RM_CS_LIMIT 112
+#define FAULT_CALLN_PM_CS_LIMIT 113
+#define FAULT_CALLR_RM_CS_LIMIT 114
+#define FAULT_CALLR_PM_CS_LIMIT 115
+
+/*
+ * Fault codes for: enter.c
+ */
+#define FAULT_ENTER16_ACCESS 116
+#define FAULT_ENTER32_ACCESS 117
+
+/*
+ * Fault codes for: iret.c
+ */
+#define FAULT_IRET_RM_CS_LIMIT 118
+#define FAULT_IRET_PM_TASK_CS_LIMIT 119
+#define FAULT_IRET_VM_CS_LIMIT 120
+#define FAULT_IRET_CS_ACCESS_1 121
+#define FAULT_IRET_SELECTOR 122
+#define FAULT_IRET_ACCESS_2 123
+#define FAULT_IRET_ACCESS_3 124
+#define FAULT_IRET_BAD_SEG_TYPE 125
+#define FAULT_IRET_NP_CS 126
+#define FAULT_IRET_PM_CS_LIMIT_1 127
+#define FAULT_IRET_PM_CS_LIMIT_2 128
+
+/*
+ * Fault codes for: jmp.c
+ */
+#define FAULT_JMPF_RM_CS_LIMIT 129
+#define FAULT_JMPF_TASK_CS_LIMIT 130
+#define FAULT_JMPF_PM_CS_LIMIT 131
+#define FAULT_JMPN_RM_CS_LIMIT 132
+#define FAULT_JMPN_PM_CS_LIMIT 133
+
+/*
+ * Fault codes for: lldt.c
+ */
+#define FAULT_LLDT_SELECTOR 134
+#define FAULT_LLDT_NOT_LDT 135
+#define FAULT_LLDT_NP 136
+
+/*
+ * Fault codes for: mov.c
+ */
+#define FAULT_MOV_CR_PAGE_IN_RM 137
+
+/*
+ * Fault codes for: ret.c
+ */
+#define FAULT_RETF_RM_CS_LIMIT 138
+#define FAULT_RETF_PM_ACCESS 139
+#define FAULT_RETF_SELECTOR 140
+#define FAULT_RETF_ACCESS_1 141
+#define FAULT_RETF_ACCESS_2 142
+#define FAULT_RETF_BAD_SEG_TYPE 143
+#define FAULT_RETF_CS_NOTPRESENT 144
+#define FAULT_RETF_PM_CS_LIMIT_1 145
+#define FAULT_RETF_PM_CS_LIMIT_2 146
+#define FAULT_RETN_CS_LIMIT 147
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/fpu.c b/private/mvdm/softpc.new/base/ccpu386/fpu.c
new file mode 100644
index 000000000..cc6085479
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/fpu.c
@@ -0,0 +1,5948 @@
+/*[
+ * ============================================================================
+ *
+ * Name: fpu.c
+ *
+ * Author: Paul Murray
+ *
+ * Sccs ID: @(#)fpu.c 1.54 03/23/95
+ *
+ * Purpose:
+ *
+ * Implements the Npx functionality of the Ccpu.
+ *
+ * (c)Copyright Insignia Solutions Ltd., 1993,1994. All rights reserved.
+ *
+ * ============================================================================
+]*/
+#include "insignia.h"
+#include "host_def.h"
+#include <math.h>
+#include "cfpu_def.h"
+#include "ckmalloc.h"
+
+typedef enum {
+FPSTACK,
+M16I,
+M32I,
+M64I,
+M32R,
+M64R,
+M80R
+} NPXOPTYPE;
+
+
+/* Function prototypes - everything returns void */
+LOCAL FPH npx_rint IPT1(FPH, fpval);
+LOCAL VOID GetIntelStatusWord IPT0();
+LOCAL VOID SetIntelTagword IPT1(IU32, new_tag);
+LOCAL VOID ReadI16FromIntel IPT2(IU32 *, valI16, VOID *, memPtr);
+LOCAL VOID ReadI32FromIntel IPT2(IU32 *, valI32, VOID *, memPtr);
+LOCAL VOID WriteI16ToIntel IPT2(VOID *, memPtr, IU16, valI16);
+LOCAL VOID WriteI32ToIntel IPT2(VOID *, memPtr, IU32, valI32);
+LOCAL VOID WriteNaNToIntel IPT2(VOID *, memPtr, FPSTACKENTRY *, valPtr);
+LOCAL VOID WriteZeroToIntel IPT2(VOID *, memPtr, IU16, negZero);
+LOCAL VOID SetIntelStatusWord IPT1(IU32, new_stat);
+LOCAL VOID AdjustOverflowResponse IPT0();
+LOCAL VOID AdjustUnderflowResponse IPT0();
+LOCAL VOID WriteIndefiniteToIntel IPT1(VOID *, memPtr);
+LOCAL VOID SignalDivideByZero IPT1(FPSTACKENTRY *, stackPtr);
+LOCAL VOID SetPrecisionBit IPT0();
+LOCAL VOID GetIntelTagword IPT1(IU32 *, current_tag);
+LOCAL VOID WriteFP32ToIntel IPT2(VOID *, destPtr, FPSTACKENTRY *, srcPtr);
+LOCAL VOID WriteFP64ToIntel IPT2(VOID *, destPtr, FPSTACKENTRY *, srcPtr);
+LOCAL VOID WriteFP80ToIntel IPT2(VOID *, destPtr, FPSTACKENTRY *, srcPtr);
+LOCAL VOID Mul64Bit8Bit IPT2(FPU_I64 *, as64, IU8, mul_count);
+LOCAL VOID CopyFP IPT2(FPSTACKENTRY *, dest_addr, FPSTACKENTRY *, src_addr);
+LOCAL VOID WriteBiggestNaN IPT3(IU16, destInd, FPSTACKENTRY *, val1Ptr, FPSTACKENTRY *, val2Ptr);
+LOCAL VOID Sub64Bit64Bit IPT2(FPU_I64 *, as64a, FPU_I64 *, as64b);
+LOCAL VOID CVTR80FPH IPT2(FPSTACKENTRY *, destPtr, FPSTACKENTRY *, srcPtr);
+LOCAL BOOL Cmp64BitGTE IPT2(FPU_I64 *, as64a, FPU_I64 *, as64b);
+LOCAL VOID CopyR32 IPT2(FPSTACKENTRY *, destPtr, VOID *, srcPtr);
+LOCAL VOID CVTI64FPH IPT1(FPU_I64 *, as64);
+LOCAL VOID CVTFPHI64 IPT2(FPU_I64 *, as64, FPH *, FPPtr);
+LOCAL VOID Add64Bit8Bit IPT2(FPU_I64 *, as64, IU8, small_val);
+LOCAL VOID CopyR64 IPT2(FPSTACKENTRY *, destPtr, VOID *, srcPtr);
+LOCAL VOID CopyR80 IPT2(FPSTACKENTRY *, destPtr, VOID *, srcPtr);
+LOCAL VOID CVTFPHR80 IPT1(FPSTACKENTRY *, memPtr);
+LOCAL VOID WriteInfinityToIntel IPT2(VOID *, memPtr, IU16, neg_val);
+LOCAL VOID PopStack IPT0();
+LOCAL VOID CPY64BIT8BIT IPT2(FPU_I64 *, as64, IU8 *, as8);
+LOCAL VOID WriteIntegerIndefinite IPT1(VOID *, memPtr);
+LOCAL VOID SignalStackOverflow IPT1(FPSTACKENTRY *, StackPtr);
+LOCAL VOID Set64Bit IPT2(FPU_I64 *, as64, IU8, small_val);
+LOCAL VOID Sub64Bit8Bit IPT2(FPU_I64 *, as64, IU8, small_val);
+LOCAL VOID SignalBCDIndefinite IPT1(IU8 *, memPtr);
+GLOBAL VOID InitNpx IPT1(IBOOL, disabled);
+LOCAL VOID LoadValue IPT2(VOID *, SrcOp, IU16 *, IndexVal);
+LOCAL VOID Loadi16ToFP IPT2(FPSTACKENTRY *, FPPtr, VOID *, memPtr);
+LOCAL VOID Loadi32ToFP IPT2(FPSTACKENTRY *, FPPtr, VOID *, memPtr);
+LOCAL VOID Loadi64ToFP IPT2(FPSTACKENTRY *, FPPtr, VOID *, memPtr);
+LOCAL VOID Loadr32ToFP IPT3(FPSTACKENTRY *, FPPtr, VOID *, memPtr, BOOL, setTOS);
+LOCAL VOID Loadr64ToFP IPT3(FPSTACKENTRY *, FPPtr, VOID *, memPtr, BOOL, setTOS);
+LOCAL VOID Loadr80ToFP IPT2(FPSTACKENTRY *, FPPtr, VOID *, memPtr);
+LOCAL VOID LoadTByteToFP IPT2(FPSTACKENTRY *, FPPtr, VOID *, memPtr);
+LOCAL VOID ConvertR80 IPT1(FPSTACKENTRY *, memPtr);
+LOCAL VOID PostCheckOUP IPT0();
+LOCAL VOID CalcTagword IPT1(FPSTACKENTRY *, FPPtr);
+LOCAL VOID SignalStackUnderflow IPT1(FPSTACKENTRY *, StackPtr);
+LOCAL VOID SignalSNaN IPT1(FPSTACKENTRY *, StackPtr);
+LOCAL VOID SignalIndefinite IPT1(FPSTACKENTRY *, StackPtr);
+LOCAL VOID SignalInvalid IPT0();
+LOCAL VOID WriteIndefinite IPT1(FPSTACKENTRY *, StackPtr);
+LOCAL VOID Test2NaN IPT3(IU16, destIndex, FPSTACKENTRY *, src1_addr, FPSTACKENTRY *, src2_addr);
+LOCAL VOID GenericAdd IPT3(IU16, destIndex, IU16, src1Index, IU16, src2Index);
+LOCAL VOID AddBCDByte IPT2(FPU_I64 *, total, IU8, byte_val);
+LOCAL VOID ConvertBCD IPT1(FPSTACKENTRY *, bcdPtr);
+LOCAL VOID GenericCompare IPT1(IU16, src2Index);
+LOCAL VOID GenericDivide IPT3(IU16, destIndex, IU16, src1Index, IU16, src2Index);
+LOCAL VOID OpFpuStoreFpuState IPT2(VOID *, memPtr, IU32, fsave_offset);
+LOCAL VOID OpFpuRestoreFpuState IPT2(VOID *, memPtr, IU32, frstor_offset);
+LOCAL VOID GenericMultiply IPT3(IU16, destIndex, IU16, src1Index, IU16, src2Index);
+LOCAL VOID CheckOUPForIntel IPT0();
+LOCAL VOID GenericSubtract IPT3(IU16, destIndex, IU16, src1Index, IU16, src2Index);
+GLOBAL VOID F2XM1 IPT0();
+GLOBAL VOID FABS IPT0();
+GLOBAL VOID FADD IPT3(IU16, destIndex, IU16, src1Index, VOID *, src2);
+GLOBAL VOID FBLD IPT1(IU8 *, memPtr);
+GLOBAL VOID FBSTP IPT1(IU8 *, memPtr);
+GLOBAL VOID FCHS IPT0();
+GLOBAL VOID FCLEX IPT0();
+GLOBAL VOID FCOM IPT1(VOID *, src2);
+GLOBAL VOID FCOS IPT0();
+GLOBAL VOID FDECSTP IPT0();
+GLOBAL VOID FDIV IPT3(IU16, destIndex, IU16, src1Index, VOID *, src2);
+GLOBAL VOID FFREE IPT1(IU16, destIndex);
+GLOBAL VOID FLD IPT1(VOID *, memPtr);
+GLOBAL VOID FINCSTP IPT0();
+GLOBAL VOID FINIT IPT0();
+GLOBAL VOID FIST IPT1(VOID *, memPtr);
+GLOBAL VOID FLDCONST IPT1(IU8, const_index);
+GLOBAL VOID FLDCW IPT1(VOID *, memPtr);
+GLOBAL VOID FLDCW16 IPT1(VOID *, memPtr);
+GLOBAL VOID FLDENV IPT1(VOID *, memPtr);
+GLOBAL VOID FMUL IPT3(IU16, destIndex, IU16, src1Index, VOID *, src2);
+GLOBAL VOID PTOP IPT0();
+GLOBAL VOID FPATAN IPT0();
+GLOBAL VOID FPREM IPT0();
+GLOBAL VOID FPREM1 IPT0();
+GLOBAL VOID FPTAN IPT0();
+GLOBAL VOID FRNDINT IPT0();
+GLOBAL VOID FSTCW IPT1(VOID *, memPtr);
+GLOBAL VOID FRSTOR IPT1(VOID *, memPtr);
+GLOBAL VOID FSAVE IPT1(VOID *, memPtr);
+GLOBAL VOID FSCALE IPT0();
+GLOBAL VOID FSIN IPT0();
+GLOBAL VOID FSINCOS IPT0();
+GLOBAL VOID FSQRT IPT0();
+GLOBAL VOID FST IPT1(VOID *, memPtr);
+GLOBAL VOID FSTENV IPT1(VOID *, memPtr);
+GLOBAL VOID FSTSW IPT2(VOID *, memPtr, BOOL, toAX);
+GLOBAL VOID FSUB IPT3(IU16, destIndex, IU16, src1Index, VOID *, src2);
+GLOBAL VOID FTST IPT0();
+GLOBAL VOID FXAM IPT0();
+GLOBAL VOID FXCH IPT1(IU16, destIndex);
+GLOBAL VOID FXTRACT IPT1(IU16, destIndex);
+GLOBAL VOID FYL2X IPT0();
+GLOBAL VOID FYL2XP1 IPT0();
+GLOBAL IU32 getNpxControlReg IPT0();
+GLOBAL VOID setNpxControlReg IPT1(IU32, newControl);
+GLOBAL IU32 getNpxStatusReg IPT0();
+GLOBAL VOID setNpxStatusReg IPT1(IU32, newStatus);
+GLOBAL IU32 getNpxTagwordReg IPT0();
+GLOBAL VOID setNpxTagwordReg IPT1(IU32, newTag);
+GLOBAL void getNpxStackRegs IPT1(FPSTACKENTRY *, dumpPtr);
+GLOBAL void setNpxStackRegs IPT1(FPSTACKENTRY *, loadPtr);
+
+/* DEFINED values */
+#ifndef NULL
+#define NULL ((VOID *)0)
+#endif
+#define TAG_NEGATIVE_MASK 1
+#define TAG_ZERO_MASK 2
+#define TAG_INFINITY_MASK 4
+#define TAG_DENORMAL_MASK 8
+#define TAG_NAN_MASK 16
+#define TAG_SNAN_MASK 32
+#define TAG_UNSUPPORTED_MASK 64
+#define TAG_EMPTY_MASK 128
+#define TAG_FSCALE_MASK 256
+#define TAG_BCD_MASK 512
+#define TAG_R80_MASK 1024
+#define UNEVALMASK 1536
+#define FPTEMP_INDEX (IU16)-1
+#define SW_IE_MASK 1
+#define SW_DE_MASK 2
+#define SW_ZE_MASK 4
+#define SW_OE_MASK 8
+#define SW_UE_MASK 16
+#define SW_PE_MASK 32
+#define SW_SF_MASK 64
+#define SW_ES_MASK 128
+#define C3C2C0MASK 0xb8ff
+#define FCLEX_MASK 0x7f00
+#define CW_IM_MASK 1
+#define CW_DM_MASK 2
+#define CW_ZM_MASK 4
+#define CW_OM_MASK 8
+#define CW_UM_MASK 16
+#define CW_PM_MASK 32
+#define COMP_LT 0
+#define COMP_GT 1
+#define COMP_EQ 2
+#define INTEL_COMP_NC 0x4500
+#define INTEL_COMP_GT 0x0000
+#define INTEL_COMP_LT 0x0100
+#define INTEL_COMP_EQ 0x4000
+#define ROUND_NEAREST 0x0000
+#define ROUND_NEG_INFINITY 0x0400
+#define ROUND_POS_INFINITY 0x0800
+#define ROUND_ZERO 0x0c00
+
+/* MACROS */
+#define FlagC0(x) NpxStatus &= 0xfeff; \
+ NpxStatus |= ((x) << 8)
+#define FlagC1(x) NpxStatus &= 0xfdff; \
+ NpxStatus |= ((x) << 9)
+#define FlagC2(x) NpxStatus &= 0xfbff; \
+ NpxStatus |= ((x) << 10)
+#define FlagC3(x) NpxStatus &= 0xbfff; \
+ NpxStatus |= ((x) << 14)
+#define TestUneval(testPtr) \
+ if (((testPtr)->tagvalue & UNEVALMASK) != 0) { \
+ switch ((testPtr)->tagvalue & UNEVALMASK) { \
+ case TAG_BCD_MASK: ConvertBCD((testPtr)); \
+ break; \
+ case TAG_R80_MASK: ConvertR80((testPtr)); \
+ break; \
+ } \
+ }
+
+#define StackEntryByIndex(i) (i==FPTEMP_INDEX? &FPTemp : &FPUStackBase[(TOSPtr-FPUStackBase+i)%8])
+
+/*
+ * Pigging the FYL2X & FYL2XP1 opcodes requires that we use the same
+ * maths functions as the assembler CPU to avoid pig errors due to slight
+ * algorithmic differences; so allow host to specify different functions
+ * if it wants - by default we only require log().
+ */
+#ifndef host_log2
+#define host_log2(x) (log(x)/log(2.0))
+#endif /* !host_log2 */
+
+#ifndef host_log1p
+#define host_log1p(x) (host_log2(1.0 + x))
+#endif /* !host_log1p */
+
+/*
+ * System wide variables
+ */
+GLOBAL IU8 FPtype;
+GLOBAL IU32 NpxLastSel;
+GLOBAL IU32 NpxLastOff;
+GLOBAL IU32 NpxFEA;
+GLOBAL IU32 NpxFDS;
+GLOBAL IU32 NpxFIP;
+GLOBAL IU32 NpxFOP;
+GLOBAL IU32 NpxFCS;
+GLOBAL BOOL POPST;
+GLOBAL BOOL DOUBLEPOP;
+GLOBAL BOOL UNORDERED;
+GLOBAL BOOL REVERSE;
+GLOBAL BOOL NPX_ADDRESS_SIZE_32;
+GLOBAL BOOL NPX_PROT_MODE;
+GLOBAL BOOL NpxException;
+
+/*
+ * FPU-wide variables
+*/
+
+#ifdef SUN4
+LOCAL IU8 *FPout; /* HostGet*Exception() macros need this for Sparc ports. */
+#endif /* SUN4 */
+
+LOCAL IU32 NpxControl;
+LOCAL IU32 NpxStatus;
+LOCAL BOOL DoAPop;
+LOCAL IU16 tag_or;
+LOCAL IU16 tag_xor;
+LOCAL FPSTACKENTRY IntelSpecial;
+LOCAL FPSTACKENTRY *FPUpload = &IntelSpecial;
+LOCAL FPSTACKENTRY FPTemp;
+LOCAL FPSTACKENTRY *FPUStackBase;
+LOCAL FPSTACKENTRY *TOSPtr;
+LOCAL IU16 npxRounding;
+LOCAL FPH FPRes;
+LOCAL FPH MaxBCDValue=999999999999999999.0;
+
+LOCAL IU8 zero_string[] = {"zero"};
+LOCAL IU8 minus_zero_string[] = {"minus zero"};
+LOCAL IU8 infinity_string[] = {"infinity"};
+LOCAL IU8 minus_infinity_string[] = {"minus infinity"};
+LOCAL IU8 nan_string[] = {" NaN"};
+LOCAL IU8 minus_nan_string[] = {" Negative NaN"};
+LOCAL IU8 unsupported_string[] = {"unsupported"};
+LOCAL IU8 unevaluated_string[] = {"unevaluated"};
+LOCAL IU8 empty_string[] = {"empty"};
+LOCAL IU8 convert_string[100];
+
+LOCAL IU16 FscaleTable[] = {
+0,
+0,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_INFINITY_MASK,
+TAG_ZERO_MASK,
+0,
+0,
+0,
+0,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_INFINITY_MASK | TAG_NEGATIVE_MASK,
+TAG_ZERO_MASK | TAG_NEGATIVE_MASK,
+0,
+0,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK | TAG_UNSUPPORTED_MASK,
+TAG_FSCALE_MASK,
+0,
+0,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK | TAG_UNSUPPORTED_MASK,
+TAG_ZERO_MASK | TAG_NEGATIVE_MASK,
+0,
+0,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_INFINITY_MASK,
+TAG_FSCALE_MASK | TAG_UNSUPPORTED_MASK,
+0,
+0,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK,
+TAG_FSCALE_MASK | TAG_UNSUPPORTED_MASK,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0,
+0};
+
+LOCAL FPSTACKENTRY ConstTable[]= {
+{1.0, 0, 0}, /* 1.0 */
+{M_LN10/M_LN2, 0, 0}, /* Log2(10) */
+{M_LOG2E, 0, 0}, /* Log2(e) */
+{M_PI, 0, 0}, /* pi */
+{M_LN2/M_LN10, 0, 0}, /* Log10(2) */
+{M_LN2, 0, 0}, /* Loge(2) */
+{0.0, 0, TAG_ZERO_MASK} /* 0.0 */
+};
+
+LOCAL FPSTACKENTRY FPConstants[] = {
+{0.0, 0, TAG_ZERO_MASK},
+{-0.0, 0, (TAG_ZERO_MASK | TAG_NEGATIVE_MASK)},
+{1.0, 0, 0},
+{2.0, 0, 0},
+{M_PI, 0, 0},
+{-M_PI, 0, TAG_NEGATIVE_MASK},
+{M_PI_2, 0, 0},
+{-(M_PI_2), 0, TAG_NEGATIVE_MASK},
+{M_PI_4, 0, 0},
+{-(M_PI_4), 0, TAG_NEGATIVE_MASK},
+{3.0*M_PI_4, 0, 0},
+{-(3.0*M_PI_4), 0, TAG_NEGATIVE_MASK}
+};
+
+LOCAL FPSTACKENTRY *npx_zero = FPConstants + 0;
+LOCAL FPSTACKENTRY *npx_minus_zero = FPConstants + 1;
+LOCAL FPSTACKENTRY *npx_one = FPConstants + 2;
+LOCAL FPSTACKENTRY *npx_two = FPConstants + 3;
+LOCAL FPSTACKENTRY *npx_pi = FPConstants + 4;
+LOCAL FPSTACKENTRY *npx_minus_pi = FPConstants + 5;
+LOCAL FPSTACKENTRY *npx_pi_by_two = FPConstants + 6;
+LOCAL FPSTACKENTRY *npx_minus_pi_by_two = FPConstants + 7;
+LOCAL FPSTACKENTRY *npx_pi_by_four = FPConstants + 8;
+LOCAL FPSTACKENTRY *npx_minus_pi_by_four = FPConstants + 9;
+LOCAL FPSTACKENTRY *npx_three_pi_by_four = FPConstants + 10;
+LOCAL FPSTACKENTRY *npx_minus_three_pi_by_four = FPConstants + 11;
+
+LOCAL IU32 CompZeroTable[] = {
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_GT,
+INTEL_COMP_GT,
+INTEL_COMP_LT,
+INTEL_COMP_GT,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_LT,
+INTEL_COMP_LT,
+INTEL_COMP_LT,
+INTEL_COMP_GT,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_LT, /* 16 */
+INTEL_COMP_GT,
+INTEL_COMP_EQ,
+INTEL_COMP_EQ,
+INTEL_COMP_LT,
+INTEL_COMP_GT,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_LT,
+INTEL_COMP_GT,
+INTEL_COMP_EQ,
+INTEL_COMP_EQ,
+INTEL_COMP_LT,
+INTEL_COMP_GT,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_GT, /* 32 */
+INTEL_COMP_GT,
+INTEL_COMP_GT,
+INTEL_COMP_GT,
+INTEL_COMP_EQ,
+INTEL_COMP_GT,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_LT,
+INTEL_COMP_LT,
+INTEL_COMP_LT,
+INTEL_COMP_LT,
+INTEL_COMP_LT,
+INTEL_COMP_EQ,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC, /* 48 */
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC,
+INTEL_COMP_NC
+};
+
+#ifdef BIGEND
+/* Note enforcement of word ordering as high word/low word */
+LOCAL FPU_I64 BCDLowNibble[] = {
+{0x002386f2, 0x6fc10000},
+{0x00005af3, 0x107a4000},
+{0x000000e8, 0xd4a51000},
+{0x00000002, 0x540be400},
+{0x00000000, 0x05f5e100},
+{0x00000000, 0x000f4240},
+{0x00000000, 0x00002710},
+{0x00000000, 0x00000064},
+{0x00000000, 0x00000001}
+};
+
+LOCAL FPU_I64 BCDHighNibble[] = {
+{0x01634578, 0x5d8a0000},
+{0x00038d7e, 0xa4c68000},
+{0x00000918, 0x4e72a000},
+{0x00000017, 0x4876e800},
+{0x00000000, 0x3b9aca00},
+{0x00000000, 0x00989680},
+{0x00000000, 0x000186a0},
+{0x00000000, 0x000003e8},
+{0x00000000, 0x0000000a}
+};
+#else /* !BIGEND */
+LOCAL FPU_I64 BCDLowNibble[] = {
+{0x6fc10000, 0x002386f2},
+{0x107a4000, 0x00005af3},
+{0xd4a51000, 0x000000e8},
+{0x540be400, 0x00000002},
+{0x05f5e100, 0x00000000},
+{0x000f4240, 0x00000000},
+{0x00002710, 0x00000000},
+{0x00000064, 0x00000000},
+{0x00000001, 0x00000000}
+};
+
+LOCAL FPU_I64 BCDHighNibble[] = {
+{0x5d8a0000, 0x01634578},
+{0xa4c68000, 0x00038d7e},
+{0x4e72a000, 0x00000918},
+{0x4876e800, 0x00000017},
+{0x3b9aca00, 0x00000000},
+{0x00989680, 0x00000000},
+{0x000186a0, 0x00000000},
+{0x000003e8, 0x00000000},
+{0x0000000a, 0x00000000}
+};
+#endif /* !BIGEND */
+
+
+LOCAL FPSTACKENTRY *FpatanTable[64];
+
+LOCAL IBOOL NpxDisabled = FALSE; /* Set by the UIF */
+
+/* Imported functions */
+IMPORT VOID DoNpxException();
+
+
+/*
+ * Round 'fpval' to an integral value using the rounding mode selected
+ * by the rounding-control bits of NpxControl. Returns the rounded
+ * value; a zero result inherits the sign of the operand, so negative
+ * values that round to zero yield minus zero.
+ */
+LOCAL FPH npx_rint IFN1(FPH, fpval)
+{
+	FPH localfp;
+
+	/* ROUND_ZERO (0x0c00) doubles as the mask for the two
+	 * rounding-control bits, so only the four cases below can occur. */
+	switch (NpxControl & ROUND_ZERO) {
+	case ROUND_NEAREST :
+		localfp = fpval - floor(fpval);	/* fractional part, in [0,1) */
+		if (localfp > 0.5) {
+			localfp = ceil(fpval);
+		} else {
+			if (localfp < 0.5) {
+				localfp = floor(fpval);
+			} else {
+				/* Exactly half way: round to even. At this point
+				 * fpval-localfp == floor(fpval); it is even iff
+				 * halving it still leaves an integral value. */
+				if ((fpval-localfp)/2.0 != floor((fpval-localfp)/2.0)) {
+					localfp = ceil(fpval);
+				} else {
+					localfp = floor(fpval);
+				}
+			}
+		}
+		break;
+	case ROUND_NEG_INFINITY :
+		localfp = floor(fpval);
+		/* help the poor HP over this hurdle... */
+		if ( fpval >= localfp + 1.0 )
+			localfp += 1.0;
+		break;
+	case ROUND_POS_INFINITY :
+		localfp = ceil(fpval);
+		/* help the poor HP over this hurdle... */
+		if ( fpval <= localfp - 1.0 )
+			localfp -= 1.0;
+		break;
+	case ROUND_ZERO :
+		/* Truncate: round towards zero from either side. */
+		if (fpval < 0.0) {
+			localfp = ceil(fpval);
+		} else {
+			localfp = floor(fpval);
+		}
+		break;
+	}
+	/* Check sign of zero: force the result's host sign bit to match
+	 * the operand's sign. */
+	if (localfp == 0.0) {
+		if (fpval < 0.0) {
+			((FPHOST *)&(localfp))->hiword.sign = 1;
+		} else {
+			((FPHOST *)&(localfp))->hiword.sign = 0;
+		}
+	}
+	return(localfp);
+}
+
+
+/*
+ * Refresh the top-of-stack field of the Npx status word from TOSPtr.
+ * The sticky exception bits are already maintained as operations
+ * occur, so only the ST field (bits 11-13) needs recomputing.
+ */
+LOCAL VOID GetIntelStatusWord IFN0()
+{
+	IU32 stackTop;
+
+	stackTop = (IU32)(TOSPtr - FPUStackBase);
+	NpxStatus = (NpxStatus & 0xc7ff) | (stackTop << 11);
+}
+
+
+/*
+ * Install an Intel-format tagword across the eight stack entries.
+ * Only the "empty" encoding (11b) of each two-bit tag is honoured
+ * here; any non-empty entry gets a neutral tag so it can later be
+ * classified precisely via CalcTagword().
+ */
+LOCAL VOID SetIntelTagword IFN1(IU32, new_tag)
+{
+	FPSTACKENTRY *entryPtr;
+	IU8 slot;
+
+	entryPtr = FPUStackBase;
+	for (slot = 0; slot < 8; slot++) {
+		entryPtr->tagvalue = ((new_tag & 3) == 3) ? TAG_EMPTY_MASK : 0;
+		new_tag >>= 2;
+		entryPtr++;
+	}
+}
+
+
+/* Reads and writes for 16 and 32 bit integers are easy as they are handled
+correctly in order to satisfy the integer CPU */
+/* This function is only called from fldenv/frstor where 16-bit data has to
+be extracted from a large (bigendian organised) buffer */
+/*
+ * Assemble a 16-bit value from two bytes stored most significant
+ * first (the fldenv/frstor buffer is big-endian organised) and
+ * return it through 'valI16'.
+ */
+LOCAL VOID ReadI16FromIntel IFN2(IU32 *, valI16, VOID *, memPtr)
+{
+	IU8 *bytePtr = (IU8 *)memPtr;
+
+	*valI16 = ((IU32)bytePtr[0] << 8) | (IU32)bytePtr[1];
+}
+
+
+/* This function is only called from fldenv/frstor where 32-bit data has to
+be extracted from a large (bigendian organised) buffer */
+/*
+ * Assemble a 32-bit value from four bytes stored most significant
+ * first (the fldenv/frstor buffer is big-endian organised) and
+ * return it through 'valI32'.
+ */
+LOCAL VOID ReadI32FromIntel IFN2(IU32 *, valI32, VOID *, memPtr)
+{
+	IU8 *bytePtr = (IU8 *)memPtr;
+	IU32 res = 0;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		res = (res << 8) | (IU32)bytePtr[i];
+	}
+	*valI32 = res;
+}
+
+/* This function is only used in fsave/fstenv */
+/* Store a 16-bit value most significant byte first (fsave/fstenv
+ * buffer order). */
+LOCAL VOID WriteI16ToIntel IFN2(VOID *, memPtr, IU16, valI16)
+{
+	IU8 *bytePtr = (IU8 *)memPtr;
+
+	bytePtr[0] = (IU8)((valI16 >> 8) & 0xff);
+	bytePtr[1] = (IU8)(valI16 & 0xff);
+}
+
+
+/* And so is this one */
+/* Store a 32-bit value most significant byte first (fsave/fstenv
+ * buffer order). */
+LOCAL VOID WriteI32ToIntel IFN2(VOID *, memPtr, IU32, valI32)
+{
+	IU8 *bytePtr = (IU8 *)memPtr;
+	int i;
+
+	for (i = 3; i >= 0; i--) {
+		bytePtr[i] = (IU8)(valI32 & 0xff);
+		valI32 >>= 8;
+	}
+}
+
+
+/* Anything over 32-bits becomes painful as data is read and written using
+the vir_read_bytes and vir_write_bytes routines respectively, which simply
+dump data from the topmost intel address to the lowest intel address. The
+value of the offsets is defined one way round for bigendian ports and the
+other way for little-endian */
+/*
+ * Write the NaN held in 'valPtr' to Intel memory at 'memPtr' in the
+ * current destination format (FPtype: M32R, M64R or M80R). The NaN
+ * is emitted quiet (top mantissa bit forced on), its sign taken from
+ * the entry's tag, and the bytes are stored most significant first
+ * regardless of host endian-ness.
+ */
+LOCAL VOID WriteNaNToIntel IFN2(VOID *, memPtr, FPSTACKENTRY *, valPtr)
+{
+	IU32 mant_hi;
+	IU32 mant_lo;
+
+	/* Ok for endian-ness as we FORCE this presentation */
+	mant_hi = ((IU32 *)&(valPtr->fpvalue))[NPX_HIGH_32_BITS];
+	mant_lo = ((IU32 *)&(valPtr->fpvalue))[NPX_LOW_32_BITS];
+	if (FPtype == M32R) {
+		/* OK since this forces the output to be independent of
+		   endian-ness. */
+		mant_hi |= 0x40000000; /* Make it quiet */
+		mant_hi >>= 8;	/* drop low bits so the mantissa fits the
+				   single-precision field */
+		/* Prepend sign bit and an all-ones exponent. */
+		if ((valPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+			mant_hi |= 0xff000000;
+		} else {
+			mant_hi |= 0x7f000000;
+		}
+		*(IU32 *)memPtr = mant_hi;
+	}
+	if (FPtype == M64R) {
+		mant_hi |= 0x40000000; /* Make it quiet */
+		/* Byte 0 carries the sign and the exponent's top bits. */
+		if ((valPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+			*((IU8 *)memPtr + 0) = 0xff;
+		} else {
+			*((IU8 *)memPtr + 0) = 0x7f;
+		}
+		/* Shift the combined 64-bit mantissa right by 3 to fit the
+		 * double's mantissa field and top it up with ones so the
+		 * exponent field ends up all ones. */
+		mant_lo >>= 3;
+		mant_lo |= (mant_hi << 29);
+		mant_hi >>= 3;
+		mant_hi |= 0xe0000000;
+		/* Emit bytes 1..7, most significant first. */
+		mant_lo >>= 8;
+		*((IU8 *)memPtr + 7) = (mant_lo & 0xff);
+		mant_lo >>= 8;
+		*((IU8 *)memPtr + 6) = (mant_lo & 0xff);
+		mant_lo >>= 8;
+		*((IU8 *)memPtr + 5) = (mant_lo & 0xff);
+		*((IU8 *)memPtr + 4) = (mant_hi & 0xff);
+		mant_hi >>= 8;
+		*((IU8 *)memPtr + 3) = (mant_hi & 0xff);
+		mant_hi >>= 8;
+		*((IU8 *)memPtr + 2) = (mant_hi & 0xff);
+		mant_hi >>= 8;
+		*((IU8 *)memPtr + 1) = (mant_hi & 0xff);
+	}
+	if (FPtype == M80R) {
+		/* Extended format: sign + all-ones exponent in the first two
+		 * bytes, then the 64-bit mantissa stored explicitly. */
+		if ((valPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+			*((IU8 *)memPtr + 0) = 0xff;
+		} else {
+			*((IU8 *)memPtr + 0) = 0x7f;
+		}
+		*((IU8 *)memPtr + 1) = 0xff;
+		*((IU8 *)memPtr + 9) = (mant_lo & 0xff);
+		mant_lo >>= 8;
+		*((IU8 *)memPtr + 8) = (mant_lo & 0xff);
+		mant_lo >>= 8;
+		*((IU8 *)memPtr + 7) = (mant_lo & 0xff);
+		mant_lo >>= 8;
+		*((IU8 *)memPtr + 6) = (mant_lo & 0xff);
+		*((IU8 *)memPtr + 5) = (mant_hi & 0xff);
+		mant_hi >>= 8;
+		*((IU8 *)memPtr + 4) = (mant_hi & 0xff);
+		mant_hi >>= 8;
+		*((IU8 *)memPtr + 3) = (mant_hi & 0xff);
+		mant_hi >>= 8;
+		*((IU8 *)memPtr + 2) = (mant_hi & 0xff);
+	}
+}
+
+
+/*
+ * Emit +0.0 (negZero == 0) or -0.0 in the current destination format
+ * (FPtype) at 'memPtr'. The sign lives in the top bit of the first
+ * (most significant) byte; every other byte is zero.
+ */
+LOCAL VOID WriteZeroToIntel IFN2(VOID *, memPtr, IU16, negZero)
+{
+	IU8 *bytePtr;
+	IU16 zeroCount;
+	IU16 i;
+
+	if (FPtype == M32R) {
+		*(IU32 *)memPtr = (negZero == 0) ? 0x00000000 : 0x80000000;
+	} else {
+		/* M80R occupies ten bytes; every other real format handled
+		 * here occupies eight. */
+		zeroCount = (FPtype == M80R) ? 9 : 7;
+		bytePtr = (IU8 *)memPtr;
+		bytePtr[0] = (negZero == 0) ? 0 : 0x80;
+		for (i = 1; i <= zeroCount; i++) {
+			bytePtr[i] = 0;
+		}
+	}
+}
+
+
+/*
+ * Adopt a new Npx status word wholesale, re-deriving the top-of-stack
+ * pointer from the ST field (bits 11-13).
+ */
+LOCAL VOID SetIntelStatusWord IFN1(IU32, new_stat)
+{
+	IU32 stackTop = (new_stat >> 11) & 0x7;
+
+	TOSPtr = FPUStackBase + stackTop;
+	NpxStatus = new_stat;
+}
+
+
+/*
+ * Placeholder: no host-side adjustment of the masked-overflow result
+ * is currently performed. Kept so callers have a single hook should
+ * an adjustment ever be needed.
+ */
+LOCAL VOID AdjustOverflowResponse IFN0()
+{
+}
+
+
+/*
+ * Placeholder: no host-side adjustment of the masked-underflow result
+ * is currently performed. Kept so callers have a single hook should
+ * an adjustment ever be needed.
+ */
+LOCAL VOID AdjustUnderflowResponse IFN0()
+{
+}
+
+
+/*
+ * Write the "real indefinite" quiet-NaN encoding to 'memPtr' in the
+ * current destination format (FPtype).
+ */
+LOCAL VOID WriteIndefiniteToIntel IFN1(VOID *, memPtr)
+{
+	IU8 *bytePtr = (IU8 *)memPtr;
+	IU16 i;
+
+	switch (FPtype) {
+	case M32R :
+		*(IU32 *)memPtr = 0xffc00000;
+		break;
+	case M64R :
+		bytePtr[0] = 0xff;
+		bytePtr[1] = 0xf8;
+		for (i = 2; i < 8; i++) {
+			bytePtr[i] = 0;
+		}
+		break;
+	case M80R :
+		bytePtr[0] = 0xff;
+		bytePtr[1] = 0xff;
+		bytePtr[2] = 0xc0;
+		for (i = 3; i < 10; i++) {
+			bytePtr[i] = 0;
+		}
+		break;
+	}
+}
+
+
+/*
+ * Record a divide-by-zero exception. If the exception is unmasked the
+ * Intel handler is invoked; in any case the destination entry is set
+ * to an infinity whose sign is the XOR of the operand signs (tag_xor).
+ */
+LOCAL VOID SignalDivideByZero IFN1(FPSTACKENTRY *, stackPtr)
+{
+	NpxStatus |= SW_ZE_MASK;
+	if ((NpxControl & CW_ZM_MASK) == 0) {
+		NpxStatus |= SW_ES_MASK;
+		DoNpxException();
+	}
+	stackPtr->tagvalue = (tag_xor & TAG_NEGATIVE_MASK) | TAG_INFINITY_MASK;
+}
+
+/*
+ * Note a loss of precision in the status word. C1 records the
+ * rounding direction: set only when the result was rounded up
+ * (rounding towards +infinity).
+ */
+LOCAL VOID SetPrecisionBit IFN0()
+{
+	NpxStatus |= SW_PE_MASK;
+	FlagC1((npxRounding == ROUND_POS_INFINITY) ? 1 : 0);
+}
+
+/*
+ * Build the Intel 16-bit tagword from the stack entries, registers 7
+ * down to 0, two bits each: 00 valid, 01 zero, 10 special, 11 empty.
+ * Unevaluated BCD/R80 entries are converted first so their tags are
+ * accurate.
+ */
+LOCAL VOID GetIntelTagword IFN1(IU32 *, current_tag)
+{
+	FPSTACKENTRY *entryPtr;
+	IU8 slot;
+	IU32 result = 0;
+
+	entryPtr = &FPUStackBase[7];
+	for (slot = 0; slot < 8; slot++) {
+		TestUneval(entryPtr);
+		result <<= 2;
+		if ((entryPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+			result |= 3;
+		} else if ((entryPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+			result |= 1;
+		} else if ((entryPtr->tagvalue & ~TAG_NEGATIVE_MASK) != 0) {
+			/* Any tag bit other than the sign marks a special. */
+			result |= 2;
+		}
+		entryPtr--;
+	}
+	*current_tag = result;
+}
+
+
+/* These functions write host format quantities out to the (bigendian
+organised) intel memory. This requires that we define an ordering between the
+two. The values in HOST_xxx are dependent upon the endian-ness of the port */
+/* According to this organisation, HOST_nnn_BYTE_0 is the offset to the most
+significant byte in the representation of this format, and so on. */
+/* A 32-bit real needs no byte shuffling: copy it as a single word. */
+LOCAL VOID WriteFP32ToIntel IFN2(VOID *, destPtr, FPSTACKENTRY *, srcPtr)
+{
+	IU32 asBits = *(IU32 *)srcPtr;
+
+	*(IU32 *)destPtr = asBits;
+}
+
+
+/*
+ * Copy a host-format 64-bit real out to Intel memory, most
+ * significant byte first. The HOST_R64_BYTE_n offsets absorb the
+ * host's endian-ness.
+ */
+LOCAL VOID WriteFP64ToIntel IFN2(VOID *, destPtr, FPSTACKENTRY *, srcPtr)
+{
+	static const IU8 hostOffset[8] = {
+		HOST_R64_BYTE_0, HOST_R64_BYTE_1, HOST_R64_BYTE_2, HOST_R64_BYTE_3,
+		HOST_R64_BYTE_4, HOST_R64_BYTE_5, HOST_R64_BYTE_6, HOST_R64_BYTE_7
+	};
+	IU16 i;
+
+	for (i = 0; i < 8; i++) {
+		*((IU8 *)destPtr + i) = *((IU8 *)srcPtr + hostOffset[i]);
+	}
+}
+
+
+/* Store an 80-bit (temp real) value, permuting host byte order to the
+ intel memory order via the HOST_R80_BYTE_n offsets. */
+LOCAL VOID WriteFP80ToIntel IFN2(VOID *, destPtr, FPSTACKENTRY *, srcPtr)
+{
+ *((IU8 *)destPtr + 0) = *((IU8 *)srcPtr + HOST_R80_BYTE_0);
+ *((IU8 *)destPtr + 1) = *((IU8 *)srcPtr + HOST_R80_BYTE_1);
+ *((IU8 *)destPtr + 2) = *((IU8 *)srcPtr + HOST_R80_BYTE_2);
+ *((IU8 *)destPtr + 3) = *((IU8 *)srcPtr + HOST_R80_BYTE_3);
+ *((IU8 *)destPtr + 4) = *((IU8 *)srcPtr + HOST_R80_BYTE_4);
+ *((IU8 *)destPtr + 5) = *((IU8 *)srcPtr + HOST_R80_BYTE_5);
+ *((IU8 *)destPtr + 6) = *((IU8 *)srcPtr + HOST_R80_BYTE_6);
+ *((IU8 *)destPtr + 7) = *((IU8 *)srcPtr + HOST_R80_BYTE_7);
+ *((IU8 *)destPtr + 8) = *((IU8 *)srcPtr + HOST_R80_BYTE_8);
+ *((IU8 *)destPtr + 9) = *((IU8 *)srcPtr + HOST_R80_BYTE_9);
+}
+
+
+/* 64-bit integer *= 8-bit value, computed by a round trip through the
+ host FP type FPH (via the global FPRes) — exactness therefore depends
+ on FPH carrying at least 64 mantissa-equivalent bits. */
+LOCAL VOID Mul64Bit8Bit IFN2(FPU_I64 *, as64, IU8, mul_count)
+{
+ CVTI64FPH(as64);
+ FPRes *= (FPH)mul_count;
+ CVTFPHI64(as64, &FPRes);
+}
+
+
+/* Copy one complete stack entry (value and tag) to another slot. */
+LOCAL VOID CopyFP IFN2(FPSTACKENTRY *, dest_addr, FPSTACKENTRY *, src_addr)
+{
+ *dest_addr = *src_addr;
+}
+
+
+/* Handle a signalling NaN operand: raise IE; if unmasked, trap and
+ suppress any pending pop, otherwise quieten the NaN in place by
+ clearing its SNAN tag and setting the top fraction bit. */
+LOCAL VOID MakeNaNQuiet IFN1(FPSTACKENTRY *, srcPtr)
+{
+ NpxStatus |= SW_IE_MASK;
+ NpxStatus &= ~SW_SF_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE;
+ } else {
+ srcPtr->tagvalue ^= TAG_SNAN_MASK;
+ /* 0x40000000 is the quiet bit in the high 32 bits of the mantissa. */
+ ((IU32 *)&(srcPtr->fpvalue))[NPX_HIGH_32_BITS] |= 0x40000000;
+ }
+}
+
+
+/* Two-NaN case: copy whichever operand has the larger mantissa
+ (compared as high word then low word) into the destination register,
+ then force the result to be a quiet NaN. */
+LOCAL VOID WriteBiggestNaN IFN3(IU16, destInd, FPSTACKENTRY *, val1Ptr, FPSTACKENTRY *, val2Ptr)
+{
+ FPSTACKENTRY *destPtr = StackEntryByIndex(destInd);
+
+ /* We explicitely and deliberately store NaNs as two 32-bit values high word then low word */
+ if (((IU32 *)&(val1Ptr->fpvalue))[NPX_HIGH_32_BITS] == ((IU32 *)&(val2Ptr->fpvalue))[NPX_HIGH_32_BITS]) {
+ if (((IU32 *)&(val1Ptr->fpvalue))[NPX_LOW_32_BITS] >= ((IU32 *)&(val2Ptr->fpvalue))[NPX_LOW_32_BITS]) {
+ /* It's val1 */
+ CopyFP(destPtr, val1Ptr);
+ } else {
+ CopyFP(destPtr, val2Ptr);
+ }
+ } else {
+ if (((IU32 *)&(val1Ptr->fpvalue))[NPX_HIGH_32_BITS] > ((IU32 *)&(val2Ptr->fpvalue))[NPX_HIGH_32_BITS]) {
+ /* It's val1 */
+ CopyFP(destPtr, val1Ptr);
+ } else {
+ CopyFP(destPtr, val2Ptr);
+ }
+ }
+ /* Always make it a quiet NaN */
+ ((IU32 *)&(destPtr->fpvalue))[NPX_HIGH_32_BITS] |= 0x40000000;
+ destPtr->tagvalue &= ~TAG_SNAN_MASK;
+}
+
+
+/* 64-bit integer subtract: as64a -= as64b, computed through host FP
+ (CVTI64FPH leaves its result in the global FPRes). */
+LOCAL VOID Sub64Bit64Bit IFN2(FPU_I64 *, as64a, FPU_I64 *, as64b)
+{
+ FPH FPlocal;
+
+ CVTI64FPH(as64b);
+ FPlocal = FPRes;
+ CVTI64FPH(as64a);
+ FPRes -= FPlocal;
+ CVTFPHI64(as64a, &FPRes);
+}
+
+
+/* Convert an intel 80-bit temp real (FP80) into the host FP format
+ (FPHOST): copy the sign, rebias the exponent, and pack the explicit-1
+ 64-bit mantissa into the host's narrower mantissa field, rounding the
+ 11 discarded low bits according to the rounding control in NpxControl.
+ BUGFIX: the ROUND_NEG/POS_INFINITY cases used "=" instead of "==",
+ so round-down always rounded up (and overwrote the sign bit with 1)
+ and round-up never rounded (and cleared the sign bit). */
+LOCAL VOID CVTR80FPH IFN2(FPSTACKENTRY *, destPtr, FPSTACKENTRY *, srcPtr)
+{
+ IU32 munger;
+ IU16 bitleft;
+
+ /* First, copy the sign bit */
+ ((FPHOST *)&(destPtr->fpvalue))->hiword.sign = ((FP80 *)&(srcPtr->fpvalue))->sign_exp.sign;
+ /* Then, copy the modified exponent */
+ munger = (IU32)((FP80 *)&(srcPtr->fpvalue))->sign_exp.exp;
+ munger -= (16383 - HOST_BIAS);
+ ((FPHOST *)&(destPtr->fpvalue))->hiword.exp = munger;
+ /* Finally, the mantissa: drop the explicit integer bit, then split
+ the remaining bits between the host's high and low mantissa words. */
+ munger = (IU32)((FP80 *)&(srcPtr->fpvalue))->mant_hi;
+ munger <<= 1;
+ ((FPHOST *)&(destPtr->fpvalue))->hiword.mant_hi = (munger >> 12);
+ munger <<= 20;
+ munger |= ((FP80 *)&(srcPtr->fpvalue))->mant_lo >> 11;
+ /* The 11 mantissa bits that do not fit decide the rounding. */
+ bitleft = ((FP80 *)&(srcPtr->fpvalue))->mant_lo & 0x7ff;
+
+ if (bitleft != 0) {
+ switch (NpxControl & ROUND_ZERO) {
+ case ROUND_NEAREST :
+ if (bitleft > 0x3ff) {
+ munger += 1;
+ }
+ break;
+ case ROUND_NEG_INFINITY :
+ /* Round away from zero only for negative values. */
+ if (((FPHOST *)&(destPtr->fpvalue))->hiword.sign == 1) {
+ munger += 1;
+ }
+ break;
+ case ROUND_POS_INFINITY :
+ /* Round away from zero only for positive values. */
+ if (((FPHOST *)&(destPtr->fpvalue))->hiword.sign == 0) {
+ munger += 1;
+ }
+ break;
+ case ROUND_ZERO :
+ /* Do nothing */
+ break;
+ }
+ }
+ ((FPHOST *)&(destPtr->fpvalue))->mant_lo = munger;
+}
+
+
+/* Return TRUE when the 64-bit integer *as64a >= *as64b, comparing via
+ the host FP type (CVTI64FPH leaves its result in the global FPRes). */
+LOCAL BOOL Cmp64BitGTE IFN2(FPU_I64 *, as64a, FPU_I64 *, as64b)
+{
+ FPH FPlocal;
+
+ CVTI64FPH(as64b);
+ FPlocal = FPRes;
+ CVTI64FPH(as64a);
+ return(FPRes >= FPlocal);
+}
+
+
+/* Load a raw 32-bit real from intel memory into a stack entry; 32-bit
+ quantities need no byte reordering. */
+LOCAL VOID CopyR32 IFN2(FPSTACKENTRY *, destPtr, VOID *, srcPtr)
+{
+ *(IU32 *)destPtr = *(IU32 *)srcPtr;
+}
+
+
+/* Convert a 64-bit integer pair to host FP in the global FPRes:
+ high_word * 2^32 + low_word.
+ NOTE(review): negative values round-trip correctly only if the cast
+ of high_word converts as a signed quantity — confirm against the
+ FPU_I64 field declarations. */
+LOCAL VOID CVTI64FPH IFN1(FPU_I64 *, as64)
+{
+ FPRes = (FPH)as64->high_word * 4294967296.0 + (FPH)as64->low_word;
+}
+
+
+/* Convert a host FP value to a two's-complement 64-bit integer pair.
+ The mantissa (with the implicit 1 restored) is shifted left or right
+ by the unbiased exponent, then negated if the sign bit was set.
+ A zero exponent (value zero / denormal) yields 0. */
+LOCAL VOID CVTFPHI64 IFN2(FPU_I64 *, as64, FPH *, FPPtr)
+{
+ IU32 high32 = 0;
+ IU32 low32 = 0;
+ IS32 exp;
+ IU32 holder;
+ IU32 signbit = 0;
+
+ exp = ((FPHOST *)FPPtr)->hiword.exp;
+ if (exp != 0) {
+ high32 = ((FPHOST *)FPPtr)->hiword.mant_hi;
+ low32 = ((FPHOST *)FPPtr)->mant_lo;
+ /* Now stick a 1 at the top of the mantissa */
+ /* Calculate where this is */
+ /* signbit here temporarily counts the host exponent width. */
+ holder = HOST_MAX_EXP+1;
+ signbit = 1;
+ while (holder >>= 1) {
+ signbit += 1;
+ }
+ high32 |= (1 << (32-signbit));
+ exp -= HOST_BIAS;
+ exp -= (64-signbit);
+
+ /* signbit now reverts to holding the actual sign flag. */
+ signbit = ((FPHOST *)FPPtr)->hiword.sign;
+
+ /* high32 and low32 are (mantissa)*(2^52 )
+ * exp is (true exponent-52) = number of bit positions to shift
+ * +ve implies shift left, -ve implies shift right
+ */
+ if (exp > 0) {
+ if (exp >= 32) {
+ high32 = low32 << ( exp - 32 ) ;
+ low32 = 0;
+ } else {
+ high32 = high32 << exp ;
+ holder = low32 >> ( 32 -exp ) ;
+ high32 = high32 | holder ;
+ low32 = low32 << exp ;
+ }
+ } else {
+ if ( exp < 0) {
+ exp = -exp;
+ if ( exp >= 32 ) {
+ low32 = high32 >> ( exp - 32 ) ;
+ high32 = 0 ;
+ } else {
+ low32 = low32 >> exp ;
+ holder = high32 << ( 32 -exp ) ;
+ low32 = low32 | holder ;
+ high32 = high32 >> exp ;
+ }
+ }
+ }
+ }
+ if (signbit != 0) {
+ /* Make it negative: two's-complement negate the 64-bit pair. */
+ high32 ^= 0xffffffff;
+ low32 ^= 0xffffffff;
+ low32 += 1;
+ if (low32 == 0) {
+ high32 += 1;
+ }
+ }
+ as64->high_word = high32;
+ as64->low_word = low32;
+}
+
+
+/* 64-bit integer += 8-bit value, computed through host FP (FPRes). */
+LOCAL VOID Add64Bit8Bit IFN2(FPU_I64 *, as64, IU8, small_val)
+{
+ CVTI64FPH(as64);
+ FPRes += (FPH)small_val;
+ CVTFPHI64(as64, &FPRes);
+}
+
+
+/* Load a raw 64-bit real from intel memory into a stack entry,
+ permuting bytes into host order via the HOST_R64_BYTE_n offsets
+ (inverse of WriteFP64ToIntel). */
+LOCAL VOID CopyR64 IFN2(FPSTACKENTRY *, destPtr, VOID *, srcPtr)
+{
+ *((IU8 *)destPtr + HOST_R64_BYTE_0) = *((IU8 *)srcPtr + 0);
+ *((IU8 *)destPtr + HOST_R64_BYTE_1) = *((IU8 *)srcPtr + 1);
+ *((IU8 *)destPtr + HOST_R64_BYTE_2) = *((IU8 *)srcPtr + 2);
+ *((IU8 *)destPtr + HOST_R64_BYTE_3) = *((IU8 *)srcPtr + 3);
+ *((IU8 *)destPtr + HOST_R64_BYTE_4) = *((IU8 *)srcPtr + 4);
+ *((IU8 *)destPtr + HOST_R64_BYTE_5) = *((IU8 *)srcPtr + 5);
+ *((IU8 *)destPtr + HOST_R64_BYTE_6) = *((IU8 *)srcPtr + 6);
+ *((IU8 *)destPtr + HOST_R64_BYTE_7) = *((IU8 *)srcPtr + 7);
+}
+
+/*
+ * CopyR80 is different from the above as it is called to copy
+ * between FPSTACKENTRYs. Copy straight through.
+ */
+LOCAL VOID CopyR80 IFN2(FPSTACKENTRY *, destPtr, VOID *, srcPtr)
+{
+ /* Straight FP80 structure copy — both sides are stack entries. */
+ *(FP80 *)destPtr = *(FP80 *)srcPtr;
+}
+
+
+/* Convert a host-format value into an intel 80-bit temp real in the
+ global FPTemp: copy the sign, rebias the exponent to 16383, and
+ expand the host mantissa, making the integer bit explicit. */
+LOCAL VOID CVTFPHR80 IFN1(FPSTACKENTRY *, memPtr)
+{
+ IU32 munger;
+
+ /* First, copy the sign bit */
+ ((FP80 *)&(FPTemp.fpvalue))->sign_exp.sign = ((FPHOST *)&(memPtr->fpvalue))->hiword.sign;
+ /* Then, copy the modified exponent */
+ munger = (IU32)((FPHOST *)&(memPtr->fpvalue))->hiword.exp;
+ munger += (16383 - HOST_BIAS);
+ ((FP80 *)&(FPTemp.fpvalue))->sign_exp.exp = munger;
+ /* Finally, the mantissa */
+ munger = (IU32)((FPHOST *)&(memPtr->fpvalue))->hiword.mant_hi;
+ munger <<= 11;
+ /* Set the explicit integer bit required by the R80 format. */
+ munger |= 0x80000000;
+ ((FP80 *)&(FPTemp.fpvalue))->mant_hi = munger | (((FPHOST *)&(memPtr->fpvalue))->mant_lo >> 21);
+ ((FP80 *)&(FPTemp.fpvalue))->mant_lo = ((((FPHOST *)&(memPtr->fpvalue))->mant_lo) << 11);
+}
+
+
+/* Write an infinity of the requested sign to intel memory in the
+ format selected by the global FPtype: M32R as one IU32, M80R as ten
+ bytes (sign/exp 0x7fff|0xffff, mantissa 0x8000...), otherwise M64R
+ as eight bytes (0x7ff0.../0xfff0...). */
+LOCAL VOID WriteInfinityToIntel IFN2(VOID *, memPtr, IU16, neg_val)
+{
+ if (FPtype == M32R) {
+ if (neg_val == 0) {
+ *(IU32 *)memPtr = 0x7f800000;
+ } else {
+ *(IU32 *)memPtr = 0xff800000;
+ }
+ } else {
+ if (FPtype == M80R) {
+ if (neg_val == 0) {
+ *((IU8 *)memPtr + 0) = 0x7f;
+ } else {
+ *((IU8 *)memPtr + 0) = 0xff;
+ }
+ *((IU8 *)memPtr + 1) = 0xff;
+ *((IU8 *)memPtr + 2) = 0x80;
+ *((IU8 *)memPtr + 3) = 0;
+ *((IU8 *)memPtr + 4) = 0;
+ *((IU8 *)memPtr + 5) = 0;
+ *((IU8 *)memPtr + 6) = 0;
+ *((IU8 *)memPtr + 7) = 0;
+ *((IU8 *)memPtr + 8) = 0;
+ *((IU8 *)memPtr + 9) = 0;
+ } else {
+ /* M64R: 64-bit real layout. */
+ if (neg_val == 0) {
+ *((IU8 *)memPtr + 0) = 0x7f;
+ } else {
+ *((IU8 *)memPtr + 0) = 0xff;
+ }
+ *((IU8 *)memPtr + 1) = 0xf0;
+ *((IU8 *)memPtr + 2) = 0;
+ *((IU8 *)memPtr + 3) = 0;
+ *((IU8 *)memPtr + 4) = 0;
+ *((IU8 *)memPtr + 5) = 0;
+ *((IU8 *)memPtr + 6) = 0;
+ *((IU8 *)memPtr + 7) = 0;
+ }
+ }
+}
+
+
+/* Pop the FPU stack: mark the current top empty, advance TOSPtr one
+ slot, and clear the deferred-pop request flag. */
+LOCAL VOID PopStack IFN0()
+{
+ /* Mark current TOS as free */
+ TOSPtr->tagvalue = TAG_EMPTY_MASK;
+ TOSPtr = StackEntryByIndex(1);
+ DoAPop = FALSE;
+}
+
+
+/* Extract the least significant byte of a 64-bit integer pair. */
+LOCAL VOID CPY64BIT8BIT IFN2(FPU_I64 *, as64, IU8 *, as8)
+{
+ *as8 = (as64->low_word & 0xff);
+}
+
+
+/* Write the integer "indefinite" value (most negative integer) to
+ intel memory in the width selected by the global FPtype.
+ NOTE(review): the M16I case stores through an IU32 pointer, writing
+ 4 bytes for a 16-bit destination — confirm this is intentional and
+ matches how 16-bit integer operands are buffered by the callers. */
+LOCAL VOID WriteIntegerIndefinite IFN1(VOID *, memPtr)
+{
+ switch (FPtype) {
+ case M16I : *((IU32 *)memPtr) = 0x8000;
+ break;
+ case M32I : *((IU32 *)memPtr) = 0x80000000;
+ break;
+ case M64I : *((IU8 *)memPtr + 0) = 0x80;
+ *((IU8 *)memPtr + 1) = 0;
+ *((IU8 *)memPtr + 2) = 0;
+ *((IU8 *)memPtr + 3) = 0;
+ *((IU8 *)memPtr + 4) = 0;
+ *((IU8 *)memPtr + 5) = 0;
+ *((IU8 *)memPtr + 6) = 0;
+ *((IU8 *)memPtr + 7) = 0;
+ break;
+ }
+}
+
+
+/*(
+Name : SignalStackOverflow
+Function : To set the required bits in the status word following
+ a stack overflow exception, and to issue the required
+ response.
+)*/
+
+
+/* Stack overflow response: set IE+SF, C1=1 (overflow direction); trap
+ if IE is unmasked, otherwise deliver the real indefinite QNaN. */
+LOCAL VOID SignalStackOverflow IFN1(FPSTACKENTRY *, StackPtr)
+{
+ NpxStatus |= (SW_IE_MASK | SW_SF_MASK);
+ FlagC1(1);
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE; /* Just in case it was set */
+ } else {
+ WriteIndefinite(StackPtr);
+ }
+}
+
+
+/* Initialise a 64-bit integer pair from an 8-bit value (zero-extend). */
+LOCAL VOID Set64Bit IFN2(FPU_I64 *, as64, IU8, small_val)
+{
+ as64->low_word = small_val;
+ as64->high_word = 0;
+}
+
+
+/* 64-bit integer -= 8-bit value, computed through host FP (FPRes). */
+LOCAL VOID Sub64Bit8Bit IFN2(FPU_I64 *, as64, IU8, small_val)
+{
+ CVTI64FPH(as64);
+ FPRes -= (FPH)small_val;
+ CVTFPHI64(as64, &FPRes);
+}
+
+
+/* Write the packed-BCD "indefinite" encoding (ff ff c0 00...) to the
+ ten-byte destination in intel memory. */
+LOCAL VOID SignalBCDIndefinite IFN1(IU8 *, memPtr)
+{
+ *((IU8 *)memPtr + 0) = 0xff;
+ *((IU8 *)memPtr + 1) = 0xff;
+ *((IU8 *)memPtr + 2) = 0xc0;
+ *((IU8 *)memPtr + 3) = 0;
+ *((IU8 *)memPtr + 4) = 0;
+ *((IU8 *)memPtr + 5) = 0;
+ *((IU8 *)memPtr + 6) = 0;
+ *((IU8 *)memPtr + 7) = 0;
+ *((IU8 *)memPtr + 8) = 0;
+ *((IU8 *)memPtr + 9) = 0;
+}
+
+/* Called from cpu_init and cpu_reset */
+
+/* Initialise (or reset) the emulated NPX: allocate the 8-entry register
+ file on first call, empty all registers, fill the FPATAN special-case
+ dispatch table, and apply FINIT-equivalent control/status defaults. */
+GLOBAL VOID InitNpx IFN1(IBOOL, disabled)
+{
+ IU16 i;
+ IU8 *bottom_ptr;
+ IU16 stackPtr = 0;
+ SAVED IBOOL first = TRUE;
+
+ /* Set up a couple of control type things */
+ NpxException = FALSE;
+ NPX_ADDRESS_SIZE_32 = FALSE;
+ NPX_PROT_MODE = FALSE;
+
+ if (first)
+ {
+ /* Get the required memory (once only; survives cpu_reset) */
+#ifndef SFELLOW
+ check_malloc(FPUStackBase, 8, FPSTACKENTRY);
+#else
+ FPUStackBase = (FPSTACKENTRY *)SFMalloc(8*sizeof(FPSTACKENTRY), FALSE);
+#endif /* SFELLOW */
+ first = FALSE;
+ }
+
+ /* Mark every register empty and point TOS at register 0. */
+ for (i=0; i<8; i++) {
+ (FPUStackBase+i)->tagvalue = TAG_EMPTY_MASK;
+ }
+ TOSPtr = FPUStackBase;
+ DoAPop = FALSE;
+
+ /* FPATAN special-operand result table; NULL entries are combinations
+ handled by the general code path — presumably indexed by operand
+ tag classes; TODO confirm the index layout against the FPATAN code. */
+ i=0;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = npx_pi_by_two;
+ FpatanTable[i++] = npx_pi_by_two;
+ FpatanTable[i++] = npx_zero;
+ FpatanTable[i++] = npx_pi;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = npx_minus_pi_by_two;
+ FpatanTable[i++] = npx_minus_pi_by_two;
+ FpatanTable[i++] = npx_minus_zero;
+ FpatanTable[i++] = npx_minus_pi;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = npx_zero;
+ FpatanTable[i++] = npx_pi;
+ FpatanTable[i++] = npx_zero;
+ FpatanTable[i++] = npx_pi;
+ FpatanTable[i++] = npx_zero;
+ FpatanTable[i++] = npx_pi;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = npx_minus_zero;
+ FpatanTable[i++] = npx_minus_pi;
+ FpatanTable[i++] = npx_minus_zero;
+ FpatanTable[i++] = npx_minus_pi;
+ FpatanTable[i++] = npx_minus_zero;
+ FpatanTable[i++] = npx_minus_pi;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = npx_pi_by_two;
+ FpatanTable[i++] = npx_pi_by_two;
+ FpatanTable[i++] = npx_pi_by_two;
+ FpatanTable[i++] = npx_pi_by_two;
+ FpatanTable[i++] = npx_pi_by_four;
+ FpatanTable[i++] = npx_three_pi_by_four;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = npx_minus_pi_by_two;
+ FpatanTable[i++] = npx_minus_pi_by_two;
+ FpatanTable[i++] = npx_minus_pi_by_two;
+ FpatanTable[i++] = npx_minus_pi_by_two;
+ FpatanTable[i++] = npx_minus_pi_by_four;
+ FpatanTable[i++] = npx_minus_three_pi_by_four;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i++] = NULL;
+ FpatanTable[i] = NULL;
+
+ /* Finally, the rest of the FINIT functionality */
+
+ NpxDisabled = disabled; /* If disabled via the UIF we must ignore FSTSW/FSTCW */
+
+ /* FINIT defaults: all exceptions masked, round-nearest, 64-bit prec. */
+ NpxControl = 0x037f;
+ npxRounding = ROUND_NEAREST;
+ NpxStatus = 0;
+ NpxLastSel=0;
+ NpxLastOff=0;
+ NpxFEA=0;
+ NpxFDS=0;
+ NpxFIP=0;
+ NpxFOP=0;
+ NpxFCS=0;
+
+}
+
+
+/*(
+Name : LoadValue
+Function : Load up the value for any flavour of operand.
+ This is ALWAYS inlined.
+)*/
+
+
+/* Fetch an operand of any flavour (per the global FPtype): a stack
+ operand yields its register index directly; a memory operand is
+ converted into the global FPTemp and FPTEMP_INDEX is returned. */
+LOCAL VOID LoadValue IFN2(VOID *, SrcOp, IU16 *, IndexVal)
+{
+ if (FPtype == FPSTACK) {
+ *IndexVal = *(IU16 *)SrcOp;
+ } else {
+ switch (FPtype) {
+ case M16I: Loadi16ToFP(&FPTemp, SrcOp);
+ break;
+ case M32I: Loadi32ToFP(&FPTemp, SrcOp);
+ break;
+ case M64I: Loadi64ToFP(&FPTemp, SrcOp);
+ break;
+ case M32R: Loadr32ToFP(&FPTemp, SrcOp, FALSE);
+ break;
+ case M64R: Loadr64ToFP(&FPTemp, SrcOp, FALSE);
+ break;
+ case M80R: Loadr80ToFP(&FPTemp, SrcOp);
+ break;
+ }
+ *IndexVal = FPTEMP_INDEX;
+ }
+}
+
+
+/*(
+Name : Loadi16ToFP
+Function : Load a 16-bit value from intel memory and convert it
+ to FPH
+)*/
+
+/* Convert a 16-bit integer operand to FPH, tagging zero/negative.
+ NOTE(review): the source is read through an IU32 pointer and then
+ truncated to IS16 — this assumes the operand was buffered 32-bits
+ wide by the caller; confirm against the operand fetch code. */
+LOCAL VOID Loadi16ToFP IFN2(FPSTACKENTRY *, FPPtr, VOID *, memPtr)
+{
+ IS16 asint;
+
+ asint = (IS16)*((IU32 *)memPtr); /* High byte */
+ if (asint == 0) {
+ /* Fast pass through */
+ FPPtr->tagvalue = TAG_ZERO_MASK;
+ } else {
+ FPPtr->fpvalue = (FPH)asint;
+ if (asint < 0) {
+ FPPtr->tagvalue = TAG_NEGATIVE_MASK;
+ } else {
+ FPPtr->tagvalue = 0;
+ }
+ }
+}
+
+
+
+/*(
+Name : Loadi32ToFP
+Function : Load a 32-bit value from intel memory and convert it
+ to FPH
+)*/
+
+
+/* Convert a 32-bit integer operand to FPH, tagging zero/negative. */
+LOCAL VOID Loadi32ToFP IFN2(FPSTACKENTRY *, FPPtr, VOID *, memPtr)
+{
+ IS32 asint;
+
+ asint = *((IS32 *)memPtr);
+ if (asint == 0) {
+ /* Fast pass through */
+ FPPtr->tagvalue = TAG_ZERO_MASK;
+ } else {
+ FPPtr->fpvalue = (FPH)asint;
+ if (asint < 0) {
+ FPPtr->tagvalue = TAG_NEGATIVE_MASK;
+ } else {
+ FPPtr->tagvalue = 0;
+ }
+ }
+}
+
+
+
+/*(
+Name : Loadi64ToFP
+Function : Load a 64-bit value from intel memory and convert it
+ to FPH
+)*/
+
+
+/* Convert a 64-bit integer operand to FPH. The value is assembled
+ byte-by-byte, most significant byte first (byte 0 is sign-extended
+ via the IS8 read), then combined as hi*2^32 + lo. */
+LOCAL VOID Loadi64ToFP IFN2(FPSTACKENTRY *, FPPtr, VOID *, memPtr)
+{
+ IS32 asint_hi;
+ IU32 asint_lo;
+
+ asint_hi = *((IS8 *)memPtr + 0);
+ asint_hi <<= 8;
+ asint_hi += *((IU8 *)memPtr + 1);
+ asint_hi <<= 8;
+ asint_hi += *((IU8 *)memPtr + 2);
+ asint_hi <<= 8;
+ asint_hi += *((IU8 *)memPtr + 3);
+
+ asint_lo = *((IU8 *)memPtr + 4);
+ asint_lo <<= 8;
+ asint_lo += *((IU8 *)memPtr + 5);
+ asint_lo <<= 8;
+ asint_lo += *((IU8 *)memPtr + 6);
+ asint_lo <<= 8;
+ asint_lo += *((IU8 *)memPtr + 7);
+
+ if ((asint_hi | asint_lo) == 0) {
+ /* Fast pass through */
+ FPPtr->tagvalue = TAG_ZERO_MASK;
+ } else {
+ FPPtr->fpvalue = (FPH)asint_hi*4294967296.0 + (FPH)asint_lo;
+ if (asint_hi < 0) {
+ FPPtr->tagvalue = TAG_NEGATIVE_MASK;
+ } else {
+ FPPtr->tagvalue = 0;
+ }
+ }
+}
+
+
+
+/*(
+Name : Loadr32ToFP
+Function : Load a 32-bit real value from intel memory and convert
+ it to FPH
+)*/
+
+
+/* Convert a 32-bit real operand to FPH, classifying it into the tag
+ scheme (zero / denormal / infinity / NaN / ordinary). Raises DE for
+ denormals and IE for signalling NaNs, trapping when unmasked.
+ setTOS: when TRUE the operand is being pushed, so the exception
+ pointer is aimed at FPPtr and SNaNs are quietened in place. */
+LOCAL VOID Loadr32ToFP IFN3(FPSTACKENTRY *, FPPtr, VOID *, memPtr, BOOL, setTOS)
+{
+ IU16 localtag;
+ IS32 mantissa;
+
+ /* Note that this, being a 32-bit quantity, is loaded with correct
+ host endianness */
+ if (((FP32 *)memPtr)->sign == 1) {
+ localtag = TAG_NEGATIVE_MASK;
+ } else {
+ localtag = 0;
+ }
+ /* Now check the exponent... */
+ if (((FP32 *)memPtr)->exp == 0) {
+ /* It's either zero or denormal */
+ mantissa = ((FP32 *)memPtr)->mant;
+ if (mantissa == 0x0) {
+ /* It's zero */
+ localtag |= TAG_ZERO_MASK;
+ } else {
+ /* It's a denormal */
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ if (setTOS)
+ TOSPtr = FPPtr;
+ DoNpxException();
+ return;
+ } else {
+ FPPtr->fpvalue = (FPH)(*(float *)memPtr);
+ }
+ }
+ } else {
+ if (((FP32 *)memPtr)->exp == 255) {
+ /* It's either infinity or a NaN */
+ mantissa = ((FP32 *)memPtr)->mant;
+ if (mantissa == 0x0) {
+ /* It's infinity */
+ localtag |= TAG_INFINITY_MASK;
+ } else {
+ localtag |= TAG_NAN_MASK;
+ /* Is it quiet or signalling? (quiet bit is mant bit 22) */
+ if ((mantissa & 0x400000) == 0) {
+ /* It's a signalling NaN */
+ NpxStatus |= SW_IE_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ }
+ /* Must load up the mantissa of the NaN */
+ ((IU32 *)FPPtr)[NPX_HIGH_32_BITS] = ((mantissa << 8) | 0x80000000);
+ ((IU32 *)FPPtr)[NPX_LOW_32_BITS] = 0;
+ if ((mantissa & 0x400000) == 0) {
+ /* SNaN: quieten when pushing, else tag as signalling. */
+ if (setTOS)
+ ((IS32 *)FPPtr)[NPX_HIGH_32_BITS] |= 0x40000000;
+ else
+ localtag |= TAG_SNAN_MASK;
+ }
+ }
+ } else {
+ /* It's a boring ordinary number */
+ FPPtr->fpvalue = (FPH)(*(float *)memPtr);
+ }
+ }
+ FPPtr->tagvalue = localtag;
+}
+
+
+/*(
+Name : Loadr64ToFP
+Function : Load a 64-bit real value from intel memory and convert
+ it to FPH
+)*/
+
+/* Convert a 64-bit real operand to FPH, classifying it into the tag
+ scheme (zero / denormal / infinity / NaN / ordinary). Raises DE for
+ denormals and IE for signalling NaNs, trapping when unmasked.
+ setTOS: when TRUE the operand is being pushed, so the exception
+ pointer is aimed at FPPtr and SNaNs are quietened in place.
+ BUGFIX: the unmasked-denormal path now returns after raising the
+ exception, matching Loadr32ToFP; previously it fell through and
+ tagged an unconverted fpvalue as a valid number. */
+LOCAL VOID Loadr64ToFP IFN3(FPSTACKENTRY *, FPPtr, VOID *, memPtr, BOOL, setTOS)
+{
+ IU16 localtag;
+ IS32 mantissa_lo;
+ IS32 mantissa_hi;
+
+ CopyR64(FPUpload, memPtr);
+ if (((FP64 *)&(FPUpload->fpvalue))->hiword.sign != 0) {
+ localtag = TAG_NEGATIVE_MASK;
+ } else {
+ localtag = 0;
+ }
+ /* Now check the exponent... */
+ if (((FP64 *)&(FPUpload->fpvalue))->hiword.exp == 0) {
+ /* It's either zero or denormal */
+ mantissa_lo = ((FP64 *)&(FPUpload->fpvalue))->mant_lo;
+ mantissa_hi = ((FP64 *)&(FPUpload->fpvalue))->hiword.mant_hi;
+ if ((mantissa_lo | mantissa_hi) == 0) {
+ /* It's zero */
+ localtag |= TAG_ZERO_MASK;
+ } else {
+ /* It's a denormal */
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ if (setTOS)
+ TOSPtr = FPPtr;
+ DoNpxException();
+ return;
+ } else {
+ FPPtr->fpvalue = (FPH)(*(DOUBLE *)&(FPUpload->fpvalue));
+ /* Really need a sort of host denormal detection */
+ /* localtag |= TAG_DENORMAL_MASK; */
+ }
+ }
+ } else {
+ if (((FP64 *)&(FPUpload->fpvalue))->hiword.exp == 2047) {
+ /* It's either infinity or a NaN */
+ mantissa_lo = ((FP64 *)&(FPUpload->fpvalue))->mant_lo;
+ mantissa_hi = ((FP64 *)&(FPUpload->fpvalue))->hiword.mant_hi;
+ if ((mantissa_lo | mantissa_hi) == 0) {
+ /* It's infinity */
+ localtag |= TAG_INFINITY_MASK;
+ } else {
+ localtag |= TAG_NAN_MASK;
+ /* Is it quiet or signalling? (quiet bit is mant_hi bit 19) */
+ if ((mantissa_hi & 0x80000) == 0) {
+ /* It's a signalling NaN */
+ NpxStatus |= SW_IE_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ }
+ /* Must load up the mantissa of the NaN */
+ ((IS32 *)FPPtr)[NPX_HIGH_32_BITS] = ((mantissa_hi << 11) | 0x80000000);
+ ((IS32 *)FPPtr)[NPX_HIGH_32_BITS] |= ((IU32)mantissa_lo >> 21);
+ ((IS32 *)FPPtr)[NPX_LOW_32_BITS] = (mantissa_lo << 11);
+ if ((mantissa_hi & 0x80000) == 0) {
+ /* SNaN: quieten when pushing, else tag as signalling. */
+ if (setTOS)
+ ((IS32 *)FPPtr)[NPX_HIGH_32_BITS] |= 0x40000000;
+ else
+ localtag |= TAG_SNAN_MASK;
+ }
+ }
+ } else {
+ /* It's a boring ordinary number */
+ FPPtr->fpvalue = (FPH)(*(DOUBLE *)FPUpload);
+ }
+ }
+ FPPtr->tagvalue = localtag;
+}
+
+
+/*(
+Name : LoadTByteToFP
+Function : Load a 80-bit real value from intel memory and convert
+ it to FPH
+)*/
+
+
+/*
+ * The R80 representation is { IU64 mant; IU16 signexp }
+ * in order to be compatible with the Acpu representation of things.
+ */
+/* Copy a raw 80-bit temp real from intel memory into a stack entry,
+ permuting bytes into host order via the HOST_R80_BYTE_n offsets
+ (inverse of WriteFP80ToIntel). No classification is performed. */
+LOCAL VOID LoadTByteToFP IFN2(FPSTACKENTRY *, FPPtr, VOID *, memPtr)
+{
+ *((IU8 *)FPPtr + HOST_R80_BYTE_0) = *((IU8 *)memPtr + 0);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_1) = *((IU8 *)memPtr + 1);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_2) = *((IU8 *)memPtr + 2);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_3) = *((IU8 *)memPtr + 3);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_4) = *((IU8 *)memPtr + 4);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_5) = *((IU8 *)memPtr + 5);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_6) = *((IU8 *)memPtr + 6);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_7) = *((IU8 *)memPtr + 7);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_8) = *((IU8 *)memPtr + 8);
+ *((IU8 *)FPPtr + HOST_R80_BYTE_9) = *((IU8 *)memPtr + 9);
+}
+
+
+/*(
+Name : Loadr80ToFP
+Function : Load a 80-bit real value from intel memory
+)*/
+
+
+/* Load an 80-bit real lazily: copy the raw bytes and tag the entry as
+ unevaluated R80 — conversion/classification happens later via
+ TestUneval/ConvertR80. */
+LOCAL VOID Loadr80ToFP IFN2(FPSTACKENTRY *, FPPtr, VOID *, memPtr)
+{
+ LoadTByteToFP(FPPtr, memPtr);
+ FPPtr->tagvalue = TAG_R80_MASK;
+}
+
+
+/* Evaluate a lazily-loaded 80-bit real in place: classify it (zero,
+ denormal, infinity, NaN, unnormal/pseudodenorm = unsupported) and,
+ for representable values, convert to host format via CVTR80FPH.
+ Exponents outside the host range default to infinity or zero. */
+LOCAL VOID ConvertR80 IFN1(FPSTACKENTRY *, memPtr)
+{
+IU32 mantissa_hi;
+IU32 mantissa_lo;
+IU16 exp_value;
+
+ CopyR80(FPUpload, (VOID *)&(memPtr->fpvalue));
+ if (((FP80 *)&(FPUpload->fpvalue))->sign_exp.sign != 0) {
+ memPtr->tagvalue = TAG_NEGATIVE_MASK;
+ } else {
+ memPtr->tagvalue = 0;
+ }
+ exp_value = ((FP80 *)&(FPUpload->fpvalue))->sign_exp.exp;
+ mantissa_hi = ((FP80 *)&(FPUpload->fpvalue))->mant_hi;
+ mantissa_lo = ((FP80 *)&(FPUpload->fpvalue))->mant_lo;
+ /* Now check the exponent... */
+ if ((exp_value >= (16383-HOST_BIAS)) && (exp_value <= (16383+HOST_BIAS))) {
+ /* It's a boring ordinary number */
+ /* But let's check that it isn't an unnormal (integer bit clear) */
+ if ((mantissa_hi & 0x80000000) == 0) {
+ memPtr->tagvalue |= TAG_UNSUPPORTED_MASK;
+ } else {
+ CVTR80FPH(memPtr, FPUpload);
+ }
+ return;
+ }
+ if (exp_value == 0) {
+ /* It's either zero or denormal */
+ /* It's only meaningful to check for a denorm if HOST_BIAS
+ is equal to or greater than 16383. Otherwise we can do
+ nothing except set the thing to zero.
+ */
+#if (HOST_BIAS >= 16383)
+ if ((mantissa_hi | mantissa_lo) == 0) {
+ /* It's zero */
+ memPtr->tagvalue |= TAG_ZERO_MASK;
+ } else {
+ /* It's a denormal */
+ /* First, check it isn't a pseudodenorm */
+ if ((mantissa_hi & 0x80000000) != 0) {
+ memPtr->tagvalue |= TAG_UNSUPPORTED_MASK;
+ } else {
+ memPtr->tagvalue |= TAG_DENORMAL_MASK;
+ CVTR80FPH(memPtr, FPUpload);
+ }
+ }
+#else
+ /* It's zero either way */
+ if ((mantissa_hi | mantissa_lo) != 0) {
+ /* It's a denormal */
+ memPtr->tagvalue |= TAG_DENORMAL_MASK;
+ }
+ memPtr->tagvalue |= TAG_ZERO_MASK;
+#endif
+ } else {
+ if ((mantissa_hi & 0x80000000) == 0) {
+ /* Integer bit clear with nonzero exponent: unsupported. */
+ memPtr->tagvalue |= TAG_UNSUPPORTED_MASK;
+ } else {
+ if (exp_value == 32767) {
+ /* It's either infinity or a NaN */
+ if ((mantissa_hi == 0x80000000) && mantissa_lo == 0) {
+ /* It's infinity */
+ memPtr->tagvalue |= TAG_INFINITY_MASK;
+ } else {
+ memPtr->tagvalue |= TAG_NAN_MASK;
+ /* Is it quiet or signalling? */
+ if ((mantissa_hi & 0x40000000) == 0) {
+ /* It's a signalling NaN */
+ memPtr->tagvalue |= TAG_SNAN_MASK;
+ }
+ /* Must load up the mantissa of the NaN */
+ ((IU32 *)memPtr)[NPX_HIGH_32_BITS] = mantissa_hi;
+ ((IU32 *)memPtr)[NPX_LOW_32_BITS] = mantissa_lo;
+ }
+ } else {
+ /* Out of host range: saturate. */
+ if (exp_value > 16384) {
+ /* Default to infinity */
+ memPtr->tagvalue |= TAG_INFINITY_MASK;
+ } else {
+ /* Default to zero */
+ memPtr->tagvalue |= TAG_ZERO_MASK;
+ }
+ }
+ }
+ }
+}
+
+
+
+/*(
+Name : PostCheckOUP
+Function : This generator is associated with the result of an
+ instruction emulation whose result, an FPH, is to
+ be written out to the stack. We check for O, U anf
+ P exceptions here, but we make no attempt to write out
+ the result. This is because the writing of the result
+ is independent of these exceptions, since for results
+ being written to the stack, delivery of the result
+ cannot be prevented even where these exceptions are
+ unmasked.
+)*/
+
+
+/* After a host FP operation, translate host overflow/underflow/
+ precision exceptions into NPX status bits; for unmasked cases adjust
+ the result and set NpxException so the caller traps. */
+LOCAL VOID PostCheckOUP IFN0()
+{
+ if (HostGetOverflowException() != 0) {
+ NpxStatus |= SW_OE_MASK; /* Set the overflow bit */
+ /* For the masked overflow case, the result delivered by */
+ /* the host will be correct, provided it is IEEE compliant. */
+ if ((NpxControl & CW_OM_MASK) == 0) {
+ AdjustOverflowResponse();
+ NpxStatus |= SW_ES_MASK;
+ NpxException = TRUE;
+ }
+ } else {
+ /* Overflow and underflow being mutually exclusive... */
+ if (HostGetUnderflowException() != 0) {
+ NpxStatus |= SW_UE_MASK;
+ if ((NpxControl & CW_UM_MASK) == 0) {
+ AdjustUnderflowResponse();
+ NpxStatus |= SW_ES_MASK;
+ NpxException = TRUE;
+ }
+ }
+ }
+ if (HostGetPrecisionException() != 0) {
+ SetPrecisionBit();
+ if ((NpxControl & CW_PM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ NpxException = TRUE;
+ }
+ }
+}
+
+
+
+/*(
+Name : CalcTagword
+Function : To calculate the tagword associated with a value
+ and write out the result where appropriate.
+)*/
+
+
+/* Store the global result FPRes into a stack entry and derive its tag
+ (sign / zero / denormal / infinity); finally trap if PostCheckOUP
+ flagged an unmasked exception. Note the #if: when HOST_BIAS < 16383
+ the else-branch collapses so a nonzero value with exp==0 is ignored. */
+LOCAL VOID CalcTagword IFN1(FPSTACKENTRY *, FPPtr)
+{
+ IU16 tagword;
+
+ FPPtr->fpvalue = FPRes;
+ if (((FPHOST *)&(FPPtr->fpvalue))->hiword.sign == 1) {
+ tagword = TAG_NEGATIVE_MASK;
+ } else {
+ tagword = 0;
+ }
+ if (((FPHOST *)&(FPPtr->fpvalue))->hiword.exp == 0) {
+ /* It's either a zero or a denorm */
+ if (FPPtr->fpvalue == 0.0) {
+ /* It's a zero */
+ tagword |= TAG_ZERO_MASK;
+#if (HOST_BIAS >= 16383)
+ } else {
+ /* It's a denorm */
+ tagword |= TAG_DENORMAL_MASK;
+#endif
+ }
+ } else {
+ if (((FPHOST *)&(FPPtr->fpvalue))->hiword.exp == HOST_MAX_EXP) {
+ /* It MUST be infinity as we can't generate NaNs */
+ tagword |= TAG_INFINITY_MASK;
+ }
+ }
+ FPPtr->tagvalue = tagword;
+ if (NpxException) {
+ DoNpxException();
+ }
+}
+
+
+
+/*(
+Name : SignalStackUnderflow
+Function : To set the required bits in the status word following
+ a stack underflow exception, and to issue the required
+ response.
+)*/
+
+/* Stack underflow response: set IE+SF, C1=0 (underflow direction);
+ trap if IE is unmasked, otherwise deliver the indefinite QNaN. */
+LOCAL VOID SignalStackUnderflow IFN1(FPSTACKENTRY *, StackPtr)
+{
+ NpxStatus |= (SW_IE_MASK | SW_SF_MASK);
+ FlagC1(0);
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE; /* Just in case it was set */
+ } else {
+ WriteIndefinite(StackPtr);
+ }
+}
+
+
+/*(
+Name : SignalSNaN
+Function : To set the required bits in the status word following
+ detection of a signalling NaN.
+)*/
+
+
+/* Signalling-NaN response: set IE (not a stack fault); trap when IE
+ is unmasked and cancel any pending pop. The operand is untouched. */
+LOCAL VOID SignalSNaN IFN1(FPSTACKENTRY *, StackPtr)
+{
+ NpxStatus |= SW_IE_MASK;
+ NpxStatus &= ~SW_SF_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE;
+ }
+}
+
+
+/*(
+Name : SignalInvalid
+Function : To set the required bits in the status word following
+ any standard "invalid" exception
+)*/
+
+
+/* Standard invalid-operation response: set IE (not a stack fault);
+ trap when unmasked, otherwise deliver the indefinite QNaN. */
+LOCAL VOID SignalIndefinite IFN1(FPSTACKENTRY *, StackPtr)
+{
+ NpxStatus |= SW_IE_MASK;
+ NpxStatus &= ~SW_SF_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE;
+ } else {
+ WriteIndefinite(StackPtr);
+ }
+}
+
+
+
+/* Invalid-operation response with no destination: set IE (not a stack
+ fault) and trap when unmasked; no indefinite value is written. */
+LOCAL VOID SignalInvalid IFN0()
+{
+ NpxStatus |= SW_IE_MASK;
+ NpxStatus &= ~SW_SF_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE;
+ }
+}
+
+
+
+/*(
+Name : WriteIndefinite
+Function : Write the value "indefinite" into the location
+)*/
+
+/* Store the real "indefinite" QNaN (negative, mantissa 0xc0000000:0)
+ into a stack entry and tag it as a negative NaN. */
+LOCAL VOID WriteIndefinite IFN1(FPSTACKENTRY *, StackPtr)
+{
+ StackPtr->tagvalue = (TAG_NEGATIVE_MASK | TAG_NAN_MASK);
+ (((IU32 *)StackPtr)[NPX_HIGH_32_BITS]) = 0xc0000000;
+ (((IU32 *)StackPtr)[NPX_LOW_32_BITS]) = 0;
+}
+
+
+
+/* This generator should always be inlined. */
+
+
+/* NaN-operand resolution for two-operand instructions. Relies on the
+ global tag_xor already holding src1->tagvalue ^ src2->tagvalue: a
+ clear NAN bit there means both operands are NaNs (pick the biggest);
+ otherwise copy the single NaN to the destination, quietening it and
+ raising invalid if it was signalling. */
+LOCAL VOID Test2NaN IFN3(IU16, destIndex, FPSTACKENTRY *, src1_addr, FPSTACKENTRY *, src2_addr)
+{
+ /* Are they both NaNs? */
+ if ((tag_xor & TAG_NAN_MASK) == 0) {
+ /* Yes, they are. */
+ WriteBiggestNaN(destIndex, src1_addr, src2_addr);
+ } else {
+ /* No, only one NaN. */
+ if ((src1_addr->tagvalue & TAG_NAN_MASK) != 0) {
+ /* It was src1. */
+ src2_addr = StackEntryByIndex(destIndex);
+ CopyFP(src2_addr, src1_addr);
+ if ((src2_addr->tagvalue & TAG_SNAN_MASK) != 0) {
+ src2_addr->tagvalue ^= TAG_SNAN_MASK;
+ SignalInvalid();
+ (((IU32 *)src2_addr)[NPX_HIGH_32_BITS]) |= 0x40000000;
+ }
+ } else {
+ /* It was src2. */
+ src1_addr = StackEntryByIndex(destIndex);
+ CopyFP(src1_addr, src2_addr);
+ if ((src1_addr->tagvalue & TAG_SNAN_MASK) != 0) {
+ src1_addr->tagvalue ^= TAG_SNAN_MASK;
+ SignalInvalid();
+ (((IU32 *)src1_addr)[NPX_HIGH_32_BITS]) |= 0x40000000;
+ }
+ }
+ }
+}
+
+
+
+/*(
+Name : F2XM1
+Function : Compute 2**x - 1
+Operation : ST <- (2**ST - 1)
+Flags : C1 set as per table 15-1
+Exceptions : P, U, D, I, IS
+Valid range : -1 < ST < +1
+Notes : If ST is outside the required range, the result is
+ undefined.
+)*/
+
+
+/* F2XM1: ST <- 2**ST - 1 for real/denormal ST; special cases handled
+ per the 387 spec (zero passes through, -inf -> -1, +inf unchanged,
+ empty -> stack underflow, SNaN quietened, unsupported -> indefinite). */
+GLOBAL VOID F2XM1 IFN0()
+{
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(TOSPtr);
+ /* Check if a real value... */
+ if ((TOSPtr->tagvalue & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ /* We can just write the value straight out */
+ FPRes = pow(2.0, TOSPtr->fpvalue) - 1.0;
+ PostCheckOUP();
+ /* This could return anything really.... */
+ CalcTagword(TOSPtr);
+ return;
+ } else {
+ /* Some funny bit was set. Check for the possibilities */
+ /* We begin with the most obvious cases... */
+ /* Response to zero is to return zero with same sign */
+ if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+ return; /* The required result! */
+ }
+ /* We do denorm checking and bit setting ourselves because this */
+ /* reduces the overhead if the thing is masked. */
+ if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ } else {
+ HostClearExceptions();
+ FPRes = pow(2.0, TOSPtr->fpvalue) - 1.0;
+ PostCheckOUP();
+ /* Could return a denorm, zero, real, infinity... */
+ CalcTagword(TOSPtr);
+ }
+ return;
+ }
+ /* If -infinity, return -1. If +infinity, return that */
+ /* Sensible enough really, I suppose */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ memset((char*)TOSPtr,0,sizeof(FPSTACKENTRY));
+ TOSPtr->fpvalue = -1.0;
+ TOSPtr->tagvalue = TAG_NEGATIVE_MASK;
+ }
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
+ MakeNaNQuiet(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ return;
+ }
+ }
+}
+
+/*(
+Name : FABS
+Function : Make the value absolute
+Operation : sign bit of ST <- 0
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : IS
+Valid range : Any
+Notes : Note that only the IS exception can be flagged. All
+ other error conditions are ignored, even a signalling
+ NaN! We ALWAYS attempt to make the value positive.
+)*/
+
+
+/* FABS: clear the sign of ST unconditionally (even for NaNs); the only
+ exception raised is stack underflow on an empty register. */
+GLOBAL VOID FABS IFN0()
+{
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(TOSPtr);
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) == 0) {
+ /* Now clear the negative bit. */
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ TOSPtr->tagvalue ^= TAG_NEGATIVE_MASK;
+ /* If the value is real or denormal, we'll want to change the MSB */
+ if ((TOSPtr->tagvalue & ~TAG_DENORMAL_MASK) == 0) {
+ ((FPHOST *)&(TOSPtr->fpvalue))->hiword.sign = 0;
+ }
+ }
+ } else {
+ SignalStackUnderflow(TOSPtr);
+ }
+}
+
+/*(
+Name : FADD
+Function : Add two numbers together
+Operation : Dest <- Src1 + Src2
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : IS
+Valid range : Any
+Notes : Note the dependence on the rounding mode when
+ calculating the sign of zero for situations
+ where two zeroes of different sign are input.
+)*/
+
+
+/* FADD front end: load the second operand (stack or memory), perform
+ the generic add, and pop afterwards for the FADDP forms unless an
+ exception path cleared the deferred-pop flag. */
+GLOBAL VOID FADD IFN3(IU16, destIndex, IU16, src1Index, VOID *, src2)
+{
+ IU16 src2Index;
+
+ LoadValue(src2, &src2Index);
+ if (POPST) {
+ DoAPop=TRUE;
+ }
+ GenericAdd(destIndex, src1Index, src2Index);
+ if (POPST) {
+ if (DoAPop) {
+ PopStack();
+ }
+ }
+}
+
+
+
+/*(
+Name : GenericAdd
+Function : To return dest <- src1+src2
+)*/
+
+
+LOCAL VOID GenericAdd IFN3(IU16, destIndex, IU16, src1Index, IU16, src2Index)
+{
+ FPSTACKENTRY *src1_addr;
+ FPSTACKENTRY *src2_addr;
+
+ /* dest <- src1 + src2.  Communicates through file globals: tag_or and
+ tag_xor are scratch, FPRes carries the host result into CalcTagword,
+ and DoAPop is cleared on an unmasked denormal fault so the popping
+ wrappers (FADDP etc.) leave the stack untouched. */
+ src1_addr = StackEntryByIndex(src1Index);
+ src2_addr = StackEntryByIndex(src2Index);
+
+ /* Clear C1 */
+ FlagC1(0);
+ /* If the only tagword bit set is negative then just proceed */
+ TestUneval(src1_addr);
+ TestUneval(src2_addr);
+ tag_or = (src1_addr->tagvalue | src2_addr->tagvalue);
+ if ((tag_or & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ FPRes = src1_addr->fpvalue + src2_addr->fpvalue;
+ /* Reuse one of the above to calculate the destination */
+ src1_addr = StackEntryByIndex(destIndex);
+ PostCheckOUP();
+ /* Could return virtually anything */
+ CalcTagword(src1_addr);
+ } else {
+ /* Some funny bit was set. Check for the possibilities */
+ /* The odds on an 'empty', 'unsupported' or 'nan' must be low... */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalIndefinite(src1_addr);
+ } else {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalStackUnderflow(src1_addr);
+ } else {
+ /* It must be a NaN type thing. */
+ /* Calculate the xor of the tagwords. */
+ /* (tag_xor is a global read by Test2NaN.) */
+ tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
+ Test2NaN(destIndex, src1_addr, src2_addr);
+ }
+ }
+ return;
+ }
+ /* Check for the denorm case...I think the odds on it are low, however */
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ /* Unmasked DE: raise the exception and suppress any pop. */
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE;
+ return;
+ } else {
+ /* First, make sure that we don't have any zeros or */
+ /* infinities lurking around... */
+ if ((tag_or & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
+ HostClearExceptions();
+ FPRes = src1_addr->fpvalue + src2_addr->fpvalue;
+ /* Reuse one of the above to calculate the destination */
+ src1_addr = StackEntryByIndex(destIndex);
+ PostCheckOUP();
+ /* Could return anything */
+ CalcTagword(src1_addr);
+ return;
+ }
+ /* If there were zeros or infinities then we go on to the */
+ /* appropriate code */
+ }
+ }
+ tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
+ /* Check for the case of zero... This is very likely */
+ if ((tag_or & TAG_ZERO_MASK) != 0) {
+ if ((tag_xor & TAG_ZERO_MASK) != 0) {
+ /* Only one zero: the result is simply the other operand. */
+ if ((src1_addr->tagvalue & TAG_ZERO_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ CopyFP(src1_addr, src2_addr);
+ } else {
+ src2_addr = StackEntryByIndex(destIndex);
+ CopyFP(src2_addr, src1_addr);
+ }
+ } else {
+ /* Both are zeros. Do they have the same sign? */
+ src1_addr = StackEntryByIndex(destIndex);
+ if ((tag_xor & TAG_NEGATIVE_MASK) != 0) {
+ /* (+0) + (-0): IEEE 754 gives -0 only when rounding */
+ /* towards minus infinity, +0 otherwise. */
+ if (npxRounding == ROUND_NEG_INFINITY) {
+ src1_addr->tagvalue = (TAG_ZERO_MASK | TAG_NEGATIVE_MASK);
+ } else {
+ src1_addr->tagvalue = TAG_ZERO_MASK;
+ }
+ }
+ }
+ return;
+ }
+ /* The only funny bit left is infinity */
+ if ((tag_xor & TAG_INFINITY_MASK) == 0) {
+ /* They are both infinity. */
+ /* If they are the same sign, copy either */
+ src1_addr = StackEntryByIndex(destIndex);
+ if ((tag_xor & TAG_NEGATIVE_MASK) == 0) {
+ src1_addr->tagvalue = tag_or;
+ } else {
+ /* If opposite signed, raise Invalid */
+ SignalIndefinite(src1_addr);
+ }
+ } else {
+ /* Only one is infinity. That is the result. */
+ if ((src1_addr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ src2_addr = StackEntryByIndex(destIndex);
+ src2_addr->tagvalue = src1_addr->tagvalue;
+ } else {
+ src1_addr = StackEntryByIndex(destIndex);
+ src1_addr->tagvalue = src2_addr->tagvalue;
+ }
+ }
+ }
+}
+
+
+
+/* AddBCDByte(). This generator should be inlined.
+ This generator add in a BCD byte to a grand total.
+*/
+
+LOCAL VOID AddBCDByte IFN2(FPU_I64 *, total, IU8, byte_val)
+{
+    /* Fold one packed-BCD byte (two digits, value 10*hi + lo) into the
+       64-bit accumulator.  Adding the raw byte contributes 16*hi + lo,
+       so subtract 6*hi (as 4*hi then 2*hi) to correct the high digit. */
+    Add64Bit8Bit(total, byte_val);
+    if (byte_val >= 0x10) {        /* A high digit is present. */
+        byte_val = (byte_val & 0xf0) >> 2;    /* 4 * high digit */
+        Sub64Bit8Bit(total, byte_val);
+        byte_val >>= 1;                       /* 2 * high digit */
+        Sub64Bit8Bit(total, byte_val);
+    }
+}
+
+
+
+/* FBLD: Load BCD value from intel memory.
+ The algorithm used here is identical to that in the generic NPX.
+ We take each BCD digit and multiply it up by an appropriate amount
+ (1, 10, 100, 1000 etc) in order to create two nine digit 32-bit binary
+ values. We then convert the word with the high digits (d17-d9) into
+ floating point format and multiply by the representation of the value
+ for 10**9. This is then stored away (in FPTEMP) and the word with the
+ low digits (d8-d0) is converted to floating point format and added to
+ the value in FPTEMP. This is then the final binary representation of
+ the original BCD value that can be stored at TOS. */
+
+/*(
+Name : FBLD
+Function : Load the BCD value in intel memory onto TOS
+Operation : ST <- Convert to FPH(memPtr);
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : IS
+Valid range : -999999999999999999 to 999999999999999999
+)*/
+
+
+GLOBAL VOID FBLD IFN1(IU8 *, memPtr)
+{
+    /* FBLD: push the ten-byte packed BCD operand.  The bytes are copied
+       verbatim and tagged TAG_BCD_MASK; conversion to binary is deferred
+       until the value is first evaluated. */
+    FlagC1(0);
+    TOSPtr = StackEntryByIndex(7);    /* Prospective new TOS. */
+    if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+        LoadTByteToFP(TOSPtr, memPtr);
+        TOSPtr->tagvalue = TAG_BCD_MASK;
+    } else {
+        /* Register in use: stack overflow (highly unlikely). */
+        SignalStackOverflow(TOSPtr);
+    }
+}
+
+
+LOCAL VOID ConvertBCD IFN1(FPSTACKENTRY *, bcdPtr)
+{
+ IU8 *memPtr = (IU8 *)&(bcdPtr->fpvalue);
+ FPU_I64 total;
+
+ /* Convert a lazily-loaded packed-BCD stack entry to binary.
+ Accumulates the 18 digits most-significant byte first:
+ total = ((d17d16)*100 + d15d14)*100 + ... + d1d0. */
+ Set64Bit(&total, 0);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_1]); /* Get d17d16 */
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_2]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_3]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_4]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_5]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_6]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_7]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_8]);
+ Mul64Bit8Bit(&total, 100);
+ AddBCDByte(&total, memPtr[HOST_R80_BYTE_9]);
+ /* NOTE(review): CVTI64FPH apparently leaves the converted value in the
+ global FPRes, which CalcTagword then stores into bcdPtr -- confirm. */
+ CVTI64FPH(&total);
+ /* Byte 0 bit 7 is the BCD sign bit. */
+ if ((*(memPtr + 0) & 0x80) != 0) {
+ FPRes = -FPRes; /* Make it negative! */
+ }
+ CalcTagword(bcdPtr); /* Silly...it can only be negative */
+ /* or zero. */
+}
+
+
+/* FBSTP: Store binary coded decimal and pop.
+This uses much the same algorithm as before, but reversed. You begin
+by checking that the value at TOS is real, then compare it against the
+maximum possible value (having first forced the sign bit to be zero).
+If it's OK, then turn it into a 64 bit integer and perform the
+required repeated subtractions to calculate each of the BCD digits. */
+
+
+GLOBAL VOID FBSTP IFN1(IU8 *, memPtr)
+{
+    FPH local_fp;
+    IS8 nibble_num;
+    IU8 byte_val;
+    FPU_I64 as64bit;
+
+    /* FBSTP: round ST to integer, store it as ten bytes of packed BCD
+       (sign in byte 0 bit 7, digits d17..d0 in bytes 1..9), then pop.
+       Fixes vs. the unrolled original: the nine identical digit-extraction
+       copies are now a loop driven by the previously-unused nibble_num,
+       and an unreachable "indefinite encoding" branch (guarded by a zero
+       test that had already returned) has been removed. */
+
+    /* Clear C1 */
+    FlagC1(0);
+    /* Lazily-evaluated TOS: raw BCD can be stored back verbatim; a raw
+       80-bit real must be converted before use. */
+    if ((TOSPtr->tagvalue & UNEVALMASK) != 0) {
+        switch (TOSPtr->tagvalue & UNEVALMASK) {
+        case TAG_BCD_MASK:
+            /* We just copy the bytes directly */
+            WriteFP80ToIntel(memPtr, TOSPtr);
+            PopStack();
+            return;
+        case TAG_R80_MASK:
+            ConvertR80(TOSPtr);
+            break;
+        }
+    }
+    if ((TOSPtr->tagvalue & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
+        /* A real (possibly denormal) value: range-check it first. */
+        if (fabs(TOSPtr->fpvalue) >= MaxBCDValue) {
+            /* Too big to represent in 18 digits. */
+            SignalInvalid();
+            SignalBCDIndefinite((IU8 *)memPtr);
+            PopStack();
+            return;
+        }
+        /* Round to integer, force positive, and widen to 64 bits. */
+        local_fp = npx_rint(TOSPtr->fpvalue);
+        ((FPHOST *)&local_fp)->hiword.sign = 0;
+        CVTFPHI64(&as64bit, &local_fp);
+        /* Extract two BCD digits per output byte by repeated subtraction
+           of decreasing powers of ten (tables indexed high-digit-first). */
+        for (nibble_num = 0; nibble_num < 9; nibble_num++) {
+            byte_val = 0;
+            while (Cmp64BitGTE(&as64bit, &BCDHighNibble[nibble_num])) {
+                byte_val += 1;
+                Sub64Bit64Bit(&as64bit, &BCDHighNibble[nibble_num]);
+            }
+            byte_val <<= 4;
+            while (Cmp64BitGTE(&as64bit, &BCDLowNibble[nibble_num])) {
+                byte_val += 1;
+                Sub64Bit64Bit(&as64bit, &BCDLowNibble[nibble_num]);
+            }
+            *(memPtr + nibble_num + 1) = byte_val;
+        }
+        if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+            *(memPtr + 0) = 0x80;
+            /* Restore the sign so the precision compare below is exact. */
+            ((FPHOST *)&local_fp)->hiword.sign = 1;
+        } else {
+            *(memPtr + 0) = 0;
+        }
+        /* Can't prevent delivery of result with unmasked precision
+           exception... */
+        if (local_fp != TOSPtr->fpvalue) {
+            SetPrecisionBit();
+            if ((NpxControl & CW_PM_MASK) == 0) {
+                NpxStatus |= SW_ES_MASK;
+                PopStack();
+                DoNpxException();
+                return;
+            }
+        }
+    } else {
+        if ((TOSPtr->tagvalue & TAG_ZERO_MASK) == 0) {
+            /* Anything else: Infinity, NaN or whatever... */
+            SignalInvalid();
+            SignalBCDIndefinite((IU8 *)memPtr);
+            PopStack();
+            return;
+        }
+        /* Zero: emit eighteen zero digits with the correct sign. */
+        for (nibble_num = 1; nibble_num <= 9; nibble_num++) {
+            *(memPtr + nibble_num) = (IU8)0;
+        }
+        if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+            *(memPtr + 0) = 0x80;
+        } else {
+            *(memPtr + 0) = 0;
+        }
+    }
+    PopStack();
+}
+
+
+
+/*(
+Name : FCHS
+Function : Change the sign of the value at TOS
+Operation : ST <- Change sign (ST)
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : IS
+Valid range : Any
+)*/
+
+
+GLOBAL VOID FCHS IFN0()
+{
+    /* FCHS: negate ST.  Only stack underflow can fault; otherwise the
+       instruction always succeeds, even on NaNs and infinities. */
+    FlagC1(0);
+    TestUneval(TOSPtr);
+    if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+        SignalStackUnderflow(TOSPtr);
+    } else {
+        /* Flip the sign in the tagword, and additionally in the host
+           value for entries that hold a real/denormal representation. */
+        TOSPtr->tagvalue ^= TAG_NEGATIVE_MASK;
+        if ((TOSPtr->tagvalue & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
+            ((FPHOST *)&(TOSPtr->fpvalue))->hiword.sign ^= 1;
+        }
+    }
+}
+
+
+
+/*(
+Name : FCLEX
+Function : Clear the exception flags, exception status flag
+ and busy flag in the FPU status word.
+Operation : SW[0..7]<-0; SW[15]<-0
+Flags : C0, C1, C2 and C3 undefined
+Exceptions : None
+Valid range : Any
+)*/
+
+
+GLOBAL VOID FCLEX IFN0()
+{
+ /* Clear the exception flags, error-summary and busy bits in the
+ status word; condition codes and TOS field are untouched. */
+ NpxStatus &= FCLEX_MASK;
+}
+
+
+/* Comparision opcodes: The following opcodes are all taken care of
+in this routine: FCOM m32r, FCOM m64r, FCOM ST(i), FCOM, FCOMP m32real,
+FCOMP m64real, FCOMP ST(i), FCOMP, FCOMPP, FICOM m16i, FICOM m32i,
+FICOMP m16i, FICOMP m32i.
+The method is simple: In every case, one of the two operands for which
+comparison is to occur is ST. The second operand is either one of the
+four memory operand types specified, or another stack element, ST(i).
+There are, in addition, two possible control variables - POPST and
+DOUBLEPOP, which set appropriate values in global variables.
+*/
+
+
+GLOBAL VOID FCOM IFN1(VOID *, src2)
+{
+    IU16 src2Index;
+
+    /* Common entry for all compare forms (FCOM/FCOMP/FCOMPP/FICOM...).
+       Resolve the operand, compare against ST, then pop zero, one or
+       two entries as the POPST/DOUBLEPOP control variables dictate.
+       GenericCompare clears DoAPop when an unmasked fault occurs. */
+    LoadValue(src2, &src2Index);
+    if (POPST || DOUBLEPOP) {
+        DoAPop = TRUE;
+    }
+    GenericCompare(src2Index);
+    if ((POPST || DOUBLEPOP) && DoAPop) {
+        PopStack();
+        if (DOUBLEPOP) {
+            PopStack();
+        }
+    }
+}
+
+
+
+LOCAL VOID GenericCompare IFN1(IU16, src2Index)
+{
+ FPSTACKENTRY *src2_addr;
+
+ /* Compare ST with the given stack entry, encoding the outcome in the
+ C3/C2/C0 condition-code bits of NpxStatus (INTEL_COMP_GT/LT/EQ/NC).
+ The UNORDERED control variable distinguishes FUCOM-style compares,
+ which only fault on signalling NaNs. */
+ src2_addr = StackEntryByIndex(src2Index);
+
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(TOSPtr);
+ TestUneval(src2_addr);
+ tag_or = (TOSPtr->tagvalue | src2_addr->tagvalue);
+ /* If the only tagword bit set is negative then just proceed */
+ if ((tag_or & ~TAG_NEGATIVE_MASK) == 0) {
+ NpxStatus &= C3C2C0MASK; /* Clear those bits */
+ if (TOSPtr->fpvalue > src2_addr->fpvalue) {
+ NpxStatus |= INTEL_COMP_GT;
+ } else {
+ if (TOSPtr->fpvalue < src2_addr->fpvalue) {
+ NpxStatus |= INTEL_COMP_LT;
+ } else {
+ NpxStatus |= INTEL_COMP_EQ;
+ }
+ }
+ } else {
+ /* Everything was not sweetness and light... */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ } else {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ } else {
+ /* It must be a NaN. Just set the "not comparable" result */
+ if (UNORDERED) {
+ /* FUCOM: only a signalling NaN raises Invalid. */
+ if ((tag_or & TAG_SNAN_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ }
+ } else {
+ SignalIndefinite(TOSPtr);
+ }
+ }
+ }
+ NpxStatus &= C3C2C0MASK;
+ NpxStatus |= INTEL_COMP_NC;
+ return;
+ }
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ } else {
+ /* We can do it now, providing we've got no zeros or infinities */
+ if ((tag_or & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
+ NpxStatus &= C3C2C0MASK; /* Clear those bits */
+ if (TOSPtr->fpvalue > src2_addr->fpvalue) {
+ NpxStatus |= INTEL_COMP_GT;
+ } else {
+ if (TOSPtr->fpvalue < src2_addr->fpvalue) {
+ NpxStatus |= INTEL_COMP_LT;
+ } else {
+ NpxStatus |= INTEL_COMP_EQ;
+ }
+ }
+ return;
+ }
+ }
+ }
+ /* We can calculate the result immediately based on any combination */
+ /* of zero, infinity and negative bits. These are the only bits left. */
+ /* We will calculate the result using a little table */
+ /* First, get the index: 6 bits -- ST's INF/ZERO/NEG in bits 5..3, */
+ /* the other operand's in bits 2..0. */
+ tag_or = (TOSPtr->tagvalue & 0x7);
+ tag_or <<= 3;
+ tag_or |= (src2_addr->tagvalue & 0x7);
+ /* This table looks as shown below: */
+ /* TOSPtr Other Value Result */
+ /* INF ZERO NEG INF ZERO NEG */
+ /* 0 0 0 0 1 0 COMP_GT */
+ /* 0 0 0 0 1 1 COMP_GT */
+ /* 0 0 0 1 0 0 COMP_LT */
+ /* 0 0 0 1 0 1 COMP_GT */
+ /* 0 1 0 0 0 0 COMP_LT */
+ /* 0 1 0 0 0 1 COMP_GT */
+ /* 0 1 0 0 1 0 COMP_EQ */
+ /* 0 1 0 0 1 1 COMP_EQ */
+ /* 0 1 0 1 0 0 COMP_LT */
+ /* 0 1 0 1 0 1 COMP_GT */
+ /* 0 1 1 0 0 0 COMP_LT */
+ /* 0 1 1 0 0 1 COMP_GT */
+ /* 0 1 1 0 1 0 COMP_EQ */
+ /* 0 1 1 0 1 1 COMP_EQ */
+ /* 0 1 1 1 0 0 COMP_LT */
+ /* 0 1 1 1 0 1 COMP_GT */
+ /* 1 0 0 0 0 0 COMP_GT */
+ /* 1 0 0 0 0 1 COMP_GT */
+ /* 1 0 0 0 1 0 COMP_GT */
+ /* 1 0 0 0 1 1 COMP_GT */
+ /* 1 0 0 1 0 0 COMP_EQ */
+ /* 1 0 0 1 0 1 COMP_GT */
+ /* 1 0 1 0 0 0 COMP_LT */
+ /* 1 0 1 0 0 1 COMP_LT */
+ /* 1 0 1 0 1 0 COMP_LT */
+ /* 1 0 1 0 1 1 COMP_LT */
+ /* 1 0 1 1 0 0 COMP_LT */
+ /* 1 0 1 1 0 1 COMP_EQ */
+ /* */
+ /* All other values are not possible. */
+ NpxStatus &= C3C2C0MASK;
+ NpxStatus |= CompZeroTable[tag_or];
+ return;
+ }
+}
+
+
+/*(
+Name : FCOS
+Function : Calculate the cosine of ST
+Operation : ST <- COSINE(ST)
+Flags : C1, C2 as per table 15-2. C0 and C3 undefined.
+Exceptions : P. U, D, I, IS
+Valid range : |ST| < 2**63.
+)*/
+
+GLOBAL VOID FCOS IFN0()
+{
+ /* FCOS: ST <- cos(ST).  Fast path is a plain real value, handed
+ straight to the host cos().  Zero returns exactly +1; infinity sets
+ C2 (operand out of range) and leaves ST unchanged. */
+ /* Clear C1 */
+ FlagC1(0);
+ /* Clear C2 */
+ FlagC2(0);
+ TestUneval(TOSPtr);
+ if ((TOSPtr->tagvalue & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ /* We can just write the value straight out */
+ FPRes = cos(TOSPtr->fpvalue);
+ PostCheckOUP();
+ /* The return value must be in the range -1 to +1. */
+ CalcTagword(TOSPtr);
+ return;
+ } else {
+ /* Lets do the most probable cases first... */
+ /* Response to either zero is to return +1 */
+ if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+ memset((char*)TOSPtr,0,sizeof(FPSTACKENTRY));
+ TOSPtr->fpvalue = 1.0;
+ TOSPtr->tagvalue = 0;
+ return;
+ }
+ /* Lets check for a denormal */
+ if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ /* Unmasked DE: fault without touching ST. */
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ } else {
+ HostClearExceptions();
+ FPRes = cos(TOSPtr->fpvalue);
+ PostCheckOUP();
+ /* The return value must be in the range -1 to +1 */
+ CalcTagword(TOSPtr);
+ }
+ return;
+ }
+ /* Or it could possibly be infinity... */
+ /* For this, the C2 bit is set and the result remains */
+ /* unchanged. */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ FlagC2(1);
+ return;
+ }
+ /* It was one of the really wacky bits... */
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
+ MakeNaNQuiet(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ return;
+ }
+ }
+}
+
+
+
+/*(
+Name : FDECSTP
+Function : Subtract one from the TOS
+Operation : if (ST != 0) { ST <- ST-1 } else { ST <- 7 }
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined.
+Exceptions : None
+Valid range : N/A
+)*/
+
+
+GLOBAL VOID FDECSTP IFN0()
+{
+ /* Clear C1 */
+ FlagC1(0);
+ /* Decrement the stack-top pointer: stepping 7 entries forward is a
+ decrement on the 8-entry circular stack (StackEntryByIndex
+ presumably wraps modulo 8 -- confirm).  Register contents and tags
+ are not touched. */
+ TOSPtr = StackEntryByIndex(7);
+}
+
+
+
+/*(
+Name : FDIV
+Function : Divide the two numbers
+Operation : Dest <- Src1 / Src2 or Dest <- Src2 / Src1
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : P, U, O, Z, D, I, IS
+Valid range : Any
+Notes : The REVERSE control variable determines which of the
+ two forms of the operation is used. Popping after a
+ successful execution is controlled by POPST.
+)*/
+
+
+GLOBAL VOID FDIV IFN3(IU16, destIndex, IU16, src1Index, VOID *, src2)
+{
+    IU16 src2Index;
+    IU16 numIndex;
+    IU16 denIndex;
+
+    /* Resolve the second operand, then pick numerator/denominator
+       according to the REVERSE control variable (FDIVR forms). */
+    LoadValue(src2, &src2Index);
+    if (POPST) {
+        DoAPop = TRUE;    /* FDIVP forms pop unless a fault clears this. */
+    }
+    if (REVERSE) {
+        numIndex = src2Index;
+        denIndex = src1Index;
+    } else {
+        numIndex = src1Index;
+        denIndex = src2Index;
+    }
+    GenericDivide(destIndex, numIndex, denIndex);
+    if (POPST && DoAPop) {
+        PopStack();
+    }
+}
+
+
+/*(
+Name : GenericDivide
+Function : To return dest <- src1/src2
+)*/
+
+
+LOCAL VOID GenericDivide IFN3(IU16, destIndex, IU16, src1Index, IU16, src2Index)
+{
+ FPSTACKENTRY *src1_addr;
+ FPSTACKENTRY *src2_addr;
+
+ /* dest <- src1 / src2.  Uses the same global protocol as GenericAdd:
+ tag_or/tag_xor are scratch, FPRes feeds CalcTagword, and DoAPop is
+ cleared on unmasked DE/ZE faults so popping wrappers skip the pop. */
+ src1_addr = StackEntryByIndex(src1Index);
+ src2_addr = StackEntryByIndex(src2Index);
+
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(src1_addr);
+ TestUneval(src2_addr);
+ tag_or = (src1_addr->tagvalue | src2_addr->tagvalue);
+ /* If the only tagword bit set is negative then just proceed */
+ if ((tag_or & (~TAG_NEGATIVE_MASK)) == 0) {
+ HostClearExceptions();
+ FPRes = src1_addr->fpvalue/src2_addr->fpvalue;
+ /* Reuse one of the above to calculate the destination */
+ src1_addr = StackEntryByIndex(destIndex);
+ PostCheckOUP();
+ /* Value could be anything */
+ CalcTagword(src1_addr);
+ } else {
+ /* Some funny bit was set. Check for the possibilities */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalStackUnderflow(src1_addr);
+ } else {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalIndefinite(src1_addr);
+ } else {
+ /* Well, I suppose it has to be the NaN case... */
+ /* Calculate the xor of the tagwords */
+ /* (tag_xor is a global read by Test2NaN.) */
+ tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
+ Test2NaN(destIndex, src1_addr, src2_addr);
+ }
+ }
+ return;
+ }
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ /* Unmasked DE: fault and suppress any pop. */
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop = FALSE;
+ return;
+ } else {
+ if ((tag_or & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
+ /* OK to proceed */
+ HostClearExceptions();
+ FPRes = src1_addr->fpvalue/src2_addr->fpvalue;
+ /* Reuse one of the above to calculate the destination */
+ src1_addr = StackEntryByIndex(destIndex);
+ PostCheckOUP();
+ /* Value could be anything */
+ CalcTagword(src1_addr);
+ return;
+ }
+ }
+ }
+ tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
+ /* Check for infinity as it has higher precendence than zero. */
+ if ((tag_or & TAG_INFINITY_MASK) != 0) {
+ if ((tag_xor & TAG_INFINITY_MASK) == 0) {
+ /* They are both infinity. This is invalid. */
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalIndefinite(src1_addr);
+ } else {
+ /* Only one is infinity. If src1 in infinity, then so */
+ /* is the result (even if src2 is zero). */
+ /* inf/x = inf and x/inf = 0; sign is the xor of the signs. */
+ src2_addr = StackEntryByIndex(destIndex);
+ if ((src1_addr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ tag_or = TAG_INFINITY_MASK;
+ } else {
+ tag_or = TAG_ZERO_MASK;
+ }
+ tag_or |= (tag_xor & TAG_NEGATIVE_MASK);
+ src2_addr->tagvalue = tag_or;
+ }
+ return;
+ }
+ /* The only funny bit left is zero */
+ if ((tag_xor & TAG_ZERO_MASK) != 0) {
+ /* Only one zero. */
+ if ((src1_addr->tagvalue & TAG_ZERO_MASK) == 0) {
+ /* Src2 is zero. Raise divide by zero */
+ NpxStatus |= SW_ZE_MASK;
+ if ((NpxControl & CW_ZM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE;
+ return;
+ } else {
+ /* Masked ZE: deliver infinity with xor of signs. */
+ tag_or = TAG_INFINITY_MASK;
+ }
+ } else {
+ /* Src1 is zero. The result is zero with */
+ /* the xor of the sign bits. */
+ tag_or = TAG_ZERO_MASK;
+ }
+ src1_addr = StackEntryByIndex(destIndex);
+ tag_or |= (tag_xor & TAG_NEGATIVE_MASK);
+ src1_addr->tagvalue = tag_or;
+ } else {
+ /* Both are zeros. This is an invalid operation */
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalIndefinite(src1_addr);
+ }
+ }
+}
+
+
+/*
+Name : FFREE
+Function : Set the 'empty' tagword bit in the destination
+Operation : Tag(dest) <- 'empty'
+Flags : All undefined
+Exceptions : None
+Valid range : Any
+Notes :
+*/
+
+
+GLOBAL VOID FFREE IFN1(IU16, destIndex)
+{
+    /* FFREE: tag the selected register as empty without altering its
+       contents.  The FFREEP variant (POPST set) also pops the stack. */
+    StackEntryByIndex(destIndex)->tagvalue = TAG_EMPTY_MASK;
+    if (POPST) {
+        PopStack();
+    }
+}
+
+
+/*
+Name : FILD
+Function : Push the memory integer onto the stack
+Operation : Decrement TOS; ST(0) <- SRC.
+Flags : C1 as per table 15-1. Others undefined.
+Exceptions : IS
+Valid range : Any
+Notes : FLD Instruction only: source operand is denormal.
+ Masked response: No special action, load as usual.
+ fld gives an Invalid exception if the stack is full. Unmasked
+ Invalid exceptions leave the stack unchanged. Neither the MIPS
+ nor the 68k code notice stack full, so it is probably safe to
+ assume that it rarely happens, and optimise for the case where
+ there is no exception.
+ fld does not generate an Invalid exception if the ST is a NaN.
+ When loading a Short real or Long real NaN, fld extends the
+ significand by adding zeros at the least significant end.
+ Load operations raise denormal as an "after" exception: the
+ register stack is already updated when the exception is raised
+ fld produces a denormal result only when loading from memory:
+ using fld to transfer a denormal value between registers has
+ no effect.
+*/
+
+
+GLOBAL VOID FLD IFN1(VOID *, memPtr)
+{
+ FPSTACKENTRY *src_addr;
+ IU16 IndexVal;
+
+ /* Push a value onto the FPU stack.  The global FPtype selects how
+ memPtr is decoded: FPSTACK means a register index (FLD ST(i)),
+ otherwise a memory operand of the given width.  A full stack
+ raises Invalid/stack-fault; masked response is QNaN indefinite. */
+ /* Clear C1 */
+ FlagC1(0);
+ src_addr = StackEntryByIndex(7);
+ if ((src_addr->tagvalue & TAG_EMPTY_MASK) == 0) { /* Highly unlikely, see notes. */
+ NpxStatus |= (SW_IE_MASK | SW_SF_MASK);
+ FlagC1(1); /* C1 set indicates overflow (see table 15-1). */
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ } else {
+ TOSPtr = src_addr;
+ WriteIndefinite(TOSPtr);
+ }
+ } else {
+ if (FPtype == FPSTACK) {
+ /* Register-to-register: resolve ST(i) BEFORE moving TOS. */
+ IndexVal = *(IU16 *)memPtr;
+ src_addr = StackEntryByIndex(IndexVal);
+ TOSPtr = StackEntryByIndex(7);
+ CopyFP(TOSPtr, src_addr);
+ } else {
+ switch (FPtype) {
+ case M16I : TOSPtr = src_addr;
+ Loadi16ToFP(TOSPtr, memPtr);
+ break;
+ case M32I : TOSPtr = src_addr;
+ Loadi32ToFP(TOSPtr, memPtr);
+ break;
+ case M64I : TOSPtr = src_addr;
+ Loadi64ToFP(TOSPtr, memPtr);
+ break;
+ case M32R : Loadr32ToFP(src_addr, memPtr, TRUE);
+ TOSPtr = src_addr;
+ break;
+ case M64R : Loadr64ToFP(src_addr, memPtr, TRUE);
+ TOSPtr = src_addr;
+ break;
+ case M80R : TOSPtr = src_addr;
+ Loadr80ToFP(TOSPtr, memPtr);
+ break;
+ }
+ }
+ }
+}
+
+
+
+/*(
+Name : FINCSTP
+Function : Add one to the TOS
+Operation : if (ST != 7) { ST <- ST+1 } else { ST <- 0 }
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined.
+Exceptions : None
+Valid range : N/A
+)*/
+
+
+GLOBAL VOID FINCSTP IFN0()
+{
+ /* Clear C1 */
+ FlagC1(0);
+ /* Increment the stack-top pointer by one on the 8-entry circular
+ stack (wraps 7 -> 0); register contents and tags are untouched. */
+ TOSPtr = StackEntryByIndex(1);
+}
+
+
+
+/*(
+Name : FINIT
+Function : Initialise the floating point unit
+Operation : CW<-037F; SW<-0; TW<-FFFFH; FEA<-0; FDS<-0;
+ FIP<-0; FOP<-0; FCS<-0;
+Flags : All reset
+Exceptions : None
+Valid range : N/A
+)*/
+
+
+GLOBAL VOID FINIT IFN0()
+{
+    IU8 counter;
+
+    /* FINIT: reset the FPU.  Control word 037F = all exceptions masked,
+       round-to-nearest; status, tags and instruction/data pointers clear. */
+    NpxControl = 0x037f;
+    npxRounding = ROUND_NEAREST;
+    NpxStatus = 0;
+    NpxLastSel = 0;
+    NpxLastOff = 0;
+    NpxFEA = 0;
+    NpxFDS = 0;
+    NpxFIP = 0;
+    NpxFOP = 0;
+    NpxFCS = 0;
+    /* Empty all eight stack registers; TOS returns to the stack base. */
+    TOSPtr = FPUStackBase;
+    for (counter = 0; counter < 8; counter++) {
+        TOSPtr[counter].tagvalue = TAG_EMPTY_MASK;
+    }
+}
+
+
+
+/*(
+Name : FIST(P)
+Function : Store integer from top of stack to memory
+Operation : [mem] <- (I)ST
+Flags : C1 as per table 15-1. All others undefined.
+Exceptions : P, I, IS
+Valid range : N/A
+Notes : FIST (integer store) rounds the content of the stack top to an
+ integer according to the RC field of the control word and transfers
+ the result to the destination. The destination may define a word or
+ short integer variable. Negative zero is stored in the same encoding
+ as positive zero: 0000..00.
+ Where the source register is empty, a NaN, denormal, unsupported,
+ infinity, or exceeds the representable range of destination, the
+ Masked Response: Store integer indefinite.
+*/
+
+
+GLOBAL VOID FIST IFN1(VOID *, memPtr)
+{
+ IS16 exp_value;
+ IS32 res_out;
+
+ /* Store ST as a 16/32/64-bit integer (FPtype selects the width),
+ rounding per the control word.  Overflow or a non-real operand
+ gives Invalid; masked response writes integer indefinite.  POPST
+ (FISTP forms) pops unless an unmasked fault clears DoAPop. */
+ /* Clear C1 */
+ FlagC1(0);
+ if (POPST) {
+ DoAPop = TRUE;
+ }
+ /* If anything other than the negative bit is set then we should deal */
+ /* with it here... */
+ TestUneval(TOSPtr);
+ if ((TOSPtr->tagvalue & (~TAG_NEGATIVE_MASK)) != 0) { /* Must be unlikely */
+ if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) { /* But this is the most likely of them */
+ switch (FPtype) {
+ case M16I :
+ /* NOTE(review): the 16-bit case stores through an IS32
+ pointer, writing four bytes; presumably the caller's
+ buffer tolerates this -- confirm. */
+ case M32I : *((IS32 *)memPtr) = 0;
+ break;
+ case M64I : *((IU8 *)memPtr + 0) = 0;
+ *((IU8 *)memPtr + 1) = 0;
+ *((IU8 *)memPtr + 2) = 0;
+ *((IU8 *)memPtr + 3) = 0;
+ *((IU8 *)memPtr + 4) = 0;
+ *((IU8 *)memPtr + 5) = 0;
+ *((IU8 *)memPtr + 6) = 0;
+ *((IU8 *)memPtr + 7) = 0;
+ break;
+ }
+ } else {
+ /* Empty, NaN, infinity, denormal or unsupported: Invalid. */
+ NpxStatus |= SW_IE_MASK;
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ NpxStatus |= SW_SF_MASK;
+ }
+ FlagC1(0);
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ if (POPST) {
+ DoAPop=FALSE; /* Unset it - we won't be popping. */
+ }
+ } else {
+ WriteIntegerIndefinite(memPtr);
+ }
+ }
+ } else {
+ HostClearExceptions();
+ exp_value = 0; /* 1 => conversion overflowed the destination. */
+ /* The result of conversion is written out */
+ /* to FPTemp? */
+ switch (FPtype) {
+ case M16I : *(IS16 *)&FPTemp = (IS16)npx_rint(TOSPtr->fpvalue);
+ /* Check for overflow */
+ if ((FPH)(*(IS16 *)&FPTemp) != npx_rint(TOSPtr->fpvalue)) {
+ exp_value = 1; /* flag exception */
+ }
+ break;
+ case M32I : *(IS32 *)&FPTemp = (IS32)npx_rint(TOSPtr->fpvalue);
+ /* Check for overflow */
+ if ((FPH)(*(IS32 *)&FPTemp) != npx_rint(TOSPtr->fpvalue)) {
+ exp_value = 1; /* flag exception */
+ }
+ break;
+ case M64I : CVTFPHI64((FPU_I64 *)&FPTemp, &(TOSPtr->fpvalue)); /* Must be writing the result to FPTemp as well... */
+ CVTI64FPH((FPU_I64 *)&FPTemp); /* Result in FPRes */
+ /* Check for overflow */
+ if (FPRes != npx_rint(TOSPtr->fpvalue)) {
+ exp_value = 1; /* flag exception */
+ }
+ break;
+ }
+ if (exp_value == 1) {
+ NpxStatus |= SW_IE_MASK; /* Set the invalid bit */
+ /* For the masked overflow case, the result delivered by */
+ /* the host will be correct, provided it is IEEE compliant. */
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop = FALSE;
+ } else {
+ WriteIntegerIndefinite(memPtr);
+ }
+ }
+ if (exp_value == 0) {
+ /* Conversion fitted: copy FPTemp to the destination.  The
+ 64-bit store writes big-endian words byte by byte. */
+ switch (FPtype) {
+ case M16I : res_out = *(IS16 *)&FPTemp;
+ *((IU32 *)memPtr) = (IU32)res_out;
+ break;
+ case M32I : res_out = *(IS32 *)&FPTemp;
+ *((IS32 *)memPtr) = (IS32)res_out;
+ break;
+ case M64I : res_out = ((FPU_I64 *)&FPTemp)->high_word;
+ *((IU8 *)memPtr + 3) = res_out & 0xff;
+ res_out >>= 8;
+ *((IU8 *)memPtr + 2) = res_out & 0xff;
+ res_out >>= 8;
+ *((IU8 *)memPtr + 1) = res_out & 0xff;
+ res_out >>= 8;
+ *((IU8 *)memPtr + 0) = res_out & 0xff;
+ res_out = ((FPU_I64 *)&FPTemp)->low_word;
+ *((IU8 *)memPtr + 7) = res_out & 0xff;
+ res_out >>= 8;
+ *((IU8 *)memPtr + 6) = res_out & 0xff;
+ res_out >>= 8;
+ *((IU8 *)memPtr + 5) = res_out & 0xff;
+ res_out >>= 8;
+ *((IU8 *)memPtr + 4) = res_out & 0xff;
+ break;
+ }
+ /* Check for precision */
+ if (TOSPtr->fpvalue != npx_rint(TOSPtr->fpvalue)) {
+ SetPrecisionBit();
+ if ((NpxControl & CW_PM_MASK) == 0) {
+ /* Unmasked PE: result was already delivered; pop (if
+ requested) before raising the exception. */
+ NpxStatus |= SW_ES_MASK;
+ if (POPST) {
+ if (DoAPop) {
+ PopStack();
+ }
+ }
+ DoNpxException();
+ return;
+ }
+ }
+ }
+ }
+ if (POPST) {
+ if (DoAPop) {
+ PopStack();
+ }
+ }
+}
+
+
+
+/*(
+Name : FLDconstant
+Function : Load constant value to TOS
+Operation : Push ST: ST(0) <- constant
+Flags : C1 as per table 15-1. All others undefined.
+Exceptions : IS
+Valid range : N/A
+*/
+
+
+GLOBAL VOID FLDCONST IFN1(IU8, const_index)
+{
+    /* FLD1/FLDL2T/FLDL2E/FLDPI/FLDLG2/FLDLN2/FLDZ: push the constant
+       selected by const_index from ConstTable. */
+    FlagC1(0);
+    TOSPtr = StackEntryByIndex(7);    /* Prospective new TOS. */
+    if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+        memset((char*)TOSPtr,0,sizeof(FPSTACKENTRY));
+        TOSPtr->fpvalue = ConstTable[const_index].fpvalue;
+        TOSPtr->tagvalue = ConstTable[const_index].tagvalue;
+    } else {
+        SignalStackOverflow(TOSPtr);
+    }
+}
+
+
+
+/*(
+Name : FLDCW
+Function : Replace the current value of the FPU control word with
+ the value in the specified memory location.
+Operation : CW <- SRC.
+Flags : All undefined.
+Exceptions : None - but unmasking previously masked exceptions will
+ cause the unmasked exception to be triggered if the
+ matching bit is set in the status word.
+Valid range : N/A
+*/
+
+
+GLOBAL VOID FLDCW IFN1(VOID *, memPtr)
+{
+ IU32 result;
+/*
+This function has to modify things. The control word contains the
+following information:
+Precision control - not implemented.
+Rounding control - implemented.
+Exception masks - implemented.
+Thus when we read in a value for the control word, we have to update
+the host's rounding mode and also the exception masks.
+*/
+ /* First, set the rounding mode */
+ /* NOTE(review): reads 32 bits and keeps the low 16 -- assumes the
+ operand buffer makes that safe on this host; confirm. */
+ result = *(IU32 *)memPtr;
+ NpxControl = (IU16)result;
+ npxRounding = (NpxControl & 0xc00); /* RC field, bits 10-11. */
+ switch (npxRounding) {
+ case ROUND_NEAREST : HostSetRoundToNearest();
+ break;
+ case ROUND_NEG_INFINITY : HostSetRoundDown();
+ break;
+ case ROUND_POS_INFINITY : HostSetRoundUp();
+ break;
+ case ROUND_ZERO : HostSetRoundToZero();
+ break;
+ }
+ /* Now adjust the exceptions. If an exception is unmasked, then the */
+ /* bit value in NpxControl is '0'. If the exception has been */
+ /* triggered then the corresponding bit in NpxStatus is '1'.Thus, */
+ /* the expression ~NpxControl(5..0) & NpxStatus(5..0) will be */
+ /* non-zero when we have unmasked exceptions that were previously */
+ /* masked. */
+ if (((~(NpxControl & 0x3f)) & (NpxStatus & 0x3f)) != 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ }
+}
+
+/* FLDCW16: load a new FPU control word from memory (16-bit read).
+   The body was a verbatim copy of FLDCW apart from the initial read
+   width; widen the 16-bit source into an IU32 temporary and delegate
+   to FLDCW so the rounding-mode update and pending-exception re-check
+   live in exactly one place.  This mirrors the pattern already used
+   by OpFpuRestoreFpuState, which feeds ReadI16FromIntel results to
+   FLDCW through an IU32 temporary. */
+GLOBAL VOID FLDCW16 IFN1(VOID *, memPtr)
+{
+ IU32 result;
+
+ /* Zero-extend the 16-bit control word; FLDCW reads a full IU32 */
+ result = (IU32)(*(IU16 *)memPtr);
+ FLDCW((VOID *)&result);
+}
+
+/*(
+Name : FLDENV
+Function : Reload the FPU state from memory.
+Operation : FPU state <- SRC
+Flags : As loaded.
+Exceptions : None - but unmasking previously masked exceptions will
+ cause the unmasked exception to be triggered if the
+ matching bit is set in the status word.
+Valid range : N/A
+*/
+
+
+/* FLDENV: reload the FPU environment (control/status/tag words and
+   instruction/data pointers) from memory.  Offset 0 is passed since,
+   unlike FRSTOR, there is no register-image area below the header.
+   After the load, any exception that is both pending in NpxStatus and
+   unmasked in the new NpxControl is raised immediately. */
+GLOBAL VOID FLDENV IFN1(VOID *, memPtr)
+{
+ /* First. load the control, status, tagword regs. etc. */
+ OpFpuRestoreFpuState(memPtr, 0);
+ /* Finally, check to see if any previously unmasked exceptions */
+ /* are now needed to go off. Do this by anding the "triggered" bits in */
+ /* NpxStatus with the one's complement of the "masked" bits in NpxControl. */
+ if (((NpxStatus & 0x3f) & (~(NpxControl & 0x3f))) != 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ }
+}
+
+/* This generator is used to write out the 14/28 bytes stored by FSTENV,
+and FSAVE. */
+
+
+/* OpFpuStoreFpuState: write the 14-byte (16-bit) or 28-byte (32-bit)
+   FPU environment image used by FSTENV and FSAVE.  Layout differs by
+   NPX_PROT_MODE and NPX_ADDRESS_SIZE_32; in real mode the FIP/FEA
+   linear addresses are reassembled from segment<<4 + offset and the
+   opcode (NpxFOP) is folded into the image.  fsave_offset shifts the
+   whole header up when FSAVE appends the register area below it. */
+LOCAL VOID OpFpuStoreFpuState IFN2(VOID *, memPtr, IU32, fsave_offset)
+{
+ IU32 result;
+
+ /* how the copy takes place depends on the addressing mode */
+ /* NPX_ADDRESS_SIZE_32 and NPX_PROT_MODE settings */
+ /*************************************************************** */
+ /* Need to do similar thing to strings to check that space */
+ /* is available and that there is not paging fault!!!! */
+ /*************************************************************** */
+ /* The operation should store the control word, tag word */
+ /* and status word, so these need to be calculated. It also */
+ /* stores the last instruction and data pointers and the opcode */
+ /* (if in real mode) */
+ /* The offsets from memPtr look strange. Remember that we are going to*/
+ /* write this data using the "write bytes" function. This assumes that*/
+ /* the data is stored bigendian and writes it out back to front for */
+ /* the little-endian intel, as it were. Are you with me? */
+ /* fsave offset is required since if we are asked to do an "fsave" */
+ /* (as opposed to an fstenv), then the "string" that we are going to */
+ /* write will be even bigger, and this stuff must be at the top end */
+ /* of it. Horrible but logical */
+ if (NPX_PROT_MODE) {
+ if (NPX_ADDRESS_SIZE_32) {
+ /* Protected mode, 32-bit: seven dwords, CW highest */
+ WriteI32ToIntel(((IU8 *)memPtr+24+fsave_offset), (IU32)NpxControl);
+ GetIntelStatusWord();
+ WriteI32ToIntel(((IU8 *)memPtr+20+fsave_offset), (IU32)NpxStatus);
+ GetIntelTagword(&result);
+ WriteI32ToIntel(((IU8 *)memPtr+16+fsave_offset), (IU32)result);
+ WriteI32ToIntel(((IU8 *)memPtr+12+fsave_offset), (IU32)NpxFIP);
+ WriteI32ToIntel(((IU8 *)memPtr+8+fsave_offset), (IU32)NpxFCS);
+ WriteI32ToIntel(((IU8 *)memPtr+4+fsave_offset), (IU32)NpxFEA);
+ WriteI32ToIntel(((IU8 *)memPtr+0+fsave_offset), (IU32)NpxFDS);
+ } else {
+ /* Protected mode, 16-bit: seven words */
+ WriteI16ToIntel(((IU8 *)memPtr+12+fsave_offset), (IU16)NpxControl);
+ GetIntelStatusWord();
+ WriteI16ToIntel(((IU8 *)memPtr+10+fsave_offset), (IU16)NpxStatus);
+ GetIntelTagword(&result);
+ WriteI16ToIntel(((IU8 *)memPtr+8+fsave_offset), (IU16)result);
+ WriteI16ToIntel(((IU8 *)memPtr+6+fsave_offset), (IU16)NpxFIP);
+ WriteI16ToIntel(((IU8 *)memPtr+4+fsave_offset), (IU16)NpxFCS);
+ WriteI16ToIntel(((IU8 *)memPtr+2+fsave_offset), (IU16)NpxFEA);
+ WriteI16ToIntel(((IU8 *)memPtr+0+fsave_offset), (IU16)NpxFDS);
+ }
+ } else {
+ if (NPX_ADDRESS_SIZE_32) {
+ /* Real mode, 32-bit: linear addresses built from seg<<4+off,
+ opcode low 11 bits merged into the FIP-high dword */
+ WriteI32ToIntel(((IU8 *)memPtr+24+fsave_offset), (IU32)NpxControl);
+ GetIntelStatusWord();
+ WriteI32ToIntel(((IU8 *)memPtr+20+fsave_offset), (IU32)NpxStatus);
+ GetIntelTagword(&result);
+ WriteI32ToIntel(((IU8 *)memPtr+16+fsave_offset), (IU32)result);
+ WriteI32ToIntel(((IU8 *)memPtr+12+fsave_offset), (IU32)((NpxFIP+(NpxFCS<<4)) & 0xffff));
+ WriteI32ToIntel(((IU8 *)memPtr+8+fsave_offset), (IU32)((((NpxFIP+(NpxFCS<<4)) & 0xffff0000) >> 4) | ((IU32)(NpxFOP & 0x7ff))));
+ WriteI32ToIntel(((IU8 *)memPtr+4+fsave_offset), (IU32)((NpxFEA+(NpxFDS<<4)) & 0xffff));
+ WriteI32ToIntel(((IU8 *)memPtr+0+fsave_offset), (IU32)(((NpxFEA+(NpxFDS<<4)) & 0xffff0000) >> 4));
+ } else {
+ /* Real mode, 16-bit: same reassembly, packed into words */
+ WriteI16ToIntel(((IU8 *)memPtr+12+fsave_offset), (IU16)NpxControl);
+ GetIntelStatusWord();
+ WriteI16ToIntel(((IU8 *)memPtr+10+fsave_offset), (IU16)NpxStatus);
+ GetIntelTagword(&result);
+ WriteI16ToIntel(((IU8 *)memPtr+8+fsave_offset), (IU16)result);
+ WriteI16ToIntel(((IU8 *)memPtr+6+fsave_offset), (IU16)((NpxFIP+(NpxFCS<<4)) & 0xffff));
+ WriteI16ToIntel(((IU8 *)memPtr+4+fsave_offset), (IU16)((((NpxFIP+(NpxFCS<<4)) & 0xffff0000) >> 4) | ((IU16)(NpxFOP & 0x7ff))));
+ WriteI16ToIntel(((IU8 *)memPtr+2+fsave_offset), (IU16)(((NpxFDS<<4)+NpxFEA) & 0xffff));
+ WriteI16ToIntel(((IU8 *)memPtr+0+fsave_offset), (IU16)(((NpxFEA+(NpxFDS<<4)) & 0xffff0000) >> 4));
+ }
+ }
+}
+
+/* This generator is called by FLDENV and FRSTOR, to load up the 14/28
+byte block. */
+
+
+/* OpFpuRestoreFpuState: read back the 14/28-byte environment image
+   written by OpFpuStoreFpuState (used by FLDENV and FRSTOR).  The
+   control word is applied via FLDCW so rounding mode and masks are
+   re-derived; status and tag words go through SetIntelStatusWord /
+   SetIntelTagword.  In real mode the FIP/FEA upper bits are unpacked
+   from the seg<<4 encoding and NpxFOP recovered from its 11 bits. */
+LOCAL VOID OpFpuRestoreFpuState IFN2(VOID *, memPtr, IU32, frstor_offset)
+{
+ IU32 result;
+
+ /* how the copy takes place depends on the addressing mode */
+ /* NPX_ADDRESS_SIZE_32 and NPX_PROT_MODE settings */
+ /*************************************************************** */
+ /* Need to do similar thing to strings to check that space */
+ /* is available and that there is not paging fault!!!! */
+ /************************************************************** */
+ /* The operation should restore the control word, tag word */
+ /* and status word, so these need to be translated. It also */
+ /* restores the last instruction and data pointers and the opcode */
+ /* (if in real mode) */
+
+
+ /* get the rest of the data, instruction and data pointers */
+ if ( NPX_PROT_MODE ) {
+ if (NPX_ADDRESS_SIZE_32) {
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+24+frstor_offset));
+ FLDCW((VOID *)&result);
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+20+frstor_offset));
+ SetIntelStatusWord(result);
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+16+frstor_offset));
+ SetIntelTagword(result);
+ ReadI32FromIntel(&NpxFIP, ((IU8 *)memPtr+12+frstor_offset));
+ ReadI32FromIntel(&NpxFCS, ((IU8 *)memPtr+8+frstor_offset));
+ ReadI32FromIntel(&NpxFEA, ((IU8 *)memPtr+4+frstor_offset));
+ ReadI32FromIntel(&NpxFDS, ((IU8 *)memPtr+0+frstor_offset));
+ } else {
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+12+frstor_offset));
+ /* Note this is a 32-bit result ! FLDCW reads a full IU32 */
+ FLDCW((VOID *)&result);
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+10+frstor_offset));
+ SetIntelStatusWord(result);
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+8+frstor_offset));
+ SetIntelTagword(result);
+ ReadI16FromIntel(&NpxFIP, ((IU8 *)memPtr+6+frstor_offset));
+ ReadI16FromIntel(&NpxFCS, ((IU8 *)memPtr+4+frstor_offset));
+ ReadI16FromIntel(&NpxFEA, ((IU8 *)memPtr+2+frstor_offset));
+ ReadI16FromIntel(&NpxFDS, ((IU8 *)memPtr+0+frstor_offset));
+ }
+ } else {
+ if (NPX_ADDRESS_SIZE_32) {
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+24+frstor_offset));
+ FLDCW((VOID *)&result);
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+20+frstor_offset));
+ SetIntelStatusWord(result);
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+16+frstor_offset));
+ SetIntelTagword(result);
+ /* Undo the store-side packing: FIP low 16 bits here, upper
+ bits were stored shifted right 4 in the next dword */
+ ReadI32FromIntel(&NpxFIP, ((IU8 *)memPtr+12+frstor_offset));
+ NpxFIP &= 0xffff;
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+8+frstor_offset));
+ NpxFIP |= ((result & 0x0ffff000) << 4);
+ ReadI32FromIntel(&NpxFOP, ((IU8 *)memPtr+8+frstor_offset));
+ NpxFOP &= 0x7ff;
+ ReadI32FromIntel(&NpxFEA, ((IU8 *)memPtr+4+frstor_offset));
+ NpxFEA &= 0xffff;
+ ReadI32FromIntel(&result, ((IU8 *)memPtr+0+frstor_offset));
+ NpxFEA |= ((result & 0x0ffff000) << 4);
+ } else {
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+12+frstor_offset));
+ FLDCW((VOID *)&result);
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+10+frstor_offset));
+ SetIntelStatusWord(result);
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+8+frstor_offset));
+ SetIntelTagword(result);
+ ReadI16FromIntel(&NpxFIP, ((IU8 *)memPtr+6+frstor_offset));
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+4+frstor_offset));
+ NpxFIP |= ((result & 0xf000) << 4);
+ ReadI16FromIntel(&NpxFOP, ((IU8 *)memPtr+4+frstor_offset));
+ NpxFOP &= 0x7ff;
+ ReadI16FromIntel(&NpxFEA, ((IU8 *)memPtr+2+frstor_offset));
+ ReadI16FromIntel(&result, ((IU8 *)memPtr+0+frstor_offset));
+ NpxFEA |= (IU32)((result & 0xf000) << 4);
+ }
+ }
+}
+
+
+
+/*(
+Name : FMUL
+Function : Multiply two numbers together
+Operation : Dest <- Src1 * Src2
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : P, U, O, D, I, IS
+Valid range : Any
+Notes :
+)*/
+
+
+/* FMUL: dest <- src1 * src2.  src2 is first loaded via LoadValue
+   (which yields its stack index); when POPST is set, the stack is
+   popped afterwards unless GenericMultiply cancelled the pop by
+   clearing DoAPop on an exception path. */
+GLOBAL VOID FMUL IFN3(IU16, destIndex, IU16, src1Index, VOID *, src2)
+{
+ IU16 srcTwoIndex;
+
+ LoadValue(src2, &srcTwoIndex);
+ if (POPST)
+ DoAPop = TRUE;
+ GenericMultiply(destIndex, src1Index, srcTwoIndex);
+ /* Pop only if still required after the multiply */
+ if (POPST && DoAPop)
+ PopStack();
+}
+
+
+
+/* GenericMultiply: shared worker for FMUL variants.  Multiplies the
+   stack entries at src1Index and src2Index into the entry at
+   destIndex, handling empty/unsupported/NaN operands, the unmasked
+   denormal exception (which clears DoAPop so the caller skips its
+   pop), and the zero/infinity special cases via the sign xor. */
+LOCAL VOID GenericMultiply IFN3(IU16, destIndex, IU16, src1Index, IU16, src2Index)
+{
+ FPSTACKENTRY *src1_addr;
+ FPSTACKENTRY *src2_addr;
+
+ src1_addr = StackEntryByIndex(src1Index);
+ src2_addr = StackEntryByIndex(src2Index);
+
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(src1_addr);
+ TestUneval(src2_addr);
+ tag_or = (src1_addr->tagvalue | src2_addr->tagvalue);
+ /* If the only tagword bit set is negative then just proceed */
+ if ((tag_or & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ FPRes = src1_addr->fpvalue * src2_addr->fpvalue;
+ /* Reuse one of the above to calculate the destination */
+ src1_addr = StackEntryByIndex(destIndex);
+ PostCheckOUP();
+ /* Value could be anything */
+ CalcTagword(src1_addr);
+ } else {
+ /* Some funny bit was set. Check for the possibilities */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalStackUnderflow(src1_addr);
+ } else {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ src1_addr = StackEntryByIndex(destIndex);
+ SignalIndefinite(src1_addr);
+ } else {
+ /* It must be NaN */
+ tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
+ Test2NaN(destIndex, src1_addr, src2_addr);
+ }
+ }
+ return;
+ }
+ /* Check for the denorm case... */
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ DoAPop=FALSE; /* Just in case */
+ return;
+ } else {
+ /* Proceed if we've no zeroes or infinities. */
+ if ((tag_or & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
+ HostClearExceptions();
+ FPRes = src1_addr->fpvalue * src2_addr->fpvalue;
+ /* Reuse one of the above to calculate the destination */
+ src1_addr = StackEntryByIndex(destIndex);
+ PostCheckOUP();
+ /* Value could be anything */
+ CalcTagword(src1_addr);
+ return;
+ }
+ }
+ }
+ tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
+ /* For zero or infinity operands we will have the result */
+ src2_addr = StackEntryByIndex(destIndex);
+ if ((tag_or & TAG_ZERO_MASK) != 0) {
+ /* Multiplying zero by infinity yields the invalid response */
+ if ((tag_or & TAG_INFINITY_MASK) != 0) {
+ SignalIndefinite(src2_addr);
+ } else {
+ /* Zero by anything else is zero with sign equal */
+ /* to the xor of the signs of the two sources. */
+ src2_addr->tagvalue = (TAG_ZERO_MASK | (tag_xor & TAG_NEGATIVE_MASK));
+ }
+ return;
+ }
+ /* The only funny bit left is infinity. The result is going */
+ /* to be infinity with sign equal to the xor of the signs of */
+ /* the sources. */
+ src2_addr->tagvalue = TAG_INFINITY_MASK | (tag_xor & TAG_NEGATIVE_MASK);
+ }
+}
+
+
+
+/* The FNOP operation doesn't do anything, it just does the normal
+checks for exceptions. */
+
+
+/* FNOP: intentionally empty - no FPU state is modified here. */
+GLOBAL VOID FNOP IFN0()
+{
+}
+
+
+/* FPATAN: This generator returns the value ARCTAN(ST(1)/ST) to ST(1)
+then pops the stack. Its response to zeros and infinities is rather
+unusual...
++-0 / +X = 0 with sign of original zero
++-0 / -X = pi with sign of original zero
++-X /+-0 = pi/2 with sign of original X
++-0 / +0 = 0 with sign of original zero
++-0 / -0 = pi with sign of original zero
++inf / +-0 = +pi/2
+-inf / +-0 = -pi/2
++-0 / +inf = 0 with sign of original zero
++-0 / -inf = pi with sign of original zero
++-inf / +-X = pi/2 with sign of original infinity
++-Y / +inf = 0 with sign of original Y
++-Y / -inf = pi with sign of original Y
++-inf / +inf = pi/4 with sign of original inf
++-inf / -inf = 3*pi/4 with sign of original inf
+Otherwise, we just take the two operands from the stack and call the
+appropriate EDL to do the instruction.
+The use of an invalid operand with masked exception set causes
+the pop to go off, cruds up the contents of the stack and doesn't set
+the invalid exception, although if the invalid is infinity or NaN,
+overflow and precision exceptions are also generated, while if it is
+a denorm, underflow and precision exceptions are generated.
+With unmasked exceptions, exactly the same chain of events occurs.
+UNDER ALL CIRCUMSTANCES, THE STACK GETS POPPED.
+*/
+
+
+/* FPATAN: ST(1) <- arctan(ST(1)/ST), then pop.  Ordinary operands go
+   through atan2(); zero/infinity combinations are resolved by packing
+   the two low tag triplets into a 6-bit index into FpatanTable (see
+   the truth table below).  The stack is popped on EVERY path. */
+GLOBAL VOID FPATAN IFN0()
+{
+ FPSTACKENTRY *st1_addr;
+
+ st1_addr = StackEntryByIndex(1);
+ /* Clear C1 */
+ FlagC1(0);
+ /* If only the negative bit is set, just proceed.... */
+ TestUneval(TOSPtr);
+ TestUneval(st1_addr);
+ tag_or = (TOSPtr->tagvalue | st1_addr->tagvalue);
+ if ((tag_or & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ FPRes = atan2(st1_addr->fpvalue, TOSPtr->fpvalue);
+ PostCheckOUP();
+ /* The return value has to be in the range -pi to +pi */
+ CalcTagword(st1_addr);
+ } else {
+ /* Some funny bit set.... */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(st1_addr);
+ } else {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(st1_addr);
+ } else {
+ /* It must be a NaN. */
+ tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
+ Test2NaN(0, TOSPtr, st1_addr);
+ }
+ }
+ PopStack();
+ return;
+ }
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ PopStack();
+ return;
+ } else {
+ /* Proceed if we've no zeroes or infinities. */
+ if ((tag_or & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
+ HostClearExceptions();
+ FPRes = atan2(st1_addr->fpvalue, TOSPtr->fpvalue);
+ PostCheckOUP();
+ /* The return value is -pi to +pi */
+ CalcTagword(st1_addr);
+ PopStack();
+ return;
+ }
+ }
+ }
+ /* It must have been a zero or an infinity. As can be seen */
+ /* from the table above, there is a complicated interaction */
+ /* between the result for each type and its option. */
+ /* Let's simplify it by use of a little table. */
+ /* ST ST(1) Result */
+ /* Z I S Z I S */
+ /* 0 0 0 0 1 0 pi/2 */
+ /* 0 0 0 0 1 1 -pi/2 */
+ /* 0 0 0 1 0 0 +0 */
+ /* 0 0 0 1 0 1 -0 */
+ /* 0 1 0 0 1 0 pi/4 */
+ /* 0 1 0 0 1 1 3*pi/4 */
+ /* 0 1 0 1 0 0 pi/2 */
+ /* 0 1 0 1 0 1 pi/2 */
+ /* 0 1 1 0 1 0 -pi/4 */
+ /* 0 1 1 0 1 1 -3*pi/4 */
+ /* 0 1 1 1 0 0 -pi/2 */
+ /* 0 1 1 1 0 1 -pi/2 */
+ /* 1 0 0 0 1 0 +0 */
+ /* 1 0 0 0 1 1 pi */
+ /* 1 0 0 1 0 0 +0 */
+ /* 1 0 0 1 0 1 pi */
+ /* 1 0 1 0 1 0 -0 */
+ /* 1 0 1 0 1 1 -pi */
+ /* 1 0 1 1 0 0 -0 */
+ /* 1 0 1 1 0 1 -pi */
+ /* */
+ /* All other combinations are invalid, as they would involve */
+ /* a tagword having both infinity and zero bits set. */
+ /* Pack ST(1) tag bits 2..0 over ST tag bits 2..0 as the index */
+ tag_xor = (st1_addr->tagvalue & 7);
+ tag_xor <<= 3;
+ tag_xor |= (TOSPtr->tagvalue & 7);
+ CopyFP(st1_addr, FpatanTable[tag_xor]);
+ }
+ /* No matter what has happened... We ALWAYS pop on FPATAN!!! */
+ PopStack();
+}
+
+
+
+/* FPREM: This is the same function as implemented on the 80287. It is
+NOT the same as the IEEE required REM function, this is now supplied as
+FPREM1. FPREM predates the final draft of IEEE 754 and is maintained for
+the purpose of backward compatibility.
+*/
+
+
+/* FPREM: 8087-compatible partial remainder of ST by ST(1) (NOT the
+   IEEE 754 remainder - see FPREM1).  Works on sign-stripped operands
+   under round-to-zero, restoring the caller's rounding mode (saved in
+   tag_xor) afterwards.  Complete reduction sets C0/C3/C1 from the low
+   three quotient bits; an exponent gap >= 64 leaves C2 set
+   (incomplete reduction) and scales by fprem_val instead. */
+GLOBAL VOID FPREM IFN0()
+{
+ IS16 exp_diff;
+ IU8 little_rem;
+ FPU_I64 remainder;
+ FPH fprem_val;
+ FPSTACKENTRY *st1_addr;
+
+ st1_addr = StackEntryByIndex(1);
+ TestUneval(TOSPtr);
+ TestUneval(st1_addr);
+ tag_or = (TOSPtr->tagvalue | st1_addr->tagvalue);
+ /* First, check if the values are real. If so, we can proceed. */
+ if ((tag_or & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
+ /* First, check for the denormal possibility... */
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ }
+ /* Make both values positive */
+ ((FPHOST *)&(TOSPtr->fpvalue))->hiword.sign = 0;
+ ((FPHOST *)&(st1_addr->fpvalue))->hiword.sign = 0;
+
+ /* Find the difference in exponents... */
+ exp_diff = ((FPHOST *)&(TOSPtr->fpvalue))->hiword.exp - ((FPHOST *)&(st1_addr->fpvalue))->hiword.exp;
+ /* If it's more than 63, we can't do it at once... */
+ if (exp_diff >= 64) {
+ ((FPHOST *) &fprem_val) -> hiword.sign = 0;
+ ((FPHOST *) &fprem_val) -> hiword.mant_hi = 0;
+ ((FPHOST *) &fprem_val) -> mant_lo = 0;
+ ((FPHOST *) &fprem_val) -> hiword.exp = (exp_diff - 50) + HOST_BIAS;
+ FlagC2(1); /* This will be incomplete reduction */
+ } else {
+ FlagC2(0); /* This will be complete reduction */
+ }
+ /* NOTE(review): fprem_val is only initialised on the exp_diff
+ >= 64 path; later uses are guarded by testing bit 0x0400 of
+ NpxStatus - confirm FlagC2 sets/clears exactly that bit. */
+ HostClearExceptions();
+ /* Save the caller's rounding control, force round-to-zero */
+ tag_xor = (NpxControl & 0xc00);
+ NpxControl &= 0xf3ff;
+ NpxControl |= ROUND_ZERO;
+ HostSetRoundToZero();
+ /* Unfortunately, because the function isn't the strict */
+ /* IEEE compliant style, if we use an IEEE compliant FREM */
+ /* operation, as like as not we'd get the wrong answer. So */
+ /* we perform the operation by doing the steps given in the */
+ /* page in the instruction set. */
+ FPRes = TOSPtr->fpvalue / st1_addr->fpvalue;
+ if ((NpxStatus & 0x0400) != 0) { /* The incomplete reduction case */
+ FPRes = FPRes / fprem_val;
+ }
+ FPRes = npx_rint(FPRes);
+ /* Calculate the remainder */
+ if ((NpxStatus & 0x0400) == 0) {
+ CVTFPHI64(&remainder, &FPRes);
+ CPY64BIT8BIT(&remainder, &little_rem);
+ }
+ /* Restore the saved rounding mode on the host and in CW */
+ switch (tag_xor) {
+ case ROUND_NEAREST : HostSetRoundToNearest();
+ break;
+ case ROUND_NEG_INFINITY : HostSetRoundDown();
+ break;
+ case ROUND_POS_INFINITY : HostSetRoundUp();
+ break;
+ case ROUND_ZERO : HostSetRoundToZero();
+ break;
+ }
+ NpxControl &= 0xf3ff;
+ NpxControl |= tag_xor;
+ FPRes *= st1_addr->fpvalue;
+ if ((NpxStatus & 0x0400) != 0) { /* The incomplete reduction case */
+ FPRes *= fprem_val;
+ FPRes = TOSPtr->fpvalue - FPRes;
+ } else { /* Complete reduction */
+ FPRes = TOSPtr->fpvalue - FPRes;
+ FlagC0((little_rem&4)?1:0);
+ FlagC3((little_rem&2)?1:0);
+ FlagC1((little_rem&1));
+ }
+ /* Check for an underflow response */
+ if (HostGetUnderflowException() != 0) {
+ NpxStatus |= SW_UE_MASK;
+ if ((NpxControl & CW_UM_MASK) == 0) {
+ AdjustUnderflowResponse();
+ NpxStatus |= SW_ES_MASK;
+ NpxException = TRUE;
+ }
+ }
+ /* But the remainder must have the sign of the original ST! */
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ ((FPHOST *)&(FPRes))->hiword.sign = 1;
+ } else {
+ ((FPHOST *)&(FPRes))->hiword.sign = 0;
+ }
+ /* And restore st1 sign bit if required */
+ if ((st1_addr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ ((FPHOST *)&(st1_addr->fpvalue))->hiword.sign = 1;
+ }
+ CalcTagword(TOSPtr);
+ } else {
+ /* We had a funny thing */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ } else {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ } else {
+ /* It must be a NaN. */
+ tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
+ Test2NaN(0, TOSPtr, st1_addr);
+ }
+ }
+ return;
+ }
+ /* The logical way to arrange zeroes and infinities is zero first. */
+ if ((tag_or & TAG_ZERO_MASK) != 0) {
+ /* A zero in ST(1) is ALWAYS invalid... */
+ if ((st1_addr->tagvalue & TAG_ZERO_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ }
+ /* The zero must be in ST, the result is what is there... */
+ FlagC0(0);
+ FlagC1(0);
+ FlagC2(0);
+ FlagC3(0);
+ return;
+ }
+ /* OK, it HAS to be infinity */
+ /* An infinity at ST is ALWAYS invalid... */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ }
+ /* An infinity at ST(1) leaves ST untouched */
+ FlagC0(0);
+ FlagC1(0);
+ FlagC2(0);
+ FlagC3(0);
+ }
+}
+
+
+
+
+/* FPREM1: This is the IEEE 754 required REM function. The older FPREM
+predates the final draft of IEEE 754 and is maintained only for
+backward compatibility.
+*/
+
+
+/* FPREM1: IEEE 754 remainder of ST by ST(1).  Identical in structure
+   to FPREM except that the complete-reduction case uses round-to-zero
+   while the incomplete case uses round-to-nearest (the only
+   behavioural difference between the two, per the comment below). */
+GLOBAL VOID FPREM1 IFN0()
+{
+ IS16 exp_diff;
+ IU8 little_rem;
+ FPU_I64 remainder;
+ FPH fprem_val;
+ FPSTACKENTRY *st1_addr;
+
+ st1_addr = StackEntryByIndex(1);
+ TestUneval(TOSPtr);
+ TestUneval(st1_addr);
+ tag_or = (TOSPtr->tagvalue | st1_addr->tagvalue);
+ /* First, check if the values are real. If so, we can proceed. */
+ if ((tag_or & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
+ /* First, check for the denormal possibility... */
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ }
+ /* Make both values positive */
+ ((FPHOST *)&(TOSPtr->fpvalue))->hiword.sign = 0;
+ ((FPHOST *)&(st1_addr->fpvalue))->hiword.sign = 0;
+
+ /* Find the difference in exponents... */
+ exp_diff = ((FPHOST *)&(TOSPtr->fpvalue))->hiword.exp - ((FPHOST *)&(st1_addr->fpvalue))->hiword.exp;
+ /* If it's more than 63, we can't do it at once... */
+ if (exp_diff >= 64) {
+ ((FPHOST *) &fprem_val) -> hiword.sign = 0;
+ ((FPHOST *) &fprem_val) -> hiword.mant_hi = 0;
+ ((FPHOST *) &fprem_val) -> mant_lo = 0;
+ ((FPHOST *) &fprem_val) -> hiword.exp = (exp_diff - 50) + HOST_BIAS;
+ FlagC2(1); /* This will be incomplete reduction */
+ } else {
+ FlagC2(0); /* This will be complete reduction */
+ }
+ /* NOTE(review): as in FPREM, fprem_val is only initialised on
+ the exp_diff >= 64 path and later uses are guarded by the
+ 0x0400 bit of NpxStatus set via FlagC2 - confirm. */
+ HostClearExceptions();
+ /* Note that this is the only difference between FPREM and
+ FPREM1. For the incomplete reduction case we use "round
+ to nearest" rather than "round to zero".
+ */
+ tag_xor = (NpxControl & 0xc00);
+ NpxControl &= 0xf3ff;
+ if ((NpxStatus & 0x0400) == 0) {
+ HostSetRoundToZero();
+ NpxControl |= ROUND_ZERO;
+ } else {
+ HostSetRoundToNearest();
+ NpxControl |= ROUND_NEAREST;
+ }
+ FPRes = TOSPtr->fpvalue / st1_addr->fpvalue;
+ if ((NpxStatus & 0x0400) != 0) { /* The incomplete reduction case */
+ FPRes = FPRes / fprem_val;
+ }
+ FPRes = npx_rint(FPRes);
+ /* Calculate the remainder */
+ if ((NpxStatus & 0x0400) == 0) {
+ CVTFPHI64(&remainder, &FPRes);
+ CPY64BIT8BIT(&remainder, &little_rem);
+ }
+ /* Restore the caller's rounding mode saved in tag_xor */
+ switch (tag_xor) {
+ case ROUND_NEAREST : HostSetRoundToNearest();
+ break;
+ case ROUND_NEG_INFINITY : HostSetRoundDown();
+ break;
+ case ROUND_POS_INFINITY : HostSetRoundUp();
+ break;
+ case ROUND_ZERO : HostSetRoundToZero();
+ break;
+ }
+ NpxControl &= 0xf3ff;
+ NpxControl |= tag_xor;
+ FPRes = st1_addr->fpvalue * FPRes;
+ if ((NpxStatus & 0x0400) != 0) { /* The incomplete reduction case */
+ FPRes = FPRes * fprem_val;
+ FPRes = TOSPtr->fpvalue - FPRes;
+ } else { /* Complete reduction */
+ FPRes = TOSPtr->fpvalue - FPRes;
+ FlagC0((little_rem&4)?1:0);
+ FlagC3((little_rem&2)?1:0);
+ FlagC1(little_rem&1);
+ }
+ /* Check for an underflow response */
+ if (HostGetUnderflowException() != 0) {
+ NpxStatus |= SW_UE_MASK;
+ if ((NpxControl & CW_UM_MASK) == 0) {
+ AdjustUnderflowResponse();
+ NpxStatus |= SW_ES_MASK;
+ NpxException = TRUE;
+ }
+ }
+ /* But the remainder must have the sign of the original ST! */
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ ((FPHOST *)&(FPRes))->hiword.sign = 1;
+ } else {
+ ((FPHOST *)&(FPRes))->hiword.sign = 0;
+ }
+ /* And restore st1 sign bit if required */
+ if ((st1_addr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ ((FPHOST *)&(st1_addr->fpvalue))->hiword.sign = 1;
+ }
+ CalcTagword(TOSPtr);
+ } else {
+ /* We had a funny thing */
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ } else {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ } else {
+ /* It must be a NaN. */
+ tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
+ Test2NaN(0, TOSPtr, st1_addr);
+ }
+ }
+ return;
+ }
+ /* The logical way to arrange zeroes and infinities is zero first. */
+ if ((tag_or & TAG_ZERO_MASK) != 0) {
+ /* A zero in ST(1) is ALWAYS invalid... */
+ if ((st1_addr->tagvalue & TAG_ZERO_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ }
+ /* The zero must be in ST, the result is what is there... */
+ FlagC0(0);
+ FlagC1(0);
+ FlagC2(0);
+ FlagC3(0);
+ return;
+ }
+ /* OK, it HAS to be infinity */
+ /* An infinity at ST is ALWAYS invalid... */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ }
+ /* An infinity at ST(1) leaves ST untouched */
+ FlagC0(0);
+ FlagC1(0);
+ FlagC2(0);
+ FlagC3(0);
+ }
+}
+
+
+
+/*(
+ * Name : FPTAN
+ * Operation : Compute the value of TAN(ST)
+ * Flags : C1 as per table 15-1, others undefined.
+ * Exceptions : P, U, D, I, IS
+ * Valid range : |ST| < 2**63
+ * Notes : This function has been substantially overhauled
+ since the 80287. It now has a much wider range
+ (it previously had to be 0<ST<(PI/4). In addition,
+ the return value is now really the tan of ST, with
+ a 1 pushed above it on the stack to maintain
+ compatibility with the 8087/80287. Previously the
+ result was a ratio of two values, neither of which
+ could be guaranteed.
+)*/
+
+
+/* FPTAN: ST <- tan(ST), then push 1.0 (8087 compatibility).  Needs a
+   free slot above TOS; an occupied slot is a stack overflow.  Out of
+   range / infinite operands set C2 and leave the stack unchanged. */
+GLOBAL VOID FPTAN IFN0()
+{
+ FPSTACKENTRY *st1_addr;
+
+ /* Clear C1 */
+ FlagC1(0);
+ /* Set C2 to zero */
+ FlagC2(0);
+ st1_addr = StackEntryByIndex(7);
+ /* Make sure that the stack element is free */
+ if ((st1_addr->tagvalue & TAG_EMPTY_MASK) == 0) {
+ WriteIndefinite(TOSPtr);
+ TOSPtr = st1_addr;
+ SignalStackOverflow(TOSPtr);
+ return;
+ }
+ TestUneval(TOSPtr);
+ /* Check if a real value...We won't bother with limit checking */
+ if ((TOSPtr->tagvalue & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ /* We can just write the value straight out */
+ FPRes = tan(TOSPtr->fpvalue);
+ PostCheckOUP();
+ /* The return value could be absolutely anything */
+ CalcTagword(TOSPtr);
+ TOSPtr = st1_addr;
+ CopyFP(TOSPtr, npx_one);
+ } else {
+ /* Some funny bit was set. Check for the possibilities */
+ /* We begin with the most obvious cases... */
+ /* Response to zero is to return zero with same sign */
+ if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+ TOSPtr = st1_addr;
+ CopyFP(TOSPtr, npx_one);
+ return; /* The required result! */
+ }
+ /* We do denorm checking and bit setting ourselves because this */
+ /* reduces the overhead if the thing is masked. */
+ if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ } else {
+ HostClearExceptions();
+ FPRes = tan(TOSPtr->fpvalue);
+ PostCheckOUP();
+ /* The return value could be anything */
+ CalcTagword(TOSPtr);
+ TOSPtr = st1_addr;
+ CopyFP(TOSPtr, npx_one);
+ }
+ return;
+ }
+ /* If the value is outside the acceptable range (including */
+ /* infinity) then we set the C2 flag and leave everything */
+ /* unchanged. */
+ /* Sensible enough really, I suppose */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ FlagC2(1);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
+ MakeNaNQuiet(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ return;
+ }
+ }
+}
+
+
+
+/*(
+ * Name : FRNDINT
+ * Operation : ST <- rounded ST
+ * Flags : C1 as per table 15-1, others undefined.
+ * Exceptions : P, U, D, I, IS
+ * Valid range : All
+ * Notes : On the 80287, a precision exception would be
+ raised if the operand wasn't an integer.
+ I begin by ASSUMING that on the 486 the response
+ is IEEE compliant so no OUP exceptions.
+)*/
+
+
+/* FRNDINT: ST <- ST rounded to integral value under the current
+   rounding mode.  Inexact results set the precision bit (and C1 when
+   rounded up under round-to-nearest); denormals round to 0 or +/-1
+   depending on sign and rounding mode; zeros and infinities pass
+   through untouched. */
+GLOBAL VOID FRNDINT IFN0()
+{
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(TOSPtr);
+ if ((TOSPtr->tagvalue & ~TAG_NEGATIVE_MASK) == 0) {
+ HostClearExceptions();
+ /* We can just write the value straight out */
+ FPRes = npx_rint(TOSPtr->fpvalue);
+ if (FPRes != TOSPtr->fpvalue) {
+ SetPrecisionBit();
+ /* If the rounding mode is "round to nearest" and we've
+ rounded up then we'll set C1 */
+ if (npxRounding == ROUND_NEAREST) {
+ if (TOSPtr->fpvalue < FPRes) {
+ FlagC1(1);
+ }
+ }
+ if ((NpxControl & CW_PM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ }
+ /* It was a real before, it still must be one now. It could */
+ /* be zero possibly. */
+ CalcTagword(TOSPtr);
+ } else {
+ /* Lets do the most probable cases first... */
+ /* If it's a zero or infinity, we do nothing. */
+ if ((TOSPtr->tagvalue & (TAG_ZERO_MASK | TAG_INFINITY_MASK)) == 0) {
+ /* Lets check for a denormal */
+ if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
+ SetPrecisionBit();
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ } else {
+ /* The result of rounding a denorm is dependent on */
+ /* its sign and the prevailing rounding mode */
+ switch (npxRounding) {
+ case ROUND_ZERO :
+ case ROUND_NEAREST :
+ TOSPtr->tagvalue &= TAG_NEGATIVE_MASK;
+ TOSPtr->tagvalue |= TAG_ZERO_MASK;
+
+ break;
+ case ROUND_NEG_INFINITY :
+ /* Negative denorm rounds down to -1.0 */
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ memset((char*)TOSPtr,0,sizeof(FPSTACKENTRY));
+ TOSPtr->fpvalue = -1.0;
+ TOSPtr->tagvalue = TAG_NEGATIVE_MASK;
+ } else {
+ TOSPtr->tagvalue &= TAG_NEGATIVE_MASK;
+ TOSPtr->tagvalue |= TAG_ZERO_MASK;
+ }
+ break;
+ case ROUND_POS_INFINITY :
+ /* Positive denorm rounds up to +1.0 */
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) == 0) {
+ memset((char*)TOSPtr,0,sizeof(FPSTACKENTRY));
+ TOSPtr->fpvalue = 1.0;
+ TOSPtr->tagvalue = 0;
+ } else {
+ TOSPtr->tagvalue &= TAG_NEGATIVE_MASK;
+ TOSPtr->tagvalue |= TAG_ZERO_MASK;
+ }
+ break;
+ }
+ }
+ return;
+ }
+ /* It was one of the really wacky bits... */
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
+ MakeNaNQuiet(TOSPtr);
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(TOSPtr);
+ return;
+ }
+ }
+ }
+}
+
+
+
+
+/*(
+Name : FSTCW
+Function : Write the FPU control word to memory
+Operation : DEST <- Cw
+Flags : All undefined.
+Exceptions : None - but unmasking previously masked exceptions will
+ cause the unmasked exception to be triggered if the
+ matching bit is set in the status word.
+Valid range : N/A
+*/
+
+
/*
 * FSTCW: copy the emulated FPU control word to memory.
 *
 * memPtr - destination supplied by the instruction decoder.
 *
 * NOTE(review): both branches store through an IU32 pointer while the
 * value is cast to IU16, so a full 32 bits are written with the top
 * half zeroed. This assumes memPtr addresses at least 4 writable
 * bytes - confirm against the callers (compare FSTSW, which uses an
 * IU16 store for the to-AX case).
 */
GLOBAL VOID FSTCW IFN1(VOID *, memPtr)
{
    if (NpxDisabled)
    {
        /* UIF has told us to pretend we do not have an NPX */
        /* An absent coprocessor reads back as all-ones. */
        *(IU32 *)memPtr = (IU16)0xFFFF;
    }
    else
    {
        *(IU32 *)memPtr = (IU16)NpxControl;
    }
}
+
+
+
+/*(
+Name : FRSTOR
+Function : Reload the FPU state from memory.
+Operation : FPU state <- SRC
+Flags : As loaded.
+Exceptions : None - but unmasking previously masked exceptions will
+ cause the unmasked exception to be triggered if the
+ matching bit is set in the status word.
+Valid range : N/A
+*/
+
+
/*
 * FRSTOR: reload the complete FPU state (environment + all eight
 * stack registers) from the 94/108-byte memory image at memPtr.
 *
 * The eight 80-bit register images start at offset 70, ST(0) first;
 * we walk them from ST(7)'s slot backwards, reloading each non-empty
 * entry through FLD so it is re-tagged on the way in.
 */
GLOBAL VOID FRSTOR IFN1(VOID *, memPtr)
{
    IU8 *FPPtr;
    IU32 i;
    /* First. load the control, status, tagword regs. etc. */
    OpFpuRestoreFpuState(memPtr, 80);
    FPPtr = (IU8 *)((IU8 *)memPtr+70);
    FPtype = M80R;
    for ( i=8; i--; )
    {
        if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) == 0) {
            /* We have to do a bit of fiddling to make FLD happy */
            /* NOTE(review): the entry is marked empty and TOSPtr
             * advanced one slot so that FLD's push lands back on the
             * entry being restored - confirm against FLD's push
             * behaviour. */
            TOSPtr->tagvalue = TAG_EMPTY_MASK;
            TOSPtr = StackEntryByIndex(1);
            FLD(FPPtr);
        }
        TOSPtr = StackEntryByIndex(1);
        FPPtr -= 10;    /* previous 80-bit register image */
    }
    /* Finally, check to see if any previously unmasked exceptions */
    /* are now needed to go off. Do this by anding the "triggered" bits in */
    /* NpxStatus with the one's complement of the "masked" bits in NpxControl. */
    if (((NpxStatus & 0x3f) & (~(NpxControl & 0x3f))) != 0) {
        NpxStatus |= SW_ES_MASK;
        DoNpxException();
    }
}
+
+
+
+/*(
+Name : FSAVE
+Function : Write the FPU state to memory.
+Operation : DEST <- FPU STATE
+Flags : All cleared.
+Exceptions : None.
+Valid range : N/A
+*/
+
+GLOBAL VOID FSAVE IFN1(VOID *, memPtr)
+{
+ IU8 *FPPtr;
+ IU32 i;
+
+ OpFpuStoreFpuState(memPtr, 80);
+ FPPtr = (IU8 *)((IU8 *)memPtr+70);
+ /* Now store out the eight values... */
+ FPtype = M80R;
+ FST(FPPtr);
+ for ( i=7; i--; )
+ {
+ FPPtr -= 10; /* Go back to the next entry */
+ TOSPtr = StackEntryByIndex(1);
+ FST(FPPtr);
+ }
+ /* Finally, reset the FPU... */
+ FINIT();
+}
+
+
+
+/*(
+Name : FSCALE
+Function : Scale up ST by a factor involving ST(1)
+Operation : ST <- ST * 2**ST(1)
+Flags : C1 as per table 15-1. Others undefined.
+Exceptions : P, U, O, D, I, IS
+Valid range : Any
+)*/
+
+
/*
 * FSCALE: ST <- ST * 2**trunc(ST(1)).
 *
 * ST(1) is truncated toward zero before being used as the power of
 * two. Non-real operand combinations (zero/infinity mixes) are
 * resolved through the FscaleTable lookup documented below.
 */
GLOBAL VOID FSCALE IFN0()
{
    FPSTACKENTRY *st1_addr;

    st1_addr = StackEntryByIndex(1);
    /* Clear C1 */
    FlagC1(0);
    TestUneval(TOSPtr);
    TestUneval(st1_addr);
    tag_or = (TOSPtr->tagvalue | st1_addr->tagvalue);
    /* First, check if the values are real. If so, we can proceed. */
    if ((tag_or & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
        /* First, check for the denormal case. */
        if ((tag_or & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
                return;
            }
        }
        /* OK. ST(1) has to be rounded to an integer. */
        /* We want a 'chop' function */
        if (st1_addr->fpvalue > 0.0) {
            FPRes = floor(st1_addr->fpvalue);
        } else {
            FPRes = ceil(st1_addr->fpvalue);
        }
        HostClearExceptions();
        FPRes = pow(2.0, FPRes);
        FPRes = TOSPtr->fpvalue * FPRes;
        PostCheckOUP();
        /* Return value could be anything */
        CalcTagword(TOSPtr);
    } else {
        /* A funny thing happened on the way to the answer */
        if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
            if ((tag_or & TAG_EMPTY_MASK) != 0) {
                SignalStackUnderflow(TOSPtr);
            } else {
                if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
                    SignalIndefinite(TOSPtr);
                } else {
                    /* It must be a NaN. */
                    tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
                    Test2NaN(0, TOSPtr, st1_addr);
                }
            }
            return;
        }
        /* The rules for scaling combinations of zeroes, reals and infinities, both */
        /* positive and negative, are so complex that I don't intend to do lots of */
        /* logic to figure them out. Basically, there are six options: */
        /* 1. Leave the TOS alone */
        /* 2. +Infinity */
        /* 3. +0 */
        /* 4. -Infinity */
        /* 5. -0 */
        /* 6. Raise Invalid operation exception */
        /* */
        /* TOS ST(1) RESULT */
        /* I S Z I S Z */
        /* 0 0 0 0 0 1 1 */
        /* 0 0 0 0 1 1 1 */
        /* 0 0 0 1 0 0 2 */
        /* 0 0 0 1 1 0 3 */
        /* 0 0 1 0 0 0 1 */
        /* 0 0 1 0 0 1 1 */
        /* 0 0 1 0 1 0 1 */
        /* 0 0 1 0 1 1 1 */
        /* 0 0 1 1 0 0 6 */
        /* 0 0 1 1 1 0 1 */
        /* 0 1 0 0 0 1 1 */
        /* 0 1 0 0 1 1 1 */
        /* 0 1 0 1 0 0 4 */
        /* 0 1 0 1 1 0 5 */
        /* 0 1 1 0 0 0 1 */
        /* 0 1 1 0 0 1 1 */
        /* 0 1 1 0 1 0 1 */
        /* 0 1 1 0 1 1 1 */
        /* 0 1 1 1 0 0 6 */
        /* 0 1 1 1 1 0 1 */
        /* 1 0 0 0 0 0 1 */
        /* 1 0 0 0 0 1 1 */
        /* 1 0 0 0 1 0 1 */
        /* 1 0 0 0 1 1 1 */
        /* 1 0 0 1 0 0 6 */
        /* 1 1 0 0 0 0 1 */
        /* 1 1 0 0 0 1 1 */
        /* 1 1 0 0 1 0 1 */
        /* 1 1 0 0 1 1 1 */
        /* 1 1 0 1 0 0 1 */
        /* 1 1 0 1 1 0 6 */
        /* */
        /* All other combinations are impossible. This can be done as a look up */
        /* table with an enumerated type. */
        /* Index = (TOS I/S/Z bits << 3) | ST(1) I/S/Z bits. */
        tag_or = (TOSPtr->tagvalue & 7);
        tag_or <<= 3;
        tag_or |= (st1_addr->tagvalue & 7);
        tag_or = FscaleTable[tag_or];
        if ((tag_or & TAG_FSCALE_MASK) != 0) {
            /* Table entry says "invalid operation" (option 6). */
            if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
                SignalIndefinite(TOSPtr);
            }
        } else {
            /* Table entry is the result tagword itself. */
            TOSPtr->tagvalue = tag_or;
        }
    }
}
+
+
+
+/*(
+Name : FSIN
+Function : Calculate the sine of ST
+Operation : ST <- SINE(ST)
+Flags : C1, C2 as per table 15-2. C0 and C3 undefined.
Exceptions : P, U, D, I, IS
+Valid range : |ST| < 2**63.
+)*/
+
+
/*
 * FSIN: ST <- sin(ST), computed with the host's sin().
 *
 * C1 and C2 are cleared on entry; C2 is re-raised for an infinite
 * operand (out-of-range indication, operand left unchanged).
 */
GLOBAL VOID FSIN IFN0()
{
    /* Clear C1 */
    FlagC1(0);
    /* Clear C2 */
    FlagC2(0);
    TestUneval(TOSPtr);
    if ((TOSPtr->tagvalue & ~TAG_NEGATIVE_MASK) == 0) {
        /* Plain (possibly negative) real: the common case. */
        HostClearExceptions();
        /* We can just write the value straight out */
        FPRes = sin(TOSPtr->fpvalue);
        PostCheckOUP();
        /* Return value must be in the range -1 to +1 */
        CalcTagword(TOSPtr);
    } else {
        /* Lets do the most probable cases first... */
        /* A zero returns exactly the same thing */
        if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
            return;
        }
        /* Lets check for a denormal */
        if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                /* Unmasked denormal: fault without delivering a result. */
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
            } else {
                HostClearExceptions();
                FPRes = sin(TOSPtr->fpvalue);
                PostCheckOUP();
                /* Return value must be in the range -1 to +1 */
                CalcTagword(TOSPtr);
            }
            return;
        }
        /* Or it could possibly be infinity... */
        /* For this, the C2 bit is set and the result remains */
        /* unchanged. */
        if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
            FlagC2(1);
            return;
        }
        /* It was one of the really wacky bits... */
        if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
            SignalStackUnderflow(TOSPtr);
            return;
        }
        if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
            MakeNaNQuiet(TOSPtr);
            return;
        }
        if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
            SignalIndefinite(TOSPtr);
            return;
        }
    }
}
+
+
+
+/*(
+Name : FSINCOS
+Function : Calculate the sine and cosine of ST
+Operation : TEMP <-COSINE(ST); ST <- SINE(ST); PUSH; ST <- TEMP
+Flags : C1, C2 as per table 15-2. C0 and C3 undefined.
Exceptions : P, U, D, I, IS
+Valid range : |ST| < 2**63.
+)*/
+
+
/*
 * FSINCOS: push cos(ST) after replacing ST with sin(ST); on exit the
 * new ST holds the cosine and ST(1) the sine.
 *
 * st1_addr is the slot the push will occupy (index 7 relative to the
 * current top); it must be empty or a stack overflow is signalled.
 */
GLOBAL VOID FSINCOS IFN0()
{
    FPSTACKENTRY *st1_addr;

    /* Clear C1 */
    FlagC1(0);
    /* Clear C2 */
    FlagC2(0);
    st1_addr = StackEntryByIndex(7);
    /* First, check that this one is empty. */
    if ((st1_addr->tagvalue & TAG_EMPTY_MASK) == 0) {
        WriteIndefinite(TOSPtr);
        TOSPtr = st1_addr;
        SignalStackOverflow(TOSPtr);
        return;
    }
    TestUneval(TOSPtr);
    if ((TOSPtr->tagvalue & ~TAG_NEGATIVE_MASK) == 0) {
        HostClearExceptions();
        /* We can just write the value straight out */
        FPRes = cos(TOSPtr->fpvalue);
        /* The range for a cosine is -1 through to +1. */
        CalcTagword(st1_addr);
        /* I can write out the SINE myself, since as we are */
        /* writing to the stack, even an unmasked U or P */
        /* cannot stop delivery of the result. */
        /* The range for a sine is -1 through to +1. */
        FPRes = sin(TOSPtr->fpvalue);
        CalcTagword(TOSPtr);
        TOSPtr = st1_addr;
        PostCheckOUP();
        return;
    } else {
        /* Lets do the most probable cases first... */
        /* A zero returns exactly the same thing */
        if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
            /* The sine of zero is zero so just push the stack */
            TOSPtr = st1_addr;
            /* Now write out plus one */
            /* (cos(0) == +1, taken from the npx_one constant) */
            CopyFP(TOSPtr, npx_one);
            return;
        }
        /* Lets check for a denormal */
        if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                /* Unmasked denormal: fault, no result delivered. */
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
            } else {
                HostClearExceptions();
                /* We can just write the value straight out */
                FPRes = cos(TOSPtr->fpvalue);
                /* The range for a cos is -1 through to +1 */
                CalcTagword(st1_addr);
                /* I can write out the SINE myself, since as we are */
                /* writing to the stack, even an unmasked U or P */
                /* cannot stop delivery of the result. */
                /* The range for a sine is -1 through to +1 */
                FPRes = sin(TOSPtr->fpvalue);
                CalcTagword(TOSPtr);
                TOSPtr = st1_addr;
                PostCheckOUP();
            }
            return;
        }
        /* Or it could possibly be infinity... */
        /* For this, the C2 bit is set and the result remains */
        /* unchanged. */
        if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
            FlagC2(1);
            return;
        }
        /* It was one of the really wacky bits... */
        if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
            SignalStackUnderflow(TOSPtr);
            return;
        }
        if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
            MakeNaNQuiet(TOSPtr);
            return;
        }
        if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
            SignalIndefinite(TOSPtr);
            return;
        }
    }
}
+
+
+
+/*(
+Name : FSQRT
+Function : Calculate the square root of ST
+Operation : ST <- SQRT(ST)
+Flags : C1 as per table 15-1. Others undefined.
Exceptions : P, D, I, IS
+Valid range : ST >= -0.0
+)*/
+
+
/*
 * FSQRT: ST <- sqrt(ST) via the host's sqrt().
 *
 * Zeroes and +infinity pass through unchanged; a negative real (other
 * than -0) produces indefinite via SignalIndefinite.
 */
GLOBAL VOID FSQRT IFN0()
{

    /* Clear C1 */
    FlagC1(0);
    TestUneval(TOSPtr);
    if (TOSPtr->tagvalue == 0) {
        /* Positive real: the common case. */
        HostClearExceptions();
        /* We can just write the value straight out */
        FPRes = sqrt(TOSPtr->fpvalue);
        PostCheckOUP();
        TOSPtr->fpvalue = FPRes;
        /* The tagword can't have changed! */
        return;
    } else {
        /* Lets do the most probable cases first... */
        /* A zero returns exactly the same thing */
        if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
            return;
        }
        if ((TOSPtr->tagvalue & TAG_NAN_MASK) != 0) {
            /* A quiet NaN passes through; a signalling NaN is
             * quietened (raising invalid inside MakeNaNQuiet). */
            if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
                MakeNaNQuiet(TOSPtr);
            }
            return;
        }
        /* Having taken care of that case, lets check for negative... */
        if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
            SignalIndefinite(TOSPtr);
            return;
        }
        /* Lets check for a denormal */
        if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
            } else {
                HostClearExceptions();
                FPRes = sqrt(TOSPtr->fpvalue);
                PostCheckOUP();
                /* It might not be a denorm anymore */
                CalcTagword(TOSPtr);
            }
            return;
        }
        /* Or it could possibly be infinity...This just returns. */
        if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
            return;
        }
        /* It was one of the really wacky bits... */
        if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
            SignalStackUnderflow(TOSPtr);
            return;
        }
        if ((TOSPtr->tagvalue & TAG_UNSUPPORTED_MASK) != 0) {
            SignalIndefinite(TOSPtr);
            return;
        }
    }
}
+
+
/* CheckOUPForIntel: This is a special version of the PostCheckOUP
routine that is designed for use in situations where the result
is to be written to Intel memory space. It just looks at the
exception bits and sets the appropriate status bits; it doesn't write
the value back or anything like that. */
+
+
/*
 * CheckOUPForIntel: inspect the host's overflow/underflow/precision
 * exception flags after a conversion destined for Intel memory.
 *
 * Communicates with its caller through globals: tag_or is set to 1
 * when an unmasked O or U exception means the result must NOT be
 * written out (callers test tag_or == 0 before storing). An unmasked
 * O/U exception is raised here; an unmasked P exception only arms
 * NpxException so the caller can deliver the result first.
 */
LOCAL VOID CheckOUPForIntel IFN0()
{
    tag_or=0;    /* Prime tag_or */
    if (HostGetOverflowException() != 0) {
        NpxStatus |= SW_OE_MASK;    /* Set the overflow bit */
        /* For the masked overflow case, the result delivered by */
        /* the host will be correct, provided it is IEEE compliant. */
        if ((NpxControl & CW_OM_MASK) == 0) {
            NpxStatus |= SW_ES_MASK;
            NpxException = TRUE;
            tag_or = 1;
        }
    } else {
        /* Overflow and underflow being mutually exclusive... */
        if (HostGetUnderflowException() != 0) {
            NpxStatus |= SW_UE_MASK;
            if ((NpxControl & CW_UM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                NpxException = TRUE;
                tag_or=1;
            }
        }
    }
    if (HostGetPrecisionException() != 0) {
        SetPrecisionBit();
        if ((NpxControl & CW_PM_MASK) == 0) {
            NpxStatus |= SW_ES_MASK;
            NpxException = TRUE;
            /* An unmasked precision exception cannot prevent
               delivery of the result */
        }
    }
    /* Only call for overflow or underflow */
    if (NpxException && (tag_or == 1)) {
        NpxException = FALSE;
        DoNpxException();
    }
}
+
+
+
/*(
Name : FST{P}
Function : Copy ST to the specified location
Operation : DEST <- ST(0); IF instruction is FSTP THEN pop ST; FI
Flags : C1 as per table 15-1. Others undefined.
Exceptions : For stack or extended-real, IS.
 For single or double-real: P, U, O, D, I, IS
Valid range : N/A
)*/
+
+
/*
 * FST/FSTP: store ST to the destination selected by the global FPtype
 * (FPSTACK register, M80R/M64R/M32R memory real), popping afterwards
 * when POPST is set.
 *
 * memPtr is either an Intel memory pointer or, for FPSTACK, a pointer
 * to an IU16 register index. DoAPop can be cleared by exception paths
 * to suppress the pop; a pending unmasked precision exception
 * (NpxException, armed by CheckOUPForIntel) fires after the pop.
 */
GLOBAL VOID FST IFN1(VOID *, memPtr)
{
    /* Clear C1 */
    FlagC1(0);
    if (POPST) {
        DoAPop=TRUE;
    }
    /* Resolve any lazily-converted (unevaluated) representation. */
    if ((TOSPtr->tagvalue & UNEVALMASK) != 0) {
        if ((TOSPtr->tagvalue & TAG_BCD_MASK) != 0) {
            ConvertBCD(TOSPtr);
        } else {
            /* Doesn't apply for FPStack or M80R types */
            if ((FPtype == M32R) || (FPtype == M64R)) {
                ConvertR80(TOSPtr);
            }
        }
    }
    /* Fast path: an R80 image, a plain real/denormal, or a pure
     * stack-to-stack copy. */
    if ( ((TOSPtr->tagvalue & TAG_R80_MASK) != 0)
      || ((TOSPtr->tagvalue & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0)
      || (FPtype == FPSTACK)) {
        if (FPtype == FPSTACK) {
            /* check for empty here */
            if (TOSPtr->tagvalue & TAG_EMPTY_MASK) {
                NpxStatus |= SW_IE_MASK|SW_SF_MASK;
                if ((NpxControl & CW_IM_MASK) == 0) {
                    NpxStatus |= SW_ES_MASK;
                    DoNpxException();
                    return;
                }
                WriteIndefinite(StackEntryByIndex(*(IU16 *)memPtr));
            } else
                /* The invalid operation doesn't apply to non-empty */
                /* stack locations. We carry on regardless. */
                CopyFP(StackEntryByIndex(*(IU16 *)memPtr), TOSPtr);
        } else {
            if (FPtype == M80R) {
                /* 80-bit store never rounds; convert host format to
                 * R80 only if we don't already hold an R80 image. */
                if ((TOSPtr->tagvalue & TAG_R80_MASK) == 0) {
                    CVTFPHR80(TOSPtr);
                    WriteFP80ToIntel(memPtr, &FPTemp);
                } else {
                    WriteFP80ToIntel(memPtr, TOSPtr);
                }
            } else {
                /* First, check for the denormal case... */
                if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
                    NpxStatus |= SW_DE_MASK;
                    if ((NpxControl & CW_DM_MASK) == 0) {
                        NpxStatus |= SW_ES_MASK;
                        DoNpxException();
                        return;
                    }
                }
                HostClearExceptions();
                /* The result of the conversion should be written to FPTemp. */
                if (FPtype == M32R) {
                    *(float *)&(FPTemp.fpvalue) = (float)TOSPtr->fpvalue;
                    /* Our host MUST have double precision, so we will have to */
                    /* test for problems caused by the conversion... */
                    CheckOUPForIntel();
                    if (tag_or == 0) {
                        WriteFP32ToIntel(memPtr, &FPTemp);
                    }
                }
                if (FPtype == M64R) {
                    *(DOUBLE *)&(FPTemp.fpvalue) = (DOUBLE)TOSPtr->fpvalue;
                    /* If we are dealing with a 64-bit host, then the J-code for */
                    /* the above is nothing at all, and we don't need to do any */
                    /* testing, but if the host precision is, say 80-bit, then */
                    /* we do! Note that this doesn't use the @if format in order */
                    /* to avoid generating different J-code for different hosts... */
                    CheckOUPForIntel();
                    if (tag_or == 0) {
                        WriteFP64ToIntel(memPtr, &FPTemp);
                    }
                }
            }
        }
    } else {
        /* Test for funny values */
        if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
            /* In this case, we'll allow the casting to be done for us! */
            WriteZeroToIntel(memPtr, TOSPtr->tagvalue & TAG_NEGATIVE_MASK);
        } else if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
            /* Storing infinity to a 32/64-bit real raises overflow. */
            if ((FPtype == M32R) || (FPtype == M64R)) {
                NpxStatus |= SW_OE_MASK;
                if ((NpxControl & CW_OM_MASK) == 0) {
                    NpxStatus |= SW_ES_MASK;
                    DoNpxException();
                    return;
                }
            }
            WriteInfinityToIntel(memPtr, TOSPtr->tagvalue & TAG_NEGATIVE_MASK);
        } else if ((TOSPtr->tagvalue & TAG_NAN_MASK) != 0) {
            if ((TOSPtr->tagvalue & TAG_SNAN_MASK) != 0) {
                /* Signal invalid for sNaN */
                if (((FPtype == M32R) || (FPtype == M64R))) {
                    NpxStatus |= SW_IE_MASK;
                    if ((NpxControl & CW_IM_MASK) == 0) {
                        NpxStatus |= SW_ES_MASK;
                        DoNpxException();
                        return;
                    }
                }
            }
            WriteNaNToIntel(memPtr, TOSPtr);
        } else if ( (TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0 ) {
            /* Empty register: invalid operation + stack fault. */
            NpxStatus |= (SW_IE_MASK | SW_SF_MASK);
            FlagC1(0);
            if ((NpxControl & CW_IM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
                return;
            }
            WriteIndefiniteToIntel(memPtr);
        } else { /* Must be unsupported. */
            if (FPtype == M80R) {
                /* unsupported: Write back the unresolved string */
                if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
                    ((FP80 *)&(TOSPtr->fpvalue))->sign_exp.sign = 1;
                } else {
                    ((FP80 *)&(TOSPtr->fpvalue))->sign_exp.sign = 0;
                }
                WriteFP80ToIntel(memPtr, TOSPtr);
            } else {
                NpxStatus |= SW_IE_MASK;
                if ((NpxControl & CW_IM_MASK) == 0) {
                    NpxStatus |= SW_ES_MASK;
                    DoNpxException();
                    return;
                }
                WriteIndefiniteToIntel(memPtr);
            }
        }
    }
    if (POPST) {
        if (DoAPop == TRUE) {
            PopStack();
        }
    }
    /* Check for the case of an unmasked precision exception */
    if (NpxException) {
        NpxException = FALSE;
        DoNpxException();
    }
}
+
+
+
+/*(
+Name : FSTENV
+Function : Store the FPU environment
+Operation : DEST <- FPU environment
+Flags : All undefined.
+Exceptions : None
+Valid range : N/A
+*/
+
+
/*
 * FSTENV: store the FPU environment (control/status/tag words and
 * pointers, no register stack) to memPtr, then mask all exceptions.
 */
GLOBAL VOID FSTENV IFN1(VOID *, memPtr)
{
    /* First. load the control, status, tagword regs. etc. */
    OpFpuStoreFpuState(memPtr,0);
    /* Then set all the exceptions to be masked */
    /* (0x3f covers the six exception-mask bits of the control word) */
    NpxControl |= 0x0000003f;
}
+
+
+/*(
+Name : FSTSW
+Function : Write the FPU status word to memory
+Operation : DEST <- SW
+Flags : All undefined.
+Exceptions : None
+Valid range : N/A
+*/
+
+
/*
 * FSTSW: store the FPU status word, either into AX (toAX true, 16-bit
 * store) or to memory (32-bit store).
 *
 * NOTE(review): when NpxDisabled, the to-AX path writes 0xFFFF (no
 * NPX present) but the memory path still writes the live NpxStatus -
 * this asymmetry looks deliberate ("host format") but should be
 * confirmed against the UIF/no-NPX callers.
 */
GLOBAL VOID FSTSW IFN2(VOID *, memPtr, BOOL, toAX)
{
    /* Fold the emulator's internal state into Intel status-word form. */
    GetIntelStatusWord();

    if (NpxDisabled)
    {
        /* UIF has told us to pretend we do not have an NPX */

        if (toAX) {
            *(IU16 *)memPtr = 0xFFFF;
        } else {
            /* Write it out host format */

            *(IU16 *)memPtr = (IU16)NpxStatus;
        }
    } else {
        if (toAX) {
            *(IU16 *)memPtr = (IU16)NpxStatus;
        } else {
            /* Memory form stores 32 bits (upper half zero). */
            *(IU32 *)memPtr = (IU32)NpxStatus;
        }
    }
}
+
+/*(
+Name : FSUB
+Function : Subtract one number from the other
+Operation : Dest <- Src1 - Src2 or Dest <- Src2 - Src1
+Flags : C1 as per table 15-1. C0, C2 and C3 undefined
+Exceptions : P, U, O, D, I, IS
+Valid range : Any
+Notes : The REVERSE control variable determines which of the
+ two forms of the operation is used. Popping after a
+ successful execution is controlled by POPST.
+)*/
+
+
+GLOBAL VOID FSUB IFN3(IU16, destIndex, IU16, src1Index, VOID *, src2)
+{
+ IU16 src2Index;
+
+ LoadValue(src2, &src2Index);
+ if (POPST) {
+ DoAPop=TRUE;
+ }
+ GenericSubtract(destIndex, REVERSE?src2Index:src1Index, REVERSE?src1Index:src2Index);
+ if (POPST) {
+ if (DoAPop) {
+ PopStack();
+ }
+ }
+}
+
+
+/*(
+Name : GenericSubtract
+Function : To return dest <- src1-src2
+)*/
+
+
/*
 * GenericSubtract: dest <- src1 - src2 (all three are stack indices).
 *
 * Handles the full matrix of special operands: empty (stack
 * underflow), unsupported (indefinite), NaN propagation (Test2NaN),
 * denormals (DE exception, pop vetoed when unmasked), infinities and
 * signed zeroes per the IEEE subtraction sign rules. Uses the shared
 * scratch globals tag_or / tag_xor.
 */
LOCAL VOID GenericSubtract IFN3(IU16, destIndex, IU16, src1Index, IU16, src2Index)
{
    FPSTACKENTRY *src1_addr;
    FPSTACKENTRY *src2_addr;

    src1_addr = StackEntryByIndex(src1Index);
    src2_addr = StackEntryByIndex(src2Index);

    /* Clear C1 */
    FlagC1(0);
    TestUneval(src1_addr);
    TestUneval(src2_addr);
    tag_or = (src1_addr->tagvalue | src2_addr->tagvalue);
    /* If the only tagword bit set is negative then just proceed */
    if ((tag_or & ~TAG_NEGATIVE_MASK) == 0) {
        HostClearExceptions();
        FPRes=src1_addr->fpvalue - src2_addr->fpvalue;
        /* Reuse one of the above to calculate the destination */
        src1_addr = StackEntryByIndex(destIndex);
        PostCheckOUP();
        /* Could be anything */
        CalcTagword(src1_addr);
    } else {
        /* Some funny bit was set. Check for the possibilities */
        if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
            if ((tag_or & TAG_EMPTY_MASK) != 0) {
                src1_addr = StackEntryByIndex(destIndex);
                SignalStackUnderflow(src1_addr);
            } else {
                if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
                    src1_addr = StackEntryByIndex(destIndex);
                    SignalIndefinite(src1_addr);
                } else {
                    /* Well, I suppose it has to be the NaN case... */
                    /* Calculate the xor of the tagwords */
                    tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
                    Test2NaN(destIndex, src1_addr, src2_addr);
                }
            }
            return;
        }
        if ((tag_or & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
                /* Veto any pending pop in the caller (FSUB). */
                DoAPop = FALSE;
                return;
            } else {
                if ((tag_or & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
                    /* OK to proceed */
                    HostClearExceptions();
                    FPRes=src1_addr->fpvalue - src2_addr->fpvalue;
                    /* Reuse one of the above to calculate the destination */
                    src1_addr = StackEntryByIndex(destIndex);
                    PostCheckOUP();
                    /* Could be anything */
                    CalcTagword(src1_addr);
                    return;
                }
                /* Denormal mixed with zero/infinity falls through to
                 * the special-value handling below. */
            }
        }
        tag_xor = (src1_addr->tagvalue ^ src2_addr->tagvalue);
        /* Check for infinity as it has higher precendence than zero. */
        if ((tag_or & TAG_INFINITY_MASK) != 0) {
            if ((tag_xor & TAG_INFINITY_MASK) == 0) {
                /* Have they the same sign? */
                if ((tag_xor & TAG_NEGATIVE_MASK) == 0) {
                    /* They are both the same sign infinity. This is invalid. */
                    src1_addr = StackEntryByIndex(destIndex);
                    SignalIndefinite(src1_addr);
                } else {
                    /* If of different sign then src1 is the answer */
                    src2_addr = StackEntryByIndex(destIndex);
                    src2_addr->tagvalue = src1_addr->tagvalue;
                }
            } else {
                /* Only one is infinity. If src1 in infinity, then the result */
                /* is the same. If src2 is infinity, then the result is an */
                /* infinity of opposite sign. */
                tag_or = src2_addr->tagvalue;
                src2_addr = StackEntryByIndex(destIndex);
                if ((src1_addr->tagvalue & TAG_INFINITY_MASK) != 0) {
                    src2_addr->tagvalue = src1_addr->tagvalue;
                } else {
                    src2_addr->tagvalue = tag_or ^ TAG_NEGATIVE_MASK;
                }
            }
            return;
        }
        /* Check for the case of zero... This is very likely */
        if ((tag_or & TAG_ZERO_MASK) != 0) {
            if ((tag_xor & TAG_ZERO_MASK) != 0) {
                /* Only one zero. */
                if ((src1_addr->tagvalue & TAG_ZERO_MASK) != 0) {
                    /* If src1 is zero, -src2 is result */
                    src1_addr = StackEntryByIndex(destIndex);
                    CopyFP(src1_addr, src2_addr);
                    /* Flip both the tag sign bit and the host FP sign. */
                    src1_addr->tagvalue ^= TAG_NEGATIVE_MASK;
                    ((FPHOST *)&(src1_addr->fpvalue))->hiword.sign ^= 1;
                } else {
                    /* If src2 is zero, src1 is result. */
                    src2_addr = StackEntryByIndex(destIndex);
                    CopyFP(src2_addr, src1_addr);
                }
            } else {
                /* Both are zeros. Do they have the same sign? */
                src2_addr = StackEntryByIndex(destIndex);
                if ((tag_xor & TAG_NEGATIVE_MASK) != 0) {
                    /* No, they don't - the result is src1 */
                    src2_addr->tagvalue = src1_addr->tagvalue;
                } else {
                    /* Yes, they do... */
                    /* Same-signed zero difference: sign depends on
                     * the rounding mode (IEEE 754). */
                    if (npxRounding == ROUND_NEG_INFINITY) {
                        src2_addr->tagvalue = (TAG_ZERO_MASK | TAG_NEGATIVE_MASK);
                    } else {
                        src2_addr->tagvalue = TAG_ZERO_MASK;
                    }
                }
            }
            return;
        }
    }
}
+
+
+
+/*(
+Name : FTST
+Function : Compare ST against 0.0
+Operation : Set C023 on result of comparison
+Flags : C1 as per table 15-1. C0, C2 and C3 as result of comparison.
+Exceptions : D, I, IS
+Valid range : Any
+)*/
+
+
+GLOBAL VOID FTST IFN0()
+{
+ /* Clear C1 */
+ FlagC1(0);
+ TestUneval(TOSPtr);
+ if ((TOSPtr->tagvalue & ~((TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK) | TAG_INFINITY_MASK)) == 0) {
+ /* First, check for the denormal case... */
+ if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ }
+ FlagC2(0);
+ FlagC3(0);
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ /* ST is less than zero */
+ FlagC0(1);
+ } else {
+ /* ST is greater than zero */
+ FlagC0(0);
+ }
+ } else {
+ if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+ FlagC0(0);
+ FlagC2(0);
+ FlagC3(1);
+ } else {
+ /* For anything else the result is "unordered" */
+ FlagC0(1);
+ FlagC2(1);
+ FlagC3(1);
+ NpxStatus |= SW_IE_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ }
+ }
+ }
+}
+
+
+/*(
+Name : FXAM
+Function : Report on the type of object in ST
+Operation : Set C0123 on result of comparison
+Flags : C0, C1, C2 and C3 as required.
+Exceptions : None
+Valid range : Any
+)*/
+
+
+GLOBAL VOID FXAM IFN0()
+{
+ TestUneval(TOSPtr);
+ tag_or = TOSPtr->tagvalue;
+ if ((tag_or & TAG_NEGATIVE_MASK) == 0) {
+ FlagC1(0);
+ } else {
+ FlagC1(1);
+ tag_or &= ~TAG_NEGATIVE_MASK;
+ }
+ tag_or &= ~TAG_SNAN_MASK;
+ /* This gets rid of all the confusing bits... */
+ /* There is now only one bit set or none at all... */
+ if (tag_or == 0) {
+ FlagC0(0);
+ FlagC2(1);
+ FlagC3(0);
+ return;
+ }
+ if ((tag_or & TAG_ZERO_MASK) != 0) {
+ FlagC0(0);
+ FlagC2(0);
+ FlagC3(1);
+ return;
+ }
+ if ((tag_or & TAG_INFINITY_MASK) != 0) {
+ FlagC0(1);
+ FlagC2(1);
+ FlagC3(0);
+ return;
+ }
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ FlagC0(0);
+ FlagC2(1);
+ FlagC3(1);
+ return;
+ }
+ if ((tag_or & TAG_NAN_MASK) != 0) {
+ FlagC0(1);
+ FlagC2(0);
+ FlagC3(0);
+ return;
+ }
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ FlagC0(0);
+ FlagC2(0);
+ FlagC3(0);
+ return;
+ }
+ /* MUST be empty */
+ FlagC0(1);
+ FlagC2(0);
+ FlagC3(1);
+}
+
+
+/*(
+Name : FXCH
+Function : Swap the contents of two stack registers.
+Operation : TEMP <- ST; ST <- DEST; DEST <- TEMP
+Flags : C1 as per table 15-1. Others undefined
+Exceptions : IS
+Valid range : Any
+Notes : If either of the registers is tagged empty then it is
+ loaded with indefinite and the exchange performed.
+)*/
+
+
+GLOBAL VOID FXCH IFN1(IU16, destIndex)
+{
+ FPSTACKENTRY *dest_addr;
+
+ dest_addr = StackEntryByIndex(destIndex);
+ /* Clear C1 */
+ FlagC1(0);
+ tag_or = (TOSPtr->tagvalue | dest_addr->tagvalue);
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ NpxStatus |= SW_IE_MASK;
+ if ((NpxControl & CW_IM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ return;
+ }
+ if ((TOSPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ WriteIndefinite(TOSPtr);
+ }
+ if ((dest_addr->tagvalue & TAG_EMPTY_MASK) != 0) {
+ WriteIndefinite(dest_addr);
+ }
+ }
+ CopyFP(&FPTemp, TOSPtr);
+ CopyFP(TOSPtr, dest_addr);
+ CopyFP(dest_addr, &FPTemp);
+}
+
+
+
+/*(
+Name : FXTRACT
+Function : Split the value in ST into its exponent and significand
+Operation : TEMP<-sig(ST); ST<-exp(ST); Dec ST; ST<-TEMP
+Flags : C1 as per table 15-1. Others undefined
+Exceptions : Z, D, I, IS
+Valid range : Any
+Notes : If the original operand is zero, result is ST(1) is -infinity
+ and ST is the original zero. The zero divide exception is also
+ raised. If the original operand is infinity, ST(1) is +infinity
+ and ST is the original infinity. If ST(7) is not empty, the
+ invalid operation exception is raised.
+)*/
+
+
/*
 * FXTRACT: split ST into exponent and significand. On exit ST holds
 * the significand (pushed into the slot 7 below the old top) and
 * ST(1) holds the unbiased exponent as a real.
 *
 * NOTE(review): the destIndex parameter is never used in this body -
 * presumably kept for decoder-signature uniformity; confirm.
 */
GLOBAL VOID FXTRACT IFN1(IU16, destIndex)
{
    FPSTACKENTRY *dest_addr;
    IS16 exp_val;

    /* dest_addr is the slot the push will occupy; it must be empty. */
    dest_addr = StackEntryByIndex(7);
    /* Clear C1 */
    FlagC1(0);
    if ((dest_addr->tagvalue & TAG_EMPTY_MASK) == 0) {
        /* Stack would overflow: invalid operation. */
        NpxStatus |= SW_IE_MASK;
        NpxStatus &= ~SW_SF_MASK;
        if ((NpxControl & CW_IM_MASK) == 0) {
            NpxStatus |= SW_ES_MASK;
            DoNpxException();
        } else {
            WriteIndefinite(TOSPtr);
            TOSPtr=dest_addr;
            WriteIndefinite(TOSPtr);
        }
        return;
    }
    TestUneval(TOSPtr);
    if ((TOSPtr->tagvalue & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
        if ((TOSPtr->tagvalue & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
                return;
            }
            /* It won't be a denormal after we've finished */
            TOSPtr->tagvalue ^= TAG_DENORMAL_MASK;
        }
        /* It is entirely valid */
        /* Extract the unbiased exponent, then force the stored
         * exponent to the bias so the value becomes the significand. */
        exp_val = ((FPHOST *)&(TOSPtr->fpvalue))->hiword.exp-HOST_BIAS;
        ((FPHOST *)&(TOSPtr->fpvalue))->hiword.exp=HOST_BIAS;
        TOSPtr->tagvalue &= TAG_NEGATIVE_MASK;
        CopyFP(dest_addr, TOSPtr);
        FPRes = (FPH)exp_val;
        /* This MUST be a real number, it could be negative. */
        CalcTagword(TOSPtr);
        TOSPtr = dest_addr;
    } else {
        /* Check if it was a zero */
        if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
            /* exp(0) is -infinity; significand keeps the zero, and
             * zero-divide is raised. */
            dest_addr->tagvalue = TOSPtr->tagvalue;
            TOSPtr->tagvalue = (TAG_INFINITY_MASK | TAG_NEGATIVE_MASK);
            TOSPtr = dest_addr;
            NpxStatus |= SW_ZE_MASK;
            if ((NpxControl & CW_ZM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
            }
            return;
        }
        /* Check if it was an infinity */
        if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
            /* exp(inf) is +infinity; significand keeps the sign. */
            dest_addr->tagvalue = TOSPtr->tagvalue;
            TOSPtr->tagvalue = TAG_INFINITY_MASK;
            TOSPtr = dest_addr;
            return;
        }
        /* There was something funny...Was it empty or unsupported? */
        if ((TOSPtr->tagvalue & (TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK)) != 0) {
            NpxStatus |= SW_IE_MASK;
            NpxStatus &= ~SW_SF_MASK;
            if ((NpxControl & CW_IM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
            } else {
                WriteIndefinite(TOSPtr);
                TOSPtr=dest_addr;
                WriteIndefinite(TOSPtr);
            }
            return;
        }
        /* NaN: propagate the NaN into both result slots. */
        CopyFP(dest_addr, TOSPtr);
        TOSPtr = dest_addr;
    }
}
+
+
+
/*(
FYL2X (Y log base 2 of X) calculates the function Z=Y*LOG2(X). X is
taken from ST(0) and Y is taken from ST(1). The operands must be in
the range 0 < X < +inf and -inf < Y < +inf. The instruction pops the
TOS value.
)*/
+
+
/*
 * FYL2X: ST(1) <- ST(1) * log2(ST), then pop.
 *
 * Every exit path pops the stack (the old TOS is marked empty and
 * TOSPtr advanced) - including exception paths, per the comment
 * below. Special-operand combinations follow the zero/infinity rules
 * documented inline.
 */
GLOBAL VOID FYL2X IFN0()
{
    FPSTACKENTRY *st1_addr;

    /* Clear C1 */
    FlagC1(0);
    st1_addr = StackEntryByIndex(1);
    TestUneval(TOSPtr);
    TestUneval(st1_addr);
    tag_or = (TOSPtr->tagvalue | st1_addr->tagvalue);
    /* First, check if the values are real. If so, we can proceed. */
    if ((tag_or & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
        /* Check for the denorm case... */
        if ((tag_or & TAG_DENORMAL_MASK) != 0) {
            NpxStatus |= SW_DE_MASK;
            if ((NpxControl & CW_DM_MASK) == 0) {
                NpxStatus |= SW_ES_MASK;
                DoNpxException();
                /* We ALWAYS pop!!! */
                TOSPtr->tagvalue = TAG_EMPTY_MASK;
                TOSPtr = st1_addr;
                return;
            }
        }
        /* Check for the case of a negative in ST */
        if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
            /* log2 of a negative real is invalid. */
            SignalIndefinite(st1_addr);
            TOSPtr->tagvalue = TAG_EMPTY_MASK;
            TOSPtr = st1_addr;
            return;
        }

        /* OK, we can do the operation ... */

        FPRes = st1_addr->fpvalue * host_log2(TOSPtr->fpvalue);

        PostCheckOUP();
        /* This is just a multiplication, result could be anything */
        CalcTagword(st1_addr);
    } else {
        if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
            if ((tag_or & TAG_EMPTY_MASK) != 0) {
                SignalStackUnderflow(st1_addr);
            } else {
                if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
                    SignalIndefinite(st1_addr);
                } else {
                    /* Well, I suppose it has to be the NaN case... */
                    /* Calculate the xor of the tagwords */
                    tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
                    Test2NaN(1, TOSPtr, st1_addr);
                }
            }
            TOSPtr->tagvalue = TAG_EMPTY_MASK;
            TOSPtr = st1_addr;
            return;
        }
        /* The only possibilities left are infinity and zero.. */
        /* Let's begin with the zeroes case.. */
        if ((tag_or & TAG_ZERO_MASK) != 0) {
            if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
                /* ST is zero. Can have two possibilities */
                /* if ST(1) is zero, raise invalid */
                /* Otherwise raise divide by zero */
                if ((st1_addr->tagvalue & TAG_ZERO_MASK) != 0) {
                    SignalIndefinite(st1_addr);
                } else {
                    if ((st1_addr->tagvalue & TAG_INFINITY_MASK) == 0) {
                        /* Calculate the xor of the tagwords */
                        tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
                        SignalDivideByZero(st1_addr);
                    } else {
                        /* inf * log2(0): flip the sign of ST(1). */
                        st1_addr->tagvalue ^= TAG_NEGATIVE_MASK;
                    }
                }
            } else {
                /* ST(1) must be zero */
                /* We already know that TOSPtr isn't zero. */
                /* There are three possibilities again. */
                /* If TOSPtr is infinity, raise invalid exception. */
                /* If TOSPtr < 1.0 then the result is zero with the */
                /* complement of the sign of ST(1) */
                /* If TOSPtr >= 1.0 then the result is ST(1) */
                if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
                    SignalIndefinite(st1_addr);
                } else {
                    if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
                        SignalIndefinite(st1_addr);
                    } else {
                        if (TOSPtr->fpvalue < 1.0) {
                            st1_addr->tagvalue ^= TAG_NEGATIVE_MASK;
                        }
                    }
                }
            }
            TOSPtr->tagvalue = TAG_EMPTY_MASK;
            TOSPtr = st1_addr;
            return;
        }
        /* The only thing left is infinity... */
        /* If ST is infinity then there are two possibilities... */
        /* If it is +infinity the result is infinity with sign of ST(1) */
        /* If it is -infinity the result is an invalid operation */
        if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
            if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) == 0) {
                st1_addr->tagvalue &= TAG_NEGATIVE_MASK;
                st1_addr->tagvalue |= TAG_INFINITY_MASK;
            } else {
                SignalIndefinite(st1_addr);
            }
        } else {
            /* ST(1) MUST be infinity (and ST is real). */
            /* There are three possibilities: */
            /* If ST is exactly 1.0 then raise Invalid */
            /* If ST is less than 1.0 then the result is the */
            /* infinity with the complement of its sign. */
            /* If ST is greater than 1.0 the result is the infinity. */
            if (TOSPtr->fpvalue == 1.0) {
                SignalIndefinite(st1_addr);
            } else {
                if (TOSPtr->fpvalue < 1.0) {
                    if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
                        SignalIndefinite(st1_addr);
                    } else {
                        st1_addr->tagvalue ^= TAG_NEGATIVE_MASK;
                    }
                }
            }
        }
    }
    /* Common pop for the arithmetic and infinity paths. */
    TOSPtr->tagvalue = TAG_EMPTY_MASK;
    TOSPtr = st1_addr;
}
+
+
+
+/*(
+FYL2XP1 (Y log base 2 of (X+1)) calculates the function Z=Y*LOG2(X+1). X is
+taken from ST(0) and Y is taken from ST(1). The operands must be in
+the range 0 < X < +inf and -inf < Y < +inf. The instruction pops the
+TOS value. This is better than FYL2X when X is very small, since more significant
+digits can be retained for 1+X than can be for X alone.
+NOTE(review): the stated range "0 < X" disagrees with the code below, which
+accepts any X > -1 -- confirm against the Intel data book.
+)*/
+
+
+GLOBAL VOID FYL2XP1 IFN0()
+{
+ FPSTACKENTRY *st1_addr;
+
+ /* Clear C1 */
+ FlagC1(0);
+ /* st1_addr is ST(1), the Y operand; TOSPtr (ST) holds the X operand. */
+ st1_addr = StackEntryByIndex(1);
+ TestUneval(TOSPtr);
+ TestUneval(st1_addr);
+ /* The OR of both tagwords decides which special-case path applies. */
+ tag_or = (TOSPtr->tagvalue | st1_addr->tagvalue);
+ /* First, check if the values are real. If so, we can proceed. */
+ if ((tag_or & ~(TAG_DENORMAL_MASK | TAG_NEGATIVE_MASK)) == 0) {
+ /* Check for the denorm case... */
+ if ((tag_or & TAG_DENORMAL_MASK) != 0) {
+ NpxStatus |= SW_DE_MASK;
+ if ((NpxControl & CW_DM_MASK) == 0) {
+ NpxStatus |= SW_ES_MASK;
+ DoNpxException();
+ /* We ALWAYS pop!!! */
+ TOSPtr->tagvalue = TAG_EMPTY_MASK;
+ TOSPtr = st1_addr;
+ return;
+ }
+ }
+ /* X at or below -1: log2(X+1) is undefined there, raise invalid. */
+ if (TOSPtr->fpvalue <= -1.0) {
+ SignalIndefinite(st1_addr);
+ TOSPtr->tagvalue = TAG_EMPTY_MASK;
+ TOSPtr = st1_addr;
+ return;
+ }
+
+ /* OK, we can do the operation ... */
+
+ FPRes = st1_addr->fpvalue * host_log1p(TOSPtr->fpvalue);
+
+ PostCheckOUP();
+ /* This is just a multiplication - result could be anything */
+ CalcTagword(st1_addr);
+ } else {
+ if ((tag_or & ((TAG_EMPTY_MASK | TAG_UNSUPPORTED_MASK) | TAG_NAN_MASK)) != 0) {
+ if ((tag_or & TAG_EMPTY_MASK) != 0) {
+ SignalStackUnderflow(st1_addr);
+ } else {
+ if ((tag_or & TAG_UNSUPPORTED_MASK) != 0) {
+ SignalIndefinite(st1_addr);
+ } else {
+ /* Well, I suppose it has to be the NaN case... */
+ /* Calculate the xor of the tagwords */
+ tag_xor = (TOSPtr->tagvalue ^ st1_addr->tagvalue);
+ Test2NaN(1, TOSPtr, st1_addr);
+ }
+ }
+ TOSPtr->tagvalue = TAG_EMPTY_MASK;
+ TOSPtr = st1_addr;
+ return;
+ }
+ /* The only possibilities left are infinity and zero.. */
+ /* Let's begin with the zeroes case.. */
+ if ((tag_or & TAG_ZERO_MASK) != 0) {
+ if ((TOSPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+ /* ST is zero. Can have two possibilities */
+ /* if ST(1) is positive, result is ST */
+ /* if ST(1) is negative, result is -ST */
+ /* NOTE(review): the code below appears inverted relative to the
+ two comments above -- the negative-ST(1) branch keeps ST's sign
+ and the positive branch flips it. Confirm which is intended. */
+ if ((st1_addr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ st1_addr->tagvalue = (TAG_ZERO_MASK | (TOSPtr->tagvalue & TAG_NEGATIVE_MASK));
+ } else {
+ st1_addr->tagvalue = (TAG_ZERO_MASK | (TOSPtr->tagvalue ^ TAG_NEGATIVE_MASK));
+ }
+ } else {
+ /* ST(1) must be zero */
+ /* We already know that TOSPtr isn't zero. */
+ /* There are three possibilities again. */
+ /* If TOSPtr is infinity, raise invalid exception. */
+ /* If TOSPtr < 0 then the result is zero with the */
+ /* complement of the sign of ST(1) */
+ /* If TOSPtr >= 0 then the result is ST(1) */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ SignalIndefinite(st1_addr);
+ } else {
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ st1_addr->tagvalue ^= TAG_NEGATIVE_MASK;
+ }
+ }
+ }
+ TOSPtr->tagvalue = TAG_EMPTY_MASK;
+ TOSPtr = st1_addr;
+ return;
+ }
+ /* The only thing left is infinity... */
+ /* If ST is infinity then there are two possibilities... */
+ /* If it is +infinity the result is infinity with sign of ST(1) */
+ /* If it is -infinity the result is an invalid operation */
+ /* NOTE(review): this test takes the result branch when the NEGATIVE
+ bit IS set, the opposite of the comment above and of the analogous
+ test in FYL2X -- confirm whether the condition is inverted. */
+ if ((TOSPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+ if ((TOSPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+ st1_addr->tagvalue &= TAG_NEGATIVE_MASK;
+ st1_addr->tagvalue |= TAG_INFINITY_MASK;
+ } else {
+ SignalIndefinite(st1_addr);
+ }
+ } else {
+ /* ST(1) MUST be infinity (and ST is non-zero). */
+ /* There are three possibilities: */
+ /* If ST is exactly 1.0 then raise Invalid */
+ /* If ST is less than 0.0 then the result is the */
+ /* infinity with the complement of its sign. */
+ /* If ST is greater than 0.0 the result is the infinity. */
+ /* NOTE(review): the 1.0 test looks copied from FYL2X (where
+ log2(ST) is zero at ST == 1.0); for FYL2XP1 the inf * 0 case
+ would be ST == 0.0, which the zero-tag path above already
+ handles. Confirm against real 80387 behaviour. */
+ if (TOSPtr->fpvalue == 1.0) {
+ SignalIndefinite(st1_addr);
+ } else {
+ if (TOSPtr->fpvalue < 0.0) {
+ st1_addr->tagvalue ^= TAG_NEGATIVE_MASK;
+ }
+ }
+ }
+ }
+ /* Pop: mark old TOS empty and advance the stack pointer to ST(1). */
+ TOSPtr->tagvalue = TAG_EMPTY_MASK;
+ TOSPtr = st1_addr;
+}
+
+/* These functions are provided in order to facilitate pigging */
+
+#ifndef PIG
+/* copied here from FmNpx.c */
+
+/*
+ * Render the FPU stack entry at fpStPtr into buf as human-readable text
+ * (used for pigging/trace output).  prec is the number of significant
+ * digits used when the entry holds an ordinary real value.  The caller
+ * supplies a buffer large enough for the longest form produced here.
+ */
+GLOBAL void NpxStackRegAsString IFN3(FPSTACKENTRY *, fpStPtr, char *, buf, IU32, prec)
+{
+    /* The overwhelmingly most likely option is empty. */
+    if ((fpStPtr->tagvalue & TAG_EMPTY_MASK) != 0) {
+        strcpy(buf, "empty");
+        return;
+    }
+    /* Plain real (possibly negative and/or denormal): print the value.
+     * Cast prec to int: printf's '*' precision requires an int argument. */
+    if ((fpStPtr->tagvalue & ~(TAG_NEGATIVE_MASK | TAG_DENORMAL_MASK)) == 0) {
+        sprintf(buf, "%.*g", (int)prec, fpStPtr->fpvalue);
+        return;
+    }
+    /* OK, one of the funny bits was set. But which? */
+    if ((fpStPtr->tagvalue & TAG_ZERO_MASK) != 0) {
+        if ((fpStPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+            strcpy(buf, "-0");
+        } else {
+            strcpy(buf, "0");
+        }
+        return;
+    }
+    /* Unevaluated entry: dump the raw 80-bit image. */
+    if ((fpStPtr->tagvalue & UNEVALMASK) != 0) {
+        sprintf(buf, "%04x %08x%08x",
+            ((FP80*)fpStPtr)->sign_exp,
+            ((FP80*)fpStPtr)->mant_hi,
+            ((FP80*)fpStPtr)->mant_lo);
+        strcat(buf, " uneval");
+        return;
+    }
+    if ((fpStPtr->tagvalue & TAG_INFINITY_MASK) != 0) {
+        if ((fpStPtr->tagvalue & TAG_NEGATIVE_MASK) != 0) {
+            strcpy(buf, "minus infinity");
+        } else {
+            strcpy(buf, "infinity");
+        }
+        return;
+    }
+    if ((fpStPtr->tagvalue & (TAG_NAN_MASK|TAG_SNAN_MASK)) != 0) {
+        /* The 80-bit pattern FFFF C0000000 00000000 is the indefinite QNaN. */
+        if ( ((FP80*)fpStPtr)->mant_lo == 0
+          && ((FP80*)fpStPtr)->mant_hi == 0xC0000000
+          && *(IU16*)&((FP80*)fpStPtr)->sign_exp == 0xFFFF )
+            strcpy(buf, "indefinite");
+        else
+            sprintf(buf, "%08x%08x %s %sNan",
+                ((FP80*)fpStPtr)->mant_hi,
+                ((FP80*)fpStPtr)->mant_lo,
+                (fpStPtr->tagvalue & TAG_NEGATIVE_MASK) ? "negative" : "",
+                (fpStPtr->tagvalue & TAG_SNAN_MASK) ? "S" : "");
+        return;
+    }
+    /* It MUST be unsupported.  (Bug fix: the exponent specifier was a
+     * malformed "%04" with no conversion letter -- undefined behaviour --
+     * now "%04x" to match the uneval case above.) */
+    sprintf(buf, "%04x %08x%08x unsupported",
+        ((FP80*)fpStPtr)->sign_exp,
+        ((FP80*)fpStPtr)->mant_hi,
+        ((FP80*)fpStPtr)->mant_lo);
+    return;
+}
+
+/* Debug helper: only ever used in trace.c and only if pure CCPU.
+ * Formats relative stack register reg_num (0 == ST) into buffer and
+ * returns buffer for convenience. */
+GLOBAL char * getNpxStackReg IFN2(IU32, reg_num, char *, buffer)
+{
+    IU32 physReg = (reg_num + (IU32)(TOSPtr - FPUStackBase)) & 7;
+
+    NpxStackRegAsString(&FPUStackBase[physReg], buffer, 12);
+    return buffer;
+}
+#endif /* !PIG */
+
+/* Read back the current FPU control word image. */
+GLOBAL IU32 getNpxControlReg IFN0()
+{
+    return NpxControl;
+}
+
+/* Install a new FPU control word and propagate its rounding-control
+ * field (bits 10-11, CW_RC_MASK == 0xc00) to the host FPU.
+ * NOTE(review): npxRounding holds the masked field (0x000..0xc00); it is
+ * assumed the ROUND_* names compared against here carry that encoding. */
+GLOBAL VOID setNpxControlReg IFN1(IU32, newControl)
+{
+    NpxControl = newControl;
+    npxRounding = NpxControl & 0xc00;
+
+    if (npxRounding == ROUND_NEAREST) {
+        HostSetRoundToNearest();
+    } else if (npxRounding == ROUND_NEG_INFINITY) {
+        HostSetRoundDown();
+    } else if (npxRounding == ROUND_POS_INFINITY) {
+        HostSetRoundUp();
+    } else if (npxRounding == ROUND_ZERO) {
+        HostSetRoundToZero();
+    }
+}
+
+/* Return the FPU status word; GetIntelStatusWord() refreshes the
+ * NpxStatus image first (presumably folding in the TOS field -- see its
+ * definition). */
+GLOBAL IU32 getNpxStatusReg IFN0()
+{
+    GetIntelStatusWord();
+    return NpxStatus;
+}
+
+/* Adopt a new status word image, repositioning the TOS pointer from the
+ * ST field (bits 11-13) of the supplied value. */
+GLOBAL VOID setNpxStatusReg IFN1( IU32, newStatus)
+{
+    IU32 tosField = (newStatus >> 11) & 7;
+
+    TOSPtr = &FPUStackBase[tosField];
+    NpxStatus = newStatus;
+}
+
+/* Build the Intel-format 16-bit tagword from the internal per-entry
+ * tags, packing one 2-bit field per physical register, register 7 in
+ * the most significant position. */
+GLOBAL IU32 getNpxTagwordReg IFN0()
+{
+    IU32 packed = 0;
+    int reg;
+
+    for (reg = 7; reg >= 0; reg--) {
+        packed <<= 2;
+        if ((FPUStackBase[reg].tagvalue & TAG_EMPTY_MASK) != 0) {
+            packed |= 3;                        /* empty */
+        } else if ((FPUStackBase[reg].tagvalue & TAG_ZERO_MASK) != 0) {
+            packed |= 1;                        /* zero */
+        } else if ((FPUStackBase[reg].tagvalue & ~TAG_NEGATIVE_MASK) != 0) {
+            packed |= 2;                        /* special */
+        }
+        /* otherwise leave 00: valid real */
+    }
+    return packed;
+}
+
+/* Deliberately a no-op: calling SetIntelTagword() here was found to
+ * corrupt the emulated FPU state, so the supplied tagword image is
+ * intentionally ignored.  The disabled call is kept for reference. */
+GLOBAL VOID setNpxTagwordReg IFN1(IU32, newTag)
+{
+ /* SetIntelTagword(newTag); */
+}
+
+/* Snapshot all eight FPU stack registers into caller-provided storage. */
+GLOBAL void getNpxStackRegs IFN1(FPSTACKENTRY *, dumpPtr)
+{
+    memcpy(dumpPtr, FPUStackBase, sizeof(FPSTACKENTRY) * 8);
+}
+
+/* Restore all eight FPU stack registers from caller-provided storage. */
+GLOBAL void setNpxStackRegs IFN1(FPSTACKENTRY *, loadPtr)
+{
+    memcpy(FPUStackBase, loadPtr, sizeof(FPSTACKENTRY) * 8);
+}
+
+
+/* And finally some stubs */
+/* Intentionally empty: no start-of-day work is needed for this FPU
+ * implementation. */
+GLOBAL void initialise_npx IFN0()
+{
+}
+
+/* Intentionally empty stub.  NOTE(review): FPU reset (FNINIT-style
+ * state initialisation) is presumably performed elsewhere -- confirm
+ * before relying on this entry point. */
+GLOBAL void npx_reset IFN0()
+{
+
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/fpu_c.h b/private/mvdm/softpc.new/base/ccpu386/fpu_c.h
new file mode 100644
index 000000000..999b99579
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/fpu_c.h
@@ -0,0 +1,168 @@
+#ifndef _Fpu_c_h
+#define _Fpu_c_h
+/* Parameters of the host 'double' format used to hold stack values. */
+#define HOST_BIAS (1023)
+#define MIN_EXP (-1023)
+#define MAX_EXP (1024)
+/* Geometry of the emulated FPU register stack (byte sizes/offsets). */
+#define STACKENTRYSIZE (16)
+#define STACKSIZE (128)
+#define STACKWRAP (-129)
+/* Bit numbers of the internal classification tags kept in
+ FPSTACKENTRY.tagvalue. */
+#define TAG_NEGATIVE_BIT (0)
+#define TAG_ZERO_BIT (1)
+#define TAG_INFINITY_BIT (2)
+#define TAG_DENORMAL_BIT (3)
+#define TAG_NAN_BIT (4)
+#define TAG_SNAN_BIT (5)
+#define TAG_UNSUPPORTED_BIT (6)
+#define TAG_EMPTY_BIT (7)
+#define TAG_FSCALE_BIT (8)
+#define TAG_BCD_BIT (9)
+#define TAG_R80_BIT (10)
+#define TAG_UNEVAL_BIT_E (9)
+#define TAG_UNEVAL_BIT_S (10)
+/* Mask forms (1 << bit) of the tag bits above; TAG_UNEVAL_MASK covers
+ bits 9-10. */
+#define TAG_REAL_POSITIVE_MASK (0)
+#define TAG_NEGATIVE_MASK (1)
+#define TAG_ZERO_MASK (2)
+#define TAG_INFINITY_MASK (4)
+#define TAG_DENORMAL_MASK (8)
+#define TAG_NAN_MASK (16)
+#define TAG_SNAN_MASK (32)
+#define TAG_UNSUPPORTED_MASK (64)
+#define TAG_EMPTY_MASK (128)
+#define TAG_FSCALE_MASK (256)
+#define TAG_BCD_MASK (512)
+#define TAG_R80_MASK (1024)
+#define TAG_UNEVAL_MASK (1536)
+/* Status word exception flag bits and their (1 << bit) masks. */
+#define ST_IE_BIT (0)
+#define ST_DE_BIT (1)
+#define ST_ZE_BIT (2)
+#define ST_OE_BIT (3)
+#define ST_UE_BIT (4)
+#define ST_PE_BIT (5)
+#define ST_SF_BIT (6)
+#define ST_ES_BIT (7)
+#define ST_IE_MASK (1)
+#define ST_DE_MASK (2)
+#define ST_ZE_MASK (4)
+#define ST_OE_MASK (8)
+#define ST_UE_MASK (16)
+#define ST_PE_MASK (32)
+#define ST_SF_MASK (64)
+#define ST_ES_MASK (128)
+/* Status word condition-code bit numbers.  Unlike the masks above, the
+ ST_Cn_MASK values are AND-masks that CLEAR the bit
+ (e.g. 65279 == 0xFEFF clears C0). */
+#define ST_C0_BIT (8)
+#define ST_C1_BIT (9)
+#define ST_C2_BIT (10)
+#define ST_C3_BIT (14)
+#define ST_C0_MASK (65279)
+#define ST_C1_MASK (65023)
+#define ST_C2_MASK (64511)
+#define ST_C3_MASK (49151)
+/* Busy bit and stack-top (ST) field positions, with clearing masks. */
+#define ST_B_BIT (15)
+#define ST_ST_BIT_S (13)
+#define ST_ST_BIT_E (11)
+#define ST_B_MASK (61439)
+#define ST_ST_MASK (51199)
+/* Intel tagword layout: 2-bit field per register, with the four field
+ encodings. */
+#define TW_TAG_0_S (1)
+#define TW_TAG_0_E (0)
+#define TW_TAG_LENGTH (2)
+#define TW_TAG_0_MASK (3)
+#define TW_TAG_VALID (0)
+#define TW_TAG_ZERO (1)
+#define TW_TAG_INVALID (2)
+#define TW_TAG_EMPTY (3)
+/* Control word exception-mask bits, precision/rounding fields, and
+ their (1 << bit) masks. */
+#define CW_IM_BIT (0)
+#define CW_DM_BIT (1)
+#define CW_ZM_BIT (2)
+#define CW_OM_BIT (3)
+#define CW_UM_BIT (4)
+#define CW_PM_BIT (5)
+#define CW_PC_BIT_E (8)
+#define CW_PC_BIT_S (9)
+#define CW_RC_BIT_E (10)
+#define CW_RC_BIT_S (11)
+#define CW_IC_BIT (12)
+#define CW_IM_MASK (1)
+#define CW_DM_MASK (2)
+#define CW_ZM_MASK (4)
+#define CW_OM_MASK (8)
+#define CW_UM_MASK (16)
+#define CW_PM_MASK (32)
+#define CW_PC_MASK (768)
+#define CW_RC_MASK (3072)
+#define CW_IC_MASK (4096)
+/* Rounding-control selectors.
+ NOTE(review): these are ordinals 0..3, while the in-place RC field
+ extracted with CW_RC_MASK yields 0x000/0x400/0x800/0xc00 -- confirm
+ which encoding each consumer of these names expects. */
+#define ROUND_NEAREST (0)
+#define ROUND_NEG_INFINITY (1)
+#define ROUND_POS_INFINITY (2)
+#define ROUND_TO_ZERO (3)
+/* Layouts of the Intel in-memory operand formats (integers, BCD, and
+ single/double/extended reals): sign/exponent/mantissa bit positions
+ and masks, expressed per 32-bit word. */
+#define INTEL_I16_SIGN_BIT (15)
+#define INTEL_I16_SIGN_MASK (32768)
+#define INTEL_I32_SIGN_BIT (31)
+#define INTEL_I32_SIGN_MASK (-2147483648)
+/* NOTE(review): 31 here presumably refers to the sign bit within the
+ HIGH 32-bit word of the 64-bit integer -- confirm at the use sites. */
+#define INTEL_I64_SIGN_BIT (31)
+#define INTEL_BCD_SIGN_BIT (15)
+#define INTEL_SR_SIGN_BIT (31)
+#define INTEL_SR_SIGN_MASK (-2147483648)
+#define INTEL_SR_EXP_S (30)
+#define INTEL_SR_EXP_E (23)
+#define INTEL_SR_EXP_MASK (2139095040)
+#define INTEL_SR_EXP_MAX (255)
+#define INTEL_SR_MANT_S (22)
+#define INTEL_SR_MANT_E (0)
+#define INTEL_SR_MANT_MASK (8388607)
+#define INTEL_LR_SIGN_BIT (31)
+#define INTEL_LR_SIGN_MASK (-2147483648)
+#define INTEL_LR_EXP_S (30)
+#define INTEL_LR_EXP_E (20)
+#define INTEL_LR_EXP_MASK (2146435072)
+#define INTEL_LR_EXP_MAX (2047)
+#define INTEL_LR_MANT_S (19)
+#define INTEL_LR_MANT_E (0)
+#define INTEL_LR_MANT_MASK (1048575)
+#define INTEL_TR_SIGN_BIT (15)
+#define INTEL_TR_SIGN_MASK (32768)
+#define INTEL_TR_EXP_S (14)
+#define INTEL_TR_EXP_E (0)
+#define INTEL_TR_EXP_MASK (32767)
+/* FCOM-style comparison result patterns (status word condition codes)
+ and miscellaneous constants. */
+#define INTEL_COMP_NC (17664)
+#define INTEL_COMP_GT (0)
+#define INTEL_COMP_LT_BIT (8)
+#define INTEL_COMP_EQ_BIT (14)
+#define INTEL_COMP_LT (256)
+#define INTEL_COMP_EQ (16384)
+#define FPTEMP_INDEX (8)
+#define C3C2C0MASK (47359)
+#define FCLEX_MASK (32512)
+#define COMP_LT (0)
+#define COMP_GT (1)
+#define COMP_EQ (2)
+#define FPBASE_OFFSET (8)
+/* Indices of the built-in load-constant values (FLD1, FLDPI, ...) in
+ the constant pool. */
+#define CONST_ONE_OFFSET (0)
+#define CONST_LOG2_10_OFFSET (1)
+#define CONST_LOG2_E_OFFSET (2)
+#define CONST_PI_OFFSET (3)
+#define CONST_LOG10_2_OFFSET (4)
+#define CONST_LOGE_2_OFFSET (5)
+#define CONST_ZERO_OFFSET (6)
+#define CONST_TWO_OFFSET (7)
+#define CONST_MINUS_ONE_OFFSET (8)
+/* One slot of the emulated register stack: the value held as a host
+ double plus its classification tag (TAG_* masks above). */
+struct FPSTACKENTRY
+{
+ double fpvalue;
+ IUH padding;
+ IUH tagvalue;
+};
+/* 64-bit integer viewed as two 32-bit words. */
+struct FP_I64
+{
+ IU32 word1;
+ IU32 word2;
+};
+/* 64-bit real viewed as two 32-bit words. */
+struct FP_R64
+{
+ IU32 word1;
+ IU32 word2;
+};
+/* 80-bit temporary real: 64-bit mantissa plus sign/exponent word. */
+struct FP_R80
+{
+ struct FP_I64 mantissa;
+ IU16 exponent;
+};
+#endif /* ! _Fpu_c_h */
diff --git a/private/mvdm/softpc.new/base/ccpu386/gdpvar.h b/private/mvdm/softpc.new/base/ccpu386/gdpvar.h
new file mode 100644
index 000000000..bf7e23e78
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/gdpvar.h
@@ -0,0 +1,1859 @@
+#ifndef _gdpvar_h
+#define _gdpvar_h
+#define GLOBAL_EDL_WORKSPACE_0 (*(IUH *)((IUH)Gdp + 43))
+#define SET_GLOBAL_EDL_WORKSPACE_0(v) (GLOBAL_EDL_WORKSPACE_0 = (v))
+#define GLOBAL_EDL_WORKSPACE_1 (*(IUH *)((IUH)Gdp + 47))
+#define SET_GLOBAL_EDL_WORKSPACE_1(v) (GLOBAL_EDL_WORKSPACE_1 = (v))
+#define GLOBAL_EDL_WORKSPACE_2 (*(IUH *)((IUH)Gdp + 51))
+#define SET_GLOBAL_EDL_WORKSPACE_2(v) (GLOBAL_EDL_WORKSPACE_2 = (v))
+#define GLOBAL_EDL_WORKSPACE_3 (*(IUH *)((IUH)Gdp + 55))
+#define SET_GLOBAL_EDL_WORKSPACE_3(v) (GLOBAL_EDL_WORKSPACE_3 = (v))
+#define GLOBAL_EDL_WORKSPACE_4 (*(IUH *)((IUH)Gdp + 59))
+#define SET_GLOBAL_EDL_WORKSPACE_4(v) (GLOBAL_EDL_WORKSPACE_4 = (v))
+#define GLOBAL_EDL_WORKSPACE_5 (*(IUH *)((IUH)Gdp + 63))
+#define SET_GLOBAL_EDL_WORKSPACE_5(v) (GLOBAL_EDL_WORKSPACE_5 = (v))
+#define GLOBAL_EDL_WORKSPACE_6 (*(IUH *)((IUH)Gdp + 67))
+#define SET_GLOBAL_EDL_WORKSPACE_6(v) (GLOBAL_EDL_WORKSPACE_6 = (v))
+#define GLOBAL_EDL_WORKSPACE_7 (*(IUH *)((IUH)Gdp + 71))
+#define SET_GLOBAL_EDL_WORKSPACE_7(v) (GLOBAL_EDL_WORKSPACE_7 = (v))
+#define GLOBAL_EDL_WORKSPACE_8 (*(IUH *)((IUH)Gdp + 75))
+#define SET_GLOBAL_EDL_WORKSPACE_8(v) (GLOBAL_EDL_WORKSPACE_8 = (v))
+#define GLOBAL_EDL_WORKSPACE_9 (*(IUH *)((IUH)Gdp + 79))
+#define SET_GLOBAL_EDL_WORKSPACE_9(v) (GLOBAL_EDL_WORKSPACE_9 = (v))
+#define GLOBAL_EDL_WORKSPACE_10 (*(IUH *)((IUH)Gdp + 83))
+#define SET_GLOBAL_EDL_WORKSPACE_10(v) (GLOBAL_EDL_WORKSPACE_10 = (v))
+#define GLOBAL_EDL_WORKSPACE_11 (*(IUH *)((IUH)Gdp + 87))
+#define SET_GLOBAL_EDL_WORKSPACE_11(v) (GLOBAL_EDL_WORKSPACE_11 = (v))
+#define GLOBAL_EDL_WORKSPACE_12 (*(IUH *)((IUH)Gdp + 91))
+#define SET_GLOBAL_EDL_WORKSPACE_12(v) (GLOBAL_EDL_WORKSPACE_12 = (v))
+#define GLOBAL_EDL_WORKSPACE_13 (*(IUH *)((IUH)Gdp + 95))
+#define SET_GLOBAL_EDL_WORKSPACE_13(v) (GLOBAL_EDL_WORKSPACE_13 = (v))
+#define GLOBAL_EDL_WORKSPACE_14 (*(IUH *)((IUH)Gdp + 99))
+#define SET_GLOBAL_EDL_WORKSPACE_14(v) (GLOBAL_EDL_WORKSPACE_14 = (v))
+#define GLOBAL_EDL_WORKSPACE_15 (*(IUH *)((IUH)Gdp + 103))
+#define SET_GLOBAL_EDL_WORKSPACE_15(v) (GLOBAL_EDL_WORKSPACE_15 = (v))
+#define GLOBAL_EDL_WORKSPACE_16 (*(IUH *)((IUH)Gdp + 107))
+#define SET_GLOBAL_EDL_WORKSPACE_16(v) (GLOBAL_EDL_WORKSPACE_16 = (v))
+#define GLOBAL_EDL_WORKSPACE_17 (*(IUH *)((IUH)Gdp + 111))
+#define SET_GLOBAL_EDL_WORKSPACE_17(v) (GLOBAL_EDL_WORKSPACE_17 = (v))
+#define GLOBAL_EDL_WORKSPACE_18 (*(IUH *)((IUH)Gdp + 115))
+#define SET_GLOBAL_EDL_WORKSPACE_18(v) (GLOBAL_EDL_WORKSPACE_18 = (v))
+#define GLOBAL_EDL_WORKSPACE_19 (*(IUH *)((IUH)Gdp + 119))
+#define SET_GLOBAL_EDL_WORKSPACE_19(v) (GLOBAL_EDL_WORKSPACE_19 = (v))
+#define GLOBAL_EDL_WORKSPACE_20 (*(IUH *)((IUH)Gdp + 123))
+#define SET_GLOBAL_EDL_WORKSPACE_20(v) (GLOBAL_EDL_WORKSPACE_20 = (v))
+#define GLOBAL_EDL_WORKSPACE_21 (*(IUH *)((IUH)Gdp + 127))
+#define SET_GLOBAL_EDL_WORKSPACE_21(v) (GLOBAL_EDL_WORKSPACE_21 = (v))
+#define GLOBAL_EDL_WORKSPACE_22 (*(IUH *)((IUH)Gdp + 131))
+#define SET_GLOBAL_EDL_WORKSPACE_22(v) (GLOBAL_EDL_WORKSPACE_22 = (v))
+#define GLOBAL_EDL_WORKSPACE_23 (*(IUH *)((IUH)Gdp + 135))
+#define SET_GLOBAL_EDL_WORKSPACE_23(v) (GLOBAL_EDL_WORKSPACE_23 = (v))
+#define GLOBAL_EDL_WORKSPACE_24 (*(IUH *)((IUH)Gdp + 139))
+#define SET_GLOBAL_EDL_WORKSPACE_24(v) (GLOBAL_EDL_WORKSPACE_24 = (v))
+#define GLOBAL_EDL_WORKSPACE_25 (*(IUH *)((IUH)Gdp + 143))
+#define SET_GLOBAL_EDL_WORKSPACE_25(v) (GLOBAL_EDL_WORKSPACE_25 = (v))
+#define GLOBAL_EDL_WORKSPACE_26 (*(IUH *)((IUH)Gdp + 147))
+#define SET_GLOBAL_EDL_WORKSPACE_26(v) (GLOBAL_EDL_WORKSPACE_26 = (v))
+#define GLOBAL_EDL_WORKSPACE_27 (*(IUH *)((IUH)Gdp + 151))
+#define SET_GLOBAL_EDL_WORKSPACE_27(v) (GLOBAL_EDL_WORKSPACE_27 = (v))
+#define GLOBAL_EDL_WORKSPACE_28 (*(IUH *)((IUH)Gdp + 155))
+#define SET_GLOBAL_EDL_WORKSPACE_28(v) (GLOBAL_EDL_WORKSPACE_28 = (v))
+#define GLOBAL_EDL_WORKSPACE_29 (*(IUH *)((IUH)Gdp + 159))
+#define SET_GLOBAL_EDL_WORKSPACE_29(v) (GLOBAL_EDL_WORKSPACE_29 = (v))
+#define GLOBAL_EDL_WORKSPACE_30 (*(IUH *)((IUH)Gdp + 163))
+#define SET_GLOBAL_EDL_WORKSPACE_30(v) (GLOBAL_EDL_WORKSPACE_30 = (v))
+#define GLOBAL_EDL_WORKSPACE_31 (*(IUH *)((IUH)Gdp + 167))
+#define SET_GLOBAL_EDL_WORKSPACE_31(v) (GLOBAL_EDL_WORKSPACE_31 = (v))
+#define GLOBAL_EDL_WORKSPACE_32 (*(IUH *)((IUH)Gdp + 171))
+#define SET_GLOBAL_EDL_WORKSPACE_32(v) (GLOBAL_EDL_WORKSPACE_32 = (v))
+#define GLOBAL_EDL_WORKSPACE_33 (*(IUH *)((IUH)Gdp + 175))
+#define SET_GLOBAL_EDL_WORKSPACE_33(v) (GLOBAL_EDL_WORKSPACE_33 = (v))
+#define GLOBAL_EDL_WORKSPACE_34 (*(IUH *)((IUH)Gdp + 179))
+#define SET_GLOBAL_EDL_WORKSPACE_34(v) (GLOBAL_EDL_WORKSPACE_34 = (v))
+#define GLOBAL_EDL_WORKSPACE_35 (*(IUH *)((IUH)Gdp + 183))
+#define SET_GLOBAL_EDL_WORKSPACE_35(v) (GLOBAL_EDL_WORKSPACE_35 = (v))
+#define GLOBAL_EDL_WORKSPACE_36 (*(IUH *)((IUH)Gdp + 187))
+#define SET_GLOBAL_EDL_WORKSPACE_36(v) (GLOBAL_EDL_WORKSPACE_36 = (v))
+#define GLOBAL_EDL_WORKSPACE_37 (*(IUH *)((IUH)Gdp + 191))
+#define SET_GLOBAL_EDL_WORKSPACE_37(v) (GLOBAL_EDL_WORKSPACE_37 = (v))
+#define GLOBAL_EDL_WORKSPACE_38 (*(IUH *)((IUH)Gdp + 195))
+#define SET_GLOBAL_EDL_WORKSPACE_38(v) (GLOBAL_EDL_WORKSPACE_38 = (v))
+#define GLOBAL_EDL_WORKSPACE_39 (*(IUH *)((IUH)Gdp + 199))
+#define SET_GLOBAL_EDL_WORKSPACE_39(v) (GLOBAL_EDL_WORKSPACE_39 = (v))
+#define GLOBAL_EDL_WORKSPACE_40 (*(IUH *)((IUH)Gdp + 203))
+#define SET_GLOBAL_EDL_WORKSPACE_40(v) (GLOBAL_EDL_WORKSPACE_40 = (v))
+#define GLOBAL_EDL_WORKSPACE_41 (*(IUH *)((IUH)Gdp + 207))
+#define SET_GLOBAL_EDL_WORKSPACE_41(v) (GLOBAL_EDL_WORKSPACE_41 = (v))
+#define GLOBAL_EDL_WORKSPACE_42 (*(IUH *)((IUH)Gdp + 211))
+#define SET_GLOBAL_EDL_WORKSPACE_42(v) (GLOBAL_EDL_WORKSPACE_42 = (v))
+#define GLOBAL_EDL_WORKSPACE_43 (*(IUH *)((IUH)Gdp + 215))
+#define SET_GLOBAL_EDL_WORKSPACE_43(v) (GLOBAL_EDL_WORKSPACE_43 = (v))
+#define GLOBAL_EDL_WORKSPACE_44 (*(IUH *)((IUH)Gdp + 219))
+#define SET_GLOBAL_EDL_WORKSPACE_44(v) (GLOBAL_EDL_WORKSPACE_44 = (v))
+#define GLOBAL_EDL_WORKSPACE_45 (*(IUH *)((IUH)Gdp + 223))
+#define SET_GLOBAL_EDL_WORKSPACE_45(v) (GLOBAL_EDL_WORKSPACE_45 = (v))
+#define GLOBAL_EDL_WORKSPACE_46 (*(IUH *)((IUH)Gdp + 227))
+#define SET_GLOBAL_EDL_WORKSPACE_46(v) (GLOBAL_EDL_WORKSPACE_46 = (v))
+#define GLOBAL_EDL_WORKSPACE_47 (*(IUH *)((IUH)Gdp + 231))
+#define SET_GLOBAL_EDL_WORKSPACE_47(v) (GLOBAL_EDL_WORKSPACE_47 = (v))
+#define GLOBAL_EDL_WORKSPACE_48 (*(IUH *)((IUH)Gdp + 235))
+#define SET_GLOBAL_EDL_WORKSPACE_48(v) (GLOBAL_EDL_WORKSPACE_48 = (v))
+#define GLOBAL_EDL_WORKSPACE_49 (*(IUH *)((IUH)Gdp + 239))
+#define SET_GLOBAL_EDL_WORKSPACE_49(v) (GLOBAL_EDL_WORKSPACE_49 = (v))
+#define GLOBAL_EDL_WORKSPACE_50 (*(IUH *)((IUH)Gdp + 243))
+#define SET_GLOBAL_EDL_WORKSPACE_50(v) (GLOBAL_EDL_WORKSPACE_50 = (v))
+#define GLOBAL_EDL_WORKSPACE_51 (*(IUH *)((IUH)Gdp + 247))
+#define SET_GLOBAL_EDL_WORKSPACE_51(v) (GLOBAL_EDL_WORKSPACE_51 = (v))
+#define GLOBAL_EDL_WORKSPACE_52 (*(IUH *)((IUH)Gdp + 251))
+#define SET_GLOBAL_EDL_WORKSPACE_52(v) (GLOBAL_EDL_WORKSPACE_52 = (v))
+#define GLOBAL_EDL_WORKSPACE_53 (*(IUH *)((IUH)Gdp + 255))
+#define SET_GLOBAL_EDL_WORKSPACE_53(v) (GLOBAL_EDL_WORKSPACE_53 = (v))
+#define GLOBAL_EDL_WORKSPACE_54 (*(IUH *)((IUH)Gdp + 259))
+#define SET_GLOBAL_EDL_WORKSPACE_54(v) (GLOBAL_EDL_WORKSPACE_54 = (v))
+#define GLOBAL_EDL_WORKSPACE_55 (*(IUH *)((IUH)Gdp + 263))
+#define SET_GLOBAL_EDL_WORKSPACE_55(v) (GLOBAL_EDL_WORKSPACE_55 = (v))
+#define GLOBAL_WhereAmI (*(IU32* *)((IUH)Gdp + 7))
+#define SET_GLOBAL_WhereAmI(v) (GLOBAL_WhereAmI = (v))
+#define GLOBAL_CoRoRet (*(IU32* *)((IUH)Gdp + 11))
+#define SET_GLOBAL_CoRoRet(v) (GLOBAL_CoRoRet = (v))
+#define GLOBAL_WhichAssertion (*(ISH *)((IUH)Gdp + 35))
+#define SET_GLOBAL_WhichAssertion(v) (GLOBAL_WhichAssertion = (v))
+#define GLOBAL_SimulateContext (*(IU8* *)((IUH)Gdp + 19))
+#define SET_GLOBAL_SimulateContext(v) (GLOBAL_SimulateContext = (v))
+#define GLOBAL_ErrorNumber (*(IUH *)((IUH)Gdp + 39))
+#define SET_GLOBAL_ErrorNumber(v) (GLOBAL_ErrorNumber = (v))
+#define GLOBAL_EDL_WORKSPACE_56 (*(IUH *)((IUH)Gdp + 267))
+#define SET_GLOBAL_EDL_WORKSPACE_56(v) (GLOBAL_EDL_WORKSPACE_56 = (v))
+#define GLOBAL_EDL_WORKSPACE_57 (*(IUH *)((IUH)Gdp + 271))
+#define SET_GLOBAL_EDL_WORKSPACE_57(v) (GLOBAL_EDL_WORKSPACE_57 = (v))
+#define GLOBAL_EDL_WORKSPACE_58 (*(IUH *)((IUH)Gdp + 275))
+#define SET_GLOBAL_EDL_WORKSPACE_58(v) (GLOBAL_EDL_WORKSPACE_58 = (v))
+#define GLOBAL_EDL_WORKSPACE_59 (*(IUH *)((IUH)Gdp + 279))
+#define SET_GLOBAL_EDL_WORKSPACE_59(v) (GLOBAL_EDL_WORKSPACE_59 = (v))
+#define GLOBAL_EDL_WORKSPACE_60 (*(IUH *)((IUH)Gdp + 283))
+#define SET_GLOBAL_EDL_WORKSPACE_60(v) (GLOBAL_EDL_WORKSPACE_60 = (v))
+#define GLOBAL_TraceRingRec (*(struct TraceRingREC *)((IUH)Gdp + 287))
+#define SET_GLOBAL_TraceRingRec(v) (GLOBAL_TraceRingRec = (v))
+#define GLOBAL_SubrNumber (*(IUH *)((IUH)Gdp + 3))
+#define SET_GLOBAL_SubrNumber(v) (GLOBAL_SubrNumber = (v))
+#define GLOBAL_SubrRingPtr (*(IUH* *)((IUH)Gdp + 23))
+#define SET_GLOBAL_SubrRingPtr(v) (GLOBAL_SubrRingPtr = (v))
+#define GLOBAL_SubrRingLowIncl (*(IUH* *)((IUH)Gdp + 27))
+#define SET_GLOBAL_SubrRingLowIncl(v) (GLOBAL_SubrRingLowIncl = (v))
+#define GLOBAL_SubrRingHighIncl (*(IUH* *)((IUH)Gdp + 31))
+#define SET_GLOBAL_SubrRingHighIncl(v) (GLOBAL_SubrRingHighIncl = (v))
+#define GLOBAL_HookNumber (*(IUH *)((IUH)Gdp + -1))
+#define SET_GLOBAL_HookNumber(v) (GLOBAL_HookNumber = (v))
+#define GLOBAL_EfiNumber (*(IUH *)((IUH)Gdp + 15))
+#define SET_GLOBAL_EfiNumber(v) (GLOBAL_EfiNumber = (v))
+#define GLOBAL_R_EAX (*(IUH *)((IUH)Gdp + 307))
+#define SET_GLOBAL_R_EAX(v) (GLOBAL_R_EAX = (v))
+#define GLOBAL_R_EBX (*(IUH *)((IUH)Gdp + 311))
+#define SET_GLOBAL_R_EBX(v) (GLOBAL_R_EBX = (v))
+#define GLOBAL_R_ECX (*(IUH *)((IUH)Gdp + 315))
+#define SET_GLOBAL_R_ECX(v) (GLOBAL_R_ECX = (v))
+#define GLOBAL_R_EDX (*(IUH *)((IUH)Gdp + 319))
+#define SET_GLOBAL_R_EDX(v) (GLOBAL_R_EDX = (v))
+#define GLOBAL_R_EBP (*(IUH *)((IUH)Gdp + 323))
+#define SET_GLOBAL_R_EBP(v) (GLOBAL_R_EBP = (v))
+#define GLOBAL_R_ESI (*(IUH *)((IUH)Gdp + 327))
+#define SET_GLOBAL_R_ESI(v) (GLOBAL_R_ESI = (v))
+#define GLOBAL_R_EDI (*(IUH *)((IUH)Gdp + 331))
+#define SET_GLOBAL_R_EDI(v) (GLOBAL_R_EDI = (v))
+#define GLOBAL_EsBase (*(IU32 *)((IUH)Gdp + 335))
+#define SET_GLOBAL_EsBase(v) (GLOBAL_EsBase = (v))
+#define GLOBAL_EsSel (*(IU16 *)((IUH)Gdp + 339))
+#define SET_GLOBAL_EsSel(v) (GLOBAL_EsSel = (v))
+#define GLOBAL_EsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 343))
+#define SET_GLOBAL_EsDesc(v) (GLOBAL_EsDesc = (v))
+#define GLOBAL_EsDescSanctuary (*(struct GLDC_REC* *)((IUH)Gdp + 347))
+#define SET_GLOBAL_EsDescSanctuary(v) (GLOBAL_EsDescSanctuary = (v))
+#define GLOBAL_CsBase (*(IU32 *)((IUH)Gdp + 351))
+#define SET_GLOBAL_CsBase(v) (GLOBAL_CsBase = (v))
+#define GLOBAL_CsSel (*(IU16 *)((IUH)Gdp + 355))
+#define SET_GLOBAL_CsSel(v) (GLOBAL_CsSel = (v))
+#define GLOBAL_CsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 359))
+#define SET_GLOBAL_CsDesc(v) (GLOBAL_CsDesc = (v))
+#define GLOBAL_CsDescSanctuary (*(struct GLDC_REC* *)((IUH)Gdp + 363))
+#define SET_GLOBAL_CsDescSanctuary(v) (GLOBAL_CsDescSanctuary = (v))
+#define GLOBAL_SsBase (*(IU32 *)((IUH)Gdp + 367))
+#define SET_GLOBAL_SsBase(v) (GLOBAL_SsBase = (v))
+#define GLOBAL_SsSel (*(IU16 *)((IUH)Gdp + 371))
+#define SET_GLOBAL_SsSel(v) (GLOBAL_SsSel = (v))
+#define GLOBAL_SsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 375))
+#define SET_GLOBAL_SsDesc(v) (GLOBAL_SsDesc = (v))
+#define GLOBAL_SsDescSanctuary (*(struct GLDC_REC* *)((IUH)Gdp + 379))
+#define SET_GLOBAL_SsDescSanctuary(v) (GLOBAL_SsDescSanctuary = (v))
+#define GLOBAL_DsBase (*(IU32 *)((IUH)Gdp + 383))
+#define SET_GLOBAL_DsBase(v) (GLOBAL_DsBase = (v))
+#define GLOBAL_DsSel (*(IU16 *)((IUH)Gdp + 387))
+#define SET_GLOBAL_DsSel(v) (GLOBAL_DsSel = (v))
+#define GLOBAL_DsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 391))
+#define SET_GLOBAL_DsDesc(v) (GLOBAL_DsDesc = (v))
+#define GLOBAL_DsDescSanctuary (*(struct GLDC_REC* *)((IUH)Gdp + 395))
+#define SET_GLOBAL_DsDescSanctuary(v) (GLOBAL_DsDescSanctuary = (v))
+#define GLOBAL_FsBase (*(IU32 *)((IUH)Gdp + 399))
+#define SET_GLOBAL_FsBase(v) (GLOBAL_FsBase = (v))
+#define GLOBAL_FsSel (*(IU16 *)((IUH)Gdp + 403))
+#define SET_GLOBAL_FsSel(v) (GLOBAL_FsSel = (v))
+#define GLOBAL_FsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 407))
+#define SET_GLOBAL_FsDesc(v) (GLOBAL_FsDesc = (v))
+#define GLOBAL_FsDescSanctuary (*(struct GLDC_REC* *)((IUH)Gdp + 411))
+#define SET_GLOBAL_FsDescSanctuary(v) (GLOBAL_FsDescSanctuary = (v))
+#define GLOBAL_GsBase (*(IU32 *)((IUH)Gdp + 415))
+#define SET_GLOBAL_GsBase(v) (GLOBAL_GsBase = (v))
+#define GLOBAL_GsSel (*(IU16 *)((IUH)Gdp + 419))
+#define SET_GLOBAL_GsSel(v) (GLOBAL_GsSel = (v))
+#define GLOBAL_GsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 423))
+#define SET_GLOBAL_GsDesc(v) (GLOBAL_GsDesc = (v))
+#define GLOBAL_GsDescSanctuary (*(struct GLDC_REC* *)((IUH)Gdp + 427))
+#define SET_GLOBAL_GsDescSanctuary(v) (GLOBAL_GsDescSanctuary = (v))
+#define GLOBAL_LinearDesc (*(struct GLDC_REC* *)((IUH)Gdp + 431))
+#define SET_GLOBAL_LinearDesc(v) (GLOBAL_LinearDesc = (v))
+#define GLOBAL_SystemDesc (*(struct GLDC_REC* *)((IUH)Gdp + 435))
+#define SET_GLOBAL_SystemDesc(v) (GLOBAL_SystemDesc = (v))
+#define GLOBAL_CPL (*(IUH *)((IUH)Gdp + 439))
+#define SET_GLOBAL_CPL(v) (GLOBAL_CPL = (v))
+#define GLOBAL_GdtrBase (*(IU32 *)((IUH)Gdp + 443))
+#define SET_GLOBAL_GdtrBase(v) (GLOBAL_GdtrBase = (v))
+#define GLOBAL_GdtrLimit (*(IU16 *)((IUH)Gdp + 447))
+#define SET_GLOBAL_GdtrLimit(v) (GLOBAL_GdtrLimit = (v))
+#define GLOBAL_LdtSel (*(IU16 *)((IUH)Gdp + 451))
+#define SET_GLOBAL_LdtSel(v) (GLOBAL_LdtSel = (v))
+#define GLOBAL_LdtrBase (*(IU32 *)((IUH)Gdp + 455))
+#define SET_GLOBAL_LdtrBase(v) (GLOBAL_LdtrBase = (v))
+#define GLOBAL_LdtrLimit (*(IU32 *)((IUH)Gdp + 459))
+#define SET_GLOBAL_LdtrLimit(v) (GLOBAL_LdtrLimit = (v))
+#define GLOBAL_TrSel (*(IU16 *)((IUH)Gdp + 463))
+#define SET_GLOBAL_TrSel(v) (GLOBAL_TrSel = (v))
+#define GLOBAL_TrBase (*(IU32 *)((IUH)Gdp + 467))
+#define SET_GLOBAL_TrBase(v) (GLOBAL_TrBase = (v))
+#define GLOBAL_TrLimit (*(IU32 *)((IUH)Gdp + 471))
+#define SET_GLOBAL_TrLimit(v) (GLOBAL_TrLimit = (v))
+#define GLOBAL_TrDescSt (*(IUH *)((IUH)Gdp + 475))
+#define SET_GLOBAL_TrDescSt(v) (GLOBAL_TrDescSt = (v))
+#define GLOBAL_TrIoBase (*(IU32 *)((IUH)Gdp + 479))
+#define SET_GLOBAL_TrIoBase(v) (GLOBAL_TrIoBase = (v))
+#define GLOBAL_TrIoLimit (*(IS32 *)((IUH)Gdp + 483))
+#define SET_GLOBAL_TrIoLimit(v) (GLOBAL_TrIoLimit = (v))
+#define GLOBAL_IdtrBase (*(IU32 *)((IUH)Gdp + 487))
+#define SET_GLOBAL_IdtrBase(v) (GLOBAL_IdtrBase = (v))
+#define GLOBAL_IdtrLimit (*(IU16 *)((IUH)Gdp + 491))
+#define SET_GLOBAL_IdtrLimit(v) (GLOBAL_IdtrLimit = (v))
+#define GLOBAL_AbortPigRun (*(IUH *)((IUH)Gdp + 495))
+#define SET_GLOBAL_AbortPigRun(v) (GLOBAL_AbortPigRun = (v))
+#define GLOBAL_RegsAndFlagsUndefined (*(IUH *)((IUH)Gdp + 499))
+#define SET_GLOBAL_RegsAndFlagsUndefined(v) (GLOBAL_RegsAndFlagsUndefined = (v))
+#define GLOBAL_SigalrmOccurred (*(IUH *)((IUH)Gdp + 503))
+#define SET_GLOBAL_SigalrmOccurred(v) (GLOBAL_SigalrmOccurred = (v))
+#define GLOBAL_PigEnabled (*(IUH *)((IUH)Gdp + 507))
+#define SET_GLOBAL_PigEnabled(v) (GLOBAL_PigEnabled = (v))
+#define GLOBAL_EFLAGS (*(IUH *)((IUH)Gdp + 511))
+#define SET_GLOBAL_EFLAGS(v) (GLOBAL_EFLAGS = (v))
+#define GLOBAL_Ft (*(IUH *)((IUH)Gdp + 515))
+#define SET_GLOBAL_Ft(v) (GLOBAL_Ft = (v))
+#define GLOBAL_F1 (*(IUH *)((IUH)Gdp + 519))
+#define SET_GLOBAL_F1(v) (GLOBAL_F1 = (v))
+#define GLOBAL_F2 (*(IUH *)((IUH)Gdp + 523))
+#define SET_GLOBAL_F2(v) (GLOBAL_F2 = (v))
+#define GLOBAL_F3 (*(IUH *)((IUH)Gdp + 527))
+#define SET_GLOBAL_F3(v) (GLOBAL_F3 = (v))
+#define GLOBAL_R_CR0 (*(IUH *)((IUH)Gdp + 531))
+#define SET_GLOBAL_R_CR0(v) (GLOBAL_R_CR0 = (v))
+#define GLOBAL_R_CR1 (*(IUH *)((IUH)Gdp + 535))
+#define SET_GLOBAL_R_CR1(v) (GLOBAL_R_CR1 = (v))
+#define GLOBAL_R_CR2 (*(IUH *)((IUH)Gdp + 539))
+#define SET_GLOBAL_R_CR2(v) (GLOBAL_R_CR2 = (v))
+#define GLOBAL_R_CR3 (*(IUH *)((IUH)Gdp + 543))
+#define SET_GLOBAL_R_CR3(v) (GLOBAL_R_CR3 = (v))
+#define GLOBAL_R_CR4 (*(IUH *)((IUH)Gdp + 547))
+#define SET_GLOBAL_R_CR4(v) (GLOBAL_R_CR4 = (v))
+#define GLOBAL_R_CR5 (*(IUH *)((IUH)Gdp + 551))
+#define SET_GLOBAL_R_CR5(v) (GLOBAL_R_CR5 = (v))
+#define GLOBAL_R_CR6 (*(IUH *)((IUH)Gdp + 555))
+#define SET_GLOBAL_R_CR6(v) (GLOBAL_R_CR6 = (v))
+#define GLOBAL_R_CR7 (*(IUH *)((IUH)Gdp + 559))
+#define SET_GLOBAL_R_CR7(v) (GLOBAL_R_CR7 = (v))
+#define GLOBAL_R_TR0 (*(IUH *)((IUH)Gdp + 563))
+#define SET_GLOBAL_R_TR0(v) (GLOBAL_R_TR0 = (v))
+#define GLOBAL_R_TR1 (*(IUH *)((IUH)Gdp + 567))
+#define SET_GLOBAL_R_TR1(v) (GLOBAL_R_TR1 = (v))
+#define GLOBAL_R_TR2 (*(IUH *)((IUH)Gdp + 571))
+#define SET_GLOBAL_R_TR2(v) (GLOBAL_R_TR2 = (v))
+#define GLOBAL_R_TR3 (*(IUH *)((IUH)Gdp + 575))
+#define SET_GLOBAL_R_TR3(v) (GLOBAL_R_TR3 = (v))
+#define GLOBAL_R_TR4 (*(IUH *)((IUH)Gdp + 579))
+#define SET_GLOBAL_R_TR4(v) (GLOBAL_R_TR4 = (v))
+#define GLOBAL_R_TR5 (*(IUH *)((IUH)Gdp + 583))
+#define SET_GLOBAL_R_TR5(v) (GLOBAL_R_TR5 = (v))
+#define GLOBAL_R_TR6 (*(IUH *)((IUH)Gdp + 587))
+#define SET_GLOBAL_R_TR6(v) (GLOBAL_R_TR6 = (v))
+#define GLOBAL_R_TR7 (*(IUH *)((IUH)Gdp + 591))
+#define SET_GLOBAL_R_TR7(v) (GLOBAL_R_TR7 = (v))
+#define GLOBAL_R_DR0 (*(IUH *)((IUH)Gdp + 595))
+#define SET_GLOBAL_R_DR0(v) (GLOBAL_R_DR0 = (v))
+#define GLOBAL_R_DR1 (*(IUH *)((IUH)Gdp + 599))
+#define SET_GLOBAL_R_DR1(v) (GLOBAL_R_DR1 = (v))
+#define GLOBAL_R_DR2 (*(IUH *)((IUH)Gdp + 603))
+#define SET_GLOBAL_R_DR2(v) (GLOBAL_R_DR2 = (v))
+#define GLOBAL_R_DR3 (*(IUH *)((IUH)Gdp + 607))
+#define SET_GLOBAL_R_DR3(v) (GLOBAL_R_DR3 = (v))
+#define GLOBAL_R_DR4 (*(IUH *)((IUH)Gdp + 611))
+#define SET_GLOBAL_R_DR4(v) (GLOBAL_R_DR4 = (v))
+#define GLOBAL_R_DR5 (*(IUH *)((IUH)Gdp + 615))
+#define SET_GLOBAL_R_DR5(v) (GLOBAL_R_DR5 = (v))
+#define GLOBAL_R_DR6 (*(IUH *)((IUH)Gdp + 619))
+#define SET_GLOBAL_R_DR6(v) (GLOBAL_R_DR6 = (v))
+#define GLOBAL_R_DR7 (*(IUH *)((IUH)Gdp + 623))
+#define SET_GLOBAL_R_DR7(v) (GLOBAL_R_DR7 = (v))
+#define GLOBAL_InNanoCpu (((*(IBOOL *)((IUH)Gdp + 627)) & 1) != 0)
+#define SET_GLOBAL_InNanoCpu(v) ((*(IBOOL *)((IUH)Gdp + 627)) = (v) ? 1: 0)
+#define GLOBAL_UseNanoCpu (((*(IBOOL *)((IUH)Gdp + 631)) & 1) != 0)
+#define SET_GLOBAL_UseNanoCpu(v) ((*(IBOOL *)((IUH)Gdp + 631)) = (v) ? 1: 0)
+#define GLOBAL_UseLightCompiler (((*(IBOOL *)((IUH)Gdp + 635)) & 1) != 0)
+#define SET_GLOBAL_UseLightCompiler(v) ((*(IBOOL *)((IUH)Gdp + 635)) = (v) ? 1: 0)
+#define GLOBAL_NeedInterInstructionAction (((*(IBOOL *)((IUH)Gdp + 639)) & 1) != 0)
+#define SET_GLOBAL_NeedInterInstructionAction(v) ((*(IBOOL *)((IUH)Gdp + 639)) = (v) ? 1: 0)
+#define GLOBAL_UseCCodeCopier (((*(IBOOL *)((IUH)Gdp + 643)) & 1) != 0)
+#define SET_GLOBAL_UseCCodeCopier(v) ((*(IBOOL *)((IUH)Gdp + 643)) = (v) ? 1: 0)
+#define GLOBAL_seenWithFlags (((*(IBOOL *)((IUH)Gdp + 647)) & 1) != 0)
+#define SET_GLOBAL_seenWithFlags(v) ((*(IBOOL *)((IUH)Gdp + 647)) = (v) ? 1: 0)
+#define GLOBAL_SeenPrePatchJcond (((*(IBOOL *)((IUH)Gdp + 651)) & 1) != 0)
+#define SET_GLOBAL_SeenPrePatchJcond(v) ((*(IBOOL *)((IUH)Gdp + 651)) = (v) ? 1: 0)
+#define GLOBAL_needNextIntelEip (((*(IBOOL *)((IUH)Gdp + 655)) & 1) != 0)
+#define SET_GLOBAL_needNextIntelEip(v) ((*(IBOOL *)((IUH)Gdp + 655)) = (v) ? 1: 0)
+#define GLOBAL_CopierUniverse (*(IU32 *)((IUH)Gdp + 659))
+#define SET_GLOBAL_CopierUniverse(v) (GLOBAL_CopierUniverse = (v))
+#define GLOBAL_lastCopierUniverse (*(IU32 *)((IUH)Gdp + 663))
+#define SET_GLOBAL_lastCopierUniverse(v) (GLOBAL_lastCopierUniverse = (v))
+#define GLOBAL_LastSetCopierUniverse (*(IU32 *)((IUH)Gdp + 667))
+#define SET_GLOBAL_LastSetCopierUniverse(v) (GLOBAL_LastSetCopierUniverse = (v))
+#define GLOBAL_currPFragInfoRec (*(struct FragmentInfoREC* *)((IUH)Gdp + 671))
+#define SET_GLOBAL_currPFragInfoRec(v) (GLOBAL_currPFragInfoRec = (v))
+#define GLOBAL_maxPFragInfoRec (*(struct FragmentInfoREC* *)((IUH)Gdp + 675))
+#define SET_GLOBAL_maxPFragInfoRec(v) (GLOBAL_maxPFragInfoRec = (v))
+#define GLOBAL_copierCleanups (*(IU8* *)((IUH)Gdp + 679))
+#define SET_GLOBAL_copierCleanups(v) (GLOBAL_copierCleanups = (v))
+#define GLOBAL_lastHostCleanup (*(IU8* *)((IUH)Gdp + 683))
+#define SET_GLOBAL_lastHostCleanup(v) (GLOBAL_lastHostCleanup = (v))
+#define GLOBAL_lastHostAddress (*(IU32* *)((IUH)Gdp + 687))
+#define SET_GLOBAL_lastHostAddress(v) (GLOBAL_lastHostAddress = (v))
+#define GLOBAL_lastIntelAddress (*(IU32 *)((IUH)Gdp + 691))
+#define SET_GLOBAL_lastIntelAddress(v) (GLOBAL_lastIntelAddress = (v))
+#define GLOBAL_destHashTable (*(struct JUMP_REC** *)((IUH)Gdp + 695))
+#define SET_GLOBAL_destHashTable(v) (GLOBAL_destHashTable = (v))
+#define GLOBAL_jumpHashTable (*(struct JUMP_REC** *)((IUH)Gdp + 699))
+#define SET_GLOBAL_jumpHashTable(v) (GLOBAL_jumpHashTable = (v))
+#define GLOBAL_freeJumpRecPtr (*(struct JUMP_REC* *)((IUH)Gdp + 703))
+#define SET_GLOBAL_freeJumpRecPtr(v) (GLOBAL_freeJumpRecPtr = (v))
+#define GLOBAL_nextFreeJumpRec (*(struct JUMP_REC* *)((IUH)Gdp + 707))
+#define SET_GLOBAL_nextFreeJumpRec(v) (GLOBAL_nextFreeJumpRec = (v))
+#define GLOBAL_freeJumpRecCount (*(IS32 *)((IUH)Gdp + 711))
+#define SET_GLOBAL_freeJumpRecCount(v) (GLOBAL_freeJumpRecCount = (v))
+#define GLOBAL_poolJumpRecCount (*(IU32 *)((IUH)Gdp + 715))
+#define SET_GLOBAL_poolJumpRecCount(v) (GLOBAL_poolJumpRecCount = (v))
+#define GLOBAL_vctOffsets (*(IU16* *)((IUH)Gdp + 719))
+#define SET_GLOBAL_vctOffsets(v) (GLOBAL_vctOffsets = (v))
+#define GLOBAL_anonOffsets (*(IU16* *)((IUH)Gdp + 723))
+#define SET_GLOBAL_anonOffsets(v) (GLOBAL_anonOffsets = (v))
+#define GLOBAL_selectionDataBasePtr (*(struct selectionDataREC* *)((IUH)Gdp + 727))
+#define SET_GLOBAL_selectionDataBasePtr(v) (GLOBAL_selectionDataBasePtr = (v))
+#define GLOBAL_soloCodeBasePtr (*(struct codeAndActionDataREC* *)((IUH)Gdp + 731))
+#define SET_GLOBAL_soloCodeBasePtr(v) (GLOBAL_soloCodeBasePtr = (v))
+#define GLOBAL_multiCodeBasePtr (*(struct codeAndActionDataREC* *)((IUH)Gdp + 735))
+#define SET_GLOBAL_multiCodeBasePtr(v) (GLOBAL_multiCodeBasePtr = (v))
+#define GLOBAL_codeOffsScaleShift (*(IU32 *)((IUH)Gdp + 739))
+#define SET_GLOBAL_codeOffsScaleShift(v) (GLOBAL_codeOffsScaleShift = (v))
+#define GLOBAL_tuples (*(struct TUPLE_REC* *)((IUH)Gdp + 743))
+#define SET_GLOBAL_tuples(v) (GLOBAL_tuples = (v))
+#define GLOBAL_cursor (*(struct TUPLE_REC* *)((IUH)Gdp + 747))
+#define SET_GLOBAL_cursor(v) (GLOBAL_cursor = (v))
+#define GLOBAL_tuplePtr (*(struct TUPLE_REC* *)((IUH)Gdp + 751))
+#define SET_GLOBAL_tuplePtr(v) (GLOBAL_tuplePtr = (v))
+#define GLOBAL_patchRecPtr (*(IU8* *)((IUH)Gdp + 755))
+#define SET_GLOBAL_patchRecPtr(v) (GLOBAL_patchRecPtr = (v))
+#define GLOBAL_srcPtr (*(IU32* *)((IUH)Gdp + 759))
+#define SET_GLOBAL_srcPtr(v) (GLOBAL_srcPtr = (v))
+#define GLOBAL_dstPtr (*(IU32* *)((IUH)Gdp + 763))
+#define SET_GLOBAL_dstPtr(v) (GLOBAL_dstPtr = (v))
+#define GLOBAL_BackoverMarkerAddr (*(IU32* *)((IUH)Gdp + 767))
+#define SET_GLOBAL_BackoverMarkerAddr(v) (GLOBAL_BackoverMarkerAddr = (v))
+#define GLOBAL_patchTable (*(IU32** *)((IUH)Gdp + 771))
+#define SET_GLOBAL_patchTable(v) (GLOBAL_patchTable = (v))
+#define GLOBAL_patchNames (*(IU8** *)((IUH)Gdp + 775))
+#define SET_GLOBAL_patchNames(v) (GLOBAL_patchNames = (v))
+#define GLOBAL_CopierFt (*(IUH *)((IUH)Gdp + 779))
+#define SET_GLOBAL_CopierFt(v) (GLOBAL_CopierFt = (v))
+#define GLOBAL_FtIsLazy (((*(IBOOL *)((IUH)Gdp + 783)) & 1) != 0)
+#define SET_GLOBAL_FtIsLazy(v) ((*(IBOOL *)((IUH)Gdp + 783)) = (v) ? 1: 0)
+#define GLOBAL_UnivIsLazy (((*(IBOOL *)((IUH)Gdp + 787)) & 1) != 0)
+#define SET_GLOBAL_UnivIsLazy(v) ((*(IBOOL *)((IUH)Gdp + 787)) = (v) ? 1: 0)
+#define GLOBAL_FlagsAreSuppressed (((*(IBOOL *)((IUH)Gdp + 791)) & 1) != 0)
+#define SET_GLOBAL_FlagsAreSuppressed(v) ((*(IBOOL *)((IUH)Gdp + 791)) = (v) ? 1: 0)
+#define GLOBAL_lastLazyFt (*(IUH *)((IUH)Gdp + 795))
+#define SET_GLOBAL_lastLazyFt(v) (GLOBAL_lastLazyFt = (v))
+#define GLOBAL_univVarMask (*(IU32 *)((IUH)Gdp + 799))
+#define SET_GLOBAL_univVarMask(v) (GLOBAL_univVarMask = (v))
+#define GLOBAL_zSafeContinueCheckEFI (*(IU16 *)((IUH)Gdp + 803))
+#define SET_GLOBAL_zSafeContinueCheckEFI(v) (GLOBAL_zSafeContinueCheckEFI = (v))
+#define GLOBAL_zCoRoRetEFI (*(IU16 *)((IUH)Gdp + 807))
+#define SET_GLOBAL_zCoRoRetEFI(v) (GLOBAL_zCoRoRetEFI = (v))
+#define GLOBAL_zPatchMeEFI (*(IU16 *)((IUH)Gdp + 811))
+#define SET_GLOBAL_zPatchMeEFI(v) (GLOBAL_zPatchMeEFI = (v))
+#define GLOBAL_zPostPopEFI (*(IU16 *)((IUH)Gdp + 815))
+#define SET_GLOBAL_zPostPopEFI(v) (GLOBAL_zPostPopEFI = (v))
+#define GLOBAL_zAdjustHspEFI (*(IU16 *)((IUH)Gdp + 819))
+#define SET_GLOBAL_zAdjustHspEFI(v) (GLOBAL_zAdjustHspEFI = (v))
+#define GLOBAL_zLssSpEFI (*(IU16 *)((IUH)Gdp + 823))
+#define SET_GLOBAL_zLssSpEFI(v) (GLOBAL_zLssSpEFI = (v))
+#define GLOBAL_zLssEspEFI (*(IU16 *)((IUH)Gdp + 827))
+#define SET_GLOBAL_zLssEspEFI(v) (GLOBAL_zLssEspEFI = (v))
+#define GLOBAL_lastInstructionInFragment (((*(IBOOL *)((IUH)Gdp + 831)) & 1) != 0)
+#define SET_GLOBAL_lastInstructionInFragment(v) ((*(IBOOL *)((IUH)Gdp + 831)) = (v) ? 1: 0)
+#define GLOBAL_lateInInstruction (((*(IBOOL *)((IUH)Gdp + 835)) & 1) != 0)
+#define SET_GLOBAL_lateInInstruction(v) ((*(IBOOL *)((IUH)Gdp + 835)) = (v) ? 1: 0)
+#define GLOBAL_stackDestViaAdjust (((*(IBOOL *)((IUH)Gdp + 839)) & 1) != 0)
+#define SET_GLOBAL_stackDestViaAdjust(v) ((*(IBOOL *)((IUH)Gdp + 839)) = (v) ? 1: 0)
+#define GLOBAL_stackDestAdjustPositive (((*(IBOOL *)((IUH)Gdp + 843)) & 1) != 0)
+#define SET_GLOBAL_stackDestAdjustPositive(v) ((*(IBOOL *)((IUH)Gdp + 843)) = (v) ? 1: 0)
+#define GLOBAL_stackDestAdjustUnitary (((*(IBOOL *)((IUH)Gdp + 847)) & 1) != 0)
+#define SET_GLOBAL_stackDestAdjustUnitary(v) ((*(IBOOL *)((IUH)Gdp + 847)) = (v) ? 1: 0)
+#define GLOBAL_suppressHspCheck (((*(IBOOL *)((IUH)Gdp + 851)) & 1) != 0)
+#define SET_GLOBAL_suppressHspCheck(v) ((*(IBOOL *)((IUH)Gdp + 851)) = (v) ? 1: 0)
+#define GLOBAL_suppressHbpCheck (((*(IBOOL *)((IUH)Gdp + 855)) & 1) != 0)
+#define SET_GLOBAL_suppressHbpCheck(v) ((*(IBOOL *)((IUH)Gdp + 855)) = (v) ? 1: 0)
+#define GLOBAL_ReSelectVariant (((*(IBOOL *)((IUH)Gdp + 859)) & 1) != 0)
+#define SET_GLOBAL_ReSelectVariant(v) ((*(IBOOL *)((IUH)Gdp + 859)) = (v) ? 1: 0)
+#define GLOBAL_ReSelectTupleSkipCnt (*(ISH *)((IUH)Gdp + 863))
+#define SET_GLOBAL_ReSelectTupleSkipCnt(v) (GLOBAL_ReSelectTupleSkipCnt = (v))
+#define GLOBAL_suppressEaThread (((*(IBOOL *)((IUH)Gdp + 867)) & 1) != 0)
+#define SET_GLOBAL_suppressEaThread(v) ((*(IBOOL *)((IUH)Gdp + 867)) = (v) ? 1: 0)
+#define GLOBAL_postPopPending (((*(IBOOL *)((IUH)Gdp + 871)) & 1) != 0)
+#define SET_GLOBAL_postPopPending(v) ((*(IBOOL *)((IUH)Gdp + 871)) = (v) ? 1: 0)
+#define GLOBAL_postPopSize (*(IU32 *)((IUH)Gdp + 875))
+#define SET_GLOBAL_postPopSize(v) (GLOBAL_postPopSize = (v))
+#define GLOBAL_PatchIdLazyPatch (*(IU16 *)((IUH)Gdp + 879))
+#define SET_GLOBAL_PatchIdLazyPatch(v) (GLOBAL_PatchIdLazyPatch = (v))
+#define GLOBAL_PatchIdBailoutDispatch (*(IU16 *)((IUH)Gdp + 883))
+#define SET_GLOBAL_PatchIdBailoutDispatch(v) (GLOBAL_PatchIdBailoutDispatch = (v))
+#define GLOBAL_PatchIdCondRetDispatch (*(IU16 *)((IUH)Gdp + 887))
+#define SET_GLOBAL_PatchIdCondRetDispatch(v) (GLOBAL_PatchIdCondRetDispatch = (v))
+#define GLOBAL_compTimeFtBitNum (*(IU32 *)((IUH)Gdp + 891))
+#define SET_GLOBAL_compTimeFtBitNum(v) (GLOBAL_compTimeFtBitNum = (v))
+#define GLOBAL_realmodeBitNum (*(IU32 *)((IUH)Gdp + 895))
+#define SET_GLOBAL_realmodeBitNum(v) (GLOBAL_realmodeBitNum = (v))
+#define GLOBAL_pmSrSemanticsBitNum (*(IU32 *)((IUH)Gdp + 899))
+#define SET_GLOBAL_pmSrSemanticsBitNum(v) (GLOBAL_pmSrSemanticsBitNum = (v))
+#define GLOBAL_v8086BitNum (*(IU32 *)((IUH)Gdp + 903))
+#define SET_GLOBAL_v8086BitNum(v) (GLOBAL_v8086BitNum = (v))
+#define GLOBAL_accIsReadBitMask (*(IU32 *)((IUH)Gdp + 907))
+#define SET_GLOBAL_accIsReadBitMask(v) (GLOBAL_accIsReadBitMask = (v))
+#define GLOBAL_SOBbitMask (*(IU32 *)((IUH)Gdp + 911))
+#define SET_GLOBAL_SOBbitMask(v) (GLOBAL_SOBbitMask = (v))
+#define GLOBAL_BOBbitMask (*(IU32 *)((IUH)Gdp + 915))
+#define SET_GLOBAL_BOBbitMask(v) (GLOBAL_BOBbitMask = (v))
+#define GLOBAL_CCObitMask (*(IU32 *)((IUH)Gdp + 919))
+#define SET_GLOBAL_CCObitMask(v) (GLOBAL_CCObitMask = (v))
+#define GLOBAL_useHbpBitMask (*(IU32 *)((IUH)Gdp + 923))
+#define SET_GLOBAL_useHbpBitMask(v) (GLOBAL_useHbpBitMask = (v))
+#define GLOBAL_NeedSafeToContinueCheck (((*(IBOOL *)((IUH)Gdp + 927)) & 1) != 0)
+#define SET_GLOBAL_NeedSafeToContinueCheck(v) ((*(IBOOL *)((IUH)Gdp + 927)) = (v) ? 1: 0)
+#define GLOBAL_ContinueCheckFailure (((*(IBOOL *)((IUH)Gdp + 931)) & 1) != 0)
+#define SET_GLOBAL_ContinueCheckFailure(v) ((*(IBOOL *)((IUH)Gdp + 931)) = (v) ? 1: 0)
+#define GLOBAL_NeedCoRoutineReturn (((*(IBOOL *)((IUH)Gdp + 935)) & 1) != 0)
+#define SET_GLOBAL_NeedCoRoutineReturn(v) ((*(IBOOL *)((IUH)Gdp + 935)) = (v) ? 1: 0)
+#define GLOBAL_SuppressCoRoutineReturn (((*(IBOOL *)((IUH)Gdp + 939)) & 1) != 0)
+#define SET_GLOBAL_SuppressCoRoutineReturn(v) ((*(IBOOL *)((IUH)Gdp + 939)) = (v) ? 1: 0)
+#define GLOBAL_RwCopyingWriteBack (((*(IBOOL *)((IUH)Gdp + 943)) & 1) != 0)
+#define SET_GLOBAL_RwCopyingWriteBack(v) ((*(IBOOL *)((IUH)Gdp + 943)) = (v) ? 1: 0)
+#define GLOBAL_LazyCoRoRet (((*(IBOOL *)((IUH)Gdp + 947)) & 1) != 0)
+#define SET_GLOBAL_LazyCoRoRet(v) ((*(IBOOL *)((IUH)Gdp + 947)) = (v) ? 1: 0)
+#define GLOBAL_noFlagsBitMask (*(IU32 *)((IUH)Gdp + 951))
+#define SET_GLOBAL_noFlagsBitMask(v) (GLOBAL_noFlagsBitMask = (v))
+#define GLOBAL_currCompileMinLa (*(IU32 *)((IUH)Gdp + 955))
+#define SET_GLOBAL_currCompileMinLa(v) (GLOBAL_currCompileMinLa = (v))
+#define GLOBAL_currCompileMaxLa (*(IU32 *)((IUH)Gdp + 959))
+#define SET_GLOBAL_currCompileMaxLa(v) (GLOBAL_currCompileMaxLa = (v))
+#define GLOBAL_DoRWImmOpt (((*(IBOOL *)((IUH)Gdp + 963)) & 1) != 0)
+#define SET_GLOBAL_DoRWImmOpt(v) ((*(IBOOL *)((IUH)Gdp + 963)) = (v) ? 1: 0)
+#define GLOBAL_ImmRWOptMaskBit (*(IUH *)((IUH)Gdp + 967))
+#define SET_GLOBAL_ImmRWOptMaskBit(v) (GLOBAL_ImmRWOptMaskBit = (v))
+#define GLOBAL_NoImmRWOptMaskBit (*(IUH *)((IUH)Gdp + 971))
+#define SET_GLOBAL_NoImmRWOptMaskBit(v) (GLOBAL_NoImmRWOptMaskBit = (v))
+#define GLOBAL_ImmRWAddr32Mask (*(IUH *)((IUH)Gdp + 975))
+#define SET_GLOBAL_ImmRWAddr32Mask(v) (GLOBAL_ImmRWAddr32Mask = (v))
+#define GLOBAL_NaturalAlignmentCVMask (*(IUH *)((IUH)Gdp + 979))
+#define SET_GLOBAL_NaturalAlignmentCVMask(v) (GLOBAL_NaturalAlignmentCVMask = (v))
+#define GLOBAL_ImmRWPhysPtr (*(IU8* *)((IUH)Gdp + 983))
+#define SET_GLOBAL_ImmRWPhysPtr(v) (GLOBAL_ImmRWPhysPtr = (v))
+#define GLOBAL_InheritedDangerousCVs (*(IU32 *)((IUH)Gdp + 987))
+#define SET_GLOBAL_InheritedDangerousCVs(v) (GLOBAL_InheritedDangerousCVs = (v))
+#define GLOBAL_FragDangerousCVMask (*(IU32 *)((IUH)Gdp + 991))
+#define SET_GLOBAL_FragDangerousCVMask(v) (GLOBAL_FragDangerousCVMask = (v))
+#define GLOBAL_zFragProfEFI (*(IU16 *)((IUH)Gdp + 995))
+#define SET_GLOBAL_zFragProfEFI(v) (GLOBAL_zFragProfEFI = (v))
+#define GLOBAL_FragProfIndex (*(IUH *)((IUH)Gdp + 999))
+#define SET_GLOBAL_FragProfIndex(v) (GLOBAL_FragProfIndex = (v))
+#define GLOBAL_DynamicSrcRegActions (*(IU8* *)((IUH)Gdp + 1003))
+#define SET_GLOBAL_DynamicSrcRegActions(v) (GLOBAL_DynamicSrcRegActions = (v))
+#define GLOBAL_outlierAddr (*(IU32* *)((IUH)Gdp + 1007))
+#define SET_GLOBAL_outlierAddr(v) (GLOBAL_outlierAddr = (v))
+#define GLOBAL_jumpRecPtr (*(struct JUMP_REC* *)((IUH)Gdp + 1011))
+#define SET_GLOBAL_jumpRecPtr(v) (GLOBAL_jumpRecPtr = (v))
+#define GLOBAL_HbpIsSetup (((*(IBOOL *)((IUH)Gdp + 1015)) & 1) != 0)
+#define SET_GLOBAL_HbpIsSetup(v) ((*(IBOOL *)((IUH)Gdp + 1015)) = (v) ? 1: 0)
+#define GLOBAL_hbpMinDisp (*(IUH *)((IUH)Gdp + 1019))
+#define SET_GLOBAL_hbpMinDisp(v) (GLOBAL_hbpMinDisp = (v))
+#define GLOBAL_hbpMaxDisp (*(IUH *)((IUH)Gdp + 1023))
+#define SET_GLOBAL_hbpMaxDisp(v) (GLOBAL_hbpMaxDisp = (v))
+#define GLOBAL_DSTAT_DsBaseBailOuts (*(IUH *)((IUH)Gdp + 1027))
+#define SET_GLOBAL_DSTAT_DsBaseBailOuts(v) (GLOBAL_DSTAT_DsBaseBailOuts = (v))
+#define GLOBAL_DsIsChecked (((*(IBOOL *)((IUH)Gdp + 1031)) & 1) != 0)
+#define SET_GLOBAL_DsIsChecked(v) ((*(IBOOL *)((IUH)Gdp + 1031)) = (v) ? 1: 0)
+#define GLOBAL_EDL_WORKSPACE_61 (*(IUH *)((IUH)Gdp + 1035))
+#define SET_GLOBAL_EDL_WORKSPACE_61(v) (GLOBAL_EDL_WORKSPACE_61 = (v))
+#define GLOBAL_CleanedRec (*(struct CleanedREC *)((IUH)Gdp + 1039))
+#define SET_GLOBAL_CleanedRec(v) (GLOBAL_CleanedRec = (v))
+#define GLOBAL_CurrentUniverse (*(IU32 *)((IUH)Gdp + 1055))
+#define SET_GLOBAL_CurrentUniverse(v) (GLOBAL_CurrentUniverse = (v))
+#define GLOBAL_EntryPointCache (*(struct EntryPointCacheREC* *)((IUH)Gdp + 1059))
+#define SET_GLOBAL_EntryPointCache(v) (GLOBAL_EntryPointCache = (v))
+#define GLOBAL_CsSkewedEntryPointCache (*(struct EntryPointCacheREC* *)((IUH)Gdp + 1063))
+#define SET_GLOBAL_CsSkewedEntryPointCache(v) (GLOBAL_CsSkewedEntryPointCache = (v))
+#define GLOBAL_CsLinear (*(IU32 *)((IUH)Gdp + 1067))
+#define SET_GLOBAL_CsLinear(v) (GLOBAL_CsLinear = (v))
+#define GLOBAL_LruCountdown (*(IS32 *)((IUH)Gdp + 1071))
+#define SET_GLOBAL_LruCountdown(v) (GLOBAL_LruCountdown = (v))
+#define GLOBAL_LruCountdownResetValue (*(IS32 *)((IUH)Gdp + 1075))
+#define SET_GLOBAL_LruCountdownResetValue(v) (GLOBAL_LruCountdownResetValue = (v))
+#define GLOBAL_JumpCounter (*(ISH *)((IUH)Gdp + 1079))
+#define SET_GLOBAL_JumpCounter(v) (GLOBAL_JumpCounter = (v))
+#define GLOBAL_JumpRestart (*(ISH *)((IUH)Gdp + 1083))
+#define SET_GLOBAL_JumpRestart(v) (GLOBAL_JumpRestart = (v))
+#define GLOBAL_JumpCalibrate (*(ISH *)((IUH)Gdp + 1087))
+#define SET_GLOBAL_JumpCalibrate(v) (GLOBAL_JumpCalibrate = (v))
+#define GLOBAL_InitialJumpCounter (*(ISH *)((IUH)Gdp + 1091))
+#define SET_GLOBAL_InitialJumpCounter(v) (GLOBAL_InitialJumpCounter = (v))
+#define GLOBAL_minimumInitialVal (*(IUH *)((IUH)Gdp + 1095))
+#define SET_GLOBAL_minimumInitialVal(v) (GLOBAL_minimumInitialVal = (v))
+#define GLOBAL_cyclicJcRestartVal (*(IUH *)((IUH)Gdp + 1099))
+#define SET_GLOBAL_cyclicJcRestartVal(v) (GLOBAL_cyclicJcRestartVal = (v))
+#define GLOBAL_IretHookStack (*(struct IretHookStackREC* *)((IUH)Gdp + 1103))
+#define SET_GLOBAL_IretHookStack(v) (GLOBAL_IretHookStack = (v))
+#define GLOBAL_IretHookStackIndex (*(ISH *)((IUH)Gdp + 1107))
+#define SET_GLOBAL_IretHookStackIndex(v) (GLOBAL_IretHookStackIndex = (v))
+#define GLOBAL_InstructionCount (*(IUH *)((IUH)Gdp + 1111))
+#define SET_GLOBAL_InstructionCount(v) (GLOBAL_InstructionCount = (v))
+#define GLOBAL_Constraint2CvMap (*(IU32* *)((IUH)Gdp + 1115))
+#define SET_GLOBAL_Constraint2CvMap(v) (GLOBAL_Constraint2CvMap = (v))
+#define GLOBAL_InsertBPIs (((*(IBOOL *)((IUH)Gdp + 1119)) & 1) != 0)
+#define SET_GLOBAL_InsertBPIs(v) ((*(IBOOL *)((IUH)Gdp + 1119)) = (v) ? 1: 0)
+#define GLOBAL_CurrentCookie (*(IU32 *)((IUH)Gdp + 1123))
+#define SET_GLOBAL_CurrentCookie(v) (GLOBAL_CurrentCookie = (v))
+#define GLOBAL_ReadWriteCache (*(struct ReadWriteCacheRecord* *)((IUH)Gdp + 1127))
+#define SET_GLOBAL_ReadWriteCache(v) (GLOBAL_ReadWriteCache = (v))
+#define GLOBAL_ReadWriteBackup (*(struct ReadWriteBackupRecord* *)((IUH)Gdp + 1131))
+#define SET_GLOBAL_ReadWriteBackup(v) (GLOBAL_ReadWriteBackup = (v))
+#define GLOBAL_EsCookie (*(IU32 *)((IUH)Gdp + 1135))
+#define SET_GLOBAL_EsCookie(v) (GLOBAL_EsCookie = (v))
+#define GLOBAL_CsCookie (*(IU32 *)((IUH)Gdp + 1139))
+#define SET_GLOBAL_CsCookie(v) (GLOBAL_CsCookie = (v))
+#define GLOBAL_SsCookie (*(IU32 *)((IUH)Gdp + 1143))
+#define SET_GLOBAL_SsCookie(v) (GLOBAL_SsCookie = (v))
+#define GLOBAL_DsCookie (*(IU32 *)((IUH)Gdp + 1147))
+#define SET_GLOBAL_DsCookie(v) (GLOBAL_DsCookie = (v))
+#define GLOBAL_LinearCookie (*(IU32 *)((IUH)Gdp + 1151))
+#define SET_GLOBAL_LinearCookie(v) (GLOBAL_LinearCookie = (v))
+#define GLOBAL_SystemCookie (*(IU32 *)((IUH)Gdp + 1155))
+#define SET_GLOBAL_SystemCookie(v) (GLOBAL_SystemCookie = (v))
+#define GLOBAL_FsCookie (*(IU32 *)((IUH)Gdp + 1159))
+#define SET_GLOBAL_FsCookie(v) (GLOBAL_FsCookie = (v))
+#define GLOBAL_GsCookie (*(IU32 *)((IUH)Gdp + 1163))
+#define SET_GLOBAL_GsCookie(v) (GLOBAL_GsCookie = (v))
+#define GLOBAL_NextCookie (*(IU32 *)((IUH)Gdp + 1167))
+#define SET_GLOBAL_NextCookie(v) (GLOBAL_NextCookie = (v))
+#define GLOBAL_RealModeCookies (*(IU32* *)((IUH)Gdp + 1171))
+#define SET_GLOBAL_RealModeCookies(v) (GLOBAL_RealModeCookies = (v))
+#define GLOBAL_RealModeCurrentCookies (*(IU32* *)((IUH)Gdp + 1175))
+#define SET_GLOBAL_RealModeCurrentCookies(v) (GLOBAL_RealModeCurrentCookies = (v))
+#define GLOBAL_LinearCookies (*(IU32* *)((IUH)Gdp + 1179))
+#define SET_GLOBAL_LinearCookies(v) (GLOBAL_LinearCookies = (v))
+#define GLOBAL_SystemCookies (*(IU32* *)((IUH)Gdp + 1183))
+#define SET_GLOBAL_SystemCookies(v) (GLOBAL_SystemCookies = (v))
+#define GLOBAL_CookiesToReset (*(IU32** *)((IUH)Gdp + 1187))
+#define SET_GLOBAL_CookiesToReset(v) (GLOBAL_CookiesToReset = (v))
+#define GLOBAL_NrOfCookiesToReset (*(IUH *)((IUH)Gdp + 1191))
+#define SET_GLOBAL_NrOfCookiesToReset(v) (GLOBAL_NrOfCookiesToReset = (v))
+#define GLOBAL_ReadWriteScratchBuffer (*(IU8* *)((IUH)Gdp + 1195))
+#define SET_GLOBAL_ReadWriteScratchBuffer(v) (GLOBAL_ReadWriteScratchBuffer = (v))
+#define GLOBAL_ScratchSequenceNumber (*(IUH *)((IUH)Gdp + 1199))
+#define SET_GLOBAL_ScratchSequenceNumber(v) (GLOBAL_ScratchSequenceNumber = (v))
+#define GLOBAL_RdWrPrimaryHits (*(IUH *)((IUH)Gdp + 1203))
+#define SET_GLOBAL_RdWrPrimaryHits(v) (GLOBAL_RdWrPrimaryHits = (v))
+#define GLOBAL_RdWrVideoAccesses (*(IUH *)((IUH)Gdp + 1207))
+#define SET_GLOBAL_RdWrVideoAccesses(v) (GLOBAL_RdWrVideoAccesses = (v))
+#define GLOBAL_RdWrFastProtectedWrites (*(IUH *)((IUH)Gdp + 1211))
+#define SET_GLOBAL_RdWrFastProtectedWrites(v) (GLOBAL_RdWrFastProtectedWrites = (v))
+#define GLOBAL_RdWrSlowProtectedWrites (*(IUH *)((IUH)Gdp + 1215))
+#define SET_GLOBAL_RdWrSlowProtectedWrites(v) (GLOBAL_RdWrSlowProtectedWrites = (v))
+#define GLOBAL_RdWrLoads (*(IUH *)((IUH)Gdp + 1219))
+#define SET_GLOBAL_RdWrLoads(v) (GLOBAL_RdWrLoads = (v))
+#define GLOBAL_RdWrBackupLoads (*(IUH *)((IUH)Gdp + 1223))
+#define SET_GLOBAL_RdWrBackupLoads(v) (GLOBAL_RdWrBackupLoads = (v))
+#define GLOBAL_RdWrRemovals (*(IUH *)((IUH)Gdp + 1227))
+#define SET_GLOBAL_RdWrRemovals(v) (GLOBAL_RdWrRemovals = (v))
+#define GLOBAL_RdWrCookieAllocations (*(IUH *)((IUH)Gdp + 1231))
+#define SET_GLOBAL_RdWrCookieAllocations(v) (GLOBAL_RdWrCookieAllocations = (v))
+#define GLOBAL_RdWrReconstructs (*(IUH *)((IUH)Gdp + 1235))
+#define SET_GLOBAL_RdWrReconstructs(v) (GLOBAL_RdWrReconstructs = (v))
+#define GLOBAL_RdWrCacheResets (*(IUH *)((IUH)Gdp + 1239))
+#define SET_GLOBAL_RdWrCacheResets(v) (GLOBAL_RdWrCacheResets = (v))
+#define GLOBAL_RdWrCookieResets (*(IUH *)((IUH)Gdp + 1243))
+#define SET_GLOBAL_RdWrCookieResets(v) (GLOBAL_RdWrCookieResets = (v))
+#define GLOBAL_RdWrSegCookieInits (*(IUH *)((IUH)Gdp + 1247))
+#define SET_GLOBAL_RdWrSegCookieInits(v) (GLOBAL_RdWrSegCookieInits = (v))
+#define GLOBAL_RdWrStats1 (*(IUH *)((IUH)Gdp + 1251))
+#define SET_GLOBAL_RdWrStats1(v) (GLOBAL_RdWrStats1 = (v))
+#define GLOBAL_RdWrStats2 (*(IUH *)((IUH)Gdp + 1255))
+#define SET_GLOBAL_RdWrStats2(v) (GLOBAL_RdWrStats2 = (v))
+#define GLOBAL_RdWrStats3 (*(IUH *)((IUH)Gdp + 1259))
+#define SET_GLOBAL_RdWrStats3(v) (GLOBAL_RdWrStats3 = (v))
+#define GLOBAL_RdWrStats4 (*(IUH *)((IUH)Gdp + 1263))
+#define SET_GLOBAL_RdWrStats4(v) (GLOBAL_RdWrStats4 = (v))
+#define GLOBAL_RdWrStats5 (*(IUH *)((IUH)Gdp + 1267))
+#define SET_GLOBAL_RdWrStats5(v) (GLOBAL_RdWrStats5 = (v))
+#define GLOBAL_RdWrStats6 (*(IUH *)((IUH)Gdp + 1271))
+#define SET_GLOBAL_RdWrStats6(v) (GLOBAL_RdWrStats6 = (v))
+#define GLOBAL_RdWrStats7 (*(IUH *)((IUH)Gdp + 1275))
+#define SET_GLOBAL_RdWrStats7(v) (GLOBAL_RdWrStats7 = (v))
+#define GLOBAL_RdWrStats8 (*(IUH *)((IUH)Gdp + 1279))
+#define SET_GLOBAL_RdWrStats8(v) (GLOBAL_RdWrStats8 = (v))
+#define GLOBAL_RdWrStats9 (*(IUH *)((IUH)Gdp + 1283))
+#define SET_GLOBAL_RdWrStats9(v) (GLOBAL_RdWrStats9 = (v))
+#define GLOBAL_RdWrStats10 (*(IUH *)((IUH)Gdp + 1287))
+#define SET_GLOBAL_RdWrStats10(v) (GLOBAL_RdWrStats10 = (v))
+#define GLOBAL_RdWrStats11 (*(IUH *)((IUH)Gdp + 1291))
+#define SET_GLOBAL_RdWrStats11(v) (GLOBAL_RdWrStats11 = (v))
+#define GLOBAL_RdWrStats12 (*(IUH *)((IUH)Gdp + 1295))
+#define SET_GLOBAL_RdWrStats12(v) (GLOBAL_RdWrStats12 = (v))
+#define GLOBAL_VddAreaStart (*(IU8* *)((IUH)Gdp + 1299))
+#define SET_GLOBAL_VddAreaStart(v) (GLOBAL_VddAreaStart = (v))
+#define GLOBAL_VddAreaEnd (*(IU8* *)((IUH)Gdp + 1303))
+#define SET_GLOBAL_VddAreaEnd(v) (GLOBAL_VddAreaEnd = (v))
+#define GLOBAL_SafeToUseSas (((*(IBOOL *)((IUH)Gdp + 1307)) & 1) != 0)
+#define SET_GLOBAL_SafeToUseSas(v) ((*(IBOOL *)((IUH)Gdp + 1307)) = (v) ? 1: 0)
+#define GLOBAL_VirtualiseDataSel (*(IU16 *)((IUH)Gdp + 1311))
+#define SET_GLOBAL_VirtualiseDataSel(v) (GLOBAL_VirtualiseDataSel = (v))
+#define GLOBAL_VirtualiseCodeSel (*(IU16 *)((IUH)Gdp + 1315))
+#define SET_GLOBAL_VirtualiseCodeSel(v) (GLOBAL_VirtualiseCodeSel = (v))
+#define GLOBAL_VirtualiseSelsSet (((*(IBOOL *)((IUH)Gdp + 1319)) & 1) != 0)
+#define SET_GLOBAL_VirtualiseSelsSet(v) ((*(IBOOL *)((IUH)Gdp + 1319)) = (v) ? 1: 0)
+#define GLOBAL_EAXsaved (*(IUH *)((IUH)Gdp + 1323))
+#define SET_GLOBAL_EAXsaved(v) (GLOBAL_EAXsaved = (v))
+#define GLOBAL_EBXsaved (*(IUH *)((IUH)Gdp + 1327))
+#define SET_GLOBAL_EBXsaved(v) (GLOBAL_EBXsaved = (v))
+#define GLOBAL_ECXsaved (*(IUH *)((IUH)Gdp + 1331))
+#define SET_GLOBAL_ECXsaved(v) (GLOBAL_ECXsaved = (v))
+#define GLOBAL_EDXsaved (*(IUH *)((IUH)Gdp + 1335))
+#define SET_GLOBAL_EDXsaved(v) (GLOBAL_EDXsaved = (v))
+#define GLOBAL_ESIsaved (*(IUH *)((IUH)Gdp + 1339))
+#define SET_GLOBAL_ESIsaved(v) (GLOBAL_ESIsaved = (v))
+#define GLOBAL_EDIsaved (*(IUH *)((IUH)Gdp + 1343))
+#define SET_GLOBAL_EDIsaved(v) (GLOBAL_EDIsaved = (v))
+#define GLOBAL_EBPsaved (*(IUH *)((IUH)Gdp + 1347))
+#define SET_GLOBAL_EBPsaved(v) (GLOBAL_EBPsaved = (v))
+#define GLOBAL_SafeToContinueInFragment (((*(IBOOL *)((IUH)Gdp + 1351)) & 1) != 0)
+#define SET_GLOBAL_SafeToContinueInFragment(v) ((*(IBOOL *)((IUH)Gdp + 1351)) = (v) ? 1: 0)
+#define GLOBAL_InsideTheCpu (((*(IBOOL *)((IUH)Gdp + 1355)) & 1) != 0)
+#define SET_GLOBAL_InsideTheCpu(v) ((*(IBOOL *)((IUH)Gdp + 1355)) = (v) ? 1: 0)
+#define GLOBAL_SimulateNestingLevel (*(IUH *)((IUH)Gdp + 1359))
+#define SET_GLOBAL_SimulateNestingLevel(v) (GLOBAL_SimulateNestingLevel = (v))
+#define GLOBAL_CpuIsInitialised (((*(IBOOL *)((IUH)Gdp + 1363)) & 1) != 0)
+#define SET_GLOBAL_CpuIsInitialised(v) ((*(IBOOL *)((IUH)Gdp + 1363)) = (v) ? 1: 0)
+#define GLOBAL_AR_FixupWanted (((*(IBOOL *)((IUH)Gdp + 1367)) & 1) != 0)
+#define SET_GLOBAL_AR_FixupWanted(v) ((*(IBOOL *)((IUH)Gdp + 1367)) = (v) ? 1: 0)
+#define GLOBAL_BopCausedTimesliceNap (((*(IBOOL *)((IUH)Gdp + 1371)) & 1) != 0)
+#define SET_GLOBAL_BopCausedTimesliceNap(v) ((*(IBOOL *)((IUH)Gdp + 1371)) = (v) ? 1: 0)
+#define GLOBAL_D6isBop (((*(IBOOL *)((IUH)Gdp + 1375)) & 1) != 0)
+#define SET_GLOBAL_D6isBop(v) ((*(IBOOL *)((IUH)Gdp + 1375)) = (v) ? 1: 0)
+#define GLOBAL_BopTable (*(IU8** *)((IUH)Gdp + 1379))
+#define SET_GLOBAL_BopTable(v) (GLOBAL_BopTable = (v))
+#define GLOBAL_BopNumberAndArgument (*(IU32 *)((IUH)Gdp + 1383))
+#define SET_GLOBAL_BopNumberAndArgument(v) (GLOBAL_BopNumberAndArgument = (v))
+#define GLOBAL_MsWindowsParameters (*(IU16* *)((IUH)Gdp + 1387))
+#define SET_GLOBAL_MsWindowsParameters(v) (GLOBAL_MsWindowsParameters = (v))
+#define GLOBAL_UsingMsWindowsGlueBop (((*(IBOOL *)((IUH)Gdp + 1391)) & 1) != 0)
+#define SET_GLOBAL_UsingMsWindowsGlueBop(v) ((*(IBOOL *)((IUH)Gdp + 1391)) = (v) ? 1: 0)
+#define GLOBAL_LxS_hackyfix (*(IUH *)((IUH)Gdp + 1395))
+#define SET_GLOBAL_LxS_hackyfix(v) (GLOBAL_LxS_hackyfix = (v))
+#define GLOBAL_SavedFt (*(IUH *)((IUH)Gdp + 1399))
+#define SET_GLOBAL_SavedFt(v) (GLOBAL_SavedFt = (v))
+#define GLOBAL_SavedCF (*(IUH *)((IUH)Gdp + 1403))
+#define SET_GLOBAL_SavedCF(v) (GLOBAL_SavedCF = (v))
+#define GLOBAL_SavedZF (*(IUH *)((IUH)Gdp + 1407))
+#define SET_GLOBAL_SavedZF(v) (GLOBAL_SavedZF = (v))
+#define GLOBAL_SavedOF (*(IUH *)((IUH)Gdp + 1411))
+#define SET_GLOBAL_SavedOF(v) (GLOBAL_SavedOF = (v))
+#define GLOBAL_UsedD6 (*(IUH *)((IUH)Gdp + 1415))
+#define SET_GLOBAL_UsedD6(v) (GLOBAL_UsedD6 = (v))
+#define GLOBAL_buildOp1 (*(IUH *)((IUH)Gdp + 1419))
+#define SET_GLOBAL_buildOp1(v) (GLOBAL_buildOp1 = (v))
+#define GLOBAL_buildOp2 (*(IUH *)((IUH)Gdp + 1423))
+#define SET_GLOBAL_buildOp2(v) (GLOBAL_buildOp2 = (v))
+#define GLOBAL_buildOp3 (*(IU32 *)((IUH)Gdp + 1427))
+#define SET_GLOBAL_buildOp3(v) (GLOBAL_buildOp3 = (v))
+#define GLOBAL_buildOp4 (*(IU32 *)((IUH)Gdp + 1431))
+#define SET_GLOBAL_buildOp4(v) (GLOBAL_buildOp4 = (v))
+#define GLOBAL_buildOp5 (*(IUH *)((IUH)Gdp + 1435))
+#define SET_GLOBAL_buildOp5(v) (GLOBAL_buildOp5 = (v))
+#define GLOBAL_buildOp6 (((*(IBOOL *)((IUH)Gdp + 1439)) & 1) != 0)
+#define SET_GLOBAL_buildOp6(v) ((*(IBOOL *)((IUH)Gdp + 1439)) = (v) ? 1: 0)
+#define GLOBAL_EDL_WORKSPACE_62 (*(IUH *)((IUH)Gdp + 1443))
+#define SET_GLOBAL_EDL_WORKSPACE_62(v) (GLOBAL_EDL_WORKSPACE_62 = (v))
+#define GLOBAL_EDL_WORKSPACE_63 (*(IUH *)((IUH)Gdp + 1447))
+#define SET_GLOBAL_EDL_WORKSPACE_63(v) (GLOBAL_EDL_WORKSPACE_63 = (v))
+#define GLOBAL_EDL_WORKSPACE_64 (*(IUH *)((IUH)Gdp + 1451))
+#define SET_GLOBAL_EDL_WORKSPACE_64(v) (GLOBAL_EDL_WORKSPACE_64 = (v))
+#define GLOBAL_EDL_WORKSPACE_65 (*(IUH *)((IUH)Gdp + 1455))
+#define SET_GLOBAL_EDL_WORKSPACE_65(v) (GLOBAL_EDL_WORKSPACE_65 = (v))
+#define GLOBAL_EDL_WORKSPACE_66 (*(IUH *)((IUH)Gdp + 1459))
+#define SET_GLOBAL_EDL_WORKSPACE_66(v) (GLOBAL_EDL_WORKSPACE_66 = (v))
+#define GLOBAL_EDL_WORKSPACE_67 (*(IUH *)((IUH)Gdp + 1463))
+#define SET_GLOBAL_EDL_WORKSPACE_67(v) (GLOBAL_EDL_WORKSPACE_67 = (v))
+#define GLOBAL_EDL_WORKSPACE_68 (*(IUH *)((IUH)Gdp + 1467))
+#define SET_GLOBAL_EDL_WORKSPACE_68(v) (GLOBAL_EDL_WORKSPACE_68 = (v))
+#define GLOBAL_EDL_WORKSPACE_69 (*(IUH *)((IUH)Gdp + 1471))
+#define SET_GLOBAL_EDL_WORKSPACE_69(v) (GLOBAL_EDL_WORKSPACE_69 = (v))
+#define GLOBAL_EDL_WORKSPACE_70 (*(IUH *)((IUH)Gdp + 1475))
+#define SET_GLOBAL_EDL_WORKSPACE_70(v) (GLOBAL_EDL_WORKSPACE_70 = (v))
+#define GLOBAL_EDL_WORKSPACE_71 (*(IUH *)((IUH)Gdp + 1479))
+#define SET_GLOBAL_EDL_WORKSPACE_71(v) (GLOBAL_EDL_WORKSPACE_71 = (v))
+#define GLOBAL_EDL_WORKSPACE_72 (*(IUH *)((IUH)Gdp + 1483))
+#define SET_GLOBAL_EDL_WORKSPACE_72(v) (GLOBAL_EDL_WORKSPACE_72 = (v))
+#define GLOBAL_EDL_WORKSPACE_73 (*(IUH *)((IUH)Gdp + 1487))
+#define SET_GLOBAL_EDL_WORKSPACE_73(v) (GLOBAL_EDL_WORKSPACE_73 = (v))
+#define GLOBAL_EDL_WORKSPACE_74 (*(IUH *)((IUH)Gdp + 1491))
+#define SET_GLOBAL_EDL_WORKSPACE_74(v) (GLOBAL_EDL_WORKSPACE_74 = (v))
+#define GLOBAL_EDL_WORKSPACE_75 (*(IUH *)((IUH)Gdp + 1495))
+#define SET_GLOBAL_EDL_WORKSPACE_75(v) (GLOBAL_EDL_WORKSPACE_75 = (v))
+#define GLOBAL_EDL_WORKSPACE_76 (*(IUH *)((IUH)Gdp + 1499))
+#define SET_GLOBAL_EDL_WORKSPACE_76(v) (GLOBAL_EDL_WORKSPACE_76 = (v))
+#define GLOBAL_EDL_WORKSPACE_77 (*(IUH *)((IUH)Gdp + 1503))
+#define SET_GLOBAL_EDL_WORKSPACE_77(v) (GLOBAL_EDL_WORKSPACE_77 = (v))
+#define GLOBAL_EDL_WORKSPACE_78 (*(IUH *)((IUH)Gdp + 1507))
+#define SET_GLOBAL_EDL_WORKSPACE_78(v) (GLOBAL_EDL_WORKSPACE_78 = (v))
+#define GLOBAL_EDL_WORKSPACE_79 (*(IUH *)((IUH)Gdp + 1511))
+#define SET_GLOBAL_EDL_WORKSPACE_79(v) (GLOBAL_EDL_WORKSPACE_79 = (v))
+#define GLOBAL_EDL_WORKSPACE_80 (*(IUH *)((IUH)Gdp + 1515))
+#define SET_GLOBAL_EDL_WORKSPACE_80(v) (GLOBAL_EDL_WORKSPACE_80 = (v))
+#define GLOBAL_EDL_WORKSPACE_81 (*(IUH *)((IUH)Gdp + 1519))
+#define SET_GLOBAL_EDL_WORKSPACE_81(v) (GLOBAL_EDL_WORKSPACE_81 = (v))
+#define GLOBAL_EDL_WORKSPACE_82 (*(IUH *)((IUH)Gdp + 1523))
+#define SET_GLOBAL_EDL_WORKSPACE_82(v) (GLOBAL_EDL_WORKSPACE_82 = (v))
+#define GLOBAL_EDL_WORKSPACE_83 (*(IUH *)((IUH)Gdp + 1527))
+#define SET_GLOBAL_EDL_WORKSPACE_83(v) (GLOBAL_EDL_WORKSPACE_83 = (v))
+#define GLOBAL_EDL_WORKSPACE_84 (*(IUH *)((IUH)Gdp + 1531))
+#define SET_GLOBAL_EDL_WORKSPACE_84(v) (GLOBAL_EDL_WORKSPACE_84 = (v))
+#define GLOBAL_VGAGlobals (*(struct VGAGLOBALSETTINGS *)((IUH)Gdp + 1535))
+#define SET_GLOBAL_VGAGlobals(v) (GLOBAL_VGAGlobals = (v))
+#define GLOBAL_VidMarkFuncTable (*(IU32** *)((IUH)Gdp + 1691))
+#define SET_GLOBAL_VidMarkFuncTable(v) (GLOBAL_VidMarkFuncTable = (v))
+#define GLOBAL_VidReadFuncTable (*(IU32** *)((IUH)Gdp + 1695))
+#define SET_GLOBAL_VidReadFuncTable(v) (GLOBAL_VidReadFuncTable = (v))
+#define GLOBAL_VidWriteFuncTable (*(IU32** *)((IUH)Gdp + 1699))
+#define SET_GLOBAL_VidWriteFuncTable(v) (GLOBAL_VidWriteFuncTable = (v))
+#define GLOBAL_EDL_WORKSPACE_85 (*(IUH *)((IUH)Gdp + 1703))
+#define SET_GLOBAL_EDL_WORKSPACE_85(v) (GLOBAL_EDL_WORKSPACE_85 = (v))
+#define GLOBAL_EDL_WORKSPACE_86 (*(IUH *)((IUH)Gdp + 1707))
+#define SET_GLOBAL_EDL_WORKSPACE_86(v) (GLOBAL_EDL_WORKSPACE_86 = (v))
+#define GLOBAL_EDL_WORKSPACE_87 (*(IUH *)((IUH)Gdp + 1711))
+#define SET_GLOBAL_EDL_WORKSPACE_87(v) (GLOBAL_EDL_WORKSPACE_87 = (v))
+#define GLOBAL_EDL_WORKSPACE_88 (*(IUH *)((IUH)Gdp + 1715))
+#define SET_GLOBAL_EDL_WORKSPACE_88(v) (GLOBAL_EDL_WORKSPACE_88 = (v))
+#define GLOBAL_EDL_WORKSPACE_89 (*(IUH *)((IUH)Gdp + 1719))
+#define SET_GLOBAL_EDL_WORKSPACE_89(v) (GLOBAL_EDL_WORKSPACE_89 = (v))
+#define GLOBAL_EDL_WORKSPACE_90 (*(IUH *)((IUH)Gdp + 1723))
+#define SET_GLOBAL_EDL_WORKSPACE_90(v) (GLOBAL_EDL_WORKSPACE_90 = (v))
+#define GLOBAL_ActiveVideoWrites (*(struct EVIDWRITES *)((IUH)Gdp + 1727))
+#define SET_GLOBAL_ActiveVideoWrites(v) (GLOBAL_ActiveVideoWrites = (v))
+#define GLOBAL_EDL_WORKSPACE_91 (*(IUH *)((IUH)Gdp + 1775))
+#define SET_GLOBAL_EDL_WORKSPACE_91(v) (GLOBAL_EDL_WORKSPACE_91 = (v))
+#define GLOBAL_EDL_WORKSPACE_92 (*(IUH *)((IUH)Gdp + 1779))
+#define SET_GLOBAL_EDL_WORKSPACE_92(v) (GLOBAL_EDL_WORKSPACE_92 = (v))
+#define GLOBAL_EDL_WORKSPACE_93 (*(IUH *)((IUH)Gdp + 1783))
+#define SET_GLOBAL_EDL_WORKSPACE_93(v) (GLOBAL_EDL_WORKSPACE_93 = (v))
+#define GLOBAL_EDL_WORKSPACE_94 (*(IUH *)((IUH)Gdp + 1787))
+#define SET_GLOBAL_EDL_WORKSPACE_94(v) (GLOBAL_EDL_WORKSPACE_94 = (v))
+#define GLOBAL_ActiveVideoReads (*(struct EVIDREADS *)((IUH)Gdp + 1791))
+#define SET_GLOBAL_ActiveVideoReads(v) (GLOBAL_ActiveVideoReads = (v))
+#define GLOBAL_EDL_WORKSPACE_95 (*(IUH *)((IUH)Gdp + 1811))
+#define SET_GLOBAL_EDL_WORKSPACE_95(v) (GLOBAL_EDL_WORKSPACE_95 = (v))
+#define GLOBAL_EDL_WORKSPACE_96 (*(IUH *)((IUH)Gdp + 1815))
+#define SET_GLOBAL_EDL_WORKSPACE_96(v) (GLOBAL_EDL_WORKSPACE_96 = (v))
+#define GLOBAL_EDL_WORKSPACE_97 (*(IUH *)((IUH)Gdp + 1819))
+#define SET_GLOBAL_EDL_WORKSPACE_97(v) (GLOBAL_EDL_WORKSPACE_97 = (v))
+#define GLOBAL_ActiveVideoMarks (*(struct EVIDMARKS *)((IUH)Gdp + 1823))
+#define SET_GLOBAL_ActiveVideoMarks(v) (GLOBAL_ActiveVideoMarks = (v))
+#define GLOBAL_MaxIntelPageNumber (*(IU32 *)((IUH)Gdp + 1839))
+#define SET_GLOBAL_MaxIntelPageNumber(v) (GLOBAL_MaxIntelPageNumber = (v))
+#define GLOBAL_EmptyIntelPageNumber (*(IU32 *)((IUH)Gdp + 1843))
+#define SET_GLOBAL_EmptyIntelPageNumber(v) (GLOBAL_EmptyIntelPageNumber = (v))
+#define GLOBAL_PageDirectoryPtr (*(IU32* *)((IUH)Gdp + 1847))
+#define SET_GLOBAL_PageDirectoryPtr(v) (GLOBAL_PageDirectoryPtr = (v))
+#define GLOBAL_DebuggerPFLA (*(IU32 *)((IUH)Gdp + 1851))
+#define SET_GLOBAL_DebuggerPFLA(v) (GLOBAL_DebuggerPFLA = (v))
+#define GLOBAL_DebuggerFaultAction (*(IUH *)((IUH)Gdp + 1855))
+#define SET_GLOBAL_DebuggerFaultAction(v) (GLOBAL_DebuggerFaultAction = (v))
+#define GLOBAL_InsideDebugger (*(ISH *)((IUH)Gdp + 1859))
+#define SET_GLOBAL_InsideDebugger(v) (GLOBAL_InsideDebugger = (v))
+#define GLOBAL_EDL_WORKSPACE_98 (*(IUH *)((IUH)Gdp + 1863))
+#define SET_GLOBAL_EDL_WORKSPACE_98(v) (GLOBAL_EDL_WORKSPACE_98 = (v))
+#define GLOBAL_EDL_WORKSPACE_99 (*(IUH *)((IUH)Gdp + 1867))
+#define SET_GLOBAL_EDL_WORKSPACE_99(v) (GLOBAL_EDL_WORKSPACE_99 = (v))
+#define GLOBAL_EDL_WORKSPACE_100 (*(IUH *)((IUH)Gdp + 1871))
+#define SET_GLOBAL_EDL_WORKSPACE_100(v) (GLOBAL_EDL_WORKSPACE_100 = (v))
+#define GLOBAL_EDL_WORKSPACE_101 (*(IUH *)((IUH)Gdp + 1875))
+#define SET_GLOBAL_EDL_WORKSPACE_101(v) (GLOBAL_EDL_WORKSPACE_101 = (v))
+#define GLOBAL_EDL_WORKSPACE_102 (*(IUH *)((IUH)Gdp + 1879))
+#define SET_GLOBAL_EDL_WORKSPACE_102(v) (GLOBAL_EDL_WORKSPACE_102 = (v))
+#define GLOBAL_EDL_WORKSPACE_103 (*(IUH *)((IUH)Gdp + 1883))
+#define SET_GLOBAL_EDL_WORKSPACE_103(v) (GLOBAL_EDL_WORKSPACE_103 = (v))
+#define GLOBAL_EDL_WORKSPACE_104 (*(IUH *)((IUH)Gdp + 1887))
+#define SET_GLOBAL_EDL_WORKSPACE_104(v) (GLOBAL_EDL_WORKSPACE_104 = (v))
+#define GLOBAL_EDL_WORKSPACE_105 (*(IUH *)((IUH)Gdp + 1891))
+#define SET_GLOBAL_EDL_WORKSPACE_105(v) (GLOBAL_EDL_WORKSPACE_105 = (v))
+#define GLOBAL_EDL_WORKSPACE_106 (*(IUH *)((IUH)Gdp + 1895))
+#define SET_GLOBAL_EDL_WORKSPACE_106(v) (GLOBAL_EDL_WORKSPACE_106 = (v))
+#define GLOBAL_EDL_WORKSPACE_107 (*(IUH *)((IUH)Gdp + 1899))
+#define SET_GLOBAL_EDL_WORKSPACE_107(v) (GLOBAL_EDL_WORKSPACE_107 = (v))
+#define GLOBAL_EDL_WORKSPACE_108 (*(IUH *)((IUH)Gdp + 1903))
+#define SET_GLOBAL_EDL_WORKSPACE_108(v) (GLOBAL_EDL_WORKSPACE_108 = (v))
+#define GLOBAL_EDL_WORKSPACE_109 (*(IUH *)((IUH)Gdp + 1907))
+#define SET_GLOBAL_EDL_WORKSPACE_109(v) (GLOBAL_EDL_WORKSPACE_109 = (v))
+#define GLOBAL_EDL_WORKSPACE_110 (*(IUH *)((IUH)Gdp + 1911))
+#define SET_GLOBAL_EDL_WORKSPACE_110(v) (GLOBAL_EDL_WORKSPACE_110 = (v))
+#define GLOBAL_EDL_WORKSPACE_111 (*(IUH *)((IUH)Gdp + 1915))
+#define SET_GLOBAL_EDL_WORKSPACE_111(v) (GLOBAL_EDL_WORKSPACE_111 = (v))
+#define GLOBAL_VirtualisationBIOSOffsets (*(struct VirtualisationBIOSOffsetsREC *)((IUH)Gdp + 1919))
+#define SET_GLOBAL_VirtualisationBIOSOffsets(v) (GLOBAL_VirtualisationBIOSOffsets = (v))
+#define GLOBAL_DoingIoVirtTest (*(IUH *)((IUH)Gdp + 1967))
+#define SET_GLOBAL_DoingIoVirtTest(v) (GLOBAL_DoingIoVirtTest = (v))
+#define GLOBAL_IoVirtTestFailed (*(IUH *)((IUH)Gdp + 1971))
+#define SET_GLOBAL_IoVirtTestFailed(v) (GLOBAL_IoVirtTestFailed = (v))
+#define GLOBAL_NextHostCodeAddress (*(IU32* *)((IUH)Gdp + 1975))
+#define SET_GLOBAL_NextHostCodeAddress(v) (GLOBAL_NextHostCodeAddress = (v))
+#define GLOBAL_NextPhysicalPage (*(IUH *)((IUH)Gdp + 1979))
+#define SET_GLOBAL_NextPhysicalPage(v) (GLOBAL_NextPhysicalPage = (v))
+#define GLOBAL_TranslationHashTable (*(IU16* *)((IUH)Gdp + 1983))
+#define SET_GLOBAL_TranslationHashTable(v) (GLOBAL_TranslationHashTable = (v))
+#define GLOBAL_SasMemoryType (*(IU8* *)((IUH)Gdp + 1987))
+#define SET_GLOBAL_SasMemoryType(v) (GLOBAL_SasMemoryType = (v))
+#define GLOBAL_PhysicalPageRecords (*(struct PhysicalPageREC* *)((IUH)Gdp + 1991))
+#define SET_GLOBAL_PhysicalPageRecords(v) (GLOBAL_PhysicalPageRecords = (v))
+#define GLOBAL_PhysicalPageMemory (*(IU8** *)((IUH)Gdp + 1995))
+#define SET_GLOBAL_PhysicalPageMemory(v) (GLOBAL_PhysicalPageMemory = (v))
+#define GLOBAL_TwentyBitWrapStatus (((*(IBOOL *)((IUH)Gdp + 1999)) & 1) != 0)
+#define SET_GLOBAL_TwentyBitWrapStatus(v) ((*(IBOOL *)((IUH)Gdp + 1999)) = (v) ? 1: 0)
+#define GLOBAL_MultipleRecompilationCount (*(IUH *)((IUH)Gdp + 2003))
+#define SET_GLOBAL_MultipleRecompilationCount(v) (GLOBAL_MultipleRecompilationCount = (v))
+#define GLOBAL_MaxMultipleRecompilation (*(IUH *)((IUH)Gdp + 2007))
+#define SET_GLOBAL_MaxMultipleRecompilation(v) (GLOBAL_MaxMultipleRecompilation = (v))
+#define GLOBAL_HideCompiledFragment (((*(IBOOL *)((IUH)Gdp + 2011)) & 1) != 0)
+#define SET_GLOBAL_HideCompiledFragment(v) ((*(IBOOL *)((IUH)Gdp + 2011)) = (v) ? 1: 0)
+#define GLOBAL_RestartAfterCodeOverwrite (((*(IBOOL *)((IUH)Gdp + 2015)) & 1) != 0)
+#define SET_GLOBAL_RestartAfterCodeOverwrite(v) ((*(IBOOL *)((IUH)Gdp + 2015)) = (v) ? 1: 0)
+#define GLOBAL_DoingCompilation (((*(IBOOL *)((IUH)Gdp + 2019)) & 1) != 0)
+#define SET_GLOBAL_DoingCompilation(v) ((*(IBOOL *)((IUH)Gdp + 2019)) = (v) ? 1: 0)
+#define GLOBAL_SanityCheckStructures (((*(IBOOL *)((IUH)Gdp + 2023)) & 1) != 0)
+#define SET_GLOBAL_SanityCheckStructures(v) ((*(IBOOL *)((IUH)Gdp + 2023)) = (v) ? 1: 0)
+#define GLOBAL_BufferSelfOverwritten (((*(IBOOL *)((IUH)Gdp + 2027)) & 1) != 0)
+#define SET_GLOBAL_BufferSelfOverwritten(v) ((*(IBOOL *)((IUH)Gdp + 2027)) = (v) ? 1: 0)
+#define GLOBAL_RecentCodeOverwriteHashTable (*(IU8** *)((IUH)Gdp + 2031))
+#define SET_GLOBAL_RecentCodeOverwriteHashTable(v) (GLOBAL_RecentCodeOverwriteHashTable = (v))
+#define GLOBAL_DelayedGldtUnprotections (*(IU16 *)((IUH)Gdp + 2035))
+#define SET_GLOBAL_DelayedGldtUnprotections(v) (GLOBAL_DelayedGldtUnprotections = (v))
+#define GLOBAL_DelayedIdtUnprotections (*(IU16 *)((IUH)Gdp + 2039))
+#define SET_GLOBAL_DelayedIdtUnprotections(v) (GLOBAL_DelayedIdtUnprotections = (v))
+#define GLOBAL_DelayedIdtList (*(IU16* *)((IUH)Gdp + 2043))
+#define SET_GLOBAL_DelayedIdtList(v) (GLOBAL_DelayedIdtList = (v))
+#define GLOBAL_CoarseRecords (*(struct CoarseProtREC* *)((IUH)Gdp + 2047))
+#define SET_GLOBAL_CoarseRecords(v) (GLOBAL_CoarseRecords = (v))
+#define GLOBAL_FineRecords (*(struct FineProtREC* *)((IUH)Gdp + 2051))
+#define SET_GLOBAL_FineRecords(v) (GLOBAL_FineRecords = (v))
+#define GLOBAL_SlotRecords (*(struct SlotProtREC* *)((IUH)Gdp + 2055))
+#define SET_GLOBAL_SlotRecords(v) (GLOBAL_SlotRecords = (v))
+#define GLOBAL_DescriptorRecords (*(struct GLDC_REC* *)((IUH)Gdp + 2059))
+#define SET_GLOBAL_DescriptorRecords(v) (GLOBAL_DescriptorRecords = (v))
+#define GLOBAL_MapRecords (*(struct TranslationMapREC* *)((IUH)Gdp + 2063))
+#define SET_GLOBAL_MapRecords(v) (GLOBAL_MapRecords = (v))
+#define GLOBAL_DependencyRecords (*(struct DependencyREC* *)((IUH)Gdp + 2067))
+#define SET_GLOBAL_DependencyRecords(v) (GLOBAL_DependencyRecords = (v))
+#define GLOBAL_DeletionRecords (*(struct DeletionREC* *)((IUH)Gdp + 2071))
+#define SET_GLOBAL_DeletionRecords(v) (GLOBAL_DeletionRecords = (v))
+#define GLOBAL_NextPossSacrificeBuffNr (*(IUH *)((IUH)Gdp + 2075))
+#define SET_GLOBAL_NextPossSacrificeBuffNr(v) (GLOBAL_NextPossSacrificeBuffNr = (v))
+#define GLOBAL_BaseDescriptorBuffNr (*(IUH *)((IUH)Gdp + 2079))
+#define SET_GLOBAL_BaseDescriptorBuffNr(v) (GLOBAL_BaseDescriptorBuffNr = (v))
+#define GLOBAL_MaxDescriptorBuffNr (*(IUH *)((IUH)Gdp + 2083))
+#define SET_GLOBAL_MaxDescriptorBuffNr(v) (GLOBAL_MaxDescriptorBuffNr = (v))
+#define GLOBAL_FreeDataBuffers (*(struct BufferIndexREC* *)((IUH)Gdp + 2087))
+#define SET_GLOBAL_FreeDataBuffers(v) (GLOBAL_FreeDataBuffers = (v))
+#define GLOBAL_FreeCodeBuffers (*(struct BufferIndexREC* *)((IUH)Gdp + 2091))
+#define SET_GLOBAL_FreeCodeBuffers(v) (GLOBAL_FreeCodeBuffers = (v))
+#define GLOBAL_EntryPointHashTable (*(IU16* *)((IUH)Gdp + 2095))
+#define SET_GLOBAL_EntryPointHashTable(v) (GLOBAL_EntryPointHashTable = (v))
+#define GLOBAL_FragCounts (*(IU8* *)((IUH)Gdp + 2099))
+#define SET_GLOBAL_FragCounts(v) (GLOBAL_FragCounts = (v))
+#define GLOBAL_EDL_WORKSPACE_112 (*(IUH *)((IUH)Gdp + 2103))
+#define SET_GLOBAL_EDL_WORKSPACE_112(v) (GLOBAL_EDL_WORKSPACE_112 = (v))
+#define GLOBAL_EDL_WORKSPACE_113 (*(IUH *)((IUH)Gdp + 2107))
+#define SET_GLOBAL_EDL_WORKSPACE_113(v) (GLOBAL_EDL_WORKSPACE_113 = (v))
+#define GLOBAL_ControlBlock (*(struct CompilationControlREC *)((IUH)Gdp + 2111))
+#define SET_GLOBAL_ControlBlock(v) (GLOBAL_ControlBlock = (v))
+#define GLOBAL_EDL_WORKSPACE_114 (*(IUH *)((IUH)Gdp + 2127))
+#define SET_GLOBAL_EDL_WORKSPACE_114(v) (GLOBAL_EDL_WORKSPACE_114 = (v))
+#define GLOBAL_EDL_WORKSPACE_115 (*(IUH *)((IUH)Gdp + 2131))
+#define SET_GLOBAL_EDL_WORKSPACE_115(v) (GLOBAL_EDL_WORKSPACE_115 = (v))
+#define GLOBAL_EDL_WORKSPACE_116 (*(IUH *)((IUH)Gdp + 2135))
+#define SET_GLOBAL_EDL_WORKSPACE_116(v) (GLOBAL_EDL_WORKSPACE_116 = (v))
+#define GLOBAL_EDL_WORKSPACE_117 (*(IUH *)((IUH)Gdp + 2139))
+#define SET_GLOBAL_EDL_WORKSPACE_117(v) (GLOBAL_EDL_WORKSPACE_117 = (v))
+#define GLOBAL_EDL_WORKSPACE_118 (*(IUH *)((IUH)Gdp + 2143))
+#define SET_GLOBAL_EDL_WORKSPACE_118(v) (GLOBAL_EDL_WORKSPACE_118 = (v))
+#define GLOBAL_EDL_WORKSPACE_119 (*(IUH *)((IUH)Gdp + 2147))
+#define SET_GLOBAL_EDL_WORKSPACE_119(v) (GLOBAL_EDL_WORKSPACE_119 = (v))
+#define GLOBAL_EDL_WORKSPACE_120 (*(IUH *)((IUH)Gdp + 2151))
+#define SET_GLOBAL_EDL_WORKSPACE_120(v) (GLOBAL_EDL_WORKSPACE_120 = (v))
+#define GLOBAL_EDL_WORKSPACE_121 (*(IUH *)((IUH)Gdp + 2155))
+#define SET_GLOBAL_EDL_WORKSPACE_121(v) (GLOBAL_EDL_WORKSPACE_121 = (v))
+#define GLOBAL_EDL_WORKSPACE_122 (*(IUH *)((IUH)Gdp + 2159))
+#define SET_GLOBAL_EDL_WORKSPACE_122(v) (GLOBAL_EDL_WORKSPACE_122 = (v))
+#define GLOBAL_EDL_WORKSPACE_123 (*(IUH *)((IUH)Gdp + 2163))
+#define SET_GLOBAL_EDL_WORKSPACE_123(v) (GLOBAL_EDL_WORKSPACE_123 = (v))
+#define GLOBAL_EDL_WORKSPACE_124 (*(IUH *)((IUH)Gdp + 2167))
+#define SET_GLOBAL_EDL_WORKSPACE_124(v) (GLOBAL_EDL_WORKSPACE_124 = (v))
+#define GLOBAL_EDL_WORKSPACE_125 (*(IUH *)((IUH)Gdp + 2171))
+#define SET_GLOBAL_EDL_WORKSPACE_125(v) (GLOBAL_EDL_WORKSPACE_125 = (v))
+#define GLOBAL_CompilationBlock (*(struct BLOCK_TO_COMPILE *)((IUH)Gdp + 2175))
+#define SET_GLOBAL_CompilationBlock(v) (GLOBAL_CompilationBlock = (v))
+#define GLOBAL_EDL_WORKSPACE_126 (*(IUH *)((IUH)Gdp + 2211))
+#define SET_GLOBAL_EDL_WORKSPACE_126(v) (GLOBAL_EDL_WORKSPACE_126 = (v))
+#define GLOBAL_EDL_WORKSPACE_127 (*(IUH *)((IUH)Gdp + 2215))
+#define SET_GLOBAL_EDL_WORKSPACE_127(v) (GLOBAL_EDL_WORKSPACE_127 = (v))
+#define GLOBAL_EDL_WORKSPACE_128 (*(IUH *)((IUH)Gdp + 2219))
+#define SET_GLOBAL_EDL_WORKSPACE_128(v) (GLOBAL_EDL_WORKSPACE_128 = (v))
+#define GLOBAL_EDL_WORKSPACE_129 (*(IUH *)((IUH)Gdp + 2223))
+#define SET_GLOBAL_EDL_WORKSPACE_129(v) (GLOBAL_EDL_WORKSPACE_129 = (v))
+#define GLOBAL_EDL_WORKSPACE_130 (*(IUH *)((IUH)Gdp + 2227))
+#define SET_GLOBAL_EDL_WORKSPACE_130(v) (GLOBAL_EDL_WORKSPACE_130 = (v))
+#define GLOBAL_EDL_WORKSPACE_131 (*(IUH *)((IUH)Gdp + 2231))
+#define SET_GLOBAL_EDL_WORKSPACE_131(v) (GLOBAL_EDL_WORKSPACE_131 = (v))
+#define GLOBAL_EDL_WORKSPACE_132 (*(IUH *)((IUH)Gdp + 2235))
+#define SET_GLOBAL_EDL_WORKSPACE_132(v) (GLOBAL_EDL_WORKSPACE_132 = (v))
+#define GLOBAL_EDL_WORKSPACE_133 (*(IUH *)((IUH)Gdp + 2239))
+#define SET_GLOBAL_EDL_WORKSPACE_133(v) (GLOBAL_EDL_WORKSPACE_133 = (v))
+#define GLOBAL_EDL_WORKSPACE_134 (*(IUH *)((IUH)Gdp + 2243))
+#define SET_GLOBAL_EDL_WORKSPACE_134(v) (GLOBAL_EDL_WORKSPACE_134 = (v))
+#define GLOBAL_EDL_WORKSPACE_135 (*(IUH *)((IUH)Gdp + 2247))
+#define SET_GLOBAL_EDL_WORKSPACE_135(v) (GLOBAL_EDL_WORKSPACE_135 = (v))
+#define GLOBAL_EDL_WORKSPACE_136 (*(IUH *)((IUH)Gdp + 2251))
+#define SET_GLOBAL_EDL_WORKSPACE_136(v) (GLOBAL_EDL_WORKSPACE_136 = (v))
+#define GLOBAL_EDL_WORKSPACE_137 (*(IUH *)((IUH)Gdp + 2255))
+#define SET_GLOBAL_EDL_WORKSPACE_137(v) (GLOBAL_EDL_WORKSPACE_137 = (v))
+#define GLOBAL_EDL_WORKSPACE_138 (*(IUH *)((IUH)Gdp + 2259))
+#define SET_GLOBAL_EDL_WORKSPACE_138(v) (GLOBAL_EDL_WORKSPACE_138 = (v))
+#define GLOBAL_EDL_WORKSPACE_139 (*(IUH *)((IUH)Gdp + 2263))
+#define SET_GLOBAL_EDL_WORKSPACE_139(v) (GLOBAL_EDL_WORKSPACE_139 = (v))
+#define GLOBAL_EDL_WORKSPACE_140 (*(IUH *)((IUH)Gdp + 2267))
+#define SET_GLOBAL_EDL_WORKSPACE_140(v) (GLOBAL_EDL_WORKSPACE_140 = (v))
+#define GLOBAL_EDL_WORKSPACE_141 (*(IUH *)((IUH)Gdp + 2271))
+#define SET_GLOBAL_EDL_WORKSPACE_141(v) (GLOBAL_EDL_WORKSPACE_141 = (v))
+#define GLOBAL_EDL_WORKSPACE_142 (*(IUH *)((IUH)Gdp + 2275))
+#define SET_GLOBAL_EDL_WORKSPACE_142(v) (GLOBAL_EDL_WORKSPACE_142 = (v))
+#define GLOBAL_EDL_WORKSPACE_143 (*(IUH *)((IUH)Gdp + 2279))
+#define SET_GLOBAL_EDL_WORKSPACE_143(v) (GLOBAL_EDL_WORKSPACE_143 = (v))
+#define GLOBAL_EDL_WORKSPACE_144 (*(IUH *)((IUH)Gdp + 2283))
+#define SET_GLOBAL_EDL_WORKSPACE_144(v) (GLOBAL_EDL_WORKSPACE_144 = (v))
+#define GLOBAL_EDL_WORKSPACE_145 (*(IUH *)((IUH)Gdp + 2287))
+#define SET_GLOBAL_EDL_WORKSPACE_145(v) (GLOBAL_EDL_WORKSPACE_145 = (v))
+#define GLOBAL_EDL_WORKSPACE_146 (*(IUH *)((IUH)Gdp + 2291))
+#define SET_GLOBAL_EDL_WORKSPACE_146(v) (GLOBAL_EDL_WORKSPACE_146 = (v))
+#define GLOBAL_EDL_WORKSPACE_147 (*(IUH *)((IUH)Gdp + 2295))
+#define SET_GLOBAL_EDL_WORKSPACE_147(v) (GLOBAL_EDL_WORKSPACE_147 = (v))
+#define GLOBAL_EDL_WORKSPACE_148 (*(IUH *)((IUH)Gdp + 2299))
+#define SET_GLOBAL_EDL_WORKSPACE_148(v) (GLOBAL_EDL_WORKSPACE_148 = (v))
+#define GLOBAL_EDL_WORKSPACE_149 (*(IUH *)((IUH)Gdp + 2303))
+#define SET_GLOBAL_EDL_WORKSPACE_149(v) (GLOBAL_EDL_WORKSPACE_149 = (v))
+#define GLOBAL_EDL_WORKSPACE_150 (*(IUH *)((IUH)Gdp + 2307))
+#define SET_GLOBAL_EDL_WORKSPACE_150(v) (GLOBAL_EDL_WORKSPACE_150 = (v))
+#define GLOBAL_EDL_WORKSPACE_151 (*(IUH *)((IUH)Gdp + 2311))
+#define SET_GLOBAL_EDL_WORKSPACE_151(v) (GLOBAL_EDL_WORKSPACE_151 = (v))
+#define GLOBAL_EDL_WORKSPACE_152 (*(IUH *)((IUH)Gdp + 2315))
+#define SET_GLOBAL_EDL_WORKSPACE_152(v) (GLOBAL_EDL_WORKSPACE_152 = (v))
+#define GLOBAL_EDL_WORKSPACE_153 (*(IUH *)((IUH)Gdp + 2319))
+#define SET_GLOBAL_EDL_WORKSPACE_153(v) (GLOBAL_EDL_WORKSPACE_153 = (v))
+#define GLOBAL_EDL_WORKSPACE_154 (*(IUH *)((IUH)Gdp + 2323))
+#define SET_GLOBAL_EDL_WORKSPACE_154(v) (GLOBAL_EDL_WORKSPACE_154 = (v))
+#define GLOBAL_EDL_WORKSPACE_155 (*(IUH *)((IUH)Gdp + 2327))
+#define SET_GLOBAL_EDL_WORKSPACE_155(v) (GLOBAL_EDL_WORKSPACE_155 = (v))
+#define GLOBAL_EDL_WORKSPACE_156 (*(IUH *)((IUH)Gdp + 2331))
+#define SET_GLOBAL_EDL_WORKSPACE_156(v) (GLOBAL_EDL_WORKSPACE_156 = (v))
+#define GLOBAL_EDL_WORKSPACE_157 (*(IUH *)((IUH)Gdp + 2335))
+#define SET_GLOBAL_EDL_WORKSPACE_157(v) (GLOBAL_EDL_WORKSPACE_157 = (v))
+#define GLOBAL_EDL_WORKSPACE_158 (*(IUH *)((IUH)Gdp + 2339))
+#define SET_GLOBAL_EDL_WORKSPACE_158(v) (GLOBAL_EDL_WORKSPACE_158 = (v))
+#define GLOBAL_EDL_WORKSPACE_159 (*(IUH *)((IUH)Gdp + 2343))
+#define SET_GLOBAL_EDL_WORKSPACE_159(v) (GLOBAL_EDL_WORKSPACE_159 = (v))
+#define GLOBAL_EDL_WORKSPACE_160 (*(IUH *)((IUH)Gdp + 2347))
+#define SET_GLOBAL_EDL_WORKSPACE_160(v) (GLOBAL_EDL_WORKSPACE_160 = (v))
+#define GLOBAL_EDL_WORKSPACE_161 (*(IUH *)((IUH)Gdp + 2351))
+#define SET_GLOBAL_EDL_WORKSPACE_161(v) (GLOBAL_EDL_WORKSPACE_161 = (v))
+#define GLOBAL_EDL_WORKSPACE_162 (*(IUH *)((IUH)Gdp + 2355))
+#define SET_GLOBAL_EDL_WORKSPACE_162(v) (GLOBAL_EDL_WORKSPACE_162 = (v))
+#define GLOBAL_EDL_WORKSPACE_163 (*(IUH *)((IUH)Gdp + 2359))
+#define SET_GLOBAL_EDL_WORKSPACE_163(v) (GLOBAL_EDL_WORKSPACE_163 = (v))
+#define GLOBAL_EDL_WORKSPACE_164 (*(IUH *)((IUH)Gdp + 2363))
+#define SET_GLOBAL_EDL_WORKSPACE_164(v) (GLOBAL_EDL_WORKSPACE_164 = (v))
+#define GLOBAL_EDL_WORKSPACE_165 (*(IUH *)((IUH)Gdp + 2367))
+#define SET_GLOBAL_EDL_WORKSPACE_165(v) (GLOBAL_EDL_WORKSPACE_165 = (v))
+#define GLOBAL_EDL_WORKSPACE_166 (*(IUH *)((IUH)Gdp + 2371))
+#define SET_GLOBAL_EDL_WORKSPACE_166(v) (GLOBAL_EDL_WORKSPACE_166 = (v))
+#define GLOBAL_EDL_WORKSPACE_167 (*(IUH *)((IUH)Gdp + 2375))
+#define SET_GLOBAL_EDL_WORKSPACE_167(v) (GLOBAL_EDL_WORKSPACE_167 = (v))
+#define GLOBAL_EDL_WORKSPACE_168 (*(IUH *)((IUH)Gdp + 2379))
+#define SET_GLOBAL_EDL_WORKSPACE_168(v) (GLOBAL_EDL_WORKSPACE_168 = (v))
+#define GLOBAL_EDL_WORKSPACE_169 (*(IUH *)((IUH)Gdp + 2383))
+#define SET_GLOBAL_EDL_WORKSPACE_169(v) (GLOBAL_EDL_WORKSPACE_169 = (v))
+#define GLOBAL_EDL_WORKSPACE_170 (*(IUH *)((IUH)Gdp + 2387))
+#define SET_GLOBAL_EDL_WORKSPACE_170(v) (GLOBAL_EDL_WORKSPACE_170 = (v))
+#define GLOBAL_EDL_WORKSPACE_171 (*(IUH *)((IUH)Gdp + 2391))
+#define SET_GLOBAL_EDL_WORKSPACE_171(v) (GLOBAL_EDL_WORKSPACE_171 = (v))
+#define GLOBAL_EDL_WORKSPACE_172 (*(IUH *)((IUH)Gdp + 2395))
+#define SET_GLOBAL_EDL_WORKSPACE_172(v) (GLOBAL_EDL_WORKSPACE_172 = (v))
+#define GLOBAL_EDL_WORKSPACE_173 (*(IUH *)((IUH)Gdp + 2399))
+#define SET_GLOBAL_EDL_WORKSPACE_173(v) (GLOBAL_EDL_WORKSPACE_173 = (v))
+#define GLOBAL_EDL_WORKSPACE_174 (*(IUH *)((IUH)Gdp + 2403))
+#define SET_GLOBAL_EDL_WORKSPACE_174(v) (GLOBAL_EDL_WORKSPACE_174 = (v))
+#define GLOBAL_EDL_WORKSPACE_175 (*(IUH *)((IUH)Gdp + 2407))
+#define SET_GLOBAL_EDL_WORKSPACE_175(v) (GLOBAL_EDL_WORKSPACE_175 = (v))
+#define GLOBAL_EDL_WORKSPACE_176 (*(IUH *)((IUH)Gdp + 2411))
+#define SET_GLOBAL_EDL_WORKSPACE_176(v) (GLOBAL_EDL_WORKSPACE_176 = (v))
+#define GLOBAL_EDL_WORKSPACE_177 (*(IUH *)((IUH)Gdp + 2415))
+#define SET_GLOBAL_EDL_WORKSPACE_177(v) (GLOBAL_EDL_WORKSPACE_177 = (v))
+#define GLOBAL_EDL_WORKSPACE_178 (*(IUH *)((IUH)Gdp + 2419))
+#define SET_GLOBAL_EDL_WORKSPACE_178(v) (GLOBAL_EDL_WORKSPACE_178 = (v))
+#define GLOBAL_EDL_WORKSPACE_179 (*(IUH *)((IUH)Gdp + 2423))
+#define SET_GLOBAL_EDL_WORKSPACE_179(v) (GLOBAL_EDL_WORKSPACE_179 = (v))
+#define GLOBAL_EDL_WORKSPACE_180 (*(IUH *)((IUH)Gdp + 2427))
+#define SET_GLOBAL_EDL_WORKSPACE_180(v) (GLOBAL_EDL_WORKSPACE_180 = (v))
+#define GLOBAL_EDL_WORKSPACE_181 (*(IUH *)((IUH)Gdp + 2431))
+#define SET_GLOBAL_EDL_WORKSPACE_181(v) (GLOBAL_EDL_WORKSPACE_181 = (v))
+#define GLOBAL_EDL_WORKSPACE_182 (*(IUH *)((IUH)Gdp + 2435))
+#define SET_GLOBAL_EDL_WORKSPACE_182(v) (GLOBAL_EDL_WORKSPACE_182 = (v))
+#define GLOBAL_EDL_WORKSPACE_183 (*(IUH *)((IUH)Gdp + 2439))
+#define SET_GLOBAL_EDL_WORKSPACE_183(v) (GLOBAL_EDL_WORKSPACE_183 = (v))
+#define GLOBAL_EDL_WORKSPACE_184 (*(IUH *)((IUH)Gdp + 2443))
+#define SET_GLOBAL_EDL_WORKSPACE_184(v) (GLOBAL_EDL_WORKSPACE_184 = (v))
+#define GLOBAL_EDL_WORKSPACE_185 (*(IUH *)((IUH)Gdp + 2447))
+#define SET_GLOBAL_EDL_WORKSPACE_185(v) (GLOBAL_EDL_WORKSPACE_185 = (v))
+#define GLOBAL_EDL_WORKSPACE_186 (*(IUH *)((IUH)Gdp + 2451))
+#define SET_GLOBAL_EDL_WORKSPACE_186(v) (GLOBAL_EDL_WORKSPACE_186 = (v))
+#define GLOBAL_EDL_WORKSPACE_187 (*(IUH *)((IUH)Gdp + 2455))
+#define SET_GLOBAL_EDL_WORKSPACE_187(v) (GLOBAL_EDL_WORKSPACE_187 = (v))
+#define GLOBAL_EDL_WORKSPACE_188 (*(IUH *)((IUH)Gdp + 2459))
+#define SET_GLOBAL_EDL_WORKSPACE_188(v) (GLOBAL_EDL_WORKSPACE_188 = (v))
+#define GLOBAL_EDL_WORKSPACE_189 (*(IUH *)((IUH)Gdp + 2463))
+#define SET_GLOBAL_EDL_WORKSPACE_189(v) (GLOBAL_EDL_WORKSPACE_189 = (v))
+#define GLOBAL_EDL_WORKSPACE_190 (*(IUH *)((IUH)Gdp + 2467))
+#define SET_GLOBAL_EDL_WORKSPACE_190(v) (GLOBAL_EDL_WORKSPACE_190 = (v))
+#define GLOBAL_EDL_WORKSPACE_191 (*(IUH *)((IUH)Gdp + 2471))
+#define SET_GLOBAL_EDL_WORKSPACE_191(v) (GLOBAL_EDL_WORKSPACE_191 = (v))
+#define GLOBAL_EDL_WORKSPACE_192 (*(IUH *)((IUH)Gdp + 2475))
+#define SET_GLOBAL_EDL_WORKSPACE_192(v) (GLOBAL_EDL_WORKSPACE_192 = (v))
+#define GLOBAL_EDL_WORKSPACE_193 (*(IUH *)((IUH)Gdp + 2479))
+#define SET_GLOBAL_EDL_WORKSPACE_193(v) (GLOBAL_EDL_WORKSPACE_193 = (v))
+#define GLOBAL_EDL_WORKSPACE_194 (*(IUH *)((IUH)Gdp + 2483))
+#define SET_GLOBAL_EDL_WORKSPACE_194(v) (GLOBAL_EDL_WORKSPACE_194 = (v))
+#define GLOBAL_EDL_WORKSPACE_195 (*(IUH *)((IUH)Gdp + 2487))
+#define SET_GLOBAL_EDL_WORKSPACE_195(v) (GLOBAL_EDL_WORKSPACE_195 = (v))
+#define GLOBAL_EDL_WORKSPACE_196 (*(IUH *)((IUH)Gdp + 2491))
+#define SET_GLOBAL_EDL_WORKSPACE_196(v) (GLOBAL_EDL_WORKSPACE_196 = (v))
+#define GLOBAL_EDL_WORKSPACE_197 (*(IUH *)((IUH)Gdp + 2495))
+#define SET_GLOBAL_EDL_WORKSPACE_197(v) (GLOBAL_EDL_WORKSPACE_197 = (v))
+#define GLOBAL_EDL_WORKSPACE_198 (*(IUH *)((IUH)Gdp + 2499))
+#define SET_GLOBAL_EDL_WORKSPACE_198(v) (GLOBAL_EDL_WORKSPACE_198 = (v))
+#define GLOBAL_EDL_WORKSPACE_199 (*(IUH *)((IUH)Gdp + 2503))
+#define SET_GLOBAL_EDL_WORKSPACE_199(v) (GLOBAL_EDL_WORKSPACE_199 = (v))
+#define GLOBAL_EDL_WORKSPACE_200 (*(IUH *)((IUH)Gdp + 2507))
+#define SET_GLOBAL_EDL_WORKSPACE_200(v) (GLOBAL_EDL_WORKSPACE_200 = (v))
+#define GLOBAL_EDL_WORKSPACE_201 (*(IUH *)((IUH)Gdp + 2511))
+#define SET_GLOBAL_EDL_WORKSPACE_201(v) (GLOBAL_EDL_WORKSPACE_201 = (v))
+#define GLOBAL_EDL_WORKSPACE_202 (*(IUH *)((IUH)Gdp + 2515))
+#define SET_GLOBAL_EDL_WORKSPACE_202(v) (GLOBAL_EDL_WORKSPACE_202 = (v))
+#define GLOBAL_EDL_WORKSPACE_203 (*(IUH *)((IUH)Gdp + 2519))
+#define SET_GLOBAL_EDL_WORKSPACE_203(v) (GLOBAL_EDL_WORKSPACE_203 = (v))
+#define GLOBAL_EDL_WORKSPACE_204 (*(IUH *)((IUH)Gdp + 2523))
+#define SET_GLOBAL_EDL_WORKSPACE_204(v) (GLOBAL_EDL_WORKSPACE_204 = (v))
+#define GLOBAL_EDL_WORKSPACE_205 (*(IUH *)((IUH)Gdp + 2527))
+#define SET_GLOBAL_EDL_WORKSPACE_205(v) (GLOBAL_EDL_WORKSPACE_205 = (v))
+#define GLOBAL_EDL_WORKSPACE_206 (*(IUH *)((IUH)Gdp + 2531))
+#define SET_GLOBAL_EDL_WORKSPACE_206(v) (GLOBAL_EDL_WORKSPACE_206 = (v))
+#define GLOBAL_EDL_WORKSPACE_207 (*(IUH *)((IUH)Gdp + 2535))
+#define SET_GLOBAL_EDL_WORKSPACE_207(v) (GLOBAL_EDL_WORKSPACE_207 = (v))
+#define GLOBAL_EDL_WORKSPACE_208 (*(IUH *)((IUH)Gdp + 2539))
+#define SET_GLOBAL_EDL_WORKSPACE_208(v) (GLOBAL_EDL_WORKSPACE_208 = (v))
+#define GLOBAL_EDL_WORKSPACE_209 (*(IUH *)((IUH)Gdp + 2543))
+#define SET_GLOBAL_EDL_WORKSPACE_209(v) (GLOBAL_EDL_WORKSPACE_209 = (v))
+#define GLOBAL_EDL_WORKSPACE_210 (*(IUH *)((IUH)Gdp + 2547))
+#define SET_GLOBAL_EDL_WORKSPACE_210(v) (GLOBAL_EDL_WORKSPACE_210 = (v))
+#define GLOBAL_EDL_WORKSPACE_211 (*(IUH *)((IUH)Gdp + 2551))
+#define SET_GLOBAL_EDL_WORKSPACE_211(v) (GLOBAL_EDL_WORKSPACE_211 = (v))
+#define GLOBAL_EDL_WORKSPACE_212 (*(IUH *)((IUH)Gdp + 2555))
+#define SET_GLOBAL_EDL_WORKSPACE_212(v) (GLOBAL_EDL_WORKSPACE_212 = (v))
+#define GLOBAL_PoolAllocationRec (*(struct PoolAllocationREC *)((IUH)Gdp + 2559))
+#define SET_GLOBAL_PoolAllocationRec(v) (GLOBAL_PoolAllocationRec = (v))
+#define GLOBAL_LightCompiledLRUrec (*(struct BufferIndexREC* *)((IUH)Gdp + 2879))
+#define SET_GLOBAL_LightCompiledLRUrec(v) (GLOBAL_LightCompiledLRUrec = (v))
+#define GLOBAL_HSPinCodePages (((*(IBOOL *)((IUH)Gdp + 2883)) & 1) != 0)
+#define SET_GLOBAL_HSPinCodePages(v) ((*(IBOOL *)((IUH)Gdp + 2883)) = (v) ? 1: 0)
+#define GLOBAL_LastCompiledCsDesc (*(struct GLDC_REC* *)((IUH)Gdp + 2887))
+#define SET_GLOBAL_LastCompiledCsDesc(v) (GLOBAL_LastCompiledCsDesc = (v))
+#define GLOBAL_CrossPageInstructions (*(IU8* *)((IUH)Gdp + 2891))
+#define SET_GLOBAL_CrossPageInstructions(v) (GLOBAL_CrossPageInstructions = (v))
+#define GLOBAL_FlushingCache (((*(IBOOL *)((IUH)Gdp + 2895)) & 1) != 0)
+#define SET_GLOBAL_FlushingCache(v) ((*(IBOOL *)((IUH)Gdp + 2895)) = (v) ? 1: 0)
+#define GLOBAL_suppressIroning (((*(IBOOL *)((IUH)Gdp + 2899)) & 1) != 0)
+#define SET_GLOBAL_suppressIroning(v) ((*(IBOOL *)((IUH)Gdp + 2899)) = (v) ? 1: 0)
+#define GLOBAL_IHook (*(IU32 *)((IUH)Gdp + 2903))
+#define SET_GLOBAL_IHook(v) (GLOBAL_IHook = (v))
+#define GLOBAL_EDL_WORKSPACE_213 (*(IUH *)((IUH)Gdp + 2907))
+#define SET_GLOBAL_EDL_WORKSPACE_213(v) (GLOBAL_EDL_WORKSPACE_213 = (v))
+#define GLOBAL_InterruptRec (*(struct InterruptREC *)((IUH)Gdp + 2911))
+#define SET_GLOBAL_InterruptRec(v) (GLOBAL_InterruptRec = (v))
+#define GLOBAL_SasReInitNow (((*(IBOOL *)((IUH)Gdp + 2923)) & 1) != 0)
+#define SET_GLOBAL_SasReInitNow(v) ((*(IBOOL *)((IUH)Gdp + 2923)) = (v) ? 1: 0)
+#define GLOBAL_SasReInitSize (*(IU32 *)((IUH)Gdp + 2927))
+#define SET_GLOBAL_SasReInitSize(v) (GLOBAL_SasReInitSize = (v))
+#define GLOBAL_EDL_WORKSPACE_214 (*(IUH *)((IUH)Gdp + 2931))
+#define SET_GLOBAL_EDL_WORKSPACE_214(v) (GLOBAL_EDL_WORKSPACE_214 = (v))
+#define GLOBAL_EDL_WORKSPACE_215 (*(IUH *)((IUH)Gdp + 2935))
+#define SET_GLOBAL_EDL_WORKSPACE_215(v) (GLOBAL_EDL_WORKSPACE_215 = (v))
+#define GLOBAL_EDL_WORKSPACE_216 (*(IUH *)((IUH)Gdp + 2939))
+#define SET_GLOBAL_EDL_WORKSPACE_216(v) (GLOBAL_EDL_WORKSPACE_216 = (v))
+#define GLOBAL_QuickTickerRec (*(struct QuickTickerREC *)((IUH)Gdp + 2943))
+#define SET_GLOBAL_QuickTickerRec(v) (GLOBAL_QuickTickerRec = (v))
+#define GLOBAL_PigSynchCount (*(IUH *)((IUH)Gdp + 2967))
+#define SET_GLOBAL_PigSynchCount(v) (GLOBAL_PigSynchCount = (v))
+#define GLOBAL_CodeBufferSizeBits (*(IUH *)((IUH)Gdp + 2971))
+#define SET_GLOBAL_CodeBufferSizeBits(v) (GLOBAL_CodeBufferSizeBits = (v))
+#define GLOBAL_CodeBufferSize (*(IUH *)((IUH)Gdp + 2975))
+#define SET_GLOBAL_CodeBufferSize(v) (GLOBAL_CodeBufferSize = (v))
+#define GLOBAL_DataBufferSize (*(IUH *)((IUH)Gdp + 2979))
+#define SET_GLOBAL_DataBufferSize(v) (GLOBAL_DataBufferSize = (v))
+#define GLOBAL_AllBuffers (*(struct BufferIndexREC* *)((IUH)Gdp + 2983))
+#define SET_GLOBAL_AllBuffers(v) (GLOBAL_AllBuffers = (v))
+#define GLOBAL_LightBufferLRU (*(struct BufferIndexREC* *)((IUH)Gdp + 2987))
+#define SET_GLOBAL_LightBufferLRU(v) (GLOBAL_LightBufferLRU = (v))
+#define GLOBAL_CompilationBuffer (*(struct BufferIndexREC* *)((IUH)Gdp + 2991))
+#define SET_GLOBAL_CompilationBuffer(v) (GLOBAL_CompilationBuffer = (v))
+#define GLOBAL_PendingDeletions (*(struct BufferIndexREC* *)((IUH)Gdp + 2995))
+#define SET_GLOBAL_PendingDeletions(v) (GLOBAL_PendingDeletions = (v))
+#define GLOBAL_FragmentInfoArray (*(struct FragmentInfoREC* *)((IUH)Gdp + 2999))
+#define SET_GLOBAL_FragmentInfoArray(v) (GLOBAL_FragmentInfoArray = (v))
+#define GLOBAL_HostCodeBufferLimit (*(IU32* *)((IUH)Gdp + 3003))
+#define SET_GLOBAL_HostCodeBufferLimit(v) (GLOBAL_HostCodeBufferLimit = (v))
+#define GLOBAL_CopiedCleanups (*(IU8* *)((IUH)Gdp + 3007))
+#define SET_GLOBAL_CopiedCleanups(v) (GLOBAL_CopiedCleanups = (v))
+#define GLOBAL_FreeDebugInfoList (*(struct DebugInfoREC* *)((IUH)Gdp + 3011))
+#define SET_GLOBAL_FreeDebugInfoList(v) (GLOBAL_FreeDebugInfoList = (v))
+#define GLOBAL_CodeBufferOverrun (*(IU32* *)((IUH)Gdp + 3015))
+#define SET_GLOBAL_CodeBufferOverrun(v) (GLOBAL_CodeBufferOverrun = (v))
+#define GLOBAL_OverrunHighWaterMark (*(IU32* *)((IUH)Gdp + 3019))
+#define SET_GLOBAL_OverrunHighWaterMark(v) (GLOBAL_OverrunHighWaterMark = (v))
+#define GLOBAL_NumberOfBuffers (*(IU16 *)((IUH)Gdp + 3023))
+#define SET_GLOBAL_NumberOfBuffers(v) (GLOBAL_NumberOfBuffers = (v))
+#define GLOBAL_NextFragmentIndex (*(struct FragmentIndexREC* *)((IUH)Gdp + 3027))
+#define SET_GLOBAL_NextFragmentIndex(v) (GLOBAL_NextFragmentIndex = (v))
+#define GLOBAL_NextFragmentData (*(struct FragmentDataREC* *)((IUH)Gdp + 3031))
+#define SET_GLOBAL_NextFragmentData(v) (GLOBAL_NextFragmentData = (v))
+#define GLOBAL_FpuDisabled (((*(IBOOL *)((IUH)Gdp + 3035)) & 1) != 0)
+#define SET_GLOBAL_FpuDisabled(v) ((*(IBOOL *)((IUH)Gdp + 3035)) = (v) ? 1: 0)
+#define GLOBAL_GenerateNPXexception (((*(IBOOL *)((IUH)Gdp + 3039)) & 1) != 0)
+#define SET_GLOBAL_GenerateNPXexception(v) ((*(IBOOL *)((IUH)Gdp + 3039)) = (v) ? 1: 0)
+#define GLOBAL_NpxExceptionEIP (*(IUH *)((IUH)Gdp + 3043))
+#define SET_GLOBAL_NpxExceptionEIP(v) (GLOBAL_NpxExceptionEIP = (v))
+#define GLOBAL_NpxControl (*(IUH *)((IUH)Gdp + 3047))
+#define SET_GLOBAL_NpxControl(v) (GLOBAL_NpxControl = (v))
+#define GLOBAL_NpxStatus (*(IUH *)((IUH)Gdp + 3051))
+#define SET_GLOBAL_NpxStatus(v) (GLOBAL_NpxStatus = (v))
+#define GLOBAL_NpxFEA (*(IUH *)((IUH)Gdp + 3055))
+#define SET_GLOBAL_NpxFEA(v) (GLOBAL_NpxFEA = (v))
+#define GLOBAL_NpxFDS (*(IUH *)((IUH)Gdp + 3059))
+#define SET_GLOBAL_NpxFDS(v) (GLOBAL_NpxFDS = (v))
+#define GLOBAL_NpxFIP (*(IUH *)((IUH)Gdp + 3063))
+#define SET_GLOBAL_NpxFIP(v) (GLOBAL_NpxFIP = (v))
+#define GLOBAL_NpxFOP (*(IUH *)((IUH)Gdp + 3067))
+#define SET_GLOBAL_NpxFOP(v) (GLOBAL_NpxFOP = (v))
+#define GLOBAL_NpxFCS (*(IUH *)((IUH)Gdp + 3071))
+#define SET_GLOBAL_NpxFCS(v) (GLOBAL_NpxFCS = (v))
+#define GLOBAL_NpxSWFlagC0 (*(IUH *)((IUH)Gdp + 3075))
+#define SET_GLOBAL_NpxSWFlagC0(v) (GLOBAL_NpxSWFlagC0 = (v))
+#define GLOBAL_NpxSWFlagC1 (*(IUH *)((IUH)Gdp + 3079))
+#define SET_GLOBAL_NpxSWFlagC1(v) (GLOBAL_NpxSWFlagC1 = (v))
+#define GLOBAL_NpxSWFlagC2 (*(IUH *)((IUH)Gdp + 3083))
+#define SET_GLOBAL_NpxSWFlagC2(v) (GLOBAL_NpxSWFlagC2 = (v))
+#define GLOBAL_NpxSWFlagC3 (*(IUH *)((IUH)Gdp + 3087))
+#define SET_GLOBAL_NpxSWFlagC3(v) (GLOBAL_NpxSWFlagC3 = (v))
+#define GLOBAL_NpxLastSel (*(IUH *)((IUH)Gdp + 3091))
+#define SET_GLOBAL_NpxLastSel(v) (GLOBAL_NpxLastSel = (v))
+#define GLOBAL_NpxLastOff (*(IUH *)((IUH)Gdp + 3095))
+#define SET_GLOBAL_NpxLastOff(v) (GLOBAL_NpxLastOff = (v))
+#define GLOBAL_NpxException (((*(IBOOL *)((IUH)Gdp + 3099)) & 1) != 0)
+#define SET_GLOBAL_NpxException(v) ((*(IBOOL *)((IUH)Gdp + 3099)) = (v) ? 1: 0)
+#define GLOBAL_npxRounding (*(IUH *)((IUH)Gdp + 3103))
+#define SET_GLOBAL_npxRounding(v) (GLOBAL_npxRounding = (v))
+#define GLOBAL_UnmaskedOUP (((*(IBOOL *)((IUH)Gdp + 3107)) & 1) != 0)
+#define SET_GLOBAL_UnmaskedOUP(v) ((*(IBOOL *)((IUH)Gdp + 3107)) = (v) ? 1: 0)
+#define GLOBAL_hostFpuExceptions (*(IUH *)((IUH)Gdp + 3111))
+#define SET_GLOBAL_hostFpuExceptions(v) (GLOBAL_hostFpuExceptions = (v))
+#define GLOBAL_savedFpuExceptions (*(IUH *)((IUH)Gdp + 3115))
+#define SET_GLOBAL_savedFpuExceptions(v) (GLOBAL_savedFpuExceptions = (v))
+#define GLOBAL_tempFpuExceptions (*(IUH *)((IUH)Gdp + 3119))
+#define SET_GLOBAL_tempFpuExceptions(v) (GLOBAL_tempFpuExceptions = (v))
+#define GLOBAL_savedFpuXcptnOverflow (*(IUH *)((IUH)Gdp + 3123))
+#define SET_GLOBAL_savedFpuXcptnOverflow(v) (GLOBAL_savedFpuXcptnOverflow = (v))
+#define GLOBAL_savedFpuXcptnUnderflow (*(IUH *)((IUH)Gdp + 3127))
+#define SET_GLOBAL_savedFpuXcptnUnderflow(v) (GLOBAL_savedFpuXcptnUnderflow = (v))
+#define GLOBAL_savedFpuXcptnPrecision (*(IUH *)((IUH)Gdp + 3131))
+#define SET_GLOBAL_savedFpuXcptnPrecision(v) (GLOBAL_savedFpuXcptnPrecision = (v))
+#define GLOBAL_MaxBCDValue (*(struct FPSTACKENTRY *)((IUH)Gdp + 3135))
+#define SET_GLOBAL_MaxBCDValue(v) (GLOBAL_MaxBCDValue = (v))
+#define GLOBAL_FPUpload (*(struct FPSTACKENTRY *)((IUH)Gdp + 3151))
+#define SET_GLOBAL_FPUpload(v) (GLOBAL_FPUpload = (v))
+#define GLOBAL_ConstTable (*(struct FPSTACKENTRY* *)((IUH)Gdp + 3167))
+#define SET_GLOBAL_ConstTable(v) (GLOBAL_ConstTable = (v))
+#define GLOBAL_FPTemp (*(struct FPSTACKENTRY* *)((IUH)Gdp + 3171))
+#define SET_GLOBAL_FPTemp(v) (GLOBAL_FPTemp = (v))
+#define GLOBAL_FPUStackBase (*(struct FPSTACKENTRY* *)((IUH)Gdp + 3175))
+#define SET_GLOBAL_FPUStackBase(v) (GLOBAL_FPUStackBase = (v))
+#define GLOBAL_TOSPtr (*(struct FPSTACKENTRY* *)((IUH)Gdp + 3179))
+#define SET_GLOBAL_TOSPtr(v) (GLOBAL_TOSPtr = (v))
+#define GLOBAL_Npx64BitZero (*(struct FP_I64 *)((IUH)Gdp + 3183))
+#define SET_GLOBAL_Npx64BitZero(v) (GLOBAL_Npx64BitZero = (v))
+#define GLOBAL_Npx64BitMaxNeg (*(struct FP_I64 *)((IUH)Gdp + 3191))
+#define SET_GLOBAL_Npx64BitMaxNeg(v) (GLOBAL_Npx64BitMaxNeg = (v))
+#define GLOBAL_Npx64BitHalfMaxNeg (*(struct FP_I64 *)((IUH)Gdp + 3199))
+#define SET_GLOBAL_Npx64BitHalfMaxNeg(v) (GLOBAL_Npx64BitHalfMaxNeg = (v))
+#define GLOBAL_Npx64BitVal1 (*(struct FP_I64 *)((IUH)Gdp + 3207))
+#define SET_GLOBAL_Npx64BitVal1(v) (GLOBAL_Npx64BitVal1 = (v))
+#define GLOBAL_FscaleTable (*(IUH* *)((IUH)Gdp + 3215))
+#define SET_GLOBAL_FscaleTable(v) (GLOBAL_FscaleTable = (v))
+#define GLOBAL_CompZeroTable (*(IU32* *)((IUH)Gdp + 3219))
+#define SET_GLOBAL_CompZeroTable(v) (GLOBAL_CompZeroTable = (v))
+#define GLOBAL_BCDLowNibble (*(struct FP_I64* *)((IUH)Gdp + 3223))
+#define SET_GLOBAL_BCDLowNibble(v) (GLOBAL_BCDLowNibble = (v))
+#define GLOBAL_BCDHighNibble (*(struct FP_I64* *)((IUH)Gdp + 3227))
+#define SET_GLOBAL_BCDHighNibble(v) (GLOBAL_BCDHighNibble = (v))
+#define GLOBAL_FpatanTable (*(struct FPSTACKENTRY* *)((IUH)Gdp + 3231))
+#define SET_GLOBAL_FpatanTable(v) (GLOBAL_FpatanTable = (v))
+#define GLOBAL_Pigging (((*(IBOOL *)((IUH)Gdp + 3235)) & 1) != 0)
+#define SET_GLOBAL_Pigging(v) ((*(IBOOL *)((IUH)Gdp + 3235)) = (v) ? 1: 0)
+#define GLOBAL_PigInterruptState (((*(IBOOL *)((IUH)Gdp + 3239)) & 1) != 0)
+#define SET_GLOBAL_PigInterruptState(v) ((*(IBOOL *)((IUH)Gdp + 3239)) = (v) ? 1: 0)
+#define GLOBAL_PigIgnoreFlags (((*(IBOOL *)((IUH)Gdp + 3243)) & 1) != 0)
+#define SET_GLOBAL_PigIgnoreFlags(v) ((*(IBOOL *)((IUH)Gdp + 3243)) = (v) ? 1: 0)
+#define GLOBAL_ApiPigSynchCount (*(IU16 *)((IUH)Gdp + 3247))
+#define SET_GLOBAL_ApiPigSynchCount(v) (GLOBAL_ApiPigSynchCount = (v))
+#define GLOBAL_ApiPigSynchTable (*(IU32* *)((IUH)Gdp + 3251))
+#define SET_GLOBAL_ApiPigSynchTable(v) (GLOBAL_ApiPigSynchTable = (v))
+#define GLOBAL_PigSynchWanted (((*(IBOOL *)((IUH)Gdp + 3255)) & 1) != 0)
+#define SET_GLOBAL_PigSynchWanted(v) ((*(IBOOL *)((IUH)Gdp + 3255)) = (v) ? 1: 0)
+#define GLOBAL_SadAX (*(ISH *)((IUH)Gdp + 3259))
+#define SET_GLOBAL_SadAX(v) (GLOBAL_SadAX = (v))
+#define GLOBAL_SadBX (*(ISH *)((IUH)Gdp + 3263))
+#define SET_GLOBAL_SadBX(v) (GLOBAL_SadBX = (v))
+#define GLOBAL_SadCX (*(ISH *)((IUH)Gdp + 3267))
+#define SET_GLOBAL_SadCX(v) (GLOBAL_SadCX = (v))
+#define GLOBAL_SadDX (*(ISH *)((IUH)Gdp + 3271))
+#define SET_GLOBAL_SadDX(v) (GLOBAL_SadDX = (v))
+#define GLOBAL_SadBP (*(ISH *)((IUH)Gdp + 3275))
+#define SET_GLOBAL_SadBP(v) (GLOBAL_SadBP = (v))
+#define GLOBAL_SadSP (*(ISH *)((IUH)Gdp + 3279))
+#define SET_GLOBAL_SadSP(v) (GLOBAL_SadSP = (v))
+#define GLOBAL_SadSI (*(ISH *)((IUH)Gdp + 3283))
+#define SET_GLOBAL_SadSI(v) (GLOBAL_SadSI = (v))
+#define GLOBAL_SadDI (*(ISH *)((IUH)Gdp + 3287))
+#define SET_GLOBAL_SadDI(v) (GLOBAL_SadDI = (v))
+#define GLOBAL_SadEIP (*(ISH *)((IUH)Gdp + 3291))
+#define SET_GLOBAL_SadEIP(v) (GLOBAL_SadEIP = (v))
+#define GLOBAL_SadEFLAGS (*(ISH *)((IUH)Gdp + 3295))
+#define SET_GLOBAL_SadEFLAGS(v) (GLOBAL_SadEFLAGS = (v))
+#define GLOBAL_Parameter1 (*(ISH *)((IUH)Gdp + 3299))
+#define SET_GLOBAL_Parameter1(v) (GLOBAL_Parameter1 = (v))
+#define GLOBAL_Parameter2 (*(ISH *)((IUH)Gdp + 3303))
+#define SET_GLOBAL_Parameter2(v) (GLOBAL_Parameter2 = (v))
+#define GLOBAL_BpiKnownTable (*(ISH* *)((IUH)Gdp + 3307))
+#define SET_GLOBAL_BpiKnownTable(v) (GLOBAL_BpiKnownTable = (v))
+#define GLOBAL_BpiWorkTable (*(ISH* *)((IUH)Gdp + 3311))
+#define SET_GLOBAL_BpiWorkTable(v) (GLOBAL_BpiWorkTable = (v))
+#define GLOBAL_BpiLabelTable (*(ISH* *)((IUH)Gdp + 3315))
+#define SET_GLOBAL_BpiLabelTable(v) (GLOBAL_BpiLabelTable = (v))
+#define GLOBAL_BpiFragment (*(struct FragmentDataREC* *)((IUH)Gdp + 3319))
+#define SET_GLOBAL_BpiFragment(v) (GLOBAL_BpiFragment = (v))
+#define GLOBAL_BpiCompilationBuffer (*(struct BufferIndexREC* *)((IUH)Gdp + 3323))
+#define SET_GLOBAL_BpiCompilationBuffer(v) (GLOBAL_BpiCompilationBuffer = (v))
+#define GLOBAL_BpiCompiledCode (*(IU32* *)((IUH)Gdp + 3327))
+#define SET_GLOBAL_BpiCompiledCode(v) (GLOBAL_BpiCompiledCode = (v))
+#define GLOBAL_BpiCompiledStep (*(IU32* *)((IUH)Gdp + 3331))
+#define SET_GLOBAL_BpiCompiledStep(v) (GLOBAL_BpiCompiledStep = (v))
+#define GLOBAL_BpiCompiledUser (*(IU32* *)((IUH)Gdp + 3335))
+#define SET_GLOBAL_BpiCompiledUser(v) (GLOBAL_BpiCompiledUser = (v))
+#define GLOBAL_SafeToReturnToSnippet (((*(IBOOL *)((IUH)Gdp + 3339)) & 1) != 0)
+#define SET_GLOBAL_SafeToReturnToSnippet(v) ((*(IBOOL *)((IUH)Gdp + 3339)) = (v) ? 1: 0)
+#define GLOBAL_BpiIntelStats (((*(IBOOL *)((IUH)Gdp + 3343)) & 1) != 0)
+#define SET_GLOBAL_BpiIntelStats(v) ((*(IBOOL *)((IUH)Gdp + 3343)) = (v) ? 1: 0)
+#define GLOBAL_BpiSuppressFunc (((*(IBOOL *)((IUH)Gdp + 3347)) & 1) != 0)
+#define SET_GLOBAL_BpiSuppressFunc(v) ((*(IBOOL *)((IUH)Gdp + 3347)) = (v) ? 1: 0)
+#define GLOBAL_BpiIntelStatFree (*(struct BpiIntelStatBlock* *)((IUH)Gdp + 3351))
+#define SET_GLOBAL_BpiIntelStatFree(v) (GLOBAL_BpiIntelStatFree = (v))
+#define GLOBAL_BpiIntelStatIndx (*(struct BpiIntelStatBlock** *)((IUH)Gdp + 3355))
+#define SET_GLOBAL_BpiIntelStatIndx(v) (GLOBAL_BpiIntelStatIndx = (v))
+#define GLOBAL_OpBpirealFt (*(IUH *)((IUH)Gdp + 3359))
+#define SET_GLOBAL_OpBpirealFt(v) (GLOBAL_OpBpirealFt = (v))
+#define GLOBAL_OpBpirealF1 (*(IUH *)((IUH)Gdp + 3363))
+#define SET_GLOBAL_OpBpirealF1(v) (GLOBAL_OpBpirealF1 = (v))
+#define GLOBAL_OpBpirealF2 (*(IUH *)((IUH)Gdp + 3367))
+#define SET_GLOBAL_OpBpirealF2(v) (GLOBAL_OpBpirealF2 = (v))
+#define GLOBAL_OpBpirealF3 (*(IUH *)((IUH)Gdp + 3371))
+#define SET_GLOBAL_OpBpirealF3(v) (GLOBAL_OpBpirealF3 = (v))
+#define GLOBAL_OpBpirealUniv (*(IU32 *)((IUH)Gdp + 3375))
+#define SET_GLOBAL_OpBpirealUniv(v) (GLOBAL_OpBpirealUniv = (v))
+#define GLOBAL_OpBpirealWhereAmI (*(IU32* *)((IUH)Gdp + 3379))
+#define SET_GLOBAL_OpBpirealWhereAmI(v) (GLOBAL_OpBpirealWhereAmI = (v))
+#define GLOBAL_EDL_WORKSPACE_217 (*(IUH *)((IUH)Gdp + 3383))
+#define SET_GLOBAL_EDL_WORKSPACE_217(v) (GLOBAL_EDL_WORKSPACE_217 = (v))
+#define GLOBAL_EDL_WORKSPACE_218 (*(IUH *)((IUH)Gdp + 3387))
+#define SET_GLOBAL_EDL_WORKSPACE_218(v) (GLOBAL_EDL_WORKSPACE_218 = (v))
+#define GLOBAL_ApiBufferChain (*(struct BufferIndexREC *)((IUH)Gdp + 3391))
+#define SET_GLOBAL_ApiBufferChain(v) (GLOBAL_ApiBufferChain = (v))
+#define GLOBAL_ApiCompilationBuffer (*(struct BufferIndexREC* *)((IUH)Gdp + 3427))
+#define SET_GLOBAL_ApiCompilationBuffer(v) (GLOBAL_ApiCompilationBuffer = (v))
+#define GLOBAL_ApiNumberOfBuffers (*(IU8 *)((IUH)Gdp + 3431))
+#define SET_GLOBAL_ApiNumberOfBuffers(v) (GLOBAL_ApiNumberOfBuffers = (v))
+#define GLOBAL_ApiNextFragmentIndex (*(struct FragmentIndexREC* *)((IUH)Gdp + 3435))
+#define SET_GLOBAL_ApiNextFragmentIndex(v) (GLOBAL_ApiNextFragmentIndex = (v))
+#define GLOBAL_ApiNextFragmentData (*(struct FragmentDataREC* *)((IUH)Gdp + 3439))
+#define SET_GLOBAL_ApiNextFragmentData(v) (GLOBAL_ApiNextFragmentData = (v))
+#define GLOBAL_ApiGDTBase (*(IU32 *)((IUH)Gdp + 3443))
+#define SET_GLOBAL_ApiGDTBase(v) (GLOBAL_ApiGDTBase = (v))
+#define GLOBAL_ApiGDTLimit (*(IU16 *)((IUH)Gdp + 3447))
+#define SET_GLOBAL_ApiGDTLimit(v) (GLOBAL_ApiGDTLimit = (v))
+#define GLOBAL_ApiMinBuffNr (*(IUH *)((IUH)Gdp + 3451))
+#define SET_GLOBAL_ApiMinBuffNr(v) (GLOBAL_ApiMinBuffNr = (v))
+#define GLOBAL_ApiMaxBuffNr (*(IUH *)((IUH)Gdp + 3455))
+#define SET_GLOBAL_ApiMaxBuffNr(v) (GLOBAL_ApiMaxBuffNr = (v))
+#define GLOBAL_EDL_WORKSPACE_219 (*(IUH *)((IUH)Gdp + 3459))
+#define SET_GLOBAL_EDL_WORKSPACE_219(v) (GLOBAL_EDL_WORKSPACE_219 = (v))
+#define GLOBAL_EDL_WORKSPACE_220 (*(IUH *)((IUH)Gdp + 3463))
+#define SET_GLOBAL_EDL_WORKSPACE_220(v) (GLOBAL_EDL_WORKSPACE_220 = (v))
+#define GLOBAL_EDL_WORKSPACE_221 (*(IUH *)((IUH)Gdp + 3467))
+#define SET_GLOBAL_EDL_WORKSPACE_221(v) (GLOBAL_EDL_WORKSPACE_221 = (v))
+#define GLOBAL_EDL_WORKSPACE_222 (*(IUH *)((IUH)Gdp + 3471))
+#define SET_GLOBAL_EDL_WORKSPACE_222(v) (GLOBAL_EDL_WORKSPACE_222 = (v))
+#define GLOBAL_EDL_WORKSPACE_223 (*(IUH *)((IUH)Gdp + 3475))
+#define SET_GLOBAL_EDL_WORKSPACE_223(v) (GLOBAL_EDL_WORKSPACE_223 = (v))
+#define GLOBAL_EDL_WORKSPACE_224 (*(IUH *)((IUH)Gdp + 3479))
+#define SET_GLOBAL_EDL_WORKSPACE_224(v) (GLOBAL_EDL_WORKSPACE_224 = (v))
+#define GLOBAL_EDL_WORKSPACE_225 (*(IUH *)((IUH)Gdp + 3483))
+#define SET_GLOBAL_EDL_WORKSPACE_225(v) (GLOBAL_EDL_WORKSPACE_225 = (v))
+#define GLOBAL_EDL_WORKSPACE_226 (*(IUH *)((IUH)Gdp + 3487))
+#define SET_GLOBAL_EDL_WORKSPACE_226(v) (GLOBAL_EDL_WORKSPACE_226 = (v))
+#define GLOBAL_EDL_WORKSPACE_227 (*(IUH *)((IUH)Gdp + 3491))
+#define SET_GLOBAL_EDL_WORKSPACE_227(v) (GLOBAL_EDL_WORKSPACE_227 = (v))
+#define GLOBAL_EDL_WORKSPACE_228 (*(IUH *)((IUH)Gdp + 3495))
+#define SET_GLOBAL_EDL_WORKSPACE_228(v) (GLOBAL_EDL_WORKSPACE_228 = (v))
+#define GLOBAL_EDL_WORKSPACE_229 (*(IUH *)((IUH)Gdp + 3499))
+#define SET_GLOBAL_EDL_WORKSPACE_229(v) (GLOBAL_EDL_WORKSPACE_229 = (v))
+#define GLOBAL_EDL_WORKSPACE_230 (*(IUH *)((IUH)Gdp + 3503))
+#define SET_GLOBAL_EDL_WORKSPACE_230(v) (GLOBAL_EDL_WORKSPACE_230 = (v))
+#define GLOBAL_EDL_WORKSPACE_231 (*(IUH *)((IUH)Gdp + 3507))
+#define SET_GLOBAL_EDL_WORKSPACE_231(v) (GLOBAL_EDL_WORKSPACE_231 = (v))
+#define GLOBAL_EDL_WORKSPACE_232 (*(IUH *)((IUH)Gdp + 3511))
+#define SET_GLOBAL_EDL_WORKSPACE_232(v) (GLOBAL_EDL_WORKSPACE_232 = (v))
+#define GLOBAL_EDL_WORKSPACE_233 (*(IUH *)((IUH)Gdp + 3515))
+#define SET_GLOBAL_EDL_WORKSPACE_233(v) (GLOBAL_EDL_WORKSPACE_233 = (v))
+#define GLOBAL_EDL_WORKSPACE_234 (*(IUH *)((IUH)Gdp + 3519))
+#define SET_GLOBAL_EDL_WORKSPACE_234(v) (GLOBAL_EDL_WORKSPACE_234 = (v))
+#define GLOBAL_EDL_WORKSPACE_235 (*(IUH *)((IUH)Gdp + 3523))
+#define SET_GLOBAL_EDL_WORKSPACE_235(v) (GLOBAL_EDL_WORKSPACE_235 = (v))
+#define GLOBAL_EDL_WORKSPACE_236 (*(IUH *)((IUH)Gdp + 3527))
+#define SET_GLOBAL_EDL_WORKSPACE_236(v) (GLOBAL_EDL_WORKSPACE_236 = (v))
+#define GLOBAL_EDL_WORKSPACE_237 (*(IUH *)((IUH)Gdp + 3531))
+#define SET_GLOBAL_EDL_WORKSPACE_237(v) (GLOBAL_EDL_WORKSPACE_237 = (v))
+#define GLOBAL_EDL_WORKSPACE_238 (*(IUH *)((IUH)Gdp + 3535))
+#define SET_GLOBAL_EDL_WORKSPACE_238(v) (GLOBAL_EDL_WORKSPACE_238 = (v))
+#define GLOBAL_EDL_WORKSPACE_239 (*(IUH *)((IUH)Gdp + 3539))
+#define SET_GLOBAL_EDL_WORKSPACE_239(v) (GLOBAL_EDL_WORKSPACE_239 = (v))
+#define GLOBAL_EDL_WORKSPACE_240 (*(IUH *)((IUH)Gdp + 3543))
+#define SET_GLOBAL_EDL_WORKSPACE_240(v) (GLOBAL_EDL_WORKSPACE_240 = (v))
+#define GLOBAL_EDL_WORKSPACE_241 (*(IUH *)((IUH)Gdp + 3547))
+#define SET_GLOBAL_EDL_WORKSPACE_241(v) (GLOBAL_EDL_WORKSPACE_241 = (v))
+#define GLOBAL_EDL_WORKSPACE_242 (*(IUH *)((IUH)Gdp + 3551))
+#define SET_GLOBAL_EDL_WORKSPACE_242(v) (GLOBAL_EDL_WORKSPACE_242 = (v))
+#define GLOBAL_EDL_WORKSPACE_243 (*(IUH *)((IUH)Gdp + 3555))
+#define SET_GLOBAL_EDL_WORKSPACE_243(v) (GLOBAL_EDL_WORKSPACE_243 = (v))
+#define GLOBAL_EDL_WORKSPACE_244 (*(IUH *)((IUH)Gdp + 3559))
+#define SET_GLOBAL_EDL_WORKSPACE_244(v) (GLOBAL_EDL_WORKSPACE_244 = (v))
+#define GLOBAL_EDL_WORKSPACE_245 (*(IUH *)((IUH)Gdp + 3563))
+#define SET_GLOBAL_EDL_WORKSPACE_245(v) (GLOBAL_EDL_WORKSPACE_245 = (v))
+#define GLOBAL_EDL_WORKSPACE_246 (*(IUH *)((IUH)Gdp + 3567))
+#define SET_GLOBAL_EDL_WORKSPACE_246(v) (GLOBAL_EDL_WORKSPACE_246 = (v))
+#define GLOBAL_EDL_WORKSPACE_247 (*(IUH *)((IUH)Gdp + 3571))
+#define SET_GLOBAL_EDL_WORKSPACE_247(v) (GLOBAL_EDL_WORKSPACE_247 = (v))
+#define GLOBAL_EDL_WORKSPACE_248 (*(IUH *)((IUH)Gdp + 3575))
+#define SET_GLOBAL_EDL_WORKSPACE_248(v) (GLOBAL_EDL_WORKSPACE_248 = (v))
+#define GLOBAL_EDL_WORKSPACE_249 (*(IUH *)((IUH)Gdp + 3579))
+#define SET_GLOBAL_EDL_WORKSPACE_249(v) (GLOBAL_EDL_WORKSPACE_249 = (v))
+#define GLOBAL_NpxOpndBuff (*(struct OpndBuffREC *)((IUH)Gdp + 3583))
+#define SET_GLOBAL_NpxOpndBuff(v) (GLOBAL_NpxOpndBuff = (v))
+#define GLOBAL_NewRingOffsetPtr (*(IUH* *)((IUH)Gdp + 3711))
+#define SET_GLOBAL_NewRingOffsetPtr(v) (GLOBAL_NewRingOffsetPtr = (v))
+#define GLOBAL_GLDC_NextCtxId (*(IUH *)((IUH)Gdp + 3715))
+#define SET_GLOBAL_GLDC_NextCtxId(v) (GLOBAL_GLDC_NextCtxId = (v))
+#define GLOBAL_GLDC_curCtxId (*(IUH *)((IUH)Gdp + 3719))
+#define SET_GLOBAL_GLDC_curCtxId(v) (GLOBAL_GLDC_curCtxId = (v))
+#define GLOBAL_GLDC_CxtIDs (*(IU8* *)((IUH)Gdp + 3723))
+#define SET_GLOBAL_GLDC_CxtIDs(v) (GLOBAL_GLDC_CxtIDs = (v))
+#define GLOBAL_GLDC_Index_High_Water (*(IUH *)((IUH)Gdp + 3727))
+#define SET_GLOBAL_GLDC_Index_High_Water(v) (GLOBAL_GLDC_Index_High_Water = (v))
+#define GLOBAL_GLDC_Context_High_Water (*(IUH *)((IUH)Gdp + 3731))
+#define SET_GLOBAL_GLDC_Context_High_Water(v) (GLOBAL_GLDC_Context_High_Water = (v))
+#define GLOBAL_GLDC_IndexPtr (*(struct GLDC_REC** *)((IUH)Gdp + 3735))
+#define SET_GLOBAL_GLDC_IndexPtr(v) (GLOBAL_GLDC_IndexPtr = (v))
+#define GLOBAL_GLDC_CrBase (*(IUH* *)((IUH)Gdp + 3739))
+#define SET_GLOBAL_GLDC_CrBase(v) (GLOBAL_GLDC_CrBase = (v))
+#define GLOBAL_GLDC_NULL_REC (*(struct GLDC_REC* *)((IUH)Gdp + 3743))
+#define SET_GLOBAL_GLDC_NULL_REC(v) (GLOBAL_GLDC_NULL_REC = (v))
+#define GLOBAL_GLDC_DUMMY_REC (*(struct GLDC_REC* *)((IUH)Gdp + 3747))
+#define SET_GLOBAL_GLDC_DUMMY_REC(v) (GLOBAL_GLDC_DUMMY_REC = (v))
+#define GLOBAL_hackyfix (*(IUH *)((IUH)Gdp + 3751))
+#define SET_GLOBAL_hackyfix(v) (GLOBAL_hackyfix = (v))
+#define GLOBAL_IDC_ArrayPtr (*(struct IDC_REC* *)((IUH)Gdp + 3755))
+#define SET_GLOBAL_IDC_ArrayPtr(v) (GLOBAL_IDC_ArrayPtr = (v))
+#define GLOBAL_IDC_BaseLimitCntxTable (*(struct IDC_BASE_LIMIT_CONTEXT* *)((IUH)Gdp + 3759))
+#define SET_GLOBAL_IDC_BaseLimitCntxTable(v) (GLOBAL_IDC_BaseLimitCntxTable = (v))
+#define GLOBAL_IDC_IdtSeqVal (*(IUH *)((IUH)Gdp + 3763))
+#define SET_GLOBAL_IDC_IdtSeqVal(v) (GLOBAL_IDC_IdtSeqVal = (v))
+#define GLOBAL_IDC_IdtHighWater (*(IUH *)((IUH)Gdp + 3767))
+#define SET_GLOBAL_IDC_IdtHighWater(v) (GLOBAL_IDC_IdtHighWater = (v))
+#define GLOBAL_IDC_IdtCntrlVal (*(IUH *)((IUH)Gdp + 3771))
+#define SET_GLOBAL_IDC_IdtCntrlVal(v) (GLOBAL_IDC_IdtCntrlVal = (v))
+#define GLOBAL_IDC_IdtCntrlValNoCheck (*(IUH *)((IUH)Gdp + 3775))
+#define SET_GLOBAL_IDC_IdtCntrlValNoCheck(v) (GLOBAL_IDC_IdtCntrlValNoCheck = (v))
+#define GLOBAL_IDC_IdtSeqMask (*(IUH *)((IUH)Gdp + 3779))
+#define SET_GLOBAL_IDC_IdtSeqMask(v) (GLOBAL_IDC_IdtSeqMask = (v))
+#define GLOBAL_PX_trace (((*(IBOOL *)((IUH)Gdp + 3783)) & 1) != 0)
+#define SET_GLOBAL_PX_trace(v) ((*(IBOOL *)((IUH)Gdp + 3783)) = (v) ? 1: 0)
+#define GLOBAL_PX_doing_contributory (((*(IBOOL *)((IUH)Gdp + 3787)) & 1) != 0)
+#define SET_GLOBAL_PX_doing_contributory(v) ((*(IBOOL *)((IUH)Gdp + 3787)) = (v) ? 1: 0)
+#define GLOBAL_PX_doing_page_fault (((*(IBOOL *)((IUH)Gdp + 3791)) & 1) != 0)
+#define SET_GLOBAL_PX_doing_page_fault(v) ((*(IBOOL *)((IUH)Gdp + 3791)) = (v) ? 1: 0)
+#define GLOBAL_PX_doing_double_fault (((*(IBOOL *)((IUH)Gdp + 3795)) & 1) != 0)
+#define SET_GLOBAL_PX_doing_double_fault(v) ((*(IBOOL *)((IUH)Gdp + 3795)) = (v) ? 1: 0)
+#define GLOBAL_PX_doing_fault (((*(IBOOL *)((IUH)Gdp + 3799)) & 1) != 0)
+#define SET_GLOBAL_PX_doing_fault(v) ((*(IBOOL *)((IUH)Gdp + 3799)) = (v) ? 1: 0)
+#define GLOBAL_PX_source (*(IUH *)((IUH)Gdp + 3803))
+#define SET_GLOBAL_PX_source(v) (GLOBAL_PX_source = (v))
+#define GLOBAL_RF_OnXcptnWanted (((*(IBOOL *)((IUH)Gdp + 3807)) & 1) != 0)
+#define SET_GLOBAL_RF_OnXcptnWanted(v) ((*(IBOOL *)((IUH)Gdp + 3807)) = (v) ? 1: 0)
+#define GLOBAL_PX_Cleaned_Eip (*(IU32 *)((IUH)Gdp + 3811))
+#define SET_GLOBAL_PX_Cleaned_Eip(v) (GLOBAL_PX_Cleaned_Eip = (v))
+#define GLOBAL_VDM_FaultHandler (*(IUH* *)((IUH)Gdp + 3815))
+#define SET_GLOBAL_VDM_FaultHandler(v) (GLOBAL_VDM_FaultHandler = (v))
+#define GLOBAL_IntrHasErrorCode (((*(IBOOL *)((IUH)Gdp + 3819)) & 1) != 0)
+#define SET_GLOBAL_IntrHasErrorCode(v) ((*(IBOOL *)((IUH)Gdp + 3819)) = (v) ? 1: 0)
+#define GLOBAL_IntrErrorCode (*(IU16 *)((IUH)Gdp + 3823))
+#define SET_GLOBAL_IntrErrorCode(v) (GLOBAL_IntrErrorCode = (v))
+#define GLOBAL_CInbTable (*(IUH** *)((IUH)Gdp + 3827))
+#define SET_GLOBAL_CInbTable(v) (GLOBAL_CInbTable = (v))
+#define GLOBAL_CInwTable (*(IUH** *)((IUH)Gdp + 3831))
+#define SET_GLOBAL_CInwTable(v) (GLOBAL_CInwTable = (v))
+#define GLOBAL_CIndTable (*(IUH** *)((IUH)Gdp + 3835))
+#define SET_GLOBAL_CIndTable(v) (GLOBAL_CIndTable = (v))
+#define GLOBAL_COutbTable (*(IUH** *)((IUH)Gdp + 3839))
+#define SET_GLOBAL_COutbTable(v) (GLOBAL_COutbTable = (v))
+#define GLOBAL_COutwTable (*(IUH** *)((IUH)Gdp + 3843))
+#define SET_GLOBAL_COutwTable(v) (GLOBAL_COutwTable = (v))
+#define GLOBAL_COutdTable (*(IUH** *)((IUH)Gdp + 3847))
+#define SET_GLOBAL_COutdTable(v) (GLOBAL_COutdTable = (v))
+#define GLOBAL_InAdapFromPort (*(IU8* *)((IUH)Gdp + 3851))
+#define SET_GLOBAL_InAdapFromPort(v) (GLOBAL_InAdapFromPort = (v))
+#define GLOBAL_OutAdapFromPort (*(IU8* *)((IUH)Gdp + 3855))
+#define SET_GLOBAL_OutAdapFromPort(v) (GLOBAL_OutAdapFromPort = (v))
+#define GLOBAL_InbFuncWrapper (*(IUH** *)((IUH)Gdp + 3859))
+#define SET_GLOBAL_InbFuncWrapper(v) (GLOBAL_InbFuncWrapper = (v))
+#define GLOBAL_InwFuncWrapper (*(IUH** *)((IUH)Gdp + 3863))
+#define SET_GLOBAL_InwFuncWrapper(v) (GLOBAL_InwFuncWrapper = (v))
+#define GLOBAL_IndFuncWrapper (*(IUH** *)((IUH)Gdp + 3867))
+#define SET_GLOBAL_IndFuncWrapper(v) (GLOBAL_IndFuncWrapper = (v))
+#define GLOBAL_OutbFuncWrapper (*(IUH** *)((IUH)Gdp + 3871))
+#define SET_GLOBAL_OutbFuncWrapper(v) (GLOBAL_OutbFuncWrapper = (v))
+#define GLOBAL_OutwFuncWrapper (*(IUH** *)((IUH)Gdp + 3875))
+#define SET_GLOBAL_OutwFuncWrapper(v) (GLOBAL_OutwFuncWrapper = (v))
+#define GLOBAL_OutdFuncWrapper (*(IUH** *)((IUH)Gdp + 3879))
+#define SET_GLOBAL_OutdFuncWrapper(v) (GLOBAL_OutdFuncWrapper = (v))
+#define GLOBAL_TempByteDest (*(IU8 *)((IUH)Gdp + 3883))
+#define SET_GLOBAL_TempByteDest(v) (GLOBAL_TempByteDest = (v))
+#define GLOBAL_TempWordDest (*(IU16 *)((IUH)Gdp + 3887))
+#define SET_GLOBAL_TempWordDest(v) (GLOBAL_TempWordDest = (v))
+#define GLOBAL_TempDoubleDest (*(IU32 *)((IUH)Gdp + 3891))
+#define SET_GLOBAL_TempDoubleDest(v) (GLOBAL_TempDoubleDest = (v))
+#define GLOBAL_MaxValidAdaptor (*(IUH *)((IUH)Gdp + 3895))
+#define SET_GLOBAL_MaxValidAdaptor(v) (GLOBAL_MaxValidAdaptor = (v))
+#define GLOBAL_IOSPortMask (*(IU16 *)((IUH)Gdp + 3899))
+#define SET_GLOBAL_IOSPortMask(v) (GLOBAL_IOSPortMask = (v))
+#define GLOBAL_nt_adaptor (*(IU8 *)((IUH)Gdp + 3903))
+#define SET_GLOBAL_nt_adaptor(v) (GLOBAL_nt_adaptor = (v))
+#define GLOBAL_EDL_WORKSPACE_250 (*(IUH *)((IUH)Gdp + 3907))
+#define SET_GLOBAL_EDL_WORKSPACE_250(v) (GLOBAL_EDL_WORKSPACE_250 = (v))
+#define GLOBAL_EDL_WORKSPACE_251 (*(IUH *)((IUH)Gdp + 3911))
+#define SET_GLOBAL_EDL_WORKSPACE_251(v) (GLOBAL_EDL_WORKSPACE_251 = (v))
+#define GLOBAL_EDL_WORKSPACE_252 (*(IUH *)((IUH)Gdp + 3915))
+#define SET_GLOBAL_EDL_WORKSPACE_252(v) (GLOBAL_EDL_WORKSPACE_252 = (v))
+#define GLOBAL_EDL_WORKSPACE_253 (*(IUH *)((IUH)Gdp + 3919))
+#define SET_GLOBAL_EDL_WORKSPACE_253(v) (GLOBAL_EDL_WORKSPACE_253 = (v))
+#define GLOBAL_EDL_WORKSPACE_254 (*(IUH *)((IUH)Gdp + 3923))
+#define SET_GLOBAL_EDL_WORKSPACE_254(v) (GLOBAL_EDL_WORKSPACE_254 = (v))
+#define GLOBAL_EDL_WORKSPACE_255 (*(IUH *)((IUH)Gdp + 3927))
+#define SET_GLOBAL_EDL_WORKSPACE_255(v) (GLOBAL_EDL_WORKSPACE_255 = (v))
+#define GLOBAL_EDL_WORKSPACE_256 (*(IUH *)((IUH)Gdp + 3931))
+#define SET_GLOBAL_EDL_WORKSPACE_256(v) (GLOBAL_EDL_WORKSPACE_256 = (v))
+#define GLOBAL_SegDescPtrLookupREC (*(struct DYNAMIC_DESC_PTR_LOOKUP *)((IUH)Gdp + 3935))
+#define SET_GLOBAL_SegDescPtrLookupREC(v) (GLOBAL_SegDescPtrLookupREC = (v))
+#define GLOBAL_SegBaseLookupREC (*(struct DYNAMIC_SEG_COOKIE_LOOKUP *)((IUH)Gdp + 3967))
+#define SET_GLOBAL_SegBaseLookupREC(v) (GLOBAL_SegBaseLookupREC = (v))
+#define GLOBAL_SegCookieLookupREC (*(struct DYNAMIC_SEG_COOKIE_LOOKUP *)((IUH)Gdp + 3999))
+#define SET_GLOBAL_SegCookieLookupREC(v) (GLOBAL_SegCookieLookupREC = (v))
+#define GLOBAL_ZeroValue (*(IU32 *)((IUH)Gdp + 4031))
+#define SET_GLOBAL_ZeroValue(v) (GLOBAL_ZeroValue = (v))
+#define GLOBAL_HSP (*(IU8* *)((IUH)Gdp + 4035))
+#define SET_GLOBAL_HSP(v) (GLOBAL_HSP = (v))
+#define GLOBAL_ESPsanctuary (*(IU32 *)((IUH)Gdp + 4039))
+#define SET_GLOBAL_ESPsanctuary(v) (GLOBAL_ESPsanctuary = (v))
+#define GLOBAL_exclPopLimit (*(IU8* *)((IUH)Gdp + 4043))
+#define SET_GLOBAL_exclPopLimit(v) (GLOBAL_exclPopLimit = (v))
+#define GLOBAL_inclPushLimit (*(IU8* *)((IUH)Gdp + 4047))
+#define SET_GLOBAL_inclPushLimit(v) (GLOBAL_inclPushLimit = (v))
+#define GLOBAL_notionalSsBase (*(IU8* *)((IUH)Gdp + 4051))
+#define SET_GLOBAL_notionalSsBase(v) (GLOBAL_notionalSsBase = (v))
+#define GLOBAL_notionalSsBase2 (*(IU8* *)((IUH)Gdp + 4055))
+#define SET_GLOBAL_notionalSsBase2(v) (GLOBAL_notionalSsBase2 = (v))
+#define GLOBAL_notionalSsBase4 (*(IU8* *)((IUH)Gdp + 4059))
+#define SET_GLOBAL_notionalSsBase4(v) (GLOBAL_notionalSsBase4 = (v))
+#define GLOBAL_stackIsWrappable (((*(IBOOL *)((IUH)Gdp + 4063)) & 1) != 0)
+#define SET_GLOBAL_stackIsWrappable(v) ((*(IBOOL *)((IUH)Gdp + 4063)) = (v) ? 1: 0)
+#define GLOBAL_stackIsBig (((*(IBOOL *)((IUH)Gdp + 4067)) & 1) != 0)
+#define SET_GLOBAL_stackIsBig(v) ((*(IBOOL *)((IUH)Gdp + 4067)) = (v) ? 1: 0)
+#define GLOBAL_stackIsExpandDown (((*(IBOOL *)((IUH)Gdp + 4071)) & 1) != 0)
+#define SET_GLOBAL_stackIsExpandDown(v) ((*(IBOOL *)((IUH)Gdp + 4071)) = (v) ? 1: 0)
+#define GLOBAL_stackMask (*(IU32 *)((IUH)Gdp + 4075))
+#define SET_GLOBAL_stackMask(v) (GLOBAL_stackMask = (v))
+#define GLOBAL_stackNeedsNormalising (((*(IBOOL *)((IUH)Gdp + 4079)) & 1) != 0)
+#define SET_GLOBAL_stackNeedsNormalising(v) ((*(IBOOL *)((IUH)Gdp + 4079)) = (v) ? 1: 0)
+#define GLOBAL_laInTopPage (*(IU32 *)((IUH)Gdp + 4083))
+#define SET_GLOBAL_laInTopPage(v) (GLOBAL_laInTopPage = (v))
+#define GLOBAL_laInBottomPage (*(IU32 *)((IUH)Gdp + 4087))
+#define SET_GLOBAL_laInBottomPage(v) (GLOBAL_laInBottomPage = (v))
+#define GLOBAL_hspOK (((*(IBOOL *)((IUH)Gdp + 4091)) & 1) != 0)
+#define SET_GLOBAL_hspOK(v) ((*(IBOOL *)((IUH)Gdp + 4091)) = (v) ? 1: 0)
+#define GLOBAL_pushScratch (*(IU8* *)((IUH)Gdp + 4095))
+#define SET_GLOBAL_pushScratch(v) (GLOBAL_pushScratch = (v))
+#define GLOBAL_pushScratchInUse (((*(IBOOL *)((IUH)Gdp + 4099)) & 1) != 0)
+#define SET_GLOBAL_pushScratchInUse(v) ((*(IBOOL *)((IUH)Gdp + 4099)) = (v) ? 1: 0)
+#define GLOBAL_pushScratchReqSize (*(IUH *)((IUH)Gdp + 4103))
+#define SET_GLOBAL_pushScratchReqSize(v) (GLOBAL_pushScratchReqSize = (v))
+#define GLOBAL_popScratch (*(IU8* *)((IUH)Gdp + 4107))
+#define SET_GLOBAL_popScratch(v) (GLOBAL_popScratch = (v))
+#define GLOBAL_popScratchInUse (((*(IBOOL *)((IUH)Gdp + 4111)) & 1) != 0)
+#define SET_GLOBAL_popScratchInUse(v) ((*(IBOOL *)((IUH)Gdp + 4111)) = (v) ? 1: 0)
+#define GLOBAL_SafeStackIronFrig (((*(IBOOL *)((IUH)Gdp + 4115)) & 1) != 0)
+#define SET_GLOBAL_SafeStackIronFrig(v) ((*(IBOOL *)((IUH)Gdp + 4115)) = (v) ? 1: 0)
+#define GLOBAL_SafeOutOfBoundsFrig (((*(IBOOL *)((IUH)Gdp + 4119)) & 1) != 0)
+#define SET_GLOBAL_SafeOutOfBoundsFrig(v) ((*(IBOOL *)((IUH)Gdp + 4119)) = (v) ? 1: 0)
+#define GLOBAL_HBP (*(IU8* *)((IUH)Gdp + 4123))
+#define SET_GLOBAL_HBP(v) (GLOBAL_HBP = (v))
+#define GLOBAL_newHSP (*(IU8* *)((IUH)Gdp + 4127))
+#define SET_GLOBAL_newHSP(v) (GLOBAL_newHSP = (v))
+#define GLOBAL_newExclPopLimit (*(IU8* *)((IUH)Gdp + 4131))
+#define SET_GLOBAL_newExclPopLimit(v) (GLOBAL_newExclPopLimit = (v))
+#define GLOBAL_newInclPushLimit (*(IU8* *)((IUH)Gdp + 4135))
+#define SET_GLOBAL_newInclPushLimit(v) (GLOBAL_newInclPushLimit = (v))
+#define GLOBAL_newNotionalSsBase (*(IU8* *)((IUH)Gdp + 4139))
+#define SET_GLOBAL_newNotionalSsBase(v) (GLOBAL_newNotionalSsBase = (v))
+#define GLOBAL_newStackIsWrappable (((*(IBOOL *)((IUH)Gdp + 4143)) & 1) != 0)
+#define SET_GLOBAL_newStackIsWrappable(v) ((*(IBOOL *)((IUH)Gdp + 4143)) = (v) ? 1: 0)
+#define GLOBAL_newStackIsBig (((*(IBOOL *)((IUH)Gdp + 4147)) & 1) != 0)
+#define SET_GLOBAL_newStackIsBig(v) ((*(IBOOL *)((IUH)Gdp + 4147)) = (v) ? 1: 0)
+#define GLOBAL_newStackIsExpandDown (((*(IBOOL *)((IUH)Gdp + 4151)) & 1) != 0)
+#define SET_GLOBAL_newStackIsExpandDown(v) ((*(IBOOL *)((IUH)Gdp + 4151)) = (v) ? 1: 0)
+#define GLOBAL_newStackMask (*(IU32 *)((IUH)Gdp + 4155))
+#define SET_GLOBAL_newStackMask(v) (GLOBAL_newStackMask = (v))
+#define GLOBAL_newLaInBottomPage (*(IU32 *)((IUH)Gdp + 4159))
+#define SET_GLOBAL_newLaInBottomPage(v) (GLOBAL_newLaInBottomPage = (v))
+#define GLOBAL_newHspOK (((*(IBOOL *)((IUH)Gdp + 4163)) & 1) != 0)
+#define SET_GLOBAL_newHspOK(v) ((*(IBOOL *)((IUH)Gdp + 4163)) = (v) ? 1: 0)
+#define GLOBAL_newPushScratch (*(IU8* *)((IUH)Gdp + 4167))
+#define SET_GLOBAL_newPushScratch(v) (GLOBAL_newPushScratch = (v))
+#define GLOBAL_newPushScratchInUse (((*(IBOOL *)((IUH)Gdp + 4171)) & 1) != 0)
+#define SET_GLOBAL_newPushScratchInUse(v) ((*(IBOOL *)((IUH)Gdp + 4171)) = (v) ? 1: 0)
+#define GLOBAL_newPushScratchReqSize (*(IUH *)((IUH)Gdp + 4175))
+#define SET_GLOBAL_newPushScratchReqSize(v) (GLOBAL_newPushScratchReqSize = (v))
+#define GLOBAL_newStackIsSupervisor (((*(IBOOL *)((IUH)Gdp + 4179)) & 1) != 0)
+#define SET_GLOBAL_newStackIsSupervisor(v) ((*(IBOOL *)((IUH)Gdp + 4179)) = (v) ? 1: 0)
+#define GLOBAL_SSTAT_PostDstOddBailOut (*(IUH *)((IUH)Gdp + 4183))
+#define SET_GLOBAL_SSTAT_PostDstOddBailOut(v) (GLOBAL_SSTAT_PostDstOddBailOut = (v))
+#define GLOBAL_SSTAT_PostDstBailOut (*(IUH *)((IUH)Gdp + 4187))
+#define SET_GLOBAL_SSTAT_PostDstBailOut(v) (GLOBAL_SSTAT_PostDstBailOut = (v))
+#define GLOBAL_SSTAT_JcProcBailOut (*(IUH *)((IUH)Gdp + 4191))
+#define SET_GLOBAL_SSTAT_JcProcBailOut(v) (GLOBAL_SSTAT_JcProcBailOut = (v))
+#define GLOBAL_SSTAT_CalcStackUniBailOut (*(IUH *)((IUH)Gdp + 4195))
+#define SET_GLOBAL_SSTAT_CalcStackUniBailOut(v) (GLOBAL_SSTAT_CalcStackUniBailOut = (v))
+#define GLOBAL_SSTAT_SetupHbpBailOut (*(IUH *)((IUH)Gdp + 4199))
+#define SET_GLOBAL_SSTAT_SetupHbpBailOut(v) (GLOBAL_SSTAT_SetupHbpBailOut = (v))
+#define GLOBAL_SSTAT_SetupHbpOddBailOut (*(IUH *)((IUH)Gdp + 4203))
+#define SET_GLOBAL_SSTAT_SetupHbpOddBailOut(v) (GLOBAL_SSTAT_SetupHbpOddBailOut = (v))
+#define GLOBAL_VDM_SoftIntHandler (*(IUH* *)((IUH)Gdp + 4207))
+#define SET_GLOBAL_VDM_SoftIntHandler(v) (GLOBAL_VDM_SoftIntHandler = (v))
+#define GLOBAL_EOIEnable (*(IU8* *)((IUH)Gdp + 4211))
+#define SET_GLOBAL_EOIEnable(v) (GLOBAL_EOIEnable = (v))
+#define GLOBAL_AddProfilePtr (*(IUH* *)((IUH)Gdp + 4215))
+#define SET_GLOBAL_AddProfilePtr(v) (GLOBAL_AddProfilePtr = (v))
+#define GLOBAL_MaxProfileData (*(IUH* *)((IUH)Gdp + 4219))
+#define SET_GLOBAL_MaxProfileData(v) (GLOBAL_MaxProfileData = (v))
+#define GLOBAL_FreeContextHead (*(struct ContextREC* *)((IUH)Gdp + 4223))
+#define SET_GLOBAL_FreeContextHead(v) (GLOBAL_FreeContextHead = (v))
+#define GLOBAL_ValidContextHead (*(struct ContextREC* *)((IUH)Gdp + 4227))
+#define SET_GLOBAL_ValidContextHead(v) (GLOBAL_ValidContextHead = (v))
+#define GLOBAL_CurrentContextPtr (*(struct ContextREC* *)((IUH)Gdp + 4231))
+#define SET_GLOBAL_CurrentContextPtr(v) (GLOBAL_CurrentContextPtr = (v))
+#define GLOBAL_CurrentContext (*(IU8 *)((IUH)Gdp + 4235))
+#define SET_GLOBAL_CurrentContext(v) (GLOBAL_CurrentContext = (v))
+#define GLOBAL_SeenGDTUse (((*(IBOOL *)((IUH)Gdp + 4239)) & 1) != 0)
+#define SET_GLOBAL_SeenGDTUse(v) ((*(IBOOL *)((IUH)Gdp + 4239)) = (v) ? 1: 0)
+#define GLOBAL_SeenLDTUse (((*(IBOOL *)((IUH)Gdp + 4243)) & 1) != 0)
+#define SET_GLOBAL_SeenLDTUse(v) ((*(IBOOL *)((IUH)Gdp + 4243)) = (v) ? 1: 0)
+#define GLOBAL_Context (*(struct ContextREC* *)((IUH)Gdp + 4247))
+#define SET_GLOBAL_Context(v) (GLOBAL_Context = (v))
+#define GLOBAL_diffLDTList (*(IU16* *)((IUH)Gdp + 4251))
+#define SET_GLOBAL_diffLDTList(v) (GLOBAL_diffLDTList = (v))
+#define GLOBAL_nextDiffLDT (*(IUH *)((IUH)Gdp + 4255))
+#define SET_GLOBAL_nextDiffLDT(v) (GLOBAL_nextDiffLDT = (v))
+#define GLOBAL_functions (*(IU32** *)((IUH)Gdp + 4259))
+#define SET_GLOBAL_functions(v) (GLOBAL_functions = (v))
+#define GLOBAL_selectedFunction (*(IU32* *)((IUH)Gdp + 4263))
+#define SET_GLOBAL_selectedFunction(v) (GLOBAL_selectedFunction = (v))
+#define GLOBAL_instrCountdown (*(ISH *)((IUH)Gdp + 4267))
+#define SET_GLOBAL_instrCountdown(v) (GLOBAL_instrCountdown = (v))
+#define GLOBAL_blockRecords (*(struct BLOCK_RECORD* *)((IUH)Gdp + 4271))
+#define SET_GLOBAL_blockRecords(v) (GLOBAL_blockRecords = (v))
+#define GLOBAL_patchRecords (*(struct PatchREC* *)((IUH)Gdp + 4275))
+#define SET_GLOBAL_patchRecords(v) (GLOBAL_patchRecords = (v))
+#define GLOBAL_nanoCompParameter (*(IUH *)((IUH)Gdp + 4279))
+#define SET_GLOBAL_nanoCompParameter(v) (GLOBAL_nanoCompParameter = (v))
+#define GLOBAL_intelCopyPtrComp (*(IU8* *)((IUH)Gdp + 4283))
+#define SET_GLOBAL_intelCopyPtrComp(v) (GLOBAL_intelCopyPtrComp = (v))
+#define GLOBAL_nrOfBlocksToCompile (*(IUH *)((IUH)Gdp + 4287))
+#define SET_GLOBAL_nrOfBlocksToCompile(v) (GLOBAL_nrOfBlocksToCompile = (v))
+#define GLOBAL_entryPointHashTable (*(IU16* *)((IUH)Gdp + 4291))
+#define SET_GLOBAL_entryPointHashTable(v) (GLOBAL_entryPointHashTable = (v))
+#define GLOBAL_nextFreeEntryPointRec (*(struct EntryPointREC* *)((IUH)Gdp + 4295))
+#define SET_GLOBAL_nextFreeEntryPointRec(v) (GLOBAL_nextFreeEntryPointRec = (v))
+#define GLOBAL_firstEntryPointPtr (*(struct EntryPointREC* *)((IUH)Gdp + 4299))
+#define SET_GLOBAL_firstEntryPointPtr(v) (GLOBAL_firstEntryPointPtr = (v))
+#define GLOBAL_blockPtr (*(struct BLOCK_RECORD* *)((IUH)Gdp + 4303))
+#define SET_GLOBAL_blockPtr(v) (GLOBAL_blockPtr = (v))
+#define GLOBAL_headAndTailBlockPtr (*(struct BLOCK_RECORD* *)((IUH)Gdp + 4307))
+#define SET_GLOBAL_headAndTailBlockPtr(v) (GLOBAL_headAndTailBlockPtr = (v))
+#define GLOBAL_nrOfInstrsParsed (*(IUH *)((IUH)Gdp + 4311))
+#define SET_GLOBAL_nrOfInstrsParsed(v) (GLOBAL_nrOfInstrsParsed = (v))
+#define GLOBAL_intelPtrLimit (*(IU8* *)((IUH)Gdp + 4315))
+#define SET_GLOBAL_intelPtrLimit(v) (GLOBAL_intelPtrLimit = (v))
+#define GLOBAL_blockStartIntelPtr (*(IU8* *)((IUH)Gdp + 4319))
+#define SET_GLOBAL_blockStartIntelPtr(v) (GLOBAL_blockStartIntelPtr = (v))
+#define GLOBAL_blockStartCodeOffset (*(IUH *)((IUH)Gdp + 4323))
+#define SET_GLOBAL_blockStartCodeOffset(v) (GLOBAL_blockStartCodeOffset = (v))
+#define GLOBAL_finalCodeOffset (*(IUH *)((IUH)Gdp + 4327))
+#define SET_GLOBAL_finalCodeOffset(v) (GLOBAL_finalCodeOffset = (v))
+#define GLOBAL_ni_BlockPtr (*(struct NI_BLOCK_RECORD* *)((IUH)Gdp + 4331))
+#define SET_GLOBAL_ni_BlockPtr(v) (GLOBAL_ni_BlockPtr = (v))
+#define GLOBAL_nanoOp3 (*(IUH *)((IUH)Gdp + 4335))
+#define SET_GLOBAL_nanoOp3(v) (GLOBAL_nanoOp3 = (v))
+#define GLOBAL_sibByte (*(IUH *)((IUH)Gdp + 4339))
+#define SET_GLOBAL_sibByte(v) (GLOBAL_sibByte = (v))
+#define GLOBAL_dynamicOffset (*(IU32 *)((IUH)Gdp + 4343))
+#define SET_GLOBAL_dynamicOffset(v) (GLOBAL_dynamicOffset = (v))
+#define GLOBAL_eaSegCode (*(IUH *)((IUH)Gdp + 4347))
+#define SET_GLOBAL_eaSegCode(v) (GLOBAL_eaSegCode = (v))
+#define GLOBAL_simpleAccess (((*(IBOOL *)((IUH)Gdp + 4351)) & 1) != 0)
+#define SET_GLOBAL_simpleAccess(v) ((*(IBOOL *)((IUH)Gdp + 4351)) = (v) ? 1: 0)
+#define GLOBAL_simpleAccessPtr (*(IU8* *)((IUH)Gdp + 4355))
+#define SET_GLOBAL_simpleAccessPtr(v) (GLOBAL_simpleAccessPtr = (v))
+#define GLOBAL_instrStartIntelPtr (*(IU8* *)((IUH)Gdp + 4359))
+#define SET_GLOBAL_instrStartIntelPtr(v) (GLOBAL_instrStartIntelPtr = (v))
+#define GLOBAL_npxIntelPtr (*(IU8* *)((IUH)Gdp + 4363))
+#define SET_GLOBAL_npxIntelPtr(v) (GLOBAL_npxIntelPtr = (v))
+#define GLOBAL_topLevel (*(IUH* *)((IUH)Gdp + 4367))
+#define SET_GLOBAL_topLevel(v) (GLOBAL_topLevel = (v))
+#define GLOBAL_defaultPrimaryActions (*(IU32** *)((IUH)Gdp + 4371))
+#define SET_GLOBAL_defaultPrimaryActions(v) (GLOBAL_defaultPrimaryActions = (v))
+#define GLOBAL_actualPrimaryActions (*(IU32** *)((IUH)Gdp + 4375))
+#define SET_GLOBAL_actualPrimaryActions(v) (GLOBAL_actualPrimaryActions = (v))
+#define GLOBAL_codeOffset (*(IUH *)((IUH)Gdp + 4379))
+#define SET_GLOBAL_codeOffset(v) (GLOBAL_codeOffset = (v))
+#define GLOBAL_destCodeSegment (*(IUH *)((IUH)Gdp + 4383))
+#define SET_GLOBAL_destCodeSegment(v) (GLOBAL_destCodeSegment = (v))
+#define GLOBAL_destCodeOffset (*(IUH *)((IUH)Gdp + 4387))
+#define SET_GLOBAL_destCodeOffset(v) (GLOBAL_destCodeOffset = (v))
+#define GLOBAL_nanoEax (*(IU32 *)((IUH)Gdp + 4391))
+#define SET_GLOBAL_nanoEax(v) (GLOBAL_nanoEax = (v))
+#define GLOBAL_nanoEcx (*(IU32 *)((IUH)Gdp + 4395))
+#define SET_GLOBAL_nanoEcx(v) (GLOBAL_nanoEcx = (v))
+#define GLOBAL_nanoEdx (*(IU32 *)((IUH)Gdp + 4399))
+#define SET_GLOBAL_nanoEdx(v) (GLOBAL_nanoEdx = (v))
+#define GLOBAL_nanoEbx (*(IU32 *)((IUH)Gdp + 4403))
+#define SET_GLOBAL_nanoEbx(v) (GLOBAL_nanoEbx = (v))
+#define GLOBAL_nanoEsp (*(IU32 *)((IUH)Gdp + 4407))
+#define SET_GLOBAL_nanoEsp(v) (GLOBAL_nanoEsp = (v))
+#define GLOBAL_nanoEbp (*(IU32 *)((IUH)Gdp + 4411))
+#define SET_GLOBAL_nanoEbp(v) (GLOBAL_nanoEbp = (v))
+#define GLOBAL_nanoEsi (*(IU32 *)((IUH)Gdp + 4415))
+#define SET_GLOBAL_nanoEsi(v) (GLOBAL_nanoEsi = (v))
+#define GLOBAL_nanoEdi (*(IU32 *)((IUH)Gdp + 4419))
+#define SET_GLOBAL_nanoEdi(v) (GLOBAL_nanoEdi = (v))
+#define GLOBAL_espToRestore (*(IU32 *)((IUH)Gdp + 4423))
+#define SET_GLOBAL_espToRestore(v) (GLOBAL_espToRestore = (v))
+#define GLOBAL_entryExitCount (*(IU32 *)((IUH)Gdp + 4427))
+#define SET_GLOBAL_entryExitCount(v) (GLOBAL_entryExitCount = (v))
+#define GLOBAL_instructionCount (*(IU32 *)((IUH)Gdp + 4431))
+#define SET_GLOBAL_instructionCount(v) (GLOBAL_instructionCount = (v))
+#define GLOBAL_nanoDebugControl (*(IU32 *)((IUH)Gdp + 4435))
+#define SET_GLOBAL_nanoDebugControl(v) (GLOBAL_nanoDebugControl = (v))
+#define GLOBAL_compilationThreshold (*(IU32 *)((IUH)Gdp + 4439))
+#define SET_GLOBAL_compilationThreshold(v) (GLOBAL_compilationThreshold = (v))
+#define GLOBAL_maxBlocksToCompile (*(IUH *)((IUH)Gdp + 4443))
+#define SET_GLOBAL_maxBlocksToCompile(v) (GLOBAL_maxBlocksToCompile = (v))
+#define GLOBAL_blocksToCompile (*(struct BLOCK_TO_COMPILE* *)((IUH)Gdp + 4447))
+#define SET_GLOBAL_blocksToCompile(v) (GLOBAL_blocksToCompile = (v))
+#define GLOBAL_byteMemory (*(IU8* *)((IUH)Gdp + 4451))
+#define SET_GLOBAL_byteMemory(v) (GLOBAL_byteMemory = (v))
+#define GLOBAL_wordMemory (*(IU16* *)((IUH)Gdp + 4455))
+#define SET_GLOBAL_wordMemory(v) (GLOBAL_wordMemory = (v))
+#define GLOBAL_longMemory (*(IU32* *)((IUH)Gdp + 4459))
+#define SET_GLOBAL_longMemory(v) (GLOBAL_longMemory = (v))
+#define GLOBAL_ni_BlockRecords (*(struct NI_BLOCK_RECORD* *)((IUH)Gdp + 4463))
+#define SET_GLOBAL_ni_BlockRecords(v) (GLOBAL_ni_BlockRecords = (v))
+#define GLOBAL_intelCopyMemoryExec (*(IU8* *)((IUH)Gdp + 4467))
+#define SET_GLOBAL_intelCopyMemoryExec(v) (GLOBAL_intelCopyMemoryExec = (v))
+#define GLOBAL_intelCopyMemoryComp (*(IU8* *)((IUH)Gdp + 4471))
+#define SET_GLOBAL_intelCopyMemoryComp(v) (GLOBAL_intelCopyMemoryComp = (v))
+#define GLOBAL_intelCopyMemoryCompEnd (*(IU8* *)((IUH)Gdp + 4475))
+#define SET_GLOBAL_intelCopyMemoryCompEnd(v) (GLOBAL_intelCopyMemoryCompEnd = (v))
+#define GLOBAL_PpcCR4Save (*(IUH *)((IUH)Gdp + 4479))
+#define SET_GLOBAL_PpcCR4Save(v) (GLOBAL_PpcCR4Save = (v))
+#define GLOBAL_SfDelayFlagPtr (*(struct SfDelayRecord* *)((IUH)Gdp + 4483))
+#define SET_GLOBAL_SfDelayFlagPtr(v) (GLOBAL_SfDelayFlagPtr = (v))
+#define GLOBAL_SfDelayFlagLim (*(IUH *)((IUH)Gdp + 4487))
+#define SET_GLOBAL_SfDelayFlagLim(v) (GLOBAL_SfDelayFlagLim = (v))
+#define GLOBAL_SfDecrementerVal (*(IUH *)((IUH)Gdp + 4491))
+#define SET_GLOBAL_SfDecrementerVal(v) (GLOBAL_SfDecrementerVal = (v))
+#define GLOBAL_SFPciPageCount (*(IU32 *)((IUH)Gdp + 4495))
+#define SET_GLOBAL_SFPciPageCount(v) (GLOBAL_SFPciPageCount = (v))
+#define GLOBAL_SfProcessorType (*(IUH *)((IUH)Gdp + 4499))
+#define SET_GLOBAL_SfProcessorType(v) (GLOBAL_SfProcessorType = (v))
+#define GLOBAL_SfQEventPending (((*(IBOOL *)((IUH)Gdp + 4503)) & 1) != 0)
+#define SET_GLOBAL_SfQEventPending(v) ((*(IBOOL *)((IUH)Gdp + 4503)) = (v) ? 1: 0)
+#define GLOBAL_AllowUncheckedMode (((*(IBOOL *)((IUH)Gdp + 4507)) & 1) != 0)
+#define SET_GLOBAL_AllowUncheckedMode(v) ((*(IBOOL *)((IUH)Gdp + 4507)) = (v) ? 1: 0)
+#define GLOBAL_AllowAnyUnchecked (((*(IBOOL *)((IUH)Gdp + 4511)) & 1) != 0)
+#define SET_GLOBAL_AllowAnyUnchecked(v) ((*(IBOOL *)((IUH)Gdp + 4511)) = (v) ? 1: 0)
+#define GLOBAL_LastProtected (*(IUH *)((IUH)Gdp + 4515))
+#define SET_GLOBAL_LastProtected(v) (GLOBAL_LastProtected = (v))
+#define GLOBAL_EmulationSR (*(IUH *)((IUH)Gdp + 4519))
+#define SET_GLOBAL_EmulationSR(v) (GLOBAL_EmulationSR = (v))
+#define GLOBAL_CurrentCheckLevel (*(IUH *)((IUH)Gdp + 4523))
+#define SET_GLOBAL_CurrentCheckLevel(v) (GLOBAL_CurrentCheckLevel = (v))
+#define GLOBAL_UnProtSegmentValue (*(IUH *)((IUH)Gdp + 4527))
+#define SET_GLOBAL_UnProtSegmentValue(v) (GLOBAL_UnProtSegmentValue = (v))
+#define GLOBAL_SavedSegmentValue (*(IUH *)((IUH)Gdp + 4531))
+#define SET_GLOBAL_SavedSegmentValue(v) (GLOBAL_SavedSegmentValue = (v))
+#define GLOBAL_SavedBATIValue (*(IUH *)((IUH)Gdp + 4535))
+#define SET_GLOBAL_SavedBATIValue(v) (GLOBAL_SavedBATIValue = (v))
+#define GLOBAL_SavedBATDValue (*(IUH *)((IUH)Gdp + 4539))
+#define SET_GLOBAL_SavedBATDValue(v) (GLOBAL_SavedBATDValue = (v))
+#define GLOBAL_LogicalBaseAddrForIO (*(IU8* *)((IUH)Gdp + 4543))
+#define SET_GLOBAL_LogicalBaseAddrForIO(v) (GLOBAL_LogicalBaseAddrForIO = (v))
+#define GLOBAL_WriteToOutputPort (((*(IBOOL *)((IUH)Gdp + 4547)) & 1) != 0)
+#define SET_GLOBAL_WriteToOutputPort(v) ((*(IBOOL *)((IUH)Gdp + 4547)) = (v) ? 1: 0)
+#define GLOBAL_GateA20Status (*(IU8 *)((IUH)Gdp + 4551))
+#define SET_GLOBAL_GateA20Status(v) (GLOBAL_GateA20Status = (v))
+#define GLOBAL_MmuInVideoUniverse (*(IUH *)((IUH)Gdp + 4555))
+#define SET_GLOBAL_MmuInVideoUniverse(v) (GLOBAL_MmuInVideoUniverse = (v))
+#define GLOBAL_MmuWpBit (*(IUH *)((IUH)Gdp + 4559))
+#define SET_GLOBAL_MmuWpBit(v) (GLOBAL_MmuWpBit = (v))
+#define GLOBAL_MmuUsBit (*(IUH *)((IUH)Gdp + 4563))
+#define SET_GLOBAL_MmuUsBit(v) (GLOBAL_MmuUsBit = (v))
+#define GLOBAL_SfSkipVideoWriteSync (((*(IBOOL *)((IUH)Gdp + 4567)) & 1) != 0)
+#define SET_GLOBAL_SfSkipVideoWriteSync(v) ((*(IBOOL *)((IUH)Gdp + 4567)) = (v) ? 1: 0)
+#define GLOBAL_SfSkipVideoReadSync (((*(IBOOL *)((IUH)Gdp + 4571)) & 1) != 0)
+#define SET_GLOBAL_SfSkipVideoReadSync(v) ((*(IBOOL *)((IUH)Gdp + 4571)) = (v) ? 1: 0)
+#define GLOBAL_forceVideoState (((*(IBOOL *)((IUH)Gdp + 4575)) & 1) != 0)
+#define SET_GLOBAL_forceVideoState(v) ((*(IBOOL *)((IUH)Gdp + 4575)) = (v) ? 1: 0)
+#define GLOBAL_EDL_WORKSPACE_257 (*(IUH *)((IUH)Gdp + 4579))
+#define SET_GLOBAL_EDL_WORKSPACE_257(v) (GLOBAL_EDL_WORKSPACE_257 = (v))
+#define GLOBAL_EDL_WORKSPACE_258 (*(IUH *)((IUH)Gdp + 4583))
+#define SET_GLOBAL_EDL_WORKSPACE_258(v) (GLOBAL_EDL_WORKSPACE_258 = (v))
+#define GLOBAL_EDL_WORKSPACE_259 (*(IUH *)((IUH)Gdp + 4587))
+#define SET_GLOBAL_EDL_WORKSPACE_259(v) (GLOBAL_EDL_WORKSPACE_259 = (v))
+#define GLOBAL_EDL_WORKSPACE_260 (*(IUH *)((IUH)Gdp + 4591))
+#define SET_GLOBAL_EDL_WORKSPACE_260(v) (GLOBAL_EDL_WORKSPACE_260 = (v))
+#define GLOBAL_EDL_WORKSPACE_261 (*(IUH *)((IUH)Gdp + 4595))
+#define SET_GLOBAL_EDL_WORKSPACE_261(v) (GLOBAL_EDL_WORKSPACE_261 = (v))
+#define GLOBAL_EDL_WORKSPACE_262 (*(IUH *)((IUH)Gdp + 4599))
+#define SET_GLOBAL_EDL_WORKSPACE_262(v) (GLOBAL_EDL_WORKSPACE_262 = (v))
+#define GLOBAL_EDL_WORKSPACE_263 (*(IUH *)((IUH)Gdp + 4603))
+#define SET_GLOBAL_EDL_WORKSPACE_263(v) (GLOBAL_EDL_WORKSPACE_263 = (v))
+#define GLOBAL_ProtectedSet (*(struct SfRegisterSetREC *)((IUH)Gdp + 4607))
+#define SET_GLOBAL_ProtectedSet(v) (GLOBAL_ProtectedSet = (v))
+#define GLOBAL_CurrentFullRegSet (*(IUH *)((IUH)Gdp + 4671))
+#define SET_GLOBAL_CurrentFullRegSet(v) (GLOBAL_CurrentFullRegSet = (v))
+#define GLOBAL_AltRegZeros (*(struct SfRegZeroAlternateREC* *)((IUH)Gdp + 4675))
+#define SET_GLOBAL_AltRegZeros(v) (GLOBAL_AltRegZeros = (v))
+#define GLOBAL_FullSets (*(struct SfFullRegSetREC* *)((IUH)Gdp + 4679))
+#define SET_GLOBAL_FullSets(v) (GLOBAL_FullSets = (v))
+#define GLOBAL_RegSetNextFree (*(struct SfFullRegSetREC* *)((IUH)Gdp + 4683))
+#define SET_GLOBAL_RegSetNextFree(v) (GLOBAL_RegSetNextFree = (v))
+#define GLOBAL_ValidRegSetHeadPtr (*(struct SfFullRegSetREC* *)((IUH)Gdp + 4687))
+#define SET_GLOBAL_ValidRegSetHeadPtr(v) (GLOBAL_ValidRegSetHeadPtr = (v))
+#define GLOBAL_AltRegZeroNextFree (*(struct SfRegZeroAlternateREC* *)((IUH)Gdp + 4691))
+#define SET_GLOBAL_AltRegZeroNextFree(v) (GLOBAL_AltRegZeroNextFree = (v))
+#define GLOBAL_SfPciBaseAddressRegCount (*(IUH *)((IUH)Gdp + 4695))
+#define SET_GLOBAL_SfPciBaseAddressRegCount(v) (GLOBAL_SfPciBaseAddressRegCount = (v))
+#define GLOBAL_SfPciBaseAddressRegRecs (*(struct SfPciBaseAddressRegREC* *)((IUH)Gdp + 4699))
+#define SET_GLOBAL_SfPciBaseAddressRegRecs(v) (GLOBAL_SfPciBaseAddressRegRecs = (v))
+#define GLOBAL_SfPciMemoryWindowBase (*(IUH *)((IUH)Gdp + 4703))
+#define SET_GLOBAL_SfPciMemoryWindowBase(v) (GLOBAL_SfPciMemoryWindowBase = (v))
+#define GLOBAL_tmpESP (*(IUH *)((IUH)Gdp + 4707))
+#define SET_GLOBAL_tmpESP(v) (GLOBAL_tmpESP = (v))
+#define GLOBAL_EaOffs (*(IU32 *)((IUH)Gdp + 4711))
+#define SET_GLOBAL_EaOffs(v) (GLOBAL_EaOffs = (v))
+#define GLOBAL_BusinessResult (*(IUH *)((IUH)Gdp + 4715))
+#define SET_GLOBAL_BusinessResult(v) (GLOBAL_BusinessResult = (v))
+#define GLOBAL_TraceVector ((IU8 *)((IUH)Gdp + 4719))
+#define GLOBAL_TraceVectorSize (40000)
+#define GDP_OFFSET 1
+#define GDP_SIZE 44719
+#define GDP_CHECKSUM 32240743
+extern IHP Gdp;
+#endif /* _gdpvar_h */
+
+/* A total of 1053 T_GLOBALs */
diff --git a/private/mvdm/softpc.new/base/ccpu386/idiv.c b/private/mvdm/softpc.new/base/ccpu386/idiv.c
new file mode 100644
index 000000000..1bbd34be8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/idiv.c
@@ -0,0 +1,171 @@
+/*[
+
+idiv.c
+
+LOCAL CHAR SccsID[]="@(#)idiv.c 1.7 08/01/94";
+
+IDIV CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <idiv.h>
+#include <c_div64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed Divide. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IDIV8
+
+IFN1(
+	IU32, op2	/* divisor operand */
+    )
+
+
+   {
+   /* Intel IDIV r/m8: signed divide of the 16-bit dividend in AX by the
+    * 8-bit divisor in op2.  Quotient goes to AL, remainder to AH.
+    * NOTE(review): the code falls through after calling Int0(), so Int0()
+    * is presumably non-returning (raises the divide-error exception) --
+    * confirm against c_xcptn.
+    */
+   IS32 sresult;	/* quotient, computed in 32 bits */
+   IS32 sop1;	/* dividend (AX), sign extended to 32 bits */
+   IS32 sop2;	/* divisor (op2), sign extended to 32 bits */
+
+   if ( op2 == 0 )
+      Int0(); /* Divide by Zero Exception */
+
+   sop2 = (IS32)op2;
+   sop1 = (IS32)GET_AX();
+
+   if ( sop1 & BIT15_MASK )	/* Sign extend operands to 32 bits */
+      sop1 |= ~WORD_MASK;
+   if ( sop2 & BIT7_MASK )
+      sop2 |= ~BYTE_MASK;
+
+   sresult = sop1 / sop2;	/* Do operation */
+
+   /* Quotient fits in AL iff bits 15..7 are all zero (non-negative
+    * result) or all one (negative result). */
+   if ( (sresult & 0xff80) == 0 || (sresult & 0xff80) == 0xff80 )
+      ;	/* it fits */
+   else
+      Int0();	/* Result doesn't fit in destination */
+
+   SET_AL(sresult);	/* Store Quotient */
+   SET_AH(sop1 % sop2);	/* Store Remainder */
+
+   /* Set all undefined flag(s) */
+#ifdef SET_UNDEFINED_DIV_FLAG
+   SET_CF(UNDEFINED_FLAG);
+   SET_OF(UNDEFINED_FLAG);
+   SET_SF(UNDEFINED_FLAG);
+   SET_ZF(UNDEFINED_FLAG);
+   SET_PF(UNDEFINED_FLAG);
+   SET_AF(UNDEFINED_FLAG);
+#endif
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed Divide. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IDIV16
+
+IFN1(
+	IU32, op2	/* divisor operand */
+    )
+
+
+   {
+   /* Intel IDIV r/m16: signed divide of the 32-bit dividend in DX:AX by
+    * the 16-bit divisor in op2.  Quotient goes to AX, remainder to DX.
+    * NOTE(review): Int0() is assumed not to return (divide-error
+    * exception) -- the code falls through otherwise.
+    */
+   IS32 sresult;	/* quotient, computed in 32 bits */
+   IS32 sop1;	/* dividend, DX:AX packed into 32 bits */
+   IS32 sop2;	/* divisor (op2), sign extended to 32 bits */
+
+   if ( op2 == 0 )
+      Int0(); /* Divide by Zero Exception */
+
+   sop2 = (IS32)op2;
+   sop1 = (IU32)GET_DX() << 16 | GET_AX();	/* DX = high word, AX = low word */
+
+   if ( sop2 & BIT15_MASK )	/* Sign extend operands to 32 bits */
+      sop2 |= ~WORD_MASK;
+
+   sresult = sop1 / sop2;	/* Do operation */
+
+   /* Quotient fits in AX iff bits 31..15 are all zero (non-negative
+    * result) or all one (negative result). */
+   if ( (sresult & 0xffff8000) == 0 || (sresult & 0xffff8000) == 0xffff8000 )
+      ;	/* it fits */
+   else
+      Int0();	/* Result doesn't fit in destination */
+
+   SET_AX(sresult);	/* Store Quotient */
+   SET_DX(sop1 % sop2);	/* Store Remainder */
+
+   /* Set all undefined flag(s) */
+#ifdef SET_UNDEFINED_DIV_FLAG
+   SET_CF(UNDEFINED_FLAG);
+   SET_OF(UNDEFINED_FLAG);
+   SET_SF(UNDEFINED_FLAG);
+   SET_ZF(UNDEFINED_FLAG);
+   SET_PF(UNDEFINED_FLAG);
+   SET_AF(UNDEFINED_FLAG);
+#endif
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed Divide. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
<br>
+GLOBAL VOID
+IDIV32
+
+IFN1(
+	IU32, op2	/* divisor operand */
+    )
+
+
+   {
+   /* Intel IDIV r/m32: signed divide of the 64-bit dividend in EDX:EAX
+    * by the 32-bit divisor in op2, via the div64() helper.  Quotient
+    * goes to EAX, remainder to EDX.
+    * NOTE(review): Int0() is assumed not to return (divide-error
+    * exception); div64's contract (quotient in hi:lo, remainder out) is
+    * taken from c_div64 -- confirm there.
+    */
+   IS32 slr;	/* low result */
+   IS32 shr;	/* high result */
+   IS32 srem;	/* remainder */
+
+   if ( op2 == 0 )
+      Int0(); /* Divide by Zero Exception */
+
+   shr = GET_EDX();	/* high half of dividend */
+   slr = GET_EAX();	/* low half of dividend */
+   div64(&shr, &slr, (IS32)op2, &srem);
+
+   /* Quotient fits in EAX iff the high half is the sign extension of
+    * the low half's sign bit. */
+   if ( ((shr == 0x00000000) && ((slr & BIT31_MASK) == 0)) ||
+	((shr == 0xffffffff) && ((slr & BIT31_MASK) != 0)) )
+      ;	/* it fits */
+   else
+      Int0();	/* Result doesn't fit in destination */
+
+   SET_EAX(slr);	/* Store Quotient */
+   SET_EDX(srem);	/* Store Remainder */
+
+   /* Set all undefined flag(s) */
+#ifdef SET_UNDEFINED_DIV_FLAG
+   SET_CF(UNDEFINED_FLAG);
+   SET_OF(UNDEFINED_FLAG);
+   SET_SF(UNDEFINED_FLAG);
+   SET_ZF(UNDEFINED_FLAG);
+   SET_PF(UNDEFINED_FLAG);
+   SET_AF(UNDEFINED_FLAG);
+#endif
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/idiv.h b/private/mvdm/softpc.new/base/ccpu386/idiv.h
new file mode 100644
index 000000000..fe4f2ee3e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/idiv.h
@@ -0,0 +1,30 @@
+/*
+ idiv.h
+
+ Define all IDIV CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)idiv.h 1.4 02/09/94";
+ */
+
+IMPORT VOID IDIV8
+
+IPT1(
+ IU32, op2
+
+ );
+
+IMPORT VOID IDIV16
+
+IPT1(
+ IU32, op2
+
+ );
+
+IMPORT VOID IDIV32
+
+IPT1(
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/imul.c b/private/mvdm/softpc.new/base/ccpu386/imul.c
new file mode 100644
index 000000000..c72d4ffbb
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/imul.c
@@ -0,0 +1,243 @@
+/*[
+
+imul.c
+
+LOCAL CHAR SccsID[]="@(#)imul.c 1.8 11/09/94";
+
+IMUL CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <imul.h>
+#include <c_mul64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IMUL8
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst(low half)/lsrc operand */
+	IU32, op2	/* rsrc operand */
+    )
+
+
+   {
+   /* Intel IMUL r/m8 (one-operand form): signed 8x8 multiply.  The low
+    * byte of the 16-bit product is returned through *pop1 (the caller
+    * stores it in AL); the high byte is written to AH here.  CF/OF are
+    * set iff the product does not fit in a signed byte.
+    */
+   IU32 result;	/* full product, computed in 32 bits */
+
+   /* Sign extend operands to 32-bits (ie Host Size) */
+   if ( *pop1 & BIT7_MASK )
+      *pop1 |= ~BYTE_MASK;
+   if ( op2 & BIT7_MASK )
+      op2 |= ~BYTE_MASK;
+
+   result = *pop1 * op2;	/* Do operation */
+   SET_AH(result >> 8 & BYTE_MASK);	/* Store top half of result */
+
+   /* Set CF/OF. */
+   /* Fits in a byte iff bits 15..7 are all zero or all one. */
+   if ( (result & 0xff80) == 0 || (result & 0xff80) == 0xff80 )
+      {
+      SET_CF(0); SET_OF(0);
+      }
+   else
+      {
+      SET_CF(1); SET_OF(1);
+      }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+   /* Do NOT Set all undefined flag.
+    * Microsoft VGA Mouse relies on preserved flags
+    */
+#endif
+   *pop1 = result;	/* Return low half of result */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IMUL16
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst(low half)/lsrc operand */
+	IU32, op2	/* rsrc operand */
+    )
+
+
+   {
+   /* Intel IMUL r/m16 (one-operand form): signed 16x16 multiply.  The
+    * low word of the 32-bit product is returned through *pop1 (the
+    * caller stores it in AX); the high word is written to DX here.
+    * CF/OF are set iff the product does not fit in a signed word.
+    */
+   IU32 result;	/* full product, computed in 32 bits */
+
+   /* Sign extend operands to 32-bits (ie Host Size) */
+   if ( *pop1 & BIT15_MASK )
+      *pop1 |= ~WORD_MASK;
+   if ( op2 & BIT15_MASK )
+      op2 |= ~WORD_MASK;
+
+   result = *pop1 * op2;	/* Do operation */
+   SET_DX(result >> 16 & WORD_MASK);	/* Store top half of result */
+
+   /* Set CF/OF. */
+   /* Fits in a word iff bits 31..15 are all zero or all one. */
+   if ( (result & 0xffff8000) == 0 || (result & 0xffff8000) == 0xffff8000 )
+      {
+      SET_CF(0); SET_OF(0);
+      }
+   else
+      {
+      SET_CF(1); SET_OF(1);
+      }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+   /* Do NOT Set all undefined flag.
+    * Microsoft VGA Mouse relies on preserved flags
+    */
+#endif
+   *pop1 = result;	/* Return low half of result */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed multiply, 16bit = 16bit x 16bit. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IMUL16T
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst operand */
+	IU32, op2,	/* lsrc operand */
+	IU32, op3	/* rsrc operand */
+    )
+
+
+   {
+   /* Intel IMUL (two/three-operand truncating form), 16-bit: signed
+    * 16x16 multiply of op2 and op3; only the low word of the product is
+    * returned through *pop1.  CF/OF are set iff the full product does
+    * not fit in a signed word.  No high half is stored anywhere.
+    */
+   IU32 result;	/* full product, computed in 32 bits */
+
+   /* Sign extend operands to 32-bits (ie Host Size) */
+   if ( op2 & BIT15_MASK )
+      op2 |= ~WORD_MASK;
+   if ( op3 & BIT15_MASK )
+      op3 |= ~WORD_MASK;
+
+   result = op2 * op3;	/* Do operation */
+
+   /* Set CF/OF. */
+   /* Fits in a word iff bits 31..15 are all zero or all one. */
+   if ( (result & 0xffff8000) == 0 || (result & 0xffff8000) == 0xffff8000 )
+      {
+      SET_CF(0); SET_OF(0);
+      }
+   else
+      {
+      SET_CF(1); SET_OF(1);
+      }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+   /* Do NOT Set all undefined flag.
+    * Microsoft VGA Mouse relies on preserved flags
+    */
+#endif
+   *pop1 = result;	/* Return low half of result */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IMUL32
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst(low half)/lsrc operand */
+	IU32, op2	/* rsrc operand */
+    )
+
+
+   {
+   /* Intel IMUL r/m32 (one-operand form): signed 32x32 multiply via the
+    * mul64() helper.  The low dword of the 64-bit product is returned
+    * through *pop1 (the caller stores it in EAX); the high dword is
+    * written to EDX here.  CF/OF are set iff the product does not fit
+    * in a signed dword, i.e. unless the high half is the sign extension
+    * of the low half.
+    */
+   IS32 result;	/* low half of product */
+   IS32 top;	/* high half of product */
+   IBOOL is_signed = FALSE;	/* low half has its sign bit set */
+
+   mul64(&top, &result, (IS32)*pop1, (IS32)op2);	/* Do operation */
+   SET_EDX(top);	/* Store top half of result */
+
+   if ( result & BIT31_MASK )
+      is_signed = TRUE;
+
+   /* Set CF/OF. */
+   if ( top == 0 && !is_signed || top == 0xffffffff && is_signed )
+      {
+      SET_CF(0); SET_OF(0);
+      }
+   else
+      {
+      SET_CF(1); SET_OF(1);
+      }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+   /* Do NOT Set all undefined flag.
+    * Microsoft VGA Mouse relies on preserved flags
+    */
+#endif
+   *pop1 = result;	/* Return low half of result */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Signed multiply, 32bit = 32bit x 32bit. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+IMUL32T
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst operand */
+	IU32, op2,	/* lsrc operand */
+	IU32, op3	/* rsrc operand */
+    )
+
+
+   {
+   /* Intel IMUL (two/three-operand truncating form), 32-bit: signed
+    * 32x32 multiply of op2 and op3 via mul64(); only the low dword of
+    * the 64-bit product is returned through *pop1.  CF/OF are set iff
+    * the full product does not fit in a signed dword.  Unlike IMUL32,
+    * the high half is discarded (EDX untouched).
+    */
+   IS32 result;	/* low half of product */
+   IS32 top;	/* high half of product */
+   IBOOL is_signed = FALSE;	/* low half has its sign bit set */
+
+   mul64(&top, &result, (IS32)op2, (IS32)op3);	/* Do operation */
+
+   if ( result & BIT31_MASK )
+      is_signed = TRUE;
+
+   /* Set CF/OF. */
+   if ( top == 0 && !is_signed || top == 0xffffffff && is_signed )
+      {
+      SET_CF(0); SET_OF(0);
+      }
+   else
+      {
+      SET_CF(1); SET_OF(1);
+      }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+   /* Do NOT Set all undefined flag.
+    * Microsoft VGA Mouse relies on preserved flags
+    */
+#endif
+
+   *pop1 = result;	/* Return low half of result */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/imul.h b/private/mvdm/softpc.new/base/ccpu386/imul.h
new file mode 100644
index 000000000..1dc5d89c9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/imul.h
@@ -0,0 +1,51 @@
+/*
+ imul.h
+
+ Define all IMUL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)imul.h 1.4 02/09/94";
+ */
+
+IMPORT VOID IMUL8
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID IMUL16
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID IMUL16T
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IU32, op3
+
+ );
+
+IMPORT VOID IMUL32
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID IMUL32T
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IU32, op3
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/in.c b/private/mvdm/softpc.new/base/ccpu386/in.c
new file mode 100644
index 000000000..5e17b9b49
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/in.c
@@ -0,0 +1,101 @@
+/*[
+
+in.c
+
+LOCAL CHAR SccsID[]="@(#)in.c 1.8 09/27/94";
+
+IN CPU Functions.
+-----------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <in.h>
+#include <ios.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*
+ * Need to call the IO functions directly from the base arrays (just like
+ * the assembler CPU does), rather than calling inb etc., as the latter
+ * could cause a virtualisation that would end-up back here.
+ */
+
+GLOBAL VOID
+IN8
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst operand */
+	IU32, op2	/* src(port nr.) operand */
+    )
+
+
+   {
+   /* Intel IN (byte form): read one byte from I/O port op2 into *pop1.
+    * Dispatches through the Ios_inb_function table indexed by the
+    * adapter owning the port (port masked to the PC I/O address space)
+    * rather than calling inb(), to avoid re-virtualisation (see file
+    * header comment).  In PIG builds this is a no-op and *pop1 is left
+    * unchanged.
+    */
+#ifndef PIG
+   IU8 temp;	/* byte returned by the adapter handler */
+
+   (*Ios_inb_function[Ios_in_adapter_table[(IO_ADDR)op2 & (PC_IO_MEM_SIZE-1)]])
+      ((IO_ADDR)op2, &temp);
+   *pop1 = temp;
+#endif /* !PIG */
+   }
+
+GLOBAL VOID
+IN16
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst operand */
+	IU32, op2	/* src(port nr.) operand */
+    )
+
+
+   {
+   /* Intel IN (word form): read one 16-bit word from I/O port op2 into
+    * *pop1, dispatching through the Ios_inw_function adapter table (see
+    * IN8 and the file header comment).  No-op in PIG builds.
+    */
+#ifndef PIG
+   IU16 temp;	/* word returned by the adapter handler */
+
+   (*Ios_inw_function[Ios_in_adapter_table[(IO_ADDR)op2 & (PC_IO_MEM_SIZE-1)]])
+      ((IO_ADDR)op2, &temp);
+   *pop1 = temp;
+#endif /* !PIG */
+   }
+
+GLOBAL VOID
+IN32 IFN2(
+	IU32 *, pop1,	/* pntr to dst operand */
+	IU32, op2	/* src(port nr.) operand */
+    )
+{
+   /* Intel IN (dword form): read 32 bits from I/O port op2 into *pop1.
+    * On SFELLOW builds a true dword adapter handler (Ios_ind_function)
+    * exists; otherwise the dword is synthesised from two word reads at
+    * op2 and op2+2, low word first.  No-op in PIG builds.
+    */
+#ifndef PIG
+   IU32 temp;	/* intermediate read value */
+
+#ifdef SFELLOW
+   (*Ios_ind_function[Ios_in_adapter_table[(IO_ADDR)op2 &
+      (PC_IO_MEM_SIZE-1)]])
+      ((IO_ADDR)op2, &temp);
+   *pop1 = temp;
+#else
+   IN16(&temp, op2);	/* low word from op2 */
+   *pop1 = temp;
+   IN16(&temp, op2 + 2);	/* high word from op2+2 */
+   *pop1 += temp << 16;
+#endif
+#endif /* !PIG */
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/in.h b/private/mvdm/softpc.new/base/ccpu386/in.h
new file mode 100644
index 000000000..4f263a1dd
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/in.h
@@ -0,0 +1,33 @@
+/*
+ in.h
+
+ Define all IN CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)in.h 1.4 02/09/94";
+ */
+
+IMPORT VOID IN8
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID IN16
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID IN32
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/inc.c b/private/mvdm/softpc.new/base/ccpu386/inc.c
new file mode 100644
index 000000000..22a05bf82
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/inc.c
@@ -0,0 +1,66 @@
+/*[
+
+inc.c
+
+LOCAL CHAR SccsID[]="@(#)inc.c 1.5 02/09/94";
+
+INC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <inc.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'inc'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+INC
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst/src operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+    )
+
+
+   {
+   /* Intel INC, all operand sizes: *pop1 = *pop1 + 1, masked to op_sz
+    * bits.  Sets OF/PF/ZF/SF/AF like ADD but leaves CF unchanged (the
+    * architectural distinction between INC and ADD r,1).
+    */
+   IU32 result;	/* incremented value, masked to operand size */
+   IU32 msb;	/* mask selecting the operand's sign bit */
+   IU32 op1_msb;	/* 1 if source sign bit set */
+   IU32 res_msb;	/* 1 if result sign bit set */
+
+   msb = SZ2MSB(op_sz);
+
+   /* Note: '+' binds tighter than '&', so this is (*pop1 + 1) & mask. */
+   result = *pop1 + 1 & SZ2MASK(op_sz);	/* Do operation */
+   op1_msb = (*pop1 & msb) != 0;	/* Isolate all msb's */
+   res_msb = (result & msb) != 0;
+   /* Determine flags */
+   SET_OF(!op1_msb & res_msb);	/* OF = !op1 & res */
+   /* CF left unchanged */
+   SET_PF(pf_table[result & BYTE_MASK]);
+   SET_ZF(result == 0);
+   SET_SF((result & msb) != 0);	/* SF = MSB */
+   SET_AF(((*pop1 ^ result) & BIT4_MASK) != 0);	/* AF = Bit 4 carry */
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/inc.h b/private/mvdm/softpc.new/base/ccpu386/inc.h
new file mode 100644
index 000000000..fa19500a3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/inc.h
@@ -0,0 +1,17 @@
+/*
+ inc.h
+
+ Define all INC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)inc.h 1.4 02/09/94";
+ */
+
+IMPORT VOID INC
+
+IPT2(
+ IU32 *, pop1,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/into.c b/private/mvdm/softpc.new/base/ccpu386/into.c
new file mode 100644
index 000000000..ef409231d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/into.c
@@ -0,0 +1,55 @@
+/*[
+
+into.c
+
+LOCAL CHAR SccsID[]="@(#)into.c 1.5 02/09/94";
+
+INTO CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <into.h>
+#include <c_intr.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Interrupt on Overflow */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+INTO()
+   {
+   /* Intel INTO: raise software interrupt vector 4 (overflow) iff OF is
+    * set; otherwise do nothing.  Under NTVDM, protected-mode INTO may
+    * first be offered to the host via host_swint_hook(4); if the hook
+    * handles it, no emulated interrupt is delivered.
+    */
+
+   if ( GET_OF() )
+      {
+#ifdef NTVDM
+      extern BOOL host_swint_hook IPT1(IS32, int_no);
+
+      if(GET_PE() && host_swint_hook((IS32) 4))
+          return; /* Interrupt processed by user defined handler */
+#endif
+
+      EXT = INTERNAL;	/* mark as internally generated, not hardware */
+      do_intrupt((IU16)4, TRUE, FALSE, (IU16)0);
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/into.h b/private/mvdm/softpc.new/base/ccpu386/into.h
new file mode 100644
index 000000000..d36f1f696
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/into.h
@@ -0,0 +1,11 @@
+/*
+ into.h
+
+ Define INTO CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)into.h 1.5 09/01/94";
+ */
+
+IMPORT VOID INTO IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/intx.c b/private/mvdm/softpc.new/base/ccpu386/intx.c
new file mode 100644
index 000000000..bdcd8f86e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/intx.c
@@ -0,0 +1,57 @@
+/*[
+
+intx.c
+
+LOCAL CHAR SccsID[]="@(#)intx.c 1.5 02/09/94";
+
+INT and INT3 CPU Functions.
+---------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <intx.h>
+#include <c_intr.h>
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* INTn or INT3. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+INTx
+
+IFN1(
+	IU32, op1
+    )
+
+
+   {
+   /* Intel INT n / INT3: deliver software interrupt vector op1.  Under
+    * NTVDM, protected-mode software interrupts are first offered to the
+    * host via host_swint_hook(); if the hook handles the vector, no
+    * emulated interrupt is delivered.
+    */
+#ifdef NTVDM
+   extern BOOL host_swint_hook IPT1(IS32, int_no);
+
+   if(GET_PE() && host_swint_hook((IS32) op1))
+       return; /* Interrupt processed by user defined handler */
+#endif
+
+   EXT = INTERNAL;	/* mark as internally generated, not hardware */
+   do_intrupt((IU16)op1, TRUE, FALSE, (IU16)0);
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/intx.h b/private/mvdm/softpc.new/base/ccpu386/intx.h
new file mode 100644
index 000000000..7b6d3b898
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/intx.h
@@ -0,0 +1,16 @@
+/*
+ intx.h
+
+   Define INT and INT3 (ie INTx) CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)intx.h 1.4 02/09/94";
+ */
+
+IMPORT VOID INTx
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/invd.c b/private/mvdm/softpc.new/base/ccpu386/invd.c
new file mode 100644
index 000000000..67e8fc951
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/invd.c
@@ -0,0 +1,45 @@
+/*[
+
+invd.c
+
+LOCAL CHAR SccsID[]="@(#)invd.c 1.5 02/09/94";
+
+INVD CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <invd.h>
+
+/*
+ =====================================================================
+ EXECUTION STARTS HERE.
+ =====================================================================
+ */
+
+
+#ifdef SPC486
+
+VOID
+INVD()
+   {
+   /* Intel INVD (486+): invalidate the internal cache.  The C CPU
+    * implements no cache, so this is deliberately a no-op. */
+   /*
+      If cache is implemented - then make call to flush cache.
+      flush_cache();
+   */
+   }
+
+#endif /* SPC486 */
diff --git a/private/mvdm/softpc.new/base/ccpu386/invd.h b/private/mvdm/softpc.new/base/ccpu386/invd.h
new file mode 100644
index 000000000..02418978d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/invd.h
@@ -0,0 +1,11 @@
+/*
+ invd.h
+
+ Define INVD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)invd.h 1.5 09/01/94";
+ */
+
+IMPORT VOID INVD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/invlpg.c b/private/mvdm/softpc.new/base/ccpu386/invlpg.c
new file mode 100644
index 000000000..5643baddc
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/invlpg.c
@@ -0,0 +1,49 @@
+/*[
+
+invlpg.c
+
+LOCAL CHAR SccsID[]="@(#)invlpg.c 1.5 02/09/94";
+
+INVLPG CPU Functions.
+---------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <invlpg.h>
+#include <c_tlb.h>
+
+/*
+ =====================================================================
+ EXECUTION STARTS HERE.
+ =====================================================================
+ */
+
+
+#ifdef SPC486
+
+GLOBAL VOID
+INVLPG
+
+IFN1(
+	IU32, op1	/* src operand */
+    )
+
+
+   {
+   /* Intel INVLPG (486+): invalidate the TLB entry covering the linear
+    * address op1, delegating to the emulator's TLB (c_tlb). */
+   invalidate_tlb_entry((IU32) op1);
+   }
+
+#endif /* SPC486 */
diff --git a/private/mvdm/softpc.new/base/ccpu386/invlpg.h b/private/mvdm/softpc.new/base/ccpu386/invlpg.h
new file mode 100644
index 000000000..54242f10b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/invlpg.h
@@ -0,0 +1,16 @@
+/*
+ invlpg.h
+
+ Define INVLPG CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)invlpg.h 1.4 02/09/94";
+ */
+
+IMPORT VOID INVLPG
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/iret.c b/private/mvdm/softpc.new/base/ccpu386/iret.c
new file mode 100644
index 000000000..26d8af112
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/iret.c
@@ -0,0 +1,330 @@
+/*[
+
+iret.c
+
+LOCAL CHAR SccsID[]="@(#)iret.c 1.13 1/19/95";
+
+IRET CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include CpuH
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <iret.h>
+#include <c_xfer.h>
+#include <c_tsksw.h>
+#include <c_page.h>
+#include <fault.h>
+
+
+
+/*
+ =====================================================================
+ INTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/*--------------------------------------------------------------------*/
/* Intelligent support for writing (E)FLAGS.                          */
/*--------------------------------------------------------------------*/
/* Write the popped flags image honouring the current operand size:
   a 16-bit operation goes through setFLAGS, a 32-bit operation
   through c_setEFLAGS (presumably updating the full register —
   confirm against c_reg.h). */
LOCAL VOID
set_current_FLAGS

IFN1(
	IU32, flags
   )


   {
   if ( GET_OPERAND_SIZE() == USE16 )
      setFLAGS(flags);
   else /* USE32 */
      c_setEFLAGS(flags);
   }
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/*
   IRET - Interrupt Return.

   Pops (E)IP, CS and (E)FLAGS from the stack and resumes the
   interrupted instruction stream.  Four distinct cases are handled:

      1. Real Mode / V86 Mode return.
      2. Nested task return (NT flag set) - switch back to the task
         named by the back link in the current TSS.
      3. Return to V86 Mode (VM bit set in the popped flags) - also
         pops ESP:SS and the ES/DS/FS/GS selectors.
      4. Same-task protected mode return, at the same or a lower
         privilege level (never to a higher one).

   Faults (GP/NP) abort the instruction before any state is
   committed; the "ALL SYSTEMS GO" points mark where updates begin.
*/
GLOBAL VOID
IRET()
   {
   IU16 new_cs;		/* The return destination */
   IU32 new_ip;

   IU32 new_flags;	/* The new flags */

   IU16 back_link;	/* Task Return variables */
   IU32 tss_descr_addr;

   ISM32 dest_type;	/* category for destination */
   ISM32 privilege;	/* return privilege level */

   IU32 cs_descr_addr;	/* code segment descriptor address */
   CPU_DESCR cs_entry;	/* code segment descriptor entry */

   IU16 new_ss;		/* The new stack */
   IU32 new_sp;

   IU16 new_data_selector;	/* ES,DS,FS,GS selector */

   IU32 ss_descr_addr;	/* stack segment descriptor address */
   CPU_DESCR ss_entry;	/* stack segment descriptor entry */

   if ( GET_PE() == 0 || GET_VM() == 1 )
      {
      /* Real Mode or V86 Mode */

      /* must have (E)IP:CS:(E)FLAGS on stack */
      validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_3);

      /* retrieve return destination and flags from stack */
      new_ip = tpop(STACK_ITEM_1, NULL_BYTE_OFFSET);
      new_cs = tpop(STACK_ITEM_2, NULL_BYTE_OFFSET);
      new_flags = tpop(STACK_ITEM_3, NULL_BYTE_OFFSET);

#ifdef TAKE_REAL_MODE_LIMIT_FAULT

      /* do ip limit check */
      if ( new_ip > GET_CS_LIMIT() )
	 GP((IU16)0, FAULT_IRET_RM_CS_LIMIT);

#else /* TAKE_REAL_MODE_LIMIT_FAULT */

      /* The Soft486 EDL CPU does not take Real Mode limit failures.
       * Since the Ccpu486 is used as a "reference" cpu we wish it
       * to behave as a C version of the EDL Cpu rather than as a C
       * version of a i486.
       */

#endif /* TAKE_REAL_MODE_LIMIT_FAULT */

      /* ALL SYSTEMS GO */

      load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
      SET_EIP(new_ip);

      /* discard the three popped items */
      change_SP((IS32)NR_ITEMS_3);

      set_current_FLAGS(new_flags);

      return;
      }

   /* PROTECTED MODE */

   /* look for nested return, ie return to another task */
   if ( GET_NT() == 1 )
      {
      /* NESTED RETURN - get old TSS */
      back_link = spr_read_word(GET_TR_BASE());
      (VOID)validate_TSS(back_link, &tss_descr_addr, TRUE);
      switch_tasks(RETURNING, NOT_NESTING, back_link, tss_descr_addr,
		   GET_EIP());

      /* limit check new IP (now in new task) */
      if ( GET_EIP() > GET_CS_LIMIT() )
	 GP((IU16)0, FAULT_IRET_PM_TASK_CS_LIMIT);

      return;
      }

   /* SAME TASK RETURN */

   /* must have (E)IP:CS:(E)FLAGS on stack */
   validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_3);

   /* retrieve return destination from stack */
   new_ip = tpop(STACK_ITEM_1, NULL_BYTE_OFFSET);
   new_cs = tpop(STACK_ITEM_2, NULL_BYTE_OFFSET);
   new_flags = tpop(STACK_ITEM_3, NULL_BYTE_OFFSET);

   /* only CPL 0 may switch the processor into V86 mode */
   if ( GET_CPL() != 0 )
      new_flags = new_flags & ~BIT17_MASK;   /* Clear new VM */

   if ( new_flags & BIT17_MASK )   /* VM bit set? */
      {
      /*
	 Return to V86 Mode. Stack holds:-

		 ===========
		 |   EIP   |
		 |    | CS |
		 | EFLAGS  |
		 |   ESP   |
		 |    | SS |
		 |    | ES |
		 |    | DS |
		 |    | FS |
		 |    | GS |
		 ===========
       */

      validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_9);

      /* Check Instruction Pointer valid (V86 CS limit is 64k). */
      if ( new_ip > (IU32)0xffff )
	 GP((IU16)0, FAULT_IRET_VM_CS_LIMIT);

      /* ALL SYSTEMS GO */
      c_setEFLAGS(new_flags);   /* ensure VM set before segment loads */

      SET_CPL(3);   /* V86 privilege level */
      load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
      SET_CS_LIMIT(0xffff);

      SET_EIP(new_ip);

      /* Retrieve new stack ESP:SS */
      new_sp = tpop(STACK_ITEM_4, NULL_BYTE_OFFSET);
      new_ss = tpop(STACK_ITEM_5, NULL_BYTE_OFFSET);

      /* Retrieve and set up new data selectors */
      new_data_selector = tpop(STACK_ITEM_6, NULL_BYTE_OFFSET);
      load_data_seg(ES_REG, new_data_selector);

      new_data_selector = tpop(STACK_ITEM_7, NULL_BYTE_OFFSET);
      load_data_seg(DS_REG, new_data_selector);

      new_data_selector = tpop(STACK_ITEM_8, NULL_BYTE_OFFSET);
      load_data_seg(FS_REG, new_data_selector);

      new_data_selector = tpop(STACK_ITEM_9, NULL_BYTE_OFFSET);
      load_data_seg(GS_REG, new_data_selector);

      /* Set up new stack */
      load_stack_seg(new_ss);
      set_current_SP(new_sp);

      /* Set up pseudo descriptors */
      load_pseudo_descr(SS_REG);
      load_pseudo_descr(DS_REG);
      load_pseudo_descr(ES_REG);
      load_pseudo_descr(FS_REG);
      load_pseudo_descr(GS_REG);

      return;
      }

   /* decode action and further check stack */
   privilege = GET_SELECTOR_RPL(new_cs);
   if ( privilege < GET_CPL() )
      {
      GP(new_cs, FAULT_IRET_CS_ACCESS_1);   /* you can't get to higher privilege */
      }
   else if ( privilege == GET_CPL() )
      {
      dest_type = SAME_LEVEL;
      }
   else
      {
      /* going to lower privilege */
      /* must have (E)IP:CS, (E)FLAGS, (E)SP:SS on stack */
      validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_5);
      dest_type = LOWER_PRIVILEGE;
      }

   if ( selector_outside_GDT_LDT(new_cs, &cs_descr_addr) )
      GP(new_cs, FAULT_IRET_SELECTOR);

   /* check type, access and presence of return addr */

   /* load descriptor */
   read_descriptor_linear(cs_descr_addr, &cs_entry);

   /* must be a code segment */
   switch ( descriptor_super_type(cs_entry.AR) )
      {
   case CONFORM_NOREAD_CODE:
   case CONFORM_READABLE_CODE:
      /* access check requires DPL <= return RPL */
      /* note that this even true when changing to outer rings - despite
	 what it says in the 80286 & i486 PRMs - this has been verified on
	 a real 80386 & i486 - Wayne 18th May 1994 */
      if ( GET_AR_DPL(cs_entry.AR) > privilege )
	 GP(new_cs, FAULT_IRET_ACCESS_2);
      break;

   case NONCONFORM_NOREAD_CODE:
   case NONCONFORM_READABLE_CODE:
      /* access check requires DPL == return RPL */
      if ( GET_AR_DPL(cs_entry.AR) != privilege )
	 GP(new_cs, FAULT_IRET_ACCESS_3);
      break;

   default:
      GP(new_cs, FAULT_IRET_BAD_SEG_TYPE);
      }

   if ( GET_AR_P(cs_entry.AR) == NOT_PRESENT )
      NP(new_cs, FAULT_IRET_NP_CS);

   /* action the target */
   switch ( dest_type )
      {
   case SAME_LEVEL:
      /* do ip limit checking */
      if ( new_ip > cs_entry.limit )
	 GP((IU16)0, FAULT_IRET_PM_CS_LIMIT_1);

      /* ALL SYSTEMS GO */

      load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
      SET_EIP(new_ip);

      change_SP((IS32)NR_ITEMS_3);

      set_current_FLAGS(new_flags);
      break;

   case LOWER_PRIVILEGE:
      /* check new stack */
      new_ss = tpop(STACK_ITEM_5, NULL_BYTE_OFFSET);
      check_SS(new_ss, privilege, &ss_descr_addr, &ss_entry);

      /* do ip limit checking */
      if ( new_ip > cs_entry.limit )
	 GP((IU16)0, FAULT_IRET_PM_CS_LIMIT_2);

      /* ALL SYSTEMS GO */

      load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
      SET_EIP(new_ip);

      set_current_FLAGS(new_flags);

      new_sp = tpop(STACK_ITEM_4, NULL_BYTE_OFFSET);
      load_SS_cache(new_ss, ss_descr_addr, &ss_entry);
      if ( GET_OPERAND_SIZE() == USE16 )
	 SET_SP (new_sp);
      else
	 SET_ESP (new_sp);

      SET_CPL(privilege);

      /* finally re-validate DS and ES segments */
      load_data_seg_new_privilege(DS_REG);
      load_data_seg_new_privilege(ES_REG);
      load_data_seg_new_privilege(FS_REG);
      load_data_seg_new_privilege(GS_REG);
      break;
      }
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/iret.h b/private/mvdm/softpc.new/base/ccpu386/iret.h
new file mode 100644
index 000000000..8a786bbaf
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/iret.h
@@ -0,0 +1,11 @@
+/*
+ iret.h
+
+ Define IRET CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)iret.h 1.5 09/01/94";
+ */
+
+IMPORT VOID IRET IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/jcxz.c b/private/mvdm/softpc.new/base/ccpu386/jcxz.c
new file mode 100644
index 000000000..a52627679
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/jcxz.c
@@ -0,0 +1,60 @@
+/*[
+
+jcxz.c
+
+LOCAL CHAR SccsID[]="@(#)jcxz.c 1.5 02/09/94";
+
+JCXZ CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <jcxz.h>
+#include <c_xfer.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+JCXZ
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_CX() == 0 )
+ update_relative_ip(rel_offset);
+ }
+
+GLOBAL VOID
+JECXZ
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_ECX() == 0 )
+ update_relative_ip(rel_offset);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/jcxz.h b/private/mvdm/softpc.new/base/ccpu386/jcxz.h
new file mode 100644
index 000000000..32c0c9f6b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/jcxz.h
@@ -0,0 +1,23 @@
+/*
+ jcxz.h
+
+ Define all JCXZ CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)jcxz.h 1.4 02/09/94";
+ */
+
+IMPORT VOID JCXZ
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JECXZ
+
+IPT1(
+ IU32, rel_offset
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/jmp.c b/private/mvdm/softpc.new/base/ccpu386/jmp.c
new file mode 100644
index 000000000..5df44e451
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/jmp.c
@@ -0,0 +1,198 @@
+/*[
+
+jmp.c
+
+LOCAL CHAR SccsID[]="@(#)jmp.c 1.10 01/19/95";
+
+JMP CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <jmp.h>
+#include <c_xfer.h>
+#include <c_tsksw.h>
+#include <fault.h>
+
+#define TAKE_PROT_MODE_LIMIT_FAULT
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Process far jmps.                                                  */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* op1[0] holds the target offset, op1[1] the target CS selector.
   In Real/V86 mode the segment cache is loaded directly; in
   protected mode validate_far_dest decodes the selector (which may
   be a code segment, call gate or TSS) and the result is either a
   task switch (NEW_TASK) or a same-level transfer.  Far JMP can
   never change privilege level, hence no LOWER/HIGHER cases here. */
GLOBAL VOID
JMPF
#ifdef ANSI
   (
   IU32 op1[2]	/* offset:segment pointer */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   IU16 new_cs;		/* The destination */
   IU32 new_ip;

   IU32 descr_addr;	/* cs descriptor address and entry */
   CPU_DESCR entry;

   ISM32 dest_type;	/* category for destination */
   IU8 count;		/* dummy for call gate count */

   new_cs = op1[1];
   new_ip = op1[0];

   if ( GET_PE() == 0 || GET_VM() == 1 )
      {
      /* Real Mode or V86 Mode */

#ifdef TAKE_REAL_MODE_LIMIT_FAULT

      /*
	 Although the 386 book says a 16-bit operand should be AND'ed
	 with 0x0000ffff, a 16-bit operand is never fetched with the
	 top bits dirty anyway, so we don't AND here.
       */
      if ( new_ip > GET_CS_LIMIT() )
	 GP((IU16)0, FAULT_JMPF_RM_CS_LIMIT);

#else /* TAKE_REAL_MODE_LIMIT_FAULT */

      /* The Soft486 EDL CPU does not take Real Mode limit failures.
       * Since the Ccpu486 is used as a "reference" cpu we wish it
       * to behave as a C version of the EDL Cpu rather than as a C
       * version of a i486.
       */

#endif /* TAKE_REAL_MODE_LIMIT_FAULT */

      load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
      SET_EIP(new_ip);
      }
   else
      {
      /* Protected Mode */

      /* decode and check final destination */
      validate_far_dest(&new_cs, &new_ip, &descr_addr, &count,
			&dest_type, JMP_ID);

      /* action possible types of target */
      switch ( dest_type )
	 {
      case NEW_TASK:
	 switch_tasks(NOT_RETURNING, NOT_NESTING, new_cs, descr_addr, GET_EIP());

	 /* limit check new IP (now in new task) */
	 if ( GET_EIP() > GET_CS_LIMIT() )
	    GP((IU16)0, FAULT_JMPF_TASK_CS_LIMIT);

	 break;

      case SAME_LEVEL:
	 read_descriptor_linear(descr_addr, &entry);

	 /* do limit checking */
	 if ( new_ip > entry.limit )
	    GP((IU16)0, FAULT_JMPF_PM_CS_LIMIT);

	 /* stamp new selector with CPL */
	 SET_SELECTOR_RPL(new_cs, GET_CPL());
	 load_CS_cache(new_cs, descr_addr, &entry);
	 SET_EIP(new_ip);
	 break;
	 }
      }
   }
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* jump near indirect                                                 */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Transfer to an absolute offset within the current code segment.
   The limit-check policy is compile-time selectable so the C CPU
   can match the EDL reference CPU rather than real silicon. */
GLOBAL VOID
JMPN

IFN1(
	IU32, offset
   )


   {
   /*
      Although the 386 book says a 16-bit operand should be AND'ed
      with 0x0000ffff, a 16-bit operand is never fetched with the
      top bits dirty anyway, so we don't AND here.
    */

   /* do ip limit check */
#ifdef TAKE_REAL_MODE_LIMIT_FAULT

   if ( offset > GET_CS_LIMIT() )
      GP((IU16)0, FAULT_JMPN_RM_CS_LIMIT);

#else /* TAKE_REAL_MODE_LIMIT_FAULT */

   /* The Soft486 EDL CPU does not take Real Mode limit failures.
    * Since the Ccpu486 is used as a "reference" cpu we wish it
    * to behave as a C version of the EDL Cpu rather than as a C
    * version of a i486.
    */

#ifdef TAKE_PROT_MODE_LIMIT_FAULT

   /* Protected-mode limit faults ARE taken (enabled at top of file). */
   if ( GET_PE() == 1 && GET_VM() == 0 )
      {
      if ( offset > GET_CS_LIMIT() )
	 GP((IU16)0, FAULT_JMPN_PM_CS_LIMIT);
      }

#endif /* TAKE_PROT_MODE_LIMIT_FAULT */

   /* The Soft486 EDL CPU does not take Protected Mode limit failures
    * for the instructions with relative offsets, Jxx, LOOPxx, JCXZ,
    * JMP rel and CALL rel, or instructions with near offsets,
    * JMP near and CALL near.
    * Since the Ccpu486 is used as a "reference" cpu we wish it
    * to behave as a C version of the EDL Cpu rather than as a C
    * version of a i486.
    */

#endif /* TAKE_REAL_MODE_LIMIT_FAULT */

   SET_EIP(offset);
   }
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* jump near relative                                                 */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Unconditional relative jump: delegate the (E)IP adjustment and any
   limit handling to update_relative_ip. */
GLOBAL VOID
JMPR

IFN1(
	IU32, rel_offset
   )


   {
   update_relative_ip(rel_offset);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/jmp.h b/private/mvdm/softpc.new/base/ccpu386/jmp.h
new file mode 100644
index 000000000..0ab6d14c7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/jmp.h
@@ -0,0 +1,30 @@
+/*
+ jmp.h
+
+ Define all JMP CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)jmp.h 1.4 02/09/94";
+ */
+
+IMPORT VOID JMPF
+
+IPT1(
+ IU32, op1[2]
+
+ );
+
+IMPORT VOID JMPN
+
+IPT1(
+ IU32, offset
+
+ );
+
+IMPORT VOID JMPR
+
+IPT1(
+ IU32, rel_offset
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/jxx.c b/private/mvdm/softpc.new/base/ccpu386/jxx.c
new file mode 100644
index 000000000..214317b17
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/jxx.c
@@ -0,0 +1,290 @@
+/*[
+
+jxx.c
+
+LOCAL CHAR SccsID[]="@(#)jxx.c 1.5 02/09/94";
+
+Jxx CPU Functions (Conditional Jumps).
+--------------------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <jxx.h>
+#include <c_xfer.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Below (CF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JB
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_CF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Below or Equal (CF=1 || ZF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JBE
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_CF() || GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Less (SF != OF) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JL
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_SF() != GET_OF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Less or Equal (ZF=1 || (SF != OF)) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JLE
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_SF() != GET_OF() || GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Below (CF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNB
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( !GET_CF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Below or Equal (CF=0 && ZF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNBE
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( !GET_CF() && !GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Less (SF==OF) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNL
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_SF() == GET_OF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Less or Equal (ZF=0 && (SF==OF)) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNLE
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_SF() == GET_OF() && !GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Overflow (OF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNO
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( !GET_OF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Parity (PF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNP
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( !GET_PF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Sign (SF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNS
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( !GET_SF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Not Zero (ZF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JNZ
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( !GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Overflow (OF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JO
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_OF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Parity (PF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JP
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_PF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Sign (SF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JS
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_SF() )
+ update_relative_ip(rel_offset);
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Jump if Zero (ZF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+JZ
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ if ( GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/jxx.h b/private/mvdm/softpc.new/base/ccpu386/jxx.h
new file mode 100644
index 000000000..3057c8ac0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/jxx.h
@@ -0,0 +1,121 @@
+/*
+ jxx.h
+
+ Define all Jxx CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)jxx.h 1.4 02/09/94";
+ */
+
+IMPORT VOID JB
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JBE
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JL
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JLE
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNB
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNBE
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNL
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNLE
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNO
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNP
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNS
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JNZ
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JO
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JP
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JS
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID JZ
+
+IPT1(
+ IU32, rel_offset
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lahf.c b/private/mvdm/softpc.new/base/ccpu386/lahf.c
new file mode 100644
index 000000000..7c753b28a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lahf.c
@@ -0,0 +1,46 @@
+/*[
+
+lahf.c
+
+LOCAL CHAR SccsID[]="@(#)lahf.c 1.5 02/09/94";
+
+LAHF CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lahf.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+LAHF()
+ {
+ IU32 temp;
+
+ /* 7 6 5 4 3 2 1 0 */
+ /* set AH = <SF><ZF>< 0><AF>< 0><PF>< 1><CF> */
+
+ temp = GET_SF() << 7 | GET_ZF() << 6 | GET_AF() << 4 | GET_PF() << 2 |
+ GET_CF() | 0x2;
+ SET_AH(temp);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lahf.h b/private/mvdm/softpc.new/base/ccpu386/lahf.h
new file mode 100644
index 000000000..639f06c91
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lahf.h
@@ -0,0 +1,11 @@
+/*
+ lahf.h
+
+ Define all LAHF CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lahf.h 1.5 09/01/94";
+ */
+
+IMPORT VOID LAHF IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/lar.c b/private/mvdm/softpc.new/base/ccpu386/lar.c
new file mode 100644
index 000000000..a41a20d3b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lar.c
@@ -0,0 +1,101 @@
+/*[
+
+lar.c
+
+LOCAL CHAR SccsID[]="@(#)lar.c 1.5 02/09/94";
+
+LAR CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lar.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* LAR - Load Access Rights.

   If the selector in op2 denotes a descriptor that is visible at the
   current privilege level, store its access-rights byte (shifted into
   bits 8..15, matching the hardware result format) in *pop1 and set
   ZF; otherwise clear ZF and leave *pop1 untouched. */
GLOBAL VOID
LAR

IFN2(
	IU32 *, pop1,	/* pntr to dst operand */
	IU32, op2	/* src operand */
   )


   {
   BOOL loadable = FALSE;
   IU32 descr_addr;
   CPU_DESCR entry;

   if ( !selector_outside_GDT_LDT((IU16)op2, &descr_addr) )
      {
      /* read descriptor from memory */
      read_descriptor_linear(descr_addr, &entry);

      switch ( descriptor_super_type(entry.AR) )
	 {
      case INVALID:
	 break;   /* never loaded */

      case CONFORM_NOREAD_CODE:
      case CONFORM_READABLE_CODE:
	 loadable = TRUE;   /* always loadable */
	 break;

      case INTERRUPT_GATE:
      case TRAP_GATE:
      case XTND_AVAILABLE_TSS:
      case XTND_BUSY_TSS:
      case XTND_CALL_GATE:
      case XTND_INTERRUPT_GATE:
      case XTND_TRAP_GATE:
      case AVAILABLE_TSS:
      case LDT_SEGMENT:
      case BUSY_TSS:
      case CALL_GATE:
      case TASK_GATE:
      case EXPANDUP_READONLY_DATA:
      case EXPANDUP_WRITEABLE_DATA:
      case EXPANDDOWN_READONLY_DATA:
      case EXPANDDOWN_WRITEABLE_DATA:
      case NONCONFORM_NOREAD_CODE:
      case NONCONFORM_READABLE_CODE:
	 /* access depends on privilege, it is required that
	    DPL >= CPL and DPL >= RPL */
	 if ( GET_AR_DPL(entry.AR) >= GET_CPL() &&
	      GET_AR_DPL(entry.AR) >= GET_SELECTOR_RPL(op2) )
	    loadable = TRUE;
	 break;
	 }
      }

   if ( loadable )
      {
      /* Give em the access rights, in a suitable format */
      *pop1 = (IU32)entry.AR << 8;
      SET_ZF(1);
      }
   else
      {
      SET_ZF(0);
      }
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lar.h b/private/mvdm/softpc.new/base/ccpu386/lar.h
new file mode 100644
index 000000000..d7c97c463
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lar.h
@@ -0,0 +1,17 @@
+/*
+ lar.h
+
+ Define all LAR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lar.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LAR
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lea.c b/private/mvdm/softpc.new/base/ccpu386/lea.c
new file mode 100644
index 000000000..3f6a32869
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lea.c
@@ -0,0 +1,46 @@
+/*[
+
+lea.c
+
+LOCAL CHAR SccsID[]="@(#)lea.c 1.5 02/09/94";
+
+LEA CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lea.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
GLOBAL VOID
LEA

IFN2(
	IU32 *, pop1,	/* pntr to dst operand */
	IU32, op2	/* src(address) operand */
   )


   {
   /* The effective address was already computed by the decoder;
      LEA just stores it in the destination register. */
   *pop1 = op2;
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lea.h b/private/mvdm/softpc.new/base/ccpu386/lea.h
new file mode 100644
index 000000000..95b0e9065
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lea.h
@@ -0,0 +1,17 @@
+/*
+ lea.h
+
+ Define all LEA CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lea.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LEA
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/leave.c b/private/mvdm/softpc.new/base/ccpu386/leave.c
new file mode 100644
index 000000000..c717c1672
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/leave.c
@@ -0,0 +1,65 @@
+/*[
+
+leave.c
+
+LOCAL CHAR SccsID[]="@(#)leave.c 1.5 02/09/94";
+
+LEAVE CPU functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <leave.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/* LEAVE (16-bit operand size): SP <- BP, then pop BP.
   Tears down the stack frame built by ENTER. */
GLOBAL VOID
LEAVE16()
   {
   IU32 new_bp;

   /* check operand exists */
   validate_stack_exists(USE_BP, (ISM32)NR_ITEMS_1);

   /* all ok - we can safely update the stack pointer */
   set_current_SP(GET_EBP());

   /* and update frame pointer (only low 16 bits via SET_BP) */
   new_bp = spop();
   SET_BP(new_bp);
   }
+
/* LEAVE (32-bit operand size): ESP <- EBP, then pop EBP. */
GLOBAL VOID
LEAVE32()
   {
   IU32 new_bp;

   /* check operand exists */
   validate_stack_exists(USE_BP, (ISM32)NR_ITEMS_1);

   /* all ok - we can safely update the stack pointer */
   set_current_SP(GET_EBP());

   /* and update frame pointer (full 32 bits via SET_EBP) */
   new_bp = spop();
   SET_EBP(new_bp);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/leave.h b/private/mvdm/softpc.new/base/ccpu386/leave.h
new file mode 100644
index 000000000..62fb0abd1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/leave.h
@@ -0,0 +1,13 @@
+/*
+ leave.h
+
+ Define all LEAVE CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)leave.h 1.5 09/01/94";
+ */
+
+IMPORT VOID LEAVE16 IPT0();
+
+IMPORT VOID LEAVE32 IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/lgdt.c b/private/mvdm/softpc.new/base/ccpu386/lgdt.c
new file mode 100644
index 000000000..95ab828cd
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lgdt.c
@@ -0,0 +1,68 @@
+/*[
+
+lgdt.c
+
+LOCAL CHAR SccsID[]="@(#)lgdt.c 1.5 02/09/94";
+
+LGDT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lgdt.h>
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* LGDT (16-bit operand size): load the GDT register from the
   limit:base pair in op1; only 24 bits of the base are used. */
GLOBAL VOID
LGDT16
#ifdef ANSI
   (
   IU32 op1[2]	/* src(limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   SET_STAR_LIMIT(GDT_REG, op1[0]);
   SET_STAR_BASE(GDT_REG, op1[1] & 0xffffff);   /* store 24-bit base */
#ifdef DOUBLE_CPU
   /* GDT change invalidates the fast CPU's cached state */
   double_switch_to(SOFT_CPU);
#endif
   }
+
/* LGDT (32-bit operand size): load the GDT register from the
   limit:base pair in op1; the full 32-bit base is used. */
GLOBAL VOID
LGDT32
#ifdef ANSI
   (
   IU32 op1[2]	/* src(limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   SET_STAR_LIMIT(GDT_REG, op1[0]);
   SET_STAR_BASE(GDT_REG, op1[1]);
#ifdef DOUBLE_CPU
   /* GDT change invalidates the fast CPU's cached state */
   double_switch_to(SOFT_CPU);
#endif
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lgdt.h b/private/mvdm/softpc.new/base/ccpu386/lgdt.h
new file mode 100644
index 000000000..b331d5ad0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lgdt.h
@@ -0,0 +1,23 @@
+/*
+ lgdt.h
+
+ Define all LGDT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lgdt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LGDT16
+
+IPT1(
+ IU32, op1[2]
+
+ );
+
+IMPORT VOID LGDT32
+
+IPT1(
+ IU32, op1[2]
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lidt.c b/private/mvdm/softpc.new/base/ccpu386/lidt.c
new file mode 100644
index 000000000..0943553ba
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lidt.c
@@ -0,0 +1,63 @@
+/*[
+
+lidt.c
+
+LOCAL CHAR SccsID[]="@(#)lidt.c 1.5 02/09/94";
+
+LIDT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lidt.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* LIDT (16-bit operand size): load the IDT register from the
   limit:base pair in op1; only 24 bits of the base are used. */
GLOBAL VOID
LIDT16
#ifdef ANSI
   (
   IU32 op1[2]	/* src(limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   SET_STAR_LIMIT(IDT_REG, op1[0]);
   SET_STAR_BASE(IDT_REG, op1[1] & 0xffffff);   /* store 24-bit base */
   }
+
/* LIDT (32-bit operand size): load the IDT register from the
   limit:base pair in op1; the full 32-bit base is used. */
GLOBAL VOID
LIDT32
#ifdef ANSI
   (
   IU32 op1[2]	/* src(limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   SET_STAR_LIMIT(IDT_REG, op1[0]);
   SET_STAR_BASE(IDT_REG, op1[1]);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lidt.h b/private/mvdm/softpc.new/base/ccpu386/lidt.h
new file mode 100644
index 000000000..fa026bc74
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lidt.h
@@ -0,0 +1,23 @@
+/*
+ lidt.h
+
+ Define all LIDT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lidt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LIDT16
+
+IPT1(
+ IU32, op1[2]
+
+ );
+
+IMPORT VOID LIDT32
+
+IPT1(
+ IU32, op1[2]
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lldt.c b/private/mvdm/softpc.new/base/ccpu386/lldt.c
new file mode 100644
index 000000000..398ebc583
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lldt.c
@@ -0,0 +1,87 @@
+/*[
+
+lldt.c
+
+LOCAL CHAR SccsID[]="@(#)lldt.c 1.8 01/19/95";
+
+LLDT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lldt.h>
+#include <fault.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+LLDT
+
+IFN1(
+ IU32, op1
+ )
+
+
+ {
+ IU16 selector;
+ IU32 descr_addr;
+ CPU_DESCR entry;
+
+ if ( selector_is_null(selector = op1) )
+ {
+#ifndef DONT_CLEAR_LDTR_ON_INVALID
+ SET_LDT_SELECTOR(selector);
+#else
+ SET_LDT_SELECTOR(0); /* just invalidate LDT */
+#endif /* DONT_CLEAR_LDTR_ON_INVALID */
+#ifndef DONT_CLEAR_LDT_BL_ON_INVALID
+ /* Make the C-CPU behave like the assembler CPU with respect
+ * to LDT base and limit when the selector is set to NULL
+ * - there is no way for an Intel app to determine the values
+ * of the LDT base&limit so this will not affect the emulation
+ */
+ SET_LDT_BASE(0);
+ SET_LDT_LIMIT(0);
+#endif /* DONT_CLEAR_LDT_BL_ON_INVALID */
+ }
+ else
+ {
+ /* must be in GDT */
+ if ( selector_outside_GDT(selector, &descr_addr) )
+ GP(selector, FAULT_LLDT_SELECTOR);
+
+ read_descriptor_linear(descr_addr, &entry);
+
+ if ( descriptor_super_type(entry.AR) != LDT_SEGMENT )
+ GP(selector, FAULT_LLDT_NOT_LDT);
+
+ /* must be present */
+ if ( GET_AR_P(entry.AR) == NOT_PRESENT )
+ NP(selector, FAULT_LLDT_NP);
+
+ /* all OK - load up register */
+
+ SET_LDT_SELECTOR(selector);
+ SET_LDT_BASE(entry.base);
+ SET_LDT_LIMIT(entry.limit);
+ }
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lldt.h b/private/mvdm/softpc.new/base/ccpu386/lldt.h
new file mode 100644
index 000000000..35db351cc
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lldt.h
@@ -0,0 +1,16 @@
+/*
+ lldt.h
+
+ Define all LLDT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lldt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LLDT
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lmsw.c b/private/mvdm/softpc.new/base/ccpu386/lmsw.c
new file mode 100644
index 000000000..09dfcd7b5
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lmsw.c
@@ -0,0 +1,56 @@
+/*[
+
+lmsw.c
+
+LOCAL CHAR SccsID[]="@(#)lmsw.c 1.5 02/09/94";
+
+LMSW CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lmsw.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+LMSW
+
+IFN1(
+ IU32, op1 /* src operand */
+ )
+
+
+ {
+ IU32 temp;
+ IU32 no_clear = 0xfffffff1; /* can't clear top 28-bits or PE */
+ IU32 no_set = 0xfffffff0; /* can't set top 28-bits */
+
+ /* kill off bits which can not be set */
+ op1 = op1 & ~no_set;
+
+ /* retain bits which can not be cleared */
+ temp = GET_CR(CR_STAT) & no_clear;
+
+ /* thus update only the bits allowed */
+ SET_CR(CR_STAT, temp | op1);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lmsw.h b/private/mvdm/softpc.new/base/ccpu386/lmsw.h
new file mode 100644
index 000000000..e1d16b8ae
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lmsw.h
@@ -0,0 +1,16 @@
+/*
+ lmsw.h
+
+ Define all LMSW CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lmsw.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LMSW
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/localfm.c b/private/mvdm/softpc.new/base/ccpu386/localfm.c
new file mode 100644
index 000000000..6e84bc46c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/localfm.c
@@ -0,0 +1,9 @@
+#include <insignia.h>
+#include <host_def.h>
+#include <host_inc.h>
+#include CpuH
+
+IHP Gdp;
+struct CpuVector Cpu;
+struct SasVector Sas;
+struct VideoVector Video;
diff --git a/private/mvdm/softpc.new/base/ccpu386/loopxx.c b/private/mvdm/softpc.new/base/ccpu386/loopxx.c
new file mode 100644
index 000000000..d7dbf6eb1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/loopxx.c
@@ -0,0 +1,118 @@
+/*[
+
+loopxx.c
+
+LOCAL CHAR SccsID[]="@(#)loopxx.c 1.5 02/09/94";
+
+LOOPxx CPU Functions.
+---------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <loopxx.h>
+#include <c_xfer.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+LOOP16
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ SET_CX(GET_CX() - 1);
+ if ( GET_CX() != 0 )
+ update_relative_ip(rel_offset);
+ }
+
+GLOBAL VOID
+LOOP32
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ SET_ECX(GET_ECX() - 1);
+ if ( GET_ECX() != 0 )
+ update_relative_ip(rel_offset);
+ }
+
+GLOBAL VOID
+LOOPE16
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ SET_CX(GET_CX() - 1);
+ if ( GET_CX() != 0 && GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+GLOBAL VOID
+LOOPE32
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ SET_ECX(GET_ECX() - 1);
+ if ( GET_ECX() != 0 && GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+GLOBAL VOID
+LOOPNE16
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ SET_CX(GET_CX() - 1);
+ if ( GET_CX() != 0 && !GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
+
+GLOBAL VOID
+LOOPNE32
+
+IFN1(
+ IU32, rel_offset
+ )
+
+
+ {
+ SET_ECX(GET_ECX() - 1);
+ if ( GET_ECX() != 0 && !GET_ZF() )
+ update_relative_ip(rel_offset);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/loopxx.h b/private/mvdm/softpc.new/base/ccpu386/loopxx.h
new file mode 100644
index 000000000..74a8124b2
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/loopxx.h
@@ -0,0 +1,51 @@
+/*
+ loopxx.h
+
+ Define all LOOPxx CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)loopxx.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LOOP16
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID LOOP32
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID LOOPE16
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID LOOPE32
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID LOOPNE16
+
+IPT1(
+ IU32, rel_offset
+
+ );
+
+IMPORT VOID LOOPNE32
+
+IPT1(
+ IU32, rel_offset
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lsl.c b/private/mvdm/softpc.new/base/ccpu386/lsl.c
new file mode 100644
index 000000000..8a3e91546
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lsl.c
@@ -0,0 +1,101 @@
+/*[
+
+lsl.c
+
+LOCAL CHAR SccsID[]="@(#)lsl.c 1.5 02/09/94";
+
+LSL CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lsl.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+LSL
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst operand */
+ IU32, op2 /* src operand */
+ )
+
+
+ {
+ BOOL loadable = FALSE;
+ IU32 descr_addr;
+ CPU_DESCR entry;
+
+ if ( !selector_outside_GDT_LDT((IU16)op2, &descr_addr) )
+ {
+ /* read descriptor from memory */
+ read_descriptor_linear(descr_addr, &entry);
+
+ switch ( descriptor_super_type(entry.AR) )
+ {
+ case INVALID:
+ case CALL_GATE:
+ case TASK_GATE:
+ case INTERRUPT_GATE:
+ case TRAP_GATE:
+ case XTND_CALL_GATE:
+ case XTND_INTERRUPT_GATE:
+ case XTND_TRAP_GATE:
+ break; /* never loaded - don't have a limit */
+
+ case CONFORM_NOREAD_CODE:
+ case CONFORM_READABLE_CODE:
+ loadable = TRUE; /* always loadable */
+ break;
+
+ case AVAILABLE_TSS:
+ case LDT_SEGMENT:
+ case BUSY_TSS:
+ case XTND_AVAILABLE_TSS:
+ case XTND_BUSY_TSS:
+ case EXPANDUP_READONLY_DATA:
+ case EXPANDUP_WRITEABLE_DATA:
+ case EXPANDDOWN_READONLY_DATA:
+ case EXPANDDOWN_WRITEABLE_DATA:
+ case NONCONFORM_NOREAD_CODE:
+ case NONCONFORM_READABLE_CODE:
+ /* access depends on privilege, it is required that
+ DPL >= CPL and DPL >= RPL */
+ if ( GET_AR_DPL(entry.AR) >= GET_CPL() &&
+ GET_AR_DPL(entry.AR) >= GET_SELECTOR_RPL(op2) )
+ loadable = TRUE;
+ break;
+ }
+ }
+
+ if ( loadable )
+ {
+ /* Give em the limit */
+ *pop1 = entry.limit;
+ SET_ZF(1);
+ }
+ else
+ {
+ SET_ZF(0);
+ }
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lsl.h b/private/mvdm/softpc.new/base/ccpu386/lsl.h
new file mode 100644
index 000000000..aa13cc0dd
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lsl.h
@@ -0,0 +1,17 @@
+/*
+ lsl.h
+
+ Define all LSL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lsl.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LSL
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/ltr.c b/private/mvdm/softpc.new/base/ccpu386/ltr.c
new file mode 100644
index 000000000..4851a9848
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ltr.c
@@ -0,0 +1,67 @@
+/*[
+
+ltr.c
+
+LOCAL CHAR SccsID[]="@(#)ltr.c 1.5 02/09/94";
+
+LTR CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <ltr.h>
+#include <c_page.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+LTR
+
+IFN1(
+ IU32, op1 /* alleged TSS selector */
+ )
+
+
+ {
+ IU16 selector;
+ IU32 descr_addr;
+ CPU_DESCR entry;
+ ISM32 super;
+
+	/* Validate and Read descriptor info. */
+ selector = op1;
+ super = validate_TSS(selector, &descr_addr, FALSE);
+ read_descriptor_linear(descr_addr, &entry);
+
+ /* mark in memory descriptor as busy */
+ entry.AR |= BIT1_MASK;
+ spr_write_byte(descr_addr+5, (IU8)entry.AR);
+
+ /* finally load components of task register */
+ SET_TR_SELECTOR(selector);
+ SET_TR_BASE(entry.base);
+ SET_TR_LIMIT(entry.limit);
+
+ /* store busy form of TSS */
+ super |= BIT1_MASK;
+ SET_TR_AR_SUPER(super);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/ltr.h b/private/mvdm/softpc.new/base/ccpu386/ltr.h
new file mode 100644
index 000000000..39564ea45
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ltr.h
@@ -0,0 +1,16 @@
+/*
+ ltr.h
+
+ Define all LTR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)ltr.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LTR
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/lxs.c b/private/mvdm/softpc.new/base/ccpu386/lxs.c
new file mode 100644
index 000000000..7e8f22dad
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lxs.c
@@ -0,0 +1,149 @@
+/*[
+
+lxs.c
+
+LOCAL CHAR SccsID[]="@(#)lxs.c 1.5 02/09/94";
+
+LDS, LES, LFS, LGS and LSS (ie LxS) CPU Functions.
+--------------------------------------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <lxs.h>
+#include <mov.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Full Pointer to DS segment register:general register pair. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+LDS
+#ifdef ANSI
+ (
+ IU32 *pop1, /* Pntr to dst(offset) operand */
+ IU32 op2[2] /* src(offset:selector pair) operand */
+ )
+#else
+ (pop1, op2)
+ IU32 *pop1;
+ IU32 op2[2];
+#endif
+ {
+ /* load segment selector first */
+ MOV_SR((IU32)DS_REG, op2[1]);
+
+ /* then (if it works) load offset */
+ *pop1 = op2[0];
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Full Pointer to ES segment register:general register pair. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+LES
+#ifdef ANSI
+ (
+ IU32 *pop1, /* Pntr to dst(offset) operand */
+ IU32 op2[2] /* src(offset:selector pair) operand */
+ )
+#else
+ (pop1, op2)
+ IU32 *pop1;
+ IU32 op2[2];
+#endif
+ {
+ /* load segment selector first */
+ MOV_SR((IU32)ES_REG, op2[1]);
+
+ /* then (if it works) load offset */
+ *pop1 = op2[0];
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Full Pointer to FS segment register:general register pair. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+LFS
+#ifdef ANSI
+ (
+ IU32 *pop1, /* Pntr to dst(offset) operand */
+ IU32 op2[2] /* src(offset:selector pair) operand */
+ )
+#else
+ (pop1, op2)
+ IU32 *pop1;
+ IU32 op2[2];
+#endif
+ {
+ /* load segment selector first */
+ MOV_SR((IU32)FS_REG, op2[1]);
+
+ /* then (if it works) load offset */
+ *pop1 = op2[0];
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Full Pointer to GS segment register:general register pair. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+LGS
+#ifdef ANSI
+ (
+ IU32 *pop1, /* Pntr to dst(offset) operand */
+ IU32 op2[2] /* src(offset:selector pair) operand */
+ )
+#else
+ (pop1, op2)
+ IU32 *pop1;
+ IU32 op2[2];
+#endif
+ {
+ /* load segment selector first */
+ MOV_SR((IU32)GS_REG, op2[1]);
+
+ /* then (if it works) load offset */
+ *pop1 = op2[0];
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Load Full Pointer to SS segment register:general register pair. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+LSS
+#ifdef ANSI
+ (
+ IU32 *pop1, /* Pntr to dst(offset) operand */
+ IU32 op2[2] /* src(offset:selector pair) operand */
+ )
+#else
+ (pop1, op2)
+ IU32 *pop1;
+ IU32 op2[2];
+#endif
+ {
+ /* load segment selector first */
+ MOV_SR((IU32)SS_REG, op2[1]);
+
+ /* then (if it works) load offset */
+ *pop1 = op2[0];
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/lxs.h b/private/mvdm/softpc.new/base/ccpu386/lxs.h
new file mode 100644
index 000000000..f2a0c56df
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/lxs.h
@@ -0,0 +1,49 @@
+/*
+ lxs.h
+
+	Define LDS, LES, LFS, LGS and LSS (ie LxS) CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)lxs.h 1.4 02/09/94";
+ */
+
+IMPORT VOID LDS
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2[2]
+
+ );
+
+IMPORT VOID LES
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2[2]
+
+ );
+
+IMPORT VOID LFS
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2[2]
+
+ );
+
+IMPORT VOID LGS
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2[2]
+
+ );
+
+IMPORT VOID LSS
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2[2]
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/makefile b/private/mvdm/softpc.new/base/ccpu386/makefile
new file mode 100644
index 000000000..6ee4f43fa
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/makefile
@@ -0,0 +1,6 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the components of NT OS/2
+#
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/private/mvdm/softpc.new/base/ccpu386/mov.c b/private/mvdm/softpc.new/base/ccpu386/mov.c
new file mode 100644
index 000000000..b7929b455
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/mov.c
@@ -0,0 +1,239 @@
+/*[
+
+mov.c
+
+LOCAL CHAR SccsID[]="@(#)mov.c 1.12 02/13/95";
+
+MOV CPU Functions.
+------------------
+
+]*/
+
+
+#include <stdio.h>
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <mov.h>
+#include <c_tlb.h>
+#include <c_debug.h>
+#include <fault.h>
+#include <config.h> /* For C_SWITCHNPX */
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'lods'. */
+/* Generic - one size fits all 'mov'. */
+/* Generic - one size fits all 'movzx'. */
+/* Generic - one size fits all 'movs'. */
+/* Generic - one size fits all 'stos'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MOV
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst operand */
+ IU32, op2 /* src operand */
+ )
+
+
+ {
+ *pop1 = op2;
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* 'mov' to segment register. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MOV_SR
+
+IFN2(
+ IU32, op1, /* index to segment register */
+ IU32, op2 /* src operand */
+ )
+
+
+ {
+ switch ( op1 )
+ {
+ case DS_REG:
+ case ES_REG:
+ case FS_REG:
+ case GS_REG:
+ load_data_seg((ISM32)op1, (IU16)op2);
+ break;
+
+ case SS_REG:
+ load_stack_seg((IU16)op2);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+#ifdef SPC486
+#define CR0_VALID_BITS 0xe005003f
+#define CR3_VALID_BITS 0xfffff018
+#else
+#define CR0_VALID_BITS 0x8000001f
+#define CR3_VALID_BITS 0xfffff000
+#endif /* SPC486 */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* 'mov' to control register. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MOV_CR
+
+IFN2(
+ IU32, op1, /* index to control register */
+ IU32, op2 /* src operand */
+ )
+
+
+ {
+ IU32 keep_et;
+
+ /*
+ Maintain all Reserved bits as 0.
+ */
+ switch ( op1 )
+ {
+ case CR_STAT: /* system control flags */
+ /* If trying to set PG=1 and PE=0, then fault. */
+ if ( (op2 & BIT31_MASK) && !(op2 & BIT0_MASK) )
+ GP((IU16)0, FAULT_MOV_CR_PAGE_IN_RM);
+
+ /* Note ET bit is set at RESET time and remains unchanged */
+ keep_et = GET_ET();
+ SET_CR(CR_STAT, op2 & CR0_VALID_BITS);
+ SET_ET(keep_et);
+ break;
+
+ case 1: /* reserved */
+ break;
+
+ case CR_PFLA: /* page fault linear address */
+ SET_CR(CR_PFLA, op2);
+ break;
+
+ case CR_PDBR: /* page directory base register (PDBR) */
+ SET_CR(CR_PDBR, (op2 & CR3_VALID_BITS));
+ flush_tlb();
+ break;
+
+ default:
+ break;
+ }
+ }
+
+#define DR7_VALID_BITS 0xffff03ff
+#define DR6_VALID_BITS 0x0000e00f
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* 'mov' to debug register. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MOV_DR
+
+IFN2(
+ IU32, op1, /* index to debug register, (0 - 7) */
+ IU32, op2 /* src operand */
+ )
+
+
+ {
+ switch ( op1 )
+ {
+ case 0: /* Breakpoint Linear Address */
+ case 1:
+ case 2:
+ case 3:
+ SET_DR(op1, op2);
+ setup_breakpoints();
+ break;
+
+ case 4: /* Reserved */
+ case 5:
+ break;
+
+ case 6: /* Debug Status Register */
+ SET_DR(DR_DSR, (op2 & DR6_VALID_BITS));
+ break;
+
+ case 7: /* Debug Control Register */
+ SET_DR(DR_DCR, (op2 & DR7_VALID_BITS));
+ setup_breakpoints();
+ break;
+
+ default:
+ break;
+ }
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* 'mov' to test register. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MOV_TR
+
+IFN2(
+ IU32, op1, /* index to test register */
+ IU32, op2 /* src operand */
+ )
+
+
+ {
+ switch ( op1 )
+ {
+ case 0: /* Reserved */
+ case 1:
+ case 2:
+ break;
+
+ case TR_CDR: /* Cache test Data Register */
+ printf("Write to Cache Test Data Register.\n");
+ break;
+
+ case TR_CSR: /* Cache test Status Register */
+ printf("Write to Cache Test Status Register.\n");
+ break;
+
+ case TR_CCR: /* Cache test Control Register */
+ printf("Write to Cache Test Control Register.\n");
+ break;
+
+ case TR_TCR: /* Test Command Register */
+ SET_TR(TR_TCR, op2);
+ test_tlb();
+ break;
+
+ case TR_TDR: /* Test Data Register */
+ SET_TR(TR_TDR, op2);
+ break;
+
+ default:
+ break;
+ }
+ }
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/mov.h b/private/mvdm/softpc.new/base/ccpu386/mov.h
new file mode 100644
index 000000000..92f79fa4c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/mov.h
@@ -0,0 +1,49 @@
+/*
+ mov.h
+
+ Define all MOV CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)mov.h 1.4 02/09/94";
+ */
+
+IMPORT VOID MOV
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID MOV_SR /* to Segment Register */
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
+
+IMPORT VOID MOV_CR /* to Control Register */
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
+
+IMPORT VOID MOV_DR /* to Debug Register */
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
+
+IMPORT VOID MOV_TR /* to Test Register */
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/movsx.c b/private/mvdm/softpc.new/base/ccpu386/movsx.c
new file mode 100644
index 000000000..23e525e18
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/movsx.c
@@ -0,0 +1,56 @@
+/*[
+
+movsx.c
+
+LOCAL CHAR SccsID[]="@(#)movsx.c 1.5 02/09/94";
+
+MOVSX CPU Functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <movsx.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'movsx'. */
+/* NB. This function sign extends to 32-bits. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MOVSX
+
+IFN3(
+ IU32 *, pop1, /* pntr to dst/lsrc operand */
+ IU32, op2, /* rsrc operand */
+ IUM8, op_sz /* 8 or 16-bit (original rsrc operand size) */
+ )
+
+
+ {
+ if ( SZ2MSB(op_sz) & op2 ) /* sign bit set? */
+ {
+ /* or in sign extension */
+ op2 = op2 | ~SZ2MASK(op_sz);
+ }
+ *pop1 = op2;
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/movsx.h b/private/mvdm/softpc.new/base/ccpu386/movsx.h
new file mode 100644
index 000000000..e74e07b9d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/movsx.h
@@ -0,0 +1,18 @@
+/*
+ movsx.h
+
+ MOVSX CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)movsx.h 1.4 02/09/94";
+ */
+
+IMPORT VOID MOVSX
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/mul.c b/private/mvdm/softpc.new/base/ccpu386/mul.c
new file mode 100644
index 000000000..5b4b9561c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/mul.c
@@ -0,0 +1,147 @@
+/*[
+
+mul.c
+
+LOCAL CHAR SccsID[]="@(#)mul.c 1.8 11/09/94";
+
+MUL CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <mul.h>
+#include <c_mul64.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Unsigned multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MUL8
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst(low half)/lsrc operand */
+ IU32, op2 /* rsrc operand */
+ )
+
+
+ {
+ IU32 result;
+ IU32 top;
+
+ result = *pop1 * op2; /* Do operation */
+ top = result >> 8 & 0xff; /* get top 8 bits of result */
+ SET_AH(top); /* Store top half of result */
+
+ if ( top ) /* Set CF/OF */
+ {
+ SET_CF(1); SET_OF(1);
+ }
+ else
+ {
+ SET_CF(0); SET_OF(0);
+ }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+ /* Do NOT Set all undefined flag.
+ * Microsoft VGA Mouse relies on preserved flags in IMUL
+ */
+#endif
+
+ *pop1 = result; /* Return low half of result */
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Unsigned multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MUL16
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst(low half)/lsrc operand */
+ IU32, op2 /* rsrc operand */
+ )
+
+
+ {
+ IU32 result;
+ IU32 top;
+
+ result = *pop1 * op2; /* Do operation */
+ top = result >> 16 & WORD_MASK; /* get top 16 bits of result */
+ SET_DX(top); /* Store top half of result */
+
+ if ( top ) /* Set CF/OF */
+ {
+ SET_CF(1); SET_OF(1);
+ }
+ else
+ {
+ SET_CF(0); SET_OF(0);
+ }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+ /* Do NOT Set all undefined flag.
+ * Microsoft VGA Mouse relies on preserved flags in IMUL
+ */
+#endif
+
+ *pop1 = result; /* Return low half of result */
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Unsigned multiply. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+MUL32
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst(low half)/lsrc operand */
+ IU32, op2 /* rsrc operand */
+ )
+
+
+ {
+ IU32 result;
+ IU32 top;
+
+ mulu64(&top, &result, *pop1, op2); /* Do operation */
+ SET_EDX(top); /* Store top half of result */
+
+ if ( top ) /* Set CF/OF */
+ {
+ SET_CF(1); SET_OF(1);
+ }
+ else
+ {
+ SET_CF(0); SET_OF(0);
+ }
+
+#ifdef SET_UNDEFINED_MUL_FLAG
+ /* Do NOT Set all undefined flag.
+ * Microsoft VGA Mouse relies on preserved flags in IMUL
+ */
+#endif
+
+ *pop1 = result; /* Return low half of result */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/mul.h b/private/mvdm/softpc.new/base/ccpu386/mul.h
new file mode 100644
index 000000000..2199014ef
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/mul.h
@@ -0,0 +1,33 @@
+/*
+ mul.h
+
+ Define all MUL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)mul.h 1.4 02/09/94";
+ */
+
+IMPORT VOID MUL8
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID MUL16
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
+
+IMPORT VOID MUL32
+
+IPT2(
+ IU32 *, pop1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/neg.c b/private/mvdm/softpc.new/base/ccpu386/neg.c
new file mode 100644
index 000000000..02671a6bb
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/neg.c
@@ -0,0 +1,66 @@
+/*[
+
+neg.c
+
+LOCAL CHAR SccsID[]="@(#)neg.c 1.5 02/09/94";
+
+NEG CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <neg.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'neg'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+NEG
+
+IFN2(
+ IU32 *, pop1, /* pntr to dst/src operand */
+ IUM8, op_sz /* 8, 16 or 32-bit */
+ )
+
+
+ {
+ IU32 result;
+ IU32 msb;
+ IU32 op1_msb;
+ IU32 res_msb;
+
+ msb = SZ2MSB(op_sz);
+
+ result = -(*pop1) & SZ2MASK(op_sz); /* Do operation */
+ op1_msb = (*pop1 & msb) != 0; /* Isolate all msb's */
+ res_msb = (result & msb) != 0;
+ /* Determine flags */
+ SET_OF(op1_msb & res_msb); /* OF = op1 & res */
+ SET_CF(op1_msb | res_msb); /* CF = op1 | res */
+ SET_PF(pf_table[result & BYTE_MASK]);
+ SET_ZF(result == 0);
+ SET_SF((result & msb) != 0); /* SF = MSB */
+ SET_AF(((*pop1 ^ result) & BIT4_MASK) != 0); /* AF = Bit 4 carry */
+ *pop1 = result; /* Return answer */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/neg.h b/private/mvdm/softpc.new/base/ccpu386/neg.h
new file mode 100644
index 000000000..cc8b50bff
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/neg.h
@@ -0,0 +1,17 @@
+/*
+ neg.h
+
+ Define all NEG CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)neg.h 1.4 02/09/94";
+ */
+
+IMPORT VOID NEG
+
+IPT2(
+ IU32 *, pop1,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/newnpx.h b/private/mvdm/softpc.new/base/ccpu386/newnpx.h
new file mode 100644
index 000000000..92fefe90b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/newnpx.h
@@ -0,0 +1,73 @@
+/*
+ * SccsID = @(#)newnpx.h 1.4 10/06/94
+ */
+
+/* Function prototypes - everything returns void */
+IMPORT VOID F2XM1();
+IMPORT VOID FABS();
+IMPORT VOID FADD();
+IMPORT VOID FBLD();
+IMPORT VOID FBSTP();
+IMPORT VOID FCHS();
+IMPORT VOID FCLEX();
+IMPORT VOID FCOM();
+IMPORT VOID FCOS();
+IMPORT VOID FDECSTP();
+IMPORT VOID FDIV();
+IMPORT VOID FFREE();
+IMPORT VOID FILD();
+IMPORT VOID FLD();
+IMPORT VOID FINCSTP();
+IMPORT VOID FINIT();
+IMPORT VOID FIST();
+IMPORT VOID FLDCONST();
+IMPORT VOID FLDCW();
+IMPORT VOID FMUL();
+IMPORT VOID FNOP();
+IMPORT VOID FPATAN();
+IMPORT VOID FPREM();
+IMPORT VOID FPREM1();
+IMPORT VOID FPTAN();
+IMPORT VOID FRNDINT();
+IMPORT VOID FSTCW();
+IMPORT VOID FRSTOR();
+IMPORT VOID FSAVE();
+IMPORT VOID FSCALE();
+IMPORT VOID FSIN();
+IMPORT VOID FSINCOS();
+IMPORT VOID FSQRT();
+IMPORT VOID FST();
+IMPORT VOID FSTENV();
+IMPORT VOID FSTSW();
+IMPORT VOID FSTCW();
+IMPORT VOID FSUB();
+IMPORT VOID FTST();
+IMPORT VOID FXAM();
+IMPORT VOID FXCH();
+IMPORT VOID FXTRACT();
+IMPORT VOID FYL2X();
+IMPORT VOID FYL2XP1();
+
+#define SAVE_PTRS() \
+ if (!NPX_PROT_MODE) { \
+ NpxFOP = NpxInstr; \
+ } \
+ NpxFCS = GET_CS_SELECTOR(); \
+ NpxFIP = GET_EIP();
+
+#define SAVE_DPTRS() \
+ NpxFEA = m_off[0]; \
+ NpxFDS = GET_SR_SELECTOR(m_seg[0])
+
+typedef enum
+{
+FPSTACK,
+M16I,
+M32I,
+M64I,
+M32R,
+M64R,
+M80R
+} NPXOPTYPE;
+
+IMPORT IU8 FPtype;
diff --git a/private/mvdm/softpc.new/base/ccpu386/nop.c b/private/mvdm/softpc.new/base/ccpu386/nop.c
new file mode 100644
index 000000000..e140e2099
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/nop.c
@@ -0,0 +1,38 @@
+/*[
+
+nop.c
+
+LOCAL CHAR SccsID[]="@(#)nop.c 1.5 02/09/94";
+
+NOP CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <nop.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+NOP()
+ {
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/nop.h b/private/mvdm/softpc.new/base/ccpu386/nop.h
new file mode 100644
index 000000000..d360ffdc7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/nop.h
@@ -0,0 +1,11 @@
+/*
+ nop.h
+
+ Define all NOP CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)nop.h 1.5 09/01/94";
+ */
+
+IMPORT VOID NOP IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/not.c b/private/mvdm/softpc.new/base/ccpu386/not.c
new file mode 100644
index 000000000..3b7fa0fdc
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/not.c
@@ -0,0 +1,48 @@
+/*[
+
+not.c
+
+LOCAL CHAR SccsID[]="@(#)not.c 1.5 02/09/94";
+
+NOT CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <not.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'not'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/*
+ * NOT - one's complement the operand in place.
+ * No flag state is touched here (NOT affects no flags).
+ */
+GLOBAL VOID
+NOT
+
+IFN1(
+ IU32 *, pop1 /* pntr to dst/src operand */
+ )
+
+
+ {
+ *pop1 = ~*pop1; /* invert every bit of the operand */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/not.h b/private/mvdm/softpc.new/base/ccpu386/not.h
new file mode 100644
index 000000000..adb5ae61d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/not.h
@@ -0,0 +1,16 @@
+/*
+ not.h
+
+ Define all NOT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)not.h 1.4 02/09/94";
+ */
+
+IMPORT VOID NOT
+
+IPT1(
+ IU32 *, pop1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/ntstubs.c b/private/mvdm/softpc.new/base/ccpu386/ntstubs.c
new file mode 100644
index 000000000..df1855755
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ntstubs.c
@@ -0,0 +1,226 @@
+#include "insignia.h"
+#include "host_def.h"
+#include "evidgen.h"
+
+#include "cpu4.h"
+
+#ifndef PIG
+
+extern struct VideoVector C_Video;
+
+IHP Gdp;
+struct CpuVector Cpu;
+//struct SasVector Sas;
+struct VideoVector Video;
+
+/*
+ * 3.0-style CPU interrupt shim: maps the old interrupt codes onto the
+ * C CPU's c_cpu_interrupt() entry point.
+ * NOTE(review): return type is implicit int (pre-ANSI style) and no
+ * value is ever returned - callers presumably ignore it; confirm and
+ * declare this VOID.
+ */
+a3_cpu_interrupt (int errupt, IU16 numint)
+{
+ switch(errupt)
+ {
+ case 1: /* 3.0 cpu_timer_tick */
+ c_cpu_interrupt(CPU_TIMER_TICK, numint);
+ break;
+ case 3: /* 3.0 cpu_hw_int */
+ c_cpu_interrupt(CPU_HW_INT, numint);
+ break;
+ default:
+ printf("a3_cpu_interrupt - unhandled int %d\n", errupt);
+ }
+
+}
+GLOBAL IBOOL AlreadyInYoda=FALSE;
+
+/* Stub: port-output virtualisation registration; unused by the C CPU. */
+void Cpu_define_outb (IU16 id, void (*func)() )
+{
+ UNUSED(id);
+ UNUSED(func);
+}
+
+/* Profiling stubs - the C CPU build does no profiling. */
+void CpuInitializeProfile()
+{
+}
+
+void CpuAnalyzeProfile()
+{
+}
+
+void CpuStartProfile()
+{
+}
+
+/* 3.0-interface shims that forward straight to the C CPU equivalents. */
+IU32 a3_cpu_calc_q_ev_inst_for_time (IU32 val)
+{
+ return(c_cpu_calc_q_ev_inst_for_time (val));
+}
+
+void a3_cpu_init()
+{
+ c_cpu_init();
+}
+
+void a3_cpu_q_ev_set_count (IU32 val)
+{
+ c_cpu_q_ev_set_count (val);
+}
+
+IU32 a3_cpu_q_ev_get_count ()
+{
+ return(c_cpu_q_ev_get_count ());
+}
+
+/* Empty stub: the C CPU has no pending-hw-int latch to clear here. */
+void a3_cpu_clear_hw_int ()
+{
+}
+
+void a3_cpu_terminate ()
+{
+ c_cpu_terminate();
+}
+
+void _asm_simulate()
+{
+ c_cpu_simulate();
+}
+
+#if 0
+void cpu_simulate()
+{
+ c_cpu_simulate();
+}
+#endif
+
+/* No-op stubs: ROM copy and NPX init/reset handled elsewhere (or not at all). */
+void copyROM()
+{
+}
+
+void initialise_npx()
+{
+}
+
+void npx_reset()
+{
+}
+
+IHPE Cpu_outb_function;
+IHPE GDP;
+
+void _Call_C_2(IHPE a, IHPE b)
+{
+ UNUSED(a);
+ UNUSED(b);
+}
+
+/* Debugger/pigger support stubs and dummy 287 NPX state accessors. */
+void D2DmpBinaryImage (LONG base) { UNUSED(base); }
+void D2ForceTraceInit() { }
+LONG D2LowerThreshold, D2UpperThreshold;
+void IH_dump_frag_hist(ULONG n) { UNUSED(n); }
+void Mgr_yoda() { }
+char *NPXDebugBase = "NPXDebugBase";
+char *NPXDebugPtr = "NPXDebugPtr";
+ULONG *NPXFreq = (ULONG *)0;
+ULONG get_287_control_word() { return(0L); }
+double get_287_reg_as_double(int n) { return((double)n); }
+int get_287_sp() { return(0); }
+ULONG get_287_status_word() { return(0L); }
+word get_287_tag_word() { return(0); }
+
+
+#include "sas.h"
+
+/*
+ * SAS (memory interface) veneers: each #undefs the access macro from
+ * sas.h and supplies a real function that forwards to the C CPU's
+ * c_sas_* implementation. The *_no_check variants forward to the same
+ * checked versions - the C CPU makes no distinction.
+ */
+#undef sas_connect_memory
+void sas_connect_memory IFN3(PHY_ADDR, low, PHY_ADDR, high, SAS_MEM_TYPE, type)
+{
+ c_sas_connect_memory(low, high, type);
+}
+#undef sas_disable_20_bit_wrapping
+void sas_disable_20_bit_wrapping IFN0() { c_sas_disable_20_bit_wrapping(); }
+#undef sas_enable_20_bit_wrapping
+void sas_enable_20_bit_wrapping IFN0() { c_sas_enable_20_bit_wrapping(); }
+#undef sas_dw_at
+IU32 sas_dw_at IFN1(LIN_ADDR, addr) { return(c_sas_dw_at(addr)); }
+#undef sas_fills
+void sas_fills IFN3(LIN_ADDR, dest, IU8 , val, LIN_ADDR, len) { c_sas_fills(dest, val, len); }
+#undef sas_fillsw
+void sas_fillsw IFN3(LIN_ADDR, dest, IU16 , val, LIN_ADDR, len) { c_sas_fillsw(dest, val, len); }
+#undef sas_hw_at
+IU8 sas_hw_at IFN1(LIN_ADDR, addr) { return(c_sas_hw_at(addr)); }
+#undef sas_hw_at_no_check
+IU8 sas_hw_at_no_check IFN1(LIN_ADDR, addr) { return(c_sas_hw_at(addr)); }
+#undef sas_load
+void sas_load IFN2(sys_addr, addr, half_word *, val)
+{
+ *val = c_sas_hw_at(addr);
+}
+#undef sas_loadw
+void sas_loadw IFN2(sys_addr, addr, word *, val)
+{
+ *val = c_sas_w_at(addr);
+}
+#undef sas_loads
+void sas_loads IFN3(LIN_ADDR, src, IU8 *, dest, LIN_ADDR, len)
+{
+ c_sas_loads(src, dest, len);
+}
+#undef sas_memory_size
+PHY_ADDR sas_memory_size IFN0() { return(c_sas_memory_size()); }
+#undef sas_memory_type
+SAS_MEM_TYPE sas_memory_type IFN1(PHY_ADDR, addr) { return(c_sas_memory_type(addr)); }
+#undef sas_move_bytes_forward
+void sas_move_bytes_forward IFN3(sys_addr, src, sys_addr, dest, sys_addr, len)
+{
+ c_sas_move_bytes_forward(src, dest, len);
+}
+#undef sas_move_words_forward
+void sas_move_words_forward IFN3(sys_addr, src, sys_addr, dest, sys_addr, len)
+{
+ c_sas_move_words_forward(src, dest, len);
+}
+#undef sas_overwrite_memory
+void sas_overwrite_memory IFN2(PHY_ADDR, addr, PHY_ADDR, len)
+{
+ c_sas_overwrite_memory(addr, len);
+}
+#undef sas_scratch_address
+IU8 *sas_scratch_address IFN1(sys_addr, length) { return(c_sas_scratch_address(length)); }
+#undef sas_store
+void sas_store IFN2(LIN_ADDR, addr, IU8, val) { c_sas_store(addr, val); }
+#undef sas_store_no_check
+void sas_store_no_check IFN2(LIN_ADDR, addr, IU8, val) { c_sas_store(addr, val); }
+#undef sas_storedw
+void sas_storedw IFN2(LIN_ADDR, addr, IU32, val) { c_sas_storedw(addr, val); }
+#undef sas_storew
+void sas_storew IFN2(LIN_ADDR, addr, IU16, val) { c_sas_storew(addr, val); }
+#undef sas_storew_no_check
+void sas_storew_no_check IFN2(LIN_ADDR, addr, IU16, val) { c_sas_storew(addr, val); }
+#undef sas_stores
+void sas_stores IFN3(LIN_ADDR, dest, IU8 *, src, LIN_ADDR, len)
+{
+ c_sas_stores(dest, src, len);
+}
+#undef sas_w_at
+IU16 sas_w_at IFN1(LIN_ADDR, addr) { return(c_sas_w_at(addr)); }
+#undef sas_w_at_no_check
+IU16 sas_w_at_no_check IFN1(LIN_ADDR, addr) { return(c_sas_w_at(addr)); }
+#undef sas_transbuf_address
+IU8 *sas_transbuf_address IFN2(LIN_ADDR, dest_intel_addr, PHY_ADDR, len)
+{
+ return(c_sas_transbuf_address(dest_intel_addr, len));
+}
+#undef sas_twenty_bit_wrapping_enabled
+IBOOL sas_twenty_bit_wrapping_enabled() { return(c_sas_twenty_bit_wrapping_enabled()); }
+/* Transfer-buffer variants: plain copies for the C CPU. */
+#undef sas_loads_to_transbuf
+void sas_loads_to_transbuf(IU32 src, IU8 * dest, IU32 len)
+{
+ sas_loads(src, dest, len);
+}
+
+#undef sas_stores_from_transbuf
+void sas_stores_from_transbuf(IU32 dest, IU8 * src, IU32 len)
+{
+ sas_stores(dest, src, len);
+}
+
+/*************************************************************************
+*************************************************************************/
+
+#endif /* !PIG */
diff --git a/private/mvdm/softpc.new/base/ccpu386/ntthread.c b/private/mvdm/softpc.new/base/ccpu386/ntthread.c
new file mode 100644
index 000000000..48f79d8d9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ntthread.c
@@ -0,0 +1,253 @@
+#include <windows.h>
+#include "insignia.h"
+#include "host_def.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <setjmp.h>
+
+#define BADID ((DWORD)-1)
+
+#define MAXDEPTH 20
+
+typedef struct {
+ IS32 level;
+ jmp_buf sims[MAXDEPTH];
+ jmp_buf excepts[MAXDEPTH];
+} ThreadSimBuf, *ThreadSimBufPtr;
+
+typedef struct tids {
+ DWORD tid;
+ struct tids *next;
+} TidList, *TidListPtr;
+
+#define TIDNULL ((TidListPtr)0)
+
+TidListPtr tidlist = TIDNULL;
+
+void ccpu386InitThreadStuff();
+void ccpu386foundnewthread();
+void ccpu386newthread();
+void ccpu386exitthread();
+jmp_buf *ccpu386SimulatePtr();
+void ccpu386Unsimulate();
+jmp_buf *ccpu386ThrdExptnPtr();
+void ccpu386GotoThrdExptnPt();
+
+DWORD ccpuSimId = BADID;
+IBOOL potentialNewThread = FALSE;
+
+/*
+ * One-time setup of per-thread simulate-stack support: allocate the TLS
+ * slot, seed the known-thread list with the current (main) thread, and
+ * give the main thread its sim buffer.
+ * The list head is a function-static so it lives for the whole process.
+ * NOTE(review): execution continues even if TlsAlloc() fails - every
+ * later entry point re-checks ccpuSimId == BADID and bails out.
+ */
+void ccpu386InitThreadStuff()
+{
+ static TidList lhead;
+
+ ccpuSimId = TlsAlloc();
+
+ if (ccpuSimId == BADID)
+ fprintf(stderr, "ccpu386InitThreadStuff: TlsAlloc() failed\n");
+
+ lhead.tid = GetCurrentThreadId();
+ lhead.next = TIDNULL;
+ tidlist = &lhead;
+
+ ccpu386foundnewthread(); /* for main thread */
+
+}
+
+// what we'd really like to do at create thread time if we could be called
+// in the correct context.
+/*
+ * Allocate and install this thread's simulate-stack buffer in TLS.
+ * Called once per thread, in that thread's own context.
+ * Fixes: removed unused local `tp`; cast sizeof to int to match the
+ * %d format specifier; free the buffer if TlsSetValue fails (was leaked).
+ */
+void ccpu386foundnewthread()
+{
+ ThreadSimBufPtr simstack;
+
+ if (ccpuSimId == BADID)
+ {
+ fprintf(stderr, "ccpu386foundnewthread id:%#x called with Bad Id\n", GetCurrentThreadId());
+ return;
+ }
+ // get buffer for this thread to do sim/unsim on.
+ simstack = (ThreadSimBufPtr)malloc(sizeof(ThreadSimBuf));
+
+ if (simstack == (ThreadSimBufPtr)0)
+ {
+ fprintf(stderr, "ccpu386foundnewthread id:%#x cant malloc %d bytes. Err:%#x\n", GetCurrentThreadId(), (int)sizeof(ThreadSimBuf), GetLastError());
+ return;
+ }
+ simstack->level = 0;
+ if (!TlsSetValue(ccpuSimId, simstack))
+ {
+ fprintf(stderr, "ccpu386foundnewthread id:%#x simid %#x TlsSetValue failed (err:%#x)\n", GetCurrentThreadId(), ccpuSimId, GetLastError());
+ free(simstack); /* don't leak the buffer we couldn't install */
+ return;
+ }
+}
+
+/* just set bool to be checked in simulate which will be in new thread context*/
+/* just set bool to be checked in simulate which will be in new thread context*/
+void ccpu386newthread()
+{
+ potentialNewThread = TRUE;
+}
+
+/*
+ * Tear down the current thread's simulate state: free its TLS sim
+ * buffer and unlink its node from the known-thread list.
+ * NOTE(review): the TLS slot still holds the freed pointer afterwards -
+ * any later TlsGetValue on this thread would see a dangling buffer;
+ * presumably the thread exits immediately after this. Confirm.
+ */
+void ccpu386exitthread()
+{
+ ThreadSimBufPtr simstack;
+ TidListPtr tp, prev;
+
+ if (ccpuSimId == BADID)
+ {
+ fprintf(stderr, "ccpu386exitthread id:%#x called with Bad Id\n", GetCurrentThreadId());
+ return;
+ }
+ simstack = (ThreadSimBufPtr)TlsGetValue(ccpuSimId);
+ if (simstack == (ThreadSimBufPtr)0)
+ {
+ fprintf(stderr, "ccpu386exitthread tid:%#x simid %#x TlsGetValue failed (err:%#x)\n", GetCurrentThreadId(), ccpuSimId, GetLastError());
+ return;
+ }
+ free(simstack); //lose host sim memory for this thread
+
+ prev = tidlist;
+ tp = tidlist->next; // assume wont lose main thread
+
+ // remove tid from list of known threads
+ while(tp != TIDNULL)
+ {
+ if (tp->tid == GetCurrentThreadId())
+ {
+ prev->next = tp->next; /* take current node out of chain */
+ free(tp);
+ break;
+ }
+ prev = tp;
+ tp = tp->next;
+ }
+}
+
+/*
+ * Hand out the jmp_buf for a new simulate nesting level on the current
+ * thread, lazily creating the thread's sim state on its first call
+ * (flagged by potentialNewThread). Returns NULL on any failure or if
+ * MAXDEPTH levels are already active. The caller must setjmp on the
+ * returned buffer itself - see the comment at the bottom.
+ */
+jmp_buf *ccpu386SimulatePtr()
+{
+ ThreadSimBufPtr simstack;
+ TidListPtr tp, prev;
+
+ if (ccpuSimId == BADID)
+ {
+ fprintf(stderr, "ccpu386SimulatePtr id:%#x called with Bad Id\n", GetCurrentThreadId());
+ return ((jmp_buf *)0);
+ }
+
+ // Check for 'first call in new thread context' case where we need to set
+ // up new thread data space.
+ if (potentialNewThread)
+ {
+ prev = tp = tidlist;
+ while(tp != TIDNULL) // look for tid in current list
+ {
+ if (tp->tid == GetCurrentThreadId())
+ break;
+ prev = tp;
+ tp = tp->next;
+ }
+ if (tp == TIDNULL) // must be new thread!
+ {
+ potentialNewThread = FALSE; // remove search criteria
+
+ tp = (TidListPtr)malloc(sizeof(TidList)); // make new node
+ if (tp == TIDNULL)
+ {
+ fprintf(stderr, "ccpuSimulatePtr: can't malloc space for new thread data\n");
+ return((jmp_buf *)0);
+ }
+ // connect & initialise node (prev is the list tail here)
+ prev->next = tp;
+ tp->tid = GetCurrentThreadId();
+ tp->next = TIDNULL;
+ //get tls data
+ ccpu386foundnewthread();
+ }
+ }
+
+ simstack = (ThreadSimBufPtr)TlsGetValue(ccpuSimId);
+ if (simstack == (ThreadSimBufPtr)0)
+ {
+ fprintf(stderr, "ccpu386SimulatePtr tid:%#x simid %#x TlsGetValue failed (err:%#x)\n", GetCurrentThreadId(), ccpuSimId, GetLastError());
+ return ((jmp_buf *)0);
+ }
+
+ if (simstack->level >= MAXDEPTH)
+ {
+ fprintf(stderr, "Stack overflow in ccpu386SimulatePtr()!\n");
+ return((jmp_buf *)0);
+ }
+
+ /* return pointer to current context and invoke a new CPU level */
+ /* can't setjmp here & return otherwise stack unwinds & context lost */
+
+ return(&simstack->sims[simstack->level++]);
+}
+
+/*
+ * Pop one simulate nesting level on this thread and longjmp back to the
+ * jmp_buf that ccpu386SimulatePtr() handed out for it.
+ * Fix: previously, when already at the base of the stack, the code
+ * printed a warning but still decremented level to -1 and longjmp'd
+ * through sims[-1] (out-of-bounds, undefined behaviour). Now bails out.
+ */
+void ccpu386Unsimulate()
+{
+ ThreadSimBufPtr simstack;
+ extern ISM32 in_C;
+
+ if (ccpuSimId == BADID)
+ {
+ fprintf(stderr, "ccpu386Unsimulate id:%#x called with Bad Id\n", GetCurrentThreadId());
+ return ;
+ }
+ simstack = (ThreadSimBufPtr)TlsGetValue(ccpuSimId);
+ if (simstack == (ThreadSimBufPtr)0)
+ {
+ fprintf(stderr, "ccpu386Unsimulate tid:%#x simid %#x TlsGetValue failed (err:%#x)\n", GetCurrentThreadId(), ccpuSimId, GetLastError());
+ return ;
+ }
+
+ if (simstack->level == 0)
+ {
+ fprintf(stderr, "host_unsimulate() - already at base of stack!\n");
+ return; /* don't underflow the level stack */
+ }
+
+ /* Return to previous context */
+ in_C = 1;
+ simstack->level --;
+ longjmp(simstack->sims[simstack->level], 1);
+}
+
+ /* somewhere for exceptions to return to */
+/*
+ * Return the exception jmp_buf for the current simulate level, or NULL
+ * on failure. Fix: the error paths used a bare `return;` in a function
+ * returning jmp_buf * (a C constraint violation / garbage value to any
+ * caller that used the result); they now return a null pointer like
+ * the equivalent paths in ccpu386SimulatePtr().
+ */
+jmp_buf *ccpu386ThrdExptnPtr()
+{
+ ThreadSimBufPtr simstack;
+
+ if (ccpuSimId == BADID)
+ {
+ fprintf(stderr, "ccpu386ThrdExptnPtr id:%#x called with Bad Id\n", GetCurrentThreadId());
+ return ((jmp_buf *)0);
+ }
+ simstack = (ThreadSimBufPtr)TlsGetValue(ccpuSimId);
+ if (simstack == (ThreadSimBufPtr)0)
+ {
+ fprintf(stderr, "ccpu386ThrdExptnPtr id:%#x TlsGetValue failed (err:%#x)\n", GetCurrentThreadId(), GetLastError());
+ return ((jmp_buf *)0);
+ }
+
+ return(&simstack->excepts[simstack->level - 1]);
+}
+
+/* take exception */
+/* take exception: longjmp to the exception buffer of the current level */
+/* NOTE(review): if level is 0 this indexes excepts[-1] - presumably an
+ * exception can only be raised inside an active simulate level; confirm. */
+void ccpu386GotoThrdExptnPt()
+{
+ ThreadSimBufPtr simstack;
+
+ if (ccpuSimId == BADID)
+ {
+ fprintf(stderr, "ccpu386GotoThrdExptnPtr id:%#x called with Bad Id\n", GetCurrentThreadId());
+ return;
+ }
+ simstack = (ThreadSimBufPtr)TlsGetValue(ccpuSimId);
+ if (simstack == (ThreadSimBufPtr)0)
+ {
+ fprintf(stderr, "ccpu386GotoThrdExptnPtr id:%#x TlsGetValue failed (err:%#x)\n", GetCurrentThreadId(), GetLastError());
+ return ;
+ }
+
+ longjmp(simstack->excepts[simstack->level - 1], 1);
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/ntthread.h b/private/mvdm/softpc.new/base/ccpu386/ntthread.h
new file mode 100644
index 000000000..a78d7bb5e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ntthread.h
@@ -0,0 +1,9 @@
+extern void ccpu386InitThreadStuff();
+extern void ccpu386foundnewthread();
+extern void ccpu386newthread();
+extern void ccpu386exitthread();
+extern jmp_buf *ccpu386SimulatePtr();
+extern void ccpu386Unsimulate();
+extern jmp_buf *ccpu386ThrdExptnPtr();
+extern void ccpu386GotoThrdExptnPt();
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/or.c b/private/mvdm/softpc.new/base/ccpu386/or.c
new file mode 100644
index 000000000..3c08e4751
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/or.c
@@ -0,0 +1,59 @@
+/*[
+
+or.c
+
+LOCAL CHAR SccsID[]="@(#)or.c 1.5 02/09/94";
+
+OR CPU functions.
+-----------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <or.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'or'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+OR
+
+IFN3(
+ IU32 *, pop1, /* pntr to dst/lsrc operand */
+ IU32, op2, /* rsrc operand */
+ IUM8, op_sz /* 8, 16 or 32-bit */
+ )
+
+
+ {
+ IU32 answer;
+
+ /* Bitwise inclusive-OR of destination with source. */
+ answer = *pop1 | op2;
+
+ /* OR always clears overflow, carry and aux-carry... */
+ SET_OF(0);
+ SET_CF(0);
+ SET_AF(0);
+ /* ...and sets parity, sign and zero from the answer. */
+ SET_PF(pf_table[answer & BYTE_MASK]);
+ SET_SF((answer & SZ2MSB(op_sz)) != 0); /* SF = MSB */
+ SET_ZF(answer == 0);
+
+ *pop1 = answer; /* Return answer */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/or.h b/private/mvdm/softpc.new/base/ccpu386/or.h
new file mode 100644
index 000000000..164487c42
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/or.h
@@ -0,0 +1,18 @@
+/*
+ or.h
+
+ Define all OR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)or.h 1.4 02/09/94";
+ */
+
+IMPORT VOID OR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/out.c b/private/mvdm/softpc.new/base/ccpu386/out.c
new file mode 100644
index 000000000..21ab73939
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/out.c
@@ -0,0 +1,91 @@
+/*[
+
+out.c
+
+LOCAL CHAR SccsID[]="@(#)out.c 1.8 09/27/94";
+
+OUT CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <out.h>
+#include <ios.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+/*
+ * Need to call the IO functions directly from the base arrays (just like
+ * the assembler CPU does), rather than calling outb etc., as the latter
+ * could cause a virtualisation that would end-up back here.
+ */
+
+/* OUT8 - write one byte to an I/O port via the adapter dispatch tables. */
+GLOBAL VOID
+OUT8
+
+IFN2(
+ IU32, op1, /* src1(port nr.) operand */
+ IU32, op2 /* src2(data) operand */
+ )
+
+
+ {
+#ifndef PIG
+ /* port masked into table range before dispatch */
+ (*Ios_outb_function[Ios_out_adapter_table[op1 &
+ (PC_IO_MEM_SIZE-1)]])
+ (op1, op2);
+#endif /* !PIG */
+ }
+
+/* OUT16 - write one word to an I/O port via the adapter dispatch tables. */
+GLOBAL VOID
+OUT16
+
+IFN2(
+ IU32, op1, /* src1(port nr.) operand */
+ IU32, op2 /* src2(data) operand */
+ )
+
+
+ {
+#ifndef PIG
+ (*Ios_outw_function[Ios_out_adapter_table[op1 &
+ (PC_IO_MEM_SIZE-1)]])
+ (op1, op2);
+#endif /* !PIG */
+ }
+
+/* OUT32 - write a doubleword. Without SFELLOW there is no 32-bit port
+ * dispatch table, so the write is split into two OUT16s to op1 and
+ * op1+2 (low word first). */
+GLOBAL VOID
+OUT32 IFN2(
+ IU32, op1, /* src1(port nr.) operand */
+ IU32, op2 /* src2(data) operand */
+ )
+{
+#ifndef PIG
+#ifdef SFELLOW
+ (*Ios_outd_function[Ios_out_adapter_table[op1 &
+ (PC_IO_MEM_SIZE-1)]])
+ (op1, op2);
+#else
+ OUT16(op1, op2 & 0xffff);
+ OUT16(op1 + 2, op2 >> 16);
+#endif
+#endif /* !PIG */
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/out.h b/private/mvdm/softpc.new/base/ccpu386/out.h
new file mode 100644
index 000000000..083679cd7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/out.h
@@ -0,0 +1,33 @@
+/*
+ out.h
+
+ Define all OUT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)out.h 1.4 02/09/94";
+ */
+
+IMPORT VOID OUT8
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
+
+IMPORT VOID OUT16
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
+
+IMPORT VOID OUT32
+
+IPT2(
+ IU32, op1,
+ IU32, op2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/pigger_c.h b/private/mvdm/softpc.new/base/ccpu386/pigger_c.h
new file mode 100644
index 000000000..bd74bf42b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pigger_c.h
@@ -0,0 +1,4 @@
+#ifndef _Pigger_c_h
+#define _Pigger_c_h
+#define Pig (1)
+#endif /* ! _Pigger_c_h */
diff --git a/private/mvdm/softpc.new/base/ccpu386/pop.c b/private/mvdm/softpc.new/base/ccpu386/pop.c
new file mode 100644
index 000000000..5ff195b54
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pop.c
@@ -0,0 +1,78 @@
+/*[
+
+pop.c
+
+LOCAL CHAR SccsID[]="@(#)pop.c 1.5 02/09/94";
+
+POP CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <pop.h>
+#include <mov.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'pop'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* POP - check one stack item is readable, then pop it into *pop1. */
+GLOBAL VOID
+POP
+
+IFN1(
+ IU32 *, pop1
+ )
+
+
+ {
+ validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_1);
+ *pop1 = spop();
+ }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* 'pop' to segment register. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* The stack pointer is only advanced after MOV_SR succeeds, so a
+ * faulting segment load leaves (E)SP (and the stacked value) intact
+ * for a restartable instruction. */
+GLOBAL VOID
+POP_SR
+
+IFN1(
+ IU32, op1 /* index to segment register */
+ )
+
+
+ {
+ IU32 op2; /* data from stack */
+
+ /* get implicit operand without changing (E)SP */
+ validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_1);
+ op2 = tpop(STACK_ITEM_1, NULL_BYTE_OFFSET);
+
+ /* only use bottom 16-bits */
+ op2 &= WORD_MASK;
+
+ /* do the move */
+ MOV_SR(op1, op2);
+
+ /* if it works update (E)SP */
+ change_SP((IS32)NR_ITEMS_1);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/pop.h b/private/mvdm/softpc.new/base/ccpu386/pop.h
new file mode 100644
index 000000000..fff5856f1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pop.h
@@ -0,0 +1,23 @@
+/*
+ pop.h
+
+ Define all POP CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)pop.h 1.4 02/09/94";
+ */
+
+IMPORT VOID POP
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID POP_SR /* to Segment Register */
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/popa.c b/private/mvdm/softpc.new/base/ccpu386/popa.c
new file mode 100644
index 000000000..4a6e2e1e0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/popa.c
@@ -0,0 +1,61 @@
+/*[
+
+popa.c
+
+LOCAL CHAR SccsID[]="@(#)popa.c 1.5 02/09/94";
+
+POPA CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <popa.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/* POPA - pop the eight 16-bit registers pushed by PUSHA; the stacked
+ * SP image is discarded rather than loaded, per the architecture. */
+GLOBAL VOID
+POPA()
+ {
+ validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_8);
+ SET_DI(spop());
+ SET_SI(spop());
+ SET_BP(spop());
+ (VOID) spop(); /* throwaway SP */
+ SET_BX(spop());
+ SET_DX(spop());
+ SET_CX(spop());
+ SET_AX(spop());
+ }
+
+/* POPAD - 32-bit form of POPA; stacked ESP image likewise discarded. */
+GLOBAL VOID
+POPAD()
+ {
+ validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_8);
+ SET_EDI(spop());
+ SET_ESI(spop());
+ SET_EBP(spop());
+ (VOID) spop(); /* throwaway ESP */
+ SET_EBX(spop());
+ SET_EDX(spop());
+ SET_ECX(spop());
+ SET_EAX(spop());
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/popa.h b/private/mvdm/softpc.new/base/ccpu386/popa.h
new file mode 100644
index 000000000..9999551b4
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/popa.h
@@ -0,0 +1,13 @@
+/*
+ popa.h
+
+ Define all POPA CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)popa.h 1.5 09/01/94";
+ */
+
+IMPORT VOID POPA IPT0();
+
+IMPORT VOID POPAD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/popf.c b/private/mvdm/softpc.new/base/ccpu386/popf.c
new file mode 100644
index 000000000..848f39910
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/popf.c
@@ -0,0 +1,67 @@
+/*[
+
+popf.c
+
+LOCAL CHAR SccsID[]="@(#)popf.c 1.6 02/05/95";
+
+POPF CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include CpuH
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <popf.h>
+#include <debug.h>
+#include <config.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/* POPF - pop one stack item into the 16-bit FLAGS image. */
+GLOBAL VOID
+POPF()
+ {
+ validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_1);
+ setFLAGS(spop());
+ }
+
+/* POPFD - pop EFLAGS, preserving the live VM and RF bits. A popped
+ * value with any of bits 19-21 set (flags beyond the 386) is logged
+ * as a 486-class instruction before being applied. */
+GLOBAL VOID
+POPFD()
+ {
+ IU32 keep_vm;
+ IU32 keep_rf;
+ IU32 val;
+
+ validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_1);
+
+ /* NB. POPFD does not change the VM or RF flags. */
+ keep_vm = GET_VM();
+ keep_rf = GET_RF();
+ val = spop();
+ if (val & (7 << 19))
+ {
+ char buf[64]; /* ample for the 30-odd char message */
+ sprintf(buf, "POPFD attempt to pop %08x", val);
+ note_486_instruction(buf);
+ }
+ c_setEFLAGS(val);
+ SET_VM(keep_vm);
+ SET_RF(keep_rf);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/popf.h b/private/mvdm/softpc.new/base/ccpu386/popf.h
new file mode 100644
index 000000000..6a5690334
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/popf.h
@@ -0,0 +1,13 @@
+/*
+ popf.h
+
+ Define all POPF CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)popf.h 1.5 09/01/94";
+ */
+
+IMPORT VOID POPF IPT0();
+
+IMPORT VOID POPFD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/push.c b/private/mvdm/softpc.new/base/ccpu386/push.c
new file mode 100644
index 000000000..f0e250a06
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/push.c
@@ -0,0 +1,68 @@
+/*[
+
+push.c
+
+LOCAL CHAR SccsID[]="@(#)push.c 1.6 07/05/94";
+
+PUSH CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <push.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'push'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* PUSH - check one stack item is writable, then push op1. */
+GLOBAL VOID
+PUSH
+
+IFN1(
+ IU32, op1
+ )
+
+
+ {
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_1);
+ spush(op1);
+ }
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* 'push' segment register (always write 16 bits, in a 16/32 bit hole)*/
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+PUSH_SR
+
+IFN1(
+ IU32, op1
+ )
+
+
+ {
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_1);
+ spush16(op1); /* 16-bit store even with 32-bit operand size */
+ }
+
+
+
+
diff --git a/private/mvdm/softpc.new/base/ccpu386/push.h b/private/mvdm/softpc.new/base/ccpu386/push.h
new file mode 100644
index 000000000..57f3d747e
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/push.h
@@ -0,0 +1,23 @@
+/*
+ push.h
+
+ Define all PUSH CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)push.h 1.5 07/05/94";
+ */
+
+IMPORT VOID PUSH
+
+IPT1(
+ IU32, op1
+
+ );
+
+IMPORT VOID PUSH_SR
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/pusha.c b/private/mvdm/softpc.new/base/ccpu386/pusha.c
new file mode 100644
index 000000000..5addeaa51
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pusha.c
@@ -0,0 +1,53 @@
+/*[
+
+pusha.c
+
+LOCAL CHAR SccsID[]="@(#)pusha.c 1.5 02/09/94";
+
+PUSHA CPU Functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <pusha.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/* PUSHA - push the eight general registers. The SP image pushed is the
+ * value saved BEFORE the first push, as the architecture requires. */
+GLOBAL VOID
+PUSHA()
+ {
+ IU32 temp;
+
+ /* verify stack is writable */
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_8);
+
+ /* all ok, shunt data onto stack */
+ temp = GET_ESP(); /* original (E)SP, pushed in SP's slot below */
+ spush((IU32)GET_EAX());
+ spush((IU32)GET_ECX());
+ spush((IU32)GET_EDX());
+ spush((IU32)GET_EBX());
+ spush(temp);
+ spush((IU32)GET_EBP());
+ spush((IU32)GET_ESI());
+ spush((IU32)GET_EDI());
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/pusha.h b/private/mvdm/softpc.new/base/ccpu386/pusha.h
new file mode 100644
index 000000000..08002c029
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pusha.h
@@ -0,0 +1,11 @@
+/*
+ pusha.h
+
+ Define all PUSHA CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)pusha.h 1.5 09/01/94";
+ */
+
+IMPORT VOID PUSHA IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/pushf.c b/private/mvdm/softpc.new/base/ccpu386/pushf.c
new file mode 100644
index 000000000..b3dd130db
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pushf.c
@@ -0,0 +1,51 @@
+/*[
+
+pushf.c
+
+LOCAL CHAR SccsID[]="@(#)pushf.c 1.6 01/17/95";
+
+PUSHF CPU Functions.
+--------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <pushf.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/* PUSHF - push the flags image with VM and RF forced clear, as the
+ * architecture defines for the pushed copy. */
+GLOBAL VOID
+PUSHF()
+ {
+ IU32 flags;
+
+ /* verify stack is writable */
+ validate_stack_space(USE_SP, (ISM32)NR_ITEMS_1);
+
+ /* all ok, shunt data onto stack */
+ flags = c_getEFLAGS();
+
+ /* VM and RF are cleared in pushed image. */
+ flags = flags & ~BIT17_MASK; /* Clear VM */
+ flags = flags & ~BIT16_MASK; /* Clear RF */
+
+ spush(flags);
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/pushf.h b/private/mvdm/softpc.new/base/ccpu386/pushf.h
new file mode 100644
index 000000000..172f8fd59
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/pushf.h
@@ -0,0 +1,11 @@
+/*
+ pushf.h
+
+ Define all PUSHF CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)pushf.h 1.5 09/01/94";
+ */
+
+IMPORT VOID PUSHF IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/rcl.c b/private/mvdm/softpc.new/base/ccpu386/rcl.c
new file mode 100644
index 000000000..8920af476
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rcl.c
@@ -0,0 +1,91 @@
+/*[
+
+rcl.c
+
+LOCAL CHAR SccsID[]="@(#)rcl.c 1.5 02/09/94";
+
+RCL CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <rcl.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'rcl'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+RCL
+
+IFN3(
+ IU32 *, pop1, /* pntr to dst/src operand */
+ IU32, op2, /* rotation count operand */
+ IUM8, op_sz /* 8, 16 or 32-bit */
+ )
+
+
+ {
+ IU32 working; /* value being rotated */
+ IU32 msb; /* mask selecting the operand's top bit */
+ IU32 carry_out; /* bit leaving the top on each step */
+ ISM32 step;
+ ISM32 new_of;
+
+ /* only use lower five bits of count */
+ if ( (op2 &= 0x1f) == 0 )
+ return;
+
+ /*
+ ==== =================
+ -- |CF| <-- | | | | | | | | | <--
+ | ==== ================= |
+ ---------------------------------
+ */
+ msb = SZ2MSB(op_sz);
+ working = *pop1;
+ for ( step = 0; step < op2; step++ )
+ {
+ /* top bit becomes the new carry; old carry enters at bit 0 */
+ carry_out = (working & msb) != 0;
+ working = working << 1 | GET_CF();
+ SET_CF(carry_out);
+ }
+
+ /* OF = CF ^ MSB of result */
+ new_of = GET_CF() ^ ((working & msb) != 0);
+
+ if ( op2 == 1 )
+ {
+ SET_OF(new_of);
+ }
+ else
+ {
+ do_multiple_shiftrot_of(new_of);
+ }
+
+ *pop1 = working; /* Return answer */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/rcl.h b/private/mvdm/softpc.new/base/ccpu386/rcl.h
new file mode 100644
index 000000000..10e836d4a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rcl.h
@@ -0,0 +1,18 @@
+/*
+ rcl.h
+
+ Define all RCL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)rcl.h 1.4 02/09/94";
+ */
+
+IMPORT VOID RCL
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/rcr.c b/private/mvdm/softpc.new/base/ccpu386/rcr.c
new file mode 100644
index 000000000..df6515a94
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rcr.c
@@ -0,0 +1,87 @@
+/*[
+
+rcr.c
+
+LOCAL CHAR SccsID[]="@(#)rcr.c 1.5 02/09/94";
+
+RCR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <rcr.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'rcr'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+RCR
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/src operand */
+	IU32, op2,	/* rotation count operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 temp_cf;
+   IU32 result;
+   IU32 feedback;	/* Bit posn to feed carry back to */
+   ISM32 i;
+   ISM32 new_of;
+
+   /* only use lower five bits of count; count 0 leaves all flags unchanged */
+   if ( (op2 &= 0x1f) == 0 )
+      return;
+
+   /*
+       =================     ====
+    -> | | | | | | | | | --> |CF| ---
+    |  =================     ====   |
+    ---------------------------------
+    */
+   feedback = SZ2MSB(op_sz);
+   for ( result = *pop1, i = 0; i < op2; i++ )	/* bit-at-a-time: op2 steps == rotate by op2 mod (op_sz+1) */
+      {
+      temp_cf = GET_CF();
+      SET_CF((result & BIT0_MASK) != 0);	/* CF <= Bit 0 */
+      result >>= 1;
+      if ( temp_cf )
+	 result |= feedback;	/* old CF enters at MSB */
+      }
+
+   /* OF = MSB of result ^ (MSB-1) of result */
+   new_of = ((result ^ result << 1) & feedback) != 0;	/* NB '<<' binds tighter than '^' */
+
+   if ( op2 == 1 )
+      {
+      SET_OF(new_of);
+      }
+   else
+      {
+      do_multiple_shiftrot_of(new_of);
+      }
+
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/rcr.h b/private/mvdm/softpc.new/base/ccpu386/rcr.h
new file mode 100644
index 000000000..ca1010663
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rcr.h
@@ -0,0 +1,18 @@
+/*
+ rcr.h
+
+ Define all RCR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)rcr.h 1.4 02/09/94";
+ */
+
+IMPORT VOID RCR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/ret.c b/private/mvdm/softpc.new/base/ccpu386/ret.c
new file mode 100644
index 000000000..196744737
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ret.c
@@ -0,0 +1,270 @@
+/*[
+
+ret.c
+
+LOCAL CHAR SccsID[]="@(#)ret.c 1.9 02/27/95";
+
+RET CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <ret.h>
+#include <c_xfer.h>
+#include <fault.h>
+
+
+/*
+ =====================================================================
+   EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Process far RET. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+RETF
+
+IFN1(
+	IU32, op1	/* Number of bytes to pop from stack.
+			   NB OS2 Rel 2 implies (contrary to Intel doc.) that
+			   imm16(op1) is always a byte quantity! */
+   )
+
+
+   {
+   IU16 new_cs;		/* The return destination */
+   IU32 new_ip;
+
+   IU32 cs_descr_addr;	/* code segment descriptor address */
+   CPU_DESCR cs_entry;	/* code segment descriptor entry */
+
+   ISM32 dest_type;	/* category for destination */
+   ISM32 privilege;	/* return privilege level */
+
+   IU16 new_ss;		/* The new stack */
+   IU32 new_sp;
+
+   IU32 ss_descr_addr;	/* stack segment descriptor address */
+   CPU_DESCR ss_entry;	/* stack segment descriptor entry */
+
+   IS32 stk_inc;	/* Stack increment for basic instruction */
+   ISM32 stk_item;	/* Number of items of immediate data */
+
+   /* must have CS:(E)IP on the stack */
+   validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_2);
+
+   /* retrieve return destination from stack */
+   new_ip = tpop(STACK_ITEM_1, NULL_BYTE_OFFSET);
+   new_cs = tpop(STACK_ITEM_2, NULL_BYTE_OFFSET);
+
+   /* force immediate offset to be an item count */
+   if ( GET_OPERAND_SIZE() == USE16 )
+      stk_item = op1 / 2;
+   else /* USE32 */
+      stk_item = op1 / 4;
+
+   if ( GET_PE() == 0 || GET_VM() == 1 )
+      {
+      /* Real Mode or V86 Mode */
+
+#ifdef TAKE_REAL_MODE_LIMIT_FAULT
+
+      /* do ip limit check */
+      if ( new_ip > GET_CS_LIMIT() )
+	 GP((IU16)0, FAULT_RETF_RM_CS_LIMIT);
+
+#else /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+      /* The Soft486 EDL CPU does not take Real Mode limit failures.
+       * Since the Ccpu486 is used as a "reference" cpu we wish it
+       * to behave as a C version of the EDL Cpu rather than as a C
+       * version of an i486.
+       */
+
+#endif /* TAKE_REAL_MODE_LIMIT_FAULT */
+
+      /* all systems go */
+      load_CS_cache(new_cs, (IU32)0, (CPU_DESCR *)0);
+      SET_EIP(new_ip);
+
+      stk_inc = NR_ITEMS_2;	/* allow for CS:(E)IP */
+      }
+   else
+      {
+      /* Protected Mode */
+
+      /* decode final action and complete stack check */
+      privilege = GET_SELECTOR_RPL(new_cs);
+      if ( privilege < GET_CPL() )
+	 {
+	 GP(new_cs, FAULT_RETF_PM_ACCESS);	/* you can't get to higher privilege */
+	 }
+      else if ( privilege == GET_CPL() )
+	 {
+	 dest_type = SAME_LEVEL;
+	 }
+      else
+	 {
+	 /* going to lower privilege */
+	 /* must have CS:(E)IP, immed bytes, SS:(E)SP on stack */
+	 validate_stack_exists(USE_SP, (ISM32)(NR_ITEMS_4 + stk_item));
+	 dest_type = LOWER_PRIVILEGE;
+	 }
+
+      if ( selector_outside_GDT_LDT(new_cs, &cs_descr_addr) )
+	 GP(new_cs, FAULT_RETF_SELECTOR);
+
+      /* check type, access and presence of return addr */
+
+      /* load descriptor */
+      read_descriptor_linear(cs_descr_addr, &cs_entry);
+
+      /* must be a code segment */
+      switch ( descriptor_super_type(cs_entry.AR) )
+	 {
+      case CONFORM_NOREAD_CODE:
+      case CONFORM_READABLE_CODE:
+	 /* access check requires DPL <= return RPL */
+	 if ( GET_AR_DPL(cs_entry.AR) > privilege )
+	    GP(new_cs, FAULT_RETF_ACCESS_1);
+	 break;
+
+      case NONCONFORM_NOREAD_CODE:
+      case NONCONFORM_READABLE_CODE:
+	 /* access check requires DPL == return RPL */
+	 if ( GET_AR_DPL(cs_entry.AR) != privilege )
+	    GP(new_cs, FAULT_RETF_ACCESS_2);
+	 break;
+
+      default:
+	 GP(new_cs, FAULT_RETF_BAD_SEG_TYPE);
+	 }
+
+      if ( GET_AR_P(cs_entry.AR) == NOT_PRESENT )
+	 NP(new_cs, FAULT_RETF_CS_NOTPRESENT);
+
+      /* action the target */
+      switch ( dest_type )
+	 {
+      case SAME_LEVEL:
+	 /* do ip limit checking */
+	 if ( new_ip > cs_entry.limit )
+	    GP((IU16)0, FAULT_RETF_PM_CS_LIMIT_1);
+
+	 /* ALL SYSTEMS GO */
+
+	 load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
+	 SET_EIP(new_ip);
+	 stk_inc = NR_ITEMS_2;	/* allow for CS:(E)IP */
+	 break;
+
+      case LOWER_PRIVILEGE:
+	 /*
+
+		     ==========
+	    SS:SP -> | old IP |
+		     | old CS |
+		     | parm 1 |
+		     |  ...   |
+		     | parm n |
+		     | old SP |
+		     | old SS |
+		     ==========
+	  */
+
+	 /* check new stack */
+	 new_ss = tpop(STACK_ITEM_4, (ISM32)op1);
+	 check_SS(new_ss, privilege, &ss_descr_addr, &ss_entry);
+
+	 /* do ip limit checking */
+	 if ( new_ip > cs_entry.limit )
+	    GP((IU16)0, FAULT_RETF_PM_CS_LIMIT_2);
+
+	 /* ALL SYSTEMS GO */
+
+	 SET_CPL(privilege);
+
+	 load_CS_cache(new_cs, cs_descr_addr, &cs_entry);
+	 SET_EIP(new_ip);
+
+	 new_sp = tpop(STACK_ITEM_3, (ISM32)op1);
+	 load_SS_cache(new_ss, ss_descr_addr, &ss_entry);
+	 if ( GET_OPERAND_SIZE() == USE16 )
+	    SET_SP(new_sp);
+	 else
+	    SET_ESP(new_sp);
+	 stk_inc = 0;	/* SS:(E)SP already reloaded above - nothing left to pop */
+
+	 /* finally re-validate DS and ES segments */
+	 load_data_seg_new_privilege(DS_REG);
+	 load_data_seg_new_privilege(ES_REG);
+	 load_data_seg_new_privilege(FS_REG);
+	 load_data_seg_new_privilege(GS_REG);
+	 break;
+	 }
+      }
+
+   /* finally increment stack pointer */
+   change_SP(stk_inc);
+   byte_change_SP((IS32)op1);	/* discard immediate byte count - see op1 note above */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* near return */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+RETN
+
+IFN1(
+	IU32, op1	/* number of immediate bytes to discard from stack */
+   )
+
+
+   {
+   IU32 new_ip;
+
+   /* must have ip on stack */
+   validate_stack_exists(USE_SP, (ISM32)NR_ITEMS_1);
+
+   new_ip = tpop(STACK_ITEM_1, NULL_BYTE_OFFSET);	/* get ip */
+
+   /* do ip limit check */
+#ifndef TAKE_REAL_MODE_LIMIT_FAULT
+   /* The Soft486 EDL CPU does not take Real Mode limit failures.
+    * Since the Ccpu486 is used as a "reference" cpu we wish it
+    * to behave as a C version of the EDL Cpu rather than as a C
+    * version of an i486.
+    */
+
+   if ( GET_PE() == 1 && GET_VM() == 0 )
+#endif /* nTAKE_REAL_MODE_LIMIT_FAULT */
+      {
+      if ( new_ip > GET_CS_LIMIT() )
+	 GP((IU16)0, FAULT_RETN_CS_LIMIT);
+      }
+
+   /* all systems go */
+   SET_EIP(new_ip);
+   change_SP((IS32)NR_ITEMS_1);	/* pop (E)IP */
+
+   if ( op1 )
+      {
+      byte_change_SP((IS32)op1);	/* discard immediate byte count */
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/ret.h b/private/mvdm/softpc.new/base/ccpu386/ret.h
new file mode 100644
index 000000000..b2f35db92
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ret.h
@@ -0,0 +1,23 @@
+/*
+ ret.h
+
+ Define all RET CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)ret.h 1.4 02/09/94";
+ */
+
+IMPORT VOID RETF
+
+IPT1(
+ IU32, op1
+
+ );
+
+IMPORT VOID RETN
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/rol.c b/private/mvdm/softpc.new/base/ccpu386/rol.c
new file mode 100644
index 000000000..96a691b11
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rol.c
@@ -0,0 +1,91 @@
+/*[
+
+rol.c
+
+LOCAL CHAR SccsID[]="@(#)rol.c 1.5 02/09/94";
+
+ROL CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <rol.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'rol'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+ROL
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/src operand */
+	IU32, op2,	/* rotation count operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+   IU32 feedback;	/* Bit posn to feed into Bit 0 */
+   ISM32 i;
+   ISM32 new_of;
+
+   /* only use lower five bits of count; count 0 leaves all flags unchanged */
+   if ( (op2 &= 0x1f) == 0 )
+      return;
+
+   /*
+    ====        =================
+    |CF| <-- -- | | | | | | | | | <--
+    ====     |  =================   |
+	      ------------------------
+    */
+   feedback = SZ2MSB(op_sz);
+   for ( result = *pop1, i = 0; i < op2; i++ )
+      {
+      if ( result & feedback )
+	 {
+	 result = result << 1 | 1;	/* MSB wraps round into Bit 0 */
+	 SET_CF(1);
+	 }
+      else
+	 {
+	 result <<= 1;
+	 SET_CF(0);
+	 }
+      }
+
+   /* OF = CF ^ MSB of result */
+   new_of = GET_CF() ^ (result & feedback) != 0;	/* NB '!=' binds tighter than '^' - intentional */
+
+   if ( op2 == 1 )
+      {
+      SET_OF(new_of);
+      }
+   else
+      {
+      do_multiple_shiftrot_of(new_of);
+      }
+
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/rol.h b/private/mvdm/softpc.new/base/ccpu386/rol.h
new file mode 100644
index 000000000..7711b7303
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rol.h
@@ -0,0 +1,18 @@
+/*
+ rol.h
+
+ Define all ROL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)rol.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ROL
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/ror.c b/private/mvdm/softpc.new/base/ccpu386/ror.c
new file mode 100644
index 000000000..66b06c0b6
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ror.c
@@ -0,0 +1,91 @@
+/*[
+
+ror.c
+
+LOCAL CHAR SccsID[]="@(#)ror.c 1.5 02/09/94";
+
+ROR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <ror.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'ror'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+ROR
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/src operand */
+	IU32, op2,	/* rotation count operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+   IU32 feedback;	/* Bit posn to feed Bit 0 back to */
+   ISM32 i;
+   ISM32 new_of;
+
+   /* only use lower five bits of count; count 0 leaves all flags unchanged */
+   if ( (op2 &= 0x1f) == 0 )
+      return;
+
+   /*
+       =================          ====
+    -> | | | | | | | | | --- --> |CF|
+    |  ================= |       ====
+    ----------------------
+    */
+   feedback = SZ2MSB(op_sz);
+   for ( result = *pop1, i = 0; i < op2; i++ )
+      {
+      if ( result & BIT0_MASK )
+	 {
+	 result = result >> 1 | feedback;	/* Bit 0 wraps round into MSB */
+	 SET_CF(1);
+	 }
+      else
+	 {
+	 result >>= 1;
+	 SET_CF(0);
+	 }
+      }
+
+   /* OF = MSB of result ^ (MSB-1) of result */
+   new_of = ((result ^ result << 1) & feedback) != 0;	/* NB '<<' binds tighter than '^' */
+
+   if ( op2 == 1 )
+      {
+      SET_OF(new_of);
+      }
+   else
+      {
+      do_multiple_shiftrot_of(new_of);
+      }
+
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/ror.h b/private/mvdm/softpc.new/base/ccpu386/ror.h
new file mode 100644
index 000000000..de4d08d95
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/ror.h
@@ -0,0 +1,18 @@
+/*
+ ror.h
+
+ Define all ROR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)ror.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ROR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/rsrvd.c b/private/mvdm/softpc.new/base/ccpu386/rsrvd.c
new file mode 100644
index 000000000..0554359e0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rsrvd.c
@@ -0,0 +1,45 @@
+/*[
+
+rsrvd.c
+
+LOCAL CHAR SccsID[]="@(#)rsrvd.c 1.5 02/09/94";
+
+Reserved CPU Functions.
+-----------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <rsrvd.h>
+
+/*
+ =====================================================================
+ EXECUTION STARTS HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Reserved opcode. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+VOID
+RSRVD()
+   {
+   /*
+      Reserved operation - nothing to do; the empty body is deliberate.
+      In particular reserved opcodes do not cause Int6 exceptions.
+      0f 07, 0f 10, 0f 11, 0f 12, 0f 13 are known to be reserved.
+   */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/rsrvd.h b/private/mvdm/softpc.new/base/ccpu386/rsrvd.h
new file mode 100644
index 000000000..7316f215c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/rsrvd.h
@@ -0,0 +1,11 @@
+/*
+ rsrvd.h
+
+ Define Reserved CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)rsrvd.h 1.5 09/01/94";
+ */
+
+IMPORT VOID RSRVD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/sahf.c b/private/mvdm/softpc.new/base/ccpu386/sahf.c
new file mode 100644
index 000000000..4a9b3deb7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sahf.c
@@ -0,0 +1,49 @@
+/*[
+
+sahf.c
+
+LOCAL CHAR SccsID[]="@(#)sahf.c 1.5 02/09/94";
+
+SAHF CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sahf.h>
+
+
+/*
+ =====================================================================
+   EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+SAHF()
+   {
+   IU32 temp;
+
+   /*        7   6   5   4   3   2   1   0   */
+   /* AH = <SF><ZF><xx><AF><xx><PF><xx><CF>; the <xx> bits are ignored */
+
+   temp = GET_AH();
+   SET_SF((temp & BIT7_MASK) != 0);
+   SET_ZF((temp & BIT6_MASK) != 0);
+   SET_AF((temp & BIT4_MASK) != 0);
+   SET_PF((temp & BIT2_MASK) != 0);
+   SET_CF((temp & BIT0_MASK) != 0);
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sahf.h b/private/mvdm/softpc.new/base/ccpu386/sahf.h
new file mode 100644
index 000000000..38783c63c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sahf.h
@@ -0,0 +1,11 @@
+/*
+ sahf.h
+
+ Define all SAHF CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sahf.h 1.5 09/01/94";
+ */
+
+IMPORT VOID SAHF IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/sar.c b/private/mvdm/softpc.new/base/ccpu386/sar.c
new file mode 100644
index 000000000..9cbc0dfe9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sar.c
@@ -0,0 +1,82 @@
+/*[
+
+sar.c
+
+LOCAL CHAR SccsID[]="@(#)sar.c 1.5 02/09/94";
+
+SAR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sar.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'sar'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SAR
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/src operand */
+	IU32, op2,	/* shift count operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 prelim;
+   IU32 result;
+   IU32 feedback;
+   ISM32 i;
+
+   /* only use lower five bits of count; count 0 leaves all flags unchanged */
+   if ( (op2 &= 0x1f) == 0 )
+      return;
+
+   /*
+	 =================     ====
+    --> | | | | | | | | | --> |CF|
+    |   =================     ====
+    ----  |
+    */
+   prelim = *pop1;	/* Initialise */
+   feedback = prelim & SZ2MSB(op_sz);	/* Determine MSB (the sign bit) */
+   for ( i = 0; i < (op2 - 1); i++ )	/* Do all but last shift */
+      {
+      prelim = prelim >> 1 | feedback;	/* sign bit re-inserted each step => arithmetic shift */
+      }
+   SET_CF((prelim & BIT0_MASK) != 0);	/* CF = Bit 0 (last bit shifted out) */
+   result = prelim >> 1 | feedback;	/* Do final shift */
+   SET_OF(0);	/* OF cleared (defined 0 for count 1) */
+   SET_PF(pf_table[result & BYTE_MASK]);
+   SET_ZF(result == 0);
+   SET_SF(feedback != 0);	/* SF = MSB */
+
+   /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+   SET_AF(UNDEFINED_FLAG);
+#endif
+
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sar.h b/private/mvdm/softpc.new/base/ccpu386/sar.h
new file mode 100644
index 000000000..ec2bf6288
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sar.h
@@ -0,0 +1,18 @@
+/*
+ sar.h
+
+ Define all SAR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sar.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SAR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/sascdef.c b/private/mvdm/softpc.new/base/ccpu386/sascdef.c
new file mode 100644
index 000000000..4d6959da1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sascdef.c
@@ -0,0 +1,142 @@
+/*[
+ * Generated File: sasCdef.c
+ *
+]*/
+
+#include "insignia.h"
+#include "host_inc.h"
+#include "host_def.h"
+#include "Fpu_c.h"
+#include "PigReg_c.h"
+#include "Univer_c.h"
+#define CPU_PRIVATE
+#include "cpu4.h"
+#include "gdpvar.h"
+#include "sas.h"
+#include "evidgen.h"
+
+#include <stdio.h>
+GLOBAL void SasAccessProblem IFN0()
+{
+	fprintf(stderr, "Sas used at illegal time\n");	/* diagnostic only - execution continues */
+}
+
+extern TYPE_sas_memory_size c_sas_memory_size;
+extern TYPE_sas_connect_memory c_sas_connect_memory;
+extern TYPE_sas_enable_20_bit_wrapping c_sas_enable_20_bit_wrapping;
+extern TYPE_sas_disable_20_bit_wrapping c_sas_disable_20_bit_wrapping;
+extern TYPE_sas_twenty_bit_wrapping_enabled c_sas_twenty_bit_wrapping_enabled;
+extern TYPE_sas_memory_type c_sas_memory_type;
+extern TYPE_sas_hw_at c_sas_hw_at;
+extern TYPE_sas_w_at c_sas_w_at;
+extern TYPE_sas_dw_at c_sas_dw_at;
+extern TYPE_sas_hw_at_no_check c_sas_hw_at;
+extern TYPE_sas_w_at_no_check c_sas_w_at;
+extern TYPE_sas_dw_at_no_check c_sas_dw_at;
+extern TYPE_sas_store c_sas_store;
+extern TYPE_sas_storew c_sas_storew;
+extern TYPE_sas_storedw c_sas_storedw;
+extern TYPE_sas_store_no_check c_sas_store;
+extern TYPE_sas_storew_no_check c_sas_storew;
+extern TYPE_sas_storedw_no_check c_sas_storedw;
+extern TYPE_sas_loads c_sas_loads;
+extern TYPE_sas_stores c_sas_stores;
+extern TYPE_sas_loads_no_check c_sas_loads_no_check;
+extern TYPE_sas_stores_no_check c_sas_stores_no_check;
+extern TYPE_sas_move_bytes_forward c_sas_move_bytes_forward;
+extern TYPE_sas_move_words_forward c_sas_move_words_forward;
+extern TYPE_sas_move_doubles_forward c_sas_move_doubles_forward;
+extern TYPE_sas_move_bytes_backward c_sas_move_bytes_backward;
+extern TYPE_sas_move_words_backward c_sas_move_words_backward;
+extern TYPE_sas_move_doubles_backward c_sas_move_doubles_backward;
+extern TYPE_sas_fills c_sas_fills;
+extern TYPE_sas_fillsw c_sas_fillsw;
+extern TYPE_sas_fillsdw c_sas_fillsdw;
+extern TYPE_sas_scratch_address c_sas_scratch_address;
+extern TYPE_sas_transbuf_address c_sas_transbuf_address;
+extern TYPE_sas_loads_to_transbuf c_sas_loads;
+extern TYPE_sas_stores_from_transbuf c_sas_stores;
+extern TYPE_sas_PR8 phy_r8;
+extern TYPE_sas_PR16 phy_r16;
+extern TYPE_sas_PR32 phy_r32;
+extern TYPE_sas_PW8 phy_w8;
+extern TYPE_sas_PW16 phy_w16;
+extern TYPE_sas_PW32 phy_w32;
+extern TYPE_sas_PW8_no_check phy_w8_no_check;
+extern TYPE_sas_PW16_no_check phy_w16_no_check;
+extern TYPE_sas_PW32_no_check phy_w32_no_check;
+extern TYPE_getPtrToPhysAddrByte c_GetPhyAdd;
+extern TYPE_get_byte_addr c_get_byte_addr;
+extern TYPE_getPtrToLinAddrByte c_GetLinAdd;
+extern TYPE_sas_init_pm_selectors c_SasRegisterVirtualSelectors;
+extern TYPE_sas_PWS c_sas_PWS;
+extern TYPE_sas_PWS_no_check c_sas_PWS_no_check;
+extern TYPE_sas_PRS c_sas_PRS;
+extern TYPE_sas_PRS_no_check c_sas_PRS_no_check;
+extern TYPE_sas_PigCmpPage c_sas_PigCmpPage;
+extern TYPE_sas_touch c_sas_touch;
+extern TYPE_IOVirtualised c_IOVirtualised;
+extern TYPE_VirtualiseInstruction c_VirtualiseInstruction;
+
+
+struct SasVector cSasPtrs = {
+	c_sas_memory_size,
+	c_sas_connect_memory,
+	c_sas_enable_20_bit_wrapping,
+	c_sas_disable_20_bit_wrapping,
+	c_sas_twenty_bit_wrapping_enabled,
+	c_sas_memory_type,
+	c_sas_hw_at,
+	c_sas_w_at,
+	c_sas_dw_at,
+	c_sas_hw_at,	/* *_no_check slots reuse the checked variants */
+	c_sas_w_at,
+	c_sas_dw_at,
+	c_sas_store,
+	c_sas_storew,
+	c_sas_storedw,
+	c_sas_store,	/* *_no_check slots reuse the checked variants */
+	c_sas_storew,
+	c_sas_storedw,
+	c_sas_loads,
+	c_sas_stores,
+	c_sas_loads_no_check,
+	c_sas_stores_no_check,
+	c_sas_move_bytes_forward,
+	c_sas_move_words_forward,
+	c_sas_move_doubles_forward,
+	c_sas_move_bytes_backward,
+	c_sas_move_words_backward,
+	c_sas_move_doubles_backward,
+	c_sas_fills,
+	c_sas_fillsw,
+	c_sas_fillsdw,
+	c_sas_scratch_address,
+	c_sas_transbuf_address,
+	c_sas_loads,	/* transbuf load/store slots reuse plain loads/stores */
+	c_sas_stores,
+	phy_r8,
+	phy_r16,
+	phy_r32,
+	phy_w8,
+	phy_w16,
+	phy_w32,
+	phy_w8_no_check,
+	phy_w16_no_check,
+	phy_w32_no_check,
+	c_GetPhyAdd,
+	c_get_byte_addr,
+	c_GetLinAdd,
+	c_SasRegisterVirtualSelectors,
+	(void (*)()) 0,	/* intentionally null slot - NOTE(review): confirm against struct SasVector */
+	c_sas_PWS,
+	c_sas_PWS_no_check,
+	c_sas_PRS,
+	c_sas_PRS_no_check,
+	c_sas_PigCmpPage,
+	c_sas_touch,
+	c_IOVirtualised,
+	c_VirtualiseInstruction
+};
+
+/*======================================== END ========================================*/
diff --git a/private/mvdm/softpc.new/base/ccpu386/sbb.c b/private/mvdm/softpc.new/base/ccpu386/sbb.c
new file mode 100644
index 000000000..0c73b5426
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sbb.c
@@ -0,0 +1,80 @@
+/*[
+
+sbb.c
+
+LOCAL CHAR SccsID[]="@(#)sbb.c 1.5 02/09/94";
+
+SBB CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sbb.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'sbb'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SBB
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32, op2,	/* rsrc operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+   IU32 carry;
+   IU32 msb;
+   IU32 op1_msb;
+   IU32 op2_msb;
+   IU32 res_msb;
+
+   msb = SZ2MSB(op_sz);
+   /* Do operation */
+   result = *pop1 - op2 - GET_CF() & SZ2MASK(op_sz);	/* NB '-' binds tighter than '&': diff then masked */
+   op1_msb = (*pop1 & msb) != 0;	/* Isolate all msb's */
+   op2_msb = (op2 & msb) != 0;
+   res_msb = (result & msb) != 0;
+   carry = *pop1 ^ op2 ^ result;	/* Isolate carries */
+   /* Determine flags */
+   /*
+      OF = (op1 == !op2) & (op1 ^ res)
+      ie if operand signs differ and res sign different to original
+      destination set OF.
+    */
+   SET_OF((op1_msb != op2_msb) & (op1_msb ^ res_msb));
+   /*
+      Formally:-    CF = !op1 & op2 | res & !op1 | res & op2
+      Equivalently:- CF = OF ^ op1 ^ op2 ^ res
+    */
+   SET_CF(((carry & msb) != 0) ^ GET_OF());
+   SET_PF(pf_table[result & BYTE_MASK]);
+   SET_ZF(result == 0);
+   SET_SF((result & msb) != 0);	/* SF = MSB */
+   SET_AF((carry & BIT4_MASK) != 0);	/* AF = Bit 4 carry */
+   *pop1 = result;	/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sbb.h b/private/mvdm/softpc.new/base/ccpu386/sbb.h
new file mode 100644
index 000000000..8b6e67a9d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sbb.h
@@ -0,0 +1,18 @@
+/*
+ sbb.h
+
+ Define all SBB CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sbb.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SBB
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/setxx.c b/private/mvdm/softpc.new/base/ccpu386/setxx.c
new file mode 100644
index 000000000..ba4f19237
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/setxx.c
@@ -0,0 +1,275 @@
+/*[
+
+setxx.c
+
+LOCAL CHAR SccsID[]="@(#)setxx.c 1.5 02/09/94";
+
+SETxx CPU functions (Byte Set on Condition).
+--------------------------------------------
+
+All these functions return 1 if the condition is true, else 0.
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <setxx.h>
+
+
+/*
+ =====================================================================
+   EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Below (CF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETB
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_CF();	/* 1 if below (CF set), else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Below or Equal (CF=1 || ZF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETBE
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_CF() || GET_ZF();	/* 1 if CF or ZF set, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Less (SF != OF) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETL
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_SF() != GET_OF();	/* 1 if SF != OF, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Less or Equal (ZF=1 || (SF != OF)) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETLE
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_SF() != GET_OF() || GET_ZF();	/* 1 if ZF set or SF != OF; '!=' binds tighter than '||' */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Below (CF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNB
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = !GET_CF();	/* 1 if CF clear, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Below or Equal (CF=0 && ZF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNBE
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = !GET_CF() && !GET_ZF();	/* 1 if both CF and ZF clear, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Less (SF==OF) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNL
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_SF() == GET_OF();	/* 1 if SF == OF, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Less or Equal (ZF=0 && (SF==OF)) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNLE
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_SF() == GET_OF() && !GET_ZF();	/* 1 if ZF clear and SF == OF, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Overflow (OF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNO
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = !GET_OF();	/* 1 if OF clear, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Parity (PF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNP
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = !GET_PF();	/* 1 if PF clear, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Sign (SF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNS
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = !GET_SF();	/* 1 if SF clear, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Not Zero (ZF=0) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETNZ
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = !GET_ZF();	/* 1 if ZF clear, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Overflow (OF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETO
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_OF();	/* 1 if OF set, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Parity (PF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETP
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_PF();	/* 1 if PF set, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Sign (SF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETS
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_SF();	/* 1 if SF set, else 0 */
+   }
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Set Byte if Zero (ZF=1) */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SETZ
+
+IFN1(
+	IU32 *, pop1	/* pntr to dst operand */
+   )
+
+
+   {
+   *pop1 = GET_ZF();	/* 1 if ZF set, else 0 */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/setxx.h b/private/mvdm/softpc.new/base/ccpu386/setxx.h
new file mode 100644
index 000000000..615bb7bda
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/setxx.h
@@ -0,0 +1,121 @@
+/*
+ setxx.h
+
+ SETxx CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)setxx.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SETB
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETBE
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETL
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETLE
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNB
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNBE
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNL
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNLE
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNO
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNP
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNS
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETNZ
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETO
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETP
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETS
+
+IPT1(
+ IU32 *, pop1
+
+ );
+
+IMPORT VOID SETZ
+
+IPT1(
+ IU32 *, pop1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/sgdt.c b/private/mvdm/softpc.new/base/ccpu386/sgdt.c
new file mode 100644
index 000000000..90012c0e9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sgdt.c
@@ -0,0 +1,70 @@
+/*[
+
+sgdt.c
+
+LOCAL CHAR SccsID[]="@(#)sgdt.c 1.5 02/09/94";
+
+SGDT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sgdt.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* SGDT16 - store the GDT register (16-bit operand-size form):
   op1[0] <- GDT limit, op1[1] <- GDT base. */
GLOBAL VOID
SGDT16
#ifdef ANSI
   (
   IU32 op1[2]	/* dst (limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   /*
      NB. The Intel manual says the top 8 bits will be stored as zeros;
      I think they mean this only if it was loaded with a 24-bit (286
      like) value. Otherwise it just stores what was loaded.
      It might be that it always stores 'FF' like 286, this needs
      checking.
    */
   op1[0] = GET_STAR_LIMIT(GDT_REG);
   op1[1] = GET_STAR_BASE(GDT_REG);
   }
+
/* SGDT32 - store the GDT register (32-bit operand-size form):
   op1[0] <- GDT limit, op1[1] <- GDT base (full value, no masking). */
GLOBAL VOID
SGDT32
#ifdef ANSI
   (
   IU32 op1[2]	/* dst (limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   op1[0] = GET_STAR_LIMIT(GDT_REG);
   op1[1] = GET_STAR_BASE(GDT_REG);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sgdt.h b/private/mvdm/softpc.new/base/ccpu386/sgdt.h
new file mode 100644
index 000000000..455f7ecad
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sgdt.h
@@ -0,0 +1,23 @@
+/*
+ sgdt.h
+
+ Define all SGDT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sgdt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SGDT16
+
+IPT1(
+ IU32, op1[2]
+
+ );
+
+IMPORT VOID SGDT32
+
+IPT1(
+ IU32, op1[2]
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/shl.c b/private/mvdm/softpc.new/base/ccpu386/shl.c
new file mode 100644
index 000000000..8c849adbc
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shl.c
@@ -0,0 +1,88 @@
+/*[
+
+shl.c
+
+LOCAL CHAR SccsID[]="@(#)shl.c 1.5 02/09/94";
+
+SHL CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <shl.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'shl'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SHL
+
+IFN3(
+ IU32 *, pop1, /* pntr to dst/src operand */
+ IU32, op2, /* shift count operand */
+ IUM8, op_sz /* 8, 16 or 32-bit */
+ )
+
+
+ {
+ IU32 result;
+ IU32 msb;
+ ISM32 new_of;
+
+ /* only use lower five bits of count */
+ if ( (op2 &= 0x1f) == 0 )
+ return;
+
+ msb = SZ2MSB(op_sz);
+
+ /*
+ ==== =================
+ |CF| <-- | | | | | | | | | <-- 0
+ ==== =================
+ */
+ result = *pop1 << op2 - 1; /* Do all but last shift */
+ SET_CF((result & msb) != 0); /* CF = MSB */
+ result = result << 1 & SZ2MASK(op_sz); /* Do final shift */
+ SET_PF(pf_table[result & BYTE_MASK]);
+ SET_ZF(result == 0);
+ SET_SF((result & msb) != 0); /* SF = MSB */
+
+ /* OF = CF ^ SF(MSB) */
+ new_of = GET_CF() ^ GET_SF();
+
+ if ( op2 == 1 )
+ {
+ SET_OF(new_of);
+ }
+ else
+ {
+ do_multiple_shiftrot_of(new_of);
+ }
+
+ /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+ SET_AF(UNDEFINED_FLAG);
+#endif
+
+ *pop1 = result; /* Return answer */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/shl.h b/private/mvdm/softpc.new/base/ccpu386/shl.h
new file mode 100644
index 000000000..a76d75434
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shl.h
@@ -0,0 +1,18 @@
+/*
+ shl.h
+
+ Define all SHL CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)shl.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SHL
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/shld.c b/private/mvdm/softpc.new/base/ccpu386/shld.c
new file mode 100644
index 000000000..f7e3e24a1
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shld.c
@@ -0,0 +1,125 @@
+/*[
+
+shld.c
+
+LOCAL CHAR SccsID[]="@(#)shld.c 1.6 09/02/94";
+
+SHLD CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <shld.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'shld'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+SHLD
+
+IFN4(
+ IU32 *, pop1, /* pntr to dst/lsrc operand */
+ IU32, op2, /* rsrc operand */
+ IU32, op3, /* shift count operand */
+ IUM8, op_sz /* 16 or 32-bit */
+ )
+
+
+ {
+ IU32 result;
+ IU32 msb;
+ ISM32 new_of;
+
+ /* only use lower five bits of count, ie modulo 32 */
+ if ( (op3 &= 0x1f) == 0 )
+ return;
+
+ /*
+ NB. Intel doc. says that if op3 >= op_sz then the operation
+ is undefined. In practice if op_sz is 32 then as op3 is taken
+ modulo 32 it can never be in the undefined range and if op_sz
+ is 16 the filler bits from op2 are 'recycled' for counts of 16
+ and above.
+ */
+
+ /*
+ ==== ================= =================
+ |CF| <-- | | | |op1| | | | <-- | | | |op2| | | |
+ ==== ================= =================
+ */
+
+ if ( op_sz == 16 )
+ {
+ op2 = op2 << 16 | op2; /* Double up filler bits */
+ }
+
+ /* Do all but last shift */
+ op3 = op3 - 1; /* op3 now in range 0 - 30 */
+ if ( op3 != 0 )
+ {
+ result = *pop1 << op3 | op2 >> 32-op3;
+ op2 = op2 << op3;
+ }
+ else
+ {
+ result = *pop1;
+ }
+
+ /* Last shift will put MSB into carry */
+ msb = SZ2MSB(op_sz);
+ SET_CF((result & msb) != 0);
+
+ /* Now do final shift */
+ result = result << 1 | op2 >> 31;
+ result = result & SZ2MASK(op_sz);
+
+ SET_PF(pf_table[result & 0xff]);
+ SET_ZF(result == 0);
+ SET_SF((result & msb) != 0);
+
+ /* OF set if sign changes */
+ new_of = GET_CF() ^ GET_SF();
+
+ if ( op3 == 0 ) /* NB Count has been decremented! */
+ {
+ SET_OF(new_of);
+ }
+ else
+ {
+#ifdef SET_UNDEFINED_SHxD_FLAG
+ /* Set OF to changed SF(original) and SF(result) */
+ new_of = ((result ^ *pop1) & SZ2MSB(op_sz)) != 0;
+ SET_OF(new_of);
+#else /* SET_UNDEFINED_SHxD_FLAG */
+ do_multiple_shiftrot_of(new_of);
+#endif /* SET_UNDEFINED_SHxD_FLAG */
+ }
+
+ /* Set undefined flag(s) */
+#ifdef SET_UNDEFINED_FLAG
+ SET_AF(UNDEFINED_FLAG);
+#endif
+
+ *pop1 = result; /* Return answer */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/shld.h b/private/mvdm/softpc.new/base/ccpu386/shld.h
new file mode 100644
index 000000000..76dca6c2f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shld.h
@@ -0,0 +1,19 @@
+/*
+ shld.h
+
+ SHLD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)shld.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SHLD
+
+IPT4(
+ IU32 *, pop1,
+ IU32, op2,
+ IU32, op3,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/shr.c b/private/mvdm/softpc.new/base/ccpu386/shr.c
new file mode 100644
index 000000000..43c774d13
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shr.c
@@ -0,0 +1,87 @@
+/*[
+
+shr.c
+
+LOCAL CHAR SccsID[]="@(#)shr.c 1.5 02/09/94";
+
+SHR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <shr.h>
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Generic - one size fits all 'shr'.                                   */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Logical right shift of *pop1 by op2; zeros enter at the top, the
   last bit shifted out becomes CF.  Result is written back via pop1. */
GLOBAL VOID
SHR

IFN3(
	IU32 *, pop1,	/* pntr to dst/src operand */
	IU32, op2,	/* shift count operand */
	IUM8, op_sz	/* 8, 16 or 32-bit */
   )


   {
   IU32 prelim;	/* value after all but the final shift */
   IU32 result;
   ISM32 new_of;

   /* only use lower five bits of count */
   /* A zero count leaves the operand and all flags untouched. */
   if ( (op2 &= 0x1f) == 0 )
      return;

   /*
	    =================    ====
	0 --> | | | | | | | | | --> |CF|
	    =================    ====
    */
   /* NB: '-' binds tighter than '>>', so this is *pop1 >> (op2 - 1). */
   prelim = *pop1 >> op2 - 1;	/* Do all but last shift */
   SET_CF((prelim & BIT0_MASK) != 0);	/* CF = Bit 0 */

   /* OF = MSB of operand */
   /* (taken from the pre-final-shift value, before the top bit moves) */
   new_of = (prelim & SZ2MSB(op_sz)) != 0;

   result = prelim >> 1;	/* Do final shift */
   SET_PF(pf_table[result & BYTE_MASK]);
   SET_ZF(result == 0);
   SET_SF(0);	/* top bit is always zero after a logical right shift */

   if ( op2 == 1 )
      {
      SET_OF(new_of);
      }
   else
      {
      do_multiple_shiftrot_of(new_of);
      }

   /* Set undefined flag(s) */
#ifdef SET_UNDEFINED_FLAG
   SET_AF(UNDEFINED_FLAG);
#endif

   *pop1 = result;	/* Return answer */
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/shr.h b/private/mvdm/softpc.new/base/ccpu386/shr.h
new file mode 100644
index 000000000..942f5348c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shr.h
@@ -0,0 +1,18 @@
+/*
+ shr.h
+
+ Define all SHR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)shr.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SHR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/shrd.c b/private/mvdm/softpc.new/base/ccpu386/shrd.c
new file mode 100644
index 000000000..0d519768a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shrd.c
@@ -0,0 +1,127 @@
+/*[
+
+shrd.c
+
+LOCAL CHAR SccsID[]="@(#)shrd.c 1.6 09/02/94";
+
+SHRD CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <shrd.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Generic - one size fits all 'shrd'.                                  */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Double-precision right shift: *pop1 is shifted right by op3, with
   bits from op2 shifted in at the top.  Flags are set from the
   result; the shifted value is written back through pop1. */
GLOBAL VOID
SHRD

IFN4(
	IU32 *, pop1,	/* pntr to dst/lsrc operand */
	IU32, op2,	/* rsrc operand */
	IU32, op3,	/* shift count operand */
	IUM8, op_sz	/* 16 or 32-bit */
   )


   {
   IU32 result;
   IU32 msb;	/* first a mask, then the saved original MSB (0/1) */
   ISM32 new_of;

   /* only use lower five bits of count */
   if ( (op3 &= 0x1f) == 0 )
      return;

   /*
      NB. Intel doc. says that if op3 >= op_sz then the operation
      is undefined. In practice if op_sz is 32 then as op3 is taken
      modulo 32 it can never be in the undefined range and if op_sz
      is 16 the filler bits from op2 are 'recycled' for counts of 16
      and above.
    */

   /*
	 =================    =================    ====
	 | | | |op2| | | | --> | | | |op1| | | | --> |CF|
	 =================    =================    ====
    */

   if ( op_sz == 16 )
      {
      /* NB: this writes the doubled-up value back through pop1, so
         *pop1 below (including in the SET_UNDEFINED_SHxD_FLAG path)
         refers to the 32-bit doubled form, not the caller's raw word. */
      *pop1 = op2 << 16 | *pop1;	/* Double up filler bits */
      }

   /* Do all but last shift */
   op3 = op3 - 1;	/* op3 now in range 0 - 30 */
   if ( op3 != 0 )
      {
      /* NB: '-' binds tighter than '<<', so this is op2 << (32 - op3) */
      result = *pop1 >> op3 | op2 << 32-op3;
      op2 = op2 >> op3;
      }
   else
      {
      result = *pop1;
      }

   SET_CF((result & BIT0_MASK) != 0);	/* last shift puts LSB in CF */

   /* save msb */
   /* (the sign bit before the final shift, reduced to 0 or 1) */
   msb = SZ2MSB(op_sz);
   msb = (result & msb) != 0;

   /* Now do final shift */
   result = result >> 1 | op2 << 31;
   result = result & SZ2MASK(op_sz);

   SET_PF(pf_table[result & 0xff]);
   SET_ZF(result == 0);
   SET_SF((result & SZ2MSB(op_sz)) != 0);

   /* set OF if sign changes */
   new_of = msb ^ GET_SF();

   if ( op3 == 0 )	/* NB Count has been decremented! */
      {
      SET_OF(new_of);
      }
   else
      {
#ifdef SET_UNDEFINED_SHxD_FLAG
      /* Set OF to changed SF(original) and SF(result) */
      new_of = ((result ^ *pop1) & SZ2MSB(op_sz)) != 0;
      SET_OF(new_of);
#else /* SET_UNDEFINED_SHxD_FLAG */
      do_multiple_shiftrot_of(new_of);
#endif /* SET_UNDEFINED_SHxD_FLAG */
      }

   /* Set undefined flag(s) */
#ifdef SET_UNDEFINED_FLAG
   SET_AF(UNDEFINED_FLAG);
#endif

   *pop1 = result;	/* Return answer */
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/shrd.h b/private/mvdm/softpc.new/base/ccpu386/shrd.h
new file mode 100644
index 000000000..e4a3c6ae8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/shrd.h
@@ -0,0 +1,19 @@
+/*
+ shrd.h
+
+ SHRD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)shrd.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SHRD
+
+IPT4(
+ IU32 *, pop1,
+ IU32, op2,
+ IU32, op3,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/sidt.c b/private/mvdm/softpc.new/base/ccpu386/sidt.c
new file mode 100644
index 000000000..6e1911c1d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sidt.c
@@ -0,0 +1,71 @@
+/*[
+
+sidt.c
+
+LOCAL CHAR SccsID[]="@(#)sidt.c 1.5 02/09/94";
+
+SIDT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sidt.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* SIDT16 - store the IDT register (16-bit operand-size form):
   op1[0] <- IDT limit, op1[1] <- IDT base. */
GLOBAL VOID
SIDT16
#ifdef ANSI
   (
   IU32 op1[2]	/* dst (limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   /*
      NB. The Intel manual says the top 8 bits will be stored as zeros;
      I think they mean this only if it was loaded with a 24-bit (286
      like) value. Otherwise it just stores what was loaded.
      It might be that it always stores 'FF' like 286, this needs
      checking.
    */

   op1[0] = GET_STAR_LIMIT(IDT_REG);
   op1[1] = GET_STAR_BASE(IDT_REG);
   }
+
/* SIDT32 - store the IDT register (32-bit operand-size form):
   op1[0] <- IDT limit, op1[1] <- IDT base (full value, no masking). */
GLOBAL VOID
SIDT32
#ifdef ANSI
   (
   IU32 op1[2]	/* dst (limit:base pair) operand */
   )
#else
   (op1)
   IU32 op1[2];
#endif
   {
   op1[0] = GET_STAR_LIMIT(IDT_REG);
   op1[1] = GET_STAR_BASE(IDT_REG);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sidt.h b/private/mvdm/softpc.new/base/ccpu386/sidt.h
new file mode 100644
index 000000000..37e94ca8d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sidt.h
@@ -0,0 +1,23 @@
+/*
+ sidt.h
+
+ Define all SIDT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sidt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SIDT16
+
+IPT1(
+ IU32, op1[2]
+
+ );
+
+IMPORT VOID SIDT32
+
+IPT1(
+ IU32, op1[2]
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/sldt.c b/private/mvdm/softpc.new/base/ccpu386/sldt.c
new file mode 100644
index 000000000..08449a102
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sldt.c
@@ -0,0 +1,45 @@
+/*[
+
+sldt.c
+
+LOCAL CHAR SccsID[]="@(#)sldt.c 1.5 02/09/94";
+
+SLDT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sldt.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* SLDT - store the current LDT selector into the destination. */
GLOBAL VOID
SLDT

IFN1(
	IU32 *, pop1	/* pntr to dst operand */
   )


   {
   *pop1 = GET_LDT_SELECTOR();
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sldt.h b/private/mvdm/softpc.new/base/ccpu386/sldt.h
new file mode 100644
index 000000000..97dfd5e94
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sldt.h
@@ -0,0 +1,16 @@
+/*
+ sldt.h
+
+ Define all SLDT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sldt.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SLDT
+
+IPT1(
+ IU32 *, pop1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/smsw.c b/private/mvdm/softpc.new/base/ccpu386/smsw.c
new file mode 100644
index 000000000..c48037992
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/smsw.c
@@ -0,0 +1,43 @@
+/*[
+
+smsw.c
+
+LOCAL CHAR SccsID[]="@(#)smsw.c 1.5 02/09/94";
+
+SMSW CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <smsw.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
/* SMSW - store the machine status word into the destination. */
GLOBAL VOID
SMSW

IFN1(
	IU32 *, pop1	/* pntr to dst operand */
   )

   {
   *pop1 = GET_MSW();
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/smsw.h b/private/mvdm/softpc.new/base/ccpu386/smsw.h
new file mode 100644
index 000000000..054f53048
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/smsw.h
@@ -0,0 +1,16 @@
+/*
+ smsw.h
+
+ Define all SMSW CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)smsw.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SMSW
+
+IPT1(
+ IU32 *, pop1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/sources b/private/mvdm/softpc.new/base/ccpu386/sources
new file mode 100644
index 000000000..19214a837
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sources
@@ -0,0 +1,183 @@
+!IF 0
+
+Copyright (c) 1989 Microsoft Corporation
+
+Module Name:
+
+ sources.
+
+Abstract:
+
+ This file specifies the target component being built and the list of
+ sources files needed to build that component. Also specifies optional
+ compiler switches and libraries that are unique for the component being
+ built.
+
+
+Author:
+
+ Steve Wood (stevewo) 12-Apr-1990
+
+NOTE: Commented description of this file is in \nt\bak\bin\sources.tpl
+
+!ENDIF
+
+
+MAJORCOMP=spclib
+MINORCOMP=ccpu
+
+TARGETNAME=ccpu386
+
+TARGETPATH=obj
+
+
+NTPROFILEINPUT=yes
+
+# Pick one of the following and delete the others
+TARGETTYPE=LIBRARY
+
+
+TARGETLIBS=
+
+SOFTPC_TREE=$(BASEDIR)\private\mvdm\softpc.new
+
+INCLUDES=$(SOFTPC_TREE)\Ccpu386;$(SOFTPC_TREE)\host\inc;$(SOFTPC_TREE)\base\inc;$(SOFTPC_TREE)\host\genPg\inc
+GPSIZE=32
+
+
+
+SOURCES= aaa.c \
+ aad.c \
+ aam.c \
+ aas.c \
+ adc.c \
+ add.c \
+ and.c \
+ arpl.c \
+ bound.c \
+ bsf.c \
+ bsr.c \
+ bswap.c \
+ bt.c \
+ btc.c \
+ btr.c \
+ bts.c \
+ call.c \
+ cbw.c \
+ ccpupig.c \
+ ccpusas4.c \
+ cdq.c \
+ clc.c \
+ cld.c \
+ cli.c \
+ clts.c \
+ cmc.c \
+ cmp.c \
+ cmpxchg.c \
+ cwd.c \
+ cwde.c \
+ c_addr.c \
+ c_bsic.c \
+ c_debug.c \
+ c_div64.c \
+ c_getset.c \
+ c_intr.c \
+ c_main.c \
+ c_mul64.c \
+ c_neg64.c \
+ c_page.c \
+ c_prot.c \
+ c_reg.c \
+ c_seg.c \
+ c_stack.c \
+ c_tlb.c \
+ c_tsksw.c \
+ c_xcptn.c \
+ c_xfer.c \
+ c_xtrn.c \
+ daa.c \
+ das.c \
+ dec.c \
+ div.c \
+ enter.c \
+ fpu.c \
+ idiv.c \
+ imul.c \
+ in.c \
+ inc.c \
+ into.c \
+ intx.c \
+ invd.c \
+ invlpg.c \
+ iret.c \
+ jcxz.c \
+ jmp.c \
+ jxx.c \
+ lahf.c \
+ lar.c \
+ lea.c \
+ leave.c \
+ lgdt.c \
+ lidt.c \
+ lldt.c \
+ lmsw.c \
+ loopxx.c \
+ lsl.c \
+ ltr.c \
+ lxs.c \
+ mov.c \
+ movsx.c \
+ mul.c \
+ neg.c \
+ nop.c \
+ not.c \
+ or.c \
+ out.c \
+ pop.c \
+ popa.c \
+ popf.c \
+ push.c \
+ pusha.c \
+ pushf.c \
+ rcl.c \
+ rcr.c \
+ ret.c \
+ rol.c \
+ ror.c \
+ rsrvd.c \
+ sahf.c \
+ sar.c \
+ sbb.c \
+ setxx.c \
+ sgdt.c \
+ shl.c \
+ shld.c \
+ shr.c \
+ shrd.c \
+ sidt.c \
+ sldt.c \
+ smsw.c \
+ stc.c \
+ std.c \
+ sti.c \
+ str.c \
+ stubs.c \
+ sub.c \
+ test.c \
+ verr.c \
+ verw.c \
+ wait.c \
+ wbinvd.c \
+ xadd.c \
+ xchg.c \
+ xlat.c \
+ xor.c \
+ zfrsrvd.c \
+ sasCdef.c \
+ ntstubs.c \
+ ntthread.c
+
+
+!INCLUDE ccpudefs.inc
+
+UMTYPE=console
diff --git a/private/mvdm/softpc.new/base/ccpu386/stc.c b/private/mvdm/softpc.new/base/ccpu386/stc.c
new file mode 100644
index 000000000..ce296f669
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/stc.c
@@ -0,0 +1,39 @@
+/*[
+
+stc.c
+
+LOCAL CHAR SccsID[]="@(#)stc.c 1.5 02/09/94";
+
+STC CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <stc.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/* STC - set the carry flag; no other flags are touched here. */
GLOBAL VOID
STC()
   {
   SET_CF(1);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/stc.h b/private/mvdm/softpc.new/base/ccpu386/stc.h
new file mode 100644
index 000000000..2fed702a2
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/stc.h
@@ -0,0 +1,11 @@
+/*
+ stc.h
+
+ Define all STC CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)stc.h 1.5 09/01/94";
+ */
+
+IMPORT VOID STC IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/std.c b/private/mvdm/softpc.new/base/ccpu386/std.c
new file mode 100644
index 000000000..504eb4811
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/std.c
@@ -0,0 +1,39 @@
+/*[
+
+std.c
+
+LOCAL CHAR SccsID[]="@(#)std.c 1.5 02/09/94";
+
+STD CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <std.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/* STD - set the direction flag; no other flags are touched here. */
GLOBAL VOID
STD()
   {
   SET_DF(1);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/std.h b/private/mvdm/softpc.new/base/ccpu386/std.h
new file mode 100644
index 000000000..b2051c7d5
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/std.h
@@ -0,0 +1,11 @@
+/*
+ std.h
+
+ Define all STD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)std.h 1.5 09/01/94";
+ */
+
+IMPORT VOID STD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/sti.c b/private/mvdm/softpc.new/base/ccpu386/sti.c
new file mode 100644
index 000000000..fe1b2cf3f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sti.c
@@ -0,0 +1,39 @@
+/*[
+
+sti.c
+
+LOCAL CHAR SccsID[]="@(#)sti.c 1.5 02/09/94";
+
+STI CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sti.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES STARTS HERE.
+ =====================================================================
+ */
+
+
/* STI - set the interrupt-enable flag.
   NOTE(review): real hardware delays interrupt recognition for one
   instruction after STI; presumably the main decode loop handles that
   shadow -- confirm in c_main.c. */
GLOBAL VOID
STI()
   {
   SET_IF(1);
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sti.h b/private/mvdm/softpc.new/base/ccpu386/sti.h
new file mode 100644
index 000000000..8ba9a0be2
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sti.h
@@ -0,0 +1,11 @@
+/*
+ sti.h
+
+ Define all STI CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sti.h 1.5 09/01/94";
+ */
+
+IMPORT VOID STI IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/str.c b/private/mvdm/softpc.new/base/ccpu386/str.c
new file mode 100644
index 000000000..63ffe3f10
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/str.c
@@ -0,0 +1,45 @@
+/*[
+
+str.c
+
+LOCAL CHAR SccsID[]="@(#)str.c 1.5 02/09/94";
+
+STR CPU Functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <str.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
/* STR - store the current task register (TR) selector into the
   destination. */
GLOBAL VOID
STR

IFN1(
	IU32 *, pop1	/* pntr to dst operand */
   )


   {
   *pop1 = GET_TR_SELECTOR();
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/str.h b/private/mvdm/softpc.new/base/ccpu386/str.h
new file mode 100644
index 000000000..c011c2797
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/str.h
@@ -0,0 +1,16 @@
+/*
+ str.h
+
+ Define all STR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)str.h 1.4 02/09/94";
+ */
+
+IMPORT VOID STR
+
+IPT1(
+ IU32 *, pop1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/stubs.c b/private/mvdm/softpc.new/base/ccpu386/stubs.c
new file mode 100644
index 000000000..70e526e7a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/stubs.c
@@ -0,0 +1,21 @@
+#include "insignia.h"
+#include "host_def.h"
+
+/* @(#)stubs.c 1.1 06/26/94
+
+ Stubs file for Prod Ccpu (Ccpu too hard to deyodarise)
+*/
+
/* Empty stand-ins for the Yoda debugger hooks (see file header:
   "Ccpu too hard to deyodarise"); the production Ccpu build links
   these instead of the real checkers. */
void check_I IFN0()
{
}

void check_D IFN0()
{
}

/* No debugger to drop into in the production build. */
void force_yoda()
{
}

/* Production build never enables the debugger's condition checks. */
IBOOL do_condition_checks = FALSE;
diff --git a/private/mvdm/softpc.new/base/ccpu386/sub.c b/private/mvdm/softpc.new/base/ccpu386/sub.c
new file mode 100644
index 000000000..1b0c314ca
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sub.c
@@ -0,0 +1,80 @@
+/*[
+
+sub.c
+
+LOCAL CHAR SccsID[]="@(#)sub.c 1.5 02/09/94";
+
+SUB CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <sub.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Generic - one size fits all 'sub'.                                   */
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/* Subtract op2 from *pop1 at the given operand width, set the
   arithmetic flags (OF/CF/PF/ZF/SF/AF) and write the result back. */
GLOBAL VOID
SUB

IFN3(
	IU32 *, pop1,	/* pntr to dst/lsrc operand */
	IU32, op2,	/* rsrc operand */
	IUM8, op_sz	/* 8, 16 or 32-bit */
   )


   {
   IU32 result;
   IU32 carry;	/* XOR of operands and result: carry-out at each bit */
   IU32 msb;	/* mask selecting the operand's most significant bit */
   IU32 op1_msb;	/* sign bits of the operands/result, reduced to 0/1 */
   IU32 op2_msb;
   IU32 res_msb;

   msb = SZ2MSB(op_sz);

   /* NB: '-' binds tighter than '&', so this is (*pop1 - op2) masked
      to the operand width. */
   result = *pop1 - op2 & SZ2MASK(op_sz);	/* Do operation */
   op1_msb = (*pop1 & msb) != 0;	/* Isolate all msb's */
   op2_msb = (op2 & msb) != 0;
   res_msb = (result & msb) != 0;
   carry = *pop1 ^ op2 ^ result;	/* Isolate carries */
   /* Determine flags */
   /*
      OF = (op1 == !op2) & (op1 ^ res)
      ie if operand signs differ and res sign different to original
      destination set OF.
    */
   SET_OF((op1_msb != op2_msb) & (op1_msb ^ res_msb));
   /*
      Formally:-    CF = !op1 & op2 | res & !op1 | res & op2
      Equivalently:- CF = OF ^ op1 ^ op2 ^ res
    */
   SET_CF(((carry & msb) != 0) ^ GET_OF());
   SET_PF(pf_table[result & BYTE_MASK]);
   SET_ZF(result == 0);
   SET_SF((result & msb) != 0);	/* SF = MSB */
   SET_AF((carry & BIT4_MASK) != 0);	/* AF = Bit 4 carry */
   *pop1 = result;	/* Return answer */
   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/sub.h b/private/mvdm/softpc.new/base/ccpu386/sub.h
new file mode 100644
index 000000000..6bbbedb8f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/sub.h
@@ -0,0 +1,18 @@
+/*
+ sub.h
+
+ Define all SUB CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)sub.h 1.4 02/09/94";
+ */
+
+IMPORT VOID SUB
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/test.c b/private/mvdm/softpc.new/base/ccpu386/test.c
new file mode 100644
index 000000000..7d975a5c0
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/test.c
@@ -0,0 +1,58 @@
+/*[
+
+test.c
+
+LOCAL CHAR SccsID[]="@(#)test.c 1.5 02/09/94";
+
+TEST CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <test.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic - one size fits all 'test'. */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+TEST
+
+IFN3(
+ IU32, op1, /* lsrc operand */
+ IU32, op2, /* rsrc operand */
+ IUM8, op_sz /* 8, 16 or 32-bit */
+ )
+
+
+ {
+ IU32 result;
+
+ result = op1 & op2; /* Do operation */
+ SET_CF(0); /* Determine flags */
+ SET_OF(0);
+ SET_AF(0);
+ SET_PF(pf_table[result & BYTE_MASK]);
+ SET_ZF(result == 0);
+ SET_SF((result & SZ2MSB(op_sz)) != 0); /* SF = MSB */
+ }
diff --git a/private/mvdm/softpc.new/base/ccpu386/test.h b/private/mvdm/softpc.new/base/ccpu386/test.h
new file mode 100644
index 000000000..df677c260
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/test.h
@@ -0,0 +1,18 @@
+/*
+ test.h
+
+ Define all Arithmetic/Logical CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)test.h 1.4 02/09/94";
+ */
+
+IMPORT VOID TEST
+
+IPT3(
+ IU32, op1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/univer_c.h b/private/mvdm/softpc.new/base/ccpu386/univer_c.h
new file mode 100644
index 000000000..c47b507a7
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/univer_c.h
@@ -0,0 +1,4 @@
+#ifndef _Univer_c_h
+#define _Univer_c_h
+#define ImpossibleConstraint (-1)
+#endif /* ! _Univer_c_h */
diff --git a/private/mvdm/softpc.new/base/ccpu386/verr.c b/private/mvdm/softpc.new/base/ccpu386/verr.c
new file mode 100644
index 000000000..25ea4135f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/verr.c
@@ -0,0 +1,100 @@
+/*[
+
+verr.c
+
+LOCAL CHAR SccsID[]="@(#)verr.c 1.5 02/09/94";
+
+VERR CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <verr.h>
+#include <c_page.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+VERR	/* Verify segment for Reading: ZF=1 iff selector readable at CPL/RPL */
+
+IFN1(
+	IU32, op1	/* src(selector) operand */
+   )
+
+
+   {
+   BOOL readable = FALSE;	/* pessimistic default => ZF cleared below */
+   IU32 descr;			/* address of descriptor within GDT/LDT */
+   IU8 AR;			/* access rights byte of that descriptor */
+
+   if ( !selector_outside_GDT_LDT((IU16)op1, &descr) )
+      {
+      /* get access rights (AR byte is at offset 5 in the descriptor) */
+      AR = spr_read_byte(descr+5);
+
+      /* Handle each type of descriptor */
+      switch ( descriptor_super_type((IU16)AR) )
+	 {
+      case INVALID:
+      case AVAILABLE_TSS:
+      case LDT_SEGMENT:
+      case BUSY_TSS:
+      case CALL_GATE:
+      case TASK_GATE:
+      case INTERRUPT_GATE:
+      case TRAP_GATE:
+      case XTND_AVAILABLE_TSS:
+      case XTND_BUSY_TSS:
+      case XTND_CALL_GATE:
+      case XTND_INTERRUPT_GATE:
+      case XTND_TRAP_GATE:
+      case CONFORM_NOREAD_CODE:
+      case NONCONFORM_NOREAD_CODE:
+	 break;   /* never readable: system segments, gates, execute-only code */
+
+      case CONFORM_READABLE_CODE:
+	 readable = TRUE;   /* always readable, regardless of privilege */
+	 break;
+
+      case EXPANDUP_READONLY_DATA:
+      case EXPANDUP_WRITEABLE_DATA:
+      case EXPANDDOWN_READONLY_DATA:
+      case EXPANDDOWN_WRITEABLE_DATA:
+      case NONCONFORM_READABLE_CODE:
+	 /* access depends on privilege, it is required that
+	    DPL >= CPL and DPL >= RPL */
+	 if ( GET_AR_DPL(AR) >= GET_CPL() &&
+	      GET_AR_DPL(AR) >= GET_SELECTOR_RPL(op1) )
+	    readable = TRUE;	/* NOTE(review): op1 not cast to IU16 here,
+				   unlike calls above - presumably the RPL
+				   macro masks low bits; confirm */
+	 break;
+	 }
+      }
+
+   if ( readable )
+      {
+      SET_ZF(1);	/* only visible effect of VERR is ZF */
+      }
+   else
+      {
+      SET_ZF(0);
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/verr.h b/private/mvdm/softpc.new/base/ccpu386/verr.h
new file mode 100644
index 000000000..218d3cc3b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/verr.h
@@ -0,0 +1,16 @@
+/*
+ verr.h
+
+ Define all VERR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)verr.h 1.4 02/09/94";
+ */
+
+IMPORT VOID VERR
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/verw.c b/private/mvdm/softpc.new/base/ccpu386/verw.c
new file mode 100644
index 000000000..a1412f3e3
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/verw.c
@@ -0,0 +1,96 @@
+/*[
+
+verw.c
+
+LOCAL CHAR SccsID[]="@(#)verw.c 1.5 02/09/94";
+
+VERW CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <verw.h>
+#include <c_page.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+VERW	/* Verify segment for Writing: ZF=1 iff selector writeable at CPL/RPL */
+
+IFN1(
+	IU32, op1	/* src(selector) operand */
+   )
+
+
+   {
+   BOOL writeable = FALSE;	/* pessimistic default => ZF cleared below */
+   IU32 descr;			/* address of descriptor within GDT/LDT */
+   IU8 AR;			/* access rights byte of that descriptor */
+
+   if ( !selector_outside_GDT_LDT((IU16)op1, &descr) )
+      {
+      /* get access rights (AR byte is at offset 5 in the descriptor) */
+      AR = spr_read_byte(descr+5);
+
+      switch ( descriptor_super_type((IU16)AR) )
+	 {
+      case INVALID:
+      case AVAILABLE_TSS:
+      case LDT_SEGMENT:
+      case BUSY_TSS:
+      case CALL_GATE:
+      case TASK_GATE:
+      case INTERRUPT_GATE:
+      case TRAP_GATE:
+      case XTND_AVAILABLE_TSS:
+      case XTND_BUSY_TSS:
+      case XTND_CALL_GATE:
+      case XTND_INTERRUPT_GATE:
+      case XTND_TRAP_GATE:
+      case CONFORM_NOREAD_CODE:
+      case CONFORM_READABLE_CODE:
+      case NONCONFORM_NOREAD_CODE:
+      case NONCONFORM_READABLE_CODE:
+      case EXPANDUP_READONLY_DATA:
+      case EXPANDDOWN_READONLY_DATA:
+	 break;   /* never writeable: only writeable data segments qualify */
+
+      case EXPANDUP_WRITEABLE_DATA:
+      case EXPANDDOWN_WRITEABLE_DATA:
+	 /* access depends on privilege, it is required that
+	    DPL >= CPL and DPL >= RPL */
+	 if ( GET_AR_DPL(AR) >= GET_CPL() &&
+	      GET_AR_DPL(AR) >= GET_SELECTOR_RPL(op1) )
+	    writeable = TRUE;	/* NOTE(review): op1 not cast to IU16 here,
+				   unlike calls above - presumably the RPL
+				   macro masks low bits; confirm */
+	 break;
+	 }
+      }
+
+   if ( writeable )
+      {
+      SET_ZF(1);	/* only visible effect of VERW is ZF */
+      }
+   else
+      {
+      SET_ZF(0);
+      }
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/verw.h b/private/mvdm/softpc.new/base/ccpu386/verw.h
new file mode 100644
index 000000000..605fb1ea9
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/verw.h
@@ -0,0 +1,16 @@
+/*
+ verw.h
+
+ Define all VERW CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)verw.h 1.4 02/09/94";
+ */
+
+IMPORT VOID VERW
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/vglob.c b/private/mvdm/softpc.new/base/ccpu386/vglob.c
new file mode 100644
index 000000000..d1f01ac2f
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/vglob.c
@@ -0,0 +1,451 @@
+/*[
+*************************************************************************
+
+ Name: Vglob.c
+ Author: Simon Frost
+ Created: October 1994
+ Derived from: Vglob.edl
+ Sccs ID: @(#)Vglob.c 1.1 10/24/94
+ Purpose: EXTERNAL interface to VGLOB record.
+ Rewritten in C to save overhead of EDL/C context change
+ for one memory read/write.
+
+ (c)Copyright Insignia Solutions Ltd., 1993. All rights reserved.
+
+*************************************************************************
+]*/
+
+#include "insignia.h"
+#include "host_def.h"
+#include "Evid_c.h"
+#include "gdpvar.h"
+
+/*
+ * Note: no interfaces produced for the following 3.0 VGlob entries as
+ * unused in Evid.
+ * copy_func_pbp ( now video_base_lin_addr )
+ * route_reg1
+ * route_reg2
+ */
+
+/* {get,set}Videolatches still in EvPtrs.edl as required for pigging */
+
+GLOBAL void
+setVideorplane IFN1(IU8 *, value)	/* set VGA read-plane pointer in VGLOB */
+{
+	GLOBAL_VGAGlobals.VGA_rplane = value;
+}
+GLOBAL IU8 *
+getVideorplane IFN0()			/* get VGA read-plane pointer from VGLOB */
+{
+	return(GLOBAL_VGAGlobals.VGA_rplane);
+}
+
+GLOBAL void
+setVideowplane IFN1(IU8 *, value)	/* set VGA write-plane pointer in VGLOB */
+{
+	GLOBAL_VGAGlobals.VGA_wplane = value;
+}
+GLOBAL IU8 *
+getVideowplane IFN0()			/* get VGA write-plane pointer from VGLOB */
+{
+	return(GLOBAL_VGAGlobals.VGA_wplane);
+}
+
+GLOBAL void
+setVideoscratch IFN1(IU8 *, value)
+{
+ GLOBAL_VGAGlobals.scratch = value;
+}
+GLOBAL IU8 *
+getVideoscratch IFN0()
+{
+ return(GLOBAL_VGAGlobals.scratch);
+}
+
+GLOBAL void
+setVideosr_masked_val IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.sr_masked_val = value;
+}
+GLOBAL IU32
+getVideosr_masked_val IFN0()
+{
+ return(GLOBAL_VGAGlobals.sr_masked_val);
+}
+
+GLOBAL void
+setVideosr_nmask IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.sr_nmask = value;
+}
+
+GLOBAL IU32
+getVideosr_nmask IFN0()
+{
+ return(GLOBAL_VGAGlobals.sr_nmask);
+}
+
+GLOBAL void
+setVideodata_and_mask IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.data_and_mask = value;
+}
+
+GLOBAL IU32
+getVideodata_and_mask IFN0()
+{
+ return(GLOBAL_VGAGlobals.data_and_mask);
+}
+
+GLOBAL void
+setVideodata_xor_mask IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.data_xor_mask = value;
+}
+GLOBAL IU32
+getVideodata_xor_mask IFN0()
+{
+ return(GLOBAL_VGAGlobals.data_xor_mask);
+}
+
+GLOBAL void
+setVideolatch_xor_mask IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.latch_xor_mask = value;
+}
+GLOBAL IU32
+getVideolatch_xor_mask IFN0()
+{
+ return(GLOBAL_VGAGlobals.latch_xor_mask);
+}
+
+GLOBAL void
+setVideobit_prot_mask IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.bit_prot_mask = value;
+}
+GLOBAL IU32
+getVideobit_prot_mask IFN0()
+{
+ return(GLOBAL_VGAGlobals.bit_prot_mask);
+}
+
+GLOBAL void
+setVideoplane_enable IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.plane_enable = value;
+}
+GLOBAL IU32
+getVideoplane_enable IFN0()
+{
+ return(GLOBAL_VGAGlobals.plane_enable);
+}
+
+GLOBAL void
+setVideoplane_enable_mask IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.plane_enable_mask = value;
+}
+GLOBAL IU32
+getVideoplane_enable_mask IFN0()
+{
+ return(GLOBAL_VGAGlobals.plane_enable_mask);
+}
+
+GLOBAL void
+setVideosr_lookup IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.sr_lookup = value;
+}
+GLOBAL IUH *
+getVideosr_lookup IFN0()
+{
+ return(GLOBAL_VGAGlobals.sr_lookup);
+}
+
+GLOBAL void
+setVideofwd_str_read_addr IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.fwd_str_read_addr = value;
+}
+GLOBAL IUH *
+getVideofwd_str_read_addr IFN0()
+{
+ return(GLOBAL_VGAGlobals.fwd_str_read_addr);
+}
+
+GLOBAL void
+setVideobwd_str_read_addr IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.bwd_str_read_addr = value;
+}
+GLOBAL IUH *
+getVideobwd_str_read_addr IFN0()
+{
+ return(GLOBAL_VGAGlobals.bwd_str_read_addr);
+}
+
+GLOBAL void
+setVideodirty_total IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.dirty_total = value;
+}
+GLOBAL IU32
+getVideodirty_total IFN0()
+{
+ return(GLOBAL_VGAGlobals.dirty_total);
+}
+
+GLOBAL void
+setVideodirty_low IFN1(IS32, value)
+{
+ GLOBAL_VGAGlobals.dirty_low = value;
+}
+GLOBAL IS32
+getVideodirty_low IFN0()
+{
+ return(GLOBAL_VGAGlobals.dirty_low);
+}
+
+GLOBAL void
+setVideodirty_high IFN1(IS32, value)
+{
+ GLOBAL_VGAGlobals.dirty_high = value;
+}
+GLOBAL IS32
+getVideodirty_high IFN0()
+{
+ return(GLOBAL_VGAGlobals.dirty_high);
+}
+
+GLOBAL void
+setVideovideo_copy IFN1(IU8 *, value)
+{
+ GLOBAL_VGAGlobals.video_copy = value;
+}
+GLOBAL IU8 *
+getVideovideo_copy IFN0()
+{
+ return(GLOBAL_VGAGlobals.video_copy);
+}
+
+GLOBAL void
+setVideomark_byte IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.mark_byte = value;
+}
+GLOBAL IUH *
+getVideomark_byte IFN0()
+{
+ return(GLOBAL_VGAGlobals.mark_byte);
+}
+
+GLOBAL void
+setVideomark_word IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.mark_word = value;
+}
+GLOBAL IUH *
+getVideomark_word IFN0()
+{
+ return(GLOBAL_VGAGlobals.mark_word);
+}
+
+GLOBAL void
+setVideomark_string IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.mark_string = value;
+}
+GLOBAL IUH *
+getVideomark_string IFN0()
+{
+ return(GLOBAL_VGAGlobals.mark_string);
+}
+
+GLOBAL void
+setVideoread_shift_count IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.read_shift_count = value;
+}
+GLOBAL IU32
+getVideoread_shift_count IFN0()
+{
+ return(GLOBAL_VGAGlobals.read_shift_count);
+}
+
+GLOBAL void
+setVideoread_mapped_plane IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.read_mapped_plane = value;
+}
+GLOBAL IU32
+getVideoread_mapped_plane IFN0()
+{
+ return(GLOBAL_VGAGlobals.read_mapped_plane);
+}
+
+GLOBAL void
+setVideocolour_comp IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.colour_comp = value;
+}
+GLOBAL IU32
+getVideocolour_comp IFN0()
+{
+ return(GLOBAL_VGAGlobals.colour_comp);
+}
+
+GLOBAL void
+setVideodont_care IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.dont_care = value;
+}
+GLOBAL IU32
+getVideodont_care IFN0()
+{
+ return(GLOBAL_VGAGlobals.dont_care);
+}
+
+GLOBAL void
+setVideov7_bank_vid_copy_off IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.v7_bank_vid_copy_off = value;
+}
+GLOBAL IU32
+getVideov7_bank_vid_copy_off IFN0()
+{
+ return(GLOBAL_VGAGlobals.v7_bank_vid_copy_off);
+}
+
+GLOBAL void
+setVideoscreen_ptr IFN1(IU8 *, value)
+{
+ GLOBAL_VGAGlobals.screen_ptr = value;
+}
+GLOBAL IU8 *
+getVideoscreen_ptr IFN0()
+{
+ return(GLOBAL_VGAGlobals.screen_ptr);
+}
+
+GLOBAL void
+setVideorotate IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.rotate = value;
+}
+GLOBAL IU32
+getVideorotate IFN0()
+{
+ return(GLOBAL_VGAGlobals.rotate);
+}
+
+GLOBAL void
+setVideocalc_data_xor IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.calc_data_xor = value;
+}
+GLOBAL IU32
+getVideocalc_data_xor IFN0()
+{
+ return(GLOBAL_VGAGlobals.calc_data_xor);
+}
+
+GLOBAL void
+setVideocalc_latch_xor IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.calc_latch_xor = value;
+}
+GLOBAL IU32
+getVideocalc_latch_xor IFN0()
+{
+ return(GLOBAL_VGAGlobals.calc_latch_xor);
+}
+
+GLOBAL void
+setVideoread_byte_addr IFN1(IUH *, value)
+{
+ GLOBAL_VGAGlobals.read_byte_addr = value;
+}
+GLOBAL IUH *
+getVideoread_byte_addr IFN0()
+{
+ return(GLOBAL_VGAGlobals.read_byte_addr);
+}
+
+GLOBAL void
+setVideov7_fg_latches IFN1(IU32, value)
+{
+ GLOBAL_VGAGlobals.v7_fg_latches = value;
+}
+GLOBAL IU32
+getVideov7_fg_latches IFN0()
+{
+ return(GLOBAL_VGAGlobals.v7_fg_latches);
+}
+
+GLOBAL void
+setVideoGC_regs IFN1(IUH **, value)
+{
+ GLOBAL_VGAGlobals.GCRegs = value;
+}
+GLOBAL IUH **
+getVideoGC_regs IFN0()
+{
+ return(GLOBAL_VGAGlobals.GCRegs);
+}
+
+GLOBAL void
+setVideolast_GC_index IFN1(IU8, value)
+{
+ GLOBAL_VGAGlobals.lastGCindex = value;
+}
+GLOBAL IU8
+getVideolast_GC_index IFN0()
+{
+ return(GLOBAL_VGAGlobals.lastGCindex);
+}
+
+GLOBAL void
+setVideodither IFN1(IU8, value)
+{
+ GLOBAL_VGAGlobals.dither = value;
+}
+GLOBAL IU8
+getVideodither IFN0()
+{
+ return(GLOBAL_VGAGlobals.dither);
+}
+
+GLOBAL void
+setVideowrmode IFN1(IU8, value)
+{
+ GLOBAL_VGAGlobals.wrmode = value;
+}
+GLOBAL IU8
+getVideowrmode IFN0()
+{
+ return(GLOBAL_VGAGlobals.wrmode);
+}
+
+GLOBAL void
+setVideochain IFN1(IU8, value)
+{
+ GLOBAL_VGAGlobals.chain = value;
+}
+GLOBAL IU8
+getVideochain IFN0()
+{
+ return(GLOBAL_VGAGlobals.chain);
+}
+
+GLOBAL void
+setVideowrstate IFN1(IU8, value)
+{
+ GLOBAL_VGAGlobals.wrstate = value;
+}
+GLOBAL IU8
+getVideowrstate IFN0()
+{
+ return(GLOBAL_VGAGlobals.wrstate);
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/wait.c b/private/mvdm/softpc.new/base/ccpu386/wait.c
new file mode 100644
index 000000000..3594c7fe2
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/wait.c
@@ -0,0 +1,38 @@
+/*[
+
+wait.c
+
+LOCAL CHAR SccsID[]="@(#)wait.c 1.5 02/09/94";
+
+WAIT CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <wait.h>
+
+
+/*
+ =====================================================================
+   EXTERNAL ROUTINES START HERE.
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+WAIT()	/* WAIT/FWAIT: deliberately a no-op in this C CPU model */
+   {
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/wait.h b/private/mvdm/softpc.new/base/ccpu386/wait.h
new file mode 100644
index 000000000..e1a75a539
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/wait.h
@@ -0,0 +1,11 @@
+/*
+ wait.h
+
+ Define all WAIT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)wait.h 1.5 09/01/94";
+ */
+
+IMPORT VOID WAIT IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/wbinvd.c b/private/mvdm/softpc.new/base/ccpu386/wbinvd.c
new file mode 100644
index 000000000..171f1ec44
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/wbinvd.c
@@ -0,0 +1,45 @@
+/*[
+
+wbinvd.c
+
+LOCAL CHAR SccsID[]="@(#)wbinvd.c 1.5 02/09/94";
+
+WBINVD CPU Functions.
+---------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <wbinvd.h>
+
+/*
+ =====================================================================
+ EXECUTION STARTS HERE.
+ =====================================================================
+ */
+
+
+#ifdef SPC486
+
+VOID
+WBINVD()	/* Write-Back and Invalidate Cache: no cache modelled, so no-op */
+   {
+   /*
+      If cache is implemented - then make call to flush cache.
+      flush_cache();
+   */
+   }
+
+#endif /* SPC486 */
diff --git a/private/mvdm/softpc.new/base/ccpu386/wbinvd.h b/private/mvdm/softpc.new/base/ccpu386/wbinvd.h
new file mode 100644
index 000000000..bd83a530c
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/wbinvd.h
@@ -0,0 +1,11 @@
+/*
+ wbinvd.h
+
+ Define WBINVD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)wbinvd.h 1.5 09/01/94";
+ */
+
+IMPORT VOID WBINVD IPT0();
diff --git a/private/mvdm/softpc.new/base/ccpu386/xadd.c b/private/mvdm/softpc.new/base/ccpu386/xadd.c
new file mode 100644
index 000000000..d46c00a32
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xadd.c
@@ -0,0 +1,59 @@
+/*[
+
+xadd.c
+
+LOCAL CHAR SccsID[]="@(#)xadd.c 1.5 02/09/94";
+
+XADD CPU functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <xadd.h>
+#include <add.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+#ifdef SPC486
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic 'xadd': dst = dst + src, src = old dst; flags set by ADD.  */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+XADD
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32 *, pop2,	/* pntr to dst/rsrc operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 temp;
+
+   temp = *pop1;		/* remember original destination */
+   ADD(pop1, *pop2, op_sz);	/* *pop1 += *pop2; ADD sets the flags */
+   *pop2 = temp;		/* source receives the old destination */
+   }
+
+#endif /* SPC486 */
diff --git a/private/mvdm/softpc.new/base/ccpu386/xadd.h b/private/mvdm/softpc.new/base/ccpu386/xadd.h
new file mode 100644
index 000000000..799743a9b
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xadd.h
@@ -0,0 +1,18 @@
+/*
+ xadd.h
+
+ XADD CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)xadd.h 1.4 02/09/94";
+ */
+
+IMPORT VOID XADD
+
+IPT3(
+ IU32 *, pop1,
+ IU32 *, pop2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/xchg.c b/private/mvdm/softpc.new/base/ccpu386/xchg.c
new file mode 100644
index 000000000..15c27842a
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xchg.c
@@ -0,0 +1,53 @@
+/*[
+
+xchg.c
+
+LOCAL CHAR SccsID[]="@(#)xchg.c 1.5 02/09/94";
+
+XCHG CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <xchg.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic 'xchg': swap the two operands; no flags are touched.       */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+XCHG
+
+IFN2(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32 *, pop2	/* pntr to dst/rsrc operand */
+   )
+
+
+   {
+   IU32 temp;
+
+   temp = *pop1;	/* classic three-step swap through a temporary */
+   *pop1 = *pop2;
+   *pop2 = temp;
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/xchg.h b/private/mvdm/softpc.new/base/ccpu386/xchg.h
new file mode 100644
index 000000000..3432686be
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xchg.h
@@ -0,0 +1,17 @@
+/*
+ xchg.h
+
+ Define all XCHG CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)xchg.h 1.4 02/09/94";
+ */
+
+IMPORT VOID XCHG
+
+IPT2(
+ IU32 *, pop1,
+ IU32 *, pop2
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/xlat.c b/private/mvdm/softpc.new/base/ccpu386/xlat.c
new file mode 100644
index 000000000..a12539455
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xlat.c
@@ -0,0 +1,45 @@
+/*[
+
+xlat.c
+
+LOCAL CHAR SccsID[]="@(#)xlat.c 1.5 02/09/94";
+
+XLAT CPU Functions.
+-------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <xlat.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL ROUTINES START HERE
+ =====================================================================
+ */
+
+
+GLOBAL VOID
+XLAT
+
+IFN1(
+	IU32, op1	/* src operand */
+   )
+
+
+   {
+   SET_AL(op1);		/* write op1 to AL; presumably the operand decoder
+			   already fetched the table byte at DS:[BX+AL] -
+			   confirm against the caller */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/xlat.h b/private/mvdm/softpc.new/base/ccpu386/xlat.h
new file mode 100644
index 000000000..5a2958feb
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xlat.h
@@ -0,0 +1,16 @@
+/*
+ xlat.h
+
+ Define all XLAT CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)xlat.h 1.4 02/09/94";
+ */
+
+IMPORT VOID XLAT
+
+IPT1(
+ IU32, op1
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/xor.c b/private/mvdm/softpc.new/base/ccpu386/xor.c
new file mode 100644
index 000000000..17c96ff28
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xor.c
@@ -0,0 +1,59 @@
+/*[
+
+xor.c
+
+LOCAL CHAR SccsID[]="@(#)xor.c 1.5 02/09/94";
+
+XOR CPU functions.
+------------------
+
+]*/
+
+
+#include <insignia.h>
+
+#include <host_def.h>
+#include <xt.h>
+#include <c_main.h>
+#include <c_addr.h>
+#include <c_bsic.h>
+#include <c_prot.h>
+#include <c_seg.h>
+#include <c_stack.h>
+#include <c_xcptn.h>
+#include <c_reg.h>
+#include <xor.h>
+
+
+/*
+ =====================================================================
+ EXTERNAL FUNCTIONS START HERE.
+ =====================================================================
+ */
+
+
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+/* Generic 'xor': *pop1 ^= op2, with logical-op flag settings.        */
+/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
+GLOBAL VOID
+XOR
+
+IFN3(
+	IU32 *, pop1,	/* pntr to dst/lsrc operand */
+	IU32, op2,	/* rsrc operand */
+	IUM8, op_sz	/* 8, 16 or 32-bit */
+   )
+
+
+   {
+   IU32 result;
+
+   result = *pop1 ^ op2;		/* Do operation */
+   SET_CF(0);				/* Determine flags: CF and OF cleared */
+   SET_OF(0);
+   SET_AF(0);				/* AF cleared here (undefined on real h/w) */
+   SET_PF(pf_table[result & BYTE_MASK]);	/* PF looked up from low byte */
+   SET_ZF(result == 0);
+   SET_SF((result & SZ2MSB(op_sz)) != 0);	/* SF = MSB of sized result */
+   *pop1 = result;			/* Return answer */
+   }
diff --git a/private/mvdm/softpc.new/base/ccpu386/xor.h b/private/mvdm/softpc.new/base/ccpu386/xor.h
new file mode 100644
index 000000000..ba8d7e185
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/xor.h
@@ -0,0 +1,18 @@
+/*
+ xor.h
+
+ Define all XOR CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)xor.h 1.4 02/09/94";
+ */
+
+IMPORT VOID XOR
+
+IPT3(
+ IU32 *, pop1,
+ IU32, op2,
+ IUM8, op_sz
+
+ );
diff --git a/private/mvdm/softpc.new/base/ccpu386/zfrsrvd.c b/private/mvdm/softpc.new/base/ccpu386/zfrsrvd.c
new file mode 100644
index 000000000..ead9baec8
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/zfrsrvd.c
@@ -0,0 +1,4871 @@
+/*[
+
+zfrsrvd.c
+
+LOCAL CHAR SccsID[]="@(#)zfrsrvd.c 1.23 03/28/95";
+
+Reserved Floating Point CPU Functions.
+--------------------------------------
+
+]*/
+#include <insignia.h>
+#include <host_def.h>
+#include <cfpu_def.h>
+#include <newnpx.h>
+#include <debug.h>
+#include <xt.h> /* DESCR and effective_addr support */
+#include <sas.h> /* need memory(M) */
+#define HOOKED_IRETS
+#include <ica.h> /* need NPX interrupt line */
+#include <ccpusas4.h> /* the cpu internal sas bits */
+#include <c_main.h> /* C CPU definitions-interfaces */
+#include <c_page.h> /* Paging Interface */
+#include <c_mem.h> /* CPU - Memory Interface */
+#include <c_oprnd.h>
+#include <c_reg.h>
+#include <c_xcptn.h> /* Definition of Int16() */
+#include <fault.h>
+#ifdef SFELLOW
+#include <CpuInt_c.h>
+#endif /* SFELLOW */
+
+typedef union			/* scratch buffer for one decoded NPX operand */
+{
+IU32 sng;		/* Single Part Operand */
+IU32 mlt[2];		/* Multiple (two) Part Operand */
+IU8 npxbuff[108];	/* Make it the maximum required size - presumably
+			   sized for the FSAVE state image; confirm */
+} OPERAND;
+
+IMPORT IU8 *Start_of_M_area;
+IMPORT PHY_ADDR Length_of_M_area;
+IMPORT ISM32 in_C;
+IMPORT IU8 *CCPU_M;
+IMPORT IU32 Sas_wrap_mask;
+IMPORT IU32 event_counter;
+IMPORT IU8 *p; /* Pntr. to Intel Opcode Stream. */
+IMPORT IU8 *p_start; /* Pntr. to Start of Intel Opcode Stream. */
+IMPORT IU8 opcode; /* Last Opcode Byte Read. */
+IMPORT IU8 modRM; /* The modRM byte. */
+IMPORT OPERAND ops[3]; /* Inst. Operands. */
+IMPORT IU32 save_id[3]; /* Saved state for Inst. Operands. */
+IMPORT IU32 m_off[3]; /* Memory Operand offset. */
+IMPORT IU32 m_pa[3];
+IMPORT IU32 m_la[3];
+IMPORT ISM32 m_seg[3]; /* Memory Operand segment reg. index. */
+IMPORT BOOL m_isreg[3]; /* Memory Operand Register(true)/
+ Memory(false) indicator */
+IMPORT IU8 segment_override; /* Segment Prefix for current inst. */
+IMPORT IU8 repeat; /* Repeat Prefix for current inst. */
+IMPORT IU32 rep_count; /* Repeat Count for string insts. */
+IMPORT IUM32 old_TF; /* used by POPF and IRET to save Trap Flag */
+IMPORT IU32 immed; /* For immediate generation. */
+
+IMPORT BOOL POPST;
+IMPORT BOOL DOUBLEPOP;
+IMPORT BOOL REVERSE;
+IMPORT BOOL UNORDERED;
+IMPORT BOOL NPX_PROT_MODE;
+IMPORT BOOL NPX_ADDRESS_SIZE_32;
+IMPORT BOOL NpxException;
+IMPORT IU32 NpxLastSel;
+IMPORT IU32 NpxLastOff;
+IMPORT IU32 NpxFEA;
+IMPORT IU32 NpxFDS;
+IMPORT IU32 NpxFIP;
+IMPORT IU32 NpxFOP;
+IMPORT IU32 NpxFCS;
+IU16 Ax_regptr;
+IMPORT SEGMENT_REGISTER CCPU_SR[6];
+IMPORT IU16 *CCPU_WR[8];
+IMPORT IU32 CCPU_IP;
+
+LOCAL BOOL DoNpxPrologue IPT0();
+
+LOCAL IU32 NpxInstr;
+
+LOCAL VOID npx_fabs() {
+	SAVE_PTRS();	/* record instruction pointers before the FP op
+			   (presumably for exception reporting - confirm) */
+	FABS();		/* st(0) = |st(0)| */
+}
+
+LOCAL VOID npx_fadd_f0_f0() {
+/* fadd st,st */
+	IU16 src2_index = 0;	/* second source is st(0) itself */
+
+	SAVE_PTRS();
+	FPtype = FPSTACK;	/* operand comes from the FP register stack */
+	FADD(0, 0, (VOID *)&src2_index);	/* st(0) = st(0) + st(0) */
+}
+
+LOCAL VOID npx_fadd_f0_f1() {
+/* fadd st,st(1) */
+ IU16 src2_index = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f0_f2() {
+/* fadd st,st(2) */
+ IU16 src2_index = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f0_f3() {
+/* fadd st,st(3) */
+ IU16 src2_index = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f0_f4() {
+/* fadd st,st(4) */
+ IU16 src2_index = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f0_f5() {
+/* fadd st,st(5) */
+ IU16 src2_index = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f0_f6() {
+/* fadd st,st(6) */
+ IU16 src2_index = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f0_f7() {
+/* fadd st,st(7) */
+ IU16 src2_index = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f1_f0() {
+/* fadd st(1),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(1, 1, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f2_f0() {
+/* fadd st(2),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(2, 2, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f3_f0() {
+/* fadd st(3),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(3, 3, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f4_f0() {
+/* fadd st(4),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(4, 4, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f5_f0() {
+/* fadd st(5),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(5, 5, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f6_f0() {
+/* fadd st(6),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(6, 6, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_f7_f0() {
+/* fadd st(7),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FADD(7, 7, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fadd_short() {
+/* fadd DWORD PTR - add 32-bit real memory operand to st(0) */
+
+	FPtype = M32R;		/* operand type: 32-bit real in memory */
+	D_Ed(0, RO0, PG_R);	/* decode dword memory operand, read access */
+	F_Ed(0);		/* fetch operand into ops[0] */
+	SAVE_PTRS();
+	SAVE_DPTRS();		/* memory operand: save data pointers too */
+	FADD(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fadd_long() {
+/* fadd QWORD PTR */
+
+ FPtype = M64R;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FADD(0, 0, &ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_faddp_f0() {
+/* faddp st(0),st - add, then pop the FP stack */
+
+	POPST = TRUE;		/* tell the add path to pop after the op */
+	npx_fadd_f0_f0();
+	POPST = FALSE;		/* restore default for subsequent instructions */
+}
+
+LOCAL VOID npx_faddp_f1() {
+/* faddp st(1),st */
+
+ POPST = TRUE;
+ npx_fadd_f1_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_faddp_f2() {
+/* faddp st(2),st */
+
+ POPST = TRUE;
+ npx_fadd_f2_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_faddp_f3() {
+/* faddp st(3),st */
+
+ POPST = TRUE;
+ npx_fadd_f3_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_faddp_f4() {
+/* faddp st(4),st */
+
+ POPST = TRUE;
+ npx_fadd_f4_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_faddp_f5() {
+/* faddp st(5),st */
+
+ POPST = TRUE;
+ npx_fadd_f5_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_faddp_f6() {
+/* faddp st(6),st */
+
+ POPST = TRUE;
+ npx_fadd_f6_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_faddp_f7() {
+/* faddp st(7),st */
+
+ POPST = TRUE;
+ npx_fadd_f7_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fbld() {
+/* fbld TBYTE PTR */
+
+ D_E0a(0, RO0, PG_R);
+ F_E0a(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FBLD(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fbstp() {
+/* fbstp TBYTE PTR */
+
+ D_E0a(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FBSTP(&ops[0].npxbuff[0]);
+ P_E0a(0);
+}
+
+LOCAL VOID npx_fchs() {
+/* fchs */
+
+ SAVE_PTRS();
+ FCHS();
+}
+
+LOCAL VOID npx_fclex() {
+/* fclex */
+
+ FCLEX();
+}
+
+LOCAL VOID npx_fcom_f0() {
+/* fcom st(0) */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f1() {
+/* fcom st(1) */
+ IU16 src2_index = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f2() {
+/* fcom st(2) */
+ IU16 src2_index = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f3() {
+/* fcom st(3) */
+ IU16 src2_index = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f4() {
+/* fcom st(4) */
+ IU16 src2_index = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f5() {
+/* fcom st(5) */
+ IU16 src2_index = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f6() {
+/* fcom st(6) */
+ IU16 src2_index = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_f7() {
+/* fcom st(7) */
+ IU16 src2_index = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FCOM((VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fcom_short() {
+/* fcom DWORD PTR */
+
+ FPtype = M32R;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FCOM(&ops[0].sng);
+}
+
+LOCAL VOID npx_fcom_long() {
+/* fcom QWORD PTR */
+
+ FPtype = M64R;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FCOM(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fcomp_f0() {
+/* fcomp st(0) */
+
+ POPST = TRUE;
+ npx_fcom_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f1() {
+/* fcomp st(1) */
+
+ POPST = TRUE;
+ npx_fcom_f1();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f2() {
+/* fcomp st(2) */
+
+ POPST = TRUE;
+ npx_fcom_f2();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f3() {
+/* fcomp st(3) */
+
+ POPST = TRUE;
+ npx_fcom_f3();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f4() {
+/* fcomp st(4) */
+
+ POPST = TRUE;
+ npx_fcom_f4();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f5() {
+/* fcomp st(5) */
+
+ POPST = TRUE;
+ npx_fcom_f5();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f6() {
+/* fcomp st(6) */
+
+ POPST = TRUE;
+ npx_fcom_f6();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_f7() {
+/* fcomp st(7) */
+
+ POPST = TRUE;
+ npx_fcom_f7();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_short() {
+/* fcomp DWORD PTR */
+
+ POPST = TRUE;
+ npx_fcom_short();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcomp_long() {
+/* fcomp QWORD PTR */
+
+ POPST = TRUE;
+ npx_fcom_long();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fcompp() {
+/* fcompp - compare st with st(1), then pop twice */
+
+	DOUBLEPOP = TRUE;	/* tell the compare path to pop two entries */
+	npx_fcom_f1();
+	DOUBLEPOP = FALSE;	/* restore default for subsequent instructions */
+}
+
+LOCAL VOID npx_fcos() {
+/* fcos */
+
+ SAVE_PTRS();
+ FCOS();
+}
+
+LOCAL VOID npx_fdecstp() {
+/* fdecstp */
+
+ FDECSTP();
+}
+
+LOCAL VOID npx_fdiv_f0_f0() {
+/* fdiv st,st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f1() {
+/* fdiv st,st(1) */
+ IU16 src2_index = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f2() {
+/* fdiv st,st(2) */
+ IU16 src2_index = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f3() {
+/* fdiv st,st(3) */
+ IU16 src2_index = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f4() {
+/* fdiv st,st(4) */
+ IU16 src2_index = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f5() {
+/* fdiv st,st(5) */
+ IU16 src2_index = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f6() {
+/* fdiv st,st(6) */
+ IU16 src2_index = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f0_f7() {
+/* fdiv st,st(7) */
+ IU16 src2_index = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(0, 0, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f1_f0() {
+/* fdiv st(1),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(1, 1, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f2_f0() {
+/* fdiv st(2),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(2, 2, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f3_f0() {
+/* fdiv st(3),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(3, 3, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f4_f0() {
+/* fdiv st(4),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(4, 4, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f5_f0() {
+/* fdiv st(5),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(5, 5, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f6_f0() {
+/* fdiv st(6),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(6, 6, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_f7_f0() {
+/* fdiv st(7),st */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FDIV(7, 7, (VOID *)&src2_index);
+}
+
+LOCAL VOID npx_fdiv_short() {
+/* fdiv DWORD PTR */
+
+ FPtype = M32R;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FDIV(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fdiv_long() {
+/* fdiv QWORD PTR */
+
+ FPtype = M64R;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FDIV(0, 0, &ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fdivp_f0() {
+/* fdivp st(0),st */
+
+ POPST = TRUE;
+ npx_fdiv_f0_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f1() {
+/* fdivp st(1),st */
+
+ POPST = TRUE;
+ npx_fdiv_f1_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f2() {
+/* fdivp st(2),st */
+
+ POPST = TRUE;
+ npx_fdiv_f2_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f3() {
+/* fdivp st(3),st */
+
+ POPST = TRUE;
+ npx_fdiv_f3_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f4() {
+/* fdivp st(4),st */
+
+ POPST = TRUE;
+ npx_fdiv_f4_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f5() {
+/* fdivp st(5),st */
+
+ POPST = TRUE;
+ npx_fdiv_f5_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f6() {
+/* fdivp st(6),st */
+
+ POPST = TRUE;
+ npx_fdiv_f6_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivp_f7() {
+/* fdivp st(7),st */
+
+ POPST = TRUE;
+ npx_fdiv_f7_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f0() {
+/* fdivr st,st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f1() {
+/* fdivr st,st(1) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f1();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f2() {
+/* fdivr st,st(2) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f2();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f3() {
+/* fdivr st,st(3) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f3();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f4() {
+/* fdivr st,st(4) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f4();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f5() {
+/* fdivr st,st(5) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f5();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f6() {
+/* fdivr st,st(6) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f6();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f0_f7() {
+/* fdivr st,st(7) */
+
+ REVERSE = TRUE;
+ npx_fdiv_f0_f7();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f1_f0() {
+/* fdivr st(1),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f1_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f2_f0() {
+/* fdivr st(2),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f2_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f3_f0() {
+/* fdivr st(3),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f3_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f4_f0() {
+/* fdivr st(4),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f4_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f5_f0() {
+/* fdivr st(5),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f5_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f6_f0() {
+/* fdivr st(6),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f6_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_f7_f0() {
+/* fdivr st(7),st */
+
+ REVERSE = TRUE;
+ npx_fdiv_f7_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_short() {
+/* fdivr DWORD PTR */
+
+ REVERSE = TRUE;
+ npx_fdiv_short();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivr_long() {
+/* fdivr QWORD PTR */
+
+ REVERSE = TRUE;
+ npx_fdiv_long();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f0() {
+/* fdivrp st(0),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f0_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f1() {
+/* fdivrp st(1),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f1_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f2() {
+/* fdivrp st(2),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f2_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f3() {
+/* fdivrp st(3),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f3_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f4() {
+/* fdivrp st(4),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f4_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f5() {
+/* fdivrp st(5),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f5_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f6() {
+/* fdivrp st(6),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f6_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fdivrp_f7() {
+/* fdivrp st(7),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fdiv_f7_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_ffree_f0() {
+/* ffree st(0) */
+
+ SAVE_PTRS();
+ FFREE(0);
+}
+
+LOCAL VOID npx_ffree_f1() {
+/* ffree st(1) */
+
+ SAVE_PTRS();
+ FFREE(1);
+}
+
+LOCAL VOID npx_ffree_f2() {
+/* ffree st(2) */
+
+ SAVE_PTRS();
+ FFREE(2);
+}
+
+LOCAL VOID npx_ffree_f3() {
+/* ffree st(3) */
+
+ SAVE_PTRS();
+ FFREE(3);
+}
+
+LOCAL VOID npx_ffree_f4() {
+/* ffree st(4) */
+
+ SAVE_PTRS();
+ FFREE(4);
+}
+
+LOCAL VOID npx_ffree_f5() {
+/* ffree st(5) */
+
+ SAVE_PTRS();
+ FFREE(5);
+}
+
+LOCAL VOID npx_ffree_f6() {
+/* ffree st(6) */
+
+ SAVE_PTRS();
+ FFREE(6);
+}
+
+LOCAL VOID npx_ffree_f7() {
+/* ffree st(7) */
+
+ SAVE_PTRS();
+ FFREE(7);
+}
+
+LOCAL VOID npx_ffreep_f0() {
+/* ffreep st(0) */
+
+ POPST=TRUE;
+ npx_ffree_f0();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f1() {
+/* ffreep st(1) */
+
+ POPST=TRUE;
+ npx_ffree_f1();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f2() {
+/* ffreep st(2) */
+
+ POPST=TRUE;
+ npx_ffree_f2();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f3() {
+/* ffreep st(3) */
+
+ POPST=TRUE;
+ npx_ffree_f3();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f4() {
+/* ffreep st(4) */
+
+ POPST=TRUE;
+ npx_ffree_f4();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f5() {
+/* ffreep st(5) */
+
+ POPST=TRUE;
+ npx_ffree_f5();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f6() {
+/* ffreep st(6) */
+
+ POPST=TRUE;
+ npx_ffree_f6();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_ffreep_f7() {
+/* ffreep st(7) */
+
+ POPST=TRUE;
+ npx_ffree_f7();
+ POPST=FALSE;
+}
+
+LOCAL VOID npx_fiadd_word() {
+/* fiadd WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FADD(0, 0, &ops[0].sng);
+}
+
+
+LOCAL VOID npx_fiadd_short() {
+/* fiadd DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FADD(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_ficom_word() {
+/* ficom WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FCOM(&ops[0].sng);
+}
+
+LOCAL VOID npx_ficom_short() {
+/* ficom DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FCOM(&ops[0].sng);
+}
+
+LOCAL VOID npx_ficomp_word() {
+/* ficomp WORD PTR */
+
+ POPST = TRUE;
+ npx_ficom_word();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_ficomp_short() {
+/* ficomp DWORD PTR */
+
+ POPST = TRUE;
+ npx_ficom_short();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fidiv_word() {
+/* fidiv WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FDIV(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fidiv_short() {
+/* fidiv DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FDIV(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fidivr_word() {
+/* fidivr WORD PTR */
+
+ REVERSE=TRUE;
+ npx_fidiv_word();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fidivr_short() {
+/* fidivr DWORD PTR */
+
+ REVERSE=TRUE;
+ npx_fidiv_short();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fild_word() {
+/* fild WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FLD(&ops[0].sng);
+}
+
+LOCAL VOID npx_fild_short() {
+/* fild DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FLD(&ops[0].sng);
+}
+
+LOCAL VOID npx_fild_long() {
+/* fild QWORD PTR */
+
+ FPtype = M64I;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FLD(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fimul_word() {
+/* fimul WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FMUL(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fimul_short() {
+/* fimul DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FMUL(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fincstp() {
+/* fincstp */
+
+ FINCSTP();
+}
+
+LOCAL VOID npx_finit() {
+/* finit */
+
+ FINIT();
+}
+
+LOCAL VOID npx_fist_word() {
+/* fist WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FIST(&ops[0].sng);
+ P_Ew(0);
+}
+
+LOCAL VOID npx_fist_short() {
+/* fist DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FIST(&ops[0].sng);
+ P_Ed(0);
+}
+
+LOCAL VOID npx_fistp_word() {
+/* fistp WORD PTR */
+
+ POPST = TRUE;
+ npx_fist_word();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fistp_short() {
+/* fistp DWORD PTR */
+
+ POPST = TRUE;
+ npx_fist_short();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fistp_long() {
+/* fistp QWORD PTR */
+
+ FPtype = M64I;
+ POPST = TRUE;
+ D_E08(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FIST(&ops[0].npxbuff[0]);
+ P_E08(0);
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fisub_word() {
+/* fisub WORD PTR */
+
+ FPtype = M16I;
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FSUB(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fisub_short() {
+/* fisub DWORD PTR */
+
+ FPtype = M32I;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FSUB(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fisubr_word() {
+/* fisubr WORD PTR */
+
+ REVERSE = TRUE;
+ npx_fisub_word();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fisubr_short() {
+/* fisubr DWORD PTR */
+
+ REVERSE = TRUE;
+ npx_fisub_short();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fld_f0() {
+/* fld st(0) */
+ IU16 stackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f1() {
+/* fld st(1) */
+ IU16 stackPtr = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f2() {
+/* fld st(2) */
+ IU16 stackPtr = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f3() {
+/* fld st(3) */
+ IU16 stackPtr = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f4() {
+/* fld st(4) */
+ IU16 stackPtr = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f5() {
+/* fld st(5) */
+ IU16 stackPtr = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f6() {
+/* fld st(6) */
+ IU16 stackPtr = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_f7() {
+/* fld st(7) */
+ IU16 stackPtr = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FLD((VOID *)&stackPtr);
+}
+
+LOCAL VOID npx_fld_short() {
+/* fld DWORD PTR */
+
+ FPtype=M32R;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FLD(&ops[0].sng);
+}
+
+LOCAL VOID npx_fld_long() {
+/* fld QWORD PTR */
+
+ FPtype=M64R;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FLD(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fld_temp() {
+/* fld TBYTE PTR */
+
+ FPtype=M80R;
+ D_E0a(0, RO0, PG_R);
+ F_E0a(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FLD(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fldcw() {
+/* fldcw */
+
+ D_Ew(0, RO0, PG_R);
+ F_Ew(0);
+ FLDCW(&ops[0].sng);
+}
+
+LOCAL VOID npx_fldenv() {
+/* fldenv */
+
+ NPX_ADDRESS_SIZE_32 = (GET_OPERAND_SIZE()==USE16)?FALSE:TRUE;
+ NPX_PROT_MODE = ( GET_PE() && (GET_VM() == 0) );
+ D_E0e(0, RO0, PG_R);
+ F_E0e(0);
+ FLDENV(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fldlg2() {
+/* fldlg2 */
+
+ SAVE_PTRS();
+ FLDCONST(4);
+}
+
+LOCAL VOID npx_fldln2() {
+/* fldln2 */
+
+ SAVE_PTRS();
+ FLDCONST(5);
+}
+
+LOCAL VOID npx_fldl2e() {
+/* fldl2e */
+
+ SAVE_PTRS();
+ FLDCONST(2);
+}
+
+LOCAL VOID npx_fldl2t() {
+/* fldl2t */
+
+ SAVE_PTRS();
+ FLDCONST(1);
+}
+
+LOCAL VOID npx_fldpi() {
+/* fldpi */
+
+ SAVE_PTRS();
+ FLDCONST(3);
+}
+
+LOCAL VOID npx_fldz() {
+/* fldz */
+
+ SAVE_PTRS();
+ FLDCONST(6);
+}
+
+LOCAL VOID npx_fld1() {
+/* fld1 */
+
+ SAVE_PTRS();
+ FLDCONST(0);
+}
+
+LOCAL VOID npx_fmul_f0_f0() {
+/* fmul st,st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f1() {
+/* fmul st,st(1) */
+ IU16 StackPtr = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f2() {
+/* fmul st,st(2) */
+ IU16 StackPtr = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f3() {
+/* fmul st,st(3) */
+ IU16 StackPtr = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f4() {
+/* fmul st,st(4) */
+ IU16 StackPtr = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f5() {
+/* fmul st,st(5) */
+ IU16 StackPtr = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f6() {
+/* fmul st,st(6) */
+ IU16 StackPtr = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f0_f7() {
+/* fmul st,st(7) */
+ IU16 StackPtr = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f1_f0() {
+/* fmul st(1),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(1, 1, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f2_f0() {
+/* fmul st(2),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(2, 2, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f3_f0() {
+/* fmul st(3),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(3, 3, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f4_f0() {
+/* fmul st(4),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(4, 4, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f5_f0() {
+/* fmul st(5),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(5, 5, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f6_f0() {
+/* fmul st(6),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(6, 6, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_f7_f0() {
+/* fmul st(7),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FMUL(7, 7, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fmul_short() {
+/* fmul DWORD PTR */
+
+ FPtype = M32R;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FMUL(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fmul_long() {
+/* fmul QWORD PTR */
+
+ FPtype = M64R;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FMUL(0, 0, &ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fmulp_f0() {
+/* fmulp st(0),st */
+
+ POPST = TRUE;
+ npx_fmul_f0_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f1() {
+/* fmulp st(1),st */
+
+ POPST = TRUE;
+ npx_fmul_f1_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f2() {
+/* fmulp st(2),st */
+
+ POPST = TRUE;
+ npx_fmul_f2_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f3() {
+/* fmulp st(3),st */
+
+ POPST = TRUE;
+ npx_fmul_f3_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f4() {
+/* fmulp st(4),st */
+
+ POPST = TRUE;
+ npx_fmul_f4_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f5() {
+/* fmulp st(5),st */
+
+ POPST = TRUE;
+ npx_fmul_f5_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f6() {
+/* fmulp st(6),st */
+
+ POPST = TRUE;
+ npx_fmul_f6_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fmulp_f7() {
+/* fmulp st(7),st */
+
+ POPST = TRUE;
+ npx_fmul_f7_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fnop() {
+/* fnop */
+
+ SAVE_PTRS();
+ FNOP();
+}
+
+LOCAL VOID npx_fpatan() {
+/* fpatan */
+
+ SAVE_PTRS();
+ FPATAN();
+}
+
+LOCAL VOID npx_fprem() {
+/* fprem */
+
+ SAVE_PTRS();
+ FPREM();
+}
+
+LOCAL VOID npx_fprem1() {
+/* fprem1 */
+
+ SAVE_PTRS();
+ FPREM1();
+}
+
+LOCAL VOID npx_fptan() {
+/* fptan */
+
+ SAVE_PTRS();
+ FPTAN();
+}
+
+LOCAL VOID npx_frndint() {
+/* frndint */
+
+ SAVE_PTRS();
+ FRNDINT();
+}
+
+LOCAL VOID npx_fscale() {
+/* fscale */
+
+ SAVE_PTRS();
+ FSCALE();
+}
+
+LOCAL VOID npx_fsin() {
+/* fsin */
+
+ SAVE_PTRS();
+ FSIN();
+}
+
+LOCAL VOID npx_fsincos() {
+/* fsincos */
+
+ SAVE_PTRS();
+ FSINCOS();
+}
+
+LOCAL VOID npx_fsqrt() {
+/* fsqrt */
+
+ SAVE_PTRS();
+ FSQRT();
+}
+
+LOCAL VOID npx_frstor() {
+/* frstor */
+
+ NPX_ADDRESS_SIZE_32 = (GET_OPERAND_SIZE()==USE16)?FALSE:TRUE;
+ NPX_PROT_MODE = ( GET_PE() && (GET_VM() == 0) );
+ D_E5e(0, RO0, PG_R);
+ F_E5e(0);
+ FRSTOR(&ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fsave() {
+/* fsave */
+
+ NPX_ADDRESS_SIZE_32 = (GET_OPERAND_SIZE()==USE16)?FALSE:TRUE;
+ NPX_PROT_MODE = ( GET_PE() && (GET_VM() == 0) );
+ D_E5e(0, WO0, PG_W);
+ FSAVE(&ops[0].npxbuff[0]);
+ P_E5e(0);
+}
+
+LOCAL VOID npx_fst_f0() {
+/* fst st(0) */
+ IU16 StackPtr=0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f1() {
+/* fst st(1) */
+ IU16 StackPtr=1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f2() {
+/* fst st(2) */
+ IU16 StackPtr=2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f3() {
+/* fst st(3) */
+ IU16 StackPtr=3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f4() {
+/* fst st(4) */
+ IU16 StackPtr=4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f5() {
+/* fst st(5) */
+ IU16 StackPtr=5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f6() {
+/* fst st(6) */
+ IU16 StackPtr=6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_f7() {
+/* fst st(7) */
+ IU16 StackPtr=7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FST((VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fst_short() {
+/* fst DWORD PTR */
+
+ FPtype = M32R;
+ D_Ed(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FST(&ops[0].sng);
+ P_Ed(0);
+}
+
+LOCAL VOID npx_fst_long() {
+/* fst QWORD PTR */
+
+ FPtype = M64R;
+ D_E08(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FST(&ops[0].npxbuff[0]);
+ P_E08(0);
+}
+
+LOCAL VOID npx_fstcw() {
+/* fstcw */
+
+ D_Ew(0, WO0, PG_W);
+ FSTCW(&ops[0].sng);
+ P_Ew(0);
+}
+
+LOCAL VOID npx_fstenv() {
+/* fstenv */
+
+ NPX_ADDRESS_SIZE_32 = (GET_OPERAND_SIZE()==USE16)?FALSE:TRUE;
+ NPX_PROT_MODE = ( GET_PE() && (GET_VM() == 0) );
+ D_E0e(0, WO0, PG_W);
+ FSTENV(&ops[0].npxbuff[0]);
+ P_E0e(0);
+}
+
+LOCAL VOID npx_fstp_f0() {
+/* fstp st(0) */
+
+ POPST = TRUE;
+ npx_fst_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f1() {
+/* fstp st(1) */
+
+ POPST = TRUE;
+ npx_fst_f1();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f2() {
+/* fstp st(2) */
+
+ POPST = TRUE;
+ npx_fst_f2();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f3() {
+/* fstp st(3) */
+
+ POPST = TRUE;
+ npx_fst_f3();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f4() {
+/* fstp st(4) */
+
+ POPST = TRUE;
+ npx_fst_f4();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f5() {
+/* fstp st(5) */
+
+ POPST = TRUE;
+ npx_fst_f5();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f6() {
+/* fstp st(6) */
+
+ POPST = TRUE;
+ npx_fst_f6();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_f7() {
+/* fstp st(7) */
+
+ POPST = TRUE;
+ npx_fst_f7();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_short() {
+/* fstp DWORD PTR */
+
+ POPST = TRUE;
+ npx_fst_short();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_long() {
+/* fstp QWORD PTR */
+
+ POPST = TRUE;
+ npx_fst_long();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstp_temp() {
+/* fstp TBYTE PTR */
+
+ POPST = TRUE;
+ FPtype = M80R;
+ D_E0a(0, WO0, PG_W);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FST(&ops[0].npxbuff[0]);
+ P_E0a(0);
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fstsw() {
+/* fstsw */
+
+ D_Ew(0, WO0, PG_W);
+ FSTSW(&ops[0].sng, FALSE);
+ P_Ew(0);
+}
+
+LOCAL VOID npx_fstswax() {
+/* fstswax */
+
+ FSTSW((VOID *)&Ax_regptr, TRUE);
+ SET_AX(Ax_regptr);
+}
+
+LOCAL VOID npx_fsub_f0_f0() {
+/* fsub st,st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f1() {
+/* fsub st,st(1) */
+ IU16 StackPtr = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f2() {
+/* fsub st,st(2) */
+ IU16 StackPtr = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f3() {
+/* fsub st,st(3) */
+ IU16 StackPtr = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f4() {
+/* fsub st,st(4) */
+ IU16 StackPtr = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f5() {
+/* fsub st,st(5) */
+ IU16 StackPtr = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f6() {
+/* fsub st,st(6) */
+ IU16 StackPtr = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f0_f7() {
+/* fsub st,st(7) */
+ IU16 StackPtr = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(0, 0, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f1_f0() {
+/* fsub st(1),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(1, 1, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f2_f0() {
+/* fsub st(2),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(2, 2, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f3_f0() {
+/* fsub st(3),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(3, 3, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f4_f0() {
+/* fsub st(4),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(4, 4, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f5_f0() {
+/* fsub st(5),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(5, 5, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f6_f0() {
+/* fsub st(6),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(6, 6, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_f7_f0() {
+/* fsub st(7),st */
+ IU16 StackPtr = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ FSUB(7, 7, (VOID *)&StackPtr);
+}
+
+LOCAL VOID npx_fsub_short() {
+/* fsub DWORD PTR */
+
+ FPtype = M32R;
+ D_Ed(0, RO0, PG_R);
+ F_Ed(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FSUB(0, 0, &ops[0].sng);
+}
+
+LOCAL VOID npx_fsub_long() {
+/* fsub QWORD PTR */
+
+ FPtype = M64R;
+ D_E08(0, RO0, PG_R);
+ F_E08(0);
+ SAVE_PTRS();
+ SAVE_DPTRS();
+ FSUB(0, 0, &ops[0].npxbuff[0]);
+}
+
+LOCAL VOID npx_fsubp_f0() {
+/* fsubp st(0),st */
+
+ POPST = TRUE;
+ npx_fsub_f0_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f1() {
+/* fsubp st(1),st */
+
+ POPST = TRUE;
+ npx_fsub_f1_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f2() {
+/* fsubp st(2),st */
+
+ POPST = TRUE;
+ npx_fsub_f2_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f3() {
+/* fsubp st(3),st */
+
+ POPST = TRUE;
+ npx_fsub_f3_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f4() {
+/* fsubp st(4),st */
+
+ POPST = TRUE;
+ npx_fsub_f4_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f5() {
+/* fsubp st(5),st */
+
+ POPST = TRUE;
+ npx_fsub_f5_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f6() {
+/* fsubp st(6),st */
+
+ POPST = TRUE;
+ npx_fsub_f6_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubp_f7() {
+/* fsubp st(7),st */
+
+ POPST = TRUE;
+ npx_fsub_f7_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f0() {
+/* fsubr st,st */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f1() {
+/* fsubr st,st(1) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f1();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f2() {
+/* fsubr st,st(2) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f2();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f3() {
+/* fsubr st,st(3) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f3();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f4() {
+/* fsubr st,st(4) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f4();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f5() {
+/* fsubr st,st(5) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f5();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f6() {
+/* fsubr st,st(6) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f6();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f0_f7() {
+/* fsubr st,st(7) */
+
+ REVERSE = TRUE;
+ npx_fsub_f0_f7();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f1_f0() {
+/* fsubr st(1),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f1_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f2_f0() {
+/* fsubr st(2),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f2_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f3_f0() {
+/* fsubr st(3),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f3_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f4_f0() {
+/* fsubr st(4),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f4_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f5_f0() {
+/* fsubr st(5),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f5_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f6_f0() {
+/* fsubr st(6),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f6_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_f7_f0() {
+/* fsubr st(7),st */
+
+ REVERSE = TRUE;
+ npx_fsub_f7_f0();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_short() {
+/* fsubr DWORD PTR */
+
+ REVERSE = TRUE;
+ npx_fsub_short();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubr_long() {
+/* fsubr QWORD PTR */
+
+ REVERSE = TRUE;
+ npx_fsub_long();
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f0() {
+/* fsubrp st(0),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f0_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f1() {
+/* fsubrp st(1),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f1_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f2() {
+/* fsubrp st(2),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f2_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f3() {
+/* fsubrp st(3),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f3_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f4() {
+/* fsubrp st(4),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f4_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f5() {
+/* fsubrp st(5),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f5_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f6() {
+/* fsubrp st(6),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f6_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_fsubrp_f7() {
+/* fsubrp st(7),st */
+
+ REVERSE = TRUE;
+ POPST = TRUE;
+ npx_fsub_f7_f0();
+ POPST = FALSE;
+ REVERSE = FALSE;
+}
+
+LOCAL VOID npx_ftst() {
+/* ftst */
+
+ SAVE_PTRS();
+ FTST();
+}
+
+LOCAL VOID npx_fucom_f0() {
+/* fucom st(0) */
+ IU16 src2_index = 0;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f1() {
+/* fucom st(1) */
+ IU16 src2_index = 1;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f2() {
+/* fucom st(2) */
+ IU16 src2_index = 2;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f3() {
+/* fucom st(3) */
+ IU16 src2_index = 3;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f4() {
+/* fucom st(4) */
+ IU16 src2_index = 4;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f5() {
+/* fucom st(5) */
+ IU16 src2_index = 5;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f6() {
+/* fucom st(6) */
+ IU16 src2_index = 6;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucom_f7() {
+/* fucom st(7) */
+ IU16 src2_index = 7;
+
+ SAVE_PTRS();
+ FPtype = FPSTACK;
+ UNORDERED = TRUE;
+ FCOM((VOID *)&src2_index);
+ UNORDERED = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f0() {
+/* fucomp st(0) */
+
+ POPST = TRUE;
+ npx_fucom_f0();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f1() {
+/* fucomp st(1) */
+
+ POPST = TRUE;
+ npx_fucom_f1();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f2() {
+/* fucomp st(2) */
+
+ POPST = TRUE;
+ npx_fucom_f2();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f3() {
+/* fucomp st(3) */
+
+ POPST = TRUE;
+ npx_fucom_f3();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f4() {
+/* fucomp st(4) */
+
+ POPST = TRUE;
+ npx_fucom_f4();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f5() {
+/* fucomp st(5) */
+
+ POPST = TRUE;
+ npx_fucom_f5();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f6() {
+/* fucomp st(6) */
+
+ POPST = TRUE;
+ npx_fucom_f6();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucomp_f7() {
+/* fucomp st(7) */
+
+ POPST = TRUE;
+ npx_fucom_f7();
+ POPST = FALSE;
+}
+
+LOCAL VOID npx_fucompp() {
+/* fucompp */
+
+ DOUBLEPOP = TRUE;
+ npx_fucom_f1();
+ DOUBLEPOP = FALSE;
+}
+
+LOCAL VOID npx_fxam() {
+/* fxam */
+
+ SAVE_PTRS();
+ FXAM();
+}
+
+LOCAL VOID npx_fxch_f0() {
+/* fxch st(0) */
+
+ SAVE_PTRS();
+ FXCH(0);
+}
+
+LOCAL VOID npx_fxch_f1() {
+/* fxch st(1) */
+
+ SAVE_PTRS();
+ FXCH(1);
+}
+
+LOCAL VOID npx_fxch_f2() {
+/* fxch st(2) */
+
+ SAVE_PTRS();
+ FXCH(2);
+}
+
+LOCAL VOID npx_fxch_f3() {
+/* fxch st(3) */
+
+ SAVE_PTRS();
+ FXCH(3);
+}
+
+LOCAL VOID npx_fxch_f4() {
+/* fxch st(4) */
+
+ SAVE_PTRS();
+ FXCH(4);
+}
+
+LOCAL VOID npx_fxch_f5() {
+/* fxch st(5) */
+
+ SAVE_PTRS();
+ FXCH(5);
+}
+
+LOCAL VOID npx_fxch_f6() {
+/* fxch st(6) */
+
+ SAVE_PTRS();
+ FXCH(6);
+}
+
+LOCAL VOID npx_fxch_f7() {
+/* fxch st(7) */
+
+ SAVE_PTRS();
+ FXCH(7);
+}
+
+LOCAL VOID npx_fxtract() {
+/* fxtract */
+
+ SAVE_PTRS();
+ FXTRACT();
+}
+
+LOCAL VOID npx_fyl2x() {
+/* fyl2x */
+
+ SAVE_PTRS();
+ FYL2X();
+}
+
+LOCAL VOID npx_fyl2xp1() {
+/* fyl2xp1 */
+
+ SAVE_PTRS();
+ FYL2XP1();
+}
+
+LOCAL VOID npx_f2xm1() {
+/* f2xm1 */
+
+ SAVE_PTRS();
+ F2XM1();
+}
+
+LOCAL VOID npx_funimp() {
+ Int6();
+}
+
+
+LOCAL VOID (*inst_table[])() = {
+npx_fadd_short, /* d8 00 */
+npx_fadd_short, /* d8 01 */
+npx_fadd_short, /* d8 02 */
+npx_fadd_short, /* d8 03 */
+npx_fadd_short, /* d8 04 */
+npx_fadd_short, /* d8 05 */
+npx_fadd_short, /* d8 06 */
+npx_fadd_short, /* d8 07 */
+npx_fmul_short, /* d8 08 */
+npx_fmul_short, /* d8 09 */
+npx_fmul_short, /* d8 0a */
+npx_fmul_short, /* d8 0b */
+npx_fmul_short, /* d8 0c */
+npx_fmul_short, /* d8 0d */
+npx_fmul_short, /* d8 0e */
+npx_fmul_short, /* d8 0f */
+npx_fcom_short, /* d8 10 */
+npx_fcom_short, /* d8 11 */
+npx_fcom_short, /* d8 12 */
+npx_fcom_short, /* d8 13 */
+npx_fcom_short, /* d8 14 */
+npx_fcom_short, /* d8 15 */
+npx_fcom_short, /* d8 16 */
+npx_fcom_short, /* d8 17 */
+npx_fcomp_short, /* d8 18 */
+npx_fcomp_short, /* d8 19 */
+npx_fcomp_short, /* d8 1a */
+npx_fcomp_short, /* d8 1b */
+npx_fcomp_short, /* d8 1c */
+npx_fcomp_short, /* d8 1d */
+npx_fcomp_short, /* d8 1e */
+npx_fcomp_short, /* d8 1f */
+npx_fsub_short, /* d8 20 */
+npx_fsub_short, /* d8 21 */
+npx_fsub_short, /* d8 22 */
+npx_fsub_short, /* d8 23 */
+npx_fsub_short, /* d8 24 */
+npx_fsub_short, /* d8 25 */
+npx_fsub_short, /* d8 26 */
+npx_fsub_short, /* d8 27 */
+npx_fsubr_short, /* d8 28 */
+npx_fsubr_short, /* d8 29 */
+npx_fsubr_short, /* d8 2a */
+npx_fsubr_short, /* d8 2b */
+npx_fsubr_short, /* d8 2c */
+npx_fsubr_short, /* d8 2d */
+npx_fsubr_short, /* d8 2e */
+npx_fsubr_short, /* d8 2f */
+npx_fdiv_short, /* d8 30 */
+npx_fdiv_short, /* d8 31 */
+npx_fdiv_short, /* d8 32 */
+npx_fdiv_short, /* d8 33 */
+npx_fdiv_short, /* d8 34 */
+npx_fdiv_short, /* d8 35 */
+npx_fdiv_short, /* d8 36 */
+npx_fdiv_short, /* d8 37 */
+npx_fdivr_short, /* d8 38 */
+npx_fdivr_short, /* d8 39 */
+npx_fdivr_short, /* d8 3a */
+npx_fdivr_short, /* d8 3b */
+npx_fdivr_short, /* d8 3c */
+npx_fdivr_short, /* d8 3d */
+npx_fdivr_short, /* d8 3e */
+npx_fdivr_short, /* d8 3f */
+npx_fadd_short, /* d8 40 */
+npx_fadd_short, /* d8 41 */
+npx_fadd_short, /* d8 42 */
+npx_fadd_short, /* d8 43 */
+npx_fadd_short, /* d8 44 */
+npx_fadd_short, /* d8 45 */
+npx_fadd_short, /* d8 46 */
+npx_fadd_short, /* d8 47 */
+npx_fmul_short, /* d8 48 */
+npx_fmul_short, /* d8 49 */
+npx_fmul_short, /* d8 4a */
+npx_fmul_short, /* d8 4b */
+npx_fmul_short, /* d8 4c */
+npx_fmul_short, /* d8 4d */
+npx_fmul_short, /* d8 4e */
+npx_fmul_short, /* d8 4f */
+npx_fcom_short, /* d8 50 */
+npx_fcom_short, /* d8 51 */
+npx_fcom_short, /* d8 52 */
+npx_fcom_short, /* d8 53 */
+npx_fcom_short, /* d8 54 */
+npx_fcom_short, /* d8 55 */
+npx_fcom_short, /* d8 56 */
+npx_fcom_short, /* d8 57 */
+npx_fcomp_short, /* d8 58 */
+npx_fcomp_short, /* d8 59 */
+npx_fcomp_short, /* d8 5a */
+npx_fcomp_short, /* d8 5b */
+npx_fcomp_short, /* d8 5c */
+npx_fcomp_short, /* d8 5d */
+npx_fcomp_short, /* d8 5e */
+npx_fcomp_short, /* d8 5f */
+npx_fsub_short, /* d8 60 */
+npx_fsub_short, /* d8 61 */
+npx_fsub_short, /* d8 62 */
+npx_fsub_short, /* d8 63 */
+npx_fsub_short, /* d8 64 */
+npx_fsub_short, /* d8 65 */
+npx_fsub_short, /* d8 66 */
+npx_fsub_short, /* d8 67 */
+npx_fsubr_short, /* d8 68 */
+npx_fsubr_short, /* d8 69 */
+npx_fsubr_short, /* d8 6a */
+npx_fsubr_short, /* d8 6b */
+npx_fsubr_short, /* d8 6c */
+npx_fsubr_short, /* d8 6d */
+npx_fsubr_short, /* d8 6e */
+npx_fsubr_short, /* d8 6f */
+npx_fdiv_short, /* d8 70 */
+npx_fdiv_short, /* d8 71 */
+npx_fdiv_short, /* d8 72 */
+npx_fdiv_short, /* d8 73 */
+npx_fdiv_short, /* d8 74 */
+npx_fdiv_short, /* d8 75 */
+npx_fdiv_short, /* d8 76 */
+npx_fdiv_short, /* d8 77 */
+npx_fdivr_short, /* d8 78 */
+npx_fdivr_short, /* d8 79 */
+npx_fdivr_short, /* d8 7a */
+npx_fdivr_short, /* d8 7b */
+npx_fdivr_short, /* d8 7c */
+npx_fdivr_short, /* d8 7d */
+npx_fdivr_short, /* d8 7e */
+npx_fdivr_short, /* d8 7f */
+npx_fadd_short, /* d8 80 */
+npx_fadd_short, /* d8 81 */
+npx_fadd_short, /* d8 82 */
+npx_fadd_short, /* d8 83 */
+npx_fadd_short, /* d8 84 */
+npx_fadd_short, /* d8 85 */
+npx_fadd_short, /* d8 86 */
+npx_fadd_short, /* d8 87 */
+npx_fmul_short, /* d8 88 */
+npx_fmul_short, /* d8 89 */
+npx_fmul_short, /* d8 8a */
+npx_fmul_short, /* d8 8b */
+npx_fmul_short, /* d8 8c */
+npx_fmul_short, /* d8 8d */
+npx_fmul_short, /* d8 8e */
+npx_fmul_short, /* d8 8f */
+npx_fcom_short, /* d8 90 */
+npx_fcom_short, /* d8 91 */
+npx_fcom_short, /* d8 92 */
+npx_fcom_short, /* d8 93 */
+npx_fcom_short, /* d8 94 */
+npx_fcom_short, /* d8 95 */
+npx_fcom_short, /* d8 96 */
+npx_fcom_short, /* d8 97 */
+npx_fcomp_short, /* d8 98 */
+npx_fcomp_short, /* d8 99 */
+npx_fcomp_short, /* d8 9a */
+npx_fcomp_short, /* d8 9b */
+npx_fcomp_short, /* d8 9c */
+npx_fcomp_short, /* d8 9d */
+npx_fcomp_short, /* d8 9e */
+npx_fcomp_short, /* d8 9f */
+npx_fsub_short, /* d8 a0 */
+npx_fsub_short, /* d8 a1 */
+npx_fsub_short, /* d8 a2 */
+npx_fsub_short, /* d8 a3 */
+npx_fsub_short, /* d8 a4 */
+npx_fsub_short, /* d8 a5 */
+npx_fsub_short, /* d8 a6 */
+npx_fsub_short, /* d8 a7 */
+npx_fsubr_short, /* d8 a8 */
+npx_fsubr_short, /* d8 a9 */
+npx_fsubr_short, /* d8 aa */
+npx_fsubr_short, /* d8 ab */
+npx_fsubr_short, /* d8 ac */
+npx_fsubr_short, /* d8 ad */
+npx_fsubr_short, /* d8 ae */
+npx_fsubr_short, /* d8 af */
+npx_fdiv_short, /* d8 b0 */
+npx_fdiv_short, /* d8 b1 */
+npx_fdiv_short, /* d8 b2 */
+npx_fdiv_short, /* d8 b3 */
+npx_fdiv_short, /* d8 b4 */
+npx_fdiv_short, /* d8 b5 */
+npx_fdiv_short, /* d8 b6 */
+npx_fdiv_short, /* d8 b7 */
+npx_fdivr_short, /* d8 b8 */
+npx_fdivr_short, /* d8 b9 */
+npx_fdivr_short, /* d8 ba */
+npx_fdivr_short, /* d8 bb */
+npx_fdivr_short, /* d8 bc */
+npx_fdivr_short, /* d8 bd */
+npx_fdivr_short, /* d8 be */
+npx_fdivr_short, /* d8 bf */
+npx_fadd_f0_f0, /* d8 c0 */
+npx_fadd_f0_f1,
+npx_fadd_f0_f2,
+npx_fadd_f0_f3,
+npx_fadd_f0_f4,
+npx_fadd_f0_f5,
+npx_fadd_f0_f6,
+npx_fadd_f0_f7,
+npx_fmul_f0_f0, /* d8 c8 */
+npx_fmul_f0_f1,
+npx_fmul_f0_f2,
+npx_fmul_f0_f3,
+npx_fmul_f0_f4,
+npx_fmul_f0_f5,
+npx_fmul_f0_f6,
+npx_fmul_f0_f7,
+npx_fcom_f0, /* d8 d0 */
+npx_fcom_f1,
+npx_fcom_f2,
+npx_fcom_f3,
+npx_fcom_f4,
+npx_fcom_f5,
+npx_fcom_f6,
+npx_fcom_f7,
+npx_fcomp_f0,
+npx_fcomp_f1,
+npx_fcomp_f2,
+npx_fcomp_f3,
+npx_fcomp_f4,
+npx_fcomp_f5,
+npx_fcomp_f6,
+npx_fcomp_f7,
+npx_fsub_f0_f0, /* d8 e0 */
+npx_fsub_f0_f1,
+npx_fsub_f0_f2,
+npx_fsub_f0_f3,
+npx_fsub_f0_f4,
+npx_fsub_f0_f5,
+npx_fsub_f0_f6,
+npx_fsub_f0_f7,
+npx_fsubr_f0_f0,
+npx_fsubr_f0_f1,
+npx_fsubr_f0_f2,
+npx_fsubr_f0_f3,
+npx_fsubr_f0_f4,
+npx_fsubr_f0_f5,
+npx_fsubr_f0_f6,
+npx_fsubr_f0_f7,
+npx_fdiv_f0_f0, /* d8 f0 */
+npx_fdiv_f0_f1,
+npx_fdiv_f0_f2,
+npx_fdiv_f0_f3,
+npx_fdiv_f0_f4,
+npx_fdiv_f0_f5,
+npx_fdiv_f0_f6,
+npx_fdiv_f0_f7,
+npx_fdivr_f0_f0,
+npx_fdivr_f0_f1,
+npx_fdivr_f0_f2,
+npx_fdivr_f0_f3,
+npx_fdivr_f0_f4,
+npx_fdivr_f0_f5,
+npx_fdivr_f0_f6,
+npx_fdivr_f0_f7,
+npx_fld_short, /* d9 00 */
+npx_fld_short, /* d9 01 */
+npx_fld_short, /* d9 02 */
+npx_fld_short, /* d9 03 */
+npx_fld_short, /* d9 04 */
+npx_fld_short, /* d9 05 */
+npx_fld_short, /* d9 06 */
+npx_fld_short, /* d9 07 */
+npx_funimp, /* d9 08 */
+npx_funimp, /* d9 09 */
+npx_funimp, /* d9 0a */
+npx_funimp, /* d9 0b */
+npx_funimp, /* d9 0c */
+npx_funimp, /* d9 0d */
+npx_funimp, /* d9 0e */
+npx_funimp, /* d9 0f */
+npx_fst_short, /* d9 10 */
+npx_fst_short, /* d9 11 */
+npx_fst_short, /* d9 12 */
+npx_fst_short, /* d9 13 */
+npx_fst_short, /* d9 14 */
+npx_fst_short, /* d9 15 */
+npx_fst_short, /* d9 16 */
+npx_fst_short, /* d9 17 */
+npx_fstp_short, /* d9 18 */
+npx_fstp_short, /* d9 19 */
+npx_fstp_short, /* d9 1a */
+npx_fstp_short, /* d9 1b */
+npx_fstp_short, /* d9 1c */
+npx_fstp_short, /* d9 1d */
+npx_fstp_short, /* d9 1e */
+npx_fstp_short, /* d9 1f */
+npx_fldenv, /* d9 20 */
+npx_fldenv, /* d9 21 */
+npx_fldenv, /* d9 22 */
+npx_fldenv, /* d9 23 */
+npx_fldenv, /* d9 24 */
+npx_fldenv, /* d9 25 */
+npx_fldenv, /* d9 26 */
+npx_fldenv, /* d9 27 */
+npx_fldcw, /* d9 28 */
+npx_fldcw, /* d9 29 */
+npx_fldcw, /* d9 2a */
+npx_fldcw, /* d9 2b */
+npx_fldcw, /* d9 2c */
+npx_fldcw, /* d9 2d */
+npx_fldcw, /* d9 2e */
+npx_fldcw, /* d9 2f */
+npx_fstenv, /* d9 30 */
+npx_fstenv, /* d9 31 */
+npx_fstenv, /* d9 32 */
+npx_fstenv, /* d9 33 */
+npx_fstenv, /* d9 34 */
+npx_fstenv, /* d9 35 */
+npx_fstenv, /* d9 36 */
+npx_fstenv, /* d9 37 */
+npx_fstcw, /* d9 38 */
+npx_fstcw, /* d9 39 */
+npx_fstcw, /* d9 3a */
+npx_fstcw, /* d9 3b */
+npx_fstcw, /* d9 3c */
+npx_fstcw, /* d9 3d */
+npx_fstcw, /* d9 3e */
+npx_fstcw, /* d9 3f */
+npx_fld_short, /* d9 40 */
+npx_fld_short, /* d9 41 */
+npx_fld_short, /* d9 42 */
+npx_fld_short, /* d9 43 */
+npx_fld_short, /* d9 44 */
+npx_fld_short, /* d9 45 */
+npx_fld_short, /* d9 46 */
+npx_fld_short, /* d9 47 */
+npx_funimp, /* d9 48 */
+npx_funimp, /* d9 49 */
+npx_funimp, /* d9 4a */
+npx_funimp, /* d9 4b */
+npx_funimp, /* d9 4c */
+npx_funimp, /* d9 4d */
+npx_funimp, /* d9 4e */
+npx_funimp, /* d9 4f */
+npx_fst_short, /* d9 50 */
+npx_fst_short, /* d9 51 */
+npx_fst_short, /* d9 52 */
+npx_fst_short, /* d9 53 */
+npx_fst_short, /* d9 54 */
+npx_fst_short, /* d9 55 */
+npx_fst_short, /* d9 56 */
+npx_fst_short, /* d9 57 */
+npx_fstp_short, /* d9 58 */
+npx_fstp_short, /* d9 59 */
+npx_fstp_short, /* d9 5a */
+npx_fstp_short, /* d9 5b */
+npx_fstp_short, /* d9 5c */
+npx_fstp_short, /* d9 5d */
+npx_fstp_short, /* d9 5e */
+npx_fstp_short, /* d9 5f */
+npx_fldenv, /* d9 60 */
+npx_fldenv, /* d9 61 */
+npx_fldenv, /* d9 62 */
+npx_fldenv, /* d9 63 */
+npx_fldenv, /* d9 64 */
+npx_fldenv, /* d9 65 */
+npx_fldenv, /* d9 66 */
+npx_fldenv, /* d9 67 */
+npx_fldcw, /* d9 68 */
+npx_fldcw, /* d9 69 */
+npx_fldcw, /* d9 6a */
+npx_fldcw, /* d9 6b */
+npx_fldcw, /* d9 6c */
+npx_fldcw, /* d9 6d */
+npx_fldcw, /* d9 6e */
+npx_fldcw, /* d9 6f */
+npx_fstenv, /* d9 70 */
+npx_fstenv, /* d9 71 */
+npx_fstenv, /* d9 72 */
+npx_fstenv, /* d9 73 */
+npx_fstenv, /* d9 74 */
+npx_fstenv, /* d9 75 */
+npx_fstenv, /* d9 76 */
+npx_fstenv, /* d9 77 */
+npx_fstcw, /* d9 78 */
+npx_fstcw, /* d9 79 */
+npx_fstcw, /* d9 7a */
+npx_fstcw, /* d9 7b */
+npx_fstcw, /* d9 7c */
+npx_fstcw, /* d9 7d */
+npx_fstcw, /* d9 7e */
+npx_fstcw, /* d9 7f */
+npx_fld_short, /* d9 80 */
+npx_fld_short, /* d9 81 */
+npx_fld_short, /* d9 82 */
+npx_fld_short, /* d9 83 */
+npx_fld_short, /* d9 84 */
+npx_fld_short, /* d9 85 */
+npx_fld_short, /* d9 86 */
+npx_fld_short, /* d9 87 */
+npx_funimp, /* d9 88 */
+npx_funimp, /* d9 89 */
+npx_funimp, /* d9 8a */
+npx_funimp, /* d9 8b */
+npx_funimp, /* d9 8c */
+npx_funimp, /* d9 8d */
+npx_funimp, /* d9 8e */
+npx_funimp, /* d9 8f */
+npx_fst_short, /* d9 90 */
+npx_fst_short, /* d9 91 */
+npx_fst_short, /* d9 92 */
+npx_fst_short, /* d9 93 */
+npx_fst_short, /* d9 94 */
+npx_fst_short, /* d9 95 */
+npx_fst_short, /* d9 96 */
+npx_fst_short, /* d9 97 */
+npx_fstp_short, /* d9 98 */
+npx_fstp_short, /* d9 99 */
+npx_fstp_short, /* d9 9a */
+npx_fstp_short, /* d9 9b */
+npx_fstp_short, /* d9 9c */
+npx_fstp_short, /* d9 9d */
+npx_fstp_short, /* d9 9e */
+npx_fstp_short, /* d9 9f */
+npx_fldenv, /* d9 a0 */
+npx_fldenv, /* d9 a1 */
+npx_fldenv, /* d9 a2 */
+npx_fldenv, /* d9 a3 */
+npx_fldenv, /* d9 a4 */
+npx_fldenv, /* d9 a5 */
+npx_fldenv, /* d9 a6 */
+npx_fldenv, /* d9 a7 */
+npx_fldcw, /* d9 a8 */
+npx_fldcw, /* d9 a9 */
+npx_fldcw, /* d9 aa */
+npx_fldcw, /* d9 ab */
+npx_fldcw, /* d9 ac */
+npx_fldcw, /* d9 ad */
+npx_fldcw, /* d9 ae */
+npx_fldcw, /* d9 af */
+npx_fstenv, /* d9 b0 */
+npx_fstenv, /* d9 b1 */
+npx_fstenv, /* d9 b2 */
+npx_fstenv, /* d9 b3 */
+npx_fstenv, /* d9 b4 */
+npx_fstenv, /* d9 b5 */
+npx_fstenv, /* d9 b6 */
+npx_fstenv, /* d9 b7 */
+npx_fstcw, /* d9 b8 */
+npx_fstcw, /* d9 b9 */
+npx_fstcw, /* d9 ba */
+npx_fstcw, /* d9 bb */
+npx_fstcw, /* d9 bc */
+npx_fstcw, /* d9 bd */
+npx_fstcw, /* d9 be */
+npx_fstcw, /* d9 bf */
+npx_fld_f0, /* d9 c0 */
+npx_fld_f1,
+npx_fld_f2,
+npx_fld_f3,
+npx_fld_f4,
+npx_fld_f5,
+npx_fld_f6,
+npx_fld_f7,
+npx_fxch_f0,
+npx_fxch_f1,
+npx_fxch_f2,
+npx_fxch_f3,
+npx_fxch_f4,
+npx_fxch_f5,
+npx_fxch_f6,
+npx_fxch_f7,
+npx_fnop, /* d9 d0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fstp_f0,
+npx_fstp_f1,
+npx_fstp_f2,
+npx_fstp_f3,
+npx_fstp_f4,
+npx_fstp_f5,
+npx_fstp_f6,
+npx_fstp_f7,
+npx_fchs, /* d9 e0 */
+npx_fabs, /* d9 e1 */
+npx_funimp,
+npx_funimp,
+npx_ftst, /* d9 e4 */
+npx_fxam, /* d9 e5 */
+npx_funimp,
+npx_funimp,
+npx_fld1, /* d9 e8 */
+npx_fldl2t, /* d9 e9 */
+npx_fldl2e, /* d9 ea */
+npx_fldpi, /* d9 eb */
+npx_fldlg2, /* d9 ec */
+npx_fldln2, /* d9 ed */
+npx_fldz, /* d9 ee */
+npx_funimp,
+npx_f2xm1, /* d9 f0 */
+npx_fyl2x, /* d9 f1 */
+npx_fptan, /* d9 f2 */
+npx_fpatan, /* d9 f3 */
+npx_fxtract, /* d9 f4 */
+npx_fprem1, /* d9 f5 */
+npx_fdecstp, /* d9 f6 */
+npx_fincstp, /* d9 f7 */
+npx_fprem, /* d9 f8 */
+npx_fyl2xp1, /* d9 f9 */
+npx_fsqrt, /* d9 fa */
+npx_fsincos, /* d9 fb */
+npx_frndint, /* d9 fc */
+npx_fscale, /* d9 fd */
+npx_fsin, /* d9 fe */
+npx_fcos, /* d9 ff */
+npx_fiadd_short, /* da 00 */
+npx_fiadd_short, /* da 01 */
+npx_fiadd_short, /* da 02 */
+npx_fiadd_short, /* da 03 */
+npx_fiadd_short, /* da 04 */
+npx_fiadd_short, /* da 05 */
+npx_fiadd_short, /* da 06 */
+npx_fiadd_short, /* da 07 */
+npx_fimul_short, /* da 08 */
+npx_fimul_short, /* da 09 */
+npx_fimul_short, /* da 0a */
+npx_fimul_short, /* da 0b */
+npx_fimul_short, /* da 0c */
+npx_fimul_short, /* da 0d */
+npx_fimul_short, /* da 0e */
+npx_fimul_short, /* da 0f */
+npx_ficom_short, /* da 10 */
+npx_ficom_short, /* da 11 */
+npx_ficom_short, /* da 12 */
+npx_ficom_short, /* da 13 */
+npx_ficom_short, /* da 14 */
+npx_ficom_short, /* da 15 */
+npx_ficom_short, /* da 16 */
+npx_ficom_short, /* da 17 */
+npx_ficomp_short, /* da 18 */
+npx_ficomp_short, /* da 19 */
+npx_ficomp_short, /* da 1a */
+npx_ficomp_short, /* da 1b */
+npx_ficomp_short, /* da 1c */
+npx_ficomp_short, /* da 1d */
+npx_ficomp_short, /* da 1e */
+npx_ficomp_short, /* da 1f */
+npx_fisub_short, /* da 20 */
+npx_fisub_short, /* da 21 */
+npx_fisub_short, /* da 22 */
+npx_fisub_short, /* da 23 */
+npx_fisub_short, /* da 24 */
+npx_fisub_short, /* da 25 */
+npx_fisub_short, /* da 26 */
+npx_fisub_short, /* da 27 */
+npx_fisubr_short, /* da 28 */
+npx_fisubr_short, /* da 29 */
+npx_fisubr_short, /* da 2a */
+npx_fisubr_short, /* da 2b */
+npx_fisubr_short, /* da 2c */
+npx_fisubr_short, /* da 2d */
+npx_fisubr_short, /* da 2e */
+npx_fisubr_short, /* da 2f */
+npx_fidiv_short, /* da 30 */
+npx_fidiv_short, /* da 31 */
+npx_fidiv_short, /* da 32 */
+npx_fidiv_short, /* da 33 */
+npx_fidiv_short, /* da 34 */
+npx_fidiv_short, /* da 35 */
+npx_fidiv_short, /* da 36 */
+npx_fidiv_short, /* da 37 */
+npx_fidivr_short, /* da 38 */
+npx_fidivr_short, /* da 39 */
+npx_fidivr_short, /* da 3a */
+npx_fidivr_short, /* da 3b */
+npx_fidivr_short, /* da 3c */
+npx_fidivr_short, /* da 3d */
+npx_fidivr_short, /* da 3e */
+npx_fidivr_short, /* da 3f */
+npx_fiadd_short, /* da 40 */
+npx_fiadd_short, /* da 41 */
+npx_fiadd_short, /* da 42 */
+npx_fiadd_short, /* da 43 */
+npx_fiadd_short, /* da 44 */
+npx_fiadd_short, /* da 45 */
+npx_fiadd_short, /* da 46 */
+npx_fiadd_short, /* da 47 */
+npx_fimul_short, /* da 48 */
+npx_fimul_short, /* da 49 */
+npx_fimul_short, /* da 4a */
+npx_fimul_short, /* da 4b */
+npx_fimul_short, /* da 4c */
+npx_fimul_short, /* da 4d */
+npx_fimul_short, /* da 4e */
+npx_fimul_short, /* da 4f */
+npx_ficom_short, /* da 50 */
+npx_ficom_short, /* da 51 */
+npx_ficom_short, /* da 52 */
+npx_ficom_short, /* da 53 */
+npx_ficom_short, /* da 54 */
+npx_ficom_short, /* da 55 */
+npx_ficom_short, /* da 56 */
+npx_ficom_short, /* da 57 */
+npx_ficomp_short, /* da 58 */
+npx_ficomp_short, /* da 59 */
+npx_ficomp_short, /* da 5a */
+npx_ficomp_short, /* da 5b */
+npx_ficomp_short, /* da 5c */
+npx_ficomp_short, /* da 5d */
+npx_ficomp_short, /* da 5e */
+npx_ficomp_short, /* da 5f */
+npx_fisub_short, /* da 60 */
+npx_fisub_short, /* da 61 */
+npx_fisub_short, /* da 62 */
+npx_fisub_short, /* da 63 */
+npx_fisub_short, /* da 64 */
+npx_fisub_short, /* da 65 */
+npx_fisub_short, /* da 66 */
+npx_fisub_short, /* da 67 */
+npx_fisubr_short, /* da 68 */
+npx_fisubr_short, /* da 69 */
+npx_fisubr_short, /* da 6a */
+npx_fisubr_short, /* da 6b */
+npx_fisubr_short, /* da 6c */
+npx_fisubr_short, /* da 6d */
+npx_fisubr_short, /* da 6e */
+npx_fisubr_short, /* da 6f */
+npx_fidiv_short, /* da 70 */
+npx_fidiv_short, /* da 71 */
+npx_fidiv_short, /* da 72 */
+npx_fidiv_short, /* da 73 */
+npx_fidiv_short, /* da 74 */
+npx_fidiv_short, /* da 75 */
+npx_fidiv_short, /* da 76 */
+npx_fidiv_short, /* da 77 */
+npx_fidivr_short, /* da 78 */
+npx_fidivr_short, /* da 79 */
+npx_fidivr_short, /* da 7a */
+npx_fidivr_short, /* da 7b */
+npx_fidivr_short, /* da 7c */
+npx_fidivr_short, /* da 7d */
+npx_fidivr_short, /* da 7e */
+npx_fidivr_short, /* da 7f */
+npx_fiadd_short, /* da 80 */
+npx_fiadd_short, /* da 81 */
+npx_fiadd_short, /* da 82 */
+npx_fiadd_short, /* da 83 */
+npx_fiadd_short, /* da 84 */
+npx_fiadd_short, /* da 85 */
+npx_fiadd_short, /* da 86 */
+npx_fiadd_short, /* da 87 */
+npx_fimul_short, /* da 88 */
+npx_fimul_short, /* da 89 */
+npx_fimul_short, /* da 8a */
+npx_fimul_short, /* da 8b */
+npx_fimul_short, /* da 8c */
+npx_fimul_short, /* da 8d */
+npx_fimul_short, /* da 8e */
+npx_fimul_short, /* da 8f */
+npx_ficom_short, /* da 90 */
+npx_ficom_short, /* da 91 */
+npx_ficom_short, /* da 92 */
+npx_ficom_short, /* da 93 */
+npx_ficom_short, /* da 94 */
+npx_ficom_short, /* da 95 */
+npx_ficom_short, /* da 96 */
+npx_ficom_short, /* da 97 */
+npx_ficomp_short, /* da 98 */
+npx_ficomp_short, /* da 99 */
+npx_ficomp_short, /* da 9a */
+npx_ficomp_short, /* da 9b */
+npx_ficomp_short, /* da 9c */
+npx_ficomp_short, /* da 9d */
+npx_ficomp_short, /* da 9e */
+npx_ficomp_short, /* da 9f */
+npx_fisub_short, /* da a0 */
+npx_fisub_short, /* da a1 */
+npx_fisub_short, /* da a2 */
+npx_fisub_short, /* da a3 */
+npx_fisub_short, /* da a4 */
+npx_fisub_short, /* da a5 */
+npx_fisub_short, /* da a6 */
+npx_fisub_short, /* da a7 */
+npx_fisubr_short, /* da a8 */
+npx_fisubr_short, /* da a9 */
+npx_fisubr_short, /* da aa */
+npx_fisubr_short, /* da ab */
+npx_fisubr_short, /* da ac */
+npx_fisubr_short, /* da ad */
+npx_fisubr_short, /* da ae */
+npx_fisubr_short, /* da af */
+npx_fidiv_short, /* da b0 */
+npx_fidiv_short, /* da b1 */
+npx_fidiv_short, /* da b2 */
+npx_fidiv_short, /* da b3 */
+npx_fidiv_short, /* da b4 */
+npx_fidiv_short, /* da b5 */
+npx_fidiv_short, /* da b6 */
+npx_fidiv_short, /* da b7 */
+npx_fidivr_short, /* da b8 */
+npx_fidivr_short, /* da b9 */
+npx_fidivr_short, /* da ba */
+npx_fidivr_short, /* da bb */
+npx_fidivr_short, /* da bc */
+npx_fidivr_short, /* da bd */
+npx_fidivr_short, /* da be */
+npx_fidivr_short, /* da bf */
+npx_funimp, /* da c0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp, /* da d0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp, /* da e0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fucompp, /* da e9 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp, /* da f0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fild_short, /* db 00 */
+npx_fild_short, /* db 01 */
+npx_fild_short, /* db 02 */
+npx_fild_short, /* db 03 */
+npx_fild_short, /* db 04 */
+npx_fild_short, /* db 05 */
+npx_fild_short, /* db 06 */
+npx_fild_short, /* db 07 */
+npx_funimp, /* db 08 */
+npx_funimp, /* db 09 */
+npx_funimp, /* db 0a */
+npx_funimp, /* db 0b */
+npx_funimp, /* db 0c */
+npx_funimp, /* db 0d */
+npx_funimp, /* db 0e */
+npx_funimp, /* db 0f */
+npx_fist_short, /* db 10 */
+npx_fist_short, /* db 11 */
+npx_fist_short, /* db 12 */
+npx_fist_short, /* db 13 */
+npx_fist_short, /* db 14 */
+npx_fist_short, /* db 15 */
+npx_fist_short, /* db 16 */
+npx_fist_short, /* db 17 */
+npx_fistp_short, /* db 18 */
+npx_fistp_short, /* db 19 */
+npx_fistp_short, /* db 1a */
+npx_fistp_short, /* db 1b */
+npx_fistp_short, /* db 1c */
+npx_fistp_short, /* db 1d */
+npx_fistp_short, /* db 1e */
+npx_fistp_short, /* db 1f */
+npx_funimp, /* db 20 */
+npx_funimp, /* db 21 */
+npx_funimp, /* db 22 */
+npx_funimp, /* db 23 */
+npx_funimp, /* db 24 */
+npx_funimp, /* db 25 */
+npx_funimp, /* db 26 */
+npx_funimp, /* db 27 */
+npx_fld_temp, /* db 28 */
+npx_fld_temp, /* db 29 */
+npx_fld_temp, /* db 2a */
+npx_fld_temp, /* db 2b */
+npx_fld_temp, /* db 2c */
+npx_fld_temp, /* db 2d */
+npx_fld_temp, /* db 2e */
+npx_fld_temp, /* db 2f */
+npx_funimp, /* db 30 */
+npx_funimp, /* db 31 */
+npx_funimp, /* db 32 */
+npx_funimp, /* db 33 */
+npx_funimp, /* db 34 */
+npx_funimp, /* db 35 */
+npx_funimp, /* db 36 */
+npx_funimp, /* db 37 */
+npx_fstp_temp, /* db 38 */
+npx_fstp_temp, /* db 39 */
+npx_fstp_temp, /* db 3a */
+npx_fstp_temp, /* db 3b */
+npx_fstp_temp, /* db 3c */
+npx_fstp_temp, /* db 3d */
+npx_fstp_temp, /* db 3e */
+npx_fstp_temp, /* db 3f */
+npx_fild_short, /* db 40 */
+npx_fild_short, /* db 41 */
+npx_fild_short, /* db 42 */
+npx_fild_short, /* db 43 */
+npx_fild_short, /* db 44 */
+npx_fild_short, /* db 45 */
+npx_fild_short, /* db 46 */
+npx_fild_short, /* db 47 */
+npx_funimp, /* db 48 */
+npx_funimp, /* db 49 */
+npx_funimp, /* db 4a */
+npx_funimp, /* db 4b */
+npx_funimp, /* db 4c */
+npx_funimp, /* db 4d */
+npx_funimp, /* db 4e */
+npx_funimp, /* db 4f */
+npx_fist_short, /* db 50 */
+npx_fist_short, /* db 51 */
+npx_fist_short, /* db 52 */
+npx_fist_short, /* db 53 */
+npx_fist_short, /* db 54 */
+npx_fist_short, /* db 55 */
+npx_fist_short, /* db 56 */
+npx_fist_short, /* db 57 */
+npx_fistp_short, /* db 58 */
+npx_fistp_short, /* db 59 */
+npx_fistp_short, /* db 5a */
+npx_fistp_short, /* db 5b */
+npx_fistp_short, /* db 5c */
+npx_fistp_short, /* db 5d */
+npx_fistp_short, /* db 5e */
+npx_fistp_short, /* db 5f */
+npx_funimp, /* db 60 */
+npx_funimp, /* db 61 */
+npx_funimp, /* db 62 */
+npx_funimp, /* db 63 */
+npx_funimp, /* db 64 */
+npx_funimp, /* db 65 */
+npx_funimp, /* db 66 */
+npx_funimp, /* db 67 */
+npx_fld_temp, /* db 68 */
+npx_fld_temp, /* db 69 */
+npx_fld_temp, /* db 6a */
+npx_fld_temp, /* db 6b */
+npx_fld_temp, /* db 6c */
+npx_fld_temp, /* db 6d */
+npx_fld_temp, /* db 6e */
+npx_fld_temp, /* db 6f */
+npx_funimp, /* db 70 */
+npx_funimp, /* db 71 */
+npx_funimp, /* db 72 */
+npx_funimp, /* db 73 */
+npx_funimp, /* db 74 */
+npx_funimp, /* db 75 */
+npx_funimp, /* db 76 */
+npx_funimp, /* db 77 */
+npx_fstp_temp, /* db 78 */
+npx_fstp_temp, /* db 79 */
+npx_fstp_temp, /* db 7a */
+npx_fstp_temp, /* db 7b */
+npx_fstp_temp, /* db 7c */
+npx_fstp_temp, /* db 7d */
+npx_fstp_temp, /* db 7e */
+npx_fstp_temp, /* db 7f */
+npx_fild_short, /* db 80 */
+npx_fild_short, /* db 81 */
+npx_fild_short, /* db 82 */
+npx_fild_short, /* db 83 */
+npx_fild_short, /* db 84 */
+npx_fild_short, /* db 85 */
+npx_fild_short, /* db 86 */
+npx_fild_short, /* db 87 */
+npx_funimp, /* db 88 */
+npx_funimp, /* db 89 */
+npx_funimp, /* db 8a */
+npx_funimp, /* db 8b */
+npx_funimp, /* db 8c */
+npx_funimp, /* db 8d */
+npx_funimp, /* db 8e */
+npx_funimp, /* db 8f */
+npx_fist_short, /* db 90 */
+npx_fist_short, /* db 91 */
+npx_fist_short, /* db 92 */
+npx_fist_short, /* db 93 */
+npx_fist_short, /* db 94 */
+npx_fist_short, /* db 95 */
+npx_fist_short, /* db 96 */
+npx_fist_short, /* db 97 */
+npx_fistp_short, /* db 98 */
+npx_fistp_short, /* db 99 */
+npx_fistp_short, /* db 9a */
+npx_fistp_short, /* db 9b */
+npx_fistp_short, /* db 9c */
+npx_fistp_short, /* db 9d */
+npx_fistp_short, /* db 9e */
+npx_fistp_short, /* db 9f */
+npx_funimp, /* db a0 */
+npx_funimp, /* db a1 */
+npx_funimp, /* db a2 */
+npx_funimp, /* db a3 */
+npx_funimp, /* db a4 */
+npx_funimp, /* db a5 */
+npx_funimp, /* db a6 */
+npx_funimp, /* db a7 */
+npx_fld_temp, /* db a8 */
+npx_fld_temp, /* db a9 */
+npx_fld_temp, /* db aa */
+npx_fld_temp, /* db ab */
+npx_fld_temp, /* db ac */
+npx_fld_temp, /* db ad */
+npx_fld_temp, /* db ae */
+npx_fld_temp, /* db af */
+npx_funimp, /* db b0 */
+npx_funimp, /* db b1 */
+npx_funimp, /* db b2 */
+npx_funimp, /* db b3 */
+npx_funimp, /* db b4 */
+npx_funimp, /* db b5 */
+npx_funimp, /* db b6 */
+npx_funimp, /* db b7 */
+npx_fstp_temp, /* db b8 */
+npx_fstp_temp, /* db b9 */
+npx_fstp_temp, /* db ba */
+npx_fstp_temp, /* db bb */
+npx_fstp_temp, /* db bc */
+npx_fstp_temp, /* db bd */
+npx_fstp_temp, /* db be */
+npx_fstp_temp, /* db bf */
+npx_funimp, /* db c0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp, /* db d0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fnop, /* db e0 */
+npx_fnop,
+npx_fclex, /* db e2 */
+npx_finit, /* db e3 */
+npx_fnop, /* db e4 - used to be fsetpm */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp, /* db f0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fadd_long, /* dc 00 */
+npx_fadd_long, /* dc 01 */
+npx_fadd_long, /* dc 02 */
+npx_fadd_long, /* dc 03 */
+npx_fadd_long, /* dc 04 */
+npx_fadd_long, /* dc 05 */
+npx_fadd_long, /* dc 06 */
+npx_fadd_long, /* dc 07 */
+npx_fmul_long, /* dc 08 */
+npx_fmul_long, /* dc 09 */
+npx_fmul_long, /* dc 0a */
+npx_fmul_long, /* dc 0b */
+npx_fmul_long, /* dc 0c */
+npx_fmul_long, /* dc 0d */
+npx_fmul_long, /* dc 0e */
+npx_fmul_long, /* dc 0f */
+npx_fcom_long, /* dc 10 */
+npx_fcom_long, /* dc 11 */
+npx_fcom_long, /* dc 12 */
+npx_fcom_long, /* dc 13 */
+npx_fcom_long, /* dc 14 */
+npx_fcom_long, /* dc 15 */
+npx_fcom_long, /* dc 16 */
+npx_fcom_long, /* dc 17 */
+npx_fcomp_long, /* dc 18 */
+npx_fcomp_long, /* dc 19 */
+npx_fcomp_long, /* dc 1a */
+npx_fcomp_long, /* dc 1b */
+npx_fcomp_long, /* dc 1c */
+npx_fcomp_long, /* dc 1d */
+npx_fcomp_long, /* dc 1e */
+npx_fcomp_long, /* dc 1f */
+npx_fsub_long, /* dc 20 */
+npx_fsub_long, /* dc 21 */
+npx_fsub_long, /* dc 22 */
+npx_fsub_long, /* dc 23 */
+npx_fsub_long, /* dc 24 */
+npx_fsub_long, /* dc 25 */
+npx_fsub_long, /* dc 26 */
+npx_fsub_long, /* dc 27 */
+npx_fsubr_long, /* dc 28 */
+npx_fsubr_long, /* dc 29 */
+npx_fsubr_long, /* dc 2a */
+npx_fsubr_long, /* dc 2b */
+npx_fsubr_long, /* dc 2c */
+npx_fsubr_long, /* dc 2d */
+npx_fsubr_long, /* dc 2e */
+npx_fsubr_long, /* dc 2f */
+npx_fdiv_long, /* dc 30 */
+npx_fdiv_long, /* dc 31 */
+npx_fdiv_long, /* dc 32 */
+npx_fdiv_long, /* dc 33 */
+npx_fdiv_long, /* dc 34 */
+npx_fdiv_long, /* dc 35 */
+npx_fdiv_long, /* dc 36 */
+npx_fdiv_long, /* dc 37 */
+npx_fdivr_long, /* dc 38 */
+npx_fdivr_long, /* dc 39 */
+npx_fdivr_long, /* dc 3a */
+npx_fdivr_long, /* dc 3b */
+npx_fdivr_long, /* dc 3c */
+npx_fdivr_long, /* dc 3d */
+npx_fdivr_long, /* dc 3e */
+npx_fdivr_long, /* dc 3f */
+npx_fadd_long, /* dc 40 */
+npx_fadd_long, /* dc 41 */
+npx_fadd_long, /* dc 42 */
+npx_fadd_long, /* dc 43 */
+npx_fadd_long, /* dc 44 */
+npx_fadd_long, /* dc 45 */
+npx_fadd_long, /* dc 46 */
+npx_fadd_long, /* dc 47 */
+npx_fmul_long, /* dc 48 */
+npx_fmul_long, /* dc 49 */
+npx_fmul_long, /* dc 4a */
+npx_fmul_long, /* dc 4b */
+npx_fmul_long, /* dc 4c */
+npx_fmul_long, /* dc 4d */
+npx_fmul_long, /* dc 4e */
+npx_fmul_long, /* dc 4f */
+npx_fcom_long, /* dc 50 */
+npx_fcom_long, /* dc 51 */
+npx_fcom_long, /* dc 52 */
+npx_fcom_long, /* dc 53 */
+npx_fcom_long, /* dc 54 */
+npx_fcom_long, /* dc 55 */
+npx_fcom_long, /* dc 56 */
+npx_fcom_long, /* dc 57 */
+npx_fcomp_long, /* dc 58 */
+npx_fcomp_long, /* dc 59 */
+npx_fcomp_long, /* dc 5a */
+npx_fcomp_long, /* dc 5b */
+npx_fcomp_long, /* dc 5c */
+npx_fcomp_long, /* dc 5d */
+npx_fcomp_long, /* dc 5e */
+npx_fcomp_long, /* dc 5f */
+npx_fsub_long, /* dc 60 */
+npx_fsub_long, /* dc 61 */
+npx_fsub_long, /* dc 62 */
+npx_fsub_long, /* dc 63 */
+npx_fsub_long, /* dc 64 */
+npx_fsub_long, /* dc 65 */
+npx_fsub_long, /* dc 66 */
+npx_fsub_long, /* dc 67 */
+npx_fsubr_long, /* dc 68 */
+npx_fsubr_long, /* dc 69 */
+npx_fsubr_long, /* dc 6a */
+npx_fsubr_long, /* dc 6b */
+npx_fsubr_long, /* dc 6c */
+npx_fsubr_long, /* dc 6d */
+npx_fsubr_long, /* dc 6e */
+npx_fsubr_long, /* dc 6f */
+npx_fdiv_long, /* dc 70 */
+npx_fdiv_long, /* dc 71 */
+npx_fdiv_long, /* dc 72 */
+npx_fdiv_long, /* dc 73 */
+npx_fdiv_long, /* dc 74 */
+npx_fdiv_long, /* dc 75 */
+npx_fdiv_long, /* dc 76 */
+npx_fdiv_long, /* dc 77 */
+npx_fdivr_long, /* dc 78 */
+npx_fdivr_long, /* dc 79 */
+npx_fdivr_long, /* dc 7a */
+npx_fdivr_long, /* dc 7b */
+npx_fdivr_long, /* dc 7c */
+npx_fdivr_long, /* dc 7d */
+npx_fdivr_long, /* dc 7e */
+npx_fdivr_long, /* dc 7f */
+npx_fadd_long, /* dc 80 */
+npx_fadd_long, /* dc 81 */
+npx_fadd_long, /* dc 82 */
+npx_fadd_long, /* dc 83 */
+npx_fadd_long, /* dc 84 */
+npx_fadd_long, /* dc 85 */
+npx_fadd_long, /* dc 86 */
+npx_fadd_long, /* dc 87 */
+npx_fmul_long, /* dc 88 */
+npx_fmul_long, /* dc 89 */
+npx_fmul_long, /* dc 8a */
+npx_fmul_long, /* dc 8b */
+npx_fmul_long, /* dc 8c */
+npx_fmul_long, /* dc 8d */
+npx_fmul_long, /* dc 8e */
+npx_fmul_long, /* dc 8f */
+npx_fcom_long, /* dc 90 */
+npx_fcom_long, /* dc 91 */
+npx_fcom_long, /* dc 92 */
+npx_fcom_long, /* dc 93 */
+npx_fcom_long, /* dc 94 */
+npx_fcom_long, /* dc 95 */
+npx_fcom_long, /* dc 96 */
+npx_fcom_long, /* dc 97 */
+npx_fcomp_long, /* dc 98 */
+npx_fcomp_long, /* dc 99 */
+npx_fcomp_long, /* dc 9a */
+npx_fcomp_long, /* dc 9b */
+npx_fcomp_long, /* dc 9c */
+npx_fcomp_long, /* dc 9d */
+npx_fcomp_long, /* dc 9e */
+npx_fcomp_long, /* dc 9f */
+npx_fsub_long, /* dc a0 */
+npx_fsub_long, /* dc a1 */
+npx_fsub_long, /* dc a2 */
+npx_fsub_long, /* dc a3 */
+npx_fsub_long, /* dc a4 */
+npx_fsub_long, /* dc a5 */
+npx_fsub_long, /* dc a6 */
+npx_fsub_long, /* dc a7 */
+npx_fsubr_long, /* dc a8 */
+npx_fsubr_long, /* dc a9 */
+npx_fsubr_long, /* dc aa */
+npx_fsubr_long, /* dc ab */
+npx_fsubr_long, /* dc ac */
+npx_fsubr_long, /* dc ad */
+npx_fsubr_long, /* dc ae */
+npx_fsubr_long, /* dc af */
+npx_fdiv_long, /* dc b0 */
+npx_fdiv_long, /* dc b1 */
+npx_fdiv_long, /* dc b2 */
+npx_fdiv_long, /* dc b3 */
+npx_fdiv_long, /* dc b4 */
+npx_fdiv_long, /* dc b5 */
+npx_fdiv_long, /* dc b6 */
+npx_fdiv_long, /* dc b7 */
+npx_fdivr_long, /* dc b8 */
+npx_fdivr_long, /* dc b9 */
+npx_fdivr_long, /* dc ba */
+npx_fdivr_long, /* dc bb */
+npx_fdivr_long, /* dc bc */
+npx_fdivr_long, /* dc bd */
+npx_fdivr_long, /* dc be */
+npx_fdivr_long, /* dc bf */
+npx_fadd_f0_f0, /* dc c0 */
+npx_fadd_f1_f0,
+npx_fadd_f2_f0,
+npx_fadd_f3_f0,
+npx_fadd_f4_f0,
+npx_fadd_f5_f0,
+npx_fadd_f6_f0,
+npx_fadd_f7_f0,
+npx_fmul_f0_f0, /* dc c8 */
+npx_fmul_f1_f0,
+npx_fmul_f2_f0,
+npx_fmul_f3_f0,
+npx_fmul_f4_f0,
+npx_fmul_f5_f0,
+npx_fmul_f6_f0,
+npx_fmul_f7_f0,
+npx_fcom_f0, /* dc d0 */
+npx_fcom_f1,
+npx_fcom_f2,
+npx_fcom_f3,
+npx_fcom_f4,
+npx_fcom_f5,
+npx_fcom_f6,
+npx_fcom_f7,
+npx_fcomp_f0,
+npx_fcomp_f1,
+npx_fcomp_f2,
+npx_fcomp_f3,
+npx_fcomp_f4,
+npx_fcomp_f5,
+npx_fcomp_f6,
+npx_fcomp_f7,
+npx_fsubr_f0_f0, /* dc e0 */
+npx_fsubr_f1_f0,
+npx_fsubr_f2_f0,
+npx_fsubr_f3_f0,
+npx_fsubr_f4_f0,
+npx_fsubr_f5_f0,
+npx_fsubr_f6_f0,
+npx_fsubr_f7_f0,
+npx_fsub_f0_f0, /* dc e8 */
+npx_fsub_f1_f0,
+npx_fsub_f2_f0,
+npx_fsub_f3_f0,
+npx_fsub_f4_f0,
+npx_fsub_f5_f0,
+npx_fsub_f6_f0,
+npx_fsub_f7_f0,
+npx_fdivr_f0_f0, /* dc f0 */
+npx_fdivr_f1_f0,
+npx_fdivr_f2_f0,
+npx_fdivr_f3_f0,
+npx_fdivr_f4_f0,
+npx_fdivr_f5_f0,
+npx_fdivr_f6_f0,
+npx_fdivr_f7_f0,
+npx_fdiv_f0_f0, /* dc f8 */
+npx_fdiv_f1_f0,
+npx_fdiv_f2_f0,
+npx_fdiv_f3_f0,
+npx_fdiv_f4_f0,
+npx_fdiv_f5_f0,
+npx_fdiv_f6_f0,
+npx_fdiv_f7_f0,
+npx_fld_long, /* dd 00 */
+npx_fld_long, /* dd 01 */
+npx_fld_long, /* dd 02 */
+npx_fld_long, /* dd 03 */
+npx_fld_long, /* dd 04 */
+npx_fld_long, /* dd 05 */
+npx_fld_long, /* dd 06 */
+npx_fld_long, /* dd 07 */
+npx_funimp, /* dd 08 */
+npx_funimp, /* dd 09 */
+npx_funimp, /* dd 0a */
+npx_funimp, /* dd 0b */
+npx_funimp, /* dd 0c */
+npx_funimp, /* dd 0d */
+npx_funimp, /* dd 0e */
+npx_funimp, /* dd 0f */
+npx_fst_long, /* dd 10 */
+npx_fst_long, /* dd 11 */
+npx_fst_long, /* dd 12 */
+npx_fst_long, /* dd 13 */
+npx_fst_long, /* dd 14 */
+npx_fst_long, /* dd 15 */
+npx_fst_long, /* dd 16 */
+npx_fst_long, /* dd 17 */
+npx_fstp_long, /* dd 18 */
+npx_fstp_long, /* dd 19 */
+npx_fstp_long, /* dd 1a */
+npx_fstp_long, /* dd 1b */
+npx_fstp_long, /* dd 1c */
+npx_fstp_long, /* dd 1d */
+npx_fstp_long, /* dd 1e */
+npx_fstp_long, /* dd 1f */
+npx_frstor, /* dd 20 */
+npx_frstor, /* dd 21 */
+npx_frstor, /* dd 22 */
+npx_frstor, /* dd 23 */
+npx_frstor, /* dd 24 */
+npx_frstor, /* dd 25 */
+npx_frstor, /* dd 26 */
+npx_frstor, /* dd 27 */
+npx_funimp, /* dd 28 */
+npx_funimp, /* dd 29 */
+npx_funimp, /* dd 2a */
+npx_funimp, /* dd 2b */
+npx_funimp, /* dd 2c */
+npx_funimp, /* dd 2d */
+npx_funimp, /* dd 2e */
+npx_funimp, /* dd 2f */
+npx_fsave, /* dd 30 */
+npx_fsave, /* dd 31 */
+npx_fsave, /* dd 32 */
+npx_fsave, /* dd 33 */
+npx_fsave, /* dd 34 */
+npx_fsave, /* dd 35 */
+npx_fsave, /* dd 36 */
+npx_fsave, /* dd 37 */
+npx_fstsw, /* dd 38 */
+npx_fstsw, /* dd 39 */
+npx_fstsw, /* dd 3a */
+npx_fstsw, /* dd 3b */
+npx_fstsw, /* dd 3c */
+npx_fstsw, /* dd 3d */
+npx_fstsw, /* dd 3e */
+npx_fstsw, /* dd 3f */
+npx_fld_long, /* dd 40 */
+npx_fld_long, /* dd 41 */
+npx_fld_long, /* dd 42 */
+npx_fld_long, /* dd 43 */
+npx_fld_long, /* dd 44 */
+npx_fld_long, /* dd 45 */
+npx_fld_long, /* dd 46 */
+npx_fld_long, /* dd 47 */
+npx_funimp, /* dd 48 */
+npx_funimp, /* dd 49 */
+npx_funimp, /* dd 4a */
+npx_funimp, /* dd 4b */
+npx_funimp, /* dd 4c */
+npx_funimp, /* dd 4d */
+npx_funimp, /* dd 4e */
+npx_funimp, /* dd 4f */
+npx_fst_long, /* dd 50 */
+npx_fst_long, /* dd 51 */
+npx_fst_long, /* dd 52 */
+npx_fst_long, /* dd 53 */
+npx_fst_long, /* dd 54 */
+npx_fst_long, /* dd 55 */
+npx_fst_long, /* dd 56 */
+npx_fst_long, /* dd 57 */
+npx_fstp_long, /* dd 58 */
+npx_fstp_long, /* dd 59 */
+npx_fstp_long, /* dd 5a */
+npx_fstp_long, /* dd 5b */
+npx_fstp_long, /* dd 5c */
+npx_fstp_long, /* dd 5d */
+npx_fstp_long, /* dd 5e */
+npx_fstp_long, /* dd 5f */
+npx_frstor, /* dd 60 */
+npx_frstor, /* dd 61 */
+npx_frstor, /* dd 62 */
+npx_frstor, /* dd 63 */
+npx_frstor, /* dd 64 */
+npx_frstor, /* dd 65 */
+npx_frstor, /* dd 66 */
+npx_frstor, /* dd 67 */
+npx_funimp, /* dd 68 */
+npx_funimp, /* dd 69 */
+npx_funimp, /* dd 6a */
+npx_funimp, /* dd 6b */
+npx_funimp, /* dd 6c */
+npx_funimp, /* dd 6d */
+npx_funimp, /* dd 6e */
+npx_funimp, /* dd 6f */
+npx_fsave, /* dd 70 */
+npx_fsave, /* dd 71 */
+npx_fsave, /* dd 72 */
+npx_fsave, /* dd 73 */
+npx_fsave, /* dd 74 */
+npx_fsave, /* dd 75 */
+npx_fsave, /* dd 76 */
+npx_fsave, /* dd 77 */
+npx_fstsw, /* dd 78 */
+npx_fstsw, /* dd 79 */
+npx_fstsw, /* dd 7a */
+npx_fstsw, /* dd 7b */
+npx_fstsw, /* dd 7c */
+npx_fstsw, /* dd 7d */
+npx_fstsw, /* dd 7e */
+npx_fstsw, /* dd 7f */
+npx_fld_long, /* dd 80 */
+npx_fld_long, /* dd 81 */
+npx_fld_long, /* dd 82 */
+npx_fld_long, /* dd 83 */
+npx_fld_long, /* dd 84 */
+npx_fld_long, /* dd 85 */
+npx_fld_long, /* dd 86 */
+npx_fld_long, /* dd 87 */
+npx_funimp, /* dd 88 */
+npx_funimp, /* dd 89 */
+npx_funimp, /* dd 8a */
+npx_funimp, /* dd 8b */
+npx_funimp, /* dd 8c */
+npx_funimp, /* dd 8d */
+npx_funimp, /* dd 8e */
+npx_funimp, /* dd 8f */
+npx_fst_long, /* dd 90 */
+npx_fst_long, /* dd 91 */
+npx_fst_long, /* dd 92 */
+npx_fst_long, /* dd 93 */
+npx_fst_long, /* dd 94 */
+npx_fst_long, /* dd 95 */
+npx_fst_long, /* dd 96 */
+npx_fst_long, /* dd 97 */
+npx_fstp_long, /* dd 98 */
+npx_fstp_long, /* dd 99 */
+npx_fstp_long, /* dd 9a */
+npx_fstp_long, /* dd 9b */
+npx_fstp_long, /* dd 9c */
+npx_fstp_long, /* dd 9d */
+npx_fstp_long, /* dd 9e */
+npx_fstp_long, /* dd 9f */
+npx_frstor, /* dd a0 */
+npx_frstor, /* dd a1 */
+npx_frstor, /* dd a2 */
+npx_frstor, /* dd a3 */
+npx_frstor, /* dd a4 */
+npx_frstor, /* dd a5 */
+npx_frstor, /* dd a6 */
+npx_frstor, /* dd a7 */
+npx_funimp, /* dd a8 */
+npx_funimp, /* dd a9 */
+npx_funimp, /* dd aa */
+npx_funimp, /* dd ab */
+npx_funimp, /* dd ac */
+npx_funimp, /* dd ad */
+npx_funimp, /* dd ae */
+npx_funimp, /* dd af */
+npx_fsave, /* dd b0 */
+npx_fsave, /* dd b1 */
+npx_fsave, /* dd b2 */
+npx_fsave, /* dd b3 */
+npx_fsave, /* dd b4 */
+npx_fsave, /* dd b5 */
+npx_fsave, /* dd b6 */
+npx_fsave, /* dd b7 */
+npx_fstsw, /* dd b8 */
+npx_fstsw, /* dd b9 */
+npx_fstsw, /* dd ba */
+npx_fstsw, /* dd bb */
+npx_fstsw, /* dd bc */
+npx_fstsw, /* dd bd */
+npx_fstsw, /* dd be */
+npx_fstsw, /* dd bf */
+npx_ffree_f0, /* dd c0 */
+npx_ffree_f1,
+npx_ffree_f2,
+npx_ffree_f3,
+npx_ffree_f4,
+npx_ffree_f5,
+npx_ffree_f6,
+npx_ffree_f7,
+npx_fxch_f0, /* dd c8 */
+npx_fxch_f1,
+npx_fxch_f2,
+npx_fxch_f3,
+npx_fxch_f4,
+npx_fxch_f5,
+npx_fxch_f6,
+npx_fxch_f7,
+npx_fst_f0, /* dd d0 */
+npx_fst_f1,
+npx_fst_f2,
+npx_fst_f3,
+npx_fst_f4,
+npx_fst_f5,
+npx_fst_f6,
+npx_fst_f7,
+npx_fstp_f0, /* dd d8 */
+npx_fstp_f1,
+npx_fstp_f2,
+npx_fstp_f3,
+npx_fstp_f4,
+npx_fstp_f5,
+npx_fstp_f6,
+npx_fstp_f7,
+npx_fucom_f0, /* dd e0 */
+npx_fucom_f1,
+npx_fucom_f2,
+npx_fucom_f3,
+npx_fucom_f4,
+npx_fucom_f5,
+npx_fucom_f6,
+npx_fucom_f7,
+npx_fucomp_f0, /* dd e8 */
+npx_fucomp_f1,
+npx_fucomp_f2,
+npx_fucomp_f3,
+npx_fucomp_f4,
+npx_fucomp_f5,
+npx_fucomp_f6,
+npx_fucomp_f7,
+npx_funimp, /* dd f0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fiadd_word, /* de 00 */
+npx_fiadd_word, /* de 01 */
+npx_fiadd_word, /* de 02 */
+npx_fiadd_word, /* de 03 */
+npx_fiadd_word, /* de 04 */
+npx_fiadd_word, /* de 05 */
+npx_fiadd_word, /* de 06 */
+npx_fiadd_word, /* de 07 */
+npx_fimul_word, /* de 08 */
+npx_fimul_word, /* de 09 */
+npx_fimul_word, /* de 0a */
+npx_fimul_word, /* de 0b */
+npx_fimul_word, /* de 0c */
+npx_fimul_word, /* de 0d */
+npx_fimul_word, /* de 0e */
+npx_fimul_word, /* de 0f */
+npx_ficom_word, /* de 10 */
+npx_ficom_word, /* de 11 */
+npx_ficom_word, /* de 12 */
+npx_ficom_word, /* de 13 */
+npx_ficom_word, /* de 14 */
+npx_ficom_word, /* de 15 */
+npx_ficom_word, /* de 16 */
+npx_ficom_word, /* de 17 */
+npx_ficomp_word, /* de 18 */
+npx_ficomp_word, /* de 19 */
+npx_ficomp_word, /* de 1a */
+npx_ficomp_word, /* de 1b */
+npx_ficomp_word, /* de 1c */
+npx_ficomp_word, /* de 1d */
+npx_ficomp_word, /* de 1e */
+npx_ficomp_word, /* de 1f */
+npx_fisub_word, /* de 20 */
+npx_fisub_word, /* de 21 */
+npx_fisub_word, /* de 22 */
+npx_fisub_word, /* de 23 */
+npx_fisub_word, /* de 24 */
+npx_fisub_word, /* de 25 */
+npx_fisub_word, /* de 26 */
+npx_fisub_word, /* de 27 */
+npx_fisubr_word, /* de 28 */
+npx_fisubr_word, /* de 29 */
+npx_fisubr_word, /* de 2a */
+npx_fisubr_word, /* de 2b */
+npx_fisubr_word, /* de 2c */
+npx_fisubr_word, /* de 2d */
+npx_fisubr_word, /* de 2e */
+npx_fisubr_word, /* de 2f */
+npx_fidiv_word, /* de 30 */
+npx_fidiv_word, /* de 31 */
+npx_fidiv_word, /* de 32 */
+npx_fidiv_word, /* de 33 */
+npx_fidiv_word, /* de 34 */
+npx_fidiv_word, /* de 35 */
+npx_fidiv_word, /* de 36 */
+npx_fidiv_word, /* de 37 */
+npx_fidivr_word, /* de 38 */
+npx_fidivr_word, /* de 39 */
+npx_fidivr_word, /* de 3a */
+npx_fidivr_word, /* de 3b */
+npx_fidivr_word, /* de 3c */
+npx_fidivr_word, /* de 3d */
+npx_fidivr_word, /* de 3e */
+npx_fidivr_word, /* de 3f */
+npx_fiadd_word, /* de 40 */
+npx_fiadd_word, /* de 41 */
+npx_fiadd_word, /* de 42 */
+npx_fiadd_word, /* de 43 */
+npx_fiadd_word, /* de 44 */
+npx_fiadd_word, /* de 45 */
+npx_fiadd_word, /* de 46 */
+npx_fiadd_word, /* de 47 */
+npx_fimul_word, /* de 48 */
+npx_fimul_word, /* de 49 */
+npx_fimul_word, /* de 4a */
+npx_fimul_word, /* de 4b */
+npx_fimul_word, /* de 4c */
+npx_fimul_word, /* de 4d */
+npx_fimul_word, /* de 4e */
+npx_fimul_word, /* de 4f */
+npx_ficom_word, /* de 50 */
+npx_ficom_word, /* de 51 */
+npx_ficom_word, /* de 52 */
+npx_ficom_word, /* de 53 */
+npx_ficom_word, /* de 54 */
+npx_ficom_word, /* de 55 */
+npx_ficom_word, /* de 56 */
+npx_ficom_word, /* de 57 */
+npx_ficomp_word, /* de 58 */
+npx_ficomp_word, /* de 59 */
+npx_ficomp_word, /* de 5a */
+npx_ficomp_word, /* de 5b */
+npx_ficomp_word, /* de 5c */
+npx_ficomp_word, /* de 5d */
+npx_ficomp_word, /* de 5e */
+npx_ficomp_word, /* de 5f */
+npx_fisub_word, /* de 60 */
+npx_fisub_word, /* de 61 */
+npx_fisub_word, /* de 62 */
+npx_fisub_word, /* de 63 */
+npx_fisub_word, /* de 64 */
+npx_fisub_word, /* de 65 */
+npx_fisub_word, /* de 66 */
+npx_fisub_word, /* de 67 */
+npx_fisubr_word, /* de 68 */
+npx_fisubr_word, /* de 69 */
+npx_fisubr_word, /* de 6a */
+npx_fisubr_word, /* de 6b */
+npx_fisubr_word, /* de 6c */
+npx_fisubr_word, /* de 6d */
+npx_fisubr_word, /* de 6e */
+npx_fisubr_word, /* de 6f */
+npx_fidiv_word, /* de 70 */
+npx_fidiv_word, /* de 71 */
+npx_fidiv_word, /* de 72 */
+npx_fidiv_word, /* de 73 */
+npx_fidiv_word, /* de 74 */
+npx_fidiv_word, /* de 75 */
+npx_fidiv_word, /* de 76 */
+npx_fidiv_word, /* de 77 */
+npx_fidivr_word, /* de 78 */
+npx_fidivr_word, /* de 79 */
+npx_fidivr_word, /* de 7a */
+npx_fidivr_word, /* de 7b */
+npx_fidivr_word, /* de 7c */
+npx_fidivr_word, /* de 7d */
+npx_fidivr_word, /* de 7e */
+npx_fidivr_word, /* de 7f */
+npx_fiadd_word, /* de 80 */
+npx_fiadd_word, /* de 81 */
+npx_fiadd_word, /* de 82 */
+npx_fiadd_word, /* de 83 */
+npx_fiadd_word, /* de 84 */
+npx_fiadd_word, /* de 85 */
+npx_fiadd_word, /* de 86 */
+npx_fiadd_word, /* de 87 */
+npx_fimul_word, /* de 88 */
+npx_fimul_word, /* de 89 */
+npx_fimul_word, /* de 8a */
+npx_fimul_word, /* de 8b */
+npx_fimul_word, /* de 8c */
+npx_fimul_word, /* de 8d */
+npx_fimul_word, /* de 8e */
+npx_fimul_word, /* de 8f */
+npx_ficom_word, /* de 90 */
+npx_ficom_word, /* de 91 */
+npx_ficom_word, /* de 92 */
+npx_ficom_word, /* de 93 */
+npx_ficom_word, /* de 94 */
+npx_ficom_word, /* de 95 */
+npx_ficom_word, /* de 96 */
+npx_ficom_word, /* de 97 */
+npx_ficomp_word, /* de 98 */
+npx_ficomp_word, /* de 99 */
+npx_ficomp_word, /* de 9a */
+npx_ficomp_word, /* de 9b */
+npx_ficomp_word, /* de 9c */
+npx_ficomp_word, /* de 9d */
+npx_ficomp_word, /* de 9e */
+npx_ficomp_word, /* de 9f */
+npx_fisub_word, /* de a0 */
+npx_fisub_word, /* de a1 */
+npx_fisub_word, /* de a2 */
+npx_fisub_word, /* de a3 */
+npx_fisub_word, /* de a4 */
+npx_fisub_word, /* de a5 */
+npx_fisub_word, /* de a6 */
+npx_fisub_word, /* de a7 */
+npx_fisubr_word, /* de a8 */
+npx_fisubr_word, /* de a9 */
+npx_fisubr_word, /* de aa */
+npx_fisubr_word, /* de ab */
+npx_fisubr_word, /* de ac */
+npx_fisubr_word, /* de ad */
+npx_fisubr_word, /* de ae */
+npx_fisubr_word, /* de af */
+npx_fidiv_word, /* de b0 */
+npx_fidiv_word, /* de b1 */
+npx_fidiv_word, /* de b2 */
+npx_fidiv_word, /* de b3 */
+npx_fidiv_word, /* de b4 */
+npx_fidiv_word, /* de b5 */
+npx_fidiv_word, /* de b6 */
+npx_fidiv_word, /* de b7 */
+npx_fidivr_word, /* de b8 */
+npx_fidivr_word, /* de b9 */
+npx_fidivr_word, /* de ba */
+npx_fidivr_word, /* de bb */
+npx_fidivr_word, /* de bc */
+npx_fidivr_word, /* de bd */
+npx_fidivr_word, /* de be */
+npx_fidivr_word, /* de bf */
+npx_faddp_f0, /* de c0 */
+npx_faddp_f1,
+npx_faddp_f2,
+npx_faddp_f3,
+npx_faddp_f4,
+npx_faddp_f5,
+npx_faddp_f6,
+npx_faddp_f7,
+npx_fmulp_f0, /* de c8 */
+npx_fmulp_f1,
+npx_fmulp_f2,
+npx_fmulp_f3,
+npx_fmulp_f4,
+npx_fmulp_f5,
+npx_fmulp_f6,
+npx_fmulp_f7,
+npx_fcomp_f0, /* de d0 */
+npx_fcomp_f1,
+npx_fcomp_f2,
+npx_fcomp_f3,
+npx_fcomp_f4,
+npx_fcomp_f5,
+npx_fcomp_f6,
+npx_fcomp_f7,
+npx_funimp,
+npx_fcompp, /* de d9 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_fsubrp_f0, /* de e0 */
+npx_fsubrp_f1,
+npx_fsubrp_f2,
+npx_fsubrp_f3,
+npx_fsubrp_f4,
+npx_fsubrp_f5,
+npx_fsubrp_f6,
+npx_fsubrp_f7,
+npx_fsubp_f0, /* de e8 */
+npx_fsubp_f1,
+npx_fsubp_f2,
+npx_fsubp_f3,
+npx_fsubp_f4,
+npx_fsubp_f5,
+npx_fsubp_f6,
+npx_fsubp_f7,
+npx_fdivrp_f0, /* de f0 */
+npx_fdivrp_f1,
+npx_fdivrp_f2,
+npx_fdivrp_f3,
+npx_fdivrp_f4,
+npx_fdivrp_f5,
+npx_fdivrp_f6,
+npx_fdivrp_f7,
+npx_fdivp_f0, /* de f8 */
+npx_fdivp_f1,
+npx_fdivp_f2,
+npx_fdivp_f3,
+npx_fdivp_f4,
+npx_fdivp_f5,
+npx_fdivp_f6,
+npx_fdivp_f7,
+npx_fild_word, /* df 00 */
+npx_fild_word, /* df 01 */
+npx_fild_word, /* df 02 */
+npx_fild_word, /* df 03 */
+npx_fild_word, /* df 04 */
+npx_fild_word, /* df 05 */
+npx_fild_word, /* df 06 */
+npx_fild_word, /* df 07 */
+npx_funimp, /* df 08 */
+npx_funimp, /* df 09 */
+npx_funimp, /* df 0a */
+npx_funimp, /* df 0b */
+npx_funimp, /* df 0c */
+npx_funimp, /* df 0d */
+npx_funimp, /* df 0e */
+npx_funimp, /* df 0f */
+npx_fist_word, /* df 10 */
+npx_fist_word, /* df 11 */
+npx_fist_word, /* df 12 */
+npx_fist_word, /* df 13 */
+npx_fist_word, /* df 14 */
+npx_fist_word, /* df 15 */
+npx_fist_word, /* df 16 */
+npx_fist_word, /* df 17 */
+npx_fistp_word, /* df 18 */
+npx_fistp_word, /* df 19 */
+npx_fistp_word, /* df 1a */
+npx_fistp_word, /* df 1b */
+npx_fistp_word, /* df 1c */
+npx_fistp_word, /* df 1d */
+npx_fistp_word, /* df 1e */
+npx_fistp_word, /* df 1f */
+npx_fbld, /* df 20 */
+npx_fbld, /* df 21 */
+npx_fbld, /* df 22 */
+npx_fbld, /* df 23 */
+npx_fbld, /* df 24 */
+npx_fbld, /* df 25 */
+npx_fbld, /* df 26 */
+npx_fbld, /* df 27 */
+npx_fild_long, /* df 28 */
+npx_fild_long, /* df 29 */
+npx_fild_long, /* df 2a */
+npx_fild_long, /* df 2b */
+npx_fild_long, /* df 2c */
+npx_fild_long, /* df 2d */
+npx_fild_long, /* df 2e */
+npx_fild_long, /* df 2f */
+npx_fbstp, /* df 30 */
+npx_fbstp, /* df 31 */
+npx_fbstp, /* df 32 */
+npx_fbstp, /* df 33 */
+npx_fbstp, /* df 34 */
+npx_fbstp, /* df 35 */
+npx_fbstp, /* df 36 */
+npx_fbstp, /* df 37 */
+npx_fistp_long, /* df 38 */
+npx_fistp_long, /* df 39 */
+npx_fistp_long, /* df 3a */
+npx_fistp_long, /* df 3b */
+npx_fistp_long, /* df 3c */
+npx_fistp_long, /* df 3d */
+npx_fistp_long, /* df 3e */
+npx_fistp_long, /* df 3f */
+npx_fild_word, /* df 40 */
+npx_fild_word, /* df 41 */
+npx_fild_word, /* df 42 */
+npx_fild_word, /* df 43 */
+npx_fild_word, /* df 44 */
+npx_fild_word, /* df 45 */
+npx_fild_word, /* df 46 */
+npx_fild_word, /* df 47 */
+npx_funimp, /* df 48 */
+npx_funimp, /* df 49 */
+npx_funimp, /* df 4a */
+npx_funimp, /* df 4b */
+npx_funimp, /* df 4c */
+npx_funimp, /* df 4d */
+npx_funimp, /* df 4e */
+npx_funimp, /* df 4f */
+npx_fist_word, /* df 50 */
+npx_fist_word, /* df 51 */
+npx_fist_word, /* df 52 */
+npx_fist_word, /* df 53 */
+npx_fist_word, /* df 54 */
+npx_fist_word, /* df 55 */
+npx_fist_word, /* df 56 */
+npx_fist_word, /* df 57 */
+npx_fistp_word, /* df 58 */
+npx_fistp_word, /* df 59 */
+npx_fistp_word, /* df 5a */
+npx_fistp_word, /* df 5b */
+npx_fistp_word, /* df 5c */
+npx_fistp_word, /* df 5d */
+npx_fistp_word, /* df 5e */
+npx_fistp_word, /* df 5f */
+npx_fbld, /* df 60 */
+npx_fbld, /* df 61 */
+npx_fbld, /* df 62 */
+npx_fbld, /* df 63 */
+npx_fbld, /* df 64 */
+npx_fbld, /* df 65 */
+npx_fbld, /* df 66 */
+npx_fbld, /* df 67 */
+npx_fild_long, /* df 68 */
+npx_fild_long, /* df 69 */
+npx_fild_long, /* df 6a */
+npx_fild_long, /* df 6b */
+npx_fild_long, /* df 6c */
+npx_fild_long, /* df 6d */
+npx_fild_long, /* df 6e */
+npx_fild_long, /* df 6f */
+npx_fbstp, /* df 70 */
+npx_fbstp, /* df 71 */
+npx_fbstp, /* df 72 */
+npx_fbstp, /* df 73 */
+npx_fbstp, /* df 74 */
+npx_fbstp, /* df 75 */
+npx_fbstp, /* df 76 */
+npx_fbstp, /* df 77 */
+npx_fistp_long, /* df 78 */
+npx_fistp_long, /* df 79 */
+npx_fistp_long, /* df 7a */
+npx_fistp_long, /* df 7b */
+npx_fistp_long, /* df 7c */
+npx_fistp_long, /* df 7d */
+npx_fistp_long, /* df 7e */
+npx_fistp_long, /* df 7f */
+npx_fild_word, /* df 80 */
+npx_fild_word, /* df 81 */
+npx_fild_word, /* df 82 */
+npx_fild_word, /* df 83 */
+npx_fild_word, /* df 84 */
+npx_fild_word, /* df 85 */
+npx_fild_word, /* df 86 */
+npx_fild_word, /* df 87 */
+npx_funimp, /* df 88 */
+npx_funimp, /* df 89 */
+npx_funimp, /* df 8a */
+npx_funimp, /* df 8b */
+npx_funimp, /* df 8c */
+npx_funimp, /* df 8d */
+npx_funimp, /* df 8e */
+npx_funimp, /* df 8f */
+npx_fist_word, /* df 90 */
+npx_fist_word, /* df 91 */
+npx_fist_word, /* df 92 */
+npx_fist_word, /* df 93 */
+npx_fist_word, /* df 94 */
+npx_fist_word, /* df 95 */
+npx_fist_word, /* df 96 */
+npx_fist_word, /* df 97 */
+npx_fistp_word, /* df 98 */
+npx_fistp_word, /* df 99 */
+npx_fistp_word, /* df 9a */
+npx_fistp_word, /* df 9b */
+npx_fistp_word, /* df 9c */
+npx_fistp_word, /* df 9d */
+npx_fistp_word, /* df 9e */
+npx_fistp_word, /* df 9f */
+npx_fbld, /* df a0 */
+npx_fbld, /* df a1 */
+npx_fbld, /* df a2 */
+npx_fbld, /* df a3 */
+npx_fbld, /* df a4 */
+npx_fbld, /* df a5 */
+npx_fbld, /* df a6 */
+npx_fbld, /* df a7 */
+npx_fild_long, /* df a8 */
+npx_fild_long, /* df a9 */
+npx_fild_long, /* df aa */
+npx_fild_long, /* df ab */
+npx_fild_long, /* df ac */
+npx_fild_long, /* df ad */
+npx_fild_long, /* df ae */
+npx_fild_long, /* df af */
+npx_fbstp, /* df b0 */
+npx_fbstp, /* df b1 */
+npx_fbstp, /* df b2 */
+npx_fbstp, /* df b3 */
+npx_fbstp, /* df b4 */
+npx_fbstp, /* df b5 */
+npx_fbstp, /* df b6 */
+npx_fbstp, /* df b7 */
+npx_fistp_long, /* df b8 */
+npx_fistp_long, /* df b9 */
+npx_fistp_long, /* df ba */
+npx_fistp_long, /* df bb */
+npx_fistp_long, /* df bc */
+npx_fistp_long, /* df bd */
+npx_fistp_long, /* df be */
+npx_fistp_long, /* df bf */
+npx_ffreep_f0, /* df c0 */
+npx_ffreep_f1,
+npx_ffreep_f2,
+npx_ffreep_f3,
+npx_ffreep_f4,
+npx_ffreep_f5,
+npx_ffreep_f6,
+npx_ffreep_f7,
+npx_fxch_f0, /* df c8 */
+npx_fxch_f1,
+npx_fxch_f2,
+npx_fxch_f3,
+npx_fxch_f4,
+npx_fxch_f5,
+npx_fxch_f6,
+npx_fxch_f7,
+npx_fstp_f0, /* df d0 */
+npx_fstp_f1,
+npx_fstp_f2,
+npx_fstp_f3,
+npx_fstp_f4,
+npx_fstp_f5,
+npx_fstp_f6,
+npx_fstp_f7,
+npx_fstp_f0, /* df d8 */
+npx_fstp_f1,
+npx_fstp_f2,
+npx_fstp_f3,
+npx_fstp_f4,
+npx_fstp_f5,
+npx_fstp_f6,
+npx_fstp_f7,
+npx_fstswax, /* df e0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp, /* df f0 */
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp,
+npx_funimp
+};
+
+VOID ZFRSRVD(npx_instr) /* dispatch one FPU escape (D8-DF) instruction through inst_table */
+IU32 npx_instr; /* table index derived from the escape opcode and modR/M byte (see per-entry comments above) */
+{
+ if (!NPX_PROT_MODE) {
+ NpxInstr = npx_instr; /* real mode only: latch the instruction — presumably for the FSAVE/FSTENV environment image; TODO confirm */
+ }
+ if (DoNpxPrologue()) /* prologue raises INT 7 and returns FALSE when the coprocessor is unavailable */
+ (*inst_table[npx_instr])();
+}
+
+LOCAL BOOL DoNpxPrologue() { /* availability check run before every NPX instruction dispatch */
+ if (GET_EM() || GET_TS()) { /* EM (emulate-math) or TS (task-switched) flag set */
+ INTx(0x7); /* raise vector 7 — device-not-available (#NM) */
+ return(FALSE); /* caller must not execute the FPU instruction */
+ } else {
+ return(TRUE); /* coprocessor available; go ahead and dispatch */
+ }
+}
+
+GLOBAL IBOOL NpxIntrNeeded = FALSE; /* TRUE when a recorded FPU fault must raise an interrupt on the next NPX instruction */
+LOCAL IU32 NpxExceptionEIP = 0; /* FIP captured at the time of the faulting instruction */
+
+VOID DoNpxException() { /* record a pending FPU exception; actual delivery is deferred (see TakeNpxExceptionInt) */
+
+ NpxException = FALSE; /* consume the exception flag */
+ NpxExceptionEIP = NpxFIP; /* remember where the fault occurred */
+ NpxIntrNeeded = TRUE; /* interrupt delayed until next NPX inst */
+}
+
+/* called on NPX instr that follows faulting instr */
+void TakeNpxExceptionInt() /* deliver the FPU exception recorded by DoNpxException */
+{
+ IU32 hook_address;
+ IU16 cpu_hw_interrupt_number;
+
+ NpxIntrNeeded = FALSE; /* pending exception is being delivered now */
+ NpxFIP = NpxExceptionEIP; /* report the faulting instruction's IP, not the current one */
+
+#ifdef SPC486
+ if (GET_NE() == 0) /* NE flag clear: external (PC/AT-compatible) error reporting */
+ {
+#ifndef SFELLOW
+ ica_hw_interrupt (ICA_SLAVE, CPU_AT_NPX_INT, 1); /* assert the AT NPX line on the slave PIC */
+#else /* SFELLOW */
+ c_cpu_interrupt(CPU_NPX_INT, 0); /* SoftFellow build uses its own interrupt hook */
+#endif /* SFELLOW */
+ }
+ else
+ {
+ Int16(); /* NE flag set: native math-fault reporting via vector 16 */
+ }
+#else /* SPC486 */
+ ica_hw_interrupt (ICA_SLAVE, CPU_AT_NPX_INT, 1); /* pre-486 CPU: always use the external IRQ path */
+#endif /* SPC486 */
+
+#ifndef SFELLOW
+ /* and immediately dispatch to interrupt */
+ if (GET_IF()) /* only if maskable interrupts are enabled */
+ {
+ cpu_hw_interrupt_number = ica_intack(&hook_address); /* interrupt-acknowledge cycle: fetch vector from the PIC */
+ EXT = EXTERNAL; /* mark the event as externally generated */
+ do_intrupt(cpu_hw_interrupt_number, FALSE, FALSE, (IU16)0);
+ CCPU_save_EIP = GET_EIP(); /* to reflect IP change */
+ }
+#endif /*SFELLOW*/
+}
diff --git a/private/mvdm/softpc.new/base/ccpu386/zfrsrvd.h b/private/mvdm/softpc.new/base/ccpu386/zfrsrvd.h
new file mode 100644
index 000000000..2de84495d
--- /dev/null
+++ b/private/mvdm/softpc.new/base/ccpu386/zfrsrvd.h
@@ -0,0 +1,15 @@
+/*
+ zfrsrvd.h
+
+ Define Reserved Floating Point CPU functions.
+ */
+
+/*
+ static char SccsID[]="@(#)zfrsrvd.h 1.4 02/09/94";
+ */
+
+IMPORT VOID ZFRSRVD /* dispatch an FPU escape (D8-DF) instruction; defined in zfrsrvd.c */
+
+IPT1(
+ IU32, npxopcode /* opcode/modR/M table index — named npx_instr in the definition */
+ );