Compare commits


70 Commits

Author SHA1 Message Date
bellard
fcf8fcc8e5 update
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@382 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:56:56 +00:00
bellard
dbc5594cb6 finished simplifying string operations - correct TF flag handling for string operations and ss loading - simplified basic block exit code generation
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@381 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:56:30 +00:00
bellard
4cbb86e1c4 added JUMP_TB2 for a third basic block exit jump point
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@380 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:53:29 +00:00
bellard
f513a41a3d finished simplifying string operations
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@379 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:52:47 +00:00
bellard
c106152d26 added two more jump points
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@378 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:51:45 +00:00
bellard
facc68be25 removed x86 hacks
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@377 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:51:18 +00:00
bellard
3ff0631ed9 added linux < 2.4.21 vm86 bug workaround - added extensive TF flag test
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@376 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-17 22:49:51 +00:00
bellard
b1ba65744e depth 32 fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@375 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-16 21:47:08 +00:00
bellard
b67d59594e glibc 2.3.x fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@374 c046a42c-6fe2-441c-8c8c-71466251a162
2003-09-16 21:46:04 +00:00
bellard
2e255c6b9f faster and more accurate segment handling
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@373 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-21 23:25:21 +00:00
bellard
3f33731662 pop ss, mov ss, x and sti disable irqs for the next instruction - began dispatch optimization by adding new x86 cpu 'hidden' flags
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@372 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-20 23:02:09 +00:00
bellard
d05e66d217 no error code if hardware interrupt
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@371 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-20 21:34:35 +00:00
bellard
2d80ae8987 avoid problems if make clean was not made before updating
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@370 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 23:01:33 +00:00
bellard
17383a2a2a gcc 3.x is mandatory now on PowerPC
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@369 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 22:28:58 +00:00
bellard
9257a9e49c workaround for gcc 3.3 bug or overoptimisation if a label is not used
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@368 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 22:21:18 +00:00
bellard
70a194b930 fixed invalid Linux asm/unistd.h header for PowerPC and gcc 3.3
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@367 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 22:20:16 +00:00
bellard
2573109866 pass function name to JMUP_TB()
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@366 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 22:19:11 +00:00
bellard
9dfa5b421d 64 bit fixes (Falk Hueffner)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@365 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 20:35:58 +00:00
bellard
9da8ba18e6 mode X double scan fix (malc)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@364 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-11 20:33:04 +00:00
bellard
76bc683820 updated
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@363 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 23:41:46 +00:00
bellard
3b22c4707d fixed invalid ESP usage (Jon Nall)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@362 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 23:40:50 +00:00
bellard
96e6e05372 fixed invalid code gen
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@361 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 23:39:55 +00:00
bellard
e2222c3924 removed warnings
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@360 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 23:39:03 +00:00
bellard
31e8f3c894 PowerPC fix (Jon Nall)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@359 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 22:52:34 +00:00
bellard
9368caf64d updated
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@358 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 22:15:31 +00:00
bellard
38e584a072 m68k host port (Richard Zidlicky)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@357 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 22:14:22 +00:00
bellard
313aa56710 added VGA emulation - added PS/2 mouse and keyboard emulation - use SDL for VGA display
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@356 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:52:11 +00:00
bellard
4cbf74b6b8 soft mmu support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@355 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:48:43 +00:00
bellard
33417e7025 soft mmu support - Memory I/O API - synthetize string instructions
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@354 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:47:01 +00:00
bellard
4021dab059 soft mmu support - moved unrelated code to help2-i386.c - synthetize string instructions
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@353 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:41:46 +00:00
bellard
626df76abb build all targets at the same time
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@352 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:39:31 +00:00
bellard
abcd5da72e use bswap.h
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@351 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:38:48 +00:00
bellard
97a847bc03 build all targets at the same time - SDL probe support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@350 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:36:04 +00:00
bellard
ab93bbe2ae soft mmu support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@349 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-10 21:35:13 +00:00
bellard
0f0b726444 SDL support for VGA, keyboard and mouse
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@348 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-09 18:26:36 +00:00
bellard
b92e5a22ec Software MMU support (used for memory mapped devices such as VGA)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@347 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-08 23:58:05 +00:00
bellard
17b0018b42 Full VGA support, including old CGA modes, VGA planar and mode X
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@346 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-08 23:50:57 +00:00
bellard
39cf780327 fixed graphical VGA 16 color mode - fixed 9 pixel wide text mode
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@345 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-05 23:06:22 +00:00
bellard
e89f66eca9 Hardware level VGA emulation (only text mode is tested)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@344 c046a42c-6fe2-441c-8c8c-71466251a162
2003-08-04 23:30:47 +00:00
bellard
b6d78bfa0d correct CPL support (should fix flat real mode support)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@343 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-29 20:53:01 +00:00
bellard
c33a346edf first part of single stepping support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@342 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-29 20:50:33 +00:00
bellard
61a2ad53cb refresh clock dummy emulation (netbsd boot fix)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@341 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-27 22:19:00 +00:00
bellard
2c1794c42e more generic ljmp and lcall - fixed REPNZ usage for non compare string ops (FreeDos boot loader fix)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@340 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-27 21:11:27 +00:00
bellard
8a4c1cc411 fixed ss segment load - added ICEBP instruction
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@339 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 20:34:00 +00:00
bellard
330d0414a5 keyboard emulation - accepts to boot with Bochs BIOS and LGPL'ed VGA BIOS
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@338 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 18:11:40 +00:00
bellard
3802ce26a1 set to protected mode
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@337 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 18:02:28 +00:00
bellard
4abe615b84 removed debug
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@336 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 18:01:58 +00:00
bellard
a412ac572f real mode support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@335 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 18:01:40 +00:00
bellard
b2b5fb228f popw (%esp) test)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@334 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 18:00:58 +00:00
bellard
8f186479e2 real mode support (now boots from BOCHS BIOS and LGPL'ed VGA BIOS)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@333 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 17:59:00 +00:00
bellard
4c3a88a284 gdb stub breakpoints support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@332 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-26 12:06:08 +00:00
bellard
d6b4936796 update
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@331 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 22:37:44 +00:00
bellard
9d0fe224f4 update
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@330 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 22:08:50 +00:00
bellard
6e0374f6b5 debug print
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@329 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:34:37 +00:00
bellard
9e5f5284b3 convert signal numbers
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@328 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:33:54 +00:00
bellard
c596ed1713 times() fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@327 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:32:31 +00:00
bellard
91cf4d88fb gcc 3.2.2 bug workaround (RedHat 9 fix)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@326 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:31:01 +00:00
bellard
a96fc003bd sparc fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@325 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:30:15 +00:00
bellard
d44b29c21e address printing fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@324 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:29:55 +00:00
bellard
070893f425 RedHat 9 fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@323 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-13 17:27:19 +00:00
bellard
9621339dca changed basic block exit generation
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@322 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-11 15:17:41 +00:00
bellard
ede28208d8 added nop test for exception
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@321 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-11 14:49:58 +00:00
bellard
7739f36e38 fixed EIP exception bug in case of nop operations (kernel 2.5.74 copy_from_user() bug)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@320 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-11 14:49:22 +00:00
bellard
f8c8799840 added return for ARM case (may be incorrect - need checking)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@319 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 19:41:41 +00:00
bellard
43fff2384e ARM signal support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@318 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 19:31:39 +00:00
bellard
1b21b62ab4 ARM fixes
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@317 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 17:16:27 +00:00
bellard
a1516e92b6 ARM init fix
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@316 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 17:13:37 +00:00
bellard
6fb883e8e3 ARM fix: mmap
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@315 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 17:12:39 +00:00
bellard
6e295807ac ARM fixes
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@314 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 17:10:32 +00:00
bellard
f2674e31e0 old select support
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@313 c046a42c-6fe2-441c-8c8c-71466251a162
2003-07-09 12:26:09 +00:00
52 changed files with 7711 additions and 2028 deletions


@@ -1,10 +1,33 @@
version 0.4.4:
- full hardware level VGA emulation
- graphical display with SDL
- added PS/2 mouse and keyboard emulation
- popw (%esp) fix
- mov to/from segment data width fix
- added real mode support
- added Bochs BIOS and LGPL'ed VGA BIOS loader in vl
- m68k host port (Richard Zidlicky)
- partial soft MMU support for memory mapped I/Os
- multi-target build
- fixed: no error code in hardware interrupts
- fixed: pop ss, mov ss, x and sti disable hardware irqs for the next insn
- correct single stepping thru string operations
version 0.4.3:
- x86 exception fix in case of nop instruction.
- gcc 3.2.2 bug workaround (RedHat 9 fix)
- sparc and Alpha host fixes
- many ARM target fixes: 'ls' and 'bash' can be launched.
version 0.4.2:
- many exception handling fixes (can compile a Linux kernel inside vl)
- IDE emulation support
- initial GDB stub support
- deferred update support for disk images (Rusty Russell)
- accept user mode Linux Copy On Write disk images
- accept User Mode Linux Copy On Write disk images
- SMP kernels can at least be booted
version 0.4.1:

Makefile

@@ -1,202 +1,48 @@
include config.mak
include config-host.mak
CFLAGS=-Wall -O2 -g
LDFLAGS=-g
LIBS=
DEFINES=-DHAVE_BYTESWAP_H
HELPER_CFLAGS=$(CFLAGS)
PROGS=qemu
ifdef CONFIG_STATIC
LDFLAGS+=-static
endif
ifeq ($(ARCH),i386)
CFLAGS+=-fomit-frame-pointer
OP_CFLAGS=$(CFLAGS) -mpreferred-stack-boundary=2
ifeq ($(HAVE_GCC3_OPTIONS),yes)
OP_CFLAGS+= -falign-functions=0
else
OP_CFLAGS+= -malign-functions=0
endif
ifdef TARGET_GPROF
LDFLAGS+=-Wl,-T,i386.ld
else
# WARNING: this LDFLAGS is _very_ tricky : qemu is an ELF shared object
# that the kernel ELF loader considers as an executable. I think this
# is the simplest way to make it self virtualizable!
LDFLAGS+=-Wl,-shared
endif
ifeq ($(TARGET_ARCH), i386)
PROGS+=vl vlmkcow
endif
endif
ifeq ($(ARCH),ppc)
OP_CFLAGS=$(CFLAGS)
LDFLAGS+=-Wl,-T,ppc.ld
endif
ifeq ($(ARCH),s390)
OP_CFLAGS=$(CFLAGS)
LDFLAGS+=-Wl,-T,s390.ld
endif
ifeq ($(ARCH),sparc)
CFLAGS+=-m32 -ffixed-g1 -ffixed-g2 -ffixed-g3 -ffixed-g6
LDFLAGS+=-m32
OP_CFLAGS=$(CFLAGS) -fno-delayed-branch -ffixed-i0
HELPER_CFLAGS=$(CFLAGS) -ffixed-i0 -mflat
LDFLAGS+=-Wl,-T,sparc.ld
endif
ifeq ($(ARCH),sparc64)
CFLAGS+=-m64 -ffixed-g1 -ffixed-g2 -ffixed-g3 -ffixed-g6
LDFLAGS+=-m64
OP_CFLAGS=$(CFLAGS) -fno-delayed-branch -ffixed-i0
endif
ifeq ($(ARCH),alpha)
# -msmall-data is not used because we want two-instruction relocations
# for the constant constructions
OP_CFLAGS=-Wall -O2 -g
# Ensure there's only a single GP
CFLAGS += -msmall-data
LDFLAGS+=-Wl,-T,alpha.ld
endif
ifeq ($(ARCH),ia64)
OP_CFLAGS=$(CFLAGS)
endif
ifeq ($(ARCH),arm)
OP_CFLAGS=$(CFLAGS) -mno-sched-prolog
LDFLAGS+=-Wl,-T,arm.ld
endif
ifeq ($(HAVE_GCC3_OPTIONS),yes)
# very important to generate a return at the end of every operation
OP_CFLAGS+=-fno-reorder-blocks -fno-optimize-sibling-calls
endif
#########################################################
DEFINES+=-D_GNU_SOURCE
LIBS+=-lm
TOOLS=vlmkcow
# profiling code
ifdef TARGET_GPROF
LDFLAGS+=-p
main.o: CFLAGS+=-p
endif
OBJS= elfload.o main.o syscall.o mmap.o signal.o path.o
ifeq ($(TARGET_ARCH), i386)
OBJS+= vm86.o
endif
SRCS:= $(OBJS:.o=.c)
OBJS+= libqemu.a
# cpu emulator library
LIBOBJS=thunk.o exec.o translate.o cpu-exec.o gdbstub.o
ifeq ($(TARGET_ARCH), i386)
LIBOBJS+=translate-i386.o op-i386.o helper-i386.o
endif
ifeq ($(TARGET_ARCH), arm)
LIBOBJS+=translate-arm.o op-arm.o
endif
# NOTE: the disassembler code is only needed for debugging
LIBOBJS+=disas.o
ifeq ($(findstring i386, $(TARGET_ARCH) $(ARCH)),i386)
LIBOBJS+=i386-dis.o
endif
ifeq ($(findstring alpha, $(TARGET_ARCH) $(ARCH)),alpha)
LIBOBJS+=alpha-dis.o
endif
ifeq ($(findstring ppc, $(TARGET_ARCH) $(ARCH)),ppc)
LIBOBJS+=ppc-dis.o
endif
ifeq ($(findstring sparc, $(TARGET_ARCH) $(ARCH)),sparc)
LIBOBJS+=sparc-dis.o
endif
ifeq ($(findstring arm, $(TARGET_ARCH) $(ARCH)),arm)
LIBOBJS+=arm-dis.o
endif
ifeq ($(ARCH),ia64)
OBJS += ia64-syscall.o
endif
all: $(PROGS) qemu-doc.html
qemu: $(OBJS)
$(CC) $(LDFLAGS) -o $@ $^ $(LIBS)
ifeq ($(ARCH),alpha)
# Mark as 32 bit binary, i. e. it will be mapped into the low 31 bit of
# the address space (31 bit so sign extending doesn't matter)
echo -ne '\001\000\000\000' | dd of=qemu bs=1 seek=48 count=4 conv=notrunc
endif
# must use static linking to avoid leaving stuff in virtual address space
vl: vl.o block.o libqemu.a
$(CC) -static -Wl,-T,i386-vl.ld -o $@ $^ $(LIBS)
all: dyngen $(TOOLS) qemu-doc.html
for d in $(TARGET_DIRS); do \
make -C $$d $@ || exit 1 ; \
done
vlmkcow: vlmkcow.o
$(CC) -o $@ $^ $(LIBS)
$(HOST_CC) -o $@ $^ $(LIBS)
depend: $(SRCS)
$(CC) -MM $(CFLAGS) $^ 1>.depend
# libqemu
libqemu.a: $(LIBOBJS)
rm -f $@
$(AR) rcs $@ $(LIBOBJS)
dyngen: dyngen.c
$(HOST_CC) -O2 -Wall -g $< -o $@
translate-$(TARGET_ARCH).o: translate-$(TARGET_ARCH).c gen-op-$(TARGET_ARCH).h opc-$(TARGET_ARCH).h cpu-$(TARGET_ARCH).h
translate.o: translate.c op-$(TARGET_ARCH).h opc-$(TARGET_ARCH).h cpu-$(TARGET_ARCH).h
op-$(TARGET_ARCH).h: op-$(TARGET_ARCH).o dyngen
./dyngen -o $@ $<
opc-$(TARGET_ARCH).h: op-$(TARGET_ARCH).o dyngen
./dyngen -c -o $@ $<
gen-op-$(TARGET_ARCH).h: op-$(TARGET_ARCH).o dyngen
./dyngen -g -o $@ $<
op-$(TARGET_ARCH).o: op-$(TARGET_ARCH).c
$(CC) $(OP_CFLAGS) $(DEFINES) -c -o $@ $<
helper-$(TARGET_ARCH).o: helper-$(TARGET_ARCH).c
$(CC) $(HELPER_CFLAGS) $(DEFINES) -c -o $@ $<
op-i386.o: op-i386.c opreg_template.h ops_template.h ops_template_mem.h
op-arm.o: op-arm.c op-arm-template.h
dyngen: dyngen.o
$(HOST_CC) -o $@ $^ $(LIBS)
%.o: %.c
$(CC) $(CFLAGS) $(DEFINES) -c -o $@ $<
$(HOST_CC) $(CFLAGS) $(DEFINES) -c -o $@ $<
clean:
$(MAKE) -C tests clean
rm -f *.o *.a *~ qemu dyngen TAGS
# avoid old build problems by removing potentially incorrect old files
rm -f config.mak config.h op-i386.h opc-i386.h gen-op-i386.h op-arm.h opc-arm.h gen-op-arm.h
rm -f *.o *.a $(TOOLS) dyngen TAGS
for d in $(TARGET_DIRS); do \
make -C $$d $@ || exit 1 ; \
done
distclean: clean
rm -f config.mak config.h
rm -f config-host.mak config-host.h
for d in $(TARGET_DIRS); do \
rm -f $$d/config.h $$d/config.mak || exit 1 ; \
done
install: $(PROGS)
install: all
mkdir -p $(prefix)/bin
install -m 755 -s $(PROGS) $(prefix)/bin
install -m 755 -s $(TOOLS) $(prefix)/bin
for d in $(TARGET_DIRS); do \
make -C $$d $@ || exit 1 ; \
done
# various test targets
test speed: qemu
test speed: all
make -C tests $@
TAGS:
@@ -208,16 +54,17 @@ qemu-doc.html: qemu-doc.texi
FILES= \
README README.distrib COPYING COPYING.LIB TODO Changelog VERSION \
configure \
configure Makefile Makefile.target \
dyngen.c dyngen.h dyngen-exec.h ioctls.h syscall_types.h \
Makefile elf.h elfload.c main.c signal.c qemu.h \
elf.h elfload.c main.c signal.c qemu.h \
syscall.c syscall_defs.h vm86.c path.c mmap.c \
i386.ld ppc.ld alpha.ld s390.ld sparc.ld arm.ld\
vl.c i386-vl.ld vl.h block.c vlmkcow.c\
thunk.c cpu-exec.c translate.c cpu-all.h thunk.h exec.h\
exec.c cpu-exec.c gdbstub.c\
cpu-i386.h op-i386.c helper-i386.c syscall-i386.h translate-i386.c \
i386.ld ppc.ld alpha.ld s390.ld sparc.ld arm.ld m68k.ld \
vl.c i386-vl.ld vl.h block.c vlmkcow.c vga.c vga_template.h sdl.c \
thunk.c cpu-exec.c translate.c cpu-all.h cpu-defs.h thunk.h exec.h\
exec.c cpu-exec.c gdbstub.c bswap.h \
cpu-i386.h op-i386.c helper-i386.c helper2-i386.c syscall-i386.h translate-i386.c \
exec-i386.h ops_template.h ops_template_mem.h op_string.h opreg_template.h \
ops_mem.h softmmu_template.h softmmu_header.h \
cpu-arm.h syscall-arm.h exec-arm.h op-arm.c translate-arm.c op-arm-template.h \
dis-asm.h disas.c disas.h alpha-dis.c ppc-dis.c i386-dis.c sparc-dis.c \
arm-dis.c \

Makefile.target (new file)

@@ -0,0 +1,206 @@
include config.mak
VPATH=$(SRC_PATH)
CFLAGS=-Wall -O2 -g
LDFLAGS=-g
LIBS=
DEFINES=-I.
HELPER_CFLAGS=$(CFLAGS)
DYNGEN=../dyngen
ifndef CONFIG_SOFTMMU
PROGS=qemu
endif
ifdef CONFIG_STATIC
LDFLAGS+=-static
endif
ifeq ($(ARCH),i386)
CFLAGS+=-fomit-frame-pointer
OP_CFLAGS=$(CFLAGS) -mpreferred-stack-boundary=2
ifeq ($(HAVE_GCC3_OPTIONS),yes)
OP_CFLAGS+= -falign-functions=0
else
OP_CFLAGS+= -malign-functions=0
endif
ifdef TARGET_GPROF
LDFLAGS+=-Wl,-T,$(SRC_PATH)/i386.ld
else
# WARNING: this LDFLAGS is _very_ tricky : qemu is an ELF shared object
# that the kernel ELF loader considers as an executable. I think this
# is the simplest way to make it self virtualizable!
LDFLAGS+=-Wl,-shared
endif
ifeq ($(TARGET_ARCH), i386)
PROGS+=vl
endif
endif
ifeq ($(ARCH),ppc)
OP_CFLAGS=$(CFLAGS)
LDFLAGS+=-Wl,-T,$(SRC_PATH)/ppc.ld
endif
ifeq ($(ARCH),s390)
OP_CFLAGS=$(CFLAGS)
LDFLAGS+=-Wl,-T,$(SRC_PATH)/s390.ld
endif
ifeq ($(ARCH),sparc)
CFLAGS+=-m32 -ffixed-g1 -ffixed-g2 -ffixed-g3 -ffixed-g6
LDFLAGS+=-m32
OP_CFLAGS=$(CFLAGS) -fno-delayed-branch -ffixed-i0
HELPER_CFLAGS=$(CFLAGS) -ffixed-i0 -mflat
# -static is used to avoid g1/g3 usage by the dynamic linker
LDFLAGS+=-Wl,-T,$(SRC_PATH)/sparc.ld -static
endif
ifeq ($(ARCH),sparc64)
CFLAGS+=-m64 -ffixed-g1 -ffixed-g2 -ffixed-g3 -ffixed-g6
LDFLAGS+=-m64
OP_CFLAGS=$(CFLAGS) -fno-delayed-branch -ffixed-i0
endif
ifeq ($(ARCH),alpha)
# -msmall-data is not used because we want two-instruction relocations
# for the constant constructions
OP_CFLAGS=-Wall -O2 -g
# Ensure there's only a single GP
CFLAGS += -msmall-data
LDFLAGS+=-Wl,-T,$(SRC_PATH)/alpha.ld
endif
ifeq ($(ARCH),ia64)
OP_CFLAGS=$(CFLAGS)
endif
ifeq ($(ARCH),arm)
OP_CFLAGS=$(CFLAGS) -mno-sched-prolog
LDFLAGS+=-Wl,-T,$(SRC_PATH)/arm.ld
endif
ifeq ($(ARCH),m68k)
OP_CFLAGS=$(CFLAGS) -fomit-frame-pointer
LDFLAGS+=-Wl,-T,m68k.ld
endif
ifeq ($(HAVE_GCC3_OPTIONS),yes)
# very important to generate a return at the end of every operation
OP_CFLAGS+=-fno-reorder-blocks -fno-optimize-sibling-calls
endif
#########################################################
DEFINES+=-D_GNU_SOURCE
LIBS+=-lm
# profiling code
ifdef TARGET_GPROF
LDFLAGS+=-p
main.o: CFLAGS+=-p
endif
OBJS= elfload.o main.o syscall.o mmap.o signal.o path.o
ifeq ($(TARGET_ARCH), i386)
OBJS+= vm86.o
endif
SRCS:= $(OBJS:.o=.c)
OBJS+= libqemu.a
# cpu emulator library
LIBOBJS=thunk.o exec.o translate.o cpu-exec.o gdbstub.o
ifeq ($(TARGET_ARCH), i386)
LIBOBJS+=translate-i386.o op-i386.o helper-i386.o helper2-i386.o
endif
ifeq ($(TARGET_ARCH), arm)
LIBOBJS+=translate-arm.o op-arm.o
endif
# NOTE: the disassembler code is only needed for debugging
LIBOBJS+=disas.o
ifeq ($(findstring i386, $(TARGET_ARCH) $(ARCH)),i386)
LIBOBJS+=i386-dis.o
endif
ifeq ($(findstring alpha, $(TARGET_ARCH) $(ARCH)),alpha)
LIBOBJS+=alpha-dis.o
endif
ifeq ($(findstring ppc, $(TARGET_ARCH) $(ARCH)),ppc)
LIBOBJS+=ppc-dis.o
endif
ifeq ($(findstring sparc, $(TARGET_ARCH) $(ARCH)),sparc)
LIBOBJS+=sparc-dis.o
endif
ifeq ($(findstring arm, $(TARGET_ARCH) $(ARCH)),arm)
LIBOBJS+=arm-dis.o
endif
ifeq ($(ARCH),ia64)
OBJS += ia64-syscall.o
endif
all: $(PROGS) qemu-doc.html
qemu: $(OBJS)
$(CC) $(LDFLAGS) -o $@ $^ $(LIBS)
ifeq ($(ARCH),alpha)
# Mark as 32 bit binary, i. e. it will be mapped into the low 31 bit of
# the address space (31 bit so sign extending doesn't matter)
echo -ne '\001\000\000\000' | dd of=qemu bs=1 seek=48 count=4 conv=notrunc
endif
# must use static linking to avoid leaving stuff in virtual address space
VL_OBJS=vl.o block.o vga.o
ifdef CONFIG_SDL
VL_OBJS+=sdl.o
SDL_LIBS+=-L/usr/X11R6/lib -lX11 -lXext -lXv -ldl
endif
vl: $(VL_OBJS) libqemu.a
$(CC) -static -Wl,-T,$(SRC_PATH)/i386-vl.ld -o $@ $^ $(LIBS) $(SDL_LIBS)
sdl.o: sdl.c
$(CC) $(CFLAGS) $(DEFINES) $(SDL_CFLAGS) -c -o $@ $<
depend: $(SRCS)
$(CC) -MM $(CFLAGS) $^ 1>.depend
# libqemu
libqemu.a: $(LIBOBJS)
rm -f $@
$(AR) rcs $@ $(LIBOBJS)
translate-$(TARGET_ARCH).o: translate-$(TARGET_ARCH).c gen-op-$(TARGET_ARCH).h opc-$(TARGET_ARCH).h cpu-$(TARGET_ARCH).h
translate.o: translate.c op-$(TARGET_ARCH).h opc-$(TARGET_ARCH).h cpu-$(TARGET_ARCH).h
op-$(TARGET_ARCH).h: op-$(TARGET_ARCH).o $(DYNGEN)
$(DYNGEN) -o $@ $<
opc-$(TARGET_ARCH).h: op-$(TARGET_ARCH).o $(DYNGEN)
$(DYNGEN) -c -o $@ $<
gen-op-$(TARGET_ARCH).h: op-$(TARGET_ARCH).o $(DYNGEN)
$(DYNGEN) -g -o $@ $<
op-$(TARGET_ARCH).o: op-$(TARGET_ARCH).c
$(CC) $(OP_CFLAGS) $(DEFINES) -c -o $@ $<
helper-$(TARGET_ARCH).o: helper-$(TARGET_ARCH).c
$(CC) $(HELPER_CFLAGS) $(DEFINES) -c -o $@ $<
op-i386.o: op-i386.c opreg_template.h ops_template.h ops_template_mem.h ops_mem.h
op-arm.o: op-arm.c op-arm-template.h
%.o: %.c
$(CC) $(CFLAGS) $(DEFINES) -c -o $@ $<
clean:
rm -f *.o *.a *~ $(PROGS) \
gen-op-$(TARGET_ARCH).h opc-$(TARGET_ARCH).h op-$(TARGET_ARCH).h
ifneq ($(wildcard .depend),)
include .depend
endif

README

@@ -50,8 +50,10 @@ host gcc binutils glibc linux distribution
x86 2.95.2 2.13.2 2.1.3 2.4.18
3.2 2.13.2 2.1.3 2.4.18
2.96 2.11.93.0.2 2.2.5 2.4.18 Red Hat 7.3
3.2.2 2.13.90.0.18 2.3.2 2.4.20 Red Hat 9
PowerPC 2.95.4 2.12.90.0.1 2.2.5 2.4.20-pre2 Debian 3.0
PowerPC 3.3 [4] 2.13.90.0.18 2.3.1 2.4.20briq
3.2
Alpha 3.3 [1] 2.14.90.0.4 2.2.5 2.2.20 [2] Debian 3.0
@@ -65,6 +67,9 @@ ARM 2.95.4 2.12.90.0.1 2.2.5 2.4.9 [3] Debian 3.0
(untested).
[3] 2.4.9-ac10-rmk2-np1-cerf2
[4] gcc 2.95.x generates invalid code when using too many register
variables. You must use gcc 3.x on PowerPC.
Documentation
-------------


@@ -1 +1 @@
0.4.2
0.4.4


@@ -560,8 +560,8 @@ static arm_regname regnames[] =
{ "a1", "a2", "a3", "a4", "v1", "v2", "v3", "WR", "v5", "SB", "SL", "FP", "IP", "SP", "LR", "PC" }}
};
/* Default to GCC register name set. */
static unsigned int regname_selected = 1;
/* Default to STD register name set. */
static unsigned int regname_selected = 2;
#define NUM_ARM_REGNAMES NUM_ELEM (regnames)
#define arm_regnames regnames[regname_selected].reg_names

bswap.h (new file)

@@ -0,0 +1,84 @@
#ifndef BSWAP_H
#define BSWAP_H
#include "config-host.h"
#include <inttypes.h>
#ifdef HAVE_BYTESWAP_H
#include <byteswap.h>
#else
#define bswap_16(x) \
({ \
uint16_t __x = (x); \
((uint16_t)( \
(((uint16_t)(__x) & (uint16_t)0x00ffU) << 8) | \
(((uint16_t)(__x) & (uint16_t)0xff00U) >> 8) )); \
})
#define bswap_32(x) \
({ \
uint32_t __x = (x); \
((uint32_t)( \
(((uint32_t)(__x) & (uint32_t)0x000000ffUL) << 24) | \
(((uint32_t)(__x) & (uint32_t)0x0000ff00UL) << 8) | \
(((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \
(((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )); \
})
#define bswap_64(x) \
({ \
uint64_t __x = (x); \
((uint64_t)( \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000000000ffULL) << 56) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000000000ff00ULL) << 40) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000ff000000ULL) << 8) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0xff00000000000000ULL) >> 56) )); \
})
#endif /* !HAVE_BYTESWAP_H */
#if defined(__alpha__) || defined (__ia64__)
#define HOST_LONG_BITS 64
#else
#define HOST_LONG_BITS 32
#endif
#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)
static inline uint16_t bswap16(uint16_t x)
{
return bswap_16(x);
}
static inline uint32_t bswap32(uint32_t x)
{
return bswap_32(x);
}
static inline uint64_t bswap64(uint64_t x)
{
return bswap_64(x);
}
static inline void bswap16s(uint16_t *s)
{
*s = bswap16(*s);
}
static inline void bswap32s(uint32_t *s)
{
*s = bswap32(*s);
}
static inline void bswap64s(uint64_t *s)
{
*s = bswap64(*s);
}
#endif /* BSWAP_H */
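
The new bswap.h prefers the system <byteswap.h> when HAVE_BYTESWAP_H is set (configure now defines it) and otherwise falls back to the macros above; the bswapNN()/bswapNNs() inlines are the interface the rest of the tree is meant to use. A minimal usage sketch — the values and the main() harness are illustrative, not part of the patch:

#include <stdio.h>
#include "bswap.h"

int main(void)
{
    uint32_t v = 0x12345678;
    uint16_t s = 0xbeef;

    /* value-returning form */
    printf("%08x -> %08x\n", v, bswap32(v));   /* 12345678 -> 78563412 */

    /* in-place form, handy when fixing up a struct field */
    bswap16s(&s);
    printf("%04x\n", s);                       /* efbe */

    return 0;
}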

configure

@@ -15,7 +15,6 @@ TMPC="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.c"
TMPO="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.o"
TMPE="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}"
TMPS="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.S"
TMPH="${TMPDIR1}/qemu-conf-${RANDOM}-$$-${RANDOM}.h"
# default parameters
prefix="/usr/local"
@@ -27,12 +26,11 @@ host_cc="gcc"
ar="ar"
make="make"
strip="strip"
target_cpu="x86"
target_bigendian="default"
cpu=`uname -m`
target_list="i386 i386-softmmu arm"
case "$cpu" in
i386|i486|i586|i686|i86pc|BePC)
cpu="x86"
cpu="i386"
;;
armv4l)
cpu="armv4l"
@@ -58,6 +56,9 @@ case "$cpu" in
ia64)
cpu="ia64"
;;
m68k)
cpu="m68k"
;;
*)
cpu="unknown"
;;
@@ -71,6 +72,26 @@ case $targetos in
*) ;;
esac
##########################################
# SDL probe
cat > $TMPC << EOF
#include <SDL.h>
#undef main /* We don't want SDL to override our main() */
int main( void ) { return SDL_Init (SDL_INIT_VIDEO); }
EOF
sdl_too_old=no
sdl=no
if $cc -o $TMPE `sdl-config --cflags` $TMPC `sdl-config --libs` 2> /dev/null ; then
_sdlversion=`sdl-config --version | sed 's/[^0-9]//g'`
if test "$_sdlversion" -lt 121 ; then
sdl_too_old=yes
else
sdl=yes
fi
fi
# find source path
# XXX: we assume an absolute path is given when launching configure,
# except in './configure' case.
@@ -104,16 +125,14 @@ for opt do
;;
--cpu=*) cpu=`echo $opt | cut -d '=' -f 2`
;;
--target-cpu=*) target_cpu=`echo $opt | cut -d '=' -f 2`
;;
--target-big-endian) target_bigendian="yes"
;;
--target-little-endian) target_bigendian="no"
--target-list=*) target_list=${opt#--target-list=}
;;
--enable-gprof) gprof="yes"
;;
--static) static="yes"
;;
--disable-sdl) sdl="no"
;;
esac
done
@@ -147,7 +166,7 @@ fi
else
# if cross compiling, cannot launch a program, so make a static guess
if test "$cpu" = "powerpc" -o "$cpu" = "mips" -o "$cpu" = "s390" -o "$cpu" = "sparc" -o "$cpu" = "sparc64"; then
if test "$cpu" = "powerpc" -o "$cpu" = "mips" -o "$cpu" = "s390" -o "$cpu" = "sparc" -o "$cpu" = "sparc64" -o "$cpu" = "m68k"; then
bigendian="yes"
fi
@@ -164,16 +183,6 @@ if $cc -fno-reorder-blocks -fno-optimize-sibling-calls -o $TMPO $TMPC 2> /dev/nu
have_gcc3_options="yes"
fi
if test "$target_bigendian" = "default" ; then
if test "$target_cpu" = "x86" ; then
target_bigendian="no"
elif test "$target_cpu" = "arm" ; then
target_bigendian="no"
else
target_bigendian="no"
fi
fi
if test x"$1" = x"-h" -o x"$1" = x"--help" ; then
cat << EOF
@@ -185,7 +194,7 @@ echo "Standard options:"
echo " --help print this message"
echo " --prefix=PREFIX install in PREFIX [$prefix]"
echo " --interp-prefix=PREFIX where to find shared libraries, etc. [$interp_prefix]"
echo " --target_cpu=CPU set target cpu (x86 or arm) [$target_cpu]"
echo " --target-list=LIST set target list [$target_list]"
echo ""
echo "Advanced options (experts only):"
echo " --source-path=PATH path of source code [$source_path]"
@@ -205,94 +214,142 @@ echo "C compiler $cc"
echo "make $make"
echo "host CPU $cpu"
echo "host big endian $bigendian"
echo "target CPU $target_cpu"
echo "target big endian $target_bigendian"
echo "target list $target_list"
echo "gprof enabled $gprof"
echo "static build $static"
echo "Creating config.mak and config.h"
echo "# Automatically generated by configure - do not modify" > config.mak
echo "/* Automatically generated by configure - do not modify */" > $TMPH
echo "prefix=$prefix" >> config.mak
echo "#define CONFIG_QEMU_PREFIX \"$interp_prefix\"" >> $TMPH
echo "MAKE=$make" >> config.mak
echo "CC=$cc" >> config.mak
if test "$have_gcc3_options" = "yes" ; then
echo "HAVE_GCC3_OPTIONS=yes" >> config.mak
echo "SDL support $sdl"
if test $sdl_too_old = "yes"; then
echo "-> Your SDL version is too old - please upgrade to have FFplay/SDL support"
fi
echo "HOST_CC=$host_cc" >> config.mak
echo "AR=$ar" >> config.mak
echo "STRIP=$strip -s -R .comment -R .note" >> config.mak
echo "CFLAGS=$CFLAGS" >> config.mak
echo "LDFLAGS=$LDFLAGS" >> config.mak
if test "$cpu" = "x86" ; then
echo "ARCH=i386" >> config.mak
echo "#define HOST_I386 1" >> $TMPH
config_mak="config-host.mak"
config_h="config-host.h"
echo "Creating $config_mak and $config_h"
echo "# Automatically generated by configure - do not modify" > $config_mak
echo "/* Automatically generated by configure - do not modify */" > $config_h
echo "prefix=$prefix" >> $config_mak
echo "MAKE=$make" >> $config_mak
echo "CC=$cc" >> $config_mak
if test "$have_gcc3_options" = "yes" ; then
echo "HAVE_GCC3_OPTIONS=yes" >> $config_mak
fi
echo "HOST_CC=$host_cc" >> $config_mak
echo "AR=$ar" >> $config_mak
echo "STRIP=$strip -s -R .comment -R .note" >> $config_mak
echo "CFLAGS=$CFLAGS" >> $config_mak
echo "LDFLAGS=$LDFLAGS" >> $config_mak
if test "$cpu" = "i386" ; then
echo "ARCH=i386" >> $config_mak
echo "#define HOST_I386 1" >> $config_h
elif test "$cpu" = "armv4l" ; then
echo "ARCH=arm" >> config.mak
echo "#define HOST_ARM 1" >> $TMPH
echo "ARCH=arm" >> $config_mak
echo "#define HOST_ARM 1" >> $config_h
elif test "$cpu" = "powerpc" ; then
echo "ARCH=ppc" >> config.mak
echo "#define HOST_PPC 1" >> $TMPH
echo "ARCH=ppc" >> $config_mak
echo "#define HOST_PPC 1" >> $config_h
elif test "$cpu" = "mips" ; then
echo "ARCH=mips" >> config.mak
echo "#define HOST_MIPS 1" >> $TMPH
echo "ARCH=mips" >> $config_mak
echo "#define HOST_MIPS 1" >> $config_h
elif test "$cpu" = "s390" ; then
echo "ARCH=s390" >> config.mak
echo "#define HOST_S390 1" >> $TMPH
echo "ARCH=s390" >> $config_mak
echo "#define HOST_S390 1" >> $config_h
elif test "$cpu" = "alpha" ; then
echo "ARCH=alpha" >> config.mak
echo "#define HOST_ALPHA 1" >> $TMPH
echo "ARCH=alpha" >> $config_mak
echo "#define HOST_ALPHA 1" >> $config_h
elif test "$cpu" = "sparc" ; then
echo "ARCH=sparc" >> config.mak
echo "#define HOST_SPARC 1" >> $TMPH
echo "ARCH=sparc" >> $config_mak
echo "#define HOST_SPARC 1" >> $config_h
elif test "$cpu" = "sparc64" ; then
echo "ARCH=sparc64" >> config.mak
echo "#define HOST_SPARC64 1" >> $TMPH
echo "ARCH=sparc64" >> $config_mak
echo "#define HOST_SPARC64 1" >> $config_h
elif test "$cpu" = "ia64" ; then
echo "ARCH=ia64" >> config.mak
echo "#define HOST_IA64 1" >> $TMPH
echo "ARCH=ia64" >> $config_mak
echo "#define HOST_IA64 1" >> $config_h
elif test "$cpu" = "m68k" ; then
echo "ARCH=m68k" >> config.mak
echo "#define HOST_M68K 1" >> $TMPH
else
echo "Unsupported CPU"
exit 1
fi
if test "$bigendian" = "yes" ; then
echo "WORDS_BIGENDIAN=yes" >> config.mak
echo "#define WORDS_BIGENDIAN 1" >> $TMPH
echo "WORDS_BIGENDIAN=yes" >> $config_mak
echo "#define WORDS_BIGENDIAN 1" >> $config_h
fi
echo "#define HAVE_BYTESWAP_H 1" >> $config_h
if test "$gprof" = "yes" ; then
echo "TARGET_GPROF=yes" >> $config_mak
echo "#define HAVE_GPROF 1" >> $config_h
fi
if test "$static" = "yes" ; then
echo "CONFIG_STATIC=yes" >> $config_mak
fi
if test "$sdl" = "yes" ; then
echo "CONFIG_SDL=yes" >> $config_mak
echo "#define CONFIG_SDL 1" >> $config_h
echo "SDL_LIBS=`sdl-config --libs`" >> $config_mak
echo "SDL_CFLAGS=`sdl-config --cflags`" >> $config_mak
fi
echo -n "VERSION=" >>$config_mak
head $source_path/VERSION >>$config_mak
echo "" >>$config_mak
echo -n "#define QEMU_VERSION \"" >> $config_h
head $source_path/VERSION >> $config_h
echo "\"" >> $config_h
echo "SRC_PATH=$source_path" >> $config_mak
echo "TARGET_DIRS=$target_list" >> $config_mak
for target in $target_list; do
target_dir="$target"
config_mak=$target_dir/config.mak
config_h=$target_dir/config.h
target_cpu=`echo $target | cut -d '-' -f 1`
target_bigendian="no"
target_softmmu="no"
if expr $target : '.*-softmmu' > /dev/null ; then
target_softmmu="yes"
fi
if test "$target_cpu" = "x86" ; then
echo "TARGET_ARCH=i386" >> config.mak
echo "#define TARGET_ARCH \"i386\"" >> $TMPH
echo "#define TARGET_I386 1" >> $TMPH
echo "Creating $config_mak, $config_h and $target_dir/Makefile"
mkdir -p $target_dir
ln -sf $source_path/Makefile.target $target_dir/Makefile
echo "# Automatically generated by configure - do not modify" > $config_mak
echo "/* Automatically generated by configure - do not modify */" > $config_h
echo "include ../config-host.mak" >> $config_mak
echo "#include \"../config-host.h\"" >> $config_h
echo "#define CONFIG_QEMU_PREFIX \"$interp_prefix\"" >> $config_h
if test "$target_cpu" = "i386" ; then
echo "TARGET_ARCH=i386" >> $config_mak
echo "#define TARGET_ARCH \"i386\"" >> $config_h
echo "#define TARGET_I386 1" >> $config_h
elif test "$target_cpu" = "arm" ; then
echo "TARGET_ARCH=arm" >> config.mak
echo "#define TARGET_ARCH \"arm\"" >> $TMPH
echo "#define TARGET_ARM 1" >> $TMPH
echo "TARGET_ARCH=arm" >> $config_mak
echo "#define TARGET_ARCH \"arm\"" >> $config_h
echo "#define TARGET_ARM 1" >> $config_h
else
echo "Unsupported target CPU"
exit 1
fi
if test "$target_bigendian" = "yes" ; then
echo "TARGET_WORDS_BIGENDIAN=yes" >> config.mak
echo "#define TARGET_WORDS_BIGENDIAN 1" >> $TMPH
echo "TARGET_WORDS_BIGENDIAN=yes" >> $config_mak
echo "#define TARGET_WORDS_BIGENDIAN 1" >> $config_h
fi
if test "$target_softmmu" = "yes" ; then
echo "CONFIG_SOFTMMU=yes" >> $config_mak
echo "#define CONFIG_SOFTMMU 1" >> $config_h
fi
if test "$gprof" = "yes" ; then
echo "TARGET_GPROF=yes" >> config.mak
echo "#define HAVE_GPROF 1" >> $TMPH
fi
if test "$static" = "yes" ; then
echo "CONFIG_STATIC=yes" >> config.mak
fi
echo -n "VERSION=" >>config.mak
head $source_path/VERSION >>config.mak
echo "" >>config.mak
echo -n "#define QEMU_VERSION \"" >> $TMPH
head $source_path/VERSION >> $TMPH
echo "\"" >> $TMPH
done # for target in $targets
# build tree in object directory if source path is different from current one
if test "$source_path_used" = "yes" ; then
@@ -305,13 +362,5 @@ if test "$source_path_used" = "yes" ; then
ln -sf $source_path/$f $f
done
fi
echo "SRC_PATH=$source_path" >> config.mak
diff $TMPH config.h >/dev/null 2>&1
if test $? -ne 0 ; then
mv -f $TMPH config.h
else
echo "config.h is unchanged"
fi
rm -f $TMPO $TMPC $TMPE $TMPS $TMPH
rm -f $TMPO $TMPC $TMPE $TMPS


@@ -140,6 +140,7 @@ static inline void stfl(void *ptr, float v)
stl(ptr, u.i);
}
#if defined(__arm__) && !defined(WORDS_BIGENDIAN)
/* NOTE: arm is horrible as double 32 bit words are stored in big endian ! */
@@ -313,9 +314,24 @@ extern CPUState *cpu_single_env;
#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
void cpu_interrupt(CPUState *s, int mask);
int cpu_breakpoint_insert(CPUState *env, uint32_t pc);
int cpu_breakpoint_remove(CPUState *env, uint32_t pc);
void cpu_single_step(CPUState *env, int enabled);
/* memory API */
typedef void CPUWriteMemoryFunc(uint32_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(uint32_t addr);
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
long phys_offset);
int cpu_register_io_memory(int io_index,
CPUReadMemoryFunc **mem_read,
CPUWriteMemoryFunc **mem_write);
/* gdb stub API */
extern int gdbstub_fd;
CPUState *cpu_gdbstub_get_env(void *opaque);
int cpu_gdbstub(void *opaque, void (*main_loop)(void *opaque), int port);
int cpu_gdbstub(void *opaque, int (*main_loop)(void *opaque), int port);
#endif /* CPU_ALL_H */
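
The "memory API" block just added to this header is what the soft-MMU and the new VGA code use to register memory-mapped devices. A hedged sketch of a device hooking into it; the device itself, the address, the one-callback-per-access-size table layout and the use of the returned io index as phys_offset are assumptions made for illustration, not taken from this patch:

#include "cpu-all.h"    /* CPUReadMemoryFunc, CPUWriteMemoryFunc, register calls */

/* hypothetical device: one 32-bit scratch register */
static uint32_t scratch_val;

static uint32_t scratch_read(uint32_t addr)
{
    return scratch_val;
}

static void scratch_write(uint32_t addr, uint32_t value)
{
    scratch_val = value;
}

/* assumed layout: one handler per access width (byte, word, long) */
static CPUReadMemoryFunc *scratch_read_fn[3] = {
    scratch_read, scratch_read, scratch_read,
};
static CPUWriteMemoryFunc *scratch_write_fn[3] = {
    scratch_write, scratch_write, scratch_write,
};

void scratch_init(void)
{
    int io_index;

    io_index = cpu_register_io_memory(0, scratch_read_fn, scratch_write_fn);
    /* map 4 KB at an arbitrary physical address onto the handlers above */
    cpu_register_physical_memory(0xa1000000, 0x1000, io_index);
}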


@@ -20,12 +20,10 @@
#ifndef CPU_ARM_H
#define CPU_ARM_H
#include "config.h"
#include <setjmp.h>
#include "cpu-defs.h"
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_INTERRUPT 256 /* async interruption */
typedef struct CPUARMState {
uint32_t regs[16];

cpu-defs.h (new file)

@@ -0,0 +1,39 @@
/*
* common defines for all CPUs
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef CPU_DEFS_H
#define CPU_DEFS_H
#include "config.h"
#include <setjmp.h>
#define EXCP_INTERRUPT 256 /* async interruption */
#define EXCP_HLT 257 /* hlt instruction reached */
#define EXCP_DEBUG 258 /* cpu stopped after a breakpoint or singlestep */
#define MAX_BREAKPOINTS 32
#define CPU_TLB_SIZE 256
typedef struct CPUTLBEntry {
uint32_t address;
uint32_t addend;
} CPUTLBEntry;
#endif


@@ -170,7 +170,7 @@ int cpu_exec(CPUState *env1)
do_interrupt(env->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip);
env->exception_next_eip, 0);
#endif
}
env->exception_index = -1;
@@ -182,17 +182,18 @@ int cpu_exec(CPUState *env1)
tmp_T0 = T0;
#endif
interrupt_request = env->interrupt_request;
if (interrupt_request) {
if (__builtin_expect(interrupt_request, 0)) {
#if defined(TARGET_I386)
/* if hardware interrupt pending, we execute it */
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) {
(env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
intno = cpu_x86_get_pic_interrupt(env);
if (loglevel) {
fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
}
do_interrupt(intno, 0, 0, 0);
do_interrupt(intno, 0, 0, 0, 1);
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
/* ensure that no TB jump will be modified as
the program flow was changed */
@@ -225,31 +226,20 @@ int cpu_exec(CPUState *env1)
cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
env->cpsr = compute_cpsr();
cpu_arm_dump_state(env, logfile, 0);
env->cpsr &= ~0xf0000000;
#else
#error unsupported target CPU
#endif
}
#endif
/* we compute the CPU state. We assume it will not
change during the whole generated block. */
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
#if defined(TARGET_I386)
flags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
flags |= (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
flags |= (((unsigned long)env->segs[R_DS].base |
(unsigned long)env->segs[R_ES].base |
(unsigned long)env->segs[R_SS].base) != 0) <<
GEN_FLAG_ADDSEG_SHIFT;
if (!(env->eflags & VM_MASK)) {
flags |= (env->segs[R_CS].selector & 3) << GEN_FLAG_CPL_SHIFT;
} else {
/* NOTE: a dummy CPL is kept */
flags |= (1 << GEN_FLAG_VM_SHIFT);
flags |= (3 << GEN_FLAG_CPL_SHIFT);
}
flags |= (env->eflags & (IOPL_MASK | TF_MASK));
flags = env->hflags;
flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
cs_base = env->segs[R_CS].base;
pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
@@ -278,17 +268,7 @@ int cpu_exec(CPUState *env1)
tb->tc_ptr = tc_ptr;
tb->cs_base = (unsigned long)cs_base;
tb->flags = flags;
ret = cpu_gen_code(tb, CODE_GEN_MAX_SIZE, &code_gen_size);
#if defined(TARGET_I386)
/* XXX: suppress that, this is incorrect */
/* if invalid instruction, signal it */
if (ret != 0) {
/* NOTE: the tb is allocated but not linked, so we
can leave it */
spin_unlock(&tb_lock);
raise_exception(EXCP06_ILLOP);
}
#endif
cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
*ptb = tb;
tb->hash_next = NULL;
tb_link(tb);
@@ -305,12 +285,8 @@ int cpu_exec(CPUState *env1)
#ifdef __sparc__
T0 = tmp_T0;
#endif
/* see if we can patch the calling TB. XXX: remove TF test */
if (T0 != 0
#if defined(TARGET_I386)
&& !(env->eflags & TF_MASK)
#endif
) {
/* see if we can patch the calling TB. */
if (T0 != 0) {
spin_lock(&tb_lock);
tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
spin_unlock(&tb_lock);
@@ -336,6 +312,15 @@ int cpu_exec(CPUState *env1)
gen_func();
#endif
env->current_tb = NULL;
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
if (env->hflags & HF_SOFTMMU_MASK) {
env->hflags &= ~HF_SOFTMMU_MASK;
/* do not allow linking to another block */
T0 = 0;
}
#endif
}
} else {
}
@@ -372,12 +357,7 @@ int cpu_exec(CPUState *env1)
EDI = saved_EDI;
#endif
#elif defined(TARGET_ARM)
{
int ZF;
ZF = (env->NZF == 0);
env->cpsr = env->cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
(env->CF << 29) | ((env->VF & 0x80000000) >> 3);
}
env->cpsr = compute_cpsr();
#else
#error unsupported target CPU
#endif
@@ -399,16 +379,10 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
saved_env = env;
env = s;
if (env->eflags & VM_MASK) {
SegmentCache *sc;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
selector &= 0xffff;
sc = &env->segs[seg_reg];
/* NOTE: in VM86 mode, limit and flags are never reloaded,
so we must load them here */
sc->base = (void *)(selector << 4);
sc->limit = 0xffff;
sc->flags = 0;
sc->selector = selector;
cpu_x86_load_seg_cache(env, seg_reg, selector,
(uint8_t *)(selector << 4), 0xffff, 0);
} else {
load_seg(seg_reg, selector, 0);
}
@@ -488,14 +462,21 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
a virtual CPU fault */
cpu_restore_state(tb, env, pc);
}
if (ret == 1) {
#if 0
printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
env->eip, env->cr[2], env->error_code);
printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
env->eip, env->cr[2], env->error_code);
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
raise_exception_err(EXCP0E_PAGE, env->error_code);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
raise_exception_err(EXCP0E_PAGE, env->error_code);
} else {
/* activate soft MMU for this block */
env->hflags |= HF_SOFTMMU_MASK;
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit();
}
/* never comes here */
return 1;
}
@@ -635,6 +616,23 @@ int cpu_signal_handler(int host_signum, struct siginfo *info,
&uc->uc_sigmask);
}
#elif defined(__mc68000)
int cpu_signal_handler(int host_signum, struct siginfo *info,
void *puc)
{
struct ucontext *uc = puc;
unsigned long pc;
int is_write;
pc = uc->uc_mcontext.gregs[16];
/* XXX: compute is_write */
is_write = 0;
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write,
&uc->uc_sigmask);
}
#else
#error host CPU specific signal handler needed


@@ -20,8 +20,7 @@
#ifndef CPU_I386_H
#define CPU_I386_H
#include "config.h"
#include <setjmp.h>
#include "cpu-defs.h"
#define R_EAX 0
#define R_ECX 1
@@ -74,6 +73,10 @@
#define CC_S 0x0080
#define CC_O 0x0800
#define TF_SHIFT 8
#define IOPL_SHIFT 12
#define VM_SHIFT 17
#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
#define DF_MASK 0x00000400
@@ -86,6 +89,29 @@
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000
/* hidden flags - used internally by qemu to represent additionnal cpu
states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
using the IOPL_MASK, TF_MASK and VM_MASK bit position to ease oring
with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
#define HF_SOFTMMU_SHIFT 2
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT 4
#define HF_SS32_SHIFT 5
/* zero base for DS, ES and SS */
#define HF_ADDSEG_SHIFT 6
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
#define CR0_PE_MASK (1 << 0)
#define CR0_TS_MASK (1 << 3)
#define CR0_WP_MASK (1 << 16)
@@ -153,9 +179,6 @@
#define EXCP11_ALGN 17
#define EXCP12_MCHK 18
#define EXCP_INTERRUPT 256 /* async interruption */
#define EXCP_HLT 257 /* hlt instruction reached */
enum {
CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
CC_OP_EFLAGS, /* all cc are explicitely computed, CC_SRC = flags */
@@ -230,6 +253,7 @@ typedef struct CPUX86State {
uint32_t cc_dst;
uint32_t cc_op;
int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
uint32_t hflags; /* hidden flags, see HF_xxx constants */
/* FPU state */
unsigned int fpstt; /* top of stack index */
@@ -253,7 +277,7 @@ typedef struct CPUX86State {
SegmentCache tr;
SegmentCache gdt; /* only base and limit are used */
SegmentCache idt; /* only base and limit are used */
/* sysenter registers */
uint32_t sysenter_cs;
uint32_t sysenter_esp;
@@ -270,7 +294,17 @@ typedef struct CPUX86State {
uint32_t dr[8]; /* debug registers */
int interrupt_request;
int user_mode_only; /* user mode only simulation */
/* soft mmu support */
/* 0 = kernel, 1 = user */
CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
/* ice debug support */
uint32_t breakpoints[MAX_BREAKPOINTS];
int nb_breakpoints;
int singlestep_enabled;
/* user data */
void *opaque;
} CPUX86State;
@@ -289,10 +323,57 @@ int cpu_x86_exec(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);
int cpu_x86_get_pic_interrupt(CPUX86State *s);
/* needed to load some predefinied segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
/* this function must always be used to load data in the segment
cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
int seg_reg, unsigned int selector,
uint8_t *base, unsigned int limit,
unsigned int flags)
{
SegmentCache *sc;
unsigned int new_hflags;
sc = &env->segs[seg_reg];
sc->selector = selector;
sc->base = base;
sc->limit = limit;
sc->flags = flags;
/* simulate fsave/frstor */
/* update the hidden flags */
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
/* XXX: try to avoid this test. The problem comes from the
fact that is real mode or vm86 mode we only modify the
'base' and 'selector' fields of the segment cache to go
faster. A solution may be to force addseg to one in
translate-i386.c. */
new_hflags |= HF_ADDSEG_MASK;
} else {
new_hflags |= (((unsigned long)env->segs[R_DS].base |
(unsigned long)env->segs[R_ES].base |
(unsigned long)env->segs[R_SS].base) != 0) <<
HF_ADDSEG_SHIFT;
}
env->hflags = (env->hflags &
~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
}
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
#if HF_CPL_MASK == 3
s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
#else
#error HF_CPL_MASK is hardcoded
#endif
}
/* the following helpers are only usable in user mode simulation as
they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);


@@ -42,7 +42,7 @@ perror_memory (status, memaddr, info)
/* Actually, address between memaddr and memaddr + len was
out of bounds. */
(*info->fprintf_func) (info->stream,
"Address 0x%x is out of bounds.\n", memaddr);
"Address 0x%llx is out of bounds.\n", memaddr);
}
/* This could be in a separate file, to save miniscule amounts of space
@@ -57,7 +57,7 @@ generic_print_address (addr, info)
bfd_vma addr;
struct disassemble_info *info;
{
(*info->fprintf_func) (info->stream, "0x%x", addr);
(*info->fprintf_func) (info->stream, "0x%llx", addr);
}
/* Just return the given address. */


@@ -109,6 +109,13 @@ extern int printf(const char *, ...);
#define AREG5 "$13"
#define AREG6 "$14"
#endif
#ifdef __mc68000
#define AREG0 "%a5"
#define AREG1 "%a4"
#define AREG2 "%d7"
#define AREG3 "%d6"
#define AREG4 "%d5"
#endif
#ifdef __ia64__
#define AREG0 "r27"
#define AREG1 "r24"
@@ -125,6 +132,8 @@ extern int printf(const char *, ...);
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#ifdef __alpha__
/* the symbols are considered non exported so a br immediate is generated */
@@ -152,4 +161,30 @@ extern int __op_param1, __op_param2, __op_param3;
#define PARAM3 ((long)(&__op_param3))
#endif
extern int __op_jmp0, __op_jmp1;
extern int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
#ifdef __i386__
#define EXIT_TB() asm volatile ("ret")
#endif
#ifdef __powerpc__
#define EXIT_TB() asm volatile ("blr")
#endif
#ifdef __s390__
#define EXIT_TB() asm volatile ("br %r14")
#endif
#ifdef __alpha__
#define EXIT_TB() asm volatile ("ret")
#endif
#ifdef __ia64__
#define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;")
#endif
#ifdef __sparc__
#define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0\n" \
"nop")
#endif
#ifdef __arm__
#define EXIT_TB() asm volatile ("b exec_loop")
#endif
#ifdef __mc68000
#define EXIT_TB() asm volatile ("rts")
#endif
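
The hunk above extends the header that gives the op-*.c files their fixed host registers (AREG0..), the PARAM1..PARAM3 placeholders and the per-host EXIT_TB sequences; the file list earlier in this diff suggests this is dyngen-exec.h, which is assumed below. For context, a sketch of what a micro-operation built on it looks like — the specific op is illustrative, not taken from this patch:

#include "dyngen-exec.h"

/* emulated CPU state kept in a fixed host register, in the same style as
   the T0/T1/T2 declarations in the exec-*.h headers */
register uint32_t T0 asm(AREG0);

/* dyngen copies the compiled body of this function into the translation
   block and rewrites the PARAM1 placeholder with the real immediate */
void op_movl_T0_im(void)
{
    T0 = (uint32_t)PARAM1;
}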

dyngen.c

@@ -25,7 +25,7 @@
#include <unistd.h>
#include <fcntl.h>
#include "config.h"
#include "config-host.h"
/* elf format definitions. We use these macros to test the CPU to
allow cross compilation (this tool must be ran on the build
@@ -86,6 +86,13 @@
#define elf_check_arch(x) ((x) == EM_ARM)
#define ELF_USES_RELOC
#elif defined(HOST_M68K)
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_68K
#define elf_check_arch(x) ((x) == EM_68K)
#define ELF_USES_RELOCA
#else
#error unsupported CPU - please update the code
#endif
@@ -108,8 +115,7 @@ typedef uint64_t host_ulong;
#define SHT_RELOC SHT_REL
#endif
#define NO_THUNK_TYPE_SIZE
#include "thunk.h"
#include "bswap.h"
enum {
OUT_GEN_OP,
@@ -576,6 +582,21 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
relocs, nb_relocs);
break;
#endif
case EM_68K:
{
uint8_t *p;
p = (void *)(p_end - 2);
if (p == p_start)
error("empty code for %s", name);
// remove NOP's, probably added for alignment
while ((get16((uint16_t *)p) == 0x4e71) &&
(p>p_start))
p -= 2;
if (get16((uint16_t *)p) != 0x4e75)
error("rts expected at the end of %s", name);
copy_size = p - p_start;
}
break;
default:
error("unknown ELF architecture");
}
@@ -648,7 +669,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
{
ElfW(Sym) *sym;
const char *sym_name, *p;
target_ulong val;
unsigned long val;
int n;
for(i = 0, sym = symtab; i < nb_syms; i++, sym++) {
@@ -663,7 +684,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
if (!ptr)
error("__op_labelN in invalid section");
offset = sym->st_value;
val = *(target_ulong *)(ptr + offset);
val = *(unsigned long *)(ptr + offset);
#ifdef ELF_USES_RELOCA
{
int reloc_shndx, nb_relocs1, j;
@@ -687,7 +708,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
if (val >= start_offset && val < start_offset + copy_size) {
n = strtol(p, NULL, 10);
fprintf(outfile, " label_offsets[%d] = %d + (gen_code_ptr - gen_code_buf);\n", n, val - start_offset);
fprintf(outfile, " label_offsets[%d] = %ld + (gen_code_ptr - gen_code_buf);\n", n, val - start_offset);
}
}
}
@@ -1063,6 +1084,41 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
}
}
}
#elif defined(HOST_M68K)
{
char name[256];
int type;
int addend;
Elf32_Sym *sym;
for(i = 0, rel = relocs;i < nb_relocs; i++, rel++) {
if (rel->r_offset >= start_offset &&
rel->r_offset < start_offset + copy_size) {
sym = &(symtab[ELFW(R_SYM)(rel->r_info)]);
sym_name = strtab + symtab[ELFW(R_SYM)(rel->r_info)].st_name;
if (strstart(sym_name, "__op_param", &p)) {
snprintf(name, sizeof(name), "param%s", p);
} else {
snprintf(name, sizeof(name), "(long)(&%s)", sym_name);
}
type = ELF32_R_TYPE(rel->r_info);
addend = get32((uint32_t *)(text + rel->r_offset)) + rel->r_addend;
switch(type) {
case R_68K_32:
fprintf(outfile, " /* R_68K_32 RELOC, offset %x */\n", rel->r_offset) ;
fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s + %#x;\n",
rel->r_offset - start_offset, name, addend );
break;
case R_68K_PC32:
fprintf(outfile, " /* R_68K_PC32 RELOC, offset %x */\n", rel->r_offset);
fprintf(outfile, " *(uint32_t *)(gen_code_ptr + %d) = %s - (long)(gen_code_ptr + %#x) + %#x;\n",
rel->r_offset - start_offset, name, rel->r_offset - start_offset, /*sym->st_value+*/ addend);
break;
default:
error("unsupported m68k relocation (%d)", type);
}
}
}
}
#else
#error unsupported CPU
#endif
@@ -1306,38 +1362,10 @@ fprintf(outfile,
" the_end:\n"
);
/* generate epilogue */
switch(ELF_ARCH) {
case EM_386:
fprintf(outfile, "*gen_code_ptr++ = 0xc3; /* ret */\n");
break;
case EM_PPC:
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x4e800020; /* blr */\n");
break;
case EM_S390:
fprintf(outfile, "*((uint16_t *)gen_code_ptr)++ = 0x07fe; /* br %%r14 */\n");
break;
case EM_ALPHA:
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x6bfa8001; /* ret */\n");
break;
case EM_IA_64:
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x00840008; /* br.ret.sptk.many b0;; */\n");
break;
case EM_SPARC:
case EM_SPARC32PLUS:
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x81c62008; /* jmpl %%i0 + 8, %%g0 */\n");
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x01000000; /* nop */\n");
break;
case EM_SPARCV9:
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x81c7e008; /* ret */\n");
fprintf(outfile, "*((uint32_t *)gen_code_ptr)++ = 0x81e80000; /* restore */\n");
break;
case EM_ARM:
fprintf(outfile, "gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, arm_ldr_ptr, arm_data_table, arm_data_ptr, 0);\n");
break;
default:
error("unknown ELF architecture");
}
/* generate some code patching */
#ifdef HOST_ARM
fprintf(outfile, "gen_code_ptr = arm_flush_ldr(gen_code_ptr, arm_ldr_table, arm_ldr_ptr, arm_data_table, arm_data_ptr, 0);\n");
#endif
/* flush instruction cache */
fprintf(outfile, "flush_icache_range((unsigned long)gen_code_buf, (unsigned long)gen_code_ptr);\n");


@@ -19,7 +19,7 @@
*/
int __op_param1, __op_param2, __op_param3;
int __op_jmp0, __op_jmp1;
int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
#ifdef __i386__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
@@ -94,6 +94,14 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
}
#endif
#ifdef __mc68000
#include <asm/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
cacheflush(start,FLUSH_SCOPE_LINE,FLUSH_CACHE_BOTH,stop-start+16);
}
#endif
#ifdef __alpha__
register int gp asm("$29");
@@ -152,11 +160,7 @@ static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr,
data_size = (uint8_t *)data_end - (uint8_t *)data_start;
if (!gen_jmp) {
/* b exec_loop */
arm_reloc_pc24((uint32_t *)gen_code_ptr, 0xeafffffe, (long)(&exec_loop));
gen_code_ptr += 4;
} else {
if (gen_jmp) {
/* generate branch to skip the data */
if (data_size == 0)
return gen_code_ptr;


@@ -30,3 +30,11 @@ register uint32_t T2 asm(AREG3);
void cpu_lock(void);
void cpu_unlock(void);
void cpu_loop_exit(void);
static inline int compute_cpsr(void)
{
int ZF;
ZF = (env->NZF == 0);
return env->cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
(env->CF << 29) | ((env->VF & 0x80000000) >> 3);
}
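compute_cpsr() rebuilds the guest CPSR from the lazily maintained condition-code fields: N is the sign bit of NZF, Z is set when NZF is zero, C is kept as 0/1 and shifted to bit 29, and V lives in bit 31 of VF and is moved down to bit 28. A small standalone check of that packing (the struct is only a stand-in for the fields of the real CPU state):

    #include <assert.h>
    #include <stdint.h>

    struct flags { uint32_t cpsr, NZF, CF, VF; };   /* stand-in for the CPU state */

    static uint32_t pack_cpsr(const struct flags *f)
    {
        uint32_t ZF = (f->NZF == 0);
        return f->cpsr | (f->NZF & 0x80000000) | (ZF << 30) |
               (f->CF << 29) | ((f->VF & 0x80000000) >> 3);
    }

    int main(void)
    {
        struct flags f = { 0, 0x80000000, 1, 0 };   /* negative result, carry set */
        assert(pack_cpsr(&f) == ((1u << 31) | (1u << 29)));   /* N and C */

        f.NZF = 0;                                  /* zero result */
        assert(pack_cpsr(&f) == ((1u << 30) | (1u << 29)));   /* Z and C */
        return 0;
    }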


@@ -123,8 +123,12 @@ typedef struct CCTable {
extern CCTable cc_table[];
void load_seg(int seg_reg, int selector, unsigned cur_eip);
void jmp_seg(int selector, unsigned int new_eip);
void helper_ljmp_protected_T0_T1(void);
void helper_lcall_real_T0_T1(int shift, int next_eip);
void helper_lcall_protected_T0_T1(int shift, int next_eip);
void helper_iret_real(int shift);
void helper_iret_protected(int shift);
void helper_lret_protected(int shift, int addend);
void helper_lldt_T0(void);
void helper_ltr_T0(void);
void helper_movl_crN_T0(int reg);
@@ -134,10 +138,11 @@ void cpu_x86_update_cr0(CPUX86State *env);
void cpu_x86_update_cr3(CPUX86State *env);
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write);
void tlb_fill(unsigned long addr, int is_write, void *retaddr);
void __hidden cpu_lock(void);
void __hidden cpu_unlock(void);
void do_interrupt(int intno, int is_int, int error_code,
unsigned int next_eip);
unsigned int next_eip, int is_hw);
void do_interrupt_user(int intno, int is_int, int error_code,
unsigned int next_eip);
void raise_interrupt(int intno, int is_int, int error_code,
@@ -360,3 +365,52 @@ static inline void load_eflags(int eflags, int update_mask)
env->eflags = (env->eflags & ~update_mask) |
(eflags & update_mask);
}
/* memory access macros */
#define ldul ldl
#define lduq ldq
#define ldul_user ldl_user
#define ldul_kernel ldl_kernel
#define ldub_raw ldub
#define ldsb_raw ldsb
#define lduw_raw lduw
#define ldsw_raw ldsw
#define ldl_raw ldl
#define ldq_raw ldq
#define stb_raw stb
#define stw_raw stw
#define stl_raw stl
#define stq_raw stq
#define MEMUSER 0
#define DATA_SIZE 1
#include "softmmu_header.h"
#define DATA_SIZE 2
#include "softmmu_header.h"
#define DATA_SIZE 4
#include "softmmu_header.h"
#define DATA_SIZE 8
#include "softmmu_header.h"
#undef MEMUSER
#define MEMUSER 1
#define DATA_SIZE 1
#include "softmmu_header.h"
#define DATA_SIZE 2
#include "softmmu_header.h"
#define DATA_SIZE 4
#include "softmmu_header.h"
#define DATA_SIZE 8
#include "softmmu_header.h"
#undef MEMUSER
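This block is the usual multiple-inclusion template: DATA_SIZE and MEMUSER parameterise softmmu_header.h, which is included once per access width and access mode so that one template expands into the whole family of load/store helpers. The technique in miniature, as a single self-including file (the file name, macro names and helpers below are invented for the example; only the pattern itself mirrors the code above):

    /* self_include.c - toy version of the DATA_SIZE multiple-inclusion trick */
    #ifndef SELF_INCLUDE_PASS
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define xglue(x, y) x ## y
    #define glue(x, y)  xglue(x, y)

    #define SELF_INCLUDE_PASS
    #define DATA_SIZE 1
    #include "self_include.c"
    #define DATA_SIZE 2
    #include "self_include.c"
    #define DATA_SIZE 4
    #include "self_include.c"

    int main(void)
    {
        uint32_t x = 0x11223344;
        printf("%#x\n", (unsigned)load4(&x));
        return 0;
    }

    #else   /* template part: expanded once per DATA_SIZE, like softmmu_header.h */

    #if DATA_SIZE == 1
    #define DATA_TYPE uint8_t
    #elif DATA_SIZE == 2
    #define DATA_TYPE uint16_t
    #else
    #define DATA_TYPE uint32_t
    #endif

    static DATA_TYPE glue(load, DATA_SIZE)(const void *p)
    {
        DATA_TYPE v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    #undef DATA_TYPE
    #undef DATA_SIZE
    #endif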

exec.c

@@ -68,6 +68,7 @@ typedef struct PageDesc {
#define L2_SIZE (1 << L2_BITS)
static void tb_invalidate_page(unsigned long address);
static void io_mem_init(void);
unsigned long real_host_page_size;
unsigned long host_page_bits;
@@ -76,6 +77,12 @@ unsigned long host_page_mask;
static PageDesc *l1_map[L1_SIZE];
/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;
static void page_init(void)
{
/* NOTE: we can always suppose that host_page_size >=
@@ -201,6 +208,7 @@ void cpu_exec_init(void)
if (!code_gen_ptr) {
code_gen_ptr = code_gen_buffer;
page_init();
io_mem_init();
}
}
@@ -617,6 +625,63 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
int i;
for(i = 0; i < env->nb_breakpoints; i++) {
if (env->breakpoints[i] == pc)
return 0;
}
if (env->nb_breakpoints >= MAX_BREAKPOINTS)
return -1;
env->breakpoints[env->nb_breakpoints++] = pc;
tb_invalidate_page(pc);
return 0;
#else
return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
int i;
for(i = 0; i < env->nb_breakpoints; i++) {
if (env->breakpoints[i] == pc)
goto found;
}
return -1;
found:
memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
(env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
env->nb_breakpoints--;
tb_invalidate_page(pc);
return 0;
#else
return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
if (env->singlestep_enabled != enabled) {
env->singlestep_enabled = enabled;
/* must flush all the translated code to avoid inconsistancies */
tb_flush();
}
#endif
}
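cpu_breakpoint_insert(), cpu_breakpoint_remove() and cpu_single_step() are the hooks the gdb stub below drives. A hedged fragment showing the intended calling pattern, relying only on the return conventions stated in the comments (the surrounding function and its callers are illustrative, and 'env' is assumed to come from the target's CPU init code):

    /* Illustration only: how a debugger front end might use the new hooks. */
    void debug_example(CPUState *env, uint32_t pc)
    {
        if (cpu_breakpoint_insert(env, pc) < 0) {
            /* table full (MAX_BREAKPOINTS) or target without breakpoint support */
            return;
        }
        /* ... run the CPU loop; it returns EXCP_DEBUG when pc is reached ... */
        cpu_breakpoint_remove(env, pc);

        cpu_single_step(env, 1);    /* EXCP_DEBUG after every instruction */
        /* ... run one instruction ... */
        cpu_single_step(env, 0);    /* back to normal execution */
    }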
/* mask must never be zero */
void cpu_interrupt(CPUState *env, int mask)
{
@@ -687,3 +752,133 @@ void page_unmap(void)
tb_flush();
}
#endif
void tlb_flush(CPUState *env)
{
#if defined(TARGET_I386)
int i;
for(i = 0; i < CPU_TLB_SIZE; i++) {
env->tlb_read[0][i].address = -1;
env->tlb_write[0][i].address = -1;
env->tlb_read[1][i].address = -1;
env->tlb_write[1][i].address = -1;
}
#endif
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
#if defined(TARGET_I386)
int i;
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
env->tlb_read[0][i].address = -1;
env->tlb_write[0][i].address = -1;
env->tlb_read[1][i].address = -1;
env->tlb_write[1][i].address = -1;
#endif
}
static inline unsigned long *physpage_find_alloc(unsigned int page)
{
unsigned long **lp, *p;
unsigned int index, i;
index = page >> TARGET_PAGE_BITS;
lp = &l1_physmap[index >> L2_BITS];
p = *lp;
if (!p) {
/* allocate if not found */
p = malloc(sizeof(unsigned long) * L2_SIZE);
for(i = 0; i < L2_SIZE; i++)
p[i] = IO_MEM_UNASSIGNED;
*lp = p;
}
return p + (index & (L2_SIZE - 1));
}
/* return NULL if no page defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
unsigned long *p;
unsigned int index;
index = page >> TARGET_PAGE_BITS;
p = l1_physmap[index >> L2_BITS];
if (!p)
return IO_MEM_UNASSIGNED;
return p[index & (L2_SIZE - 1)];
}
/* register physical memory. 'size' must be a multiple of the target
page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
long phys_offset)
{
unsigned long addr, end_addr;
unsigned long *p;
end_addr = start_addr + size;
for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
p = physpage_find_alloc(addr);
*p = phys_offset;
if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
phys_offset += TARGET_PAGE_SIZE;
}
}
static uint32_t unassigned_mem_readb(uint32_t addr)
{
return 0;
}
static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
unassigned_mem_readb,
unassigned_mem_readb,
unassigned_mem_readb,
};
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
unassigned_mem_writeb,
unassigned_mem_writeb,
unassigned_mem_writeb,
};
static void io_mem_init(void)
{
io_mem_nb = 1;
cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}
/* mem_read and mem_write are arrays of functions containing the
function to access byte (index 0), word (index 1) and dword (index
2). All functions must be supplied. If io_index is non zero, the
corresponding io zone is modified. If it is zero, a new io zone is
allocated. The return value can be used with
cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
CPUReadMemoryFunc **mem_read,
CPUWriteMemoryFunc **mem_write)
{
int i;
if (io_index <= 0) {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
io_index = io_mem_nb++;
} else {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
}
for(i = 0;i < 3; i++) {
io_mem_read[io_index][i] = mem_read[i];
io_mem_write[io_index][i] = mem_write[i];
}
return io_index << IO_MEM_SHIFT;
}
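Together with cpu_register_physical_memory() above, this gives device models a two-step registration: obtain an I/O index with per-size read/write callbacks, then map that index over a physical address range. A hedged sketch of such a caller (the device, its handlers and the address are invented; the two registration calls and the callback signatures follow the code above):

    #include <stdint.h>

    typedef uint32_t CPUReadMemoryFunc(uint32_t addr);
    typedef void CPUWriteMemoryFunc(uint32_t addr, uint32_t val);

    /* prototypes as introduced in exec.c above */
    int cpu_register_io_memory(int io_index, CPUReadMemoryFunc **mem_read,
                               CPUWriteMemoryFunc **mem_write);
    void cpu_register_physical_memory(unsigned long start_addr,
                                      unsigned long size, long phys_offset);

    static uint32_t mydev_readb(uint32_t addr) { return 0xff; }
    static void mydev_writeb(uint32_t addr, uint32_t val) { }

    /* one handler per access size: byte (0), word (1), dword (2) */
    static CPUReadMemoryFunc *mydev_read[3] = {
        mydev_readb, mydev_readb, mydev_readb,
    };
    static CPUWriteMemoryFunc *mydev_write[3] = {
        mydev_writeb, mydev_writeb, mydev_writeb,
    };

    void mydev_init(void)
    {
        /* io_index 0 allocates a fresh I/O zone; the return value is the
           phys_offset to hand to cpu_register_physical_memory() */
        int iomem = cpu_register_io_memory(0, mydev_read, mydev_write);
        cpu_register_physical_memory(0xfe000000, 0x1000, iomem);
    }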

exec.h

@@ -21,6 +21,23 @@
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
#if GCC_MAJOR < 3
#define __builtin_expect(x, n) (x)
#endif
#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif
/* is_jmp field values */
#define DISAS_NEXT 0 /* next instruction can be analyzed */
#define DISAS_JUMP 1 /* only pc was modified dynamically */
@@ -44,30 +61,25 @@ extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
#if defined(TARGET_I386)
#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT 2
#define GEN_FLAG_VM_SHIFT 3
#define GEN_FLAG_ST_SHIFT 4
#define GEN_FLAG_TF_SHIFT 8 /* same position as eflags */
#define GEN_FLAG_CPL_SHIFT 9
#define GEN_FLAG_IOPL_SHIFT 12 /* same position as eflags */
void optimize_flags_init(void);
#endif
extern FILE *logfile;
extern int loglevel;
int gen_intermediate_code(struct TranslationBlock *tb);
int gen_intermediate_code_pc(struct TranslationBlock *tb);
int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(struct TranslationBlock *tb,
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
CPUState *env, unsigned long searched_pc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address);
void page_unmap(void);
void tlb_flush_page(CPUState *env, uint32_t addr);
void tlb_flush(CPUState *env);
#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
@@ -96,7 +108,7 @@ typedef struct TranslationBlock {
the code of this one. */
uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
uint32_t tb_next[2]; /* address of jump generated code */
#endif
@@ -148,18 +160,14 @@ static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
#if defined(__powerpc__)
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, unsigned long addr)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
uint32_t val, *ptr;
unsigned long offset;
offset = (unsigned long)(tb->tc_ptr + tb->tb_jmp_offset[n]);
/* patch the branch destination */
ptr = (uint32_t *)offset;
ptr = (uint32_t *)jmp_addr;
val = *ptr;
val = (val & ~0x03fffffc) | ((addr - offset) & 0x03fffffc);
val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
*ptr = val;
/* flush icache */
asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
@@ -169,6 +177,18 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
asm volatile ("isync" : : : "memory");
}
static inline void tb_set_jmp_target(TranslationBlock *tb,
int n, unsigned long addr)
{
unsigned long offset;
offset = tb->tb_jmp_offset[n];
tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
offset = tb->tb_jmp_offset[n + 2];
if (offset != 0xffff)
tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
#else
/* set the jump target */
@@ -203,20 +223,29 @@ TranslationBlock *tb_find_pc(unsigned long pc_ptr);
#if defined(__powerpc__)
/* on PowerPC we patch the jump instruction directly */
#define JUMP_TB(tbparam, n, eip)\
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
asm volatile ("b %0" : : "i" (&__op_jmp ## n));\
label ## n:\
asm volatile (".section \".data\"\n"\
"__op_label" #n "." stringify(opname) ":\n"\
".long 1f\n"\
".previous\n"\
"b __op_jmp" #n "\n"\
"1:\n");\
T0 = (long)(tbparam) + (n);\
EIP = eip;\
EXIT_TB();\
} while (0)
#define JUMP_TB2(opname, tbparam, n)\
do {\
asm volatile ("b __op_jmp%0\n" : : "i" (n + 2));\
} while (0)
#else
/* jump to next block operations (more portable code, does not need
cache flushing, but slower because of indirect jump) */
#define JUMP_TB(tbparam, n, eip)\
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
@@ -225,10 +254,28 @@ label ## n:\
T0 = (long)(tbparam) + (n);\
EIP = eip;\
dummy_label ## n:\
EXIT_TB();\
} while (0)
/* second jump to same destination 'n' */
#define JUMP_TB2(opname, tbparam, n)\
do {\
goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
} while (0)
#endif
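The portable JUMP_TB/JUMP_TB2 variant relies on GCC's labels-as-values extension: the address of a local label is recorded at translation time and later replaced by the address of the next block's generated code, so chaining is a single goto through a pointer. The underlying mechanism in isolation (a toy, not the QEMU macro itself):

    #include <stdio.h>

    /* GCC extension: &&label takes a label's address, goto * jumps to it. */
    int main(void)
    {
        void *target = &&not_chained;   /* default, like an unpatched tb_next[n] */

        target = &&next_block;          /* "patch" the pointer to chain blocks */
        goto *target;

    not_chained:
        puts("fell back to the exit path");
        return 1;

    next_block:
        puts("jumped straight to the next block");
        return 0;
    }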
/* physical memory access */
#define IO_MEM_NB_ENTRIES 256
#define TLB_INVALID_MASK (1 << 3)
#define IO_MEM_SHIFT 4
#define IO_MEM_UNASSIGNED (1 << IO_MEM_SHIFT)
unsigned long physpage_find(unsigned long page);
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
#ifdef __powerpc__
static inline int testandset (int *p)
{
@@ -320,6 +367,18 @@ static inline int testandset (int *spinlock)
}
#endif
#ifdef __mc68000
static inline int testandset (int *p)
{
char ret;
__asm__ __volatile__("tas %1; sne %0"
: "=r" (ret)
: "m" (p)
: "cc","memory");
return ret == 0;
}
#endif
typedef int spinlock_t;
#define SPIN_LOCK_UNLOCKED 0
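Each testandset() above atomically sets the lock word and returns non-zero when it was previously clear, i.e. when the caller acquired the lock. A hedged sketch of the spin lock that can be built on the definitions just above (the function names are illustrative, not the ones from this tree):

    static inline void example_spin_lock(spinlock_t *lock)
    {
        while (!testandset(lock))
            ;   /* busy-wait until the word was observed clear */
    }

    static inline void example_spin_unlock(spinlock_t *lock)
    {
        *lock = SPIN_LOCK_UNLOCKED;
    }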


@@ -283,11 +283,11 @@ static int memory_rw(uint8_t *buf, uint32_t addr, int len, int is_write)
}
/* port = 0 means default port */
int cpu_gdbstub(void *opaque, void (*main_loop)(void *opaque), int port)
int cpu_gdbstub(void *opaque, int (*main_loop)(void *opaque), int port)
{
CPUState *env;
const char *p;
int ret, ch, nb_regs, i;
int ret, ch, nb_regs, i, type;
char buf[4096];
uint8_t mem_buf[2000];
uint32_t *registers;
@@ -309,8 +309,37 @@ int cpu_gdbstub(void *opaque, void (*main_loop)(void *opaque), int port)
put_packet(buf);
break;
case 'c':
main_loop(opaque);
snprintf(buf, sizeof(buf), "S%02x", 0);
if (*p != '\0') {
addr = strtoul(p, (char **)&p, 16);
env = cpu_gdbstub_get_env(opaque);
#if defined(TARGET_I386)
env->eip = addr;
#endif
}
ret = main_loop(opaque);
if (ret == EXCP_DEBUG)
ret = SIGTRAP;
else
ret = 0;
snprintf(buf, sizeof(buf), "S%02x", ret);
put_packet(buf);
break;
case 's':
env = cpu_gdbstub_get_env(opaque);
if (*p != '\0') {
addr = strtoul(p, (char **)&p, 16);
#if defined(TARGET_I386)
env->eip = addr;
#endif
}
cpu_single_step(env, 1);
ret = main_loop(opaque);
cpu_single_step(env, 0);
if (ret == EXCP_DEBUG)
ret = SIGTRAP;
else
ret = 0;
snprintf(buf, sizeof(buf), "S%02x", ret);
put_packet(buf);
break;
case 'g':
@@ -379,6 +408,40 @@ int cpu_gdbstub(void *opaque, void (*main_loop)(void *opaque), int port)
else
put_packet("OK");
break;
case 'Z':
type = strtoul(p, (char **)&p, 16);
if (*p == ',')
p++;
addr = strtoul(p, (char **)&p, 16);
if (*p == ',')
p++;
len = strtoul(p, (char **)&p, 16);
if (type == 0 || type == 1) {
env = cpu_gdbstub_get_env(opaque);
if (cpu_breakpoint_insert(env, addr) < 0)
goto breakpoint_error;
put_packet("OK");
} else {
breakpoint_error:
put_packet("ENN");
}
break;
case 'z':
type = strtoul(p, (char **)&p, 16);
if (*p == ',')
p++;
addr = strtoul(p, (char **)&p, 16);
if (*p == ',')
p++;
len = strtoul(p, (char **)&p, 16);
if (type == 0 || type == 1) {
env = cpu_gdbstub_get_env(opaque);
cpu_breakpoint_remove(env, addr);
put_packet("OK");
} else {
goto breakpoint_error;
}
break;
default:
/* put empty packet */
buf[0] = '\0';
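The new 'Z'/'z' cases implement the GDB remote-protocol breakpoint packets: the stub parses Z<type>,<addr>,<len>, inserts or removes a breakpoint for types 0 and 1, and answers OK, ENN on failure, or an empty packet for unsupported types. A typical exchange at the packet-payload level (the address is illustrative):

    host sends:  Z0,8048400,1      insert breakpoint at 0x08048400
    stub reply:  OK                (ENN if cpu_breakpoint_insert() failed)
    host sends:  z0,8048400,1      remove the same breakpoint
    stub reply:  OK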


@@ -182,14 +182,42 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
unsigned int limit;
limit = (e1 & 0xffff) | (e2 & 0x000f0000);
if (e2 & DESC_G_MASK)
limit = (limit << 12) | 0xfff;
return limit;
}
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
sc->base = get_seg_base(e1, e2);
sc->limit = get_seg_limit(e1, e2);
sc->flags = e2;
}
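get_seg_base() and get_seg_limit() simply reassemble the base and limit fields that an x86 segment descriptor scatters across its two 32-bit words (e1 low, e2 high); with the granularity bit set the 20-bit limit is scaled to 4 KB pages. A quick standalone check against the textbook flat 4 GB code descriptor (the descriptor value 0x0000ffff/0x00cf9a00 and DESC_G_MASK = bit 23 are standard x86 facts, not values taken from this patch):

    #include <assert.h>
    #include <stdint.h>

    #define DESC_G_MASK (1 << 23)   /* granularity bit in the high word */

    /* same arithmetic as get_seg_limit()/get_seg_base() above */
    static uint32_t seg_limit(uint32_t e1, uint32_t e2)
    {
        uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
        if (e2 & DESC_G_MASK)
            limit = (limit << 12) | 0xfff;
        return limit;
    }

    static uint32_t seg_base(uint32_t e1, uint32_t e2)
    {
        return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    }

    int main(void)
    {
        uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;  /* flat 4 GB code segment */
        assert(seg_base(e1, e2) == 0);
        assert(seg_limit(e1, e2) == 0xffffffff);
        return 0;
    }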
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg, selector,
(uint8_t *)(selector << 4), 0xffff, 0);
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
unsigned int next_eip)
unsigned int next_eip, int is_hw)
{
SegmentCache *dt;
uint8_t *ptr, *ssp;
int type, dpl, cpl, selector, ss_dpl;
int type, dpl, selector, ss_dpl, cpl;
int has_error_code, new_stack, shift;
uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
uint32_t old_cs, old_ss, old_esp, old_eip;
@@ -216,10 +244,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
break;
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (env->eflags & VM_MASK)
cpl = 3;
else
cpl = env->segs[R_CS].selector & 3;
cpl = env->hflags & HF_CPL_MASK;
/* check privledge if software int */
if (is_int && dpl < cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
@@ -269,7 +294,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
shift = type >> 3;
has_error_code = 0;
if (!is_int) {
if (!is_int && !is_hw) {
switch(intno) {
case 8:
case 10:
@@ -289,22 +314,31 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
/* XXX: check that enough room is available */
if (new_stack) {
old_esp = env->regs[R_ESP];
old_esp = ESP;
old_ss = env->segs[R_SS].selector;
load_seg(R_SS, ss, env->eip);
ss = (ss & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss,
get_seg_base(ss_e1, ss_e2),
get_seg_limit(ss_e1, ss_e2),
ss_e2);
} else {
old_esp = 0;
old_ss = 0;
esp = env->regs[R_ESP];
esp = ESP;
}
if (is_int)
old_eip = next_eip;
else
old_eip = env->eip;
old_cs = env->segs[R_CS].selector;
load_seg(R_CS, selector, env->eip);
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
cpu_x86_set_cpl(env, dpl);
env->eip = offset;
env->regs[R_ESP] = esp - push_size;
ESP = esp - push_size;
ssp = env->segs[R_SS].base + esp;
if (shift == 1) {
int old_eflags;
@@ -378,23 +412,22 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
ptr = dt->base + intno * 4;
offset = lduw(ptr);
selector = lduw(ptr + 2);
esp = env->regs[R_ESP] & 0xffff;
ssp = env->segs[R_SS].base + esp;
esp = ESP;
ssp = env->segs[R_SS].base;
if (is_int)
old_eip = next_eip;
else
old_eip = env->eip;
old_cs = env->segs[R_CS].selector;
ssp -= 2;
stw(ssp, compute_eflags());
ssp -= 2;
stw(ssp, old_cs);
ssp -= 2;
stw(ssp, old_eip);
esp -= 6;
esp -= 2;
stw(ssp + (esp & 0xffff), compute_eflags());
esp -= 2;
stw(ssp + (esp & 0xffff), old_cs);
esp -= 2;
stw(ssp + (esp & 0xffff), old_eip);
/* update processor state */
env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
ESP = (ESP & ~0xffff) | (esp & 0xffff);
env->eip = offset;
env->segs[R_CS].selector = selector;
env->segs[R_CS].base = (uint8_t *)(selector << 4);
@@ -415,7 +448,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
e2 = ldl(ptr + 4);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = 3;
cpl = env->hflags & HF_CPL_MASK;
/* check privledge if software int */
if (is_int && dpl < cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
@@ -433,10 +466,10 @@ void do_interrupt_user(int intno, int is_int, int error_code,
* instruction. It is only relevant if is_int is TRUE.
*/
void do_interrupt(int intno, int is_int, int error_code,
unsigned int next_eip)
unsigned int next_eip, int is_hw)
{
if (env->cr[0] & CR0_PE_MASK) {
do_interrupt_protected(intno, is_int, error_code, next_eip);
do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
} else {
do_interrupt_real(intno, is_int, error_code, next_eip);
}
@@ -597,15 +630,6 @@ void helper_cpuid(void)
}
}
static inline void load_seg_cache(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
if (e2 & DESC_G_MASK)
sc->limit = (sc->limit << 12) | 0xfff;
sc->flags = e2;
}
void helper_lldt_T0(void)
{
int selector;
@@ -633,7 +657,7 @@ void helper_lldt_T0(void)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
load_seg_cache(&env->ldt, e1, e2);
load_seg_cache_raw_dt(&env->ldt, e1, e2);
}
env->ldt.selector = selector;
}
@@ -668,30 +692,26 @@ void helper_ltr_T0(void)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
load_seg_cache(&env->tr, e1, e2);
load_seg_cache_raw_dt(&env->tr, e1, e2);
e2 |= 0x00000200; /* set the busy bit */
stl(ptr + 4, e2);
}
env->tr.selector = selector;
}
/* only works if protected mode and not VM86 */
/* only works if protected mode and not VM86. Calling load_seg with
seg_reg == R_CS is discouraged */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
SegmentCache *sc;
uint32_t e1, e2;
sc = &env->segs[seg_reg];
if ((selector & 0xfffc) == 0) {
/* null selector case */
if (seg_reg == R_SS) {
EIP = cur_eip;
raise_exception_err(EXCP0D_GPF, 0);
} else {
/* XXX: each access should trigger an exception */
sc->base = NULL;
sc->limit = 0;
sc->flags = 0;
cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
}
} else {
if (load_segment(&e1, &e2, selector) != 0) {
@@ -723,97 +743,353 @@ void load_seg(int seg_reg, int selector, unsigned int cur_eip)
else
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
}
load_seg_cache(sc, e1, e2);
cpu_x86_load_seg_cache(env, seg_reg, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
#if 0
fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
}
sc->selector = selector;
}
/* protected mode jump */
void jmp_seg(int selector, unsigned int new_eip)
void helper_ljmp_protected_T0_T1(void)
{
SegmentCache sc1;
uint32_t e1, e2, cpl, dpl, rpl;
int new_cs, new_eip;
uint32_t e1, e2, cpl, dpl, rpl, limit;
if ((selector & 0xfffc) == 0) {
new_cs = T0;
new_eip = T1;
if ((new_cs & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, 0);
}
if (load_segment(&e1, &e2, selector) != 0)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
cpl = env->segs[R_CS].selector & 3;
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_CS_MASK) {
/* conforming code segment */
if (dpl > cpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
} else {
/* non conforming code segment */
rpl = selector & 3;
rpl = new_cs & 3;
if (rpl > cpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (dpl != cpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
}
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
load_seg_cache(&sc1, e1, e2);
if (new_eip > sc1.limit)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
env->segs[R_CS].base = sc1.base;
env->segs[R_CS].limit = sc1.limit;
env->segs[R_CS].flags = sc1.flags;
env->segs[R_CS].selector = (selector & 0xfffc) | cpl;
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
limit = get_seg_limit(e1, e2);
if (new_eip > limit)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
EIP = new_eip;
} else {
cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
selector, new_eip);
new_cs, new_eip);
}
}
/* init the segment cache in vm86 mode */
static inline void load_seg_vm(int seg, int selector)
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
SegmentCache *sc = &env->segs[seg];
selector &= 0xffff;
sc->base = (uint8_t *)(selector << 4);
sc->selector = selector;
sc->flags = 0;
sc->limit = 0xffff;
int new_cs, new_eip;
uint32_t esp, esp_mask;
uint8_t *ssp;
new_cs = T0;
new_eip = T1;
esp = ESP;
esp_mask = 0xffffffff;
if (!(env->segs[R_SS].flags & DESC_B_MASK))
esp_mask = 0xffff;
ssp = env->segs[R_SS].base;
if (shift) {
esp -= 4;
stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
esp -= 4;
stl(ssp + (esp & esp_mask), next_eip);
} else {
esp -= 2;
stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
esp -= 2;
stw(ssp + (esp & esp_mask), next_eip);
}
if (!(env->segs[R_SS].flags & DESC_B_MASK))
ESP = (ESP & ~0xffff) | (esp & 0xffff);
else
ESP = esp;
env->eip = new_eip;
env->segs[R_CS].selector = new_cs;
env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
/* protected mode iret */
void helper_iret_protected(int shift)
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
uint32_t new_es, new_ds, new_fs, new_gs;
uint32_t e1, e2;
int cpl, dpl, rpl, eflags_mask;
uint8_t *ssp;
int new_cs, new_eip;
uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
uint32_t old_ss, old_esp, val, i, limit;
uint8_t *ssp, *old_ssp;
sp = env->regs[R_ESP];
if (!(env->segs[R_SS].flags & DESC_B_MASK))
sp &= 0xffff;
new_cs = T0;
new_eip = T1;
if ((new_cs & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_CS_MASK) {
/* conforming code segment */
if (dpl > cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
} else {
/* non conforming code segment */
rpl = new_cs & 3;
if (rpl > cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (dpl != cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
}
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
sp = ESP;
if (!(env->segs[R_SS].flags & DESC_B_MASK))
sp &= 0xffff;
ssp = env->segs[R_SS].base + sp;
if (shift) {
ssp -= 4;
stl(ssp, env->segs[R_CS].selector);
ssp -= 4;
stl(ssp, next_eip);
} else {
ssp -= 2;
stw(ssp, env->segs[R_CS].selector);
ssp -= 2;
stw(ssp, next_eip);
}
sp -= (4 << shift);
limit = get_seg_limit(e1, e2);
if (new_eip > limit)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* from this point, not restartable */
if (!(env->segs[R_SS].flags & DESC_B_MASK))
ESP = (ESP & 0xffff0000) | (sp & 0xffff);
else
ESP = sp;
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
EIP = new_eip;
} else {
/* check gate type */
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
switch(type) {
case 1: /* available 286 TSS */
case 9: /* available 386 TSS */
case 5: /* task gate */
cpu_abort(env, "task gate not supported");
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
break;
default:
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
break;
}
shift = type >> 3;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
rpl = new_cs & 3;
if (dpl < cpl || dpl < rpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* check valid bit */
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
selector = e1 >> 16;
offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
if ((selector & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, selector) != 0)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (dpl > cpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner priviledge */
get_ss_esp_from_tss(&ss, &sp, dpl);
if ((ss & 0xfffc) == 0)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if ((ss & 3) != dpl)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (load_segment(&ss_e1, &ss_e2, ss) != 0)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
if (ss_dpl != dpl)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
param_count = e2 & 0x1f;
push_size = ((param_count * 2) + 8) << shift;
old_esp = ESP;
old_ss = env->segs[R_SS].selector;
if (!(env->segs[R_SS].flags & DESC_B_MASK))
old_esp &= 0xffff;
old_ssp = env->segs[R_SS].base + old_esp;
/* XXX: from this point not restartable */
ss = (ss & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss,
get_seg_base(ss_e1, ss_e2),
get_seg_limit(ss_e1, ss_e2),
ss_e2);
if (!(env->segs[R_SS].flags & DESC_B_MASK))
sp &= 0xffff;
ssp = env->segs[R_SS].base + sp;
if (shift) {
ssp -= 4;
stl(ssp, old_ss);
ssp -= 4;
stl(ssp, old_esp);
ssp -= 4 * param_count;
for(i = 0; i < param_count; i++) {
val = ldl(old_ssp + i * 4);
stl(ssp + i * 4, val);
}
} else {
ssp -= 2;
stw(ssp, old_ss);
ssp -= 2;
stw(ssp, old_esp);
ssp -= 2 * param_count;
for(i = 0; i < param_count; i++) {
val = lduw(old_ssp + i * 2);
stw(ssp + i * 2, val);
}
}
} else {
/* to same priviledge */
if (!(env->segs[R_SS].flags & DESC_B_MASK))
sp &= 0xffff;
ssp = env->segs[R_SS].base + sp;
push_size = (4 << shift);
}
if (shift) {
ssp -= 4;
stl(ssp, env->segs[R_CS].selector);
ssp -= 4;
stl(ssp, next_eip);
} else {
ssp -= 2;
stw(ssp, env->segs[R_CS].selector);
ssp -= 2;
stw(ssp, next_eip);
}
sp -= push_size;
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
cpu_x86_set_cpl(env, dpl);
/* from this point, not restartable if same priviledge */
if (!(env->segs[R_SS].flags & DESC_B_MASK))
ESP = (ESP & 0xffff0000) | (sp & 0xffff);
else
ESP = sp;
EIP = offset;
}
}
/* real mode iret */
void helper_iret_real(int shift)
{
uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
uint8_t *ssp;
int eflags_mask;
sp = ESP & 0xffff;
ssp = env->segs[R_SS].base + sp;
if (shift == 1) {
/* 32 bits */
new_eflags = ldl(ssp + 8);
new_cs = ldl(ssp + 4) & 0xffff;
new_eip = ldl(ssp);
if (new_eflags & VM_MASK)
goto return_to_vm86;
new_eip = ldl(ssp) & 0xffff;
} else {
/* 16 bits */
new_eflags = lduw(ssp + 4);
new_cs = lduw(ssp + 2);
new_eip = lduw(ssp);
}
new_esp = sp + (6 << shift);
ESP = (ESP & 0xffff0000) |
(new_esp & 0xffff);
load_seg_vm(R_CS, new_cs);
env->eip = new_eip;
eflags_mask = FL_UPDATE_CPL0_MASK;
if (shift == 0)
eflags_mask &= 0xffff;
load_eflags(new_eflags, eflags_mask);
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
uint32_t new_es, new_ds, new_fs, new_gs;
uint32_t e1, e2, ss_e1, ss_e2;
int cpl, dpl, rpl, eflags_mask;
uint8_t *ssp;
sp = ESP;
if (!(env->segs[R_SS].flags & DESC_B_MASK))
sp &= 0xffff;
ssp = env->segs[R_SS].base + sp;
if (shift == 1) {
/* 32 bits */
if (is_iret)
new_eflags = ldl(ssp + 8);
new_cs = ldl(ssp + 4) & 0xffff;
new_eip = ldl(ssp);
if (is_iret && (new_eflags & VM_MASK))
goto return_to_vm86;
} else {
/* 16 bits */
if (is_iret)
new_eflags = lduw(ssp + 4);
new_cs = lduw(ssp + 2);
new_eip = lduw(ssp);
}
if ((new_cs & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (load_segment(&e1, &e2, new_cs) != 0)
@@ -821,7 +1097,7 @@ void helper_iret_protected(int shift)
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->segs[R_CS].selector & 3;
cpl = env->hflags & HF_CPL_MASK;
rpl = new_cs & 3;
if (rpl < cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -838,50 +1114,64 @@ void helper_iret_protected(int shift)
if (rpl == cpl) {
/* return to same priledge level */
load_seg(R_CS, new_cs, env->eip);
new_esp = sp + (6 << shift);
cpu_x86_load_seg_cache(env, R_CS, new_cs,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
} else {
/* return to differentr priviledge level */
/* return to different priviledge level */
ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
if (shift == 1) {
/* 32 bits */
new_esp = ldl(ssp + 12);
new_ss = ldl(ssp + 16) & 0xffff;
new_esp = ldl(ssp);
new_ss = ldl(ssp + 4) & 0xffff;
} else {
/* 16 bits */
new_esp = lduw(ssp + 6);
new_ss = lduw(ssp + 8);
new_esp = lduw(ssp);
new_ss = lduw(ssp + 2);
}
if ((new_ss & 3) != rpl)
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (load_segment(&e1, &e2, new_ss) != 0)
if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (!(e2 & DESC_S_MASK) ||
(e2 & DESC_CS_MASK) ||
!(e2 & DESC_W_MASK))
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
if (dpl != rpl)
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (!(e2 & DESC_P_MASK))
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
load_seg(R_CS, new_cs, env->eip);
load_seg(R_SS, new_ss, env->eip);
cpu_x86_load_seg_cache(env, R_CS, new_cs,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
cpu_x86_load_seg_cache(env, R_SS, new_ss,
get_seg_base(ss_e1, ss_e2),
get_seg_limit(ss_e1, ss_e2),
ss_e2);
cpu_x86_set_cpl(env, rpl);
}
if (env->segs[R_SS].flags & DESC_B_MASK)
env->regs[R_ESP] = new_esp;
ESP = new_esp;
else
env->regs[R_ESP] = (env->regs[R_ESP] & 0xffff0000) |
ESP = (ESP & 0xffff0000) |
(new_esp & 0xffff);
env->eip = new_eip;
if (cpl == 0)
eflags_mask = FL_UPDATE_CPL0_MASK;
else
eflags_mask = FL_UPDATE_MASK32;
if (shift == 0)
eflags_mask &= 0xffff;
load_eflags(new_eflags, eflags_mask);
if (is_iret) {
/* NOTE: 'cpl' can be different from the current CPL */
if (cpl == 0)
eflags_mask = FL_UPDATE_CPL0_MASK;
else
eflags_mask = FL_UPDATE_MASK32;
if (shift == 0)
eflags_mask &= 0xffff;
load_eflags(new_eflags, eflags_mask);
}
return;
return_to_vm86:
@@ -895,14 +1185,25 @@ void helper_iret_protected(int shift)
/* modify processor state */
load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
load_seg_vm(R_CS, new_cs);
cpu_x86_set_cpl(env, 3);
load_seg_vm(R_SS, new_ss);
load_seg_vm(R_ES, new_es);
load_seg_vm(R_DS, new_ds);
load_seg_vm(R_FS, new_fs);
load_seg_vm(R_GS, new_gs);
env->eip = new_eip;
env->regs[R_ESP] = new_esp;
ESP = new_esp;
}
void helper_iret_protected(int shift)
{
helper_ret_protected(shift, 1, 0);
}
void helper_lret_protected(int shift, int addend)
{
helper_ret_protected(shift, 0, addend);
}
void helper_movl_crN_T0(int reg)
@@ -1469,3 +1770,34 @@ void helper_frstor(uint8_t *ptr, int data32)
}
}
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error */
void tlb_fill(unsigned long addr, int is_write, void *retaddr)
{
TranslationBlock *tb;
int ret;
unsigned long pc;
ret = cpu_x86_handle_mmu_fault(env, addr, is_write);
if (ret) {
/* now we have a real cpu fault */
pc = (unsigned long)retaddr;
tb = tb_find_pc(pc);
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc);
}
raise_exception_err(EXCP0E_PAGE, env->error_code);
}
}

helper2-i386.c (new file)

@@ -0,0 +1,390 @@
/*
* i386 helpers (without register variable usage)
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>
#include <sys/mman.h>
#include "cpu-i386.h"
#include "exec.h"
//#define DEBUG_MMU
CPUX86State *cpu_x86_init(void)
{
CPUX86State *env;
int i;
static int inited;
cpu_exec_init();
env = malloc(sizeof(CPUX86State));
if (!env)
return NULL;
memset(env, 0, sizeof(CPUX86State));
/* basic FPU init */
for(i = 0;i < 8; i++)
env->fptags[i] = 1;
env->fpuc = 0x37f;
/* flags setup : we activate the IRQs by default as in user mode */
env->eflags = 0x2 | IF_MASK;
tlb_flush(env);
#ifdef CONFIG_SOFTMMU
env->hflags |= HF_SOFTMMU_MASK;
#endif
/* init various static tables */
if (!inited) {
inited = 1;
optimize_flags_init();
}
return env;
}
void cpu_x86_close(CPUX86State *env)
{
free(env);
}
/***********************************************************/
/* x86 debug */
static const char *cc_op_str[] = {
"DYNAMIC",
"EFLAGS",
"MUL",
"ADDB",
"ADDW",
"ADDL",
"ADCB",
"ADCW",
"ADCL",
"SUBB",
"SUBW",
"SUBL",
"SBBB",
"SBBW",
"SBBL",
"LOGICB",
"LOGICW",
"LOGICL",
"INCB",
"INCW",
"INCL",
"DECB",
"DECW",
"DECL",
"SHLB",
"SHLW",
"SHLL",
"SARB",
"SARW",
"SARL",
};
void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
{
int eflags;
char cc_op_name[32];
eflags = env->eflags;
fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
"EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]\n",
env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
env->eip, eflags,
eflags & DF_MASK ? 'D' : '-',
eflags & CC_O ? 'O' : '-',
eflags & CC_S ? 'S' : '-',
eflags & CC_Z ? 'Z' : '-',
eflags & CC_A ? 'A' : '-',
eflags & CC_P ? 'P' : '-',
eflags & CC_C ? 'C' : '-');
fprintf(f, "CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x\n",
env->segs[R_CS].selector,
env->segs[R_SS].selector,
env->segs[R_DS].selector,
env->segs[R_ES].selector,
env->segs[R_FS].selector,
env->segs[R_GS].selector);
if (flags & X86_DUMP_CCOP) {
if ((unsigned)env->cc_op < CC_OP_NB)
strcpy(cc_op_name, cc_op_str[env->cc_op]);
else
snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
env->cc_src, env->cc_dst, cc_op_name);
}
if (flags & X86_DUMP_FPU) {
fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
(double)env->fpregs[0],
(double)env->fpregs[1],
(double)env->fpregs[2],
(double)env->fpregs[3]);
fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
(double)env->fpregs[4],
(double)env->fpregs[5],
(double)env->fpregs[7],
(double)env->fpregs[8]);
}
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
/* called when cr3 or PG bit are modified */
static int last_pg_state = -1;
static int last_pe_state = 0;
int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
void cpu_x86_update_cr0(CPUX86State *env)
{
int pg_state, pe_state;
#ifdef DEBUG_MMU
printf("CR0 update: CR0=0x%08x\n", env->cr[0]);
#endif
pg_state = env->cr[0] & CR0_PG_MASK;
if (pg_state != last_pg_state) {
page_unmap();
tlb_flush(env);
last_pg_state = pg_state;
}
pe_state = env->cr[0] & CR0_PE_MASK;
if (last_pe_state != pe_state) {
tb_flush();
last_pe_state = pe_state;
}
}
void cpu_x86_update_cr3(CPUX86State *env)
{
if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
printf("CR3 update: CR3=%08x\n", env->cr[3]);
#endif
page_unmap();
tlb_flush(env);
}
}
void cpu_x86_init_mmu(CPUX86State *env)
{
last_pg_state = -1;
cpu_x86_update_cr0(env);
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
int flags;
unsigned long virt_addr;
tlb_flush_page(env, addr);
flags = page_get_flags(addr);
if (flags & PAGE_VALID) {
virt_addr = addr & ~0xfff;
munmap((void *)virt_addr, 4096);
page_set_flags(virt_addr, virt_addr + 4096, 0);
}
}
/* return value:
-1 = cannot handle fault
0 = nothing more to do
1 = generate PF fault
2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
{
uint8_t *pde_ptr, *pte_ptr;
uint32_t pde, pte, virt_addr;
int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
unsigned long pd;
cpl = env->hflags & HF_CPL_MASK;
is_user = (cpl == 3);
#ifdef DEBUG_MMU
printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
addr, is_write, is_user, env->eip);
#endif
if (env->user_mode_only) {
/* user mode only emulation */
error_code = 0;
goto do_fault;
}
if (!(env->cr[0] & CR0_PG_MASK)) {
pte = addr;
virt_addr = addr & ~0xfff;
prot = PROT_READ | PROT_WRITE;
page_size = 4096;
goto do_mapping;
}
/* page directory entry */
pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
pde = ldl(pde_ptr);
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (is_user) {
if (!(pde & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) && (pde & PG_USER_MASK) &&
is_write && !(pde & PG_RW_MASK))
goto do_fault_protect;
}
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
stl(pde_ptr, pde);
}
pte = pde & ~0x003ff000; /* align to 4MB */
page_size = 4096 * 1024;
virt_addr = addr & ~0x003fffff;
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
stl(pde_ptr, pde);
}
/* page directory entry */
pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
pte = ldl(pte_ptr);
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
if (is_user) {
if (!(pte & PG_USER_MASK))
goto do_fault_protect;
if (is_write && !(pte & PG_RW_MASK))
goto do_fault_protect;
} else {
if ((env->cr[0] & CR0_WP_MASK) && (pte & PG_USER_MASK) &&
is_write && !(pte & PG_RW_MASK))
goto do_fault_protect;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
stl(pte_ptr, pte);
}
page_size = 4096;
virt_addr = addr & ~0xfff;
}
/* the page can be put in the TLB */
prot = PROT_READ;
if (is_user) {
if (pte & PG_RW_MASK)
prot |= PROT_WRITE;
} else {
if (!(env->cr[0] & CR0_WP_MASK) || !(pte & PG_USER_MASK) ||
(pte & PG_RW_MASK))
prot |= PROT_WRITE;
}
do_mapping:
if (env->hflags & HF_SOFTMMU_MASK) {
unsigned long paddr, vaddr, address, addend, page_offset;
int index;
/* software MMU case. Even if 4MB pages, we map only one 4KB
page in the cache to avoid filling it too fast */
page_offset = (addr & ~0xfff) & (page_size - 1);
paddr = (pte & ~0xfff) + page_offset;
vaddr = virt_addr + page_offset;
index = (addr >> 12) & (CPU_TLB_SIZE - 1);
pd = physpage_find(paddr);
if (pd & 0xfff) {
/* IO memory case */
address = vaddr | pd;
addend = paddr;
} else {
/* standard memory */
address = vaddr;
addend = (unsigned long)phys_ram_base + pd;
}
addend -= vaddr;
env->tlb_read[is_user][index].address = address;
env->tlb_read[is_user][index].addend = addend;
if (prot & PROT_WRITE) {
env->tlb_write[is_user][index].address = address;
env->tlb_write[is_user][index].addend = addend;
}
}
ret = 0;
/* XXX: incorrect for 4MB pages */
pd = physpage_find(pte & ~0xfff);
if ((pd & 0xfff) != 0) {
/* IO access: no mapping is done as it will be handled by the
soft MMU */
if (!(env->hflags & HF_SOFTMMU_MASK))
ret = 2;
} else {
void *map_addr;
map_addr = mmap((void *)virt_addr, page_size, prot,
MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
if (map_addr == MAP_FAILED) {
fprintf(stderr,
"mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
pte & ~0xfff, virt_addr);
exit(1);
}
#ifdef DEBUG_MMU
printf("mmaping 0x%08x to virt 0x%08x pse=%d\n",
pte & ~0xfff, virt_addr, (page_size != 4096));
#endif
page_set_flags(virt_addr, virt_addr + page_size,
PAGE_VALID | PAGE_EXEC | prot);
}
return ret;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
do_fault:
env->cr[2] = addr;
env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
if (is_user)
env->error_code |= PG_ERROR_U_MASK;
return 1;
}
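The return codes documented above are what callers dispatch on: 0 means the mapping (or TLB entry) is now in place, 1 means a guest page fault must be raised with the error code left in env->error_code, 2 means the access has to go through the soft MMU, and -1 means the fault cannot be handled. A hedged sketch of such a caller, shaped after tlb_fill() in helper-i386.c but with the surrounding plumbing invented for the example:

    /* Illustration only: soft_mmu_retranslate() is hypothetical. */
    void handle_guest_access(CPUX86State *env, uint32_t addr, int is_write)
    {
        int ret = cpu_x86_handle_mmu_fault(env, addr, is_write);

        switch (ret) {
        case 0:     /* mapped: just retry the access */
            break;
        case 1:     /* real guest page fault */
            raise_exception_err(EXCP0E_PAGE, env->error_code);
            break;
        case 2:     /* I/O page without soft MMU: force soft-MMU execution */
            soft_mmu_retranslate(env);
            break;
        default:    /* -1: cannot handle the fault */
            cpu_abort(env, "unhandled MMU fault at 0x%08x", addr);
        }
    }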

hw/vga.c (new file; diff suppressed because it is too large)

hw/vga_template.h (new file)

@@ -0,0 +1,420 @@
/*
* QEMU VGA Emulator templates
*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#if DEPTH == 8
#define BPP 1
#define PIXEL_TYPE uint8_t
#elif DEPTH == 15 || DEPTH == 16
#define BPP 2
#define PIXEL_TYPE uint16_t
#elif DEPTH == 32
#define BPP 4
#define PIXEL_TYPE uint32_t
#else
#error unsupport depth
#endif
#if DEPTH != 15
static inline void glue(vga_draw_glyph_line_, DEPTH)(uint8_t *d,
uint32_t font_data,
uint32_t xorcol,
uint32_t bgcol)
{
#if BPP == 1
((uint32_t *)d)[0] = (dmask16[(font_data >> 4)] & xorcol) ^ bgcol;
((uint32_t *)d)[3] = (dmask16[(font_data >> 0) & 0xf] & xorcol) ^ bgcol;
#elif BPP == 2
((uint32_t *)d)[0] = (dmask4[(font_data >> 6)] & xorcol) ^ bgcol;
((uint32_t *)d)[1] = (dmask4[(font_data >> 4) & 3] & xorcol) ^ bgcol;
((uint32_t *)d)[2] = (dmask4[(font_data >> 2) & 3] & xorcol) ^ bgcol;
((uint32_t *)d)[3] = (dmask4[(font_data >> 0) & 3] & xorcol) ^ bgcol;
#else
((uint32_t *)d)[0] = (-((font_data >> 7)) & xorcol) ^ bgcol;
((uint32_t *)d)[1] = (-((font_data >> 6) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[2] = (-((font_data >> 5) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[3] = (-((font_data >> 4) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[4] = (-((font_data >> 3) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[5] = (-((font_data >> 2) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[6] = (-((font_data >> 1) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[7] = (-((font_data >> 0) & 1) & xorcol) ^ bgcol;
#endif
}
static void glue(vga_draw_glyph8_, DEPTH)(uint8_t *d, int linesize,
const uint8_t *font_ptr, int h,
uint32_t fgcol, uint32_t bgcol)
{
uint32_t font_data, xorcol;
xorcol = bgcol ^ fgcol;
do {
font_data = font_ptr[0];
glue(vga_draw_glyph_line_, DEPTH)(d, font_data, xorcol, bgcol);
font_ptr += 4;
d += linesize;
} while (--h);
}
static void glue(vga_draw_glyph16_, DEPTH)(uint8_t *d, int linesize,
const uint8_t *font_ptr, int h,
uint32_t fgcol, uint32_t bgcol)
{
uint32_t font_data, xorcol;
xorcol = bgcol ^ fgcol;
do {
font_data = font_ptr[0];
glue(vga_draw_glyph_line_, DEPTH)(d,
expand4to8[font_data >> 4],
xorcol, bgcol);
glue(vga_draw_glyph_line_, DEPTH)(d + 8 * BPP,
expand4to8[font_data & 0x0f],
xorcol, bgcol);
font_ptr += 4;
d += linesize;
} while (--h);
}
static void glue(vga_draw_glyph9_, DEPTH)(uint8_t *d, int linesize,
const uint8_t *font_ptr, int h,
uint32_t fgcol, uint32_t bgcol, int dup9)
{
uint32_t font_data, xorcol, v;
xorcol = bgcol ^ fgcol;
do {
font_data = font_ptr[0];
/* XXX: unaligned accesses are done */
#if BPP == 1
((uint32_t *)d)[0] = (dmask16[(font_data >> 4)] & xorcol) ^ bgcol;
v = (dmask16[(font_data >> 0) & 0xf] & xorcol) ^ bgcol;
((uint32_t *)d)[3] = v;
if (dup9)
((uint8_t *)d)[8] = v >> (24 * (1 - BIG));
else
((uint8_t *)d)[8] = bgcol;
#elif BPP == 2
((uint32_t *)d)[0] = (dmask4[(font_data >> 6)] & xorcol) ^ bgcol;
((uint32_t *)d)[1] = (dmask4[(font_data >> 4) & 3] & xorcol) ^ bgcol;
((uint32_t *)d)[2] = (dmask4[(font_data >> 2) & 3] & xorcol) ^ bgcol;
v = (dmask4[(font_data >> 0) & 3] & xorcol) ^ bgcol;
((uint32_t *)d)[3] = v;
if (dup9)
((uint16_t *)d)[8] = v >> (16 * (1 - BIG));
else
((uint16_t *)d)[8] = bgcol;
#else
((uint32_t *)d)[0] = ((-(font_data >> 7)) & xorcol) ^ bgcol;
((uint32_t *)d)[1] = ((-(font_data >> 6) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[2] = ((-(font_data >> 5) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[3] = ((-(font_data >> 4) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[4] = ((-(font_data >> 3) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[5] = ((-(font_data >> 2) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[6] = ((-(font_data >> 1) & 1) & xorcol) ^ bgcol;
v = ((-(font_data >> 0) & 1) & xorcol) ^ bgcol;
((uint32_t *)d)[7] = v;
if (dup9)
((uint32_t *)d)[8] = v;
else
((uint32_t *)d)[8] = bgcol;
#endif
font_ptr += 4;
d += linesize;
} while (--h);
}
/*
* 4 color mode
*/
static void glue(vga_draw_line2_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
uint32_t plane_mask, *palette, data, v;
int x;
palette = s1->last_palette;
plane_mask = mask16[s1->ar[0x12] & 0xf];
width >>= 3;
for(x = 0; x < width; x++) {
data = ((uint32_t *)s)[0];
data &= plane_mask;
v = expand2[GET_PLANE(data, 0)];
v |= expand2[GET_PLANE(data, 2)] << 2;
((PIXEL_TYPE *)d)[0] = palette[v >> 12];
((PIXEL_TYPE *)d)[1] = palette[(v >> 8) & 0xf];
((PIXEL_TYPE *)d)[2] = palette[(v >> 4) & 0xf];
((PIXEL_TYPE *)d)[3] = palette[(v >> 0) & 0xf];
v = expand2[GET_PLANE(data, 1)];
v |= expand2[GET_PLANE(data, 3)] << 2;
((PIXEL_TYPE *)d)[4] = palette[v >> 12];
((PIXEL_TYPE *)d)[5] = palette[(v >> 8) & 0xf];
((PIXEL_TYPE *)d)[6] = palette[(v >> 4) & 0xf];
((PIXEL_TYPE *)d)[7] = palette[(v >> 0) & 0xf];
d += BPP * 8;
s += 4;
}
}
#if BPP == 1
#define PUT_PIXEL2(d, n, v) ((uint16_t *)d)[(n)] = (v)
#elif BPP == 2
#define PUT_PIXEL2(d, n, v) ((uint32_t *)d)[(n)] = (v)
#else
#define PUT_PIXEL2(d, n, v) \
((uint32_t *)d)[2*(n)] = ((uint32_t *)d)[2*(n)+1] = (v)
#endif
/*
* 4 color mode, dup2 horizontal
*/
static void glue(vga_draw_line2d2_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
uint32_t plane_mask, *palette, data, v;
int x;
palette = s1->last_palette;
plane_mask = mask16[s1->ar[0x12] & 0xf];
width >>= 3;
for(x = 0; x < width; x++) {
data = ((uint32_t *)s)[0];
data &= plane_mask;
v = expand2[GET_PLANE(data, 0)];
v |= expand2[GET_PLANE(data, 2)] << 2;
PUT_PIXEL2(d, 0, palette[v >> 12]);
PUT_PIXEL2(d, 1, palette[(v >> 8) & 0xf]);
PUT_PIXEL2(d, 2, palette[(v >> 4) & 0xf]);
PUT_PIXEL2(d, 3, palette[(v >> 0) & 0xf]);
v = expand2[GET_PLANE(data, 1)];
v |= expand2[GET_PLANE(data, 3)] << 2;
PUT_PIXEL2(d, 4, palette[v >> 12]);
PUT_PIXEL2(d, 5, palette[(v >> 8) & 0xf]);
PUT_PIXEL2(d, 6, palette[(v >> 4) & 0xf]);
PUT_PIXEL2(d, 7, palette[(v >> 0) & 0xf]);
d += BPP * 16;
s += 4;
}
}
/*
* 16 color mode
*/
static void glue(vga_draw_line4_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
uint32_t plane_mask, data, v, *palette;
int x;
palette = s1->last_palette;
plane_mask = mask16[s1->ar[0x12] & 0xf];
width >>= 3;
for(x = 0; x < width; x++) {
data = ((uint32_t *)s)[0];
data &= plane_mask;
v = expand4[GET_PLANE(data, 0)];
v |= expand4[GET_PLANE(data, 1)] << 1;
v |= expand4[GET_PLANE(data, 2)] << 2;
v |= expand4[GET_PLANE(data, 3)] << 3;
((PIXEL_TYPE *)d)[0] = palette[v >> 28];
((PIXEL_TYPE *)d)[1] = palette[(v >> 24) & 0xf];
((PIXEL_TYPE *)d)[2] = palette[(v >> 20) & 0xf];
((PIXEL_TYPE *)d)[3] = palette[(v >> 16) & 0xf];
((PIXEL_TYPE *)d)[4] = palette[(v >> 12) & 0xf];
((PIXEL_TYPE *)d)[5] = palette[(v >> 8) & 0xf];
((PIXEL_TYPE *)d)[6] = palette[(v >> 4) & 0xf];
((PIXEL_TYPE *)d)[7] = palette[(v >> 0) & 0xf];
d += BPP * 8;
s += 4;
}
}
/*
* 16 color mode, dup2 horizontal
*/
static void glue(vga_draw_line4d2_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
uint32_t plane_mask, data, v, *palette;
int x;
palette = s1->last_palette;
plane_mask = mask16[s1->ar[0x12] & 0xf];
width >>= 3;
for(x = 0; x < width; x++) {
data = ((uint32_t *)s)[0];
data &= plane_mask;
v = expand4[GET_PLANE(data, 0)];
v |= expand4[GET_PLANE(data, 1)] << 1;
v |= expand4[GET_PLANE(data, 2)] << 2;
v |= expand4[GET_PLANE(data, 3)] << 3;
PUT_PIXEL2(d, 0, palette[v >> 28]);
PUT_PIXEL2(d, 1, palette[(v >> 24) & 0xf]);
PUT_PIXEL2(d, 2, palette[(v >> 20) & 0xf]);
PUT_PIXEL2(d, 3, palette[(v >> 16) & 0xf]);
PUT_PIXEL2(d, 4, palette[(v >> 12) & 0xf]);
PUT_PIXEL2(d, 5, palette[(v >> 8) & 0xf]);
PUT_PIXEL2(d, 6, palette[(v >> 4) & 0xf]);
PUT_PIXEL2(d, 7, palette[(v >> 0) & 0xf]);
d += BPP * 16;
s += 4;
}
}
/*
* 256 color mode, double pixels
*
* XXX: add plane_mask support (never used in standard VGA modes)
*/
static void glue(vga_draw_line8d2_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
uint32_t *palette;
int x;
palette = s1->last_palette;
width >>= 3;
for(x = 0; x < width; x++) {
PUT_PIXEL2(d, 0, palette[s[0]]);
PUT_PIXEL2(d, 1, palette[s[1]]);
PUT_PIXEL2(d, 2, palette[s[2]]);
PUT_PIXEL2(d, 3, palette[s[3]]);
d += BPP * 8;
s += 4;
}
}
/*
* standard 256 color mode
*
* XXX: add plane_mask support (never used in standard VGA modes)
*/
static void glue(vga_draw_line8_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
uint32_t *palette;
int x;
palette = s1->last_palette;
width >>= 3;
for(x = 0; x < width; x++) {
((PIXEL_TYPE *)d)[0] = palette[s[0]];
((PIXEL_TYPE *)d)[1] = palette[s[1]];
((PIXEL_TYPE *)d)[2] = palette[s[2]];
((PIXEL_TYPE *)d)[3] = palette[s[3]];
((PIXEL_TYPE *)d)[4] = palette[s[4]];
((PIXEL_TYPE *)d)[5] = palette[s[5]];
((PIXEL_TYPE *)d)[6] = palette[s[6]];
((PIXEL_TYPE *)d)[7] = palette[s[7]];
d += BPP * 8;
s += 8;
}
}
#endif /* DEPTH != 15 */
/* XXX: optimize */
/*
* 15 bit color
*/
static void glue(vga_draw_line15_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
#if DEPTH == 15 && !defined(WORDS_BIGENDIAN)
memcpy(d, s, width * 2);
#else
int w;
uint32_t v, r, g, b;
w = width;
do {
v = lduw((void *)s);
r = (v >> 7) & 0xf8;
g = (v >> 2) & 0xf8;
b = (v << 3) & 0xf8;
((PIXEL_TYPE *)d)[0] = glue(rgb_to_pixel, DEPTH)(r, g, b);
s += 2;
d += BPP;
} while (--w != 0);
#endif
}
/*
* 16 bit color
*/
static void glue(vga_draw_line16_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
#if DEPTH == 16 && !defined(WORDS_BIGENDIAN)
memcpy(d, s, width * 2);
#else
int w;
uint32_t v, r, g, b;
w = width;
do {
v = lduw((void *)s);
r = (v >> 8) & 0xf8;
g = (v >> 3) & 0xfc;
b = (v << 3) & 0xf8;
((PIXEL_TYPE *)d)[0] = glue(rgb_to_pixel, DEPTH)(r, g, b);
s += 2;
d += BPP;
} while (--w != 0);
#endif
}
/*
* 32 bit color
*/
static void glue(vga_draw_line32_, DEPTH)(VGAState *s1, uint8_t *d,
const uint8_t *s, int width)
{
#if DEPTH == 32 && !defined(WORDS_BIGENDIAN)
memcpy(d, s, width * 4);
#else
int w;
uint32_t r, g, b;
w = width;
do {
b = s[0];
g = s[1];
r = s[2];
((PIXEL_TYPE *)d)[0] = glue(rgb_to_pixel, DEPTH)(r, g, b);
s += 4;
d += BPP;
} while (--w != 0);
#endif
}
#undef PUT_PIXEL2
#undef DEPTH
#undef BPP
#undef PIXEL_TYPE
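Editorial note: the 16-color routines above rebuild each pixel from four VGA bit planes. The following standalone sketch (not part of the patch; expand4_bits stands in for the expand4[] lookup table) shows how one byte from each plane turns into eight 4-bit palette indices, leftmost pixel in the top nibble:
#include <stdint.h>
#include <stdio.h>
/* spread the 8 bits of one plane byte into 8 nibbles (bit i -> nibble i) */
static uint32_t expand4_bits(uint8_t b)
{
    uint32_t v = 0;
    int i;
    for (i = 0; i < 8; i++)
        v |= (uint32_t)((b >> i) & 1) << (i * 4);
    return v;
}
int main(void)
{
    /* one byte from each of the four planes, covering 8 pixels */
    uint8_t p0 = 0xAA, p1 = 0xCC, p2 = 0xF0, p3 = 0x0F;
    uint32_t v = expand4_bits(p0) | (expand4_bits(p1) << 1) |
                 (expand4_bits(p2) << 2) | (expand4_bits(p3) << 3);
    int x;
    /* pixel 0 is the leftmost pixel and comes from bit 7 of each plane,
       hence the top nibble (v >> 28), exactly as in the drawing loops above */
    for (x = 0; x < 8; x++)
        printf("pixel %d -> palette index %u\n", x,
               (unsigned)((v >> (28 - 4 * x)) & 0xf));
    return 0;
}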


@@ -74,7 +74,8 @@ static inline void init_thread(struct target_pt_regs *regs, struct image_info *i
regs->ARM_sp = infop->start_stack;
regs->ARM_r2 = tswapl(stack[2]); /* envp */
regs->ARM_r1 = tswapl(stack[1]); /* argv */
regs->ARM_r0 = tswapl(stack[0]); /* argc */
/* XXX: it seems that r0 is zeroed after ! */
// regs->ARM_r0 = tswapl(stack[0]); /* argc */
}
#define USE_ELF_CORE_DUMP
@@ -490,7 +491,7 @@ static unsigned int * create_elf_tables(char *p, int argc, int envc,
* Force 16 byte alignment here for generality.
*/
sp = (unsigned int *) (~15UL & (unsigned long) p);
sp -= exec ? DLINFO_ITEMS*2 : 2;
sp -= DLINFO_ITEMS*2;
dlinfo = sp;
sp -= envc+1;
envp = sp;
@@ -505,21 +506,20 @@ static unsigned int * create_elf_tables(char *p, int argc, int envc,
put_user (tswapl(id), dlinfo++); \
put_user (tswapl(val), dlinfo++)
if (exec) { /* Put this here for an ELF program interpreter */
NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
NEW_AUX_ENT (AT_UID, (target_ulong) getuid());
NEW_AUX_ENT (AT_EUID, (target_ulong) geteuid());
NEW_AUX_ENT (AT_GID, (target_ulong) getgid());
NEW_AUX_ENT (AT_EGID, (target_ulong) getegid());
}
NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
NEW_AUX_ENT (AT_UID, (target_ulong) getuid());
NEW_AUX_ENT (AT_EUID, (target_ulong) geteuid());
NEW_AUX_ENT (AT_GID, (target_ulong) getgid());
NEW_AUX_ENT (AT_EGID, (target_ulong) getegid());
NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT
put_user(tswapl(argc),--sp);
info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
while (argc-->0) {
@@ -1087,7 +1087,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r
create_elf_tables((char *)bprm->p,
bprm->argc,
bprm->envc,
(interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
&elf_ex,
load_addr, load_bias,
interp_load_addr,
(interpreter_type == INTERPRETER_AOUT ? 0 : 1),


@@ -246,8 +246,6 @@ void cpu_loop(CPUX86State *env)
#ifdef TARGET_ARM
#define ARM_SYSCALL_BASE 0x900000
void cpu_loop(CPUARMState *env)
{
int trapnr;
@@ -285,6 +283,9 @@ void cpu_loop(CPUARMState *env)
}
}
break;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
default:
error:
fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
@@ -432,6 +433,10 @@ int main(int argc, char **argv)
env->user_mode_only = 1;
#if defined(TARGET_I386)
cpu_x86_set_cpl(env, 3);
env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
/* linux register setup */
env->regs[R_EAX] = regs->eax;
env->regs[R_EBX] = regs->ebx;


@@ -60,44 +60,122 @@ static int signal_pending; /* non zero if a signal may be pending */
static void host_signal_handler(int host_signum, siginfo_t *info,
void *puc);
/* XXX: do it properly */
static uint8_t host_to_target_signal_table[65] = {
[SIGHUP] = TARGET_SIGHUP,
[SIGINT] = TARGET_SIGINT,
[SIGQUIT] = TARGET_SIGQUIT,
[SIGILL] = TARGET_SIGILL,
[SIGTRAP] = TARGET_SIGTRAP,
[SIGABRT] = TARGET_SIGABRT,
[SIGIOT] = TARGET_SIGIOT,
[SIGBUS] = TARGET_SIGBUS,
[SIGFPE] = TARGET_SIGFPE,
[SIGKILL] = TARGET_SIGKILL,
[SIGUSR1] = TARGET_SIGUSR1,
[SIGSEGV] = TARGET_SIGSEGV,
[SIGUSR2] = TARGET_SIGUSR2,
[SIGPIPE] = TARGET_SIGPIPE,
[SIGALRM] = TARGET_SIGALRM,
[SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
[SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
[SIGCHLD] = TARGET_SIGCHLD,
[SIGCONT] = TARGET_SIGCONT,
[SIGSTOP] = TARGET_SIGSTOP,
[SIGTSTP] = TARGET_SIGTSTP,
[SIGTTIN] = TARGET_SIGTTIN,
[SIGTTOU] = TARGET_SIGTTOU,
[SIGURG] = TARGET_SIGURG,
[SIGXCPU] = TARGET_SIGXCPU,
[SIGXFSZ] = TARGET_SIGXFSZ,
[SIGVTALRM] = TARGET_SIGVTALRM,
[SIGPROF] = TARGET_SIGPROF,
[SIGWINCH] = TARGET_SIGWINCH,
[SIGIO] = TARGET_SIGIO,
[SIGPWR] = TARGET_SIGPWR,
[SIGSYS] = TARGET_SIGSYS,
/* next signals stay the same */
};
static uint8_t target_to_host_signal_table[65];
static inline int host_to_target_signal(int sig)
{
return sig;
return host_to_target_signal_table[sig];
}
static inline int target_to_host_signal(int sig)
{
return sig;
return target_to_host_signal_table[sig];
}
void host_to_target_sigset(target_sigset_t *d, sigset_t *s)
void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
int i;
for(i = 0;i < TARGET_NSIG_WORDS; i++) {
unsigned long sigmask;
uint32_t target_sigmask;
sigmask = ((unsigned long *)s)[0];
target_sigmask = 0;
for(i = 0; i < 32; i++) {
if (sigmask & (1 << i))
target_sigmask |= 1 << (host_to_target_signal(i + 1) - 1);
}
#if TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 32
d->sig[0] = tswapl(target_sigmask);
for(i = 1;i < TARGET_NSIG_WORDS; i++) {
d->sig[i] = tswapl(((unsigned long *)s)[i]);
}
#elif TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 64 && TARGET_NSIG_WORDS == 2
d->sig[0] = tswapl(target_sigmask);
d->sig[1] = tswapl(sigmask >> 32);
#else
#error host_to_target_sigset
#endif
}
void target_to_host_sigset(sigset_t *d, target_sigset_t *s)
void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
int i;
for(i = 0;i < TARGET_NSIG_WORDS; i++) {
unsigned long sigmask;
target_ulong target_sigmask;
target_sigmask = tswapl(s->sig[0]);
sigmask = 0;
for(i = 0; i < 32; i++) {
if (target_sigmask & (1 << i))
sigmask |= 1 << (target_to_host_signal(i + 1) - 1);
}
#if TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 32
((unsigned long *)d)[0] = sigmask;
for(i = 1;i < TARGET_NSIG_WORDS; i++) {
((unsigned long *)d)[i] = tswapl(s->sig[i]);
}
#elif TARGET_LONG_BITS == 32 && HOST_LONG_BITS == 64 && TARGET_NSIG_WORDS == 2
((unsigned long *)d)[0] = sigmask | ((unsigned long)tswapl(s->sig[1]) << 32);
#else
#error target_to_host_sigset
#endif /* TARGET_LONG_BITS */
}
void host_to_target_old_sigset(target_ulong *old_sigset,
const sigset_t *sigset)
{
*old_sigset = tswap32(*(unsigned long *)sigset & 0xffffffff);
target_sigset_t d;
host_to_target_sigset(&d, sigset);
*old_sigset = d.sig[0];
}
void target_to_host_old_sigset(sigset_t *sigset,
const target_ulong *old_sigset)
{
sigemptyset(sigset);
*(unsigned long *)sigset = tswapl(*old_sigset);
target_sigset_t d;
int i;
d.sig[0] = *old_sigset;
for(i = 1;i < TARGET_NSIG_WORDS; i++)
d.sig[i] = 0;
target_to_host_sigset(sigset, &d);
}
/* siginfo conversion */
@@ -167,8 +245,18 @@ void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
void signal_init(void)
{
struct sigaction act;
int i;
int i, j;
/* generate signal conversion tables */
for(i = 1; i <= 64; i++) {
if (host_to_target_signal_table[i] == 0)
host_to_target_signal_table[i] = i;
}
for(i = 1; i <= 64; i++) {
j = host_to_target_signal_table[i];
target_to_host_signal_table[j] = i;
}
/* set all host signal handlers. ALL signals are blocked during
the handlers to serialize them. */
sigfillset(&act.sa_mask);
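Editorial note on the table-generation loops just above: entries not listed in host_to_target_signal_table default to the identity mapping, and the second loop derives the inverse table. A tiny standalone illustration, using a purely hypothetical swap of signals 4 and 10:
#include <stdint.h>
#include <stdio.h>
int main(void)
{
    static uint8_t host_to_target[65] = { [4] = 10, [10] = 4 }; /* hypothetical */
    uint8_t target_to_host[65] = { 0 };
    int i;
    for (i = 1; i <= 64; i++)
        if (host_to_target[i] == 0)
            host_to_target[i] = i;              /* identity for unlisted signals */
    for (i = 1; i <= 64; i++)
        target_to_host[host_to_target[i]] = i;  /* inverse mapping */
    printf("target 10 <- host %d\n", target_to_host[10]); /* prints 4 */
    printf("target 7  <- host %d\n", target_to_host[7]);  /* prints 7 */
    return 0;
}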
@@ -364,6 +452,80 @@ int do_sigaction(int sig, const struct target_sigaction *act,
return 0;
}
#define __put_user(x,ptr)\
({\
int size = sizeof(*ptr);\
switch(size) {\
case 1:\
stb(ptr, (typeof(*ptr))(x));\
break;\
case 2:\
stw(ptr, (typeof(*ptr))(x));\
break;\
case 4:\
stl(ptr, (typeof(*ptr))(x));\
break;\
case 8:\
stq(ptr, (typeof(*ptr))(x));\
break;\
default:\
abort();\
}\
0;\
})
#define __get_user(x, ptr) \
({\
int size = sizeof(*ptr);\
switch(size) {\
case 1:\
x = (typeof(*ptr))ldub(ptr);\
break;\
case 2:\
x = (typeof(*ptr))lduw(ptr);\
break;\
case 4:\
x = (typeof(*ptr))ldl(ptr);\
break;\
case 8:\
x = (typeof(*ptr))ldq(ptr);\
break;\
default:\
abort();\
}\
0;\
})
#define __copy_to_user(dst, src, size)\
({\
memcpy(dst, src, size);\
0;\
})
#define __copy_from_user(dst, src, size)\
({\
memcpy(dst, src, size);\
0;\
})
#define __clear_user(dst, size)\
({\
memset(dst, 0, size);\
0;\
})
#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
static inline int copy_siginfo_to_user(target_siginfo_t *tinfo,
const target_siginfo_t *info)
{
tswap_siginfo(tinfo, info);
return 0;
}
#ifdef TARGET_I386
/* from the Linux kernel */
@@ -472,44 +634,6 @@ struct rt_sigframe
* Set up a signal frame.
*/
#define __put_user(x,ptr)\
({\
int size = sizeof(*ptr);\
switch(size) {\
case 1:\
stb(ptr, (typeof(*ptr))(x));\
break;\
case 2:\
stw(ptr, (typeof(*ptr))(x));\
break;\
case 4:\
stl(ptr, (typeof(*ptr))(x));\
break;\
case 8:\
stq(ptr, (typeof(*ptr))(x));\
break;\
default:\
abort();\
}\
0;\
})
#define get_user(val, ptr) (typeof(*ptr))(*(ptr))
#define __copy_to_user(dst, src, size)\
({\
memcpy(dst, src, size);\
0;\
})
static inline int copy_siginfo_to_user(target_siginfo_t *tinfo,
const target_siginfo_t *info)
{
tswap_siginfo(tinfo, info);
return 0;
}
/* XXX: save x87 state */
static int
setup_sigcontext(struct target_sigcontext *sc, struct target_fpstate *fpstate,
@@ -825,6 +949,377 @@ badframe:
return 0;
}
#elif defined(TARGET_ARM)
struct target_sigcontext {
target_ulong trap_no;
target_ulong error_code;
target_ulong oldmask;
target_ulong arm_r0;
target_ulong arm_r1;
target_ulong arm_r2;
target_ulong arm_r3;
target_ulong arm_r4;
target_ulong arm_r5;
target_ulong arm_r6;
target_ulong arm_r7;
target_ulong arm_r8;
target_ulong arm_r9;
target_ulong arm_r10;
target_ulong arm_fp;
target_ulong arm_ip;
target_ulong arm_sp;
target_ulong arm_lr;
target_ulong arm_pc;
target_ulong arm_cpsr;
target_ulong fault_address;
};
typedef struct target_sigaltstack {
target_ulong ss_sp;
int ss_flags;
target_ulong ss_size;
} target_stack_t;
struct target_ucontext {
target_ulong uc_flags;
target_ulong uc_link;
target_stack_t uc_stack;
struct target_sigcontext uc_mcontext;
target_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct sigframe
{
struct target_sigcontext sc;
target_ulong extramask[TARGET_NSIG_WORDS-1];
target_ulong retcode;
};
struct rt_sigframe
{
struct target_siginfo *pinfo;
void *puc;
struct target_siginfo info;
struct target_ucontext uc;
target_ulong retcode;
};
#define TARGET_CONFIG_CPU_32 1
/*
* For ARM syscalls, we encode the syscall number into the instruction.
*/
#define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
/*
* For Thumb syscalls, we pass the syscall number via r7. We therefore
* need two 16-bit instructions.
*/
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
static const target_ulong retcodes[4] = {
SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
};
#define __put_user_error(x,p,e) __put_user(x, p)
#define __get_user_error(x,p,e) __get_user(x, p)
static inline int valid_user_regs(CPUState *regs)
{
return 1;
}
static int
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
CPUState *env, unsigned long mask)
{
int err = 0;
__put_user_error(env->regs[0], &sc->arm_r0, err);
__put_user_error(env->regs[1], &sc->arm_r1, err);
__put_user_error(env->regs[2], &sc->arm_r2, err);
__put_user_error(env->regs[3], &sc->arm_r3, err);
__put_user_error(env->regs[4], &sc->arm_r4, err);
__put_user_error(env->regs[5], &sc->arm_r5, err);
__put_user_error(env->regs[6], &sc->arm_r6, err);
__put_user_error(env->regs[7], &sc->arm_r7, err);
__put_user_error(env->regs[8], &sc->arm_r8, err);
__put_user_error(env->regs[9], &sc->arm_r9, err);
__put_user_error(env->regs[10], &sc->arm_r10, err);
__put_user_error(env->regs[11], &sc->arm_fp, err);
__put_user_error(env->regs[12], &sc->arm_ip, err);
__put_user_error(env->regs[13], &sc->arm_sp, err);
__put_user_error(env->regs[14], &sc->arm_lr, err);
__put_user_error(env->regs[15], &sc->arm_pc, err);
#ifdef TARGET_CONFIG_CPU_32
__put_user_error(env->cpsr, &sc->arm_cpsr, err);
#endif
__put_user_error(/* current->thread.trap_no */ 0, &sc->trap_no, err);
__put_user_error(/* current->thread.error_code */ 0, &sc->error_code, err);
__put_user_error(/* current->thread.address */ 0, &sc->fault_address, err);
__put_user_error(mask, &sc->oldmask, err);
return err;
}
static inline void *
get_sigframe(struct emulated_sigaction *ka, CPUState *regs, int framesize)
{
unsigned long sp = regs->regs[13];
#if 0
/*
* This is the X/Open sanctioned signal stack switching.
*/
if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
#endif
/*
* ATPCS B01 mandates 8-byte alignment
*/
return (void *)((sp - framesize) & ~7);
}
static int
setup_return(CPUState *env, struct emulated_sigaction *ka,
target_ulong *rc, void *frame, int usig)
{
target_ulong handler = (target_ulong)ka->sa._sa_handler;
target_ulong retcode;
int thumb = 0;
#if defined(TARGET_CONFIG_CPU_32)
target_ulong cpsr = env->cpsr;
#if 0
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
*/
if (ka->sa.sa_flags & SA_THIRTYTWO)
cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
#ifdef CONFIG_ARM_THUMB
if (elf_hwcap & HWCAP_THUMB) {
/*
* The LSB of the handler determines if we're going to
* be using THUMB or ARM mode for this signal handler.
*/
thumb = handler & 1;
if (thumb)
cpsr |= T_BIT;
else
cpsr &= ~T_BIT;
}
#endif
#endif
#endif /* TARGET_CONFIG_CPU_32 */
if (ka->sa.sa_flags & TARGET_SA_RESTORER) {
retcode = (target_ulong)ka->sa.sa_restorer;
} else {
unsigned int idx = thumb;
if (ka->sa.sa_flags & TARGET_SA_SIGINFO)
idx += 2;
if (__put_user(retcodes[idx], rc))
return 1;
#if 0
flush_icache_range((target_ulong)rc,
(target_ulong)(rc + 1));
#endif
retcode = ((target_ulong)rc) + thumb;
}
env->regs[0] = usig;
env->regs[13] = (target_ulong)frame;
env->regs[14] = retcode;
env->regs[15] = handler & (thumb ? ~1 : ~3);
#ifdef TARGET_CONFIG_CPU_32
env->cpsr = cpsr;
#endif
return 0;
}
static void setup_frame(int usig, struct emulated_sigaction *ka,
target_sigset_t *set, CPUState *regs)
{
struct sigframe *frame = get_sigframe(ka, regs, sizeof(*frame));
int err = 0;
err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);
if (TARGET_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
}
if (err == 0)
err = setup_return(regs, ka, &frame->retcode, frame, usig);
// return err;
}
static void setup_rt_frame(int usig, struct emulated_sigaction *ka,
target_siginfo_t *info,
target_sigset_t *set, CPUState *env)
{
struct rt_sigframe *frame = get_sigframe(ka, env, sizeof(*frame));
int err = 0;
#if 0
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
return 1;
#endif
__put_user_error(&frame->info, (target_ulong *)&frame->pinfo, err);
__put_user_error(&frame->uc, (target_ulong *)&frame->puc, err);
err |= copy_siginfo_to_user(&frame->info, info);
/* Clear all the bits of the ucontext we don't use. */
err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
err |= setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
env, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err == 0)
err = setup_return(env, ka, &frame->retcode, frame, usig);
if (err == 0) {
/*
* For realtime signals we must also set the second and third
* arguments for the signal handler.
* -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
*/
env->regs[1] = (target_ulong)frame->pinfo;
env->regs[2] = (target_ulong)frame->puc;
}
// return err;
}
static int
restore_sigcontext(CPUState *env, struct target_sigcontext *sc)
{
int err = 0;
__get_user_error(env->regs[0], &sc->arm_r0, err);
__get_user_error(env->regs[1], &sc->arm_r1, err);
__get_user_error(env->regs[2], &sc->arm_r2, err);
__get_user_error(env->regs[3], &sc->arm_r3, err);
__get_user_error(env->regs[4], &sc->arm_r4, err);
__get_user_error(env->regs[5], &sc->arm_r5, err);
__get_user_error(env->regs[6], &sc->arm_r6, err);
__get_user_error(env->regs[7], &sc->arm_r7, err);
__get_user_error(env->regs[8], &sc->arm_r8, err);
__get_user_error(env->regs[9], &sc->arm_r9, err);
__get_user_error(env->regs[10], &sc->arm_r10, err);
__get_user_error(env->regs[11], &sc->arm_fp, err);
__get_user_error(env->regs[12], &sc->arm_ip, err);
__get_user_error(env->regs[13], &sc->arm_sp, err);
__get_user_error(env->regs[14], &sc->arm_lr, err);
__get_user_error(env->regs[15], &sc->arm_pc, err);
#ifdef TARGET_CONFIG_CPU_32
__get_user_error(env->cpsr, &sc->arm_cpsr, err);
#endif
err |= !valid_user_regs(env);
return err;
}
long do_sigreturn(CPUState *env)
{
struct sigframe *frame;
target_sigset_t set;
sigset_t host_set;
/*
* Since we stacked the signal on a 64-bit boundary,
* then 'sp' should be word aligned here. If it's
* not, then the user is trying to mess with us.
*/
if (env->regs[13] & 7)
goto badframe;
frame = (struct sigframe *)env->regs[13];
#if 0
if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
goto badframe;
#endif
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (TARGET_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
target_to_host_sigset(&host_set, &set);
sigprocmask(SIG_SETMASK, &host_set, NULL);
if (restore_sigcontext(env, &frame->sc))
goto badframe;
#if 0
/* Send SIGTRAP if we're single-stepping */
if (ptrace_cancel_bpt(current))
send_sig(SIGTRAP, current, 1);
#endif
return env->regs[0];
badframe:
force_sig(SIGSEGV /* , current */);
return 0;
}
long do_rt_sigreturn(CPUState *env)
{
struct rt_sigframe *frame;
target_sigset_t set;
sigset_t host_set;
/*
* Since we stacked the signal on a 64-bit boundary,
* then 'sp' should be word aligned here. If it's
* not, then the user is trying to mess with us.
*/
if (env->regs[13] & 7)
goto badframe;
frame = (struct rt_sigframe *)env->regs[13];
#if 0
if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
goto badframe;
#endif
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
target_to_host_sigset(&host_set, &set);
sigprocmask(SIG_SETMASK, &host_set, NULL);
if (restore_sigcontext(env, &frame->uc.uc_mcontext))
goto badframe;
#if 0
/* Send SIGTRAP if we're single-stepping */
if (ptrace_cancel_bpt(current))
send_sig(SIGTRAP, current, 1);
#endif
return env->regs[0];
badframe:
force_sig(SIGSEGV /* , current */);
return 0;
}
#else
static void setup_frame(int sig, struct emulated_sigaction *ka,


@@ -68,6 +68,129 @@
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2])
#if defined(__powerpc__)
#undef __syscall_nr
#undef __sc_loadargs_0
#undef __sc_loadargs_1
#undef __sc_loadargs_2
#undef __sc_loadargs_3
#undef __sc_loadargs_4
#undef __sc_loadargs_5
#undef __sc_asm_input_0
#undef __sc_asm_input_1
#undef __sc_asm_input_2
#undef __sc_asm_input_3
#undef __sc_asm_input_4
#undef __sc_asm_input_5
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
/* need to redefine syscalls as Linux kernel defines are incorrect for
the clobber list */
/* On powerpc a system call basically clobbers the same registers like a
* function call, with the exception of LR (which is needed for the
* "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
* an error return status).
*/
#define __syscall_nr(nr, type, name, args...) \
unsigned long __sc_ret, __sc_err; \
{ \
register unsigned long __sc_0 __asm__ ("r0"); \
register unsigned long __sc_3 __asm__ ("r3"); \
register unsigned long __sc_4 __asm__ ("r4"); \
register unsigned long __sc_5 __asm__ ("r5"); \
register unsigned long __sc_6 __asm__ ("r6"); \
register unsigned long __sc_7 __asm__ ("r7"); \
\
__sc_loadargs_##nr(name, args); \
__asm__ __volatile__ \
("sc \n\t" \
"mfcr %0 " \
: "=&r" (__sc_0), \
"=&r" (__sc_3), "=&r" (__sc_4), \
"=&r" (__sc_5), "=&r" (__sc_6), \
"=&r" (__sc_7) \
: __sc_asm_input_##nr \
: "cr0", "ctr", "memory", \
"r8", "r9", "r10","r11", "r12"); \
__sc_ret = __sc_3; \
__sc_err = __sc_0; \
} \
if (__sc_err & 0x10000000) \
{ \
errno = __sc_ret; \
__sc_ret = -1; \
} \
return (type) __sc_ret
#define __sc_loadargs_0(name, dummy...) \
__sc_0 = __NR_##name
#define __sc_loadargs_1(name, arg1) \
__sc_loadargs_0(name); \
__sc_3 = (unsigned long) (arg1)
#define __sc_loadargs_2(name, arg1, arg2) \
__sc_loadargs_1(name, arg1); \
__sc_4 = (unsigned long) (arg2)
#define __sc_loadargs_3(name, arg1, arg2, arg3) \
__sc_loadargs_2(name, arg1, arg2); \
__sc_5 = (unsigned long) (arg3)
#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
__sc_loadargs_3(name, arg1, arg2, arg3); \
__sc_6 = (unsigned long) (arg4)
#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
__sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
__sc_7 = (unsigned long) (arg5)
#define __sc_asm_input_0 "0" (__sc_0)
#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
#define _syscall0(type,name) \
type name(void) \
{ \
__syscall_nr(0, type, name); \
}
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
__syscall_nr(1, type, name, arg1); \
}
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1, type2 arg2) \
{ \
__syscall_nr(2, type, name, arg1, arg2); \
}
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1, type2 arg2, type3 arg3) \
{ \
__syscall_nr(3, type, name, arg1, arg2, arg3); \
}
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
__syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
}
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
{ \
__syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
}
#endif
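Expansion example (editorial, not shown in the patch): with the redefinitions above, a declaration such as _syscall1(int, close, int, fd) produces a close() wrapper that loads r0 with __NR_close and r3 with fd, issues the sc instruction, copies the condition register with mfcr, and, if the CR0.SO bit (the 0x10000000 test) is set, stores the kernel's return value into errno and returns -1.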
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_statfs __NR_statfs
@@ -210,6 +333,21 @@ static inline void host_to_target_fds(target_long *target_fds,
#endif
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
static inline long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
return ticks;
#else
return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
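Quick sanity check of the scaling above (standalone sketch, not part of the patch): with HOST_HZ = 1024 as on Alpha and TARGET_HZ = 100, a reading of 10240 host ticks must come out as exactly 1000 target ticks.
#include <stdint.h>
#include <stdio.h>
#define HOST_HZ   1024
#define TARGET_HZ 100
static long host_to_target_clock_t(long ticks)
{
    /* same formula as the helper above */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
}
int main(void)
{
    printf("%ld\n", host_to_target_clock_t(10240)); /* prints 1000 */
    return 0;
}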
static inline void host_to_target_rusage(struct target_rusage *target_rusage,
const struct rusage *rusage)
{
@@ -1423,11 +1561,13 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
struct tms tms;
ret = get_errno(times(&tms));
if (tmsp) {
tmsp->tms_utime = tswapl(tms.tms_utime);
tmsp->tms_stime = tswapl(tms.tms_stime);
tmsp->tms_cutime = tswapl(tms.tms_cutime);
tmsp->tms_cstime = tswapl(tms.tms_cstime);
tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
}
if (!is_error(ret))
ret = host_to_target_clock_t(ret);
}
break;
case TARGET_NR_prof:
@@ -1763,7 +1903,17 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
}
break;
case TARGET_NR_select:
goto unimplemented;
{
struct target_sel_arg_struct *sel = (void *)arg1;
sel->n = tswapl(sel->n);
sel->inp = tswapl(sel->inp);
sel->outp = tswapl(sel->outp);
sel->exp = tswapl(sel->exp);
sel->tvp = tswapl(sel->tvp);
ret = do_select(sel->n, (void *)sel->inp, (void *)sel->outp,
(void *)sel->exp, (void *)sel->tvp);
}
break;
case TARGET_NR_symlink:
ret = get_errno(symlink((const char *)arg1, (const char *)arg2));
break;
@@ -1781,8 +1931,8 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
goto unimplemented;
case TARGET_NR_readdir:
goto unimplemented;
#ifdef TARGET_I386
case TARGET_NR_mmap:
#if defined(TARGET_I386) || defined(TARGET_ARM)
{
uint32_t v1, v2, v3, v4, v5, v6, *vptr;
vptr = (uint32_t *)arg1;
@@ -1796,13 +1946,14 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
target_to_host_bitmask(v4, mmap_flags_tbl),
v5, v6));
}
break;
#endif
#ifdef TARGET_I386
case TARGET_NR_mmap2:
#else
case TARGET_NR_mmap:
ret = get_errno(target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
arg5,
arg6));
#endif
break;
case TARGET_NR_mmap2:
ret = get_errno(target_mmap(arg1, arg2, arg3,
target_to_host_bitmask(arg4, mmap_flags_tbl),
arg5,


@@ -383,6 +383,8 @@ struct target_itimerval {
typedef target_long target_clock_t;
#define TARGET_HZ 100
struct target_tms {
target_clock_t tms_utime;
target_clock_t tms_stime;
@@ -390,6 +392,12 @@ struct target_tms {
target_clock_t tms_cstime;
};
struct target_sel_arg_struct {
target_long n;
target_long inp, outp, exp;
target_long tvp;
};
struct target_iovec {
target_long iov_base; /* Starting address */
target_long iov_len; /* Number of bytes */
@@ -533,8 +541,8 @@ static inline void target_siginitset(target_sigset_t *d, target_ulong set)
d->sig[i] = 0;
}
void host_to_target_sigset(target_sigset_t *d, sigset_t *s);
void target_to_host_sigset(sigset_t *d, target_sigset_t *s);
void host_to_target_sigset(target_sigset_t *d, const sigset_t *s);
void target_to_host_sigset(sigset_t *d, const target_sigset_t *s);
void host_to_target_old_sigset(target_ulong *old_sigset,
const sigset_t *sigset);
void target_to_host_old_sigset(sigset_t *sigset,
@@ -584,6 +592,8 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#define TARGET_SIGPROF 27
#define TARGET_SIGWINCH 28
#define TARGET_SIGIO 29
#define TARGET_SIGPWR 30
#define TARGET_SIGSYS 31
#define TARGET_SIGRTMIN 32
#define TARGET_SIG_BLOCK 0 /* for blocking signals */

m68k.ld (new file, 177 lines)

@@ -0,0 +1,177 @@
/* Script for -z combreloc: combine and sort reloc sections */
OUTPUT_FORMAT("elf32-m68k", "elf32-m68k",
"elf32-m68k")
OUTPUT_ARCH(m68k)
ENTRY(_start)
SEARCH_DIR("/usr/local/m68k-linux/lib");
/* Do we need any of these for elf?
__DYNAMIC = 0; */
SECTIONS
{
/* Read-only sections, merged into text segment: */
. = 0x60000000 + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rel.dyn :
{
*(.rel.init)
*(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
*(.rel.fini)
*(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
*(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
*(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
*(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
*(.rel.ctors)
*(.rel.dtors)
*(.rel.got)
*(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
}
.rela.dyn :
{
*(.rela.init)
*(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
*(.rela.fini)
*(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
*(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
*(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
*(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
*(.rela.ctors)
*(.rela.dtors)
*(.rela.got)
*(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
}
.rel.plt : { *(.rel.plt) }
.rela.plt : { *(.rela.plt) }
.init :
{
KEEP (*(.init))
} =0x4e754e75
.plt : { *(.plt) }
.text :
{
*(.text .stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
} =0x4e754e75
.fini :
{
KEEP (*(.fini))
} =0x4e754e75
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(0x2000) + (. & (0x2000 - 1));
/* Ensure the __preinit_array_start label is properly aligned. We
could instead move the label definition inside the section, but
the linker would then create the section even if it turns out to
be empty, which isn't pretty. */
. = ALIGN(32 / 8);
PROVIDE (__preinit_array_start = .);
.preinit_array : { *(.preinit_array) }
PROVIDE (__preinit_array_end = .);
PROVIDE (__init_array_start = .);
.init_array : { *(.init_array) }
PROVIDE (__init_array_end = .);
PROVIDE (__fini_array_start = .);
.fini_array : { *(.fini_array) }
PROVIDE (__fini_array_end = .);
.data :
{
*(.data .data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
.tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
.eh_frame : { KEEP (*(.eh_frame)) }
.gcc_except_table : { *(.gcc_except_table) }
.dynamic : { *(.dynamic) }
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.got : { *(.got.plt) *(.got) }
_edata = .;
PROVIDE (edata = .);
__bss_start = .;
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections. */
. = ALIGN(32 / 8);
}
. = ALIGN(32 / 8);
_end = .;
PROVIDE (end = .);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
}


@@ -154,11 +154,11 @@ void OPPROTO op_adcl_T0_T1_cc(void)
FORCE_RET();
}
#define OPSUB(sub, sbc, T0, T1) \
#define OPSUB(sub, sbc, res, T0, T1) \
\
void OPPROTO op_ ## sub ## l_T0_T1(void) \
{ \
T0 -= T1; \
res = T0 - T1; \
} \
\
void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
@@ -167,13 +167,14 @@ void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
src1 = T0; \
T0 -= T1; \
env->NZF = T0; \
env->CF = src1 < T1; \
env->CF = src1 >= T1; \
env->VF = (src1 ^ T1) & (src1 ^ T0); \
res = T0; \
} \
\
void OPPROTO op_ ## sbc ## l_T0_T1(void) \
{ \
T0 = T0 - T1 + env->CF - 1; \
res = T0 - T1 + env->CF - 1; \
} \
\
void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
@@ -182,20 +183,20 @@ void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
src1 = T0; \
if (!env->CF) { \
T0 = T0 - T1 - 1; \
T0 += T1; \
env->CF = src1 < T1; \
env->CF = src1 >= T1; \
} else { \
T0 = T0 - T1; \
env->CF = src1 <= T1; \
env->CF = src1 > T1; \
} \
env->VF = (src1 ^ T1) & (src1 ^ T0); \
env->NZF = T0; \
res = T0; \
FORCE_RET(); \
}
OPSUB(sub, sbc, T0, T1)
OPSUB(sub, sbc, T0, T0, T1)
OPSUB(rsb, rsc, T1, T0)
OPSUB(rsb, rsc, T0, T1, T0)
void OPPROTO op_andl_T0_T1(void)
{
@@ -222,122 +223,129 @@ void OPPROTO op_notl_T1(void)
T1 = ~T1;
}
void OPPROTO op_logic_cc(void)
void OPPROTO op_logic_T0_cc(void)
{
env->NZF = T0;
}
void OPPROTO op_logic_T1_cc(void)
{
env->NZF = T1;
}
#define EIP (env->regs[15])
void OPPROTO op_test_eq(void)
{
if (env->NZF == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_eq, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_ne(void)
{
if (env->NZF != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_ne, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_cs(void)
{
if (env->CF != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_cs, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_cc(void)
{
if (env->CF == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_cc, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_mi(void)
{
if ((env->NZF & 0x80000000) != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_mi, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_pl(void)
{
if ((env->NZF & 0x80000000) == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_pl, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_vs(void)
{
if ((env->VF & 0x80000000) != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_vs, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_vc(void)
{
if ((env->VF & 0x80000000) == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_vc, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_hi(void)
{
if (env->CF != 0 && env->NZF != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_hi, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_ls(void)
{
if (env->CF == 0 || env->NZF == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_ls, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_ge(void)
{
if (((env->VF ^ env->NZF) & 0x80000000) == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_ge, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_lt(void)
{
if (((env->VF ^ env->NZF) & 0x80000000) != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_lt, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_gt(void)
{
if (env->NZF != 0 && ((env->VF ^ env->NZF) & 0x80000000) == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_gt, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_test_le(void)
{
if (env->NZF == 0 || ((env->VF ^ env->NZF) & 0x80000000) != 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_test_le, PARAM1, 0, PARAM2);
FORCE_RET();
}
void OPPROTO op_jmp(void)
{
JUMP_TB(PARAM1, 1, PARAM2);
JUMP_TB(op_jmp, PARAM1, 1, PARAM2);
}
void OPPROTO op_exit_tb(void)
{
EXIT_TB();
}
void OPPROTO op_movl_T0_psr(void)
{
int ZF;
ZF = (env->NZF == 0);
T0 = env->cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
(env->CF << 29) | ((env->VF & 0x80000000) >> 3);
T0 = compute_cpsr();
}
/* NOTE: N = 1 and Z = 1 cannot be stored currently */

op-i386.c (163 lines changed)

@@ -376,70 +376,14 @@ void OPPROTO op_andl_A0_ffff(void)
/* memory access */
void OPPROTO op_ldub_T0_A0(void)
{
T0 = ldub((uint8_t *)A0);
}
#define MEMSUFFIX
#include "ops_mem.h"
void OPPROTO op_ldsb_T0_A0(void)
{
T0 = ldsb((int8_t *)A0);
}
#define MEMSUFFIX _user
#include "ops_mem.h"
void OPPROTO op_lduw_T0_A0(void)
{
T0 = lduw((uint8_t *)A0);
}
void OPPROTO op_ldsw_T0_A0(void)
{
T0 = ldsw((int8_t *)A0);
}
void OPPROTO op_ldl_T0_A0(void)
{
T0 = ldl((uint8_t *)A0);
}
void OPPROTO op_ldub_T1_A0(void)
{
T1 = ldub((uint8_t *)A0);
}
void OPPROTO op_ldsb_T1_A0(void)
{
T1 = ldsb((int8_t *)A0);
}
void OPPROTO op_lduw_T1_A0(void)
{
T1 = lduw((uint8_t *)A0);
}
void OPPROTO op_ldsw_T1_A0(void)
{
T1 = ldsw((int8_t *)A0);
}
void OPPROTO op_ldl_T1_A0(void)
{
T1 = ldl((uint8_t *)A0);
}
void OPPROTO op_stb_T0_A0(void)
{
stb((uint8_t *)A0, T0);
}
void OPPROTO op_stw_T0_A0(void)
{
stw((uint8_t *)A0, T0);
}
void OPPROTO op_stl_T0_A0(void)
{
stl((uint8_t *)A0, T0);
}
#define MEMSUFFIX _kernel
#include "ops_mem.h"
/* used for bit operations */
@@ -471,6 +415,12 @@ void OPPROTO op_hlt(void)
cpu_loop_exit();
}
void OPPROTO op_debug(void)
{
env->exception_index = EXCP_DEBUG;
cpu_loop_exit();
}
void OPPROTO op_raise_interrupt(void)
{
int intno;
@@ -507,6 +457,16 @@ void OPPROTO op_sti(void)
env->eflags |= IF_MASK;
}
void OPPROTO op_set_inhibit_irq(void)
{
env->hflags |= HF_INHIBIT_IRQ_MASK;
}
void OPPROTO op_reset_inhibit_irq(void)
{
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
#if 0
/* vm86plus instructions */
void OPPROTO op_cli_vm(void)
@@ -556,9 +516,9 @@ void OPPROTO op_cmpxchg8b(void)
helper_cmpxchg8b();
}
void OPPROTO op_jmp_tb_next(void)
void OPPROTO op_jmp(void)
{
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_jmp, PARAM1, 0, PARAM2);
}
void OPPROTO op_movl_T0_0(void)
@@ -566,6 +526,11 @@ void OPPROTO op_movl_T0_0(void)
T0 = 0;
}
void OPPROTO op_exit_tb(void)
{
EXIT_TB();
}
/* multiple size ops */
#define ldul ldl
@@ -624,6 +589,38 @@ void OPPROTO op_movswl_DX_AX(void)
EDX = (EDX & 0xffff0000) | (((int16_t)EAX >> 15) & 0xffff);
}
/* string ops helpers */
void OPPROTO op_addl_ESI_T0(void)
{
ESI += T0;
}
void OPPROTO op_addw_ESI_T0(void)
{
ESI = (ESI & ~0xffff) | ((ESI + T0) & 0xffff);
}
void OPPROTO op_addl_EDI_T0(void)
{
EDI += T0;
}
void OPPROTO op_addw_EDI_T0(void)
{
EDI = (EDI & ~0xffff) | ((EDI + T0) & 0xffff);
}
void OPPROTO op_decl_ECX(void)
{
ECX--;
}
void OPPROTO op_decw_ECX(void)
{
ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff);
}
/* push/pop */
void op_pushl_T0(void)
@@ -893,6 +890,7 @@ void OPPROTO op_das(void)
/* segment handling */
/* never use it with R_CS */
void OPPROTO op_movl_seg_T0(void)
{
load_seg(PARAM1, T0 & 0xffff, PARAM2);
@@ -937,9 +935,24 @@ void OPPROTO op_lar(void)
}
/* T0: segment, T1:eip */
void OPPROTO op_ljmp_T0_T1(void)
void OPPROTO op_ljmp_protected_T0_T1(void)
{
jmp_seg(T0 & 0xffff, T1);
helper_ljmp_protected_T0_T1();
}
void OPPROTO op_lcall_real_T0_T1(void)
{
helper_lcall_real_T0_T1(PARAM1, PARAM2);
}
void OPPROTO op_lcall_protected_T0_T1(void)
{
helper_lcall_protected_T0_T1(PARAM1, PARAM2);
}
void OPPROTO op_iret_real(void)
{
helper_iret_real(PARAM1);
}
void OPPROTO op_iret_protected(void)
@@ -947,6 +960,11 @@ void OPPROTO op_iret_protected(void)
helper_iret_protected(PARAM1);
}
void OPPROTO op_lret_protected(void)
{
helper_lret_protected(PARAM1, PARAM2);
}
void OPPROTO op_lldt_T0(void)
{
helper_lldt_T0();
@@ -1009,9 +1027,18 @@ void OPPROTO op_clts(void)
void OPPROTO op_jcc(void)
{
if (T0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(op_jcc, PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(op_jcc, PARAM1, 1, PARAM3);
FORCE_RET();
}
void OPPROTO op_jcc_im(void)
{
if (T0)
EIP = PARAM1;
else
EIP = PARAM2;
FORCE_RET();
}


@@ -1,244 +0,0 @@
void OPPROTO glue(glue(op_movs, SUFFIX), STRING_SUFFIX)(void)
{
int v, inc;
v = glue(ldu, SUFFIX)(SI_ADDR);
glue(st, SUFFIX)(DI_ADDR, v);
inc = (DF << SHIFT);
INC_SI();
INC_DI();
}
void OPPROTO glue(glue(op_rep_movs, SUFFIX), STRING_SUFFIX)(void)
{
int v, inc;
inc = (DF << SHIFT);
while (CX != 0) {
v = glue(ldu, SUFFIX)(SI_ADDR);
glue(st, SUFFIX)(DI_ADDR, v);
INC_SI();
INC_DI();
DEC_CX();
}
FORCE_RET();
}
void OPPROTO glue(glue(op_stos, SUFFIX), STRING_SUFFIX)(void)
{
int inc;
glue(st, SUFFIX)(DI_ADDR, EAX);
inc = (DF << SHIFT);
INC_DI();
}
void OPPROTO glue(glue(op_rep_stos, SUFFIX), STRING_SUFFIX)(void)
{
int inc;
inc = (DF << SHIFT);
while (CX != 0) {
glue(st, SUFFIX)(DI_ADDR, EAX);
INC_DI();
DEC_CX();
}
FORCE_RET();
}
void OPPROTO glue(glue(op_lods, SUFFIX), STRING_SUFFIX)(void)
{
int v, inc;
v = glue(ldu, SUFFIX)(SI_ADDR);
#if SHIFT == 0
EAX = (EAX & ~0xff) | v;
#elif SHIFT == 1
EAX = (EAX & ~0xffff) | v;
#else
EAX = v;
#endif
inc = (DF << SHIFT);
INC_SI();
}
/* don't know if it is used */
void OPPROTO glue(glue(op_rep_lods, SUFFIX), STRING_SUFFIX)(void)
{
int v, inc;
inc = (DF << SHIFT);
while (CX != 0) {
v = glue(ldu, SUFFIX)(SI_ADDR);
#if SHIFT == 0
EAX = (EAX & ~0xff) | v;
#elif SHIFT == 1
EAX = (EAX & ~0xffff) | v;
#else
EAX = v;
#endif
INC_SI();
DEC_CX();
}
FORCE_RET();
}
void OPPROTO glue(glue(op_scas, SUFFIX), STRING_SUFFIX)(void)
{
int v, inc;
v = glue(ldu, SUFFIX)(DI_ADDR);
inc = (DF << SHIFT);
INC_DI();
CC_SRC = v;
CC_DST = EAX - v;
}
void OPPROTO glue(glue(op_repz_scas, SUFFIX), STRING_SUFFIX)(void)
{
int v1, v2, inc;
if (CX != 0) {
/* NOTE: the flags are not modified if CX == 0 */
v1 = EAX & DATA_MASK;
inc = (DF << SHIFT);
do {
v2 = glue(ldu, SUFFIX)(DI_ADDR);
INC_DI();
DEC_CX();
if (v1 != v2)
break;
} while (CX != 0);
CC_SRC = v2;
CC_DST = v1 - v2;
CC_OP = CC_OP_SUBB + SHIFT;
}
FORCE_RET();
}
void OPPROTO glue(glue(op_repnz_scas, SUFFIX), STRING_SUFFIX)(void)
{
int v1, v2, inc;
if (CX != 0) {
/* NOTE: the flags are not modified if CX == 0 */
v1 = EAX & DATA_MASK;
inc = (DF << SHIFT);
do {
v2 = glue(ldu, SUFFIX)(DI_ADDR);
INC_DI();
DEC_CX();
if (v1 == v2)
break;
} while (CX != 0);
CC_SRC = v2;
CC_DST = v1 - v2;
CC_OP = CC_OP_SUBB + SHIFT;
}
FORCE_RET();
}
void OPPROTO glue(glue(op_cmps, SUFFIX), STRING_SUFFIX)(void)
{
int v1, v2, inc;
v1 = glue(ldu, SUFFIX)(SI_ADDR);
v2 = glue(ldu, SUFFIX)(DI_ADDR);
inc = (DF << SHIFT);
INC_SI();
INC_DI();
CC_SRC = v2;
CC_DST = v1 - v2;
}
void OPPROTO glue(glue(op_repz_cmps, SUFFIX), STRING_SUFFIX)(void)
{
int v1, v2, inc;
if (CX != 0) {
inc = (DF << SHIFT);
do {
v1 = glue(ldu, SUFFIX)(SI_ADDR);
v2 = glue(ldu, SUFFIX)(DI_ADDR);
INC_SI();
INC_DI();
DEC_CX();
if (v1 != v2)
break;
} while (CX != 0);
CC_SRC = v2;
CC_DST = v1 - v2;
CC_OP = CC_OP_SUBB + SHIFT;
}
FORCE_RET();
}
void OPPROTO glue(glue(op_repnz_cmps, SUFFIX), STRING_SUFFIX)(void)
{
int v1, v2, inc;
if (CX != 0) {
inc = (DF << SHIFT);
do {
v1 = glue(ldu, SUFFIX)(SI_ADDR);
v2 = glue(ldu, SUFFIX)(DI_ADDR);
INC_SI();
INC_DI();
DEC_CX();
if (v1 == v2)
break;
} while (CX != 0);
CC_SRC = v2;
CC_DST = v1 - v2;
CC_OP = CC_OP_SUBB + SHIFT;
}
FORCE_RET();
}
void OPPROTO glue(glue(op_outs, SUFFIX), STRING_SUFFIX)(void)
{
int v, dx, inc;
dx = EDX & 0xffff;
v = glue(ldu, SUFFIX)(SI_ADDR);
glue(cpu_x86_out, SUFFIX)(env, dx, v);
inc = (DF << SHIFT);
INC_SI();
}
void OPPROTO glue(glue(op_rep_outs, SUFFIX), STRING_SUFFIX)(void)
{
int v, dx, inc;
inc = (DF << SHIFT);
dx = EDX & 0xffff;
while (CX != 0) {
v = glue(ldu, SUFFIX)(SI_ADDR);
glue(cpu_x86_out, SUFFIX)(env, dx, v);
INC_SI();
DEC_CX();
}
FORCE_RET();
}
void OPPROTO glue(glue(op_ins, SUFFIX), STRING_SUFFIX)(void)
{
int v, dx, inc;
dx = EDX & 0xffff;
v = glue(cpu_x86_in, SUFFIX)(env, dx);
glue(st, SUFFIX)(DI_ADDR, v);
inc = (DF << SHIFT);
INC_DI();
}
void OPPROTO glue(glue(op_rep_ins, SUFFIX), STRING_SUFFIX)(void)
{
int v, dx, inc;
inc = (DF << SHIFT);
dx = EDX & 0xffff;
while (CX != 0) {
v = glue(cpu_x86_in, SUFFIX)(env, dx);
glue(st, SUFFIX)(DI_ADDR, v);
INC_DI();
DEC_CX();
}
FORCE_RET();
}
#undef STRING_SUFFIX
#undef SI_ADDR
#undef DI_ADDR
#undef INC_SI
#undef INC_DI
#undef CX
#undef DEC_CX

ops_mem.h (new file, 66 lines)

@@ -0,0 +1,66 @@
void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldub, MEMSUFFIX)((uint8_t *)A0);
}
void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldsb, MEMSUFFIX)((int8_t *)A0);
}
void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(lduw, MEMSUFFIX)((uint8_t *)A0);
}
void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldsw, MEMSUFFIX)((int8_t *)A0);
}
void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T0_A0)(void)
{
T0 = glue(ldl, MEMSUFFIX)((uint8_t *)A0);
}
void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldub, MEMSUFFIX)((uint8_t *)A0);
}
void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldsb, MEMSUFFIX)((int8_t *)A0);
}
void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(lduw, MEMSUFFIX)((uint8_t *)A0);
}
void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldsw, MEMSUFFIX)((int8_t *)A0);
}
void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T1_A0)(void)
{
T1 = glue(ldl, MEMSUFFIX)((uint8_t *)A0);
}
void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T0_A0)(void)
{
glue(stb, MEMSUFFIX)((uint8_t *)A0, T0);
}
void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T0_A0)(void)
{
glue(stw, MEMSUFFIX)((uint8_t *)A0, T0);
}
void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T0_A0)(void)
{
glue(stl, MEMSUFFIX)((uint8_t *)A0, T0);
}
#undef MEMSUFFIX
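Editorial note: ops_mem.h is included several times from op-i386.c (with MEMSUFFIX empty, _user and _kernel, as in the hunk further up), and QEMU's glue()/xglue() token-pasting macros stamp out one family of load/store ops per suffix. A minimal standalone sketch of the same trick, with glue re-declared locally just for the example:
#include <stdio.h>
#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)
#define MEMSUFFIX _user
void glue(op_ldub, MEMSUFFIX)(void) { printf("user-mode load\n"); }
#undef MEMSUFFIX
#define MEMSUFFIX _kernel
void glue(op_ldub, MEMSUFFIX)(void) { printf("kernel-mode load\n"); }
#undef MEMSUFFIX
int main(void)
{
    op_ldub_user();    /* generated by the first definition of MEMSUFFIX */
    op_ldub_kernel();  /* generated by the second definition of MEMSUFFIX */
    return 0;
}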


@@ -238,18 +238,18 @@ void OPPROTO glue(op_jb_sub, SUFFIX)(void)
src2 = CC_SRC;
if ((DATA_TYPE)src1 < (DATA_TYPE)src2)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 1, PARAM3);
FORCE_RET();
}
void OPPROTO glue(op_jz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST == 0)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -260,18 +260,18 @@ void OPPROTO glue(op_jbe_sub, SUFFIX)(void)
src2 = CC_SRC;
if ((DATA_TYPE)src1 <= (DATA_TYPE)src2)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 1, PARAM3);
FORCE_RET();
}
void OPPROTO glue(op_js_sub, SUFFIX)(void)
{
if (CC_DST & SIGN_MASK)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -282,9 +282,9 @@ void OPPROTO glue(op_jl_sub, SUFFIX)(void)
src2 = CC_SRC;
if ((DATA_STYPE)src1 < (DATA_STYPE)src2)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -295,9 +295,9 @@ void OPPROTO glue(op_jle_sub, SUFFIX)(void)
src2 = CC_SRC;
if ((DATA_STYPE)src1 <= (DATA_STYPE)src2)
JUMP_TB(PARAM1, 0, PARAM2);
JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 0, PARAM2);
else
JUMP_TB(PARAM1, 1, PARAM3);
JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 1, PARAM3);
FORCE_RET();
}
@@ -518,34 +518,73 @@ void OPPROTO op_update_bt_cc(void)
#endif
/* string operations */
/* XXX: maybe use lower level instructions to ease 16 bit / segment handling */
#define STRING_SUFFIX _fast
#define SI_ADDR (void *)ESI
#define DI_ADDR (void *)EDI
#define INC_SI() ESI += inc
#define INC_DI() EDI += inc
#define CX ECX
#define DEC_CX() ECX--
#include "op_string.h"
void OPPROTO glue(op_movl_T0_Dshift, SUFFIX)(void)
{
T0 = DF << SHIFT;
}
#define STRING_SUFFIX _a32
#define SI_ADDR (uint8_t *)A0 + ESI
#define DI_ADDR env->segs[R_ES].base + EDI
#define INC_SI() ESI += inc
#define INC_DI() EDI += inc
#define CX ECX
#define DEC_CX() ECX--
#include "op_string.h"
void OPPROTO glue(op_string_jz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST == 0)
JUMP_TB2(glue(op_string_jz_sub, SUFFIX), PARAM1, 1);
FORCE_RET();
}
#define STRING_SUFFIX _a16
#define SI_ADDR (uint8_t *)A0 + (ESI & 0xffff)
#define DI_ADDR env->segs[R_ES].base + (EDI & 0xffff)
#define INC_SI() ESI = (ESI & ~0xffff) | ((ESI + inc) & 0xffff)
#define INC_DI() EDI = (EDI & ~0xffff) | ((EDI + inc) & 0xffff)
#define CX (ECX & 0xffff)
#define DEC_CX() ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff)
#include "op_string.h"
void OPPROTO glue(op_string_jnz_sub, SUFFIX)(void)
{
if ((DATA_TYPE)CC_DST != 0)
JUMP_TB2(glue(op_string_jnz_sub, SUFFIX), PARAM1, 1);
FORCE_RET();
}
void OPPROTO glue(glue(op_string_jz_sub, SUFFIX), _im)(void)
{
if ((DATA_TYPE)CC_DST == 0) {
EIP = PARAM1;
if (env->eflags & TF_MASK) {
raise_exception(EXCP01_SSTP);
}
T0 = 0;
EXIT_TB();
}
FORCE_RET();
}
void OPPROTO glue(glue(op_string_jnz_sub, SUFFIX), _im)(void)
{
if ((DATA_TYPE)CC_DST != 0) {
EIP = PARAM1;
if (env->eflags & TF_MASK) {
raise_exception(EXCP01_SSTP);
}
T0 = 0;
EXIT_TB();
}
FORCE_RET();
}
#if DATA_BITS >= 16
void OPPROTO glue(op_jz_ecx, SUFFIX)(void)
{
if ((DATA_TYPE)ECX == 0)
JUMP_TB(glue(op_jz_ecx, SUFFIX), PARAM1, 1, PARAM2);
FORCE_RET();
}
void OPPROTO glue(glue(op_jz_ecx, SUFFIX), _im)(void)
{
if ((DATA_TYPE)ECX == 0) {
EIP = PARAM1;
if (env->eflags & TF_MASK) {
raise_exception(EXCP01_SSTP);
}
T0 = 0;
EXIT_TB();
}
FORCE_RET();
}
#endif
/* port I/O */
@@ -559,6 +598,16 @@ void OPPROTO glue(glue(op_in, SUFFIX), _T0_T1)(void)
T1 = glue(cpu_x86_in, SUFFIX)(env, T0 & 0xffff);
}
void OPPROTO glue(glue(op_in, SUFFIX), _DX_T0)(void)
{
T0 = glue(cpu_x86_in, SUFFIX)(env, EDX & 0xffff);
}
void OPPROTO glue(glue(op_out, SUFFIX), _DX_T0)(void)
{
glue(cpu_x86_out, SUFFIX)(env, EDX & 0xffff, T0);
}
#undef DATA_BITS
#undef SHIFT_MASK
#undef SIGN_MASK


@@ -44,6 +44,9 @@ void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void)
T0 = (T0 << count) | (T0 >> (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, SUFFIX)((uint8_t *)A0, T0);
#else
/* gcc 3.2 workaround. This is really a bug in gcc. */
asm volatile("" : : "r" (T0));
#endif
CC_SRC = (cc_table[CC_OP].compute_all() & ~(CC_O | CC_C)) |
(lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
@@ -63,6 +66,9 @@ void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1_cc)(void)
T0 = (T0 >> count) | (T0 << (DATA_BITS - count));
#ifdef MEM_WRITE
glue(st, SUFFIX)((uint8_t *)A0, T0);
#else
/* gcc 3.2 workaround. This is really a bug in gcc. */
asm volatile("" : : "r" (T0));
#endif
CC_SRC = (cc_table[CC_OP].compute_all() & ~(CC_O | CC_C)) |
(lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
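Editorial note on the workaround above: asm volatile("" : : "r" (T0)) emits no instruction; declaring T0 as a register input simply forces the compiler to materialise the rotated value at that point, which sidesteps the gcc 3.2 over-optimisation mentioned in the comment.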


@@ -480,9 +480,9 @@ disk image. QEMU stores the original raw disk image name and its
modified time in the COW disk image so that chances of mistakes are
reduced.
If raw disk image is not read-only, by pressing @key{C-a s} you can
flush the COW disk image back into the raw disk image, as in snapshot
mode.
If the raw disk image is not read-only, by pressing @key{C-a s} you
can flush the COW disk image back into the raw disk image, as in
snapshot mode.
COW disk images can also be created without a corresponding raw disk
image. It is useful to have a big initial virtual disk image without
@@ -602,7 +602,7 @@ QEMU has a primitive support to work with gdb, so that you can do
In order to use gdb, launch vl with the '-s' option. It will wait for a
gdb connection:
@example
> vl -s arch/i386/boot/bzImage initrd-2.4.20.img root=/dev/ram0 ramdisk_size=6144
> vl -s arch/i386/boot/bzImage -hda root-2.4.20.img root=/dev/hda
Connected to host network interface: tun0
Waiting gdb connection on port 1234
@end example
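Editorial note: on the gdb side, the corresponding step is the standard command "target remote localhost:1234" (the port matching the "Waiting gdb connection" message above); that is ordinary gdb usage rather than something added by this patch.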

sdl.c (new file, 275 lines)

@@ -0,0 +1,275 @@
/*
* QEMU SDL display driver
*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <getopt.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <sys/time.h>
#include <malloc.h>
#include <termios.h>
#include <sys/poll.h>
#include <errno.h>
#include <sys/wait.h>
#include <netinet/in.h>
#include <SDL.h>
#include "cpu-i386.h"
#include "exec.h"
#include "vl.h"
static SDL_Surface *screen;
static int gui_grab; /* if true, all keyboard/mouse events are grabbed */
static void sdl_update(DisplayState *ds, int x, int y, int w, int h)
{
SDL_UpdateRect(screen, x, y, w, h);
}
static void sdl_resize(DisplayState *ds, int w, int h)
{
int flags;
// printf("resizing to %d %d\n", w, h);
flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
flags |= SDL_RESIZABLE;
screen = SDL_SetVideoMode(w, h, 0, flags);
if (!screen) {
fprintf(stderr, "Could not open SDL display\n");
exit(1);
}
ds->data = screen->pixels;
ds->linesize = screen->pitch;
ds->depth = screen->format->BitsPerPixel;
}
static const uint32_t x_keycode_to_pc_keycode[61] = {
0x47e0, /* 97 Home */
0x48e0, /* 98 Up */
0x49e0, /* 99 PgUp */
0x4be0, /* 100 Left */
0x4c, /* 101 KP-5 */
0x4de0, /* 102 Right */
0x4fe0, /* 103 End */
0x50e0, /* 104 Down */
0x51e0, /* 105 PgDn */
0x52e0, /* 106 Ins */
0x53e0, /* 107 Del */
0x1ce0, /* 108 Enter */
0x1de0, /* 109 Ctrl-R */
0x451de1, /* 110 Pause */
0x37e0, /* 111 Print */
0x35e0, /* 112 Divide */
0x38e0, /* 113 Alt-R */
0x46e0, /* 114 Break */
0x0, /* 115 */
0x0, /* 116 */
0x0, /* 117 */
0x0, /* 118 */
0x0, /* 119 */
0x0, /* 120 */
0x0, /* 121 */
0x0, /* 122 */
0x0, /* 123 */
0x0, /* 124 */
0x0, /* 125 */
0x0, /* 126 */
0x0, /* 127 */
0x0, /* 128 */
0x0, /* 129 */
0x0, /* 130 */
0x0, /* 131 */
0x0, /* 132 */
0x0, /* 133 */
0x0, /* 134 */
0x0, /* 135 */
0x47, /* 136 KP_7 */
0x48, /* 137 KP_8 */
0x49, /* 138 KP_9 */
0x4b, /* 139 KP_4 */
0x4c, /* 140 KP_5 */
0x4d, /* 141 KP_6 */
0x4f, /* 142 KP_1 */
0x50, /* 143 KP_2 */
0x51, /* 144 KP_3 */
0x52, /* 145 KP_0 */
0x53, /* 146 KP_. */
0x47, /* 147 KP_HOME */
0x48, /* 148 KP_UP */
0x49, /* 149 KP_PgUp */
0x4b, /* 150 KP_Left */
0x4c, /* 151 KP_ */
0x4d, /* 152 KP_Right */
0x4f, /* 153 KP_End */
0x50, /* 154 KP_Down */
0x51, /* 155 KP_PgDn */
0x52, /* 156 KP_Ins */
0x53, /* 157 KP_Del */
};
static void sdl_process_key(SDL_KeyboardEvent *ev)
{
int keycode, v;
/* XXX: not portable, but avoids complicated mappings */
keycode = ev->keysym.scancode;
if (keycode < 9) {
keycode = 0;
} else if (keycode < 97) {
keycode -= 8; /* just an offset */
} else if (keycode < 158) {
/* use conversion table */
keycode = x_keycode_to_pc_keycode[keycode - 97];
} else {
keycode = 0;
}
/* now send the key code */
while (keycode != 0) {
v = keycode & 0xff;
if (ev->type == SDL_KEYUP)
v |= 0x80;
kbd_put_keycode(v);
keycode >>= 8;
}
}
static void sdl_grab_start(void)
{
SDL_WM_SetCaption("QEMU - Press Ctrl-Shift to exit grab", "QEMU");
SDL_ShowCursor(0);
SDL_WM_GrabInput(SDL_GRAB_ON);
/* dummy read to avoid moving the mouse */
SDL_GetRelativeMouseState(NULL, NULL);
gui_grab = 1;
}
static void sdl_grab_end(void)
{
SDL_WM_SetCaption("QEMU", "QEMU");
SDL_WM_GrabInput(SDL_GRAB_OFF);
SDL_ShowCursor(1);
gui_grab = 0;
}
static void sdl_send_mouse_event(void)
{
int dx, dy, dz, state, buttons;
state = SDL_GetRelativeMouseState(&dx, &dy);
buttons = 0;
if (state & SDL_BUTTON(SDL_BUTTON_LEFT))
buttons |= MOUSE_EVENT_LBUTTON;
if (state & SDL_BUTTON(SDL_BUTTON_RIGHT))
buttons |= MOUSE_EVENT_RBUTTON;
if (state & SDL_BUTTON(SDL_BUTTON_MIDDLE))
buttons |= MOUSE_EVENT_MBUTTON;
/* XXX: test wheel */
dz = 0;
if (state & SDL_BUTTON(SDL_BUTTON_WHEELUP))
dz--;
if (state & SDL_BUTTON(SDL_BUTTON_WHEELDOWN))
dz++;
kbd_mouse_event(dx, dy, dz, buttons);
}
static void sdl_refresh(DisplayState *ds)
{
SDL_Event ev1, *ev = &ev1;
vga_update_display();
while (SDL_PollEvent(ev)) {
switch (ev->type) {
case SDL_VIDEOEXPOSE:
sdl_update(ds, 0, 0, screen->w, screen->h);
break;
case SDL_KEYDOWN:
case SDL_KEYUP:
if (ev->type == SDL_KEYDOWN) {
if ((SDL_GetModState() & (KMOD_LSHIFT | KMOD_LCTRL)) ==
(KMOD_LSHIFT | KMOD_LCTRL)) {
/* exit/enter grab if pressing Ctrl-Shift */
if (!gui_grab)
sdl_grab_start();
else
sdl_grab_end();
}
}
sdl_process_key(&ev->key);
break;
case SDL_QUIT:
reset_requested = 1;
break;
case SDL_MOUSEMOTION:
if (gui_grab) {
sdl_send_mouse_event();
}
break;
case SDL_MOUSEBUTTONDOWN:
case SDL_MOUSEBUTTONUP:
{
SDL_MouseButtonEvent *bev = &ev->button;
if (!gui_grab) {
if (ev->type == SDL_MOUSEBUTTONDOWN &&
(bev->state & SDL_BUTTON_LMASK)) {
/* start grabbing all events */
sdl_grab_start();
}
} else {
sdl_send_mouse_event();
}
}
break;
default:
break;
}
}
}
void sdl_display_init(DisplayState *ds)
{
int flags;
flags = SDL_INIT_VIDEO | SDL_INIT_NOPARACHUTE;
if (SDL_Init (flags)) {
fprintf(stderr, "Could not initialize SDL - exiting\n");
exit(1);
}
ds->dpy_update = sdl_update;
ds->dpy_resize = sdl_resize;
ds->dpy_refresh = sdl_refresh;
sdl_resize(ds, 640, 400);
SDL_WM_SetCaption("QEMU", "QEMU");
SDL_EnableKeyRepeat(250, 50);
gui_grab = 0;
}

softmmu_header.h (new file, 111 lines)

@@ -0,0 +1,111 @@
/*
* Software MMU support
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if DATA_SIZE == 8
#define SUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
#else
#error unsupported data size
#endif
#if MEMUSER == 0
#define MEMSUFFIX _kernel
#else
#define MEMSUFFIX _user
#endif
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE int
#endif
#if MEMUSER == 0
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr);
void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE v);
#endif
static inline int glue(glue(ldu, SUFFIX), MEMSUFFIX)(void *ptr)
{
int index;
RES_TYPE res;
unsigned long addr, physaddr;
addr = (unsigned long)ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
if (__builtin_expect(env->tlb_read[MEMUSER][index].address !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
res = glue(glue(__ld, SUFFIX), _mmu)(addr);
} else {
physaddr = addr + env->tlb_read[MEMUSER][index].addend;
res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
}
return res;
}
#if DATA_SIZE <= 2
static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
{
int res, index;
unsigned long addr, physaddr;
addr = (unsigned long)ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
if (__builtin_expect(env->tlb_read[MEMUSER][index].address !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
res = (DATA_STYPE)glue(glue(__ld, SUFFIX), _mmu)(addr);
} else {
physaddr = addr + env->tlb_read[MEMUSER][index].addend;
res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
}
return res;
}
#endif
static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
{
int index;
unsigned long addr, physaddr;
addr = (unsigned long)ptr;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
if (__builtin_expect(env->tlb_write[MEMUSER][index].address !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
glue(glue(__st, SUFFIX), _mmu)(addr, v);
} else {
physaddr = addr + env->tlb_write[MEMUSER][index].addend;
glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
}
}
#undef RES_TYPE
#undef DATA_TYPE
#undef DATA_STYPE
#undef SUFFIX
#undef DATA_SIZE
#undef MEMSUFFIX
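
The header above relies on token pasting to stamp out one accessor per data size and memory space. A minimal, self-contained sketch of how glue()/xglue() build a name such as ldul_kernel for DATA_SIZE == 4 and MEMUSER == 0 (the placeholder body and main() are illustrative only):

#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)

#define SUFFIX    l        /* as for DATA_SIZE == 4 */
#define MEMSUFFIX _kernel  /* as for MEMUSER == 0 */

/* expands to: static int ldul_kernel(void) */
static int glue(glue(ldu, SUFFIX), MEMSUFFIX)(void)
{
    return 42;  /* placeholder body; the real header does a TLB lookup */
}

int main(void)
{
    printf("ldul_kernel() = %d\n", ldul_kernel());
    return 0;
}
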

softmmu_template.h (new file, 241 lines)

@@ -0,0 +1,241 @@
/*
* Software MMU support
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define DATA_SIZE (1 << SHIFT)
#if DATA_SIZE == 8
#define SUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr);
static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
unsigned long tlb_addr)
{
DATA_TYPE res;
int index;
index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
res = io_mem_read[index][SHIFT](physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
res = (uint64_t)io_mem_read[index][2](physaddr) << 32;
res |= io_mem_read[index][2](physaddr + 4);
#else
res = io_mem_read[index][2](physaddr);
res |= (uint64_t)io_mem_read[index][2](physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
return res;
}
static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
DATA_TYPE val,
unsigned long tlb_addr)
{
int index;
index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
io_mem_write[index][SHIFT](physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
io_mem_write[index][2](physaddr, val >> 32);
io_mem_write[index][2](physaddr + 4, val);
#else
io_mem_write[index][2](physaddr, val);
io_mem_write[index][2](physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
/* handle all cases except unaligned access which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
{
DATA_TYPE res;
int is_user, index;
unsigned long physaddr, tlb_addr;
void *retaddr;
/* test if there is match for unaligned or IO access */
/* XXX: could be done more in the memory macro, in a non-portable way */
is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_read[is_user][index].address;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_read[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
} else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
retaddr = __builtin_return_address(0);
res = glue(slow_ld, SUFFIX)(addr, retaddr);
} else {
/* unaligned access in the same page */
res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
}
} else {
/* the page is not in the TLB : fill it */
retaddr = __builtin_return_address(0);
tlb_fill(addr, 0, retaddr);
goto redo;
}
return res;
}
/* handle all unaligned cases */
static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
{
DATA_TYPE res, res1, res2;
int is_user, index, shift;
unsigned long physaddr, tlb_addr, addr1, addr2;
is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_read[is_user][index].address;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_read[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
} else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* slow unaligned access (it spans two pages) */
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
res1 = glue(slow_ld, SUFFIX)(addr1, retaddr);
res2 = glue(slow_ld, SUFFIX)(addr2, retaddr);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
} else {
/* unaligned/aligned access in the same page */
res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
}
} else {
/* the page is not in the TLB : fill it */
tlb_fill(addr, 0, retaddr);
goto redo;
}
return res;
}
void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val)
{
unsigned long physaddr, tlb_addr;
void *retaddr;
int is_user, index;
is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_write[is_user][index].address;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_read[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
} else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
retaddr = __builtin_return_address(0);
glue(slow_st, SUFFIX)(addr, val, retaddr);
} else {
/* aligned/unaligned access in the same page */
glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
}
} else {
/* the page is not in the TLB : fill it */
retaddr = __builtin_return_address(0);
tlb_fill(addr, 1, retaddr);
goto redo;
}
}
/* handles all unaligned cases */
static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
void *retaddr)
{
unsigned long physaddr, tlb_addr;
int is_user, index, i;
is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_write[is_user][index].address;
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_read[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
/* IO access */
if ((addr & (DATA_SIZE - 1)) != 0)
goto do_unaligned_access;
glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
} else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
for(i = 0;i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
slow_stb(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), retaddr);
#else
slow_stb(addr + i, val >> (i * 8), retaddr);
#endif
}
} else {
/* aligned/unaligned access in the same page */
glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
}
} else {
/* the page is not in the TLB : fill it */
tlb_fill(addr, 1, retaddr);
goto redo;
}
}
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
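
The little-endian branch of slow_ld above stitches an unaligned value together from two aligned loads with a pair of shifts. A stand-alone sketch of that combination for a 32-bit load from a byte buffer (load_unaligned_le32 is an illustrative helper, not a QEMU function; it assumes a little-endian host and a nonzero shift, as on the unaligned path):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t load_unaligned_le32(const uint8_t *buf, unsigned addr)
{
    uint32_t res1, res2;
    unsigned addr1 = addr & ~3u;        /* first aligned word */
    unsigned addr2 = addr1 + 4;         /* following aligned word */
    unsigned shift = (addr & 3u) * 8;   /* nonzero on the unaligned path */

    memcpy(&res1, buf + addr1, 4);      /* assumes a little-endian host */
    memcpy(&res2, buf + addr2, 4);
    return (res1 >> shift) | (res2 << (32 - shift));
}

int main(void)
{
    uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
    /* bytes 1..4 are 22 33 44 55, i.e. 0x55443322 little-endian */
    printf("0x%08x\n", load_unaligned_le32(buf, 1));
    return 0;
}
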


@@ -25,3 +25,4 @@ struct target_pt_regs {
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
#define ARM_SYSCALL_BASE 0x900000


@@ -1,4 +1,4 @@
include ../config.mak
include ../config-host.mak
CFLAGS=-Wall -O2 -g
LDFLAGS=
@@ -8,7 +8,7 @@ TESTS=testclone testsig testthread sha1-i386 test-i386 runcom
endif
TESTS+=sha1 test_path
QEMU=../qemu
QEMU=../i386/qemu
all: $(TESTS)


@@ -6,12 +6,14 @@
#include <math.h>
#include <signal.h>
#include <setjmp.h>
#include <errno.h>
#include <sys/ucontext.h>
#include <sys/mman.h>
#include <asm/vm86.h>
#define TEST_CMOV 0
#define TEST_FCOMI 0
//#define LINUX_VM86_IOPL_FIX
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
@@ -855,7 +857,6 @@ void test_segs(void)
#endif
/* do some tests with fs or gs */
asm volatile ("movl %0, %%fs" : : "r" (MK_SEL(1)));
asm volatile ("movl %0, %%gs" : : "r" (MK_SEL(2)));
seg_data1[1] = 0xaa;
seg_data2[1] = 0x55;
@@ -863,7 +864,12 @@ void test_segs(void)
asm volatile ("fs movzbl 0x1, %0" : "=r" (res));
printf("FS[1] = %02x\n", res);
asm volatile ("gs movzbl 0x1, %0" : "=r" (res));
asm volatile ("pushl %%gs\n"
"movl %1, %%gs\n"
"gs movzbl 0x1, %0\n"
"popl %%gs\n"
: "=r" (res)
: "r" (MK_SEL(2)));
printf("GS[1] = %02x\n", res);
/* tests with ds/ss (implicit segment case) */
@@ -965,6 +971,11 @@ void test_misc(void)
asm volatile ("pushl $12345432 ; pushl $0x9abcdef ; popl (%%esp) ; popl %0"
: "=g" (res));
printf("popl esp=%x\n", res);
/* specific popw test */
asm volatile ("pushl $12345432 ; pushl $0x9abcdef ; popw (%%esp) ; addl $2, %%esp ; popl %0"
: "=g" (res));
printf("popw esp=%x\n", res);
}
uint8_t str_buffer[4096];
@@ -1096,7 +1107,7 @@ void test_vm86(void)
switch(VM86_TYPE(ret)) {
case VM86_INTx:
{
int int_num, ah;
int int_num, ah, v;
int_num = VM86_ARG(ret);
if (int_num != 0x21)
@@ -1124,8 +1135,12 @@ void test_vm86(void)
r->eax = (r->eax & ~0xff) | '$';
}
break;
case 0xff: /* extension: write hex number in edx */
printf("%08x\n", (int)r->edx);
case 0xff: /* extension: write eflags number in edx */
v = (int)r->edx;
#ifndef LINUX_VM86_IOPL_FIX
v &= ~0x3000;
#endif
printf("%08x\n", v);
break;
default:
unknown_int:
@@ -1250,6 +1265,8 @@ void test_exceptions(void)
printf("PF exception:\n");
if (setjmp(jmp_env) == 0) {
val = 1;
/* we add a nop to test a weird PC retrieval case */
asm volatile ("nop");
/* now store in an invalid address */
*(char *)0x1234 = 1;
}
@@ -1344,6 +1361,90 @@ void test_exceptions(void)
printf("val=0x%x\n", val);
}
/* specific precise single step test */
void sig_trap_handler(int sig, siginfo_t *info, void *puc)
{
struct ucontext *uc = puc;
printf("EIP=0x%08x\n", uc->uc_mcontext.gregs[REG_EIP]);
}
const uint8_t sstep_buf1[4] = { 1, 2, 3, 4};
uint8_t sstep_buf2[4];
void test_single_step(void)
{
struct sigaction act;
volatile int val;
int i;
val = 0;
act.sa_sigaction = sig_trap_handler;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
sigaction(SIGTRAP, &act, NULL);
asm volatile ("pushf\n"
"orl $0x00100, (%%esp)\n"
"popf\n"
"movl $0xabcd, %0\n"
/* jmp test */
"movl $3, %%ecx\n"
"1:\n"
"addl $1, %0\n"
"decl %%ecx\n"
"jnz 1b\n"
/* movsb: the single step should stop at each movsb iteration */
"movl $sstep_buf1, %%esi\n"
"movl $sstep_buf2, %%edi\n"
"movl $0, %%ecx\n"
"rep movsb\n"
"movl $3, %%ecx\n"
"rep movsb\n"
"movl $1, %%ecx\n"
"rep movsb\n"
/* cmpsb: the single step should stop at each cmpsb iteration */
"movl $sstep_buf1, %%esi\n"
"movl $sstep_buf2, %%edi\n"
"movl $0, %%ecx\n"
"rep cmpsb\n"
"movl $4, %%ecx\n"
"rep cmpsb\n"
/* getpid() syscall: single step should skip one
instruction */
"movl $20, %%eax\n"
"int $0x80\n"
"movl $0, %%eax\n"
/* when modifying SS, trace is not done on the next
instruction */
"movl %%ss, %%ecx\n"
"movl %%ecx, %%ss\n"
"addl $1, %0\n"
"movl $1, %%eax\n"
"movl %%ecx, %%ss\n"
"jmp 1f\n"
"addl $1, %0\n"
"1:\n"
"movl $1, %%eax\n"
"pushl %%ecx\n"
"popl %%ss\n"
"addl $1, %0\n"
"movl $1, %%eax\n"
"pushf\n"
"andl $~0x00100, (%%esp)\n"
"popf\n"
: "=m" (val)
:
: "cc", "memory", "eax", "ecx", "esi", "edi");
printf("val=%d\n", val);
for(i = 0; i < 4; i++)
printf("sstep_buf2[%d] = %d\n", i, sstep_buf2[i]);
}
/* self modifying code test */
uint8_t code[] = {
0xb8, 0x1, 0x00, 0x00, 0x00, /* movl $1, %eax */
@@ -1390,5 +1491,6 @@ int main(int argc, char **argv)
test_vm86();
test_exceptions();
test_self_modifying_code();
test_single_step();
return 0;
}

thunk.h (75 lines changed)

@@ -23,43 +23,7 @@
#include <inttypes.h>
#include "config.h"
#ifdef HAVE_BYTESWAP_H
#include <byteswap.h>
#else
#define bswap_16(x) \
({ \
uint16_t __x = (x); \
((uint16_t)( \
(((uint16_t)(__x) & (uint16_t)0x00ffU) << 8) | \
(((uint16_t)(__x) & (uint16_t)0xff00U) >> 8) )); \
})
#define bswap_32(x) \
({ \
uint32_t __x = (x); \
((uint32_t)( \
(((uint32_t)(__x) & (uint32_t)0x000000ffUL) << 24) | \
(((uint32_t)(__x) & (uint32_t)0x0000ff00UL) << 8) | \
(((uint32_t)(__x) & (uint32_t)0x00ff0000UL) >> 8) | \
(((uint32_t)(__x) & (uint32_t)0xff000000UL) >> 24) )); \
})
#define bswap_64(x) \
({ \
uint64_t __x = (x); \
((uint64_t)( \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000000000ffULL) << 56) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000000000ff00ULL) << 40) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000000000ff0000ULL) << 24) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x00000000ff000000ULL) << 8) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x000000ff00000000ULL) >> 8) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x0000ff0000000000ULL) >> 24) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0x00ff000000000000ULL) >> 40) | \
(uint64_t)(((uint64_t)(__x) & (uint64_t)0xff00000000000000ULL) >> 56) )); \
})
#endif
#include "bswap.h"
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
@@ -68,44 +32,7 @@
/* XXX: autoconf */
#define TARGET_LONG_BITS 32
#if defined(__alpha__) || defined (__ia64__)
#define HOST_LONG_BITS 64
#else
#define HOST_LONG_BITS 32
#endif
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
#define HOST_LONG_SIZE (HOST_LONG_BITS / 8)
static inline uint16_t bswap16(uint16_t x)
{
return bswap_16(x);
}
static inline uint32_t bswap32(uint32_t x)
{
return bswap_32(x);
}
static inline uint64_t bswap64(uint64_t x)
{
return bswap_64(x);
}
static inline void bswap16s(uint16_t *s)
{
*s = bswap16(*s);
}
static inline void bswap32s(uint32_t *s)
{
*s = bswap32(*s);
}
static inline void bswap64s(uint64_t *s)
{
*s = bswap64(*s);
}
#ifdef BSWAP_NEEDED
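
With the open-coded byte-swap macros gone, thunk.h now gets bswap16/32/64 from bswap.h and only applies them when host and target endianness differ (BSWAP_NEEDED). A rough sketch of that pattern for the 32-bit case (this tswap32 is a simplified stand-in, not the header's exact definition):

#include <stdint.h>
#include <stdio.h>

static inline uint32_t bswap32(uint32_t x)
{
    return ((x & 0x000000ffu) << 24) |
           ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) |
           ((x & 0xff000000u) >> 24);
}

#define BSWAP_NEEDED   /* pretend host and target endianness differ */

static inline uint32_t tswap32(uint32_t x)
{
#ifdef BSWAP_NEEDED
    return bswap32(x);
#else
    return x;
#endif
}

int main(void)
{
    printf("0x%08x\n", tswap32(0x11223344));  /* prints 0x44332211 */
    return 0;
}
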


@@ -34,6 +34,8 @@ typedef struct DisasContext {
struct TranslationBlock *tb;
} DisasContext;
#define DISAS_JUMP_NEXT 4
/* XXX: move that elsewhere */
static uint16_t *gen_opc_ptr;
static uint32_t *gen_opparam_ptr;
@@ -333,10 +335,11 @@ static void disas_arm_insn(DisasContext *s)
/* if not always execute, we generate a conditional jump to
next instruction */
gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
s->is_jmp = 1;
s->is_jmp = DISAS_JUMP_NEXT;
}
if ((insn & 0x0c000000) == 0 &&
(insn & 0x00000090) != 0x90) {
if (((insn & 0x0e000000) == 0 &&
(insn & 0x00000090) != 0x90) ||
((insn & 0x0e000000) == (1 << 25))) {
int set_cc, logic_cc, shiftop;
op1 = (insn >> 21) & 0xf;
@@ -367,7 +370,7 @@ static void disas_arm_insn(DisasContext *s)
}
}
} else {
rs = (insn >> 16) & 0xf;
rs = (insn >> 8) & 0xf;
gen_movl_T0_reg(s, rs);
if (logic_cc) {
gen_shift_T1_T0_cc[shiftop]();
@@ -385,10 +388,14 @@ static void disas_arm_insn(DisasContext *s)
case 0x00:
gen_op_andl_T0_T1();
gen_movl_reg_T0(s, rd);
if (logic_cc)
gen_op_logic_T0_cc();
break;
case 0x01:
gen_op_xorl_T0_T1();
gen_movl_reg_T0(s, rd);
if (logic_cc)
gen_op_logic_T0_cc();
break;
case 0x02:
if (set_cc)
@@ -435,11 +442,13 @@ static void disas_arm_insn(DisasContext *s)
case 0x08:
if (set_cc) {
gen_op_andl_T0_T1();
gen_op_logic_T0_cc();
}
break;
case 0x09:
if (set_cc) {
gen_op_xorl_T0_T1();
gen_op_logic_T0_cc();
}
break;
case 0x0a:
@@ -455,22 +464,28 @@ static void disas_arm_insn(DisasContext *s)
case 0x0c:
gen_op_orl_T0_T1();
gen_movl_reg_T0(s, rd);
if (logic_cc)
gen_op_logic_T0_cc();
break;
case 0x0d:
gen_movl_reg_T1(s, rd);
if (logic_cc)
gen_op_logic_T1_cc();
break;
case 0x0e:
gen_op_bicl_T0_T1();
gen_movl_reg_T0(s, rd);
if (logic_cc)
gen_op_logic_T0_cc();
break;
default:
case 0x0f:
gen_op_notl_T1();
gen_movl_reg_T1(s, rd);
if (logic_cc)
gen_op_logic_T1_cc();
break;
}
if (logic_cc)
gen_op_logic_cc();
} else {
/* other instructions */
op1 = (insn >> 24) & 0xf;
@@ -494,7 +509,7 @@ static void disas_arm_insn(DisasContext *s)
gen_op_addl_T0_T1();
}
if (insn & (1 << 20))
gen_op_logic_cc();
gen_op_logic_T0_cc();
gen_movl_reg_T0(s, rd);
} else {
/* 64 bit mul */
@@ -551,10 +566,12 @@ static void disas_arm_insn(DisasContext *s)
/* store */
gen_op_stw_T0_T1();
}
if (!(insn & (1 << 24)))
if (!(insn & (1 << 24))) {
gen_add_datah_offset(s, insn);
if (insn & (1 << 21))
gen_movl_reg_T1(s, rn);
} else if (insn & (1 << 21)) {
gen_movl_reg_T1(s, rn);
}
}
break;
case 0x4:
@@ -582,40 +599,94 @@ static void disas_arm_insn(DisasContext *s)
else
gen_op_stl_T0_T1();
}
if (!(insn & (1 << 24)))
if (!(insn & (1 << 24))) {
gen_add_data_offset(s, insn);
if (insn & (1 << 21))
gen_movl_reg_T1(s, rn);
} else if (insn & (1 << 21)) {
gen_movl_reg_T1(s, rn);
}
break;
case 0x08:
case 0x09:
/* load/store multiple words */
if (insn & (1 << 22))
goto illegal_op; /* only usable in supervisor mode */
rn = (insn >> 16) & 0xf;
gen_movl_T1_reg(s, rn);
val = 4;
if (!(insn & (1 << 23)))
val = -val;
for(i=0;i<16;i++) {
if (insn & (1 << i)) {
if (insn & (1 << 24))
gen_op_addl_T1_im(val);
if (insn & (1 << 20)) {
/* load */
gen_op_ldl_T0_T1();
gen_movl_reg_T0(s, i);
{
int j, n;
/* load/store multiple words */
/* XXX: store correct base if write back */
if (insn & (1 << 22))
goto illegal_op; /* only usable in supervisor mode */
rn = (insn >> 16) & 0xf;
gen_movl_T1_reg(s, rn);
/* compute total size */
n = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i))
n++;
}
/* XXX: test invalid n == 0 case ? */
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
gen_op_addl_T1_im(4);
} else {
/* store */
gen_movl_T0_reg(s, i);
gen_op_stl_T0_T1();
/* post increment */
}
if (!(insn & (1 << 24)))
gen_op_addl_T1_im(val);
} else {
if (insn & (1 << 24)) {
/* pre decrement */
gen_op_addl_T1_im(-(n * 4));
} else {
/* post decrement */
if (n != 1)
gen_op_addl_T1_im(-((n - 1) * 4));
}
}
j = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i)) {
if (insn & (1 << 20)) {
/* load */
gen_op_ldl_T0_T1();
gen_movl_reg_T0(s, i);
} else {
/* store */
if (i == 15) {
/* special case: r15 = PC + 12 */
val = (long)s->pc + 8;
gen_op_movl_TN_im[0](val);
} else {
gen_movl_T0_reg(s, i);
}
gen_op_stl_T0_T1();
}
j++;
/* no need to add after the last transfer */
if (j != n)
gen_op_addl_T1_im(4);
}
}
if (insn & (1 << 21)) {
/* write back */
if (insn & (1 << 23)) {
if (insn & (1 << 24)) {
/* pre increment */
} else {
/* post increment */
gen_op_addl_T1_im(4);
}
} else {
if (insn & (1 << 24)) {
/* pre decrement */
if (n != 1)
gen_op_addl_T1_im(-((n - 1) * 4));
} else {
/* post decrement */
gen_op_addl_T1_im(-(n * 4));
}
}
gen_movl_reg_T1(s, rn);
}
}
if (insn & (1 << 21))
gen_movl_reg_T1(s, rn);
break;
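
The rewritten load/store-multiple case first counts the registers in the list and then biases the base address according to the U (bit 23) and P (bit 24) addressing-mode bits. The same bookkeeping as a stand-alone helper (ldm_stm_first_addr is illustrative only, assuming the usual ARM IA/IB/DA/DB semantics):

#include <stdio.h>

/* base: base register value, n: number of registers in the list,
   u: bit 23 (1 = increment), p: bit 24 (1 = pre-indexed) */
static unsigned long ldm_stm_first_addr(unsigned long base, int n, int u, int p)
{
    if (u)
        return p ? base + 4            /* IB: pre-increment  */
                 : base;               /* IA: post-increment */
    else
        return p ? base - n * 4        /* DB: pre-decrement  */
                 : base - (n - 1) * 4; /* DA: post-decrement */
}

int main(void)
{
    unsigned long base = 0x1000;
    printf("IA 0x%lx  IB 0x%lx  DA 0x%lx  DB 0x%lx\n",
           ldm_stm_first_addr(base, 4, 1, 0),
           ldm_stm_first_addr(base, 4, 1, 1),
           ldm_stm_first_addr(base, 4, 0, 0),
           ldm_stm_first_addr(base, 4, 0, 1));
    return 0;
}
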
case 0xa:
case 0xb:
@@ -641,6 +712,66 @@ static void disas_arm_insn(DisasContext *s)
gen_op_swi();
s->is_jmp = DISAS_JUMP;
break;
case 0xc:
case 0xd:
rd = (insn >> 12) & 0x7;
rn = (insn >> 16) & 0xf;
gen_movl_T1_reg(s, rn);
val = (insn) & 0xff;
if (!(insn & (1 << 23)))
val = -val;
switch((insn >> 8) & 0xf) {
case 0x1:
/* load/store */
if ((insn & (1 << 24)))
gen_op_addl_T1_im(val);
/* XXX: do it */
if (!(insn & (1 << 24)))
gen_op_addl_T1_im(val);
if (insn & (1 << 21))
gen_movl_reg_T1(s, rn);
break;
case 0x2:
{
int n, i;
/* load store multiple */
if ((insn & (1 << 24)))
gen_op_addl_T1_im(val);
switch(insn & 0x00408000) {
case 0x00008000: n = 1; break;
case 0x00400000: n = 2; break;
case 0x00408000: n = 3; break;
default: n = 4; break;
}
for(i = 0;i < n; i++) {
/* XXX: do it */
}
if (!(insn & (1 << 24)))
gen_op_addl_T1_im(val);
if (insn & (1 << 21))
gen_movl_reg_T1(s, rn);
}
break;
default:
goto illegal_op;
}
break;
case 0x0e:
/* float ops */
/* XXX: do it */
switch((insn >> 20) & 0xf) {
case 0x2: /* wfs */
break;
case 0x3: /* rfs */
break;
case 0x4: /* wfc */
break;
case 0x5: /* rfc */
break;
default:
goto illegal_op;
}
break;
default:
illegal_op:
gen_op_movl_T0_im((long)s->pc - 4);
@@ -655,7 +786,9 @@ static void disas_arm_insn(DisasContext *s)
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
static inline int gen_intermediate_code_internal(TranslationBlock *tb, int search_pc)
static inline int gen_intermediate_code_internal(CPUState *env,
TranslationBlock *tb,
int search_pc)
{
DisasContext dc1, *dc = &dc1;
uint16_t *gen_opc_end;
@@ -681,22 +814,27 @@ static inline int gen_intermediate_code_internal(TranslationBlock *tb, int searc
lj++;
while (lj < j)
gen_opc_instr_start[lj++] = 0;
gen_opc_pc[lj] = (uint32_t)dc->pc;
gen_opc_instr_start[lj] = 1;
}
gen_opc_pc[lj] = (uint32_t)dc->pc;
gen_opc_instr_start[lj] = 1;
}
disas_arm_insn(dc);
} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
(dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
/* we must store the eflags state if it is not already done */
if (dc->is_jmp != DISAS_TB_JUMP &&
dc->is_jmp != DISAS_JUMP) {
gen_op_movl_T0_im((long)dc->pc - 4);
gen_op_movl_reg_TN[0][15]();
}
if (dc->is_jmp != DISAS_TB_JUMP) {
switch(dc->is_jmp) {
case DISAS_JUMP_NEXT:
case DISAS_NEXT:
gen_op_jmp((long)dc->tb, (long)dc->pc);
break;
default:
case DISAS_JUMP:
/* indicate that the hash table must be used to find the next TB */
gen_op_movl_T0_0();
gen_op_exit_tb();
break;
case DISAS_TB_JUMP:
/* nothing more to generate */
break;
}
*gen_opc_ptr = INDEX_op_end;
@@ -717,14 +855,14 @@ static inline int gen_intermediate_code_internal(TranslationBlock *tb, int searc
return 0;
}
int gen_intermediate_code(TranslationBlock *tb)
int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
return gen_intermediate_code_internal(tb, 0);
return gen_intermediate_code_internal(env, tb, 0);
}
int gen_intermediate_code_pc(TranslationBlock *tb)
int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
return gen_intermediate_code_internal(tb, 1);
return gen_intermediate_code_internal(env, tb, 1);
}
CPUARMState *cpu_arm_init(void)
@@ -756,5 +894,10 @@ void cpu_arm_dump_state(CPUARMState *env, FILE *f, int flags)
else
fprintf(f, " ");
}
fprintf(f, "CPSR=%08x", env->cpsr);
fprintf(f, "PSR=%08x %c%c%c%c\n",
env->cpsr,
env->cpsr & (1 << 31) ? 'N' : '-',
env->cpsr & (1 << 30) ? 'Z' : '-',
env->cpsr & (1 << 29) ? 'C' : '-',
env->cpsr & (1 << 28) ? 'V' : '-');
}

File diff suppressed because it is too large.


@@ -107,19 +107,24 @@ void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf)
'*gen_code_size_ptr' contains the size of the generated code (host
code).
*/
int cpu_gen_code(TranslationBlock *tb,
int cpu_gen_code(CPUState *env, TranslationBlock *tb,
int max_code_size, int *gen_code_size_ptr)
{
uint8_t *gen_code_buf;
int gen_code_size;
if (gen_intermediate_code(tb) < 0)
if (gen_intermediate_code(env, tb) < 0)
return -1;
/* generate machine code */
tb->tb_next_offset[0] = 0xffff;
tb->tb_next_offset[1] = 0xffff;
gen_code_buf = tb->tc_ptr;
#ifdef USE_DIRECT_JUMP
/* the following two entries are optional (only used for string ops) */
tb->tb_jmp_offset[2] = 0xffff;
tb->tb_jmp_offset[3] = 0xffff;
#endif
gen_code_size = dyngen_code(gen_code_buf, tb->tb_next_offset,
#ifdef USE_DIRECT_JUMP
tb->tb_jmp_offset,
@@ -154,7 +159,7 @@ int cpu_restore_state(TranslationBlock *tb,
unsigned long tc_ptr;
uint16_t *opc_ptr;
if (gen_intermediate_code_pc(tb) < 0)
if (gen_intermediate_code_pc(env, tb) < 0)
return -1;
/* find opc index corresponding to search_pc */
@@ -182,13 +187,14 @@ int cpu_restore_state(TranslationBlock *tb,
#ifdef DEBUG_DISAS
if (loglevel) {
int i;
fprintf(logfile, "RESTORE:\n");
for(i=0;i<=j; i++) {
if (gen_opc_instr_start[i]) {
fprintf(logfile, "0x%04x: 0x%08x", i, gen_opc_pc[i]);
fprintf(logfile, "0x%04x: 0x%08x\n", i, gen_opc_pc[i]);
}
}
fprintf(logfile, "j=0x%x eip=0x%lx cs_base=%lx\n",
j, gen_opc_pc[j] - tb->cs_base, tb->cs_base);
fprintf(logfile, "spc=0x%08lx j=0x%x eip=0x%lx cs_base=%lx\n",
searched_pc, j, gen_opc_pc[j] - tb->cs_base, tb->cs_base);
}
#endif
env->eip = gen_opc_pc[j] - tb->cs_base;

vl.c (1187 lines changed)
File diff suppressed because it is too large.

vl.h (47 lines changed)

@@ -25,7 +25,22 @@
#define VL_H
/* vl.c */
struct CPUX86State;
extern int reset_requested;
typedef void (IOPortWriteFunc)(struct CPUX86State *env, uint32_t address, uint32_t data);
typedef uint32_t (IOPortReadFunc)(struct CPUX86State *env, uint32_t address);
void *get_mmap_addr(unsigned long size);
int register_ioport_read(int start, int length, IOPortReadFunc *func, int size);
int register_ioport_write(int start, int length, IOPortWriteFunc *func, int size);
void kbd_put_keycode(int keycode);
#define MOUSE_EVENT_LBUTTON 0x01
#define MOUSE_EVENT_RBUTTON 0x02
#define MOUSE_EVENT_MBUTTON 0x04
void kbd_mouse_event(int dx, int dy, int dz, int buttons_state);
/* block.c */
typedef struct BlockDriverState BlockDriverState;
@@ -45,11 +60,41 @@ int bdrv_commit(BlockDriverState *bs);
struct cow_header_v2 {
uint32_t magic;
uint32_t long version;
uint32_t version;
char backing_file[1024];
int32_t mtime;
uint64_t size;
uint32_t sectorsize;
};
/* vga.c */
#define VGA_RAM_SIZE (8192 * 1024)
typedef struct DisplayState {
uint8_t *data;
int linesize;
int depth;
void (*dpy_update)(struct DisplayState *s, int x, int y, int w, int h);
void (*dpy_resize)(struct DisplayState *s, int w, int h);
void (*dpy_refresh)(struct DisplayState *s);
} DisplayState;
static inline void dpy_update(DisplayState *s, int x, int y, int w, int h)
{
s->dpy_update(s, x, y, w, h);
}
static inline void dpy_resize(DisplayState *s, int w, int h)
{
s->dpy_resize(s, w, h);
}
int vga_init(DisplayState *ds, uint8_t *vga_ram_base,
unsigned long vga_ram_offset, int vga_ram_size);
void vga_update_display(void);
/* sdl.c */
void sdl_display_init(DisplayState *ds);
#endif /* VL_H */
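
The DisplayState callbacks declared above are what sdl_display_init() fills in; any other front end would do the same. A minimal, self-contained sketch of the pattern with dummy callbacks (dummy_update and friends are hypothetical, not part of the source):

#include <stdio.h>

typedef struct DisplayState DisplayState;   /* mirrors the struct above */
struct DisplayState {
    unsigned char *data;
    int linesize;
    int depth;
    void (*dpy_update)(DisplayState *s, int x, int y, int w, int h);
    void (*dpy_resize)(DisplayState *s, int w, int h);
    void (*dpy_refresh)(DisplayState *s);
};

static void dummy_update(DisplayState *s, int x, int y, int w, int h)
{
    printf("update %dx%d at (%d,%d)\n", w, h, x, y);
}

static void dummy_resize(DisplayState *s, int w, int h)
{
    printf("resize to %dx%d\n", w, h);
}

static void dummy_refresh(DisplayState *s)
{
    /* a real front end polls for input and repaints here */
}

int main(void)
{
    DisplayState ds = { 0 };
    ds.dpy_update  = dummy_update;
    ds.dpy_resize  = dummy_resize;
    ds.dpy_refresh = dummy_refresh;
    ds.dpy_resize(&ds, 640, 400);        /* as sdl_display_init does */
    ds.dpy_update(&ds, 0, 0, 640, 400);  /* full repaint */
    return 0;
}
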


@@ -38,12 +38,12 @@
#include <sys/poll.h>
#include <errno.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include "vl.h"
#define NO_THUNK_TYPE_SIZE
#include "thunk.h"
#include "bswap.h"
int cow_create(int cow_fd, const char *image_filename,
int64_t image_sectors)