summaryrefslogtreecommitdiff
path: root/coregrind/.svn
diff options
context:
space:
mode:
authorStephane Marchesin <marchesin@icps.u-strasbg.fr>2009-05-04 19:05:59 +0200
committerStephane Marchesin <marchesin@icps.u-strasbg.fr>2009-05-04 19:05:59 +0200
commit6e410b3bb6ff51580897431105aae14591cbf7fb (patch)
treef8aeba9352710f10cd6b1d5138c8fc3ece91c8c3 /coregrind/.svn
Initial import of fatgrind.HEADmaster
Diffstat (limited to 'coregrind/.svn')
-rw-r--r--coregrind/.svn/dir-prop-base18
-rw-r--r--coregrind/.svn/entries1164
-rw-r--r--coregrind/.svn/format1
-rw-r--r--coregrind/.svn/prop-base/Makefile.am.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_hashtable.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_main.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_mallocfree.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_signals.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_tooliface.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_trampoline.S.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_translate.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_transtab.c.svn-base9
-rw-r--r--coregrind/.svn/prop-base/m_wordfm.c.svn-base5
-rw-r--r--coregrind/.svn/prop-base/pub_core_ume.h.svn-base9
-rw-r--r--coregrind/.svn/text-base/Makefile.am.svn-base429
-rw-r--r--coregrind/.svn/text-base/launcher-aix5-bootblock.h.svn-base90
-rw-r--r--coregrind/.svn/text-base/launcher-aix5.c.svn-base1712
-rw-r--r--coregrind/.svn/text-base/launcher-linux.c.svn-base322
-rw-r--r--coregrind/.svn/text-base/m_clientstate.c.svn-base105
-rw-r--r--coregrind/.svn/text-base/m_commandline.c.svn-base250
-rw-r--r--coregrind/.svn/text-base/m_cpuid.S.svn-base153
-rw-r--r--coregrind/.svn/text-base/m_debugger.c.svn-base321
-rw-r--r--coregrind/.svn/text-base/m_debuglog.c.svn-base885
-rw-r--r--coregrind/.svn/text-base/m_errormgr.c.svn-base1309
-rw-r--r--coregrind/.svn/text-base/m_execontext.c.svn-base479
-rw-r--r--coregrind/.svn/text-base/m_hashtable.c.svn-base275
-rw-r--r--coregrind/.svn/text-base/m_libcassert.c.svn-base261
-rw-r--r--coregrind/.svn/text-base/m_libcbase.c.svn-base627
-rw-r--r--coregrind/.svn/text-base/m_libcfile.c.svn-base858
-rw-r--r--coregrind/.svn/text-base/m_libcprint.c.svn-base393
-rw-r--r--coregrind/.svn/text-base/m_libcproc.c.svn-base644
-rw-r--r--coregrind/.svn/text-base/m_libcsignal.c.svn-base309
-rw-r--r--coregrind/.svn/text-base/m_machine.c.svn-base676
-rw-r--r--coregrind/.svn/text-base/m_main.c.svn-base2623
-rw-r--r--coregrind/.svn/text-base/m_mallocfree.c.svn-base1869
-rw-r--r--coregrind/.svn/text-base/m_options.c.svn-base256
-rw-r--r--coregrind/.svn/text-base/m_oset.c.svn-base882
-rw-r--r--coregrind/.svn/text-base/m_redir.c.svn-base1115
-rw-r--r--coregrind/.svn/text-base/m_seqmatch.c.svn-base233
-rw-r--r--coregrind/.svn/text-base/m_signals.c.svn-base2227
-rw-r--r--coregrind/.svn/text-base/m_sparsewa.c.svn-base478
-rw-r--r--coregrind/.svn/text-base/m_stacks.c.svn-base329
-rw-r--r--coregrind/.svn/text-base/m_stacktrace.c.svn-base580
-rw-r--r--coregrind/.svn/text-base/m_syscall.c.svn-base567
-rw-r--r--coregrind/.svn/text-base/m_threadstate.c.svn-base143
-rw-r--r--coregrind/.svn/text-base/m_tooliface.c.svn-base413
-rw-r--r--coregrind/.svn/text-base/m_trampoline.S.svn-base757
-rw-r--r--coregrind/.svn/text-base/m_translate.c.svn-base1559
-rw-r--r--coregrind/.svn/text-base/m_transtab.c.svn-base1630
-rw-r--r--coregrind/.svn/text-base/m_vki.c.svn-base52
-rw-r--r--coregrind/.svn/text-base/m_vkiscnums.c.svn-base1129
-rw-r--r--coregrind/.svn/text-base/m_wordfm.c.svn-base984
-rw-r--r--coregrind/.svn/text-base/m_xarray.c.svn-base281
-rw-r--r--coregrind/.svn/text-base/no_op_client_for_valgrind.c.svn-base16
-rw-r--r--coregrind/.svn/text-base/pub_core_aspacemgr.h.svn-base408
-rw-r--r--coregrind/.svn/text-base/pub_core_basics.h.svn-base70
-rw-r--r--coregrind/.svn/text-base/pub_core_basics_asm.h.svn-base50
-rw-r--r--coregrind/.svn/text-base/pub_core_clientstate.h.svn-base93
-rw-r--r--coregrind/.svn/text-base/pub_core_clreq.h.svn-base80
-rw-r--r--coregrind/.svn/text-base/pub_core_commandline.h.svn-base47
-rw-r--r--coregrind/.svn/text-base/pub_core_coredump.h.svn-base45
-rw-r--r--coregrind/.svn/text-base/pub_core_cpuid.h.svn-base45
-rw-r--r--coregrind/.svn/text-base/pub_core_debugger.h.svn-base45
-rw-r--r--coregrind/.svn/text-base/pub_core_debuginfo.h.svn-base141
-rw-r--r--coregrind/.svn/text-base/pub_core_debuglog.h.svn-base92
-rw-r--r--coregrind/.svn/text-base/pub_core_demangle.h.svn-base68
-rw-r--r--coregrind/.svn/text-base/pub_core_dispatch.h.svn-base97
-rw-r--r--coregrind/.svn/text-base/pub_core_dispatch_asm.h.svn-base55
-rw-r--r--coregrind/.svn/text-base/pub_core_errormgr.h.svn-base70
-rw-r--r--coregrind/.svn/text-base/pub_core_execontext.h.svn-base61
-rw-r--r--coregrind/.svn/text-base/pub_core_hashtable.h.svn-base48
-rw-r--r--coregrind/.svn/text-base/pub_core_initimg.h.svn-base181
-rw-r--r--coregrind/.svn/text-base/pub_core_libcassert.h.svn-base81
-rw-r--r--coregrind/.svn/text-base/pub_core_libcbase.h.svn-base46
-rw-r--r--coregrind/.svn/text-base/pub_core_libcfile.h.svn-base97
-rw-r--r--coregrind/.svn/text-base/pub_core_libcprint.h.svn-base55
-rw-r--r--coregrind/.svn/text-base/pub_core_libcproc.h.svn-base91
-rw-r--r--coregrind/.svn/text-base/pub_core_libcsignal.h.svn-base86
-rw-r--r--coregrind/.svn/text-base/pub_core_machine.h.svn-base182
-rw-r--r--coregrind/.svn/text-base/pub_core_mallocfree.h.svn-base117
-rw-r--r--coregrind/.svn/text-base/pub_core_options.h.svn-base202
-rw-r--r--coregrind/.svn/text-base/pub_core_oset.h.svn-base49
-rw-r--r--coregrind/.svn/text-base/pub_core_redir.h.svn-base125
-rw-r--r--coregrind/.svn/text-base/pub_core_replacemalloc.h.svn-base61
-rw-r--r--coregrind/.svn/text-base/pub_core_scheduler.h.svn-base101
-rw-r--r--coregrind/.svn/text-base/pub_core_seqmatch.h.svn-base51
-rw-r--r--coregrind/.svn/text-base/pub_core_sigframe.h.svn-base65
-rw-r--r--coregrind/.svn/text-base/pub_core_signals.h.svn-base88
-rw-r--r--coregrind/.svn/text-base/pub_core_sparsewa.h.svn-base51
-rw-r--r--coregrind/.svn/text-base/pub_core_stacks.h.svn-base52
-rw-r--r--coregrind/.svn/text-base/pub_core_stacktrace.h.svn-base63
-rw-r--r--coregrind/.svn/text-base/pub_core_syscall.h.svn-base89
-rw-r--r--coregrind/.svn/text-base/pub_core_syswrap.h.svn-base83
-rw-r--r--coregrind/.svn/text-base/pub_core_threadstate.h.svn-base272
-rw-r--r--coregrind/.svn/text-base/pub_core_tooliface.h.svn-base245
-rw-r--r--coregrind/.svn/text-base/pub_core_trampoline.h.svn-base119
-rw-r--r--coregrind/.svn/text-base/pub_core_translate.h.svn-base53
-rw-r--r--coregrind/.svn/text-base/pub_core_transtab.h.svn-base104
-rw-r--r--coregrind/.svn/text-base/pub_core_transtab_asm.h.svn-base63
-rw-r--r--coregrind/.svn/text-base/pub_core_ume.h.svn-base85
-rw-r--r--coregrind/.svn/text-base/pub_core_vki.h.svn-base50
-rw-r--r--coregrind/.svn/text-base/pub_core_vkiscnums.h.svn-base62
-rw-r--r--coregrind/.svn/text-base/pub_core_wordfm.h.svn-base64
-rw-r--r--coregrind/.svn/text-base/pub_core_xarray.h.svn-base49
-rw-r--r--coregrind/.svn/text-base/vg_preloaded.c.svn-base72
105 files changed, 34268 insertions, 0 deletions
diff --git a/coregrind/.svn/dir-prop-base b/coregrind/.svn/dir-prop-base
new file mode 100644
index 0000000..4fab291
--- /dev/null
+++ b/coregrind/.svn/dir-prop-base
@@ -0,0 +1,18 @@
+K 10
+svn:ignore
+V 150
+*.a
+.deps
+Makefile
+Makefile.in
+no_op_client_for_valgrind
+*.so
+stage2
+stage2.lds
+valgrind
+vg_intercept.c
+vg_replace_malloc.c
+vg_toolint.c
+vg_toolint.h
+
+END
diff --git a/coregrind/.svn/entries b/coregrind/.svn/entries
new file mode 100644
index 0000000..5129bf8
--- /dev/null
+++ b/coregrind/.svn/entries
@@ -0,0 +1,1164 @@
+8
+
+dir
+9703
+svn://svn.valgrind.org/valgrind/trunk/coregrind
+svn://svn.valgrind.org/valgrind
+
+
+
+2009-04-30T07:41:24.107263Z
+9702
+njn
+has-props
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a5019735-40e9-0310-863c-91ae7b9d1cf9
+
+m_replacemalloc
+dir
+
+launcher-linux.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ac2a1bd3afaaf302272eba805d6dfc29
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_transtab.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+b2e7719e74459e93d5940ac6a4bcad23
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_scheduler.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+c0f9f84633a9f93e4ca2dca7165d186b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_commandline.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+6f7689b62a377b476f26199bddabdf22
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_debugger.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+a853f90ce05b180f4abada68168ebffd
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_demangle
+dir
+
+pub_core_commandline.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+f8f6ddaf8b752ffa03ba7c023f2184c1
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_translate.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+57a97809c8a08df9222520eed3313564
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+pub_core_debuglog.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+3f8ff3a772b6e8ec08a8890031220769
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_libcbase.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+cc69570098c14adb1327f48de2319696
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_translate.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+36f12b60615f324a33209ce87001f6f0
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_ume
+dir
+
+pub_core_dispatch_asm.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+b784bfdd131cf097e431d283817feab9
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_basics_asm.h
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+bfdb1ff4d6cc87ecbc4efe46fea20f17
+2009-04-24T04:57:07.028318Z
+9601
+njn
+
+m_sigframe
+dir
+
+pub_core_seqmatch.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+128ed0c116b32fef92cd82625426690b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_dispatch
+dir
+
+vg_preloaded.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+0872a7d23bd4009d30ec35f6bb1624be
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_trampoline.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+da9eaeceb2398929333e4ac71c197d37
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_coredump.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+6d8573c5d6c5682680aaae9561d4e26e
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_sparsewa.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+7e2d6546ab3923bc1895c02125fd24ae
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_execontext.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+8ce7fc84c3723970088f7132ba53c672
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_syswrap
+dir
+
+m_libcassert.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+c35668de453a35bcf8d7fff762c2e7c0
+2009-04-16T00:33:20.050476Z
+9556
+njn
+
+pub_core_stacktrace.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+5a800251ceb08eeff5773ceab192a6c6
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_ume.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+d4f6d6c16e720608f02038586017833d
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+pub_core_stacks.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+6c106c600b6e6ff74ecf5658ad1f1393
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_errormgr.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+0f4239fe707f2f619550b215b5835163
+2009-03-15T23:25:38.213170Z
+9416
+njn
+
+pub_core_libcproc.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+8517e8abe03d8143212fda7218454af6
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_sigframe.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+e00f963f245ca2b87d4cf797371907f2
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_machine.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+a2e312f33f05291095a8599726c6298a
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_tooliface.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+5f6da75ca90b79b6d94af3d013bb8237
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+pub_core_wordfm.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+810121ef9d158c366b5c817e8a028974
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_dispatch.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+0ea0b4c16953379c6bb9eaccaa0abff1
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_mallocfree.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+26d47fe96d67c08ddd80871f62a83e77
+2009-04-16T00:33:20.050476Z
+9556
+njn
+has-props
+
+m_syscall.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+3ca19b034476b1d59203dbda22202835
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_oset.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+262e68a739a6d86d414c2aafa66e4279
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_basics.h
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+66a6c48b8609da763225520eb77f2a6c
+2009-04-24T04:57:07.028318Z
+9601
+njn
+
+pub_core_machine.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+060693994e53d02d5f5f52d815827676
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_tooliface.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+c21fe24477a7a151b942d04f001b6678
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_xarray.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+498f83ba3b7718328ce487fa4ea79233
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_clreq.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+b58566478fbfcc066b4cf30f0d04f277
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_libcsignal.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+0feed8b7b2a52a2817b07d690c5a9774
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_transtab.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+e0387337afe846b30178bf7be299e706
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+m_initimg
+dir
+
+m_clientstate.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+6bb7c21ca1527ac12de4733baee8eb0d
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_syswrap.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+e5bc8ce60240138d2313e80b20cd93d5
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_libcfile.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+48622f2765820c9a992ba1b80ec949b3
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_debuginfo
+dir
+
+pub_core_syscall.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+0bb2cf3fdac77b9f35e02313ea6ae800
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_vkiscnums.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+568ef31653cb13fc3828fb851b84dc59
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_debugger.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+b641e51629ca96c5bb7dde933c04aa33
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_clientstate.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+5f8ef2addcd3625d7ef5aed531069f6a
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_libcprint.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+7145bafa38232303b881b1237c2af404
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_debuglog.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+b008d5b7e4a6bad7b1a21b4e729a0a0a
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+launcher-aix5.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+898aaeef7ea3c60ca3e17fb881ed081f
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_vkiscnums.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+2d5a1e7f12d24292a2d13e42f8d9883d
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_trampoline.S
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+9bfe851c03aefe2f31128d91bb381d7c
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+m_coredump
+dir
+
+pub_core_libcprint.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+8341f6a39d00f98ec3e308db202ad4fa
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_signals.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+eda431634f02b99c25b502615b5bce42
+2009-04-30T07:41:24.107263Z
+9702
+njn
+has-props
+
+m_seqmatch.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+e4cfab5f880179ee46da35f52fdc3f71
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_redir.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+abfdea02e88f8ebae21c87ce21aa504e
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_signals.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+9c08b51579a9324d45d2f0a3f6aaee7b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_initimg.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+b72232f2cd0396f951c87a02c3625e54
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_debuginfo.h
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+5e3538a9b8f7e3c87a3a5ae172035eaa
+2009-04-22T22:42:10.255420Z
+9580
+sewardj
+
+launcher-aix5-bootblock.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+4f22ddcccde2a3b2a17cb4d8c2a0c404
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_replacemalloc.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+d3b8ab5fc2d06d8a4ebbcca322db3fe5
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_libcbase.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+50828fea965aa091c3a286d64411c17b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_redir.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+4d4d54158ddb884dc38c05ca0b57a5de
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_aspacemgr
+dir
+
+pub_core_demangle.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+22da8971888764e3f8f32cdaaee92048
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_main.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+bb2baff1c09f411671e423fad889c2c1
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+m_stacktrace.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+af2582b8d3c24307d7a32eafb64833aa
+2009-04-22T22:42:10.255420Z
+9580
+sewardj
+
+m_cpuid.S
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+8da75f25447e40b70c4f836b8c47654e
+2009-04-24T04:57:07.028318Z
+9601
+njn
+
+pub_core_transtab_asm.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ac4e773354a1220555d93f23708d3926
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+no_op_client_for_valgrind.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+7bb3982b6c21c2859424b9cd403f34b5
+2007-03-19T18:38:55.003458Z
+6653
+sewardj
+
+m_stacks.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+fb8b52fefa439a60eaea74b60f34d5bb
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_vki.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+0a79acf2cd7ab2b64df0ccdd8f68bd2c
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_options.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ed393bd61f360b2b3c2f6524f26330df
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_hashtable.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+3a84f5fb57b8dd2d37c75a4681c95e3f
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+pub_core_sparsewa.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ec3bd3557526a4ab6d2d06b7a0d5582b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_scheduler
+dir
+
+m_libcproc.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+4eb793b265cf563be40f3e1e39758834
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_execontext.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+80d91728dec2a584e582aca8650e5842
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_wordfm.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ef75b0f987e1ab9db2866d6b1868d9e9
+2009-03-10T22:02:09.669944Z
+9344
+njn
+has-props
+
+pub_core_vki.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+daa7af80a9a6734108169d9189b8aa4f
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_hashtable.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ae6c76190fd9b0229d2c06678c509bfb
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_options.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+c24f6a6e058753ad1b9396136ac175a9
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_libcassert.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+f98d91418c92fc23c50a00c63dcea35b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_errormgr.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+90e05e0a1557fc51d52eaba7a18548ad
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_xarray.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+ae2e64f1ee80f432a6579843a613c038
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_libcsignal.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+e3155b553cc5739de6a2c5057c8fe539
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_threadstate.c
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+5b25378b3ff366709a263cd60bb4485a
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_aspacemgr.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+d6fa893ba5d48280b7d79e006996e14b
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+m_libcfile.c
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+a0cb7e199edf7cea7a9470a1ff169e2c
+2009-04-28T05:35:53.227539Z
+9658
+njn
+
+pub_core_cpuid.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+7610c6cd0f5f6f74218502a02cf172be
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+Makefile.am
+file
+
+
+
+
+2009-04-30T16:44:06.000000Z
+a4d184dc8aa516d8894a5c3848d041a2
+2009-04-24T04:12:28.751135Z
+9598
+njn
+has-props
+
+pub_core_threadstate.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+c7bbaf37eaf651212d3a012ce823c7ed
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_mallocfree.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+4762b3fd0be5e64e1139e0c2604908aa
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
+pub_core_oset.h
+file
+
+
+
+
+2009-03-13T17:30:28.000000Z
+3342864510411f0f12e1bbb3dc8f973f
+2009-03-10T22:02:09.669944Z
+9344
+njn
+
diff --git a/coregrind/.svn/format b/coregrind/.svn/format
new file mode 100644
index 0000000..45a4fb7
--- /dev/null
+++ b/coregrind/.svn/format
@@ -0,0 +1 @@
+8
diff --git a/coregrind/.svn/prop-base/Makefile.am.svn-base b/coregrind/.svn/prop-base/Makefile.am.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/Makefile.am.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_hashtable.c.svn-base b/coregrind/.svn/prop-base/m_hashtable.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_hashtable.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_main.c.svn-base b/coregrind/.svn/prop-base/m_main.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_main.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_mallocfree.c.svn-base b/coregrind/.svn/prop-base/m_mallocfree.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_mallocfree.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_signals.c.svn-base b/coregrind/.svn/prop-base/m_signals.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_signals.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_tooliface.c.svn-base b/coregrind/.svn/prop-base/m_tooliface.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_tooliface.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_trampoline.S.svn-base b/coregrind/.svn/prop-base/m_trampoline.S.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_trampoline.S.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_translate.c.svn-base b/coregrind/.svn/prop-base/m_translate.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_translate.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_transtab.c.svn-base b/coregrind/.svn/prop-base/m_transtab.c.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_transtab.c.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/prop-base/m_wordfm.c.svn-base b/coregrind/.svn/prop-base/m_wordfm.c.svn-base
new file mode 100644
index 0000000..3160658
--- /dev/null
+++ b/coregrind/.svn/prop-base/m_wordfm.c.svn-base
@@ -0,0 +1,5 @@
+K 13
+svn:mergeinfo
+V 0
+
+END
diff --git a/coregrind/.svn/prop-base/pub_core_ume.h.svn-base b/coregrind/.svn/prop-base/pub_core_ume.h.svn-base
new file mode 100644
index 0000000..df54a06
--- /dev/null
+++ b/coregrind/.svn/prop-base/pub_core_ume.h.svn-base
@@ -0,0 +1,9 @@
+K 13
+svn:eol-style
+V 6
+native
+K 12
+svn:keywords
+V 23
+author date id revision
+END
diff --git a/coregrind/.svn/text-base/Makefile.am.svn-base b/coregrind/.svn/text-base/Makefile.am.svn-base
new file mode 100644
index 0000000..d1b1b09
--- /dev/null
+++ b/coregrind/.svn/text-base/Makefile.am.svn-base
@@ -0,0 +1,429 @@
+
+# Be very careful when renaming any files, targets, whatever, in this
+# Makefile. Various parts of the system rely on these names having
+# particular forms.
+
+include $(top_srcdir)/Makefile.all.am
+include $(top_srcdir)/Makefile.flags.am
+include $(top_srcdir)/Makefile.core-tool.am
+
+
+AM_CPPFLAGS_CORE_COMMON = \
+ -I$(top_srcdir)/coregrind \
+ -DVG_LIBDIR="\"$(valdir)"\"
+
+AM_CPPFLAGS_X86_LINUX += \
+ $(AM_CPPFLAGS_CORE_COMMON) -DVG_PLATFORM="\"x86-linux\""
+AM_CPPFLAGS_AMD64_LINUX += \
+ $(AM_CPPFLAGS_CORE_COMMON) -DVG_PLATFORM="\"amd64-linux\""
+AM_CPPFLAGS_PPC32_LINUX += \
+ $(AM_CPPFLAGS_CORE_COMMON) -DVG_PLATFORM="\"ppc32-linux\""
+AM_CPPFLAGS_PPC64_LINUX += \
+ $(AM_CPPFLAGS_CORE_COMMON) -DVG_PLATFORM="\"ppc64-linux\""
+AM_CPPFLAGS_PPC32_AIX5 += \
+ $(AM_CPPFLAGS_CORE_COMMON) -DVG_PLATFORM="\"ppc32-aix5\""
+AM_CPPFLAGS_PPC64_AIX5 += \
+ $(AM_CPPFLAGS_CORE_COMMON) -DVG_PLATFORM="\"ppc64-aix5\""
+
+
+default.supp: $(SUPP_FILES)
+
+
+noinst_PROGRAMS =
+pkglib_LIBRARIES =
+LIBVEX =
+
+if VGCONF_PLATFORMS_INCLUDE_X86_LINUX
+noinst_PROGRAMS += vgpreload_core-x86-linux.so
+pkglib_LIBRARIES += libcoregrind-x86-linux.a libreplacemalloc_toolpreload-x86-linux.a
+LIBVEX += libvex-x86-linux.a
+endif
+
+if VGCONF_PLATFORMS_INCLUDE_AMD64_LINUX
+noinst_PROGRAMS += vgpreload_core-amd64-linux.so
+pkglib_LIBRARIES += libcoregrind-amd64-linux.a libreplacemalloc_toolpreload-amd64-linux.a
+LIBVEX += libvex-amd64-linux.a
+endif
+
+if VGCONF_PLATFORMS_INCLUDE_PPC32_LINUX
+noinst_PROGRAMS += vgpreload_core-ppc32-linux.so
+pkglib_LIBRARIES += libcoregrind-ppc32-linux.a libreplacemalloc_toolpreload-ppc32-linux.a
+LIBVEX += libvex-ppc32-linux.a
+endif
+
+if VGCONF_PLATFORMS_INCLUDE_PPC64_LINUX
+noinst_PROGRAMS += vgpreload_core-ppc64-linux.so
+pkglib_LIBRARIES += libcoregrind-ppc64-linux.a libreplacemalloc_toolpreload-ppc64-linux.a
+LIBVEX += libvex-ppc64-linux.a
+endif
+
+if VGCONF_PLATFORMS_INCLUDE_PPC32_AIX5
+noinst_PROGRAMS += vgpreload_core-ppc32-aix5.so
+pkglib_LIBRARIES += libcoregrind-ppc32-aix5.a libreplacemalloc_toolpreload-ppc32-aix5.a
+LIBVEX += libvex-ppc32-aix5.a
+endif
+
+if VGCONF_PLATFORMS_INCLUDE_PPC64_AIX5
+noinst_PROGRAMS += vgpreload_core-ppc64-aix5.so
+pkglib_LIBRARIES += libcoregrind-ppc64-aix5.a libreplacemalloc_toolpreload-ppc64-aix5.a
+LIBVEX += libvex-ppc64-aix5.a
+endif
+
+
+#------------------------- launcher -----------------------
+# Build the launcher (valgrind) for the primary target only.
+#
+bin_PROGRAMS = \
+ valgrind no_op_client_for_valgrind
+
+if VGCONF_OS_IS_AIX5
+valgrind_SOURCES = \
+ launcher-aix5.c \
+ m_debuglog.c \
+ m_vkiscnums.c
+else
+valgrind_SOURCES = \
+ launcher-linux.c \
+ m_debuglog.c
+endif
+
+valgrind_CPPFLAGS = $(AM_CPPFLAGS_PRI)
+valgrind_CFLAGS = $(AM_CFLAGS_PRI)
+valgrind_CCASFLAGS = $(AM_CCASFLAGS_PRI)
+valgrind_LDFLAGS = $(AM_CFLAGS_PRI)
+
+no_op_client_for_valgrind_SOURCES = no_op_client_for_valgrind.c
+no_op_client_for_valgrind_CPPFLAGS = $(AM_CPPFLAGS_PRI)
+no_op_client_for_valgrind_CFLAGS = $(AM_CFLAGS_PRI)
+no_op_client_for_valgrind_CCASFLAGS = $(AM_CCASFLAGS_PRI)
+no_op_client_for_valgrind_LDFLAGS = $(AM_CFLAGS_PRI)
+#
+#----------------------------------------------------------
+
+
+noinst_HEADERS = \
+ pub_core_aspacemgr.h \
+ pub_core_basics.h \
+ pub_core_basics_asm.h \
+ pub_core_clientstate.h \
+ pub_core_clreq.h \
+ pub_core_commandline.h \
+ pub_core_coredump.h \
+ pub_core_cpuid.h \
+ pub_core_debuginfo.h \
+ pub_core_debugger.h \
+ pub_core_debuglog.h \
+ pub_core_demangle.h \
+ pub_core_dispatch.h \
+ pub_core_dispatch_asm.h \
+ pub_core_errormgr.h \
+ pub_core_execontext.h \
+ pub_core_hashtable.h \
+ pub_core_initimg.h \
+ pub_core_libcbase.h \
+ pub_core_libcassert.h \
+ pub_core_libcfile.h \
+ pub_core_libcprint.h \
+ pub_core_libcproc.h \
+ pub_core_libcsignal.h \
+ pub_core_machine.h \
+ pub_core_mallocfree.h \
+ pub_core_options.h \
+ pub_core_oset.h \
+ pub_core_redir.h \
+ pub_core_replacemalloc.h\
+ pub_core_scheduler.h \
+ pub_core_seqmatch.h \
+ pub_core_sigframe.h \
+ pub_core_signals.h \
+ pub_core_sparsewa.h \
+ pub_core_stacks.h \
+ pub_core_stacktrace.h \
+ pub_core_syscall.h \
+ pub_core_syswrap.h \
+ pub_core_threadstate.h \
+ pub_core_tooliface.h \
+ pub_core_trampoline.h \
+ pub_core_translate.h \
+ pub_core_transtab.h \
+ pub_core_transtab_asm.h \
+ pub_core_ume.h \
+ pub_core_vki.h \
+ pub_core_vkiscnums.h \
+ pub_core_wordfm.h \
+ pub_core_xarray.h \
+ m_aspacemgr/priv_aspacemgr.h \
+ m_coredump/priv_elf.h \
+ m_debuginfo/priv_misc.h \
+ m_debuginfo/priv_storage.h \
+ m_debuginfo/priv_tytypes.h \
+ m_debuginfo/priv_readstabs.h \
+ m_debuginfo/priv_readpdb.h \
+ m_debuginfo/priv_d3basics.h \
+ m_debuginfo/priv_readdwarf.h \
+ m_debuginfo/priv_readdwarf3.h \
+ m_debuginfo/priv_readelf.h \
+ m_debuginfo/priv_readxcoff.h \
+ m_demangle/ansidecl.h \
+ m_demangle/cp-demangle.h \
+ m_demangle/dyn-string.h \
+ m_demangle/demangle.h \
+ m_demangle/safe-ctype.h \
+ m_demangle/vg_libciface.h \
+ m_scheduler/priv_sema.h \
+ m_syswrap/priv_types_n_macros.h \
+ m_syswrap/priv_syswrap-generic.h \
+ m_syswrap/priv_syswrap-linux.h \
+ m_syswrap/priv_syswrap-linux-variants.h \
+ m_syswrap/priv_syswrap-aix5.h \
+ m_syswrap/priv_syswrap-main.h \
+ m_ume/priv_ume.h \
+ launcher-aix5-bootblock.h \
+ m_initimg/simple_huffman.c
+
+BUILT_SOURCES =
+CLEANFILES =
+
+
+COREGRIND_SOURCES_COMMON = \
+ m_commandline.c \
+ m_cpuid.S \
+ m_clientstate.c \
+ m_debugger.c \
+ m_debuglog.c \
+ m_errormgr.c \
+ m_execontext.c \
+ m_hashtable.c \
+ m_libcbase.c \
+ m_libcassert.c \
+ m_libcfile.c \
+ m_libcprint.c \
+ m_libcproc.c \
+ m_libcsignal.c \
+ m_machine.c \
+ m_main.c \
+ m_mallocfree.c \
+ m_options.c \
+ m_oset.c \
+ m_redir.c \
+ m_seqmatch.c \
+ m_signals.c \
+ m_sparsewa.c \
+ m_stacks.c \
+ m_stacktrace.c \
+ m_syscall.c \
+ m_threadstate.c \
+ m_tooliface.c \
+ m_trampoline.S \
+ m_translate.c \
+ m_transtab.c \
+ m_vki.c \
+ m_vkiscnums.c \
+ m_wordfm.c \
+ m_xarray.c \
+ m_aspacemgr/aspacemgr-common.c \
+ m_debuginfo/misc.c \
+ m_debuginfo/d3basics.c \
+ m_debuginfo/storage.c \
+ m_debuginfo/debuginfo.c \
+ m_debuginfo/tytypes.c \
+ m_demangle/cp-demangle.c \
+ m_demangle/cplus-dem.c \
+ m_demangle/demangle.c \
+ m_demangle/dyn-string.c \
+ m_demangle/safe-ctype.c \
+ m_replacemalloc/replacemalloc_core.c \
+ m_scheduler/scheduler.c \
+ m_scheduler/sema.c \
+ m_syswrap/syswrap-main.c
+
+COREGRIND_LINUX_SOURCE = \
+ m_aspacemgr/aspacemgr-linux.c \
+ m_coredump/coredump-elf.c \
+ m_debuginfo/readelf.c \
+ m_debuginfo/readdwarf.c \
+ m_debuginfo/readdwarf3.c \
+ m_debuginfo/readstabs.c \
+ m_debuginfo/readpdb.c \
+ m_initimg/initimg-linux.c \
+ m_syswrap/syswrap-linux.c \
+ m_syswrap/syswrap-linux-variants.c \
+ m_syswrap/syswrap-generic.c \
+ m_ume/elf.c \
+ m_ume/main.c \
+ m_ume/script.c
+
+COREGRIND_AIX5_SOURCE = \
+ m_aspacemgr/aspacemgr-aix5.c \
+ m_initimg/initimg-aix5.c \
+ m_debuginfo/readxcoff.c \
+ m_syswrap/syswrap-aix5.c
+
+libcoregrind_x86_linux_a_SOURCES = \
+ $(COREGRIND_SOURCES_COMMON) \
+ $(COREGRIND_LINUX_SOURCE) \
+ m_coredump/coredump-x86-linux.c \
+ m_dispatch/dispatch-x86-linux.S \
+ m_sigframe/sigframe-x86-linux.c \
+ m_syswrap/syscall-x86-linux.S \
+ m_syswrap/syswrap-x86-linux.c
+libcoregrind_x86_linux_a_CPPFLAGS = $(AM_CPPFLAGS_X86_LINUX)
+libcoregrind_x86_linux_a_CFLAGS = $(AM_CFLAGS_X86_LINUX)
+libcoregrind_x86_linux_a_CCASFLAGS = $(AM_CCASFLAGS_X86_LINUX)
+
+libcoregrind_amd64_linux_a_SOURCES = \
+ $(COREGRIND_SOURCES_COMMON) \
+ $(COREGRIND_LINUX_SOURCE) \
+ m_coredump/coredump-amd64-linux.c \
+ m_dispatch/dispatch-amd64-linux.S \
+ m_sigframe/sigframe-amd64-linux.c \
+ m_syswrap/syscall-amd64-linux.S \
+ m_syswrap/syswrap-amd64-linux.c
+libcoregrind_amd64_linux_a_CPPFLAGS = $(AM_CPPFLAGS_AMD64_LINUX)
+libcoregrind_amd64_linux_a_CFLAGS = $(AM_CFLAGS_AMD64_LINUX)
+libcoregrind_amd64_linux_a_CCASFLAGS = $(AM_CCASFLAGS_AMD64_LINUX)
+
+libcoregrind_ppc32_linux_a_SOURCES = \
+ $(COREGRIND_SOURCES_COMMON) \
+ $(COREGRIND_LINUX_SOURCE) \
+ m_coredump/coredump-ppc32-linux.c \
+ m_dispatch/dispatch-ppc32-linux.S \
+ m_sigframe/sigframe-ppc32-linux.c \
+ m_syswrap/syscall-ppc32-linux.S \
+ m_syswrap/syswrap-ppc32-linux.c
+libcoregrind_ppc32_linux_a_CPPFLAGS = $(AM_CPPFLAGS_PPC32_LINUX)
+libcoregrind_ppc32_linux_a_CFLAGS = $(AM_CFLAGS_PPC32_LINUX)
+libcoregrind_ppc32_linux_a_CCASFLAGS = $(AM_CCASFLAGS_PPC32_LINUX)
+
+libcoregrind_ppc64_linux_a_SOURCES = \
+ $(COREGRIND_SOURCES_COMMON) \
+ $(COREGRIND_LINUX_SOURCE) \
+ m_coredump/coredump-ppc64-linux.c \
+ m_dispatch/dispatch-ppc64-linux.S \
+ m_sigframe/sigframe-ppc64-linux.c \
+ m_syswrap/syscall-ppc64-linux.S \
+ m_syswrap/syswrap-ppc64-linux.c
+libcoregrind_ppc64_linux_a_CPPFLAGS = $(AM_CPPFLAGS_PPC64_LINUX)
+libcoregrind_ppc64_linux_a_CFLAGS = $(AM_CFLAGS_PPC64_LINUX)
+libcoregrind_ppc64_linux_a_CCASFLAGS = $(AM_CCASFLAGS_PPC64_LINUX)
+
+libcoregrind_ppc32_aix5_a_SOURCES = \
+ $(COREGRIND_SOURCES_COMMON) \
+ $(COREGRIND_AIX5_SOURCE) \
+ m_coredump/coredump-ppc32-aix5.c \
+ m_dispatch/dispatch-ppc32-aix5.S \
+ m_sigframe/sigframe-ppc32-aix5.c \
+ m_syswrap/syscall-ppc32-aix5.S \
+ m_syswrap/syswrap-ppc32-aix5.c
+libcoregrind_ppc32_aix5_a_CPPFLAGS = $(AM_CPPFLAGS_PPC32_AIX5)
+libcoregrind_ppc32_aix5_a_CFLAGS = $(AM_CFLAGS_PPC32_AIX5)
+libcoregrind_ppc32_aix5_a_CCASFLAGS = $(AM_CCASFLAGS_PPC32_AIX5)
+libcoregrind_ppc32_aix5_a_AR = $(AR) -X32 cru
+
+libcoregrind_ppc64_aix5_a_SOURCES = \
+ $(COREGRIND_SOURCES_COMMON) \
+ $(COREGRIND_AIX5_SOURCE) \
+ m_coredump/coredump-ppc64-aix5.c \
+ m_dispatch/dispatch-ppc64-aix5.S \
+ m_sigframe/sigframe-ppc64-aix5.c \
+ m_syswrap/syscall-ppc64-aix5.S \
+ m_syswrap/syswrap-ppc64-aix5.c
+libcoregrind_ppc64_aix5_a_CPPFLAGS = $(AM_CPPFLAGS_PPC64_AIX5)
+libcoregrind_ppc64_aix5_a_CFLAGS = $(AM_CFLAGS_PPC64_AIX5)
+libcoregrind_ppc64_aix5_a_CCASFLAGS = $(AM_CCASFLAGS_PPC64_AIX5)
+libcoregrind_ppc64_aix5_a_AR = $(AR) -X64 cru
+
+
+libreplacemalloc_toolpreload_x86_linux_a_SOURCES = m_replacemalloc/vg_replace_malloc.c
+libreplacemalloc_toolpreload_x86_linux_a_CPPFLAGS = $(AM_CPPFLAGS_X86_LINUX)
+libreplacemalloc_toolpreload_x86_linux_a_CFLAGS = $(AM_CFLAGS_X86_LINUX) -fpic -fno-omit-frame-pointer
+
+libreplacemalloc_toolpreload_amd64_linux_a_SOURCES = m_replacemalloc/vg_replace_malloc.c
+libreplacemalloc_toolpreload_amd64_linux_a_CPPFLAGS = $(AM_CPPFLAGS_AMD64_LINUX)
+libreplacemalloc_toolpreload_amd64_linux_a_CFLAGS = $(AM_CFLAGS_AMD64_LINUX) -fpic -fno-omit-frame-pointer
+
+libreplacemalloc_toolpreload_ppc32_linux_a_SOURCES = m_replacemalloc/vg_replace_malloc.c
+libreplacemalloc_toolpreload_ppc32_linux_a_CPPFLAGS = $(AM_CPPFLAGS_PPC32_LINUX)
+libreplacemalloc_toolpreload_ppc32_linux_a_CFLAGS = $(AM_CFLAGS_PPC32_LINUX) -fpic -fno-omit-frame-pointer
+
+libreplacemalloc_toolpreload_ppc64_linux_a_SOURCES = m_replacemalloc/vg_replace_malloc.c
+libreplacemalloc_toolpreload_ppc64_linux_a_CPPFLAGS = $(AM_CPPFLAGS_PPC64_LINUX)
+libreplacemalloc_toolpreload_ppc64_linux_a_CFLAGS = $(AM_CFLAGS_PPC64_LINUX) -fpic -fno-omit-frame-pointer
+
+libreplacemalloc_toolpreload_ppc32_aix5_a_SOURCES = m_replacemalloc/vg_replace_malloc.c
+libreplacemalloc_toolpreload_ppc32_aix5_a_CPPFLAGS = $(AM_CPPFLAGS_PPC32_AIX5)
+libreplacemalloc_toolpreload_ppc32_aix5_a_CFLAGS = $(AM_CFLAGS_PPC32_AIX5) -fpic -fno-omit-frame-pointer
+libreplacemalloc_toolpreload_ppc32_aix5_a_AR = $(AR) -X32 cru
+
+libreplacemalloc_toolpreload_ppc64_aix5_a_SOURCES = m_replacemalloc/vg_replace_malloc.c
+libreplacemalloc_toolpreload_ppc64_aix5_a_CPPFLAGS = $(AM_CPPFLAGS_PPC64_AIX5)
+libreplacemalloc_toolpreload_ppc64_aix5_a_CFLAGS = $(AM_CFLAGS_PPC64_AIX5) -fpic -fno-omit-frame-pointer
+libreplacemalloc_toolpreload_ppc64_aix5_a_AR = $(AR) -X64 cru
+
+m_dispatch/dispatch-x86-linux.S: libvex_guest_offsets.h
+m_dispatch/dispatch-amd64-linux.S: libvex_guest_offsets.h
+m_dispatch/dispatch-ppc32-linux.S: libvex_guest_offsets.h
+m_dispatch/dispatch-ppc64-linux.S: libvex_guest_offsets.h
+m_dispatch/dispatch-ppc32-aix5.S: libvex_guest_offsets.h
+m_dispatch/dispatch-ppc64-aix5.S: libvex_guest_offsets.h
+m_syswrap/syscall-x86-linux.S: libvex_guest_offsets.h
+m_syswrap/syscall-amd64-linux.S: libvex_guest_offsets.h
+m_syswrap/syscall-ppc32-linux.S: libvex_guest_offsets.h
+m_syswrap/syscall-ppc64-linux.S: libvex_guest_offsets.h
+m_syswrap/syscall-ppc32-aix5.S: libvex_guest_offsets.h
+m_syswrap/syscall-ppc64-aix5.S: libvex_guest_offsets.h
+m_syswrap/syswrap-main.c: libvex_guest_offsets.h
+
+libvex_guest_offsets.h:
+ $(MAKE) -C @VEX_DIR@ CC="$(CC)" AR="$(AR)" pub/libvex_guest_offsets.h
+
+VGPRELOAD_CORE_SOURCES_COMMON = vg_preloaded.c
+
+vgpreload_core_x86_linux_so_SOURCES = $(VGPRELOAD_CORE_SOURCES_COMMON)
+vgpreload_core_x86_linux_so_CPPFLAGS = $(AM_CPPFLAGS_X86_LINUX)
+vgpreload_core_x86_linux_so_CFLAGS = $(AM_CFLAGS_X86_LINUX) $(AM_CFLAGS_PIC)
+vgpreload_core_x86_linux_so_LDFLAGS = $(PRELOAD_LDFLAGS_X86_LINUX)
+
+vgpreload_core_amd64_linux_so_SOURCES = $(VGPRELOAD_CORE_SOURCES_COMMON)
+vgpreload_core_amd64_linux_so_CPPFLAGS = $(AM_CPPFLAGS_AMD64_LINUX)
+vgpreload_core_amd64_linux_so_CFLAGS = $(AM_CFLAGS_AMD64_LINUX) $(AM_CFLAGS_PIC)
+vgpreload_core_amd64_linux_so_LDFLAGS = $(PRELOAD_LDFLAGS_AMD64_LINUX)
+
+vgpreload_core_ppc32_linux_so_SOURCES = $(VGPRELOAD_CORE_SOURCES_COMMON)
+vgpreload_core_ppc32_linux_so_CPPFLAGS = $(AM_CPPFLAGS_PPC32_LINUX)
+vgpreload_core_ppc32_linux_so_CFLAGS = $(AM_CFLAGS_PPC32_LINUX) $(AM_CFLAGS_PIC)
+vgpreload_core_ppc32_linux_so_LDFLAGS = $(PRELOAD_LDFLAGS_PPC32_LINUX)
+
+vgpreload_core_ppc64_linux_so_SOURCES = $(VGPRELOAD_CORE_SOURCES_COMMON)
+vgpreload_core_ppc64_linux_so_CPPFLAGS = $(AM_CPPFLAGS_PPC64_LINUX)
+vgpreload_core_ppc64_linux_so_CFLAGS = $(AM_CFLAGS_PPC64_LINUX) $(AM_CFLAGS_PIC)
+vgpreload_core_ppc64_linux_so_LDFLAGS = $(PRELOAD_LDFLAGS_PPC64_LINUX)
+
+vgpreload_core_ppc32_aix5_so_SOURCES = $(VGPRELOAD_CORE_SOURCES_COMMON)
+vgpreload_core_ppc32_aix5_so_CPPFLAGS = $(AM_CPPFLAGS_PPC32_AIX5)
+vgpreload_core_ppc32_aix5_so_CFLAGS = $(AM_CFLAGS_PPC32_AIX5) $(AM_CFLAGS_PIC)
+vgpreload_core_ppc32_aix5_so_LDFLAGS = $(PRELOAD_LDFLAGS_PPC32_AIX5)
+
+vgpreload_core_ppc64_aix5_so_SOURCES = $(VGPRELOAD_CORE_SOURCES_COMMON)
+vgpreload_core_ppc64_aix5_so_CPPFLAGS = $(AM_CPPFLAGS_PPC64_AIX5)
+vgpreload_core_ppc64_aix5_so_CFLAGS = $(AM_CFLAGS_PPC64_AIX5) $(AM_CFLAGS_PIC)
+vgpreload_core_ppc64_aix5_so_LDFLAGS = $(PRELOAD_LDFLAGS_PPC64_AIX5)
+
+all-local: inplace-noinst_PROGRAMS
+
+clean-local:
+ $(MAKE) -C @VEX_DIR@ CC="$(CC)" AR="$(AR)" clean
+
+# Nb: The loop installs the libvex library for possible use by standalone
+# tools.
+install-exec-local: install-noinst_PROGRAMS
+ for v in $(LIBVEX) ; do \
+ $(INSTALL_DATA) @VEX_DIR@/$$v $(DESTDIR)$(valdir) ; \
+ done
+
+
+MANUAL_DEPS = $(noinst_HEADERS) $(include_HEADERS)
+
+EXTRA_DIST = \
+ m_debuginfo/UNUSED_STABS.txt \
+ m_debuginfo/README.txt
diff --git a/coregrind/.svn/text-base/launcher-aix5-bootblock.h.svn-base b/coregrind/.svn/text-base/launcher-aix5-bootblock.h.svn-base
new file mode 100644
index 0000000..275b057
--- /dev/null
+++ b/coregrind/.svn/text-base/launcher-aix5-bootblock.h.svn-base
@@ -0,0 +1,90 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Structure written into the child process by launcher-aix5.c. ---*/
+/*--- launcher-aix5-bootblock.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+
+ Neither the names of the U.S. Department of Energy nor the
+ University of California nor the names of its contributors may be
+ used to endorse or promote products derived from this software
+ without prior written permission.
+*/
+
+
+/* This is the structure written into the child process by the AIX5
+ launcher. The tool's entry point (see bottom of
+ coregrind/m_main.c) must be aware of the layout as it will have to
+ read info from it at startup. This struct is designed to work
+ equally well in 32- and 64-bit mode. It must be placed at an
+ 8-aligned address in the child. */
+
+#define N_BOOTBLOCK_INSNS 60 /* number of insns */
+#define N_BOOTBLOCK_ERRMSG 64 /* max bytes in the error message */
+#define N_BOOTBLOCK_TOOLFILE 256 /* max bytes in the tool file name */
+
+typedef
+
+ struct {
+ /* Adler32 checksum of the uncompressed data of the compressed
+ page (the second part of which contains this struct. */
+ /* 0 */ UInt adler32;
+
+ /* The system call numbers for enough critical syscalls that the
+ tool can start both debug logging and also read the
+ /proc/../sysent file. */
+ /* 4 */ UInt __NR_getpid;
+ /* 8 */ UInt __NR_write;
+ /* 12 */ UInt __NR_exit;
+ /* 16 */ UInt __NR_open;
+ /* 20 */ UInt __NR_read;
+ /* 24 */ UInt __NR_close;
+ /* 28 */ UInt __off28;
+
+ /* The 37 integer registers for the client, as they should be at
+ startup. On 32-bit targets the registers are stored in the
+ lower half of each quadword, which, since this is a bigendian
+ platform, is the higher-addressed 4 bytes. */
+ /* MUST BE 8-aligned */
+ /* 32 */ ULong iregs_pc_cr_lr_ctr_xer[37];
+
+ /* The instructions for the bootstrap loader. */
+ /* 328 */ UInt code[N_BOOTBLOCK_INSNS];
+
+ /* A zero-terminated error message to be used when the bootstrap
+ loader fails. */
+ /* 628 */ UChar errmsg[N_BOOTBLOCK_ERRMSG];
+
+ /* The name of the tool file, again zero-terminated. */
+ /* 692 */ UChar toolfile[N_BOOTBLOCK_TOOLFILE];
+
+ /* 1024 */
+ }
+ AIX5Bootblock;
+
+/*--------------------------------------------------------------------*/
+/*--- end launcher-aix5-bootblock.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/launcher-aix5.c.svn-base b/coregrind/.svn/text-base/launcher-aix5.c.svn-base
new file mode 100644
index 0000000..2c41062
--- /dev/null
+++ b/coregrind/.svn/text-base/launcher-aix5.c.svn-base
@@ -0,0 +1,1712 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Launching Valgrind on AIX5. launcher-aix5.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+
+ Neither the names of the U.S. Department of Energy nor the
+ University of California nor the names of its contributors may be
+ used to endorse or promote products derived from this software
+ without prior written permission.
+*/
+
+/* Cut-down version of the normal launcher, except it is completely
+ different on AIX5. Does not handle shell scripts, only real
+ machine code XCOFF executables.
+
+ Note: this is a "normal" program and not part of Valgrind proper,
+ and so it doesn't have to conform to Valgrind's arcane rules on
+ no-glibc-usage etc.
+*/
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/ptrace.h>
+#include <sys/wait.h>
+
+/* Get both struct __ld_info32 and struct __ld_info64. */
+#define __LDINFO_PTRACE32__ 1
+#define __LDINFO_PTRACE64__ 1
+#include <sys/ldr.h>
+
+#include <sys/reg.h> /* GPR0 .. GPR31 */
+#include <sys/procfs.h> /* prsysent_t */
+
+#include "pub_core_debuglog.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_libcproc.h" // For VALGRIND_LIB, VALGRIND_LAUNCHER
+
+/* Get the definition for the AIX5Bootblock structure. This is what
+ we will generate and patch into the child's address space. */
+#include "launcher-aix5-bootblock.h"
+
+/* Simple routines for Huffman compression/decompression */
+#include "m_initimg/simple_huffman.c"
+
+
+/* -------------------------------------------------------------- */
+/* --- --- */
+/* --- A uniform interface to the ptrace facilities we need. --- */
+/* --- --- */
+/* -------------------------------------------------------------- */
+
+typedef
+ struct {
+ pid_t pid;
+ Bool is64;
+ }
+ Child;
+
+
+/* Read len bytes from target's rsrc to local ldst. Returns True if
+ error. */
+static
+Bool ptrace_READ_BLOCK ( Child* ch, Int len, void* ldst, Addr64 rsrc )
+{
+ Int r;
+ assert(len >= 0 && len <= 1024);
+ r = ptrace64( PT_READ_BLOCK, (ULong)ch->pid, rsrc, len, ldst );
+ if (r == len)
+ return False; /* success */
+ return True; /* error */
+}
+
+
+/* Write len bytes to target's rdst from local lsrc. Returns True if
+ error. */
+static
+Bool ptrace_WRITE_BLOCK ( Child* child, Int len, Addr64 rdst, void* lsrc )
+{
+ Int r;
+ assert(len >= 0 && len <= 1024);
+ r = ptrace64( PT_WRITE_BLOCK, (ULong)child->pid, rdst, len, lsrc );
+ if (r == len)
+ return False; /* success */
+ return True; /* error */
+}
+
+
+/* Read a GPR from the target. Returns True if error. */
+static
+Bool ptrace_READ_GPR ( Child* child, Int reg, ULong* ldst )
+{
+ ULong w64;
+ UInt w32;
+ errno = 0;
+ if (child->is64) {
+ (void)ptrace64( PT_READ_GPR,
+ (ULong)child->pid, (ULong)reg, 8, (Int*)(&w64) );
+ if (errno != 0) return True; /* error */
+ } else {
+ w32 = ptrace64( PT_READ_GPR,
+ (ULong)child->pid, (ULong)reg, 0, 0 );
+ if (errno != 0) return True; /* error */
+ w64 = (ULong)w32;
+ }
+ *ldst = w64;
+ return False; /* success */
+}
+
+
+/* Write a GPR to the target. Returns True if error. */
+static
+Bool ptrace_WRITE_GPR ( Child* child, Int reg, ULong val )
+{
+ ULong w64;
+ UInt w32;
+ errno = 0;
+ if (child->is64) {
+ w64 = val;
+ (void)ptrace64( PT_WRITE_GPR,
+ (ULong)child->pid, (ULong)reg, 8, (Int*)&w64 );
+ if (errno != 0) return True; /* error */
+ } else {
+ w32 = (UInt)val;
+ (void)ptrace64( PT_WRITE_GPR,
+ (ULong)child->pid, (ULong)reg, w32, 0 );
+ if (errno != 0) return True; /* error */
+ }
+ return False; /* success */
+}
+
+
+/* -------------------------------------------------------------- */
+/* --- --- */
+/* --- Helper functions --- */
+/* --- --- */
+/* -------------------------------------------------------------- */
+
+/* Search the path for the client program */
+static const char* find_client ( const char* clientname )
+{
+ static char fullname[PATH_MAX];
+ const char *path = getenv("PATH");
+ const char *colon;
+
+ while (path)
+ {
+ if ((colon = strchr(path, ':')) == NULL)
+ {
+ strcpy(fullname, path);
+ path = NULL;
+ }
+ else
+ {
+ memcpy(fullname, path, colon - path);
+ fullname[colon - path] = '\0';
+ path = colon + 1;
+ }
+ strcat(fullname, "/");
+ strcat(fullname, clientname);
+
+ if (access(fullname, R_OK|X_OK) == 0)
+ return fullname;
+ }
+
+ return clientname;
+}
+
+/* Examine the given file. If it looks like valid XCOFF32 return 32,
+ if valid XCOFF64 return 64, else return 0. */
+static Int examine_client ( const char* clientname )
+{
+ UChar buf[16];
+ Int n;
+ FILE* f = fopen( clientname, "r" );
+ if (f == NULL)
+ return 0;
+ n = fread( buf, 1, 16, f );
+ fclose(f);
+ if (n != 16)
+ return 0;
+ if (buf[0] == 0x01 && buf[1] == 0xDF)
+ return 32; /* XCOFF32 */
+ if (buf[0] == 0x01 && buf[1] == 0xF7)
+ return 64; /* XCOFF64 */
+ return 0;
+}
+
+static Bool file_exists ( char* fname )
+{
+ struct stat buf;
+ int r = stat(fname, &buf);
+ return r == 0;
+}
+
+static Addr64 ROUNDDN_PAGE ( Addr64 v )
+{
+ ULong p = (ULong)v;
+ ULong a = PAGE_SIZE;
+ p &= ~(a-1);
+ return (Addr64)p;
+}
+
+static Bool IS_PAGE_ALIGNED ( Addr64 v )
+{
+ ULong p = (ULong)v;
+ ULong a = PAGE_SIZE;
+ if (p & (a-1))
+ return False;
+ else
+ return True;
+}
+
+static Bool IS_8_ALIGNED ( Addr64 v )
+{
+ ULong p = (ULong)v;
+ ULong a = 8;
+ if (p & (a-1))
+ return False;
+ else
+ return True;
+}
+
+
+/* Read a 4096-byte page from CHILD's address space at location SRC,
+ into local address space at DST. Returns True if error, False
+ otherwise.
+*/
+static Bool ptrace_read_page ( Child* child, UChar* ldst, Addr64 rsrc )
+{
+ Int off;
+ Bool err;
+
+ assert(IS_PAGE_ALIGNED(rsrc));
+
+ off = 0;
+ err = ptrace_READ_BLOCK(child, 1024, ldst + off, rsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ err = ptrace_READ_BLOCK(child, 1024, ldst + off, rsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ err = ptrace_READ_BLOCK(child, 1024, ldst + off, rsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ err = ptrace_READ_BLOCK(child, 1024, ldst + off, rsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ assert(off == PAGE_SIZE);
+
+ return False;
+}
+
+
+/* Write a 4096-byte page from local address space at SRC to CHILD's
+ address space at location DST. Returns True if error, False
+ otherwise.
+*/
+static Bool ptrace_write_page ( Child* child, Addr64 rdst, UChar* lsrc )
+{
+ Int off;
+ Bool err;
+
+ assert(IS_PAGE_ALIGNED(rdst));
+
+ off = 0;
+ err = ptrace_WRITE_BLOCK(child, 1024, rdst + off, lsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ err = ptrace_WRITE_BLOCK(child, 1024, rdst + off, lsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ err = ptrace_WRITE_BLOCK(child, 1024, rdst + off, lsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ err = ptrace_WRITE_BLOCK(child, 1024, rdst + off, lsrc + off);
+ if (err) return err;
+
+ off += 1024;
+ assert(off == PAGE_SIZE);
+
+ return False;
+}
+
+
+/* Get 37 integer registers (GPR0 .. GPR31, PC, CR, LR, CTR, XER) from
+ CHILD into the given array. Returns True if there is any kind of
+ error. */
+static
+Bool ptrace_get_iregs_pc_cr_lr_ctr_xer (
+ Child* child,
+ /*OUT*/ULong* iregs_pc_cr_lr_ctr_xer
+ )
+{
+ Int i, j;
+ Bool err;
+
+ for (i = GPR0; i <= GPR31; i++) {
+ j = i - GPR0;
+ assert(j >= 0 && j < 32);
+ err = ptrace_READ_GPR( child, i, &iregs_pc_cr_lr_ctr_xer[j] );
+ if (err) return err;
+ }
+
+ /* PC */
+ err = ptrace_READ_GPR( child, IAR, &iregs_pc_cr_lr_ctr_xer[32+0] );
+ if (err) return err;
+
+ /* CR */
+ err = ptrace_READ_GPR( child, CR, &iregs_pc_cr_lr_ctr_xer[32+1] );
+ if (err) return err;
+
+ /* LR */
+ err = ptrace_READ_GPR( child, LR, &iregs_pc_cr_lr_ctr_xer[32+2] );
+ if (err) return err;
+
+ /* CTR */
+ err = ptrace_READ_GPR( child, CTR, &iregs_pc_cr_lr_ctr_xer[32+3] );
+ if (err) return err;
+
+ /* XER */
+ err = ptrace_READ_GPR( child, XER, &iregs_pc_cr_lr_ctr_xer[32+4] );
+ if (err) return err;
+
+ return False;
+}
+
+
+/* Set CHILD's program counter to the given value. Returns True if
+ there is any kind of error. */
+static
+Bool ptrace_put_pc ( Child* child, ULong newpc )
+{
+ return ptrace_WRITE_GPR( child, IAR, newpc );
+}
+
+
+/* Set CHILD's R31 to the given value. Returns True if there is any
+ kind of error. */
+static
+Bool ptrace_put_r31 ( Child* child, ULong newr31 )
+{
+ return ptrace_WRITE_GPR( child, GPR31, newr31 );
+}
+
+
+/* ------ Instruction generators ------ */
+
+static UInt mkFormD ( UInt opc1, UInt r1, UInt r2, UInt imm )
+{
+ UInt theInstr;
+ assert(opc1 < 0x40);
+ assert(r1 < 0x20);
+ assert(r2 < 0x20);
+ imm = imm & 0xFFFF;
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) | (imm));
+ return theInstr;
+}
+static UInt mkFormX ( UInt opc1,
+ UInt r1, UInt r2, UInt r3, UInt opc2, UInt b0 )
+{
+ UInt theInstr;
+ assert(opc1 < 0x40);
+ assert(r1 < 0x20);
+ assert(r2 < 0x20);
+ assert(r3 < 0x20);
+ assert(opc2 < 0x400);
+ assert(b0 < 0x2);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ (r3<<11) | (opc2<<1) | (b0));
+ return theInstr;
+}
+static UInt mkFormXFX ( UInt r1, UInt f2, UInt opc2 )
+{
+ UInt theInstr;
+ assert(r1 < 0x20);
+ assert(f2 < 0x20);
+ assert(opc2 < 0x400);
+ switch (opc2) {
+ case 144: // mtcrf
+ assert(f2 < 0x100);
+ f2 = f2 << 1;
+ break;
+ case 339: // mfspr
+ case 371: // mftb
+ case 467: // mtspr
+ assert(f2 < 0x400);
+ // re-arrange split field
+ f2 = ((f2>>5) & 0x1F) | ((f2 & 0x1F)<<5);
+ break;
+ default: assert(0);
+ }
+ theInstr = ((31<<26) | (r1<<21) | (f2<<11) | (opc2<<1));
+ return theInstr;
+}
+static UInt mkFormMD ( UInt opc1, UInt r1, UInt r2,
+ UInt imm1, UInt imm2, UInt opc2 )
+{
+ UInt theInstr;
+ assert(opc1 < 0x40);
+ assert(r1 < 0x20);
+ assert(r2 < 0x20);
+ assert(imm1 < 0x40);
+ assert(imm2 < 0x40);
+ assert(opc2 < 0x08);
+ imm2 = ((imm2 & 0x1F) << 1) | (imm2 >> 5);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ ((imm1 & 0x1F)<<11) | (imm2<<5) |
+ (opc2<<2) | ((imm1 >> 5)<<1));
+ return theInstr;
+}
+static UInt mkFormXO ( UInt opc1, UInt r1, UInt r2,
+ UInt r3, UInt b10, UInt opc2, UInt b0 )
+{
+ UInt theInstr;
+ assert(opc1 < 0x40);
+ assert(r1 < 0x20);
+ assert(r2 < 0x20);
+ assert(r3 < 0x20);
+ assert(b10 < 0x2);
+ assert(opc2 < 0x200);
+ assert(b0 < 0x2);
+ theInstr = ((opc1<<26) | (r1<<21) | (r2<<16) |
+ (r3<<11) | (b10 << 10) | (opc2<<1) | (b0));
+ return theInstr;
+}
+
+static UInt gen_lis_r_N ( UInt r, UInt N ) {
+ return mkFormD(15, r, 0, N & 0xFFFF); /* lis r,r,N */
+}
+static UInt gen_ori_r_r_N ( UInt r, UInt N ) {
+ return mkFormD(24, r, r, N & 0xFFFF); /* ori r,r,N */
+}
+static UInt gen_addi_rd_rs_N ( UInt rd, UInt rs, UInt N ) {
+ assert(rs != 0);
+ return mkFormD(14, rd, rs, N & 0xFFFF); /* addi rd,rs,N */
+}
+static UInt gen_addis_rd_rs_N ( UInt rd, UInt rs, UInt N ) {
+ assert(rs != 0);
+ return mkFormD(15, rd, rs, N & 0xFFFF); /* addis rd,rs,N */
+}
+static UInt gen_crorc_6_6_6 ( void ) {
+ return 0x4CC63342; /* crorc 6,6,6 */
+}
+static UInt gen_mr_rd_rs ( UInt rd, UInt rs ) {
+ return mkFormX(31, rs, rd, rs, 444, 0); /* or rd,rs,ts */
+}
+static UInt gen_bl_next ( void ) {
+ return 0x48000005; /* bl .+4 */
+}
+static UInt gen_mflr_r ( UInt r ) {
+ return mkFormXFX(r, 8, 339); /* mflr r */
+}
+static UInt gen_mtlr_r ( UInt r ) {
+ return mkFormXFX(r, 8, 467); /* mtlr r */
+}
+static UInt gen_blr ( void ) {
+ return 0x4E800020; /* blr */
+}
+__attribute__((unused))
+static UInt gen_blrl ( void ) {
+ return 0x4E800021; /* blrl */
+}
+static UInt gen_add_r_N ( UInt r, UInt N ) {
+ return mkFormD(14, r, r, N & 0xFFFF); /* addi r,r,N */
+}
+static UInt gen_cmpli_cr7_r_N ( UInt r, UInt N ) {
+ return mkFormD(10, 7<<2, r, N & 0xFFFF); /* cmpli cr7,r,N */
+}
+static UInt gen_bne_cr7_delta ( UInt delta ) {
+ return 0x409E0000 | (delta & 0x0000FFFC); /* bne- cr7,delta */
+}
+__attribute__((unused))
+static UInt gen_beq_cr7_delta ( UInt delta ) {
+ return 0x419E0000 | (delta & 0x0000FFFC); /* beq- cr7,delta */
+}
+static UInt gen_sc ( void ) {
+ return 0x44000002; /* sc */
+}
+static UInt gen_lwz_rd_off_ra ( UInt rd, UInt off, UInt ra ) {
+ return mkFormD(32, rd, ra, off); /* lwz rd, off(ra) */
+}
+static UInt gen_add_rd_rL_rR (UInt rd, UInt rsrcL, UInt rsrcR ) {
+ return mkFormXO(31, rd, rsrcL, rsrcR, 0, 266, 0);
+}
+static UInt gen_subf_rd_rL_rR (UInt rd, UInt rsrcL, UInt rsrcR ) {
+ return mkFormXO(31, rd, rsrcL, rsrcR, 0, 40, 0);
+}
+
+static Int emit_insn ( UInt* code, Int ix, UInt insn ) {
+ code[ix++] = insn;
+ return ix;
+}
+static Int emit_li32 ( UInt* code, Int ix, UInt rd, UInt imm32 ) {
+ code[ix++] = gen_lis_r_N(rd, imm32 >> 16);
+ if (imm32 & 0xFFFF)
+ code[ix++] = gen_ori_r_r_N(rd, imm32 & 0xFFFF);
+ return ix;
+}
+static Int emit_dosc ( UInt* code, Int ix ) {
+ /* Generate code to do a syscall and continue at the next insn.
+ Note: trashes r29. */
+ code[ix++] = gen_crorc_6_6_6();
+ code[ix++] = gen_bl_next();
+ code[ix++] = gen_mflr_r(29);
+ code[ix++] = gen_add_r_N(29,16);
+ code[ix++] = gen_mtlr_r(29);
+ code[ix++] = gen_sc();
+ return ix;
+}
+
+/* Generate 64-bit insns */
+static Int emit_li64 ( UInt* code, Int ix, UInt rd, ULong imm64 ) {
+ if (imm64 >= 0xFFFFFFFF80000000ULL || imm64 < 0x80000000ULL) {
+ // sign-extendable from 32 bits
+ // addis rd,r0,(imm64>>16) => lis rd, (imm64>>16)
+ code[ix++] = mkFormD(15, rd, 0, (imm64>>16) & 0xFFFF);
+ // ori rd, rd, (imm64 & 0xFFFF)
+ code[ix++] = mkFormD(24, rd, rd, imm64 & 0xFFFF);
+ } else {
+ // load high word
+ // lis rd, (imm64>>48) & 0xFFFF
+ code[ix++] = mkFormD(15, rd, 0, (imm64>>48) & 0xFFFF);
+ // ori rd, rd, (imm64>>32) & 0xFFFF
+ code[ix++] = mkFormD(24, rd, rd, (imm64>>32) & 0xFFFF);
+ // shift rd low word to high word => rldicr
+ code[ix++] = mkFormMD(30, rd, rd, 32, 31, 1);
+ // load low word
+ // oris rd, rd, (imm64>>16) & 0xFFFF
+ code[ix++] = mkFormD(25, rd, rd, (imm64>>16) & 0xFFFF);
+ // ori rd, rd, (imm64) & 0xFFFF
+ code[ix++] = mkFormD(24, rd, rd, imm64 & 0xFFFF);
+ }
+ return ix;
+}
+static UInt gen_ld_rd_off_ra ( UInt rd, UInt off, UInt ra ) {
+ assert((off & 3) == 0);
+ return mkFormD(58, rd, ra, off); /* ld rd, off(ra) */
+}
+
+static UInt compute_adler32 ( void* addr, UWord len )
+{
+ UInt s1 = 1;
+ UInt s2 = 0;
+ UChar* buf = (UChar*)addr;
+ while (len > 0) {
+ s1 += buf[0];
+ s2 += s1;
+ s1 %= 65521;
+ s2 %= 65521;
+ len--;
+ buf++;
+ }
+ return (s2 << 16) + s1;
+}
+
+
+/* -------------------------------------------------------------- */
+/* --- --- */
+/* --- BEGIN write bootstrap loader into child process --- */
+/* --- --- */
+/* -------------------------------------------------------------- */
+
+/* From using truss, __loadx is used to load a module into a running
+ process in 32-bit mode, and kload in 64-bit mode. __loadx is
+ simple: it returns a pointer to a standard function descriptor to
+ the entry point.
+
+ kload isn't: it returns a pointer which, from examination of
+ /proc/<pid>/maps, doesn't point into the loaded object image. It
+ does appear to point to some kind of struct, words [4] and [6] of
+ which do point into the loaded object image. From comparison with
+ /proc/<pid>/maps, they are respectively the actual VMAs of the text
+ and data sections of the loaded module.
+
+ Knowing this it is possible to find the entry point descriptor:
+ - figure out where the auxiliary header is. We have a pointer to
+ the start of the mapped text section, so just add the size of
+ the XCOFF file header to that.
+ - figure out the data bias. We know the avma of the data section;
+ and the svma of it is in the auxiliary header in field
+ o_data_start. The data bias is therefore the difference between
+ them.
+ - The auxiliary header also gives the svma of the entry point
+ descriptor; (o_entry); therefore its avma is o_entry + the data
+ bias.
+
+ ULong* kr = (result of kload)
+ // r3 is this value
+
+ AOUTHDR* aux = kr[4] (text_avma) + 24 (size of XCOFF file header);
+ // ld 9,32(3) kr[4]
+ // addi 9,9,24 + 24
+ // 9=aux
+
+ ULong data_avma = kr[6];
+ // ld 11,48(3) kr[6]
+ // 9=aux
+ // 11=data_avma
+
+ ULong data_svma = aux->o_data_start;
+ // ld 0,16(9) aux->o_data_start
+ // 9=aux
+ // 11=data_avma
+ // 0=data_svma
+
+ ULong data_bias = data_avma - data_svma;
+ // subf 11,0,11
+ // 9=aux
+ // 11=data_bias
+ // 0=data_svma
+
+ ULong ent_svma = (ULong)aux->o_entry;
+ // ld 9,80(9) aux->o_entry
+ // 9=ent_svma
+ // 11=data_bias
+ // 0=data_svma
+
+ ULong ent_avma = ent_svma + data_bias;
+ // add 10,9,11
+ // 9=ent_svma
+ // 11=data_bias
+ // 0=data_svma
+ // 10=ent_avma
+*/
+
+#define LAUNCHER_SYSENT_SIZE 100000
+static char sysent_buf[LAUNCHER_SYSENT_SIZE];
+
+/* The executable loaded must have no more than N_LDINFOs direct
+ shared-object dependencies. Just increase this value and rebuild,
+ if you ever run out. We have two arrays, one for each kind of
+ target process. */
+#define N_LDINFOs 1000
+static struct __ld_info32 ld_info32_array[N_LDINFOs];
+static struct __ld_info64 ld_info64_array[N_LDINFOs];
+
+
+static
+UChar* bootstrap_errmsg
+ = "\nvalgrind: bootstrap loader failed. Cannot continue.\n\n";
+
+
+/* Write the bootstrap loader and associated data (iow, an
+ AIX5Bootblock structure) into CHILD, so that when
+ ptrace-detached, it will continue by loading TOOLNAME and
+ continuing with that. Returns NULL on success or an error string
+ on failure. */
+
+static char* write_bootstrap_loader_into_child
+ ( Child* child, char* toolfile )
+{
+ /* ------ STEP 1: Fill in most parts of the bootblock. ------ */
+
+ /* All parts except code[], off_zdata and len_zdata. */
+
+ AIX5Bootblock block;
+
+ VG_(debugLog)(1, "launcher", "parent: size of bootblock is %ld\n",
+ sizeof(AIX5Bootblock));
+
+ assert(IS_8_ALIGNED( sizeof(AIX5Bootblock) ));
+
+ memset(&block, 0, sizeof(block));
+
+ /* --- OFFSETS--- */
+
+ /* off_zdata not known yet */
+ /* len_zdata not known yet */
+
+ /* --- SYSCALL NUMBERS --- */
+
+ /* Read some system call entries from the child's
+ /proc/<pid>/sysent file. */
+ char sysent_name[50];
+ FILE* sysent_file;
+ int sysent_used = 0;
+ prsysent_t* sysent_hdr;
+ int i;
+
+ VG_(debugLog)(1, "launcher",
+ "parent: reading child's /proc/../sysent\n");
+
+ sprintf(sysent_name, "/proc/%d/sysent", child->pid);
+ sysent_file = fopen(sysent_name, "r");
+ if (sysent_file == NULL)
+ return "Can't open child's /proc/<pid>/sysent file";
+
+ sysent_used = fread(sysent_buf, 1, LAUNCHER_SYSENT_SIZE, sysent_file);
+ if (sysent_used == 0)
+ return "Error reading child's /proc/<pid>/sysent file";
+ if (sysent_used == LAUNCHER_SYSENT_SIZE)
+ return "LAUNCHER_SYSENT_SIZE is too low; increase and recompile";
+ assert(sysent_used > 0 && sysent_used < LAUNCHER_SYSENT_SIZE);
+
+ fclose(sysent_file);
+
+ sysent_hdr = (prsysent_t*)&sysent_buf[0];
+
+ /* Find some syscall numbers for the child. */
+ Int __nr__getpid = -1;
+ Int __nr_kwrite = -1;
+ Int __nr___loadx = -1; /* 32-bit child only */
+ Int __nr_kload = -1; /* 64-bit child only */
+ Int __nr__exit = -1;
+ Int __nr_open = -1;
+ Int __nr_kread = -1;
+ Int __nr_close = -1;
+
+ for (i = 0; i < sysent_hdr->pr_nsyscalls; i++) {
+ char* name = &sysent_buf[ sysent_hdr->pr_syscall[i].pr_nameoff ];
+ int nmbr = sysent_hdr->pr_syscall[i].pr_number;
+ if (0 == strcmp(name, "_getpid"))
+ __nr__getpid = nmbr;
+ if (0 == strcmp(name, "kwrite"))
+ __nr_kwrite = nmbr;
+ if (0 == strcmp(name, "__loadx"))
+ __nr___loadx = nmbr;
+ if (0 == strcmp(name, "kload"))
+ __nr_kload = nmbr;
+ if (0 == strcmp(name, "_exit"))
+ __nr__exit = nmbr;
+ if (0 == strcmp(name, "open"))
+ __nr_open = nmbr;
+ if (0 == strcmp(name, "kread"))
+ __nr_kread = nmbr;
+ if (0 == strcmp(name, "close"))
+ __nr_close = nmbr;
+ }
+
+ if (__nr__getpid == -1
+ || __nr_kwrite == -1
+ || ((!child->is64) && __nr___loadx == -1)
+ || ((child->is64) && __nr_kload == -1)
+ || __nr__exit == -1
+ || __nr_open == -1
+ || __nr_kread == -1
+ || __nr_close == -1)
+ return "can't establish syscall #s needed for bootstrap";
+
+ block.__NR_getpid = __nr__getpid;
+ block.__NR_write = __nr_kwrite;
+ block.__NR_exit = __nr__exit;
+ block.__NR_open = __nr_open;
+ block.__NR_read = __nr_kread;
+ block.__NR_close = __nr_close;
+
+ /* --- REGS --- */
+
+ /* Continue by copying out the child's current integer register
+ state. */
+ VG_(debugLog)(1, "launcher",
+ "parent: reading child's int registers\n");
+
+ Bool err = ptrace_get_iregs_pc_cr_lr_ctr_xer
+ ( child, &block.iregs_pc_cr_lr_ctr_xer[0] );
+ if (err)
+ return "read of child's int registers failed";
+
+ /* --- CODE --- */
+
+ /* We'll leave that till last (is difficult). */
+
+ /* --- ERRMSG --- */
+
+ if (1 + strlen(bootstrap_errmsg) > N_BOOTBLOCK_ERRMSG)
+ return "bootstrap error message won't fit in bootblock";
+
+ for (i = 0; bootstrap_errmsg[i]; i++)
+ block.errmsg[i] = bootstrap_errmsg[i];
+ assert(i <= N_BOOTBLOCK_ERRMSG);
+
+ /* --- TOOLFILE --- */
+
+ if (1 + strlen(toolfile) > N_BOOTBLOCK_TOOLFILE)
+ return "tool file path is too long, won't fit in bootblock";
+
+ for (i = 0; toolfile[i]; i++)
+ block.toolfile[i] = toolfile[i];
+ assert(i <= N_BOOTBLOCK_TOOLFILE);
+
+
+ /* ------ STEP 2: Generate the bootblock code. ------ */
+
+ VG_(debugLog)(1, "launcher",
+ "parent: creating bootblock code ..\n");
+
+ /* This is the tricky bit. The resulting code has to be position
+ independent since we don't yet know where it's going to be
+ placed. The code is entered with r31 pointing at the bootblock.
+ r29-31 are callee-saved, so presumably they don't get trashed
+ across syscalls. r30 is used as scratch, and r29 is also used
+ as scratch by 'emit_dosc'. */
+
+ /* Preliminaries: to do a syscall, we have to do 'crorc 6,6,6' and
+ put the continuation address in LR, which is a bit of a drag.
+ Hence the following macro:
+
+ SYSCALL_SEQUENCE = crorc 6,6,6
+ bl .+4
+ mflr 29
+ addi 29,29,16
+ mtlr 29
+ sc
+
+ Also: 'imm' is an imaginary instruction to get a 32-bit literal into
+ a register. It's really li followed by oris.
+ */
+
+ /* So, the code. First, prepare for and do a _loadx syscall, to
+ get the tool aboard:
+ addis 1, 1, -4
+ imm 2, __NR__loadx
+ imm 3, VKI_DL_LOAD
+ mr 4, 1
+ imm 5, 3<<16
+ addi 6, 31, offset_of_toolfile
+ mr 7, 4
+ mr 8, 4
+ mr 9, 4
+ mr 10,4
+ SYSCALL_SEQUENCE
+ addis 1, 1, 4
+
+ If the syscall failed, r4 will be nonzero. Branch elsewhere if so.
+ cmpi 4, 0
+ bne error
+ */
+ int ix = 0;
+
+# if 1
+# define TRAP \
+ do { \
+ ix=emit_insn( &block.code[0],ix, 0x7fe00008 ); } \
+ while (0)
+# define SEGV \
+ do { \
+ if (child->is64) { \
+ ix=emit_li64( &block.code[0],ix, 28,0); \
+ ix=emit_insn( &block.code[0],ix, \
+ gen_ld_rd_off_ra(27,0xfffc,28)); \
+ } else { \
+ ix=emit_li32( &block.code[0],ix, 28,0); \
+ ix=emit_insn( &block.code[0],ix, \
+ gen_lwz_rd_off_ra(27,0xffff,28)); \
+ } \
+ } while (0)
+# define ILL \
+ do { \
+ ix=emit_insn( &block.code[0],ix, 0 ); } \
+ while (0)
+# endif
+
+ if (child->is64) {
+
+ /* 64-bit sequence */
+ /* Set up for 'sys_kload(toolfile, 0, 0)'
+ li64 2, __NR_kload
+ addi 3, 31, offset_toolfile
+ li64 4, 0
+ mr 5, 4
+ mr 6, 4
+ mr 7, 4
+ mr 8, 4
+ mr 9, 4
+ mr 10,4
+ SYSCALL_SEQUENCE
+
+ // if kload failed, r3 will hold zero
+ cmpdi 3,0
+ beq error
+
+ // from result of kload, figure out entry point address
+ // as described above
+ ld 9,32(3)
+ addi 9,9,24
+ ld 11,48(3)
+ ld 0,16(9)
+ subf 11,0,11
+ ld 9,80(9)
+ add 10,9,11 // r10 is entry descriptor avma
+
+ void(*fn)(void*) = (void(*)(void*))ent_avma;
+ fn();
+ ld 9,0(10)
+ mtlr 9
+ ld 2,8(10)
+ ld 11,16(10)
+ mr 3,31 // arg to pass
+ blr
+ */
+ ix = emit_li64( &block.code[0],ix, 2, __nr_kload );
+ ix = emit_insn( &block.code[0],ix,
+ gen_addi_rd_rs_N(3,31,offsetof(AIX5Bootblock,toolfile)));
+ ix = emit_li64( &block.code[0],ix, 4, 0 );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(5,4) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(6,4) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(7,4) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(8,4) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(9,4) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(10,4) );
+ ix = emit_dosc( &block.code[0],ix );
+
+ ix = emit_insn( &block.code[0],ix, gen_cmpli_cr7_r_N(3,0) );
+ Int ix_beq = ix; /* Patch this later */
+ ix = emit_insn( &block.code[0],ix, 0 );
+
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 9, 32, 3 ) );
+ ix = emit_insn( &block.code[0],ix, gen_addi_rd_rs_N( 9, 9, 24 ) );
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 11, 48, 3 ) );
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 0, 16, 9 ) );
+ ix = emit_insn( &block.code[0],ix, gen_subf_rd_rL_rR( 11, 0, 11 ) );
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 9, 80, 9 ) );
+ ix = emit_insn( &block.code[0],ix, gen_add_rd_rL_rR( 10, 9, 11 ) );
+
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 9, 0, 10 ) );
+ ix = emit_insn( &block.code[0],ix, gen_mtlr_r( 9 ) );
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 2, 8, 10 ) );
+ ix = emit_insn( &block.code[0],ix, gen_ld_rd_off_ra( 11, 16, 10 ) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(3, 31) );
+ ix = emit_insn( &block.code[0],ix, gen_blr() );
+ TRAP;
+ assert(ix <= N_BOOTBLOCK_INSNS);
+
+ /* error:
+ We get here if the kload syscall fails. Write a terse message
+ to stderr saying so, then exit, carrying the error code of the
+ kload call. The latter is saved in r30 across the write() call.
+ mr 30,4 (4 contains the error result from kload)
+ imm 2, __NR_write
+ imm 3,2 (2=stderr)
+ addi 4, 31, offset_of_errormsg
+ imm 5, length(errormsg)
+ SYSCALL_SEQUENCE
+ imm 2, __NR_exit
+ mr 3, 30
+ SYSCALL_SEQUENCE
+
+ Well, we shouldn't be alive here. But just in case we do, put
+ a zero word, which will generate SIGILL and definitely stop the
+ party.
+ .word 0
+ */
+ /* fill in the conditional jump */
+ (void)emit_insn( &block.code[0],ix_beq,
+ gen_beq_cr7_delta(4*(ix-ix_beq)));
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(30,4) );
+ ix = emit_li64( &block.code[0],ix, 2, __nr_kwrite);
+ ix = emit_li64( &block.code[0],ix, 3, 2);
+ ix = emit_insn( &block.code[0],ix,
+ gen_addi_rd_rs_N(4,31,offsetof(AIX5Bootblock,errmsg)));
+ ix = emit_li64( &block.code[0],ix, 5, strlen(bootstrap_errmsg));
+ ix = emit_dosc( &block.code[0],ix );
+ ix = emit_li64( &block.code[0],ix, 2, __nr__exit);
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(3,30) );
+ ix = emit_dosc( &block.code[0],ix );
+ ix = emit_insn( &block.code[0],ix, 0 );
+ assert(ix <= N_BOOTBLOCK_INSNS);
+
+ } else {
+
+ /* 32-bit sequence */
+ ix = emit_insn( &block.code[0],ix,
+ gen_addis_rd_rs_N(1,1,-4) );
+ ix = emit_li32( &block.code[0],ix, 2, __nr___loadx );
+ ix = emit_li32( &block.code[0],ix, 3, VKI_DL_LOAD );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(4,1) );
+ ix = emit_li32( &block.code[0],ix, 5, 3<<16 );
+ ix = emit_insn( &block.code[0],ix,
+ gen_addi_rd_rs_N(6,31,offsetof(AIX5Bootblock,toolfile)));
+ ix = emit_li32( &block.code[0],ix, 7, 0);
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(8,7) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(9,7) );
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(10,7) );
+ ix = emit_dosc( &block.code[0],ix );
+ ix = emit_insn( &block.code[0],ix,
+ gen_addis_rd_rs_N(1,1,4) );
+ ix = emit_insn( &block.code[0],ix, gen_cmpli_cr7_r_N(4,0) );
+ Int ix_bne = ix; /* Patch this later */
+ ix = emit_insn( &block.code[0],ix, 0 );
+ assert(ix <= N_BOOTBLOCK_INSNS);
+
+ /* Looks like we're good. r3 now points at a standard function
+ descriptor for the entry point of the module we just loaded.
+ Load r2/r11 from the descriptor, then put the address of the
+ bootstrap area in r3, and jump to the code address. Not a
+ call -- we don't intend to return here. Note, must use r30
+ as scratch here since r31 is live.
+ lwz 30, 0(3)
+ mtlr 30
+ lwz 2, 4(3)
+ lwz 11, 8(3)
+ mr 3, 31
+ blr
+ */
+ ix = emit_insn( &block.code[0],ix, gen_lwz_rd_off_ra(30, 0, 3));
+ ix = emit_insn( &block.code[0],ix, gen_mtlr_r(30) );
+ ix = emit_insn( &block.code[0],ix, gen_lwz_rd_off_ra( 2, 4, 3));
+ ix = emit_insn( &block.code[0],ix, gen_lwz_rd_off_ra(11, 8, 3));
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(3,31));
+ ix = emit_insn( &block.code[0],ix, gen_blr() );
+ assert(ix <= N_BOOTBLOCK_INSNS);
+
+ /* error:
+ We get here if the _loadx syscall fails. Write a terse message
+ to stderr saying so, then exit, carrying the error code of the
+ _loadx call. The latter is saved in r30 across the write() call.
+ mr 30,4 (4 contains the error result from __loadx)
+ imm 2, __NR_write
+ imm 3,2 (2=stderr)
+ addi 4, 31, offset_of_errormsg
+ imm 5, length(errormsg)
+ SYSCALL_SEQUENCE
+ imm 2, __NR_exit
+ mr 3, 30
+ SYSCALL_SEQUENCE
+
+ Well, we shouldn't be alive here. But just in case we do, put
+ a zero word, which will generate SIGILL and definitely stop the
+ party.
+ .word 0
+ */
+ /* fill in the conditional jump */
+ (void)emit_insn( &block.code[0],ix_bne,
+ gen_bne_cr7_delta(4*(ix-ix_bne)));
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(30,4) );
+ ix = emit_li32( &block.code[0],ix, 2, __nr_kwrite);
+ ix = emit_li32( &block.code[0],ix, 3, 2);
+ ix = emit_insn( &block.code[0],ix,
+ gen_addi_rd_rs_N(4,31,offsetof(AIX5Bootblock,errmsg)));
+ ix = emit_li32( &block.code[0],ix, 5, strlen(bootstrap_errmsg));
+ ix = emit_dosc( &block.code[0],ix );
+ ix = emit_li32( &block.code[0],ix, 2, __nr__exit);
+ ix = emit_insn( &block.code[0],ix, gen_mr_rd_rs(3,30) );
+ ix = emit_dosc( &block.code[0],ix );
+ ix = emit_insn( &block.code[0],ix, 0 );
+ assert(ix <= N_BOOTBLOCK_INSNS);
+
+ }
+
+ VG_(debugLog)(1, "launcher",
+ "parent: .. %d instructions emitted\n", ix);
+
+# if 0
+ for (i = 0; i < ix; i++) {
+ if (0) printf("code[%d] = 0x%08x\n", i, block.code[i]);
+ char buff[100];
+ sprintf(buff, "echo 0x%x | ./ascii2u32", block.code[i]);
+ system(buff);
+ }
+# endif
+
+ /* ------ STEP 3: Find out where to place stuff in the child. ------ */
+
+ /* We'll have to hijack some space in the data section of the main
+ executable. First off, find the first and last pages of said
+ data section. We can't use the text section, because the child
+ is unable to write to its own text section, to undo the
+ compression of the hijacked page. We can't use the stack
+ because it appears, although stacks in AIX 5.3 appear to be
+ executable, the child gets SIGKILL'd after the ptrace detach if
+ its program counter is pointing into its stack. The data
+ section of the main executable appears to be executable, though,
+ so use that.
+
+ This requires wading though the list of loaded modules in the
+ child, to find the main executable. */
+
+ long lr;
+ if (child->is64) {
+ lr = ptrace64(PT_LDINFO, (ULong)child->pid,
+ (ULong)(UWord)&ld_info64_array,
+ sizeof(ld_info64_array), 0/*ignored*/);
+ } else {
+ lr = ptrace64(PT_LDINFO, (ULong)child->pid,
+ (ULong)(UWord)&ld_info32_array,
+ sizeof(ld_info32_array), 0/*ignored*/);
+ }
+ VG_(debugLog)(1, "launcher", "parent: ptrace PT_LDINFO got %ld\n", lr);
+ if (lr == -1)
+ return "ptrace(PT_LDINFO, ...) failed";
+ else
+ assert(lr == 0);
+
+ /* We have to iterate through the entire array to close the object
+ files that this has opened. Duh. */
+ if (child->is64) {
+ char* p = (char*)&ld_info64_array;
+ while (1) {
+ struct __ld_info64* info = (struct __ld_info64*)p;
+
+ VG_(debugLog)(1,
+ "launcher", "parent: text 0x%llx-0x%llx data 0x%llx-0x%llx\n",
+ (Addr64)info->ldinfo_textorg,
+ (Addr64)info->ldinfo_textorg + (Addr64)info->ldinfo_textsize,
+ (Addr64)info->ldinfo_dataorg,
+ (Addr64)info->ldinfo_dataorg + (Addr64)info->ldinfo_datasize
+ );
+
+ Int ir = close(info->_file._ldinfo_fd);
+ assert(ir == 0);
+ /* The last entry in the array is marked by having a zero
+ offset-link field. */
+ if (info->ldinfo_next == 0)
+ break;
+ p += info->ldinfo_next;
+ }
+ } else {
+ char* p = (char*)&ld_info32_array;
+ while (1) {
+ struct __ld_info32* info = (struct __ld_info32*)p;
+
+ VG_(debugLog)(1,
+ "launcher", "parent: text 0x%llx-0x%llx data 0x%llx-0x%llx\n",
+ (Addr64)(UWord)info->ldinfo_textorg,
+ (Addr64)(UWord)info->ldinfo_textorg + info->ldinfo_textsize,
+ (Addr64)(UWord)info->ldinfo_dataorg,
+ (Addr64)(UWord)info->ldinfo_dataorg + info->ldinfo_datasize
+ );
+
+ Int ir = close(info->_file._ldinfo_fd);
+ assert(ir == 0);
+ /* The last entry in the array is marked by having a zero
+ offset-link field. */
+ if (info->ldinfo_next == 0)
+ break;
+ p += info->ldinfo_next;
+ }
+ }
+
+ /* The first entry in that array -- and it is guaranteed to to have
+ at least one entry -- is that of the the main executable. We
+ need to put our bootblock in one of the pages the main
+ executable's data segment. The abovementioned AIX 'ptrace'
+ documentation says:
+
+ To allow a debugger to generate code more easily (in order to
+ handle fast trap instructions, for example), memory from the
+ end of the main program up to the next segment boundary can be
+ modified. That memory is read-only to the process but can be
+ modified by the debugger.
+
+ which would be great if it actually worked reliably; but not so.
+ On AIX 5.2 this is true, but on 5.3 it appears to be impossible
+ to read or write (via ptrace) anything beyond the last page of
+ the executable's text section.
+ */
+ Addr64 c_cand_text_first, c_cand_text_last;
+
+ if (child->is64) {
+ c_cand_text_first
+ = (Addr64)ld_info64_array[0].ldinfo_dataorg;
+ c_cand_text_last
+ = c_cand_text_first
+ + ld_info64_array[0].ldinfo_datasize - 1;
+ } else {
+ c_cand_text_first
+ = (Addr64)(UWord)ld_info32_array[0].ldinfo_dataorg;
+ c_cand_text_last
+ = c_cand_text_first
+ + ld_info32_array[0].ldinfo_datasize - 1;
+ }
+
+ VG_(debugLog)(1, "launcher",
+ "parent: candidate first 0x%llx last 0x%llx\n",
+ c_cand_text_first, c_cand_text_last);
+
+ /* Page align the text section limits. */
+ Addr64 c_first_page = ROUNDDN_PAGE( c_cand_text_first );
+ Addr64 c_last_page = ROUNDDN_PAGE( c_cand_text_last );
+
+ /* It's safe to try out any page p satisfying
+ c_first_page <= p && p <= c_last_page
+ */
+
+ /* CHOOSE A PAGE. Do a test compression of available pages until
+ we find one for which compression yields enough free space to
+ put the bootblock in. */
+ Int zsize;
+ Addr64 c_chosen_page = 0;
+ Addr64 c_page;
+ UChar p_page_unzbuf[PAGE_SIZE];
+ UChar p_page_unzbuf2[PAGE_SIZE];
+ UChar p_page_zbuf[PAGE_SIZE + 384 + 8/*paranoia*/];
+
+ for (c_page = c_first_page; c_page <= c_last_page; c_page += PAGE_SIZE) {
+ assert(IS_PAGE_ALIGNED(c_page));
+ err = ptrace_read_page( child, p_page_unzbuf, c_page );
+ if (err)
+ return "read of page from child failed(1)";
+ zsize = Huffman_Compress(p_page_unzbuf, p_page_zbuf, PAGE_SIZE);
+ assert(zsize >= 0 && zsize <= PAGE_SIZE + 384);
+
+ /* Do a test decompression, to check the compress/decompress
+ cycle works properly */
+ Huffman_Uncompress( p_page_zbuf, p_page_unzbuf2,
+ PAGE_SIZE + 384, PAGE_SIZE);
+ assert(0 == memcmp(p_page_unzbuf, p_page_unzbuf2, PAGE_SIZE));
+
+ VG_(debugLog)(1, "launcher",
+ "parent: page 0x%llx has %d usable bytes\n",
+ c_page, PAGE_SIZE - zsize);
+
+ if ( (Int)(PAGE_SIZE - zsize)
+ >= (Int)sizeof(AIX5Bootblock)+8/*paranoia*/) {
+ c_chosen_page = c_page;
+ break;
+ }
+ }
+
+ if (c_chosen_page == NULL)
+ return "can't find a page with enough free space for bootblock";
+
+ /* Compress the chosen page, leaving the compressed data at the
+ start of the page, and put the bootblock at the end of the
+ page. */
+
+ VG_(debugLog)(1, "launcher",
+ "parent: reading page at 0x%llx\n", c_chosen_page);
+
+ err = ptrace_read_page( child, p_page_unzbuf, c_chosen_page );
+ if (err)
+ return "read of page from child failed(2)";
+
+ block.adler32 = compute_adler32( p_page_unzbuf, PAGE_SIZE );
+ VG_(debugLog)(1, "launcher",
+ "parent: adler32 of unz page is 0x%x\n", block.adler32);
+
+ memset(p_page_zbuf, 0, sizeof(p_page_zbuf));
+ zsize = Huffman_Compress(p_page_unzbuf, p_page_zbuf, PAGE_SIZE);
+ assert(zsize >= 0 && zsize <= PAGE_SIZE + 384);
+
+ assert(PAGE_SIZE - zsize >= sizeof(AIX5Bootblock)+8/*paranoia*/);
+
+ UChar* p_dst = p_page_zbuf + PAGE_SIZE - sizeof(AIX5Bootblock);
+ Addr64 c_dst = c_chosen_page + PAGE_SIZE - sizeof(AIX5Bootblock);
+ assert(IS_8_ALIGNED(c_dst));
+
+ VG_(debugLog)(1, "launcher",
+ "parent: free space starts at 0x%llx in child\n",
+ c_chosen_page + zsize);
+ VG_(debugLog)(1, "launcher",
+ "parent: bootblock will be at 0x%llx in child\n",
+ c_dst);
+
+ *(AIX5Bootblock*)p_dst = block;
+
+ VG_(debugLog)(1, "launcher",
+ "parent: writing page at 0x%llx\n", c_chosen_page);
+
+ err = ptrace_write_page( child, c_chosen_page, p_page_zbuf );
+ if (err)
+ return "write of page to child failed";
+
+ /* Do a test read back to ensure ptrace didn't screw up. */
+
+ err = ptrace_read_page( child, p_page_unzbuf2, c_chosen_page );
+ if (err)
+ return "test read back of boot page failed (1)";
+ if (0 != memcmp(p_page_zbuf, p_page_unzbuf2, PAGE_SIZE))
+ return "test read back of boot page failed (2)";
+
+ /* Finally .. set the program counter so that when we detach, our
+ magic stub is run, not the original program. */
+
+ VG_(debugLog)(1, "launcher",
+ "parent: set child's pc to 0x%llx\n",
+ c_dst + offsetof(AIX5Bootblock,code) );
+ err = ptrace_put_pc ( child, c_dst + offsetof(AIX5Bootblock,code) );
+ if (err)
+ return "write of new initial pc into child failed";
+
+ VG_(debugLog)(1, "launcher",
+ "parent: set child's r31 to 0x%llx\n", c_dst);
+ err = ptrace_put_r31 ( child, c_dst );
+ if (err)
+ return "write of new r31 into child failed";
+
+ return NULL; /* success */
+}
+
+
+/* -------------------------------------------------------------- */
+/* --- --- */
+/* --- END write bootstrap loader into child process --- */
+/* --- --- */
+/* -------------------------------------------------------------- */
+
+static void barf ( int exitcode, char* argv0, char* msg )
+{
+ fprintf(stderr, "%s: %s\n", argv0, msg);
+ exit(exitcode);
+}
+
+int main ( int argc, char** argv, char** envp )
+{
+ Child child;
+ Int i, loglevel;
+ const char *toolname = NULL;
+ char *clientname = NULL;
+
+ /* First, look in our own /proc/<pid>/sysent file to find
+ the syscall numbers for kwrite and _getpid. These are needed
+ to make the VG_(debugLog) usable. We'll temporarily use
+ the sysent_buf used by write_bootstrap_loader_into_child for this
+ purpose. */
+
+ char sysent_name[50];
+ FILE* sysent_file;
+ int sysent_used = 0;
+ prsysent_t* sysent_hdr;
+
+ child.pid = 0;
+ child.is64 = False;
+
+ sprintf(sysent_name, "/proc/%d/sysent", getpid());
+ sysent_file = fopen(sysent_name, "r");
+ if (sysent_file == NULL)
+ barf(1, argv[0], "Can't open my own /proc/<pid>/sysent file");
+
+ sysent_used = fread(sysent_buf, 1, LAUNCHER_SYSENT_SIZE, sysent_file);
+ if (sysent_used == 0)
+ barf(1, argv[0], "Error reading my own /proc/<pid>/sysent file");
+ if (sysent_used == LAUNCHER_SYSENT_SIZE)
+ barf(1, argv[0], "LAUNCHER_SYSENT_SIZE is too low; increase and recompile");
+ assert(sysent_used > 0 && sysent_used < LAUNCHER_SYSENT_SIZE);
+
+ fclose(sysent_file);
+
+ sysent_hdr = (prsysent_t*)&sysent_buf[0];
+
+ /* Find some syscall numbers for the child. Note, we copy them
+ from our own /proc/../sysent file, which isn't really right. */
+ Word __nr__getpid = -1;
+ Word __nr_kwrite = -1;
+ for (i = 0; i < sysent_hdr->pr_nsyscalls; i++) {
+ char* name = &sysent_buf[ sysent_hdr->pr_syscall[i].pr_nameoff ];
+ int nmbr = sysent_hdr->pr_syscall[i].pr_number;
+ if (0 == strcmp(name, "_getpid"))
+ __nr__getpid = nmbr;
+ if (0 == strcmp(name, "kwrite"))
+ __nr_kwrite = nmbr;
+ }
+ if (__nr__getpid == -1 || __nr_kwrite == -1)
+ barf(1, argv[0], "can't establish syscall #s needed for startup");
+
+ /* "Tell" m_vkiscnums about them */
+ __NR_getpid = __nr__getpid;
+ __NR_write = __nr_kwrite;
+
+ /* Right, now we're safe to start the debug logging system. */
+ /* Start the debugging-log system ASAP. First find out how many
+ "-d"s were specified. This is a pre-scan of the command line.
+ At the same time, look for the tool name. */
+ loglevel = 0;
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-') {
+ clientname = argv[i];
+ break;
+ }
+ if (0 == strcmp(argv[i], "--")) {
+ if (i+1 < argc)
+ clientname = argv[i+1];
+ break;
+ }
+ if (0 == strcmp(argv[i], "-d"))
+ loglevel++;
+ if (0 == strncmp(argv[i], "--tool=", 7))
+ toolname = argv[i] + 7;
+ }
+
+ /* ... and start the debug logger. Now we can safely emit logging
+ messages all through startup. */
+ VG_(debugLog_startup)(loglevel, "Stage 1");
+
+ /* Make sure we know which tool we're using */
+ if (toolname) {
+ VG_(debugLog)(1, "launcher", "tool '%s' requested\n", toolname);
+ } else {
+ VG_(debugLog)(1, "launcher",
+ "no tool requested, defaulting to 'memcheck'\n");
+ toolname = "memcheck";
+ }
+
+ /* Do some preliminary sanity checks */
+ long pagesize = sysconf(_SC_PAGESIZE);
+ if (pagesize != 4096)
+ barf(1, argv[0], "config error: sysconf(_SC_PAGESIZE) is not 4096");
+
+ assert(PAGE_SIZE == 4096); /* stay sane */
+
+ const char* valgrind_lib = VG_LIBDIR;
+
+ /* If there is no program to run, which will be the case if the
+ user just does "valgrind --help", etc, run a dummy do-nothing
+ program so at least the tool can get started and handle the
+ --help/--version etc. It spots the fact that this is a dummy
+ program and acts like it was started with no program, hence
+ behaving the same as the Linux ports would have. */
+ if (clientname == NULL) {
+ Int j;
+ char** new_argv;
+ const char* noop_exe_name = "no_op_client_for_valgrind";
+ const char* up_n_bindir = "/../../bin";
+ clientname = malloc(strlen(valgrind_lib) + strlen(up_n_bindir)
+ + 2 + strlen(noop_exe_name));
+ if (clientname == NULL) {
+ fprintf(stderr,"%s: malloc of clientname failed\n", argv[0]);
+ return 1;
+ }
+ sprintf(clientname, "%s%s/%s", valgrind_lib, up_n_bindir, noop_exe_name);
+ /* now we have to add it to the end of argv, which means making
+ that one word longer. How tedious. */
+ for (j = 0; argv[j]; j++)
+ ;
+ j += 2;
+ new_argv = calloc(j, sizeof(char*));
+ if (new_argv == NULL) {
+ fprintf(stderr,"%s: malloc of new_argv failed\n", argv[0]);
+ return 1;
+ }
+ for (i = 0; i < j-2; i++)
+ new_argv[i] = argv[i];
+ new_argv[j-2] = clientname;
+ assert(new_argv[j-1] == NULL);
+ argv = new_argv;
+ argc++;
+ }
+
+ if (argc < 2 || toolname == NULL || clientname == NULL)
+ barf(1, argv[0], "usage: valgrind [args-for-valgrind] prog args");
+
+ /* Find the client, and figure out if it's a 32- or 64-bit
+ executable. */
+ VG_(debugLog)(1, "launcher", "searching for client in $PATH\n");
+ if (strchr(clientname, '/') == NULL)
+ clientname = (char*)find_client(clientname);
+ VG_(debugLog)(1, "launcher", "found %s\n", clientname);
+
+ Int client_exekind = examine_client ( clientname );
+ switch (client_exekind) {
+ case 32:
+ child.is64 = False;
+ break;
+ case 64:
+ child.is64 = True;
+ break;
+ default:
+ fprintf(stderr, "%s: requested executable %s\n",
+ argv[0], clientname);
+ fprintf(stderr, "%s: not found, or is not a valid XCOFF32 "
+ "or XCOFF64 executable.\n", argv[0]);
+ return 1;
+ }
+
+ VG_(debugLog)(1, "launcher", "client is an XCOFF%d executable\n",
+ client_exekind);
+
+ const char* platform = child.is64 ? "ppc64-aix5" : "ppc32-aix5";
+
+ VG_(debugLog)(1, "launcher", "looking for the tool file\n");
+
+ char* toolfile = malloc(strlen(valgrind_lib)
+ + strlen(toolname) + strlen(platform) + 3);
+ if (toolfile == NULL) {
+ fprintf(stderr,"%s: malloc of toolfile failed\n", argv[0]);
+ return 1;
+ }
+ sprintf(toolfile, "%s/%s-%s", valgrind_lib, toolname, platform);
+
+ if (!file_exists(toolfile)) {
+ fprintf(stderr,"%s: can't stat %s\n", argv[0], toolfile);
+ return 1;
+ }
+
+ /* Force the client to use a 1:1 threading model - this works
+ because the client inherits our environment. */
+ VG_(debugLog)(1, "launcher", "doing putenv(\"AIXTHREAD_SCOPE=S\")\n");
+ Int putenv_err = putenv("AIXTHREAD_SCOPE=S");
+ if (putenv_err) {
+ fprintf(stderr,"%s: putenv(\"AIXTHREAD_SCOPE=S\") failed\n", argv[0]);
+ return 1;
+ }
+
+ VG_(debugLog)(1, "launcher", "doing putenv(\"MP_SHARED_MEMORY=no\")\n");
+ putenv_err = putenv("MP_SHARED_MEMORY=no");
+ if (putenv_err) {
+ fprintf(stderr,"%s: putenv(\"MP_SHARED_MEMORY=no\") failed\n", argv[0]);
+ return 1;
+ }
+
+ /* Find out what the current working directory is, and stuff it into the
+ environment so that the child can find it. */
+ char wd_buf[4096];
+ memset(wd_buf, 0, sizeof(wd_buf));
+ if (getcwd(wd_buf, sizeof(wd_buf)-1) == NULL) {
+ fprintf(stderr,"%s: getcwd(..) failed\n", argv[0]);
+ return 1;
+ }
+ assert(wd_buf[ sizeof(wd_buf)-1 ] == 0);
+ char* set_cwd = calloc(1, 100+sizeof(wd_buf));
+ if (set_cwd == NULL) {
+ fprintf(stderr,"%s: calloc of set_cwd failed\n", argv[0]);
+ return 1;
+ }
+ sprintf(set_cwd, "VALGRIND_STARTUP_PWD_%d_XYZZY=%s", getpid(), wd_buf);
+ VG_(debugLog)(1, "launcher", "doing putenv(\"%s\")\n", set_cwd);
+ putenv_err = putenv(set_cwd);
+ if (putenv_err) {
+ fprintf(stderr,"%s: putenv(\"VALGRIND_STARTUP_PWD_...\") failed\n",
+ argv[0]);
+ return 1;
+ }
+
+ /* Also, cook up the fully qualified name of this executable. The
+ following is a kludge, but I don't see how to really get the
+ fully qualified name on AIX. */
+ char* up_n_down = "/../../bin/valgrind";
+ char* launcher = malloc(strlen(valgrind_lib)
+ + strlen(up_n_down) + 2);
+ if (launcher == NULL) {
+ fprintf(stderr,"%s: malloc of launcher failed\n", argv[0]);
+ return 1;
+ }
+ sprintf(launcher, "%s%s", valgrind_lib, up_n_down);
+
+ if (!file_exists(launcher)) {
+ fprintf(stderr,"%s: can't stat %s\n", argv[0], launcher);
+ return 1;
+ }
+
+ /* First, fork.
+
+ In the child, ask for a ptrace, then exec argv[2 ..]. This
+ causes the kernel to complete the exec, hence loading the
+ child, but does not start it; instead the child remains frozen
+ so that the parent can mess with it via ptrace().
+ */
+ VG_(debugLog)(1, "launcher", "doing fork()\n");
+ child.pid = fork();
+ if (child.pid == -1) {
+ fprintf(stderr,"%s: fork() failed\n", argv[0]);
+ return 1;
+ }
+
+ if (child.pid == 0) {
+ /* --- CHILD --- */
+ VG_(debugLog)(1, "launcher", "child: before ptrace\n");
+ long rl = ptrace64(PT_TRACE_ME, 0,0,0,0);
+ if (rl != 0) {
+ fprintf(stderr,"%s: child: ptrace(PT_TRACE_ME, ...) failed\n", argv[0]);
+ fprintf(stderr,"%s: ", argv[0]);
+ perror(NULL);
+ fflush(stderr);
+ _exit(1);
+ }
+ VG_(debugLog)(1, "launcher", "child: before execve\n");
+
+ /* make VALGRIND_LAUNCHER point at something plausible. */
+ VG_(debugLog)(1, "launcher", "child: launcher = %s\n", launcher);
+ int r = setenv("VALGRIND_LAUNCHER", launcher, 1/*overwrite*/);
+ if (r) {
+ /* setenv failed. */
+ fprintf(stderr,"%s: child: setenv failed\n", argv[0]);
+ fprintf(stderr,"%s: ", argv[0]);
+ perror(NULL);
+ fflush(stderr);
+ _exit(1);
+ /* NOTREACHED */
+ }
+
+ /* This is kind-of strange. We're execvp-ing the client but
+ argv[0] is the toolname, which is irrelevant - m_main ignores
+ it. However, setting it like this at least makes m_main's
+ view of the world (as far as the argv goes) look the same as
+ it does in Linux-land:
+ tool-exe-name [args for V] client-name [args for client]
+ */
+ argv[0] = toolfile;
+ int ri = execvp(clientname, &argv[0]);
+ /* WE ONLY GET HERE IF execve FAILED */
+ assert(ri == -1);
+ fprintf(stderr,"%s: exec failed: %s: ", argv[0], clientname);
+ perror("");
+ return 1;
+ /* NOTREACHED */
+ }
+
+ /* --- PARENT --- */
+ VG_(debugLog)(1, "launcher", "parent: waitpid-ing for child\n");
+ int status;
+ /* Wait to hear back from the child. */
+ pid_t p2 = waitpid(child.pid, &status, 0);
+ /* We could hear back for two reasons. (1) the exec was
+ successful, and because the child is being ptraced, it is now
+ waiting for the parent. (2) the exec failed, and so the child
+ did _exit(). */
+ VG_(debugLog)(1, "launcher", "parent: waitpid got pid %d\n", (int)p2);
+ VG_(debugLog)(1, "launcher", "parent: waitpid got status 0x%x\n", status);
+ assert(p2 == child.pid); /* Huh?! We only have one child. */
+
+ if (WIFEXITED(status)) {
+ /* Case (2) - exec failed. */
+ fprintf(stderr, "parent: child's exec failed.\n");
+ return 0;
+ }
+
+ /* else case (1) must apply */
+ assert(WIFSTOPPED(status));
+
+ /* ------ BEGIN write bootstrap pages into child ------ */
+
+ /* In this section, if for any reason we can't continue to the
+ child-detach and so have to give up, we have to kill the child,
+ else it'll become a zombie. That's what the code at
+ latched_error: does. */
+ char* badness
+ = write_bootstrap_loader_into_child ( &child, toolfile );
+ /* Returns NULL if no error, else points to a string of at least
+ some descriptiveness. */
+ if (badness)
+ goto latched_error;
+
+ /* ------ END write bootstrap pages into child ------ */
+
+ VG_(debugLog)(1, "launcher", "parent: detaching child\n");
+ long lr = ptrace64(PT_DETACH, (ULong)child.pid, 0, SIGCONT, 0);
+ VG_(debugLog)(1, "launcher", "parent: detach got %ld\n", lr);
+ assert(lr == 0);
+ VG_(debugLog)(1, "launcher", "parent: waiting for child to finish\n");
+
+ p2 = waitpid(child.pid, &status, 0);
+ assert(p2 == child.pid);
+ if (0)
+ fprintf(stderr,"parent: child finished, status 0x%x 0x%x\n",
+ status, WEXITSTATUS(status));
+
+ if (WIFEXITED(status)) {
+ VG_(debugLog)(1, "launcher",
+ "parent: child finished normally, exit code %d\n",
+ WEXITSTATUS(status));
+ return WEXITSTATUS(status);
+ }
+ else if (WIFSIGNALED(status)) {
+ VG_(debugLog)(1, "launcher",
+ "parent: child exited on signal %d\n",
+ (int)WTERMSIG(status));
+ /* Since the child exited with a signal, we'd better
+ whack ourselves on the head with the same signal. */
+ kill( getpid(), (int)WTERMSIG(status) );
+ /* presumably NOTREACHED? */
+ return 0; /* This is completely bogus */
+ }
+ else {
+ /* erm. Can we ever get here? */
+ assert(0);
+ return 0;
+ }
+
+ latched_error:
+ /* We get here if there was some kind of problem messing with the
+ child whilst we still had it latched by ptrace. In this case we
+ need to kill it before exiting, since otherwise it will become a
+ zombie. */
+ assert(badness);
+ fprintf(stderr, "%s: error while doing ptracery on '%s'\n",
+ argv[0], clientname);
+ fprintf(stderr, "%s: error is: %s\n",
+ argv[0], badness);
+ return 0; /*BOGUS*/
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end launcher-aix5.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/launcher-linux.c.svn-base b/coregrind/.svn/text-base/launcher-linux.c.svn-base
new file mode 100644
index 0000000..1713434
--- /dev/null
+++ b/coregrind/.svn/text-base/launcher-linux.c.svn-base
@@ -0,0 +1,322 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Launching valgrind m_launcher.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Note: this is a "normal" program and not part of Valgrind proper,
+ and so it doesn't have to conform to Valgrind's arcane rules on
+ no-glibc-usage etc. */
+
+#include <assert.h>
+#include <ctype.h>
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/user.h>
+#include <unistd.h>
+
+#include "pub_core_debuglog.h"
+#include "pub_core_vki.h" // Avoids warnings from
+ // pub_core_libcfile.h
+#include "pub_core_libcproc.h" // For VALGRIND_LIB, VALGRIND_LAUNCHER
+#include "pub_core_ume.h"
+
+
+
+#define PATH_MAX 4096 /* POSIX refers to this a lot but I dunno
+ where it is defined */
+
+#ifndef EM_X86_64
+#define EM_X86_64 62 // elf.h doesn't define this on some older systems
+#endif
+
/* Print a fatal launcher error to stderr (printf-style) and
   terminate with exit code 1.  Never returns. */
__attribute__((noreturn))
static void barf ( const char *format, ... )
{
   va_list ap;

   fprintf(stderr, "valgrind: Cannot continue: ");
   va_start(ap, format);
   vfprintf(stderr, format, ap);
   va_end(ap);
   fputc('\n', stderr);

   exit(1);
}
+
+/* Search the path for the client program */
+static const char *find_client(const char *clientname)
+{
+ static char fullname[PATH_MAX];
+ const char *path = getenv("PATH");
+ const char *colon;
+
+ while (path)
+ {
+ if ((colon = strchr(path, ':')) == NULL)
+ {
+ strcpy(fullname, path);
+ path = NULL;
+ }
+ else
+ {
+ memcpy(fullname, path, colon - path);
+ fullname[colon - path] = '\0';
+ path = colon + 1;
+ }
+
+ strcat(fullname, "/");
+ strcat(fullname, clientname);
+
+ if (access(fullname, R_OK|X_OK) == 0)
+ return fullname;
+ }
+
+ return clientname;
+}
+
/* Examine the client and work out which platform it is for, by
   mapping its first page and inspecting the ELF header; for "#!"
   scripts, the named interpreter is examined recursively.  Returns
   a static platform string, or NULL if it cannot be determined. */
static const char *select_platform(const char *clientname)
{
   int fd;
   unsigned char *header;
   const char *platform = NULL;
   long pagesize = sysconf(_SC_PAGESIZE);

   if (strchr(clientname, '/') == NULL)
      clientname = find_client(clientname);

   if ((fd = open(clientname, O_RDONLY)) < 0)
      return NULL;
   //   barf("open(%s): %s", clientname, strerror(errno));

   /* PROT_WRITE + MAP_PRIVATE so the "#!" scan below may write a
      terminating '\0' into our private copy of the page. */
   header = mmap(NULL, pagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
   if (header == MAP_FAILED) {
      close(fd);   /* the original leaked the fd on this path */
      return NULL;
   }
   //   barf("mmap(%s): %s", clientname, strerror(errno));

   close(fd);

   if (header[0] == '#' && header[1] == '!') {
      /* Script: extract the interpreter name from the "#!" line,
         never scanning past the end of the mapped page (the
         original's scan was unbounded). */
      char *interp = (char *)header + 2;
      char *interpend;
      char *limit = (char *)header + pagesize - 1;

      while (interp < limit && (*interp == ' ' || *interp == '\t'))
         interp++;

      for (interpend = interp;
           interpend < limit && !isspace((unsigned char)*interpend);
           interpend++)
         ;

      *interpend = '\0';

      platform = select_platform(interp);
   } else if (memcmp(header, ELFMAG, SELFMAG) == 0) {

      if (header[EI_CLASS] == ELFCLASS32) {
         const Elf32_Ehdr *ehdr = (Elf32_Ehdr *)header;

         if (header[EI_DATA] == ELFDATA2LSB) {
            if (ehdr->e_machine == EM_386 &&
                ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV) {
               platform = "x86-linux";
            }
         }
         else if (header[EI_DATA] == ELFDATA2MSB) {
            if (ehdr->e_machine == EM_PPC &&
                ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV) {
               platform = "ppc32-linux";
            }
         }
      } else if (header[EI_CLASS] == ELFCLASS64) {
         const Elf64_Ehdr *ehdr = (Elf64_Ehdr *)header;

         if (header[EI_DATA] == ELFDATA2LSB) {
            if (ehdr->e_machine == EM_X86_64 &&
                ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV) {
               platform = "amd64-linux";
            }
         } else if (header[EI_DATA] == ELFDATA2MSB) {
            if (ehdr->e_machine == EM_PPC64 &&
                ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV) {
               platform = "ppc64-linux";
            }
         }
      }
   }

   munmap(header, pagesize);

   return platform;
}
+
/* Where we expect to find all our aux files (the per-platform tool
   binaries, etc).  Defaults to the compiled-in VG_LIBDIR; main()
   overrides it with $VALGRIND_LIB if that is set. */
static const char *valgrind_lib = VG_LIBDIR;
+
/* Launcher entry point.  Pre-scans the command line for "-d" (debug
   log level) and "--tool=", works out the target platform from the
   client executable, exports VALGRIND_LAUNCHER into a copy of the
   environment, then execve()s the selected tool binary (stage2).
   Only returns (exit code 1) if the exec fails. */
int main(int argc, char** argv, char** envp)
{
   int i, j, loglevel, r;
   const char *toolname = NULL;
   const char *clientname = NULL;
   const char *platform;
   const char *default_platform;
   const char *cp;
   char *toolfile;
   char launcher_name[PATH_MAX+1];
   char* new_line;
   char** new_env;

   /* Start the debugging-log system ASAP. First find out how many
      "-d"s were specified. This is a pre-scan of the command line.
      At the same time, look for the tool name. */
   loglevel = 0;
   for (i = 1; i < argc; i++) {
      if (argv[i][0] != '-') {
         /* First non-option argument is the client program. */
         clientname = argv[i];
         break;
      }
      if (0 == strcmp(argv[i], "--")) {
         /* "--" explicitly ends valgrind's args; client follows. */
         if (i+1 < argc)
            clientname = argv[i+1];
         break;
      }
      if (0 == strcmp(argv[i], "-d"))
         loglevel++;
      if (0 == strncmp(argv[i], "--tool=", 7))
         toolname = argv[i] + 7;
   }

   /* ... and start the debug logger. Now we can safely emit logging
      messages all through startup. */
   VG_(debugLog_startup)(loglevel, "Stage 1");

   /* Make sure we know which tool we're using */
   if (toolname) {
      VG_(debugLog)(1, "launcher", "tool '%s' requested\n", toolname);
   } else {
      VG_(debugLog)(1, "launcher",
                    "no tool requested, defaulting to 'memcheck'\n");
      toolname = "memcheck";
   }

   /* Select a platform to use if we can't decide that by looking at
      the executable (eg because it's a shell script). Note that the
      default_platform is not necessarily either the primary or
      secondary build target. Instead it's chosen to maximise the
      chances that /bin/sh will work on it. Hence for a primary
      target of ppc64-linux we still choose ppc32-linux as the default
      target, because on most ppc64-linux setups, the basic /bin,
      /usr/bin, etc, stuff is built in 32-bit mode, not 64-bit
      mode. */
   if ((0==strcmp(VG_PLATFORM,"x86-linux")) ||
       (0==strcmp(VG_PLATFORM,"amd64-linux")) ||
       (0==strcmp(VG_PLATFORM,"ppc32-linux")) ||
       (0==strcmp(VG_PLATFORM,"ppc64-linux")))
      default_platform = VG_PLATFORM;
   else
      barf("Unknown VG_PLATFORM '%s'", VG_PLATFORM);

   /* Work out what platform to use, or use the default platform if
      not possible. */
   if (clientname == NULL) {
      VG_(debugLog)(1, "launcher",
                       "no client specified, defaulting platform to '%s'\n",
                        default_platform);
      platform = default_platform;
   } else if ((platform = select_platform(clientname)) != NULL) {
      VG_(debugLog)(1, "launcher", "selected platform '%s'\n", platform);
   } else {
      VG_(debugLog)(1, "launcher",
                       "no platform detected, defaulting platform to '%s'\n",
                       default_platform);
      platform = default_platform;
   }

   /* Figure out the name of this executable (viz, the launcher), so
      we can tell stage2. stage2 will use the name for recursive
      invocations of valgrind on child processes. */
   memset(launcher_name, 0, PATH_MAX+1);
   r = readlink("/proc/self/exe", launcher_name, PATH_MAX);
   if (r == -1) {
      /* If /proc/self/exe can't be followed, don't give up. Instead
         continue with an empty string for VALGRIND_LAUNCHER. In the
         sys_execve wrapper, this is tested, and if found to be empty,
         fail the execve. */
      fprintf(stderr, "valgrind: warning (non-fatal): "
                      "readlink(\"/proc/self/exe\") failed.\n");
      fprintf(stderr, "valgrind: continuing, however --trace-children=yes "
                      "will not work.\n");
   }

   /* tediously augment the env: VALGRIND_LAUNCHER=launcher_name */
   new_line = malloc(strlen(VALGRIND_LAUNCHER) + 1
                     + strlen(launcher_name) + 1);
   if (new_line == NULL)
      barf("malloc of new_line failed.");
   strcpy(new_line, VALGRIND_LAUNCHER);
   strcat(new_line, "=");
   strcat(new_line, launcher_name);

   /* Copy the existing environment, then append the new entry and
      the terminating NULL. */
   for (j = 0; envp[j]; j++)
      ;
   new_env = malloc((j+2) * sizeof(char*));
   if (new_env == NULL)
      barf("malloc of new_env failed.");
   for (i = 0; i < j; i++)
      new_env[i] = envp[i];
   new_env[i++] = new_line;
   new_env[i++] = NULL;
   assert(i == j+2);

   /* Establish the correct VALGRIND_LIB: the env var (if set)
      overrides the compiled-in default. */
   cp = getenv(VALGRIND_LIB);

   if (cp != NULL)
      valgrind_lib = cp;

   /* Build the stage2 invocation, and execve it. Bye! */
   toolfile = malloc(strlen(valgrind_lib) + strlen(toolname) + strlen(platform) + 3);
   if (toolfile == NULL)
      barf("malloc of toolfile failed.");
   sprintf(toolfile, "%s/%s-%s", valgrind_lib, toolname, platform);

   VG_(debugLog)(1, "launcher", "launching %s\n", toolfile);

   execve(toolfile, argv, new_env);

   /* Only reached if the exec failed. */
   fprintf(stderr, "valgrind: failed to start tool '%s' for platform '%s': %s\n",
                   toolname, platform, strerror(errno));

   exit(1);
}
diff --git a/coregrind/.svn/text-base/m_clientstate.c.svn-base b/coregrind/.svn/text-base/m_clientstate.c.svn-base
new file mode 100644
index 0000000..180046e
--- /dev/null
+++ b/coregrind/.svn/text-base/m_clientstate.c.svn-base
@@ -0,0 +1,105 @@
+
+/*--------------------------------------------------------------------*/
+/*--- A home for miscellaneous bits of information which pertain ---*/
+/*--- to the client's state. ---*/
+/*--- m_clientstate.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+
+/*-----------------------------------------------------------------*/
+/*--- ---*/
+/*--- Basic globals about the address space. ---*/
+/*--- ---*/
+/*-----------------------------------------------------------------*/
+
/* Client address space, lowest to highest (see top of ume.c) */
// TODO: get rid of as many of these as possible.

Addr VG_(client_base) = 0; /* client address space limits */
Addr VG_(client_end) = 0;

/* Client stack base/end.  VG_(clstk_id) presumably identifies the
   stack segment to the address-space manager — TODO confirm. */
Addr VG_(clstk_base) = 0;
Addr VG_(clstk_end) = 0;
UWord VG_(clstk_id) = 0;

Addr VG_(brk_base) = 0; /* start of brk */
Addr VG_(brk_limit) = 0; /* current brk */

/* A fd which refers to the client executable. */
Int VG_(cl_exec_fd) = -1;

/* A fd which refers to the fake /proc/<pid>/cmdline in /tmp. */
Int VG_(cl_cmdline_fd) = -1;

// Command line pieces, after they have been extracted from argv in
// m_main.main(). The payload vectors are allocated in VG_AR_TOOL
// (the default arena). They are never freed.

/* Args for the client. */
XArray* /* of HChar* */ VG_(args_for_client) = NULL;

/* Args for V (augments, then those from the launcher). */
XArray* /* of HChar* */ VG_(args_for_valgrind) = NULL;

/* How many of the above not to pass on at execve time? */
Int VG_(args_for_valgrind_noexecpass) = 0;

/* The name of the client executable, as specified on the command
   line. */
HChar* VG_(args_the_exename) = NULL;

// Client's original rlimit data and rlimit stack
struct vki_rlimit VG_(client_rlimit_data);
struct vki_rlimit VG_(client_rlimit_stack);

// Name of the launcher, as extracted from VALGRIND_LAUNCHER at
// startup.
HChar* VG_(name_of_launcher) = NULL;

/* Application-visible file descriptor limits */
Int VG_(fd_soft_limit) = -1;
Int VG_(fd_hard_limit) = -1;

/* Useful addresses extracted from the client */
/* Where is the __libc_freeres_wrapper routine we made? */
Addr VG_(client___libc_freeres_wrapper) = 0;

/* x86-linux only: where is glibc's _dl_sysinfo_int80 function?
   Finding it isn't essential, but knowing where it is does sometimes
   help produce better back traces. See big comment in
   VG_(get_StackTrace) in m_stacktrace.c for further info. */
Addr VG_(client__dl_sysinfo_int80) = 0;
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_commandline.c.svn-base b/coregrind/.svn/text-base/m_commandline.c.svn-base
new file mode 100644
index 0000000..713fa4c
--- /dev/null
+++ b/coregrind/.svn/text-base/m_commandline.c.svn-base
@@ -0,0 +1,250 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Command line handling. m_commandline.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_commandline.h" /* self */
+
+
+/* Add a string to an expandable array of strings. */
+
+static void add_string ( XArray* /* of HChar* */xa, HChar* str )
+{
+ (void) VG_(addToXA)( xa, (void*)(&str) );
+}
+
+
+/* Read the contents of .valgrindrc in 'dir' into malloc'd memory. */
+// Note that we deliberately don't free the malloc'd memory. See
+// comment at call site.
+
+static HChar* read_dot_valgrindrc ( HChar* dir )
+{
+ Int n;
+ SysRes fd;
+ struct vg_stat stat_buf;
+ HChar* f_clo = NULL;
+ HChar filename[VKI_PATH_MAX];
+
+ VG_(snprintf)(filename, VKI_PATH_MAX, "%s/.valgrindrc",
+ ( NULL == dir ? "" : dir ) );
+ fd = VG_(open)(filename, 0, VKI_S_IRUSR);
+ if ( !fd.isError ) {
+ Int res = VG_(fstat)( fd.res, &stat_buf );
+ // Ignore if not owned by current user or world writeable (CVE-2008-4865)
+ if (!res && stat_buf.st_uid == VG_(geteuid)()
+ && (!(stat_buf.st_mode & VKI_S_IWOTH))) {
+ if ( stat_buf.st_size > 0 ) {
+ f_clo = VG_(malloc)("commandline.rdv.1", stat_buf.st_size+1);
+ vg_assert(f_clo);
+ n = VG_(read)(fd.res, f_clo, stat_buf.st_size);
+ if (n == -1) n = 0;
+ vg_assert(n >= 0 && n <= stat_buf.st_size+1);
+ f_clo[n] = '\0';
+ }
+ }
+ else
+ VG_(message)(Vg_UserMsg,
+ "%s was not read as it is world writeable or not owned by the "
+ "current user", filename);
+
+ VG_(close)(fd.res);
+ }
+ return f_clo;
+}
+
+
+// Add args from a string into VG_(args_for_valgrind), splitting the
+// string at whitespace and adding each component as a separate arg.
+
+static void add_args_from_string ( HChar* s )
+{
+ HChar* tmp;
+ HChar* cp = s;
+ vg_assert(cp);
+ while (True) {
+ // We have alternating sequences: blanks, non-blanks, blanks...
+ // copy the non-blanks sequences, and add terminating '\0'
+ while (VG_(isspace)(*cp)) cp++;
+ if (*cp == 0) break;
+ tmp = cp;
+ while ( !VG_(isspace)(*cp) && *cp != 0 ) cp++;
+ if ( *cp != 0 ) *cp++ = '\0'; // terminate if not the last
+ add_string( VG_(args_for_valgrind), tmp );
+ }
+}
+
+
+/* Split up the args presented by the launcher to m_main.main(), and
+ park them in VG_(args_for_client) and VG_(args_for_valgrind).
+
+ The resulting arg list is the concatenation of the following:
+ - contents of ~/.valgrindrc
+ - contents of $VALGRIND_OPTS
+ - contents of ./.valgrindrc
+ - args from the command line
+ in the stated order.
+
+ VG_(args_for_valgrind_noexecpass) is set to be the number of items
+   in the first three categories. They are not passed to child invocations
+ at exec, whereas the last group is.
+
+ If the last group contains --command-line-only=yes, then the
+ first three groups are left empty.
+
+ Scheme: first examine the last group (the supplied argc/argv).
+ It should look like this.
+
+ args-for-v exe_name args-for-c
+
+ args-for-v are taken until either they don't start with '-' or
+ a "--" is seen.
+
+ The exe name and args-for-c are recorded without further ado.
+ Note that args-for-c[0] is the first real arg for the client, not
+ its executable name.
+
+ args-for-v are then copied into tmp_xarray.
+
+ if args-for-v does not include --command-line-only=yes:
+ contents of ~/.valgrindrc, $VALGRIND_OPTS and ./.valgrindrc
+ are copied into VG_(args_for_valgrind).
+ else
+ VG_(args_for_valgrind) is made empty.
+
+ Finally, tmp_xarray is copied onto the end of VG_(args_for_valgrind).
+*/
+
/* Split the launcher-supplied argv into VG_(args_for_valgrind) and
   VG_(args_for_client); see the large comment above for the full
   scheme.  Must be called exactly once, at startup. */
void VG_(split_up_argv)( Int argc, HChar** argv )
{
   Int i;
   Bool augment = True;
   static Bool already_called = False;

   /* args-for-V from the command line are parked here first, and
      appended to VG_(args_for_valgrind) only at the very end, so
      that rc-file/env args come before command-line args. */
   XArray* /* of HChar* */ tmp_xarray;

   /* This function should be called once, at startup, and then never
      again. */
   vg_assert(!already_called);
   already_called = True;

   tmp_xarray = VG_(newXA)( VG_(malloc), "commandline.sua.1",
                            VG_(free), sizeof(HChar*) );
   vg_assert(tmp_xarray);

   vg_assert( ! VG_(args_for_valgrind) );
   VG_(args_for_valgrind)
      = VG_(newXA)( VG_(malloc), "commandline.sua.2",
                    VG_(free), sizeof(HChar*) );
   vg_assert( VG_(args_for_valgrind) );

   vg_assert( ! VG_(args_for_client) );
   VG_(args_for_client)
      = VG_(newXA)( VG_(malloc), "commandline.sua.3",
                    VG_(free), sizeof(HChar*) );
   vg_assert( VG_(args_for_client) );

   /* Collect up the args-for-V: everything up to the first arg that
      does not start with '-', or up to (and consuming) "--". */
   i = 1; /* skip the exe (stage2) name. */
   for (; i < argc; i++) {
      vg_assert(argv[i]);
      if (0 == VG_(strcmp)(argv[i], "--")) {
         i++;
         break;
      }
      if (0 == VG_(strcmp)(argv[i], "--command-line-only=yes"))
         augment = False;
      if (argv[i][0] != '-')
         break;
      add_string( tmp_xarray, argv[i] );
   }

   /* Should now be looking at the exe name. */
   if (i < argc) {
      vg_assert(argv[i]);
      VG_(args_the_exename) = argv[i];
      i++;
   }

   /* The rest are args for the client. */
   for (; i < argc; i++) {
      vg_assert(argv[i]);
      add_string( VG_(args_for_client), argv[i] );
   }

   /* Get extra args from ~/.valgrindrc, $VALGRIND_OPTS and
      ./.valgrindrc into VG_(args_for_valgrind). */
   if (augment) {
      // read_dot_valgrindrc() allocates the return value with
      // VG_(malloc)(). We do not free f1_clo and f2_clo as they get
      // put into VG_(args_for_valgrind) and so must persist.
      HChar* home = VG_(getenv)("HOME");
      HChar* f1_clo = home ? read_dot_valgrindrc( home ) : NULL;
      // NOTE(review): VG_(getenv) returns NULL when $VALGRIND_OPTS is
      // unset; assumes VG_(strdup) tolerates a NULL input — confirm.
      HChar* env_clo = VG_(strdup)( "commandline.sua.4",
                                    VG_(getenv)(VALGRIND_OPTS) );
      HChar* f2_clo = NULL;

      // Don't read ./.valgrindrc if "." is the same as "$HOME", else its
      // contents will be applied twice. (bug #142488)
      if (home) {
         HChar cwd[VKI_PATH_MAX+1];
         Bool cwd_ok = VG_(get_startup_wd)(cwd, VKI_PATH_MAX);
         f2_clo = ( (cwd_ok && VG_STREQ(home, cwd))
                       ? NULL : read_dot_valgrindrc(".") );
      }

      if (f1_clo)  add_args_from_string( f1_clo );
      if (env_clo) add_args_from_string( env_clo );
      if (f2_clo)  add_args_from_string( f2_clo );
   }

   /* .. and record how many extras we got. */
   VG_(args_for_valgrind_noexecpass)
      = VG_(sizeXA)( VG_(args_for_valgrind) );

   /* Finally, copy tmp_xarray onto the end. */
   for (i = 0; i < VG_(sizeXA)( tmp_xarray ); i++)
      add_string( VG_(args_for_valgrind),
                  * (HChar**)VG_(indexXA)( tmp_xarray, i ) );

   VG_(deleteXA)( tmp_xarray );
}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_cpuid.S.svn-base b/coregrind/.svn/text-base/m_cpuid.S.svn-base
new file mode 100644
index 0000000..d7dcff3
--- /dev/null
+++ b/coregrind/.svn/text-base/m_cpuid.S.svn-base
@@ -0,0 +1,153 @@
+
+/*--------------------------------------------------------------------*/
+/*--- CPUID interface. m_cpuid.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics_asm.h"
+
/*
   Bool VG_(has_cpuid)(void)
   Returns 1 if the CPUID instruction is available, 0 if not.
 */
.globl VG_(has_cpuid)
#if defined(VGA_x86)
    VG_(has_cpuid):
        pushl %ebp
        movl %esp, %ebp
        pushl %ecx
        pushfl
        pushfl
        /* Get EFLAGS into %eax and keep an unmodified copy in %ecx. */
        popl %eax
        movl %eax, %ecx
        /* Toggle the ID bit (bit 21, 0x200000) and write back to EFLAGS. */
        xorl $0x200000, %eax
        pushl %eax
        popfl
        /* Re-read EFLAGS; if the toggle stuck, CPUID is supported. */
        pushfl
        popl %eax
        popfl
        xorl %ecx, %eax
        andl $0x200000, %eax
        /* Shift bit 21 down to bit 0 to form the Bool result. */
        shrl $21, %eax
        popl %ecx
        movl %ebp, %esp
        popl %ebp
        ret
#elif defined(VGA_amd64)
    VG_(has_cpuid):
        /* CPUID is always available on amd64. */
        movq $1, %rax
        ret
#endif
+
/*
   void VG_(cpuid)(UInt eax,
                   UInt* eax_ret, UInt* ebx_ret, UInt* ecx_ret, UInt* edx_ret)

   Executes CPUID with the requested leaf in EAX and stores the
   resulting registers through whichever of the four out-pointers
   are non-NULL; NULL pointers are skipped.
 */
.globl VG_(cpuid)
#if defined(VGA_x86)
    VG_(cpuid):
        pushl %ebp
        movl %esp, %ebp
        pushl %eax
        pushl %ebx
        pushl %ecx
        pushl %edx
        pushl %esi
        /* Load the requested leaf (first stack argument) into %eax. */
        movl 8(%ebp), %eax
        cpuid
        /* Guarded stores: write each result only if its destination
           pointer (stack args 2..5) is non-NULL. */
        movl 12(%ebp), %esi
        testl %esi, %esi
        jz 1f
        movl %eax, (%esi)
    1:
        movl 16(%ebp), %esi
        testl %esi, %esi
        jz 2f
        movl %ebx, (%esi)
    2:
        movl 20(%ebp), %esi
        testl %esi, %esi
        jz 3f
        movl %ecx, (%esi)
    3:
        movl 24(%ebp), %esi
        testl %esi, %esi
        jz 4f
        movl %edx, (%esi)
    4:
        popl %esi
        popl %edx
        popl %ecx
        popl %ebx
        popl %eax
        movl %ebp, %esp
        popl %ebp
        ret
#elif defined(VGA_amd64)
    VG_(cpuid):
        pushq %rbp
        movq %rsp, %rbp
        pushq %rbx
        /* Leaf number arrives in %edi (arg 1). */
        movl %edi, %eax
        /* Move ebx_ret/ecx_ret out of %rdx/%rcx, which CPUID clobbers. */
        movq %rdx, %rdi
        movq %rcx, %r9
        /*
           eax_ret now in %rsi
           ebx_ret now in %rdi
           ecx_ret now in %r9
           edx_ret now in %r8
        */
        cpuid
        /* Guarded stores: write each result only if its destination
           pointer is non-NULL. */
        testq %rsi, %rsi
        jz 1f
        movl %eax, (%rsi)
    1:
        testq %rdi, %rdi
        jz 2f
        movl %ebx, (%rdi)
    2:
        testq %r9, %r9
        jz 3f
        movl %ecx, (%r9)
    3:
        testq %r8, %r8
        jz 4f
        movl %edx, (%r8)
    4:
        popq %rbx
        movq %rbp, %rsp
        popq %rbp
        ret
#endif
+
+#if defined(VGO_linux)
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",@progbits
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_debugger.c.svn-base b/coregrind/.svn/text-base/m_debugger.c.svn-base
new file mode 100644
index 0000000..de28e36
--- /dev/null
+++ b/coregrind/.svn/text-base/m_debugger.c.svn-base
@@ -0,0 +1,321 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Attaching a debugger. m_debugger.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_debugger.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_options.h"
+
+
+#define WIFSTOPPED(status) (((status) & 0xff) == 0x7f)
+#define WSTOPSIG(status) (((status) & 0xff00) >> 8)
+
+static Int ptrace_setregs(Int pid, VexGuestArchState* vex)
+{
+#if defined(VGP_x86_linux)
+ struct vki_user_regs_struct regs;
+ VG_(memset)(&regs, 0, sizeof(regs));
+ regs.cs = vex->guest_CS;
+ regs.ss = vex->guest_SS;
+ regs.ds = vex->guest_DS;
+ regs.es = vex->guest_ES;
+ regs.fs = vex->guest_FS;
+ regs.gs = vex->guest_GS;
+ regs.eax = vex->guest_EAX;
+ regs.ebx = vex->guest_EBX;
+ regs.ecx = vex->guest_ECX;
+ regs.edx = vex->guest_EDX;
+ regs.esi = vex->guest_ESI;
+ regs.edi = vex->guest_EDI;
+ regs.ebp = vex->guest_EBP;
+ regs.esp = vex->guest_ESP;
+ regs.eflags = LibVEX_GuestX86_get_eflags(vex);
+ regs.eip = vex->guest_EIP;
+ return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);
+
+#elif defined(VGP_amd64_linux)
+ struct vki_user_regs_struct regs;
+ VG_(memset)(&regs, 0, sizeof(regs));
+ regs.rax = vex->guest_RAX;
+ regs.rbx = vex->guest_RBX;
+ regs.rcx = vex->guest_RCX;
+ regs.rdx = vex->guest_RDX;
+ regs.rsi = vex->guest_RSI;
+ regs.rdi = vex->guest_RDI;
+ regs.rbp = vex->guest_RBP;
+ regs.rsp = vex->guest_RSP;
+ regs.r8 = vex->guest_R8;
+ regs.r9 = vex->guest_R9;
+ regs.r10 = vex->guest_R10;
+ regs.r11 = vex->guest_R11;
+ regs.r12 = vex->guest_R12;
+ regs.r13 = vex->guest_R13;
+ regs.r14 = vex->guest_R14;
+ regs.r15 = vex->guest_R15;
+ regs.eflags = LibVEX_GuestAMD64_get_rflags(vex);
+ regs.rip = vex->guest_RIP;
+ /* Set %{c,d,e,f,s,g}s and %{fs,gs}_base (whatever those are) to
+ values which don't fail the kernel's sanity checks. I have no
+ idea what these should really be set to. Anyway, mostly it
+ seems that zero is an allowable value, except for %cs and %ss
+ which have to have their lowest 2 bits be 11. See putreg() in
+ linux-2.6.23/arch/x86_64/kernel/ptrace.c for the apparently
+ relevant sanity checks. This fixes #145622. */
+ regs.cs = 3;
+ regs.ds = 0;
+ regs.es = 0;
+ regs.fs = 0;
+ regs.ss = 3;
+ regs.gs = 0;
+ regs.fs_base = 0;
+ regs.gs_base = 0;
+ return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);
+
+#elif defined(VGP_ppc32_linux)
+ Int rc = 0;
+ /* apparently the casting to void* is the Right Thing To Do */
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 4), (void*)vex->guest_GPR0);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 4), (void*)vex->guest_GPR1);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 4), (void*)vex->guest_GPR2);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 4), (void*)vex->guest_GPR3);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 4), (void*)vex->guest_GPR4);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 4), (void*)vex->guest_GPR5);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 4), (void*)vex->guest_GPR6);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 4), (void*)vex->guest_GPR7);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 4), (void*)vex->guest_GPR8);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 4), (void*)vex->guest_GPR9);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 4), (void*)vex->guest_GPR10);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 4), (void*)vex->guest_GPR11);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 4), (void*)vex->guest_GPR12);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 4), (void*)vex->guest_GPR13);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 4), (void*)vex->guest_GPR14);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 4), (void*)vex->guest_GPR15);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 4), (void*)vex->guest_GPR16);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 4), (void*)vex->guest_GPR17);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 4), (void*)vex->guest_GPR18);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 4), (void*)vex->guest_GPR19);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 4), (void*)vex->guest_GPR20);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 4), (void*)vex->guest_GPR21);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 4), (void*)vex->guest_GPR22);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 4), (void*)vex->guest_GPR23);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 4), (void*)vex->guest_GPR24);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 4), (void*)vex->guest_GPR25);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 4), (void*)vex->guest_GPR26);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 4), (void*)vex->guest_GPR27);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 4), (void*)vex->guest_GPR28);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 4), (void*)vex->guest_GPR29);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 4), (void*)vex->guest_GPR30);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 4), (void*)vex->guest_GPR31);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 4), (void*)vex->guest_CIA);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 4),
+ (void*)LibVEX_GuestPPC32_get_CR(vex));
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 4), (void*)vex->guest_LR);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 4), (void*)vex->guest_CTR);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 4),
+ (void*)LibVEX_GuestPPC32_get_XER(vex));
+ return rc;
+
+#elif defined(VGP_ppc64_linux)
+ Int rc = 0;
+ /* FRJ: copied nearly verbatim from the ppc32 case. I compared the
+ vki-ppc64-linux.h with its ppc32 counterpart and saw no
+ appreciable differences, other than the registers being 8 bytes
+ instead of 4. No idea why we don't set all of the entries
+ declared in vki_pt_regs, but ppc32 doesn't so there must be a
+ reason.
+
+ Finally, note that CR and XER are 32 bits even for ppc64 (see
+ libvex_guest_ppc64.h), but the vki_pt_regs struct still gives
+ them 64 bits.
+ */
+ /* apparently the casting to void* is the Right Thing To Do */
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R0 * 8), (void*)vex->guest_GPR0);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R1 * 8), (void*)vex->guest_GPR1);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R2 * 8), (void*)vex->guest_GPR2);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R3 * 8), (void*)vex->guest_GPR3);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R4 * 8), (void*)vex->guest_GPR4);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R5 * 8), (void*)vex->guest_GPR5);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R6 * 8), (void*)vex->guest_GPR6);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R7 * 8), (void*)vex->guest_GPR7);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R8 * 8), (void*)vex->guest_GPR8);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R9 * 8), (void*)vex->guest_GPR9);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R10 * 8), (void*)vex->guest_GPR10);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R11 * 8), (void*)vex->guest_GPR11);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R12 * 8), (void*)vex->guest_GPR12);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R13 * 8), (void*)vex->guest_GPR13);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R14 * 8), (void*)vex->guest_GPR14);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R15 * 8), (void*)vex->guest_GPR15);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R16 * 8), (void*)vex->guest_GPR16);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R17 * 8), (void*)vex->guest_GPR17);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R18 * 8), (void*)vex->guest_GPR18);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R19 * 8), (void*)vex->guest_GPR19);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R20 * 8), (void*)vex->guest_GPR20);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R21 * 8), (void*)vex->guest_GPR21);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R22 * 8), (void*)vex->guest_GPR22);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R23 * 8), (void*)vex->guest_GPR23);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R24 * 8), (void*)vex->guest_GPR24);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R25 * 8), (void*)vex->guest_GPR25);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R26 * 8), (void*)vex->guest_GPR26);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R27 * 8), (void*)vex->guest_GPR27);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R28 * 8), (void*)vex->guest_GPR28);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R29 * 8), (void*)vex->guest_GPR29);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R30 * 8), (void*)vex->guest_GPR30);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_R31 * 8), (void*)vex->guest_GPR31);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_NIP * 8), (void*)vex->guest_CIA);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CCR * 8),
+ (void*)(long)LibVEX_GuestPPC64_get_CR(vex));
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_LNK * 8), (void*)vex->guest_LR);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_CTR * 8), (void*)vex->guest_CTR);
+ rc |= VG_(ptrace)(VKI_PTRACE_POKEUSR, pid, (void*)(VKI_PT_XER * 8),
+ (void*)(long)LibVEX_GuestPPC64_get_XER(vex));
+ return rc;
+
+#elif defined(VGP_ppc32_aix5)
+ I_die_here;
+
+#elif defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+/* Start debugger and get it to attach to this process. Called if the
+ user requests this service after an error has been shown, so she can
+ poke around and look at parameters, memory, etc. You can't
+ meaningfully get the debugger to continue the program, though; to
+ continue, quit the debugger. */
+void VG_(start_debugger) ( ThreadId tid )
+{
+# define N_BUF 4096
+ Int pid, rc;
+
+ pid = VG_(fork)();
+
+ if (pid == 0) {
+ /* child */
+ rc = VG_(ptrace)(VKI_PTRACE_TRACEME, 0, NULL, NULL);
+ vg_assert(rc == 0);
+ rc = VG_(kill)(VG_(getpid)(), VKI_SIGSTOP);
+ vg_assert(rc == 0);
+
+ } else if (pid > 0) {
+ /* parent */
+ Int status;
+ Int res;
+
+ if ((res = VG_(waitpid)(pid, &status, 0)) == pid &&
+ WIFSTOPPED(status) && WSTOPSIG(status) == VKI_SIGSTOP &&
+ ptrace_setregs(pid, &(VG_(threads)[tid].arch.vex)) == 0 &&
+ VG_(kill)(pid, VKI_SIGSTOP) == 0 &&
+ VG_(ptrace)(VKI_PTRACE_DETACH, pid, NULL, 0) == 0)
+ {
+ Char pidbuf[15];
+ Char file[50];
+ Char buf[N_BUF];
+ Char *bufptr;
+ Char *cmdptr;
+
+ VG_(sprintf)(pidbuf, "%d", pid);
+ VG_(sprintf)(file, "/proc/%d/fd/%d", pid, VG_(cl_exec_fd));
+
+ bufptr = buf;
+ cmdptr = VG_(clo_db_command);
+
+ while (*cmdptr) {
+ /* each iteration can advance bufptr by at most the length
+ of file[], so the following assertion is generously
+ over-paranoid. */
+ vg_assert(bufptr - buf < N_BUF-15-50-10/*paranoia*/);
+ switch (*cmdptr) {
+ case '%':
+ switch (*++cmdptr) {
+ case 'f':
+ VG_(memcpy)(bufptr, file, VG_(strlen)(file));
+ bufptr += VG_(strlen)(file);
+ cmdptr++;
+ break;
+ case 'p':
+ VG_(memcpy)(bufptr, pidbuf, VG_(strlen)(pidbuf));
+ bufptr += VG_(strlen)(pidbuf);
+ cmdptr++;
+ break;
+ default:
+ *bufptr++ = *cmdptr++;
+ break;
+ }
+ break;
+ default:
+ *bufptr++ = *cmdptr++;
+ break;
+ }
+ vg_assert(bufptr - buf < N_BUF-15-50-10/*paranoia*/);
+ }
+
+ *bufptr++ = '\0';
+
+ VG_(message)(Vg_UserMsg, "starting debugger with cmd: %s", buf);
+ res = VG_(system)(buf);
+ if (res == 0) {
+ VG_(message)(Vg_UserMsg, "");
+ VG_(message)(Vg_UserMsg,
+ "Debugger has detached. Valgrind regains control."
+ " We continue.");
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "Warning: Debugger attach failed! (sys_system)");
+ VG_(message)(Vg_UserMsg, "");
+ }
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "Warning: Debugger attach failed! (ptrace problem?)");
+ VG_(message)(Vg_UserMsg, "");
+ }
+
+ VG_(kill)(pid, VKI_SIGKILL);
+ VG_(waitpid)(pid, &status, 0);
+ }
+# undef N_BUF
+}
+
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_debuglog.c.svn-base b/coregrind/.svn/text-base/m_debuglog.c.svn-base
new file mode 100644
index 0000000..c6b0329
--- /dev/null
+++ b/coregrind/.svn/text-base/m_debuglog.c.svn-base
@@ -0,0 +1,885 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Debug (not-for-user) logging; also vprintf. m_debuglog.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+
+/* Performs low-level debug logging that can safely run immediately
+ after startup. To minimise the dependencies on any other parts of
+ the system, the only place the debug output may go is file
+ descriptor 2 (stderr).
+*/
+/* This is the first-initialised module in the entire system!
+ Therefore it is CRITICAL that it does not depend on any other code
+ running first. Hence only the following very limited includes. We
+ cannot depend (directly or indirectly) on any dynamic memory
+ allocation facilities, nor on the m_libc facilities, since the
+ latter depend on this module. DO NOT MESS WITH THESE INCLUDES
+ UNLESS YOU ARE 100% CERTAIN YOU UNDERSTAND THE CONSEQUENCES.
+*/
+
+/* This module is also notable because it is linked into both
+ stage1 and stage2. */
+
+#include "pub_core_basics.h" /* basic types */
+#include "pub_core_vkiscnums.h" /* for syscall numbers */
+#include "pub_core_debuglog.h" /* our own iface */
+#include "valgrind.h" /* for RUNNING_ON_VALGRIND */
+
+/*------------------------------------------------------------*/
+/*--- Stuff to make us completely independent. ---*/
+/*------------------------------------------------------------*/
+
+/* ----- Platform-specifics ----- */
+
+#if defined(VGP_x86_linux)
+
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ volatile Int block[2];
+ block[0] = (Int)buf;
+ block[1] = n;
+ __asm__ volatile (
+ "pushl %%ebx\n" /* ebx is callee-save */
+ "movl %0, %%ebx\n" /* ebx = &block */
+ "pushl %%ebx\n" /* save &block */
+ "movl 0(%%ebx), %%ecx\n" /* %ecx = buf */
+ "movl 4(%%ebx), %%edx\n" /* %edx = n */
+ "movl $"VG_STRINGIFY(__NR_write)", %%eax\n" /* %eax = __NR_write */
+ "movl $1, %%ebx\n" /* %ebx = stderr */
+ "int $0x80\n" /* write(stderr, buf, n) */
+ "popl %%ebx\n" /* reestablish &block */
+ "movl %%eax, 0(%%ebx)\n" /* block[0] = result */
+ "popl %%ebx\n" /* restore ebx */
+ : /*wr*/
+ : /*rd*/ "g" (block)
+ : /*trash*/ "eax", "edi", "ecx", "edx", "memory", "cc"
+ );
+ if (block[0] < 0)
+ block[0] = -1;
+ return block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ UInt __res;
+ __asm__ volatile (
+ "movl $"VG_STRINGIFY(__NR_getpid)", %%eax\n" /* %eax = __NR_getpid */
+ "int $0x80\n" /* getpid() */
+ "movl %%eax, %0\n" /* set __res = eax */
+ : "=mr" (__res)
+ :
+ : "eax" );
+ return __res;
+}
+
+#elif defined(VGP_amd64_linux)
+__attribute__((noinline))
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ volatile Long block[2];
+ block[0] = (Long)buf;
+ block[1] = n;
+ __asm__ volatile (
+ "subq $256, %%rsp\n" /* don't trash the stack redzone */
+ "pushq %%r15\n" /* r15 is callee-save */
+ "movq %0, %%r15\n" /* r15 = &block */
+ "pushq %%r15\n" /* save &block */
+ "movq $"VG_STRINGIFY(__NR_write)", %%rax\n" /* rax = __NR_write */
+ "movq $2, %%rdi\n" /* rdi = stderr */
+ "movq 0(%%r15), %%rsi\n" /* rsi = buf */
+ "movq 8(%%r15), %%rdx\n" /* rdx = n */
+ "syscall\n" /* write(stderr, buf, n) */
+ "popq %%r15\n" /* reestablish &block */
+ "movq %%rax, 0(%%r15)\n" /* block[0] = result */
+ "popq %%r15\n" /* restore r15 */
+ "addq $256, %%rsp\n" /* restore stack ptr */
+ : /*wr*/
+ : /*rd*/ "g" (block)
+ : /*trash*/ "rax", "rdi", "rsi", "rdx", "memory", "cc"
+ );
+ if (block[0] < 0)
+ block[0] = -1;
+ return (UInt)block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ UInt __res;
+ __asm__ volatile (
+ "movq $"VG_STRINGIFY(__NR_getpid)", %%rax\n" /* %rax = __NR_getpid */
+ "syscall\n" /* getpid() */
+ "movl %%eax, %0\n" /* set __res = %eax */
+ : "=mr" (__res)
+ :
+ : "rax" );
+ return __res;
+}
+
+#elif defined(VGP_ppc32_linux)
+
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ volatile Int block[2];
+ block[0] = (Int)buf;
+ block[1] = n;
+ __asm__ volatile (
+ "addi 1,1,-256\n\t"
+ "mr 5,%0\n\t" /* r5 = &block[0] */
+ "stw 5,0(1)\n\t" /* stash on stack */
+ "li 0,"VG_STRINGIFY(__NR_write)"\n\t" /* set %r0 = __NR_write */
+ "li 3,2\n\t" /* set %r3 = stderr */
+ "lwz 4,0(5)\n\t" /* set %r4 = buf */
+ "lwz 5,4(5)\n\t" /* set %r5 = n */
+ "sc\n\t" /* write(stderr, buf, n) */
+ "lwz 5,0(1)\n\t"
+ "addi 1,1,256\n\t"
+ "stw 3,0(5)\n" /* block[0] = result */
+ :
+ : "b" (block)
+ : "cc","memory","cr0","ctr",
+ "r0","r2","r3","r4","r5","r6","r7","r8","r9","r10","r11","r12"
+ );
+ if (block[0] < 0)
+ block[0] = -1;
+ return (UInt)block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ register UInt __res __asm__ ("r3");
+ __asm__ volatile (
+ "li 0, %1\n\t"
+ "sc"
+ : "=&r" (__res)
+ : "i" (__NR_getpid)
+ : "cc","memory","cr0","ctr",
+ "r0","r2","r4","r5","r6","r7","r8","r9","r10","r11","r12"
+ );
+ return __res;
+}
+
+#elif defined(VGP_ppc64_linux)
+
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ volatile Long block[2];
+ block[0] = (Long)buf;
+ block[1] = (Long)n;
+ __asm__ volatile (
+ "addi 1,1,-256\n\t"
+ "mr 5,%0\n\t" /* r5 = &block[0] */
+ "std 5,0(1)\n\t" /* stash on stack */
+ "li 0,"VG_STRINGIFY(__NR_write)"\n\t" /* %r0 = __NR_write */
+ "li 3,2\n\t" /* set %r3 = stderr */
+ "ld 4,0(5)\n\t" /* set %r4 = buf */
+ "ld 5,8(5)\n\t" /* set %r5 = n */
+ "sc\n\t" /* write(stderr, buf, n) */
+ "ld 5,0(1)\n\t"
+ "addi 1,1,256\n\t"
+ "std 3,0(5)\n" /* block[0] = result */
+ :
+ : "b" (block)
+ : "cc","memory","cr0","ctr",
+ "r0","r2","r3","r4","r5","r6","r7","r8","r9","r10","r11","r12"
+ );
+ if (block[0] < 0)
+ block[0] = -1;
+ return (UInt)(Int)block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ register ULong __res __asm__ ("r3");
+ __asm__ volatile (
+ "li 0, %1\n\t"
+ "sc"
+ : "=&r" (__res)
+ : "i" (__NR_getpid)
+ : "cc","memory","cr0","ctr",
+ "r0","r2","r4","r5","r6","r7","r8","r9","r10","r11","r12"
+ );
+ return (UInt)__res;
+}
+
+#elif defined(VGP_ppc32_aix5)
+
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ /* For some reason gcc-3.3.2 doesn't preserve r31 across the asm
+ even though we state it to be trashed. So use r27 instead. */
+ volatile UInt block[3];
+ block[0] = (UInt)buf;
+ block[1] = n;
+ block[2] = __NR_write;
+ __asm__ __volatile__ (
+ "mr 28,%0\n\t" /* establish base ptr */
+ "mr 27,2\n\t" /* save r2 in r27 */
+ "mflr 30\n\t" /* save lr in r30 */
+
+ "lwz 2,8(28)\n\t" /* set %r2 = __NR_write */
+ "li 3,2\n\t" /* set %r3 = stderr */
+ "lwz 4,0(28)\n\t" /* set %r4 = buf */
+ "lwz 5,4(28)\n\t" /* set %r5 = n */
+
+ "crorc 6,6,6\n\t"
+ ".long 0x48000005\n\t" /* bl .+4 */
+ "mflr 29\n\t"
+ "addi 29,29,16\n\t"
+ "mtlr 29\n\t"
+ "sc\n\t" /* write() */
+
+ "stw 3,0(28)\n\t" /* result */
+ "stw 4,4(28)\n\t" /* error? */
+
+ "mr 2,27\n\t" /* restore r2 */
+ "mtlr 30" /* restore lr */
+
+ : /*out*/
+ : /*in*/ "b" (&block[0])
+ : /*trash*/
+ /*temps*/ "r31","r30","r29","r28","r27",
+ /*args*/ "r3","r4","r5","r6","r7","r8","r9","r10",
+ /*paranoia*/ "memory","cc","r0","r1","r11","r12","r13",
+ "xer","ctr","cr0","cr1","cr2","cr3",
+ "cr4","cr5","cr6","cr7"
+ );
+ if (block[1] != 0)
+ return -1;
+ else
+ return block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ /* For some reason gcc-3.3.2 doesn't preserve r31 across the asm
+ even though we state it to be trashed. So use r27 instead. */
+ volatile UInt block[1];
+ block[0] = __NR_getpid;
+ __asm__ __volatile__ (
+ "mr 28,%0\n\t" /* establish base ptr */
+ "mr 27,2\n\t" /* save r2 in r27 */
+ "mflr 30\n\t" /* save lr in r30 */
+
+ "lwz 2,0(28)\n\t" /* set %r2 = __NR_getpid */
+
+ "crorc 6,6,6\n\t"
+ ".long 0x48000005\n\t" /* bl .+4 */
+ "mflr 29\n\t"
+ "addi 29,29,16\n\t"
+ "mtlr 29\n\t"
+ "sc\n\t" /* getpid() */
+
+ "stw 3,0(28)\n\t" /* result -> block[0] */
+
+ "mr 2,27\n\t" /* restore r2 */
+ "mtlr 30" /* restore lr */
+
+ : /*out*/
+ : /*in*/ "b" (&block[0])
+ : /*trash*/
+ /*temps*/ "r31","r30","r29","r28","r27",
+ /*args*/ "r3","r4","r5","r6","r7","r8","r9","r10",
+ /*paranoia*/ "memory","cc","r0","r1","r11","r12","r13",
+ "xer","ctr","cr0","cr1","cr2","cr3",
+ "cr4","cr5","cr6","cr7"
+ );
+ return block[0];
+}
+
+#elif defined(VGP_ppc64_aix5)
+
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ volatile ULong block[3];
+ block[0] = (ULong)buf;
+ block[1] = n;
+ block[2] = (ULong)__NR_write;
+ __asm__ __volatile__ (
+ "mr 28,%0\n\t" /* establish base ptr */
+ "mr 27,2\n\t" /* save r2 in r27 */
+ "mflr 30\n\t" /* save lr in r30 */
+
+ "ld 2,16(28)\n\t" /* set %r2 = __NR_write */
+ "li 3,2\n\t" /* set %r3 = stderr */
+ "ld 4,0(28)\n\t" /* set %r4 = buf */
+ "ld 5,8(28)\n\t" /* set %r5 = n */
+
+ "crorc 6,6,6\n\t"
+ ".long 0x48000005\n\t" /* bl .+4 */
+ "mflr 29\n\t"
+ "addi 29,29,16\n\t"
+ "mtlr 29\n\t"
+ "sc\n\t" /* write() */
+
+ "std 3,0(28)\n\t" /* result */
+ "std 4,8(28)\n\t" /* error? */
+
+ "mr 2,27\n\t" /* restore r2 */
+ "mtlr 30" /* restore lr */
+
+ : /*out*/
+ : /*in*/ "b" (&block[0])
+ : /*trash*/
+ /*temps*/ "r31","r30","r29","r28","r27",
+ /*args*/ "r3","r4","r5","r6","r7","r8","r9","r10",
+ /*paranoia*/ "memory","cc","r0","r1","r11","r12","r13",
+ "xer","ctr","cr0","cr1","cr2","cr3",
+ "cr4","cr5","cr6","cr7"
+ );
+ if (block[1] != 0)
+ return (UInt)-1;
+ else
+ return (UInt)block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ volatile ULong block[1];
+ block[0] = __NR_getpid;
+ __asm__ __volatile__ (
+ "mr 28,%0\n\t" /* establish base ptr */
+ "mr 27,2\n\t" /* save r2 in r27 */
+ "mflr 30\n\t" /* save lr in r30 */
+
+ "ld 2,0(28)\n\t" /* set %r2 = __NR_getpid */
+
+ "crorc 6,6,6\n\t"
+ ".long 0x48000005\n\t" /* bl .+4 */
+ "mflr 29\n\t"
+ "addi 29,29,16\n\t"
+ "mtlr 29\n\t"
+ "sc\n\t" /* getpid() */
+
+ "std 3,0(28)\n\t" /* result -> block[0] */
+
+ "mr 2,27\n\t" /* restore r2 */
+ "mtlr 30" /* restore lr */
+
+ : /*out*/
+ : /*in*/ "b" (&block[0])
+ : /*trash*/
+ /*temps*/ "r31","r30","r29","r28","r27",
+ /*args*/ "r3","r4","r5","r6","r7","r8","r9","r10",
+ /*paranoia*/ "memory","cc","r0","r1","r11","r12","r13",
+ "xer","ctr","cr0","cr1","cr2","cr3",
+ "cr4","cr5","cr6","cr7"
+ );
+ return (UInt)block[0];
+}
+
+#else
+# error Unknown platform
+#endif
+
+
+/* ----- generic ----- */
+
+/* strlen, so we don't need m_libc */
+static Int local_strlen ( const HChar* str )
+{
+ Int i = 0;
+ while (str[i] != 0) i++;
+ return i;
+}
+
+static HChar local_toupper ( HChar c )
+{
+ if (c >= 'a' && c <= 'z')
+ return c + ('A' - 'a');
+ else
+ return c;
+}
+
+/* Emit buf[0 .. n-1] to stderr. Unfortunately platform-specific.
+*/
+static void emit ( HChar* buf, Int n )
+{
+ if (n >= 1)
+ (void)local_sys_write_stderr(buf, n);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- A simple, generic, vprintf implementation. ---*/
+/*------------------------------------------------------------*/
+
+/* -----------------------------------------------
+ Distantly derived from:
+
+ vprintf replacement for Checker.
+ Copyright 1993, 1994, 1995 Tristan Gingold
+ Written September 1993 Tristan Gingold
+ Tristan Gingold, 8 rue Parmentier, F-91120 PALAISEAU, FRANCE
+
+ (Checker itself was GPL'd.)
+ ----------------------------------------------- */
+
+/* Some flags. */
+#define VG_MSG_SIGNED 1 /* The value is signed. */
+#define VG_MSG_ZJUSTIFY 2 /* Must justify with '0'. */
+#define VG_MSG_LJUSTIFY 4 /* Must justify on the left. */
+#define VG_MSG_PAREN 8 /* Parenthesize if present (for %y) */
+#define VG_MSG_COMMA 16 /* Add commas to numbers (for %d, %u) */
+#define VG_MSG_ALTFORMAT 32 /* Convert the value to alternate format */
+
+/* Copy a string into the buffer. */
+static
+UInt myvprintf_str ( void(*send)(HChar,void*),
+ void* send_arg2,
+ Int flags,
+ Int width,
+ HChar* str,
+ Bool capitalise )
+{
+# define MAYBE_TOUPPER(ch) (capitalise ? local_toupper(ch) : (ch))
+ UInt ret = 0;
+ Int i, extra;
+ Int len = local_strlen(str);
+
+ if (width == 0) {
+ ret += len;
+ for (i = 0; i < len; i++)
+ send(MAYBE_TOUPPER(str[i]), send_arg2);
+ return ret;
+ }
+
+ if (len > width) {
+ ret += width;
+ for (i = 0; i < width; i++)
+ send(MAYBE_TOUPPER(str[i]), send_arg2);
+ return ret;
+ }
+
+ extra = width - len;
+ if (flags & VG_MSG_LJUSTIFY) {
+ ret += extra;
+ for (i = 0; i < extra; i++)
+ send(' ', send_arg2);
+ }
+ ret += len;
+ for (i = 0; i < len; i++)
+ send(MAYBE_TOUPPER(str[i]), send_arg2);
+ if (!(flags & VG_MSG_LJUSTIFY)) {
+ ret += extra;
+ for (i = 0; i < extra; i++)
+ send(' ', send_arg2);
+ }
+
+# undef MAYBE_TOUPPER
+ return ret;
+}
+
+
+/* Copy a string into the buffer, escaping bad XML chars. */
+static
+UInt myvprintf_str_XML_simplistic ( void(*send)(HChar,void*),
+ void* send_arg2,
+ HChar* str )
+{
+ UInt ret = 0;
+ Int i;
+ Int len = local_strlen(str);
+ HChar* alt;
+
+ for (i = 0; i < len; i++) {
+ switch (str[i]) {
+ case '&': alt = "&amp;"; break;
+ case '<': alt = "&lt;"; break;
+ case '>': alt = "&gt;"; break;
+ default: alt = NULL;
+ }
+
+ if (alt) {
+ while (*alt) {
+ send(*alt, send_arg2);
+ ret++;
+ alt++;
+ }
+ } else {
+ send(str[i], send_arg2);
+ ret++;
+ }
+ }
+
+ return ret;
+}
+
+
+/* Write P into the buffer according to these args:
+ * If SIGN is true, p is a signed.
+ * BASE is the base.
+ * If WITH_ZERO is true, '0' must be added.
+ * WIDTH is the width of the field.
+ */
+static
+UInt myvprintf_int64 ( void(*send)(HChar,void*),
+ void* send_arg2,
+ Int flags,
+ Int base,
+ Int width,
+ Bool capitalised,
+ ULong p )
+{
+ HChar buf[40];
+ Int ind = 0;
+ Int i, nc = 0;
+ Bool neg = False;
+ HChar* digits = capitalised ? "0123456789ABCDEF" : "0123456789abcdef";
+ UInt ret = 0;
+
+ if (base < 2 || base > 16)
+ return ret;
+
+ if ((flags & VG_MSG_SIGNED) && (Long)p < 0) {
+ p = - (Long)p;
+ neg = True;
+ }
+
+ if (p == 0)
+ buf[ind++] = '0';
+ else {
+ while (p > 0) {
+ if (flags & VG_MSG_COMMA && 10 == base &&
+ 0 == (ind-nc) % 3 && 0 != ind)
+ {
+ buf[ind++] = ',';
+ nc++;
+ }
+ buf[ind++] = digits[p % base];
+ p /= base;
+ }
+ }
+
+ if (neg)
+ buf[ind++] = '-';
+
+ if (width > 0 && !(flags & VG_MSG_LJUSTIFY)) {
+ for(; ind < width; ind++) {
+ /* vg_assert(ind < 39); */
+ if (ind > 39) {
+ buf[39] = 0;
+ break;
+ }
+ buf[ind] = (flags & VG_MSG_ZJUSTIFY) ? '0': ' ';
+ }
+ }
+
+ /* Reverse copy to buffer. */
+ ret += ind;
+ for (i = ind -1; i >= 0; i--) {
+ send(buf[i], send_arg2);
+ }
+ if (width > 0 && (flags & VG_MSG_LJUSTIFY)) {
+ for(; ind < width; ind++) {
+ ret++;
+ /* Never pad with zeroes on RHS -- changes the value! */
+ send(' ', send_arg2);
+ }
+ }
+ return ret;
+}
+
+
+/* A simple vprintf(). */
+/* EXPORTED */
+UInt
+VG_(debugLog_vprintf) (
+ void(*send)(HChar,void*),
+ void* send_arg2,
+ const HChar* format,
+ va_list vargs
+)
+{
+ UInt ret = 0;
+ Int i;
+ Int flags;
+ Int width;
+ Int n_ls = 0;
+ Bool is_long, caps;
+
+ /* We assume that vargs has already been initialised by the
+ caller, using va_start, and that the caller will similarly
+ clean up with va_end.
+ */
+
+ for (i = 0; format[i] != 0; i++) {
+ if (format[i] != '%') {
+ send(format[i], send_arg2);
+ ret++;
+ continue;
+ }
+ i++;
+ /* A '%' has been found. Ignore a trailing %. */
+ if (format[i] == 0)
+ break;
+ if (format[i] == '%') {
+ /* '%%' is replaced by '%'. */
+ send('%', send_arg2);
+ ret++;
+ continue;
+ }
+ flags = 0;
+ n_ls = 0;
+ width = 0; /* length of the field. */
+ while (1) {
+ switch (format[i]) {
+ case '(':
+ flags |= VG_MSG_PAREN;
+ break;
+ case ',':
+ case '\'':
+ /* If ',' or '\'' follows '%', commas will be inserted. */
+ flags |= VG_MSG_COMMA;
+ break;
+ case '-':
+ /* If '-' follows '%', justify on the left. */
+ flags |= VG_MSG_LJUSTIFY;
+ break;
+ case '0':
+ /* If '0' follows '%', pads will be inserted. */
+ flags |= VG_MSG_ZJUSTIFY;
+ break;
+ case '#':
+ /* If '#' follows '%', alternative format will be used. */
+ flags |= VG_MSG_ALTFORMAT;
+ break;
+ default:
+ goto parse_fieldwidth;
+ }
+ i++;
+ }
+ parse_fieldwidth:
+ /* Compute the field length. */
+ while (format[i] >= '0' && format[i] <= '9') {
+ width *= 10;
+ width += format[i++] - '0';
+ }
+ while (format[i] == 'l') {
+ i++;
+ n_ls++;
+ }
+
+ // %d means print a 32-bit integer.
+ // %ld means print a word-size integer.
+ // %lld means print a 64-bit integer.
+ if (0 == n_ls) { is_long = False; }
+ else if (1 == n_ls) { is_long = ( sizeof(void*) == sizeof(Long) ); }
+ else { is_long = True; }
+
+ switch (format[i]) {
+ case 'd': /* %d */
+ flags |= VG_MSG_SIGNED;
+ if (is_long)
+ ret += myvprintf_int64(send, send_arg2, flags, 10, width, False,
+ (ULong)(va_arg (vargs, Long)));
+ else
+ ret += myvprintf_int64(send, send_arg2, flags, 10, width, False,
+ (ULong)(va_arg (vargs, Int)));
+ break;
+ case 'u': /* %u */
+ if (is_long)
+ ret += myvprintf_int64(send, send_arg2, flags, 10, width, False,
+ (ULong)(va_arg (vargs, ULong)));
+ else
+ ret += myvprintf_int64(send, send_arg2, flags, 10, width, False,
+ (ULong)(va_arg (vargs, UInt)));
+ break;
+ case 'p': /* %p */
+ ret += 2;
+ send('0',send_arg2);
+ send('x',send_arg2);
+ ret += myvprintf_int64(send, send_arg2, flags, 16, width, True,
+ (ULong)((UWord)va_arg (vargs, void *)));
+ break;
+ case 'x': /* %x */
+ case 'X': /* %X */
+ caps = toBool(format[i] == 'X');
+ if (flags & VG_MSG_ALTFORMAT) {
+ ret += 2;
+ send('0',send_arg2);
+ send('x',send_arg2);
+ }
+ if (is_long)
+ ret += myvprintf_int64(send, send_arg2, flags, 16, width, caps,
+ (ULong)(va_arg (vargs, ULong)));
+ else
+ ret += myvprintf_int64(send, send_arg2, flags, 16, width, caps,
+ (ULong)(va_arg (vargs, UInt)));
+ break;
+ case 'c': /* %c */
+ ret++;
+ send(va_arg (vargs, int), send_arg2);
+ break;
+ case 's': case 'S': { /* %s */
+ char *str = va_arg (vargs, char *);
+ if (str == (char*) 0) str = "(null)";
+ ret += myvprintf_str(send, send_arg2,
+ flags, width, str, format[i]=='S');
+ break;
+ }
+ case 't': { /* %t, like %s but escaping chars for XML safety */
+ /* Note: simplistic; ignores field width and flags */
+ char *str = va_arg (vargs, char *);
+ if (str == (char*) 0) str = "(null)";
+ ret += myvprintf_str_XML_simplistic(send, send_arg2, str);
+ break;
+ }
+
+// case 'y': { /* %y - print symbol */
+// Char buf[100];
+// Char *cp = buf;
+// Addr a = va_arg(vargs, Addr);
+//
+// if (flags & VG_MSG_PAREN)
+// *cp++ = '(';
+// if (VG_(get_fnname_w_offset)(a, cp, sizeof(buf)-4)) {
+// if (flags & VG_MSG_PAREN) {
+// cp += VG_(strlen)(cp);
+// *cp++ = ')';
+// *cp = '\0';
+// }
+// ret += myvprintf_str(send, send_arg2, flags, width, buf, 0);
+// }
+// break;
+// }
+ default:
+ break;
+ }
+ }
+ return ret;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Debuglog stuff. ---*/
+/*------------------------------------------------------------*/
+
+/* Only print messages whose stated level is less than or equal to
+ this. By default, it makes this entire subsystem silent. */
+
+static Int loglevel = 0;
+
+/* Module startup. */
+/* EXPORTED */
+void VG_(debugLog_startup) ( Int level, HChar* who )
+{
+ if (level < 0) level = 0;
+ if (level > 10) level = 10;
+ loglevel = level;
+ VG_(debugLog)(1, "debuglog",
+ "DebugLog system started by %s, "
+ "level %d logging requested\n",
+ who, loglevel);
+}
+
+/* Get the logging threshold level, as set by the most recent call to
+ VG_(debugLog_startup), or zero if there have been no such calls so
+ far. */
+/* EXPORTED */
+Int VG_(debugLog_getLevel) ( void )
+{
+ return loglevel;
+}
+
+
+/* ------------ */
+
+typedef
+ struct {
+ HChar buf[100];
+ Int n;
+ }
+ printf_buf;
+
+static void add_to_buf ( HChar c, void* p )
+{
+ printf_buf* buf = (printf_buf*)p;
+
+ if (buf->n >= 100-10 /*paranoia*/ ) {
+ emit( buf->buf, local_strlen(buf->buf) );
+ buf->n = 0;
+ buf->buf[buf->n] = 0;
+ }
+ buf->buf[buf->n++] = c;
+ buf->buf[buf->n] = 0;
+}
+
+/* Send a logging message. Nothing is output unless 'level'
+ is <= the current loglevel. */
+/* EXPORTED */
+__attribute__((format(__printf__, 3, 4)))
+void VG_(debugLog) ( Int level, const HChar* modulename,
+ const HChar* format, ... )
+{
+ UInt ret, pid;
+ Int indent, depth, i;
+ va_list vargs;
+ printf_buf buf;
+
+ if (level > loglevel)
+ return;
+
+ indent = 2*level - 1;
+ if (indent < 1) indent = 1;
+
+ buf.n = 0;
+ buf.buf[0] = 0;
+ pid = local_sys_getpid();
+
+ // Print one '>' in front of the messages for each level of self-hosting
+ // being performed.
+ depth = RUNNING_ON_VALGRIND;
+ for (i = 0; i < depth; i++) {
+ (void)myvprintf_str ( add_to_buf, &buf, 0, 1, ">", False );
+ }
+
+ (void)myvprintf_str ( add_to_buf, &buf, 0, 2, "--", False );
+ (void)myvprintf_int64 ( add_to_buf, &buf, 0, 10, 1, False, (ULong)pid );
+ (void)myvprintf_str ( add_to_buf, &buf, 0, 1, ":", False );
+ (void)myvprintf_int64 ( add_to_buf, &buf, 0, 10, 1, False, (ULong)level );
+ (void)myvprintf_str ( add_to_buf, &buf, 0, 1, ":", False );
+ (void)myvprintf_str ( add_to_buf, &buf, 0, 8, (HChar*)modulename, False );
+ (void)myvprintf_str ( add_to_buf, &buf, 0, indent, "", False );
+
+ va_start(vargs,format);
+
+ ret = VG_(debugLog_vprintf) ( add_to_buf, &buf, format, vargs );
+
+ if (buf.n > 0) {
+ emit( buf.buf, local_strlen(buf.buf) );
+ }
+
+ va_end(vargs);
+}
+
+
+
+/*--------------------------------------------------------------------*/
+/*--- end m_debuglog.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_errormgr.c.svn-base b/coregrind/.svn/text-base/m_errormgr.c.svn-base
new file mode 100644
index 0000000..ec4862a
--- /dev/null
+++ b/coregrind/.svn/text-base/m_errormgr.c.svn-base
@@ -0,0 +1,1309 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Management of error messages. m_errormgr.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h" // For VG_N_THREADS
+#include "pub_core_debugger.h"
+#include "pub_core_debuginfo.h"
+#include "pub_core_errormgr.h"
+#include "pub_core_execontext.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h" // For VG_(getpid)()
+#include "pub_core_seqmatch.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_stacktrace.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_translate.h" // for VG_(translate)()
+
+/*------------------------------------------------------------*/
+/*--- Globals ---*/
+/*------------------------------------------------------------*/
+
+/* After this many different unsuppressed errors have been observed,
+ be more conservative about collecting new ones. */
+#define M_COLLECT_ERRORS_SLOWLY_AFTER 100
+
+/* After this many different unsuppressed errors have been observed,
+ stop collecting errors at all, and tell the user their program is
+ evidently a steaming pile of camel dung. */
+#define M_COLLECT_NO_ERRORS_AFTER_SHOWN 1000
+
+/* After this many total errors have been observed, stop collecting
+ errors at all. Counterpart to M_COLLECT_NO_ERRORS_AFTER_SHOWN. */
+#define M_COLLECT_NO_ERRORS_AFTER_FOUND 10000000
+
+/* The list of error contexts found, both suppressed and unsuppressed.
+ Initially empty, and grows as errors are detected. */
+static Error* errors = NULL;
+
+/* The list of suppression directives, as read from the specified
+ suppressions file. Note that the list gets rearranged as a result
+ of the searches done by is_suppressible_error(). */
+static Supp* suppressions = NULL;
+
+/* Running count of unsuppressed errors detected. */
+static UInt n_errs_found = 0;
+
+/* Running count of suppressed errors detected. */
+static UInt n_errs_suppressed = 0;
+
+/* forwards ... */
+static Supp* is_suppressible_error ( Error* err );
+
+static ThreadId last_tid_printed = 1;
+
+/* Stats: number of searches of the error list initiated. */
+static UWord em_errlist_searches = 0;
+
+/* Stats: number of comparisons done during error list
+ searching. */
+static UWord em_errlist_cmps = 0;
+
+/* Stats: number of searches of the suppression list initiated. */
+static UWord em_supplist_searches = 0;
+
+/* Stats: number of comparisons done during suppression list
+ searching. */
+static UWord em_supplist_cmps = 0;
+
+/*------------------------------------------------------------*/
+/*--- Error type ---*/
+/*------------------------------------------------------------*/
+
+/* Errors. Extensible (via the 'extra' field). Tools can use a normal
+ enum (with element values in the normal range (0..)) for 'ekind'.
+ Functions for getting/setting the tool-relevant fields are in
+ include/pub_tool_errormgr.h.
+
+ When errors are found and recorded with VG_(maybe_record_error)(), all
+ the tool must do is pass in the four parameters; core will
+ allocate/initialise the error record.
+*/
+struct _Error {
+ struct _Error* next;
+ // Unique tag. This gives the error a unique identity (handle) by
+ // which it can be referred to afterwords. Currently only used for
+ // XML printing.
+ UInt unique;
+ // NULL if unsuppressed; or ptr to suppression record.
+ Supp* supp;
+ Int count;
+
+ // The tool-specific part
+ ThreadId tid; // Initialised by core
+ ExeContext* where; // Initialised by core
+ ErrorKind ekind; // Used by ALL. Must be in the range (0..)
+ Addr addr; // Used frequently
+ Char* string; // Used frequently
+ void* extra; // For any tool-specific extras
+};
+
+
+ExeContext* VG_(get_error_where) ( Error* err )
+{
+ return err->where;
+}
+
+ErrorKind VG_(get_error_kind) ( Error* err )
+{
+ return err->ekind;
+}
+
+Addr VG_(get_error_address) ( Error* err )
+{
+ return err->addr;
+}
+
+Char* VG_(get_error_string) ( Error* err )
+{
+ return err->string;
+}
+
+void* VG_(get_error_extra) ( Error* err )
+{
+ return err->extra;
+}
+
+UInt VG_(get_n_errs_found)( void )
+{
+ return n_errs_found;
+}
+
+/*------------------------------------------------------------*/
+/*--- Suppression type ---*/
+/*------------------------------------------------------------*/
+
+/* Note: it is imperative this doesn't overlap with (0..) at all, as tools
+ * effectively extend it by defining their own enums in the (0..) range. */
+typedef
+ enum {
+ // Nb: thread errors are a relic of the time when Valgrind's core
+ // could detect them. This example is left commented-out as an
+ // example should new core errors ever be added.
+ ThreadSupp = -1, /* Matches ThreadErr */
+ }
+ CoreSuppKind;
+
+/* Max number of callers for context in a suppression. */
+#define VG_MAX_SUPP_CALLERS 24
+
+/* For each caller specified for a suppression, record the nature of
+ the caller name. Not of interest to tools. */
+typedef
+ enum {
+ NoName, /* Error case */
+ ObjName, /* Name is of an shared object file. */
+ FunName, /* Name is of a function. */
+ DotDotDot /* Frame-level wildcard */
+ }
+ SuppLocTy;
+
+typedef
+ struct {
+ SuppLocTy ty;
+ Char* name; /* NULL for NoName and DotDotDot */
+ }
+ SuppLoc;
+
+/* Suppressions. Tools can get/set tool-relevant parts with functions
+ declared in include/pub_tool_errormgr.h. Extensible via the 'extra' field.
+ Tools can use a normal enum (with element values in the normal range
+ (0..)) for 'skind'. */
+struct _Supp {
+ struct _Supp* next;
+ Int count; // The number of times this error has been suppressed.
+ Char* sname; // The name by which the suppression is referred to.
+
+ // Length of 'callers'
+ Int n_callers;
+ // Array of callers, for matching stack traces. First one (name of fn
+ // where err occurs) is mandatory; rest are optional.
+ SuppLoc* callers;
+
+ /* The tool-specific part */
+ SuppKind skind; // What kind of suppression. Must use the range (0..).
+ Char* string; // String -- use is optional. NULL by default.
+ void* extra; // Anything else -- use is optional. NULL by default.
+};
+
+SuppKind VG_(get_supp_kind) ( Supp* su )
+{
+ return su->skind;
+}
+
+Char* VG_(get_supp_string) ( Supp* su )
+{
+ return su->string;
+}
+
+void* VG_(get_supp_extra) ( Supp* su )
+{
+ return su->extra;
+}
+
+
+void VG_(set_supp_kind) ( Supp* su, SuppKind skind )
+{
+ su->skind = skind;
+}
+
+void VG_(set_supp_string) ( Supp* su, Char* string )
+{
+ su->string = string;
+}
+
+void VG_(set_supp_extra) ( Supp* su, void* extra )
+{
+ su->extra = extra;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Helper fns ---*/
+/*------------------------------------------------------------*/
+
+// Only show core errors if the tool wants to, we're not running with -q,
+// and were not outputting XML.
+Bool VG_(showing_core_errors)(void)
+{
+ return VG_(needs).core_errors && VG_(clo_verbosity) >= 1 && !VG_(clo_xml);
+}
+
+/* Compare errors, to detect duplicates. */
+static Bool eq_Error ( VgRes res, Error* e1, Error* e2 )
+{
+ if (e1->ekind != e2->ekind)
+ return False;
+ if (!VG_(eq_ExeContext)(res, e1->where, e2->where))
+ return False;
+
+ switch (e1->ekind) {
+ //(example code, see comment on CoreSuppKind above)
+ //case ThreadErr:
+ // vg_assert(VG_(needs).core_errors);
+ // return <something>
+ default:
+ if (VG_(needs).tool_errors) {
+ return VG_TDICT_CALL(tool_eq_Error, res, e1, e2);
+ } else {
+ VG_(printf)("\nUnhandled error type: %u. VG_(needs).tool_errors\n"
+ "probably needs to be set.\n",
+ e1->ekind);
+ VG_(tool_panic)("unhandled error type");
+ }
+ }
+}
+
+static void pp_Error ( Error* err )
+{
+ if (VG_(clo_xml)) {
+ VG_UMSG("<error>");
+ VG_UMSG(" <unique>0x%x</unique>", err->unique);
+ VG_UMSG(" <tid>%d</tid>", err->tid);
+ }
+
+ if (!VG_(clo_xml)) {
+ if (VG_(tdict).tool_show_ThreadIDs_for_errors
+ && err->tid > 0 && err->tid != last_tid_printed) {
+ VG_UMSG("Thread %d:", err->tid );
+ last_tid_printed = err->tid;
+ }
+ }
+
+ switch (err->ekind) {
+ //(example code, see comment on CoreSuppKind above)
+ //case ThreadErr:
+ // vg_assert(VG_(needs).core_errors);
+ // VG_(tm_error_print)(err);
+ // break;
+ default:
+ if (VG_(needs).tool_errors)
+ VG_TDICT_CALL( tool_pp_Error, err );
+ else {
+ VG_(printf)("\nUnhandled error type: %u. VG_(needs).tool_errors\n"
+ "probably needs to be set?\n",
+ err->ekind);
+ VG_(tool_panic)("unhandled error type");
+ }
+ }
+
+ if (VG_(clo_xml))
+ VG_UMSG("</error>");
+}
+
+/* Figure out if we want to perform a given action for this error, possibly
+ by asking the user. */
+Bool VG_(is_action_requested) ( Char* action, Bool* clo )
+{
+ Char ch, ch2;
+ Int res;
+
+ if (*clo == False)
+ return False;
+
+ VG_UMSG("");
+
+ again:
+ VG_(printf)(
+ "==%d== "
+ "---- %s ? --- [Return/N/n/Y/y/C/c] ---- ",
+ VG_(getpid)(), action
+ );
+
+ res = VG_(read)(VG_(clo_input_fd), &ch, 1);
+ if (res != 1) goto ioerror;
+ /* res == 1 */
+ if (ch == '\n') return False;
+ if (ch != 'N' && ch != 'n' && ch != 'Y' && ch != 'y'
+ && ch != 'C' && ch != 'c') goto again;
+
+ res = VG_(read)(VG_(clo_input_fd), &ch2, 1);
+ if (res != 1) goto ioerror;
+ if (ch2 != '\n') goto again;
+
+ /* No, don't want to do action. */
+ if (ch == 'n' || ch == 'N') return False;
+ /* Yes, want to do action. */
+ if (ch == 'y' || ch == 'Y') return True;
+ /* No, don't want to do action, and don't ask again either. */
+ vg_assert(ch == 'c' || ch == 'C');
+
+ ioerror:
+ *clo = False;
+ return False;
+}
+
+
+/* Construct an error */
+static __inline__
+void construct_error ( Error* err, ThreadId tid, ErrorKind ekind, Addr a,
+ Char* s, void* extra, ExeContext* where )
+{
+ /* DO NOT MAKE unique_counter NON-STATIC */
+ static UInt unique_counter = 0;
+
+ tl_assert(tid < VG_N_THREADS);
+
+ /* Core-only parts */
+ err->unique = unique_counter++;
+ err->next = NULL;
+ err->supp = NULL;
+ err->count = 1;
+ err->tid = tid;
+ if (NULL == where)
+ err->where = VG_(record_ExeContext)( tid, 0 );
+ else
+ err->where = where;
+
+ /* Tool-relevant parts */
+ err->ekind = ekind;
+ err->addr = a;
+ err->extra = extra;
+ err->string = s;
+
+ /* sanity... */
+ vg_assert( tid < VG_N_THREADS );
+}
+
+#define ERRTXT_LEN 4096
+
+static void printSuppForIp(UInt n, Addr ip)
+{
+ static UChar buf[ERRTXT_LEN];
+
+ if ( VG_(get_fnname_no_cxx_demangle) (ip, buf, ERRTXT_LEN) ) {
+ VG_(printf)(" fun:%s\n", buf);
+ } else if ( VG_(get_objname)(ip, buf, ERRTXT_LEN) ) {
+ VG_(printf)(" obj:%s\n", buf);
+ } else {
+ VG_(printf)(" obj:*\n");
+ }
+}
+
+static void gen_suppression(Error* err)
+{
+ ExeContext* ec = VG_(get_error_where)(err);
+ Int stop_at = VG_(clo_backtrace_size);
+
+ /* At most VG_MAX_SUPP_CALLERS names */
+ if (stop_at > VG_MAX_SUPP_CALLERS) stop_at = VG_MAX_SUPP_CALLERS;
+ vg_assert(stop_at > 0);
+
+ //(example code, see comment on CoreSuppKind above)
+ if (0) {
+ //if (0) ThreadErr == err->ekind) {
+ // VG_(printf)("{\n");
+ // VG_(printf)(" <insert a suppression name here>\n");
+ // VG_(printf)(" core:Thread\n");
+
+ } else {
+ Char* name = VG_TDICT_CALL(tool_get_error_name, err);
+ if (NULL == name) {
+ VG_UMSG("(%s does not allow error to be suppressed)",
+ VG_(details).name);
+ return;
+ }
+ VG_(printf)("{\n");
+ VG_(printf)(" <insert a suppression name here>\n");
+ VG_(printf)(" %s:%s\n", VG_(details).name, name);
+ VG_TDICT_CALL(tool_print_extra_suppression_info, err);
+ }
+
+ // Print stack trace elements
+ VG_(apply_StackTrace)(printSuppForIp,
+ VG_(get_ExeContext_StackTrace)(ec), stop_at);
+
+ VG_(printf)("}\n");
+}
+
+static
+void do_actions_on_error(Error* err, Bool allow_db_attach)
+{
+ Bool still_noisy = True;
+
+ /* Perhaps we want a debugger attach at this point? */
+ if (allow_db_attach &&
+ VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) ))
+ {
+ if (0) VG_(printf)("starting debugger\n");
+ VG_(start_debugger)( err->tid );
+ }
+ /* Or maybe we want to generate the error's suppression? */
+ if (VG_(clo_gen_suppressions) == 2
+ || (VG_(clo_gen_suppressions) == 1
+ && VG_(is_action_requested)( "Print suppression", &still_noisy ))
+ ) {
+ gen_suppression(err);
+ }
+ if (VG_(clo_gen_suppressions) == 1 && !still_noisy)
+ VG_(clo_gen_suppressions) = 0;
+}
+
+/* Shared between VG_(maybe_record_error)() and VG_(unique_error)(),
+ just for pretty printing purposes. */
+static Bool is_first_shown_context = True;
+
+static Int n_errs_shown = 0;
+
+/* Top-level entry point to the error management subsystem.
+ All detected errors are notified here; this routine decides if/when the
+ user should see the error. */
+void VG_(maybe_record_error) ( ThreadId tid,
+ ErrorKind ekind, Addr a, Char* s, void* extra )
+{
+ Error err;
+ Error* p;
+ Error* p_prev;
+ UInt extra_size;
+ VgRes exe_res = Vg_MedRes;
+ static Bool stopping_message = False;
+ static Bool slowdown_message = False;
+
+ /* After M_COLLECT_NO_ERRORS_AFTER_SHOWN different errors have
+ been found, or M_COLLECT_NO_ERRORS_AFTER_FOUND total errors
+ have been found, just refuse to collect any more. This stops
+ the burden of the error-management system becoming excessive in
+ extremely buggy programs, although it does make it pretty
+ pointless to continue the Valgrind run after this point. */
+ if (VG_(clo_error_limit)
+ && (n_errs_shown >= M_COLLECT_NO_ERRORS_AFTER_SHOWN
+ || n_errs_found >= M_COLLECT_NO_ERRORS_AFTER_FOUND)
+ && !VG_(clo_xml)) {
+ if (!stopping_message) {
+ VG_UMSG("");
+
+ if (n_errs_shown >= M_COLLECT_NO_ERRORS_AFTER_SHOWN) {
+ VG_UMSG(
+ "More than %d different errors detected. "
+ "I'm not reporting any more.",
+ M_COLLECT_NO_ERRORS_AFTER_SHOWN );
+ } else {
+ VG_UMSG(
+ "More than %d total errors detected. "
+ "I'm not reporting any more.",
+ M_COLLECT_NO_ERRORS_AFTER_FOUND );
+ }
+
+ VG_UMSG("Final error counts will be inaccurate. Go fix your program!");
+ VG_UMSG("Rerun with --error-limit=no to disable this cutoff. Note");
+ VG_UMSG("that errors may occur in your program without prior warning from");
+ VG_UMSG("Valgrind, because errors are no longer being displayed.");
+ VG_UMSG("");
+ stopping_message = True;
+ }
+ return;
+ }
+
+ /* After M_COLLECT_ERRORS_SLOWLY_AFTER different errors have
+ been found, be much more conservative about collecting new
+ ones. */
+ if (n_errs_shown >= M_COLLECT_ERRORS_SLOWLY_AFTER
+ && !VG_(clo_xml)) {
+ exe_res = Vg_LowRes;
+ if (!slowdown_message) {
+ VG_UMSG("");
+ VG_UMSG("More than %d errors detected. Subsequent errors",
+ M_COLLECT_ERRORS_SLOWLY_AFTER);
+ VG_UMSG("will still be recorded, but in less detail than before.");
+ slowdown_message = True;
+ }
+ }
+
+ /* Build ourselves the error */
+ construct_error ( &err, tid, ekind, a, s, extra, NULL );
+
+ /* First, see if we've got an error record matching this one. */
+ em_errlist_searches++;
+ p = errors;
+ p_prev = NULL;
+ while (p != NULL) {
+ em_errlist_cmps++;
+ if (eq_Error(exe_res, p, &err)) {
+ /* Found it. */
+ p->count++;
+ if (p->supp != NULL) {
+ /* Deal correctly with suppressed errors. */
+ p->supp->count++;
+ n_errs_suppressed++;
+ } else {
+ n_errs_found++;
+ }
+
+ /* Move p to the front of the list so that future searches
+ for it are faster. */
+ if (p_prev != NULL) {
+ vg_assert(p_prev->next == p);
+ p_prev->next = p->next;
+ p->next = errors;
+ errors = p;
+ }
+
+ return;
+ }
+ p_prev = p;
+ p = p->next;
+ }
+
+ /* Didn't see it. Copy and add. */
+
+ /* OK, we're really going to collect it. The context is on the stack and
+ will disappear shortly, so we must copy it. First do the main
+ (non-'extra') part.
+
+ Then VG_(tdict).tool_update_extra can update the 'extra' part. This
+ is for when there are more details to fill in which take time to work
+ out but don't affect our earlier decision to include the error -- by
+ postponing those details until now, we avoid the extra work in the
+ case where we ignore the error. Ugly.
+
+ Then, if there is an 'extra' part, copy it too, using the size that
+ VG_(tdict).tool_update_extra returned. Also allow for people using
+ the void* extra field for a scalar value like an integer.
+ */
+
+ /* copy main part */
+ p = VG_(arena_malloc)(VG_AR_ERRORS, "errormgr.mre.1", sizeof(Error));
+ *p = err;
+
+ /* update 'extra' */
+ switch (ekind) {
+ //(example code, see comment on CoreSuppKind above)
+ //case ThreadErr:
+ // vg_assert(VG_(needs).core_errors);
+ // extra_size = <something>
+ // break;
+ default:
+ vg_assert(VG_(needs).tool_errors);
+ extra_size = VG_TDICT_CALL(tool_update_extra, p);
+ break;
+ }
+
+ /* copy block pointed to by 'extra', if there is one */
+ if (NULL != p->extra && 0 != extra_size) {
+ void* new_extra = VG_(malloc)("errormgr.mre.2", extra_size);
+ VG_(memcpy)(new_extra, p->extra, extra_size);
+ p->extra = new_extra;
+ }
+
+ p->next = errors;
+ p->supp = is_suppressible_error(&err);
+ errors = p;
+ if (p->supp == NULL) {
+ n_errs_found++;
+ if (!is_first_shown_context)
+ VG_UMSG("");
+ pp_Error(p);
+ is_first_shown_context = False;
+ n_errs_shown++;
+ do_actions_on_error(p, /*allow_db_attach*/True);
+ } else {
+ n_errs_suppressed++;
+ p->supp->count++;
+ }
+}
+
+/* Second top-level entry point to the error management subsystem, for
+ errors that the tool wants to report immediately, eg. because they're
+ guaranteed to only happen once. This avoids all the recording and
+ comparing stuff. But they can be suppressed; returns True if it is
+ suppressed. Bool 'print_error' dictates whether to print the error.
+ Bool 'count_error' dictates whether to count the error in n_errs_found.
+*/
+Bool VG_(unique_error) ( ThreadId tid, ErrorKind ekind, Addr a, Char* s,
+ void* extra, ExeContext* where, Bool print_error,
+ Bool allow_db_attach, Bool count_error )
+{
+ Error err;
+ Supp *su;
+
+ /* Build ourselves the error */
+ construct_error ( &err, tid, ekind, a, s, extra, where );
+
+ /* Unless it's suppressed, we're going to show it. Don't need to make
+ a copy, because it's only temporary anyway.
+
+ Then update the 'extra' part with VG_(tdict).tool_update_extra),
+ because that can have an affect on whether it's suppressed. Ignore
+ the size return value of VG_(tdict).tool_update_extra, because we're
+ not copying 'extra'. */
+ (void)VG_TDICT_CALL(tool_update_extra, &err);
+
+ su = is_suppressible_error(&err);
+ if (NULL == su) {
+ if (count_error)
+ n_errs_found++;
+
+ if (print_error) {
+ if (!is_first_shown_context)
+ VG_UMSG("");
+ pp_Error(&err);
+ is_first_shown_context = False;
+ n_errs_shown++;
+ do_actions_on_error(&err, allow_db_attach);
+ }
+ return False;
+
+ } else {
+ n_errs_suppressed++;
+ su->count++;
+ return True;
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Exported fns ---*/
+/*------------------------------------------------------------*/
+
+/* Show the used suppressions. Returns False if no suppression
+ got used. */
+static Bool show_used_suppressions ( void )
+{
+ Supp *su;
+ Bool any_supp;
+
+ if (VG_(clo_xml))
+ VG_UMSG("<suppcounts>");
+
+ any_supp = False;
+ for (su = suppressions; su != NULL; su = su->next) {
+ if (su->count <= 0)
+ continue;
+ any_supp = True;
+ if (VG_(clo_xml)) {
+ VG_(message_no_f_c)(Vg_DebugMsg,
+ " <pair>\n"
+ " <count>%d</count>\n"
+ " <name>%t</name>\n"
+ " </pair>",
+ su->count, su->sname);
+ } else {
+ VG_DMSG("supp: %6d %s", su->count, su->sname);
+ }
+ }
+
+ if (VG_(clo_xml))
+ VG_UMSG("</suppcounts>");
+
+ return any_supp;
+}
+
+
+/* Show all the errors that occurred, and possibly also the
+ suppressions used. */
+void VG_(show_all_errors) ( void )
+{
+ Int i, n_min;
+ Int n_err_contexts, n_supp_contexts;
+ Error *p, *p_min;
+ Supp *su;
+ Bool any_supp;
+
+ if (VG_(clo_verbosity) == 0)
+ return;
+
+ n_err_contexts = 0;
+ for (p = errors; p != NULL; p = p->next) {
+ if (p->supp == NULL)
+ n_err_contexts++;
+ }
+
+ n_supp_contexts = 0;
+ for (su = suppressions; su != NULL; su = su->next) {
+ if (su->count > 0)
+ n_supp_contexts++;
+ }
+
+ /* If we're printing XML, just show the suppressions and stop.
+ */
+ if (VG_(clo_xml)) {
+ (void)show_used_suppressions();
+ return;
+ }
+
+ /* We only get here if not printing XML. */
+ VG_UMSG("ERROR SUMMARY: "
+ "%d errors from %d contexts (suppressed: %d from %d)",
+ n_errs_found, n_err_contexts,
+ n_errs_suppressed, n_supp_contexts );
+
+ if (VG_(clo_verbosity) <= 1)
+ return;
+
+ /* Print the contexts in order of increasing error count. */
+ for (i = 0; i < n_err_contexts; i++) {
+ n_min = (1 << 30) - 1;
+ p_min = NULL;
+ for (p = errors; p != NULL; p = p->next) {
+ if (p->supp != NULL) continue;
+ if (p->count < n_min) {
+ n_min = p->count;
+ p_min = p;
+ }
+ }
+ if (p_min == NULL) VG_(tool_panic)("show_all_errors()");
+
+ VG_UMSG("");
+ VG_UMSG("%d errors in context %d of %d:",
+ p_min->count, i+1, n_err_contexts);
+ pp_Error( p_min );
+
+ if ((i+1 == VG_(clo_dump_error))) {
+ StackTrace ips = VG_(get_ExeContext_StackTrace)(p_min->where);
+ VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to debugging*/,
+ ips[0], /*debugging*/True, 0xFE/*verbosity*/,
+ /*bbs_done*/0,
+ /*allow redir?*/True);
+ }
+
+ p_min->count = 1 << 30;
+ }
+
+ if (n_supp_contexts > 0)
+ VG_UMSG( "");
+ any_supp = show_used_suppressions();
+
+ if (n_err_contexts > 0) {
+ if (any_supp)
+ VG_UMSG("");
+ VG_UMSG("IN SUMMARY: "
+ "%d errors from %d contexts (suppressed: %d from %d)",
+ n_errs_found, n_err_contexts, n_errs_suppressed,
+ n_supp_contexts );
+ VG_UMSG("");
+ }
+}
+
+
+/* Show occurrence counts of all errors, in XML form. */
+void VG_(show_error_counts_as_XML) ( void )
+{
+ Error* err;
+ VG_UMSG("<errorcounts>");
+ for (err = errors; err != NULL; err = err->next) {
+ if (err->supp != NULL)
+ continue;
+ if (err->count <= 0)
+ continue;
+ VG_UMSG(" <pair>");
+ VG_UMSG(" <count>%d</count>", err->count);
+ VG_UMSG(" <unique>0x%x</unique>", err->unique);
+ VG_UMSG(" </pair>");
+ }
+ VG_UMSG("</errorcounts>");
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Suppression parsing ---*/
+/*------------------------------------------------------------*/
+
+/* Get the next char from fd into *out_buf. Returns 1 if success,
+ 0 if eof or < 0 if error. */
+
+static Int get_char ( Int fd, Char* out_buf )
+{
+ Int r;
+ static Char buf[256];
+ static Int buf_size = 0;
+ static Int buf_used = 0;
+ vg_assert(buf_size >= 0 && buf_size <= 256);
+ vg_assert(buf_used >= 0 && buf_used <= buf_size);
+ if (buf_used == buf_size) {
+ r = VG_(read)(fd, buf, 256);
+ if (r < 0) return r; /* read failed */
+ vg_assert(r >= 0 && r <= 256);
+ buf_size = r;
+ buf_used = 0;
+ }
+ if (buf_size == 0)
+ return 0; /* eof */
+ vg_assert(buf_size >= 0 && buf_size <= 256);
+ vg_assert(buf_used >= 0 && buf_used < buf_size);
+ *out_buf = buf[buf_used];
+ buf_used++;
+ return 1;
+}
+
+
+/* Get a non-blank, non-comment line of at most nBuf chars from fd.
+ Skips leading spaces on the line. Return True if EOF was hit instead.
+*/
+Bool VG_(get_line) ( Int fd, Char* buf, Int nBuf )
+{
+ Char ch;
+ Int n, i;
+ while (True) {
+ /* First, read until a non-blank char appears. */
+ while (True) {
+ n = get_char(fd, &ch);
+ if (n == 1 && !VG_(isspace)(ch)) break;
+ if (n <= 0) return True;
+ }
+
+ /* Now, read the line into buf. */
+ i = 0;
+ buf[i++] = ch; buf[i] = 0;
+ while (True) {
+ n = get_char(fd, &ch);
+ if (n <= 0) return False; /* the next call will return True */
+ if (ch == '\n') break;
+ if (i > 0 && i == nBuf-1) i--;
+ buf[i++] = ch; buf[i] = 0;
+ }
+ while (i > 1 && VG_(isspace)(buf[i-1])) {
+ i--; buf[i] = 0;
+ };
+
+ /* VG_(printf)("The line is '%s'\n", buf); */
+ /* Ok, we have a line. If a non-comment line, return.
+ If a comment line, start all over again. */
+ if (buf[0] != '#') return False;
+ }
+}
+
+
+/* *p_caller contains the raw name of a caller, supposedly either
+ fun:some_function_name or
+ obj:some_object_name.
+ Set *p_ty accordingly and advance *p_caller over the descriptor
+ (fun: or obj:) part.
+ Returns False if failed.
+*/
+static Bool setLocationTy ( SuppLoc* p )
+{
+ if (VG_(strncmp)(p->name, "fun:", 4) == 0) {
+ p->name += 4;
+ p->ty = FunName;
+ return True;
+ }
+ if (VG_(strncmp)(p->name, "obj:", 4) == 0) {
+ p->name += 4;
+ p->ty = ObjName;
+ return True;
+ }
+ if (VG_(strcmp)(p->name, "...") == 0) {
+ p->name = NULL;
+ p->ty = DotDotDot;
+ return True;
+ }
+ VG_(printf)("location should be \"...\", or should start "
+ "with \"fun:\" or \"obj:\"\n");
+ return False;
+}
+
+
+/* Look for "tool" in a string like "tool1,tool2,tool3" */
+static Bool tool_name_present(Char *name, Char *names)
+{
+ Bool found;
+ Char *s = NULL; /* Shut gcc up */
+ Int len = VG_(strlen)(name);
+
+ found = (NULL != (s = VG_(strstr)(names, name)) &&
+ (s == names || *(s-1) == ',') &&
+ (*(s+len) == ',' || *(s+len) == '\0')
+ );
+
+ return found;
+}
+
+/* Read suppressions from the file specified in VG_(clo_suppressions)
+ and place them in the suppressions list. If there's any difficulty
+ doing this, just give up -- there's no point in trying to recover.
+*/
+static void load_one_suppressions_file ( Char* filename )
+{
+# define N_BUF 200
+ SysRes sres;
+ Int fd, i, j, lineno = 0;
+ Bool eof;
+ Char buf[N_BUF+1];
+ Char* tool_names;
+ Char* supp_name;
+ Char* err_str = NULL;
+ SuppLoc tmp_callers[VG_MAX_SUPP_CALLERS];
+
+ sres = VG_(open)( filename, VKI_O_RDONLY, 0 );
+ if (sres.isError) {
+ if (VG_(clo_xml))
+ VG_UMSG("</valgrindoutput>\n");
+ VG_UMSG("FATAL: can't open suppressions file \"%s\"", filename );
+ VG_(exit)(1);
+ }
+ fd = sres.res;
+
+# define BOMB(S) { err_str = S; goto syntax_error; }
+
+ while (True) {
+ /* Assign and initialise the two suppression halves (core and tool) */
+ Supp* supp;
+ supp = VG_(arena_malloc)(VG_AR_CORE, "errormgr.losf.1",
+ sizeof(Supp));
+ supp->count = 0;
+
+ // Initialise temporary reading-in buffer.
+ for (i = 0; i < VG_MAX_SUPP_CALLERS; i++) {
+ tmp_callers[i].ty = NoName;
+ tmp_callers[i].name = NULL;
+ }
+
+ supp->string = supp->extra = NULL;
+
+ eof = VG_(get_line) ( fd, buf, N_BUF );
+ lineno++;
+ if (eof) break;
+
+ if (!VG_STREQ(buf, "{")) BOMB("expected '{' or end-of-file");
+
+ eof = VG_(get_line) ( fd, buf, N_BUF );
+ lineno++;
+
+ if (eof || VG_STREQ(buf, "}")) BOMB("unexpected '}'");
+
+ supp->sname = VG_(arena_strdup)(VG_AR_CORE, "errormgr.losf.2", buf);
+
+ eof = VG_(get_line) ( fd, buf, N_BUF );
+ lineno++;
+
+ if (eof) BOMB("unexpected end-of-file");
+
+ /* Check it has the "tool1,tool2,...:supp" form (look for ':') */
+ i = 0;
+ while (True) {
+ if (buf[i] == ':') break;
+ if (buf[i] == '\0') BOMB("malformed 'tool1,tool2,...:supp' line");
+ i++;
+ }
+ buf[i] = '\0'; /* Replace ':', splitting into two strings */
+
+ tool_names = & buf[0];
+ supp_name = & buf[i+1];
+
+ if (VG_(needs).core_errors && tool_name_present("core", tool_names))
+ {
+ // A core suppression
+ //(example code, see comment on CoreSuppKind above)
+ //if (VG_STREQ(supp_name, "Thread"))
+ // supp->skind = ThreadSupp;
+ //else
+ BOMB("unknown core suppression type");
+ }
+ else if (VG_(needs).tool_errors &&
+ tool_name_present(VG_(details).name, tool_names))
+ {
+ // A tool suppression
+ if (VG_TDICT_CALL(tool_recognised_suppression, supp_name, supp)) {
+ /* Do nothing, function fills in supp->skind */
+ } else {
+ BOMB("unknown tool suppression type");
+ }
+ }
+ else {
+ // Ignore rest of suppression
+ while (True) {
+ eof = VG_(get_line) ( fd, buf, N_BUF );
+ lineno++;
+ if (eof) BOMB("unexpected end-of-file");
+ if (VG_STREQ(buf, "}"))
+ break;
+ }
+ continue;
+ }
+
+ if (VG_(needs).tool_errors &&
+ !VG_TDICT_CALL(tool_read_extra_suppression_info,
+ fd, buf, N_BUF, supp))
+ {
+ BOMB("bad or missing extra suppression info");
+ }
+
+ /* the main frame-descriptor reading loop */
+ i = 0;
+ while (True) {
+ eof = VG_(get_line) ( fd, buf, N_BUF );
+ lineno++;
+ if (eof)
+ BOMB("unexpected end-of-file");
+ if (VG_STREQ(buf, "}")) {
+ if (i > 0) {
+ break;
+ } else {
+ BOMB("missing stack trace");
+ }
+ }
+ if (i == VG_MAX_SUPP_CALLERS)
+ BOMB("too many callers in stack trace");
+ if (i > 0 && i >= VG_(clo_backtrace_size))
+ break;
+ tmp_callers[i].name = VG_(arena_strdup)(VG_AR_CORE,
+ "errormgr.losf.3", buf);
+ if (!setLocationTy(&(tmp_callers[i])))
+ BOMB("location should be \"...\", or should start "
+ "with \"fun:\" or \"obj:\"");
+ i++;
+ }
+
+ // If the num callers is >= VG_(clo_backtrace_size), ignore any extra
+ // lines and grab the '}'.
+ if (!VG_STREQ(buf, "}")) {
+ do {
+ eof = VG_(get_line) ( fd, buf, N_BUF );
+ lineno++;
+ } while (!eof && !VG_STREQ(buf, "}"));
+ }
+
+ // Reject entries which are entirely composed of frame
+ // level wildcards.
+ vg_assert(i > 0); // guaranteed by frame-descriptor reading loop
+ for (j = 0; j < i; j++) {
+ if (tmp_callers[j].ty == FunName || tmp_callers[j].ty == ObjName)
+ break;
+ vg_assert(tmp_callers[j].ty == DotDotDot);
+ }
+ vg_assert(j >= 0 && j <= i);
+ if (j == i) {
+ // we didn't find any non-"..." entries
+ BOMB("suppression must contain at least one location "
+ "line which is not \"...\"");
+ }
+
+ // Copy tmp_callers[] into supp->callers[]
+ supp->n_callers = i;
+ supp->callers = VG_(arena_malloc)(VG_AR_CORE, "errormgr.losf.4",
+ i*sizeof(SuppLoc));
+ for (i = 0; i < supp->n_callers; i++) {
+ supp->callers[i] = tmp_callers[i];
+ }
+
+ supp->next = suppressions;
+ suppressions = supp;
+ }
+ VG_(close)(fd);
+ return;
+
+ syntax_error:
+ if (VG_(clo_xml))
+ VG_UMSG("</valgrindoutput>\n");
+ VG_UMSG("FATAL: in suppressions file \"%s\" near line %d:",
+ filename, lineno );
+ VG_UMSG(" %s", err_str );
+
+ VG_(close)(fd);
+ VG_UMSG("exiting now.");
+ VG_(exit)(1);
+
+# undef BOMB
+# undef N_BUF
+}
+
+
+void VG_(load_suppressions) ( void )
+{
+ Int i;
+ suppressions = NULL;
+ for (i = 0; i < VG_(clo_n_suppressions); i++) {
+ if (VG_(clo_verbosity) > 1) {
+ VG_DMSG("Reading suppressions file: %s", VG_(clo_suppressions)[i] );
+ }
+ load_one_suppressions_file( VG_(clo_suppressions)[i] );
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Matching errors to suppressions ---*/
+/*------------------------------------------------------------*/
+
+/* Parameterising functions for the use of VG_(generic_match) in
+ suppression-vs-error matching. The suppression frames (SuppLoc)
+ play the role of 'pattern'-element, and the error frames (IPs,
+ hence simply Addrs) play the role of 'input'. In short then, we're
+ matching a sequence of Addrs against a pattern composed of a
+ sequence of SuppLocs.
+*/
+static Bool supploc_IsStar ( void* supplocV )
+{
+ SuppLoc* supploc = (SuppLoc*)supplocV;
+ return supploc->ty == DotDotDot;
+}
+
+static Bool supploc_IsQuery ( void* supplocV )
+{
+ return False; /* there's no '?' equivalent in the supp syntax */
+}
+
+static Bool supp_pattEQinp ( void* supplocV, void* addrV )
+{
+ SuppLoc* supploc = (SuppLoc*)supplocV; /* PATTERN */
+ Addr ip = *(Addr*)addrV; /* INPUT */
+
+ Char caller_name[ERRTXT_LEN];
+ caller_name[0] = 0;
+
+ /* So, does this IP address match this suppression-line? */
+ switch (supploc->ty) {
+ case DotDotDot:
+ /* supp_pattEQinp is a callback from VG_(generic_match). As
+ per the spec thereof (see include/pub_tool_seqmatch.h), we
+ should never get called with a pattern value for which the
+ _IsStar or _IsQuery function would return True. Hence
+ this can't happen. */
+ vg_assert(0);
+ case ObjName:
+ /* Get the object name into 'caller_name', or "???"
+ if unknown. */
+ if (!VG_(get_objname)(ip, caller_name, ERRTXT_LEN))
+ VG_(strcpy)(caller_name, "???");
+ break;
+ case FunName:
+ /* Get the function name into 'caller_name', or "???"
+ if unknown. */
+ // Nb: C++-mangled names are used in suppressions. Do, though,
+ // Z-demangle them, since otherwise it's possible to wind
+ // up comparing "malloc" in the suppression against
+ // "_vgrZU_libcZdsoZa_malloc" in the backtrace, and the
+ // two of them need to be made to match.
+ if (!VG_(get_fnname_no_cxx_demangle)(ip, caller_name, ERRTXT_LEN))
+ VG_(strcpy)(caller_name, "???");
+ break;
+ default:
+ vg_assert(0);
+ }
+
+ /* So now we have the function or object name in caller_name, and
+ the pattern (at the character level) to match against is in
+ supploc->name. Hence (and leading to a re-entrant call of
+ VG_(generic_match)): */
+ return VG_(string_match)(supploc->name, caller_name);
+}
+
+/////////////////////////////////////////////////////
+
+static Bool supp_matches_callers(Error* err, Supp* su)
+{
+ /* Unwrap the args and set up the correct parameterisation of
+ VG_(generic_match), using supploc_IsStar, supploc_IsQuery and
+ supp_pattEQinp. */
+ /* note, StackTrace === Addr* */
+ StackTrace ips = VG_(get_ExeContext_StackTrace)(err->where);
+ UWord n_ips = VG_(get_ExeContext_n_ips)(err->where);
+ SuppLoc* supps = su->callers;
+ UWord n_supps = su->n_callers;
+ UWord szbPatt = sizeof(SuppLoc);
+ UWord szbInput = sizeof(Addr);
+ Bool matchAll = False; /* we just want to match a prefix */
+ return
+ VG_(generic_match)(
+ matchAll,
+ /*PATT*/supps, szbPatt, n_supps, 0/*initial Ix*/,
+ /*INPUT*/ips, szbInput, n_ips, 0/*initial Ix*/,
+ supploc_IsStar, supploc_IsQuery, supp_pattEQinp
+ );
+}
+
+/////////////////////////////////////////////////////
+
+static
+Bool supp_matches_error(Supp* su, Error* err)
+{
+ switch (su->skind) {
+ //(example code, see comment on CoreSuppKind above)
+ //case ThreadSupp:
+ // return (err->ekind == ThreadErr);
+ default:
+ if (VG_(needs).tool_errors) {
+ return VG_TDICT_CALL(tool_error_matches_suppression, err, su);
+ } else {
+ VG_(printf)(
+ "\nUnhandled suppression type: %u. VG_(needs).tool_errors\n"
+ "probably needs to be set.\n",
+ err->ekind);
+ VG_(tool_panic)("unhandled suppression type");
+ }
+ }
+}
+
+/////////////////////////////////////////////////////
+
+/* Does an error context match a suppression? ie is this a suppressible
+ error? If so, return a pointer to the Supp record, otherwise NULL.
+ Tries to minimise the number of symbol searches since they are expensive.
+*/
+static Supp* is_suppressible_error ( Error* err )
+{
+ Supp* su;
+ Supp* su_prev;
+
+ /* stats gathering */
+ em_supplist_searches++;
+
+ /* See if the error context matches any suppression. */
+ su_prev = NULL;
+ for (su = suppressions; su != NULL; su = su->next) {
+ em_supplist_cmps++;
+ if (supp_matches_error(su, err) && supp_matches_callers(err, su)) {
+ /* got a match. Move this entry to the head of the list
+ in the hope of making future searches cheaper. */
+ if (su_prev) {
+ vg_assert(su_prev->next == su);
+ su_prev->next = su->next;
+ su->next = suppressions;
+ suppressions = su;
+ }
+ return su;
+ }
+ su_prev = su;
+ }
+ return NULL; /* no matches */
+}
+
+/* Show accumulated error-list and suppression-list search stats.
+*/
+void VG_(print_errormgr_stats) ( void )
+{
+ VG_DMSG(
+ " errormgr: %'lu supplist searches, %'lu comparisons during search",
+ em_supplist_searches, em_supplist_cmps
+ );
+ VG_DMSG(
+ " errormgr: %'lu errlist searches, %'lu comparisons during search",
+ em_errlist_searches, em_errlist_cmps
+ );
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_execontext.c.svn-base b/coregrind/.svn/text-base/m_execontext.c.svn-base
new file mode 100644
index 0000000..bc5d8ce
--- /dev/null
+++ b/coregrind/.svn/text-base/m_execontext.c.svn-base
@@ -0,0 +1,479 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Store and compare stack backtraces m_execontext.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h" // For VG_(message)()
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_stacktrace.h"
+#include "pub_core_machine.h" // VG_(get_IP)
+#include "pub_core_vki.h" // To keep pub_core_threadstate.h happy
+#include "pub_core_threadstate.h" // VG_(is_valid_tid)
+#include "pub_core_execontext.h" // self
+
+/*------------------------------------------------------------*/
+/*--- Low-level ExeContext storage. ---*/
+/*------------------------------------------------------------*/
+
+/* The first 4 IP values are used in comparisons to remove duplicate
+ errors, and for comparing against suppression specifications. The
+ rest are purely informational (but often important).
+
+ The contexts are stored in a traditional chained hash table, so as
+ to allow quick determination of whether a new context already
+ exists. The hash table starts small and expands dynamically, so as
+ to keep the load factor below 1.0.
+
+ The idea is only to ever store any one context once, so as to save
+ space and make exact comparisons faster. */
+
+
+/* Primes for the hash table */
+
+#define N_EC_PRIMES 18
+
+static SizeT ec_primes[N_EC_PRIMES] = {
+ 769UL, 1543UL, 3079UL, 6151UL,
+ 12289UL, 24593UL, 49157UL, 98317UL,
+ 196613UL, 393241UL, 786433UL, 1572869UL,
+ 3145739UL, 6291469UL, 12582917UL, 25165843UL,
+ 50331653UL, 100663319UL
+};
+
+
+/* Each element is present in a hash chain, and also contains a
+ variable length array of guest code addresses (the useful part). */
+
+struct _ExeContext {
+ struct _ExeContext* chain;
+ /* A 32-bit unsigned integer that uniquely identifies this
+ ExeContext. Memcheck uses these for origin tracking. Values
+ must be nonzero (else Memcheck's origin tracking is hosed), must
+ be a multiple of four, and must be unique. Hence they start at
+ 4. */
+ UInt ecu;
+ /* Variable-length array. The size is 'n_ips'; at
+ least 1, at most VG_DEEPEST_BACKTRACE. [0] is the current IP,
+ [1] is its caller, [2] is the caller of [1], etc. */
+ UInt n_ips;
+ Addr ips[0];
+};
+
+
+/* This is the dynamically expanding hash table. */
+static ExeContext** ec_htab; /* array [ec_htab_size] of ExeContext* */
+static SizeT ec_htab_size; /* one of the values in ec_primes */
+static SizeT ec_htab_size_idx; /* 0 .. N_EC_PRIMES-1 */
+
+/* ECU serial number */
+static UInt ec_next_ecu = 4; /* We must never issue zero */
+
+
+/* Stats only: the number of times the system was searched to locate a
+ context. */
+static ULong ec_searchreqs;
+
+/* Stats only: the number of full context comparisons done. */
+static ULong ec_searchcmps;
+
+/* Stats only: total number of stored contexts. */
+static ULong ec_totstored;
+
+/* Number of 2, 4 and (fast) full cmps done. */
+static ULong ec_cmp2s;
+static ULong ec_cmp4s;
+static ULong ec_cmpAlls;
+
+
+/*------------------------------------------------------------*/
+/*--- Exported functions. ---*/
+/*------------------------------------------------------------*/
+
+
+/* Initialise this subsystem. */
+static void init_ExeContext_storage ( void )
+{
+ Int i;
+ static Bool init_done = False;
+ if (LIKELY(init_done))
+ return;
+ ec_searchreqs = 0;
+ ec_searchcmps = 0;
+ ec_totstored = 0;
+ ec_cmp2s = 0;
+ ec_cmp4s = 0;
+ ec_cmpAlls = 0;
+
+ ec_htab_size_idx = 0;
+ ec_htab_size = ec_primes[ec_htab_size_idx];
+ ec_htab = VG_(arena_malloc)(VG_AR_EXECTXT, "execontext.iEs1",
+ sizeof(ExeContext*) * ec_htab_size);
+ for (i = 0; i < ec_htab_size; i++)
+ ec_htab[i] = NULL;
+
+ init_done = True;
+}
+
+
+/* Print stats. */
+void VG_(print_ExeContext_stats) ( void )
+{
+ init_ExeContext_storage();
+ VG_(message)(Vg_DebugMsg,
+ " exectx: %'lu lists, %'llu contexts (avg %'llu per list)",
+ ec_htab_size, ec_totstored, ec_totstored / (ULong)ec_htab_size
+ );
+ VG_(message)(Vg_DebugMsg,
+ " exectx: %'llu searches, %'llu full compares (%'llu per 1000)",
+ ec_searchreqs, ec_searchcmps,
+ ec_searchreqs == 0
+ ? 0ULL
+ : ( (ec_searchcmps * 1000ULL) / ec_searchreqs )
+ );
+ VG_(message)(Vg_DebugMsg,
+ " exectx: %'llu cmp2, %'llu cmp4, %'llu cmpAll",
+ ec_cmp2s, ec_cmp4s, ec_cmpAlls
+ );
+}
+
+
+/* Print an ExeContext. */
+void VG_(pp_ExeContext) ( ExeContext* ec )
+{
+ VG_(pp_StackTrace)( ec->ips, ec->n_ips );
+}
+
+
+/* Compare two ExeContexts, comparing all callers. */
+Bool VG_(eq_ExeContext) ( VgRes res, ExeContext* e1, ExeContext* e2 )
+{
+ Int i;
+
+ if (e1 == NULL || e2 == NULL)
+ return False;
+
+ // Must be at least one address in each trace.
+ tl_assert(e1->n_ips >= 1 && e2->n_ips >= 1);
+
+ switch (res) {
+ case Vg_LowRes:
+ /* Just compare the top two callers. */
+ ec_cmp2s++;
+ for (i = 0; i < 2; i++) {
+ if ( (e1->n_ips <= i) && (e2->n_ips <= i)) return True;
+ if ( (e1->n_ips <= i) && !(e2->n_ips <= i)) return False;
+ if (!(e1->n_ips <= i) && (e2->n_ips <= i)) return False;
+ if (e1->ips[i] != e2->ips[i]) return False;
+ }
+ return True;
+
+ case Vg_MedRes:
+ /* Just compare the top four callers. */
+ ec_cmp4s++;
+ for (i = 0; i < 4; i++) {
+ if ( (e1->n_ips <= i) && (e2->n_ips <= i)) return True;
+ if ( (e1->n_ips <= i) && !(e2->n_ips <= i)) return False;
+ if (!(e1->n_ips <= i) && (e2->n_ips <= i)) return False;
+ if (e1->ips[i] != e2->ips[i]) return False;
+ }
+ return True;
+
+ case Vg_HighRes:
+ ec_cmpAlls++;
+ /* Compare them all -- just do pointer comparison. */
+ if (e1 != e2) return False;
+ return True;
+
+ default:
+ VG_(core_panic)("VG_(eq_ExeContext): unrecognised VgRes");
+ }
+}
+
+/* VG_(record_ExeContext) is the head honcho here. Take a snapshot of
+ the client's stack. Search our collection of ExeContexts to see if
+ we already have it, and if not, allocate a new one. Either way,
+ return a pointer to the context. If there is a matching context we
+ guarantee to not allocate a new one. Thus we never store
+ duplicates, and so exact equality can be quickly done as equality
+ on the returned ExeContext* values themselves. Inspired by Hugs's
+ Text type.
+
+ Also checks whether the hash table needs expanding, and expands it
+ if so. */
+
+static inline UWord ROLW ( UWord w, Int n )
+{
+ Int bpw = 8 * sizeof(UWord);
+ w = (w << n) | (w >> (bpw-n));
+ return w;
+}
+
+static UWord calc_hash ( Addr* ips, UInt n_ips, UWord htab_sz )
+{
+ UInt i;
+ UWord hash = 0;
+ vg_assert(htab_sz > 0);
+ for (i = 0; i < n_ips; i++) {
+ hash ^= ips[i];
+ hash = ROLW(hash, 19);
+ }
+ return hash % htab_sz;
+}
+
+static void resize_ec_htab ( void )
+{
+ SizeT i;
+ SizeT new_size;
+ ExeContext** new_ec_htab;
+
+ vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
+ if (ec_htab_size_idx == N_EC_PRIMES-1)
+ return; /* out of primes - can't resize further */
+
+ new_size = ec_primes[ec_htab_size_idx + 1];
+ new_ec_htab = VG_(arena_malloc)(VG_AR_EXECTXT, "execontext.reh1",
+ sizeof(ExeContext*) * new_size);
+
+ VG_(debugLog)(
+ 1, "execontext",
+ "resizing htab from size %lu to %lu (idx %lu) Total#ECs=%llu\n",
+ ec_htab_size, new_size, ec_htab_size_idx + 1, ec_totstored);
+
+ for (i = 0; i < new_size; i++)
+ new_ec_htab[i] = NULL;
+
+ for (i = 0; i < ec_htab_size; i++) {
+ ExeContext* cur = ec_htab[i];
+ while (cur) {
+ ExeContext* next = cur->chain;
+ UWord hash = calc_hash(cur->ips, cur->n_ips, new_size);
+ vg_assert(hash < new_size);
+ cur->chain = new_ec_htab[hash];
+ new_ec_htab[hash] = cur;
+ cur = next;
+ }
+ }
+
+ VG_(arena_free)(VG_AR_EXECTXT, ec_htab);
+ ec_htab = new_ec_htab;
+ ec_htab_size = new_size;
+ ec_htab_size_idx++;
+}
+
+/* Do the first part of getting a stack trace: actually unwind the
+ stack, and hand the results off to the duplicate-trace-finder
+ (_wrk2). */
+static ExeContext* record_ExeContext_wrk2 ( Addr* ips, UInt n_ips ); /*fwds*/
+static ExeContext* record_ExeContext_wrk ( ThreadId tid, Word first_ip_delta,
+ Bool first_ip_only )
+{
+ Addr ips[VG_DEEPEST_BACKTRACE];
+ UInt n_ips;
+
+ init_ExeContext_storage();
+
+ vg_assert(sizeof(void*) == sizeof(UWord));
+ vg_assert(sizeof(void*) == sizeof(Addr));
+
+ vg_assert(VG_(is_valid_tid)(tid));
+
+ vg_assert(VG_(clo_backtrace_size) >= 1 &&
+ VG_(clo_backtrace_size) <= VG_DEEPEST_BACKTRACE);
+
+ if (first_ip_only) {
+ n_ips = 1;
+ ips[0] = VG_(get_IP)(tid);
+ } else {
+ n_ips = VG_(get_StackTrace)( tid, ips, VG_(clo_backtrace_size),
+ NULL/*array to dump SP values in*/,
+ NULL/*array to dump FP values in*/,
+ first_ip_delta );
+ }
+
+ return record_ExeContext_wrk2 ( &ips[0], n_ips );
+}
+
+/* Do the second part of getting a stack trace: ips[0 .. n_ips-1]
+ holds a proposed trace. Find or allocate a suitable ExeContext.
+ Note that callers must have done init_ExeContext_storage() before
+ getting to this point. */
+static ExeContext* record_ExeContext_wrk2 ( Addr* ips, UInt n_ips )
+{
+ Int i;
+ Bool same;
+ UWord hash;
+ ExeContext* new_ec;
+ ExeContext* list;
+ ExeContext *prev2, *prev;
+
+ static UInt ctr = 0;
+
+ tl_assert(n_ips >= 1 && n_ips <= VG_(clo_backtrace_size));
+
+ /* Now figure out if we've seen this one before. First hash it so
+ as to determine the list number. */
+ hash = calc_hash( ips, n_ips, ec_htab_size );
+
+ /* And (the expensive bit) look a for matching entry in the list. */
+
+ ec_searchreqs++;
+
+ prev2 = NULL;
+ prev = NULL;
+ list = ec_htab[hash];
+
+ while (True) {
+ if (list == NULL) break;
+ ec_searchcmps++;
+ same = True;
+ for (i = 0; i < n_ips; i++) {
+ if (list->ips[i] != ips[i]) {
+ same = False;
+ break;
+ }
+ }
+ if (same) break;
+ prev2 = prev;
+ prev = list;
+ list = list->chain;
+ }
+
+ if (list != NULL) {
+ /* Yay! We found it. Once every 8 searches, move it one step
+ closer to the start of the list to make future searches
+ cheaper. */
+ if (0 == ((ctr++) & 7)) {
+ if (prev2 != NULL && prev != NULL) {
+ /* Found at 3rd or later position in the chain. */
+ vg_assert(prev2->chain == prev);
+ vg_assert(prev->chain == list);
+ prev2->chain = list;
+ prev->chain = list->chain;
+ list->chain = prev;
+ }
+ else if (prev2 == NULL && prev != NULL) {
+ /* Found at 2nd position in the chain. */
+ vg_assert(ec_htab[hash] == prev);
+ vg_assert(prev->chain == list);
+ prev->chain = list->chain;
+ list->chain = prev;
+ ec_htab[hash] = list;
+ }
+ }
+ return list;
+ }
+
+ /* Bummer. We have to allocate a new context record. */
+ ec_totstored++;
+
+ new_ec = VG_(arena_malloc)( VG_AR_EXECTXT, "execontext.rEw2.2",
+ sizeof(struct _ExeContext)
+ + n_ips * sizeof(Addr) );
+
+ for (i = 0; i < n_ips; i++)
+ new_ec->ips[i] = ips[i];
+
+ vg_assert(VG_(is_plausible_ECU)(ec_next_ecu));
+ new_ec->ecu = ec_next_ecu;
+ ec_next_ecu += 4;
+ if (ec_next_ecu == 0) {
+ /* Urr. Now we're hosed; we emitted 2^30 ExeContexts already
+ and have run out of numbers. Not sure what to do. */
+ VG_(core_panic)("m_execontext: more than 2^30 ExeContexts created");
+ }
+
+ new_ec->n_ips = n_ips;
+ new_ec->chain = ec_htab[hash];
+ ec_htab[hash] = new_ec;
+
+ /* Resize the hash table, maybe? */
+ if ( ((ULong)ec_totstored) > ((ULong)ec_htab_size) ) {
+ vg_assert(ec_htab_size_idx >= 0 && ec_htab_size_idx < N_EC_PRIMES);
+ if (ec_htab_size_idx < N_EC_PRIMES-1)
+ resize_ec_htab();
+ }
+
+ return new_ec;
+}
+
+ExeContext* VG_(record_ExeContext)( ThreadId tid, Word first_ip_delta ) {
+ return record_ExeContext_wrk( tid, first_ip_delta,
+ False/*!first_ip_only*/ );
+}
+
+ExeContext* VG_(record_depth_1_ExeContext)( ThreadId tid ) {
+ return record_ExeContext_wrk( tid, 0/*first_ip_delta*/,
+ True/*first_ip_only*/ );
+}
+
+ExeContext* VG_(make_depth_1_ExeContext_from_Addr)( Addr a ) {
+ init_ExeContext_storage();
+ return record_ExeContext_wrk2( &a, 1 );
+}
+
+StackTrace VG_(get_ExeContext_StackTrace) ( ExeContext* e ) {
+ return e->ips;
+}
+
+UInt VG_(get_ECU_from_ExeContext)( ExeContext* e ) {
+ vg_assert(VG_(is_plausible_ECU)(e->ecu));
+ return e->ecu;
+}
+
+Int VG_(get_ExeContext_n_ips)( ExeContext* e ) {
+ vg_assert(e->n_ips >= 1);
+ return e->n_ips;
+}
+
+ExeContext* VG_(get_ExeContext_from_ECU)( UInt ecu )
+{
+ UWord i;
+ ExeContext* ec;
+ vg_assert(VG_(is_plausible_ECU)(ecu));
+ vg_assert(ec_htab_size > 0);
+ for (i = 0; i < ec_htab_size; i++) {
+ for (ec = ec_htab[i]; ec; ec = ec->chain) {
+ if (ec->ecu == ecu)
+ return ec;
+ }
+ }
+ return NULL;
+}
+
+ExeContext* VG_(make_ExeContext_from_StackTrace)( Addr* ips, UInt n_ips )
+{
+ return record_ExeContext_wrk2(ips, n_ips);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end m_execontext.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_hashtable.c.svn-base b/coregrind/.svn/text-base/m_hashtable.c.svn-base
new file mode 100644
index 0000000..93827bb
--- /dev/null
+++ b/coregrind/.svn/text-base/m_hashtable.c.svn-base
@@ -0,0 +1,275 @@
+
+/*--------------------------------------------------------------------*/
+/*--- A separately-chained hash table. m_hashtable.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_hashtable.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_mallocfree.h"
+
+/*--------------------------------------------------------------------*/
+/*--- Declarations ---*/
+/*--------------------------------------------------------------------*/
+
+#define CHAIN_NO(key,tbl) (((UWord)(key)) % tbl->n_chains)
+
+struct _VgHashTable {
+ UInt n_chains; // should be prime
+ UInt n_elements;
+ VgHashNode* iterNode; // current iterator node
+ UInt iterChain; // next chain to be traversed by the iterator
+ VgHashNode** chains; // expanding array of hash chains
+ Bool iterOK; // table safe to iterate over?
+ HChar* name; // name of table (for debugging only)
+};
+
+#define N_HASH_PRIMES 20
+
+static SizeT primes[N_HASH_PRIMES] = {
+ 769UL, 1543UL, 3079UL, 6151UL,
+ 12289UL, 24593UL, 49157UL, 98317UL,
+ 196613UL, 393241UL, 786433UL, 1572869UL,
+ 3145739UL, 6291469UL, 12582917UL, 25165843UL,
+ 50331653UL, 100663319UL, 201326611UL, 402653189UL
+};
+
+/*--------------------------------------------------------------------*/
+/*--- Functions ---*/
+/*--------------------------------------------------------------------*/
+
+VgHashTable VG_(HT_construct) ( HChar* name )
+{
+ /* Initialises to zero, ie. all entries NULL */
+ SizeT n_chains = primes[0];
+ SizeT sz = n_chains * sizeof(VgHashNode*);
+ VgHashTable table = VG_(calloc)("hashtable.Hc.1",
+ 1, sizeof(struct _VgHashTable));
+ table->chains = VG_(calloc)("hashtable.Hc.2", 1, sz);
+ table->n_chains = n_chains;
+ table->n_elements = 0;
+ table->iterOK = True;
+ table->name = name;
+ vg_assert(name);
+ return table;
+}
+
+Int VG_(HT_count_nodes) ( VgHashTable table )
+{
+ return table->n_elements;
+}
+
+static void resize ( VgHashTable table )
+{
+ Int i;
+ SizeT sz;
+ SizeT old_chains = table->n_chains;
+ SizeT new_chains = old_chains + 1;
+ VgHashNode** chains;
+ VgHashNode * node;
+
+ /* If we've run out of primes, do nothing. */
+ if (old_chains == primes[N_HASH_PRIMES-1])
+ return;
+
+ vg_assert(old_chains >= primes[0]
+ && old_chains < primes[N_HASH_PRIMES-1]);
+
+ for (i = 0; i < N_HASH_PRIMES; i++) {
+ if (primes[i] > new_chains) {
+ new_chains = primes[i];
+ break;
+ }
+ }
+
+ vg_assert(new_chains > old_chains);
+ vg_assert(new_chains > primes[0]
+ && new_chains <= primes[N_HASH_PRIMES-1]);
+
+ VG_(debugLog)(
+ 1, "hashtable",
+ "resizing table `%s' from %lu to %lu (total elems %lu)\n",
+ table->name, (UWord)old_chains, (UWord)new_chains,
+ (UWord)table->n_elements );
+
+ table->n_chains = new_chains;
+ sz = new_chains * sizeof(VgHashNode*);
+ chains = VG_(calloc)("hashtable.resize.1", 1, sz);
+
+ for (i = 0; i < old_chains; i++) {
+ node = table->chains[i];
+ while (node != NULL) {
+ VgHashNode* next = node->next;
+ UWord chain = CHAIN_NO(node->key, table);
+ node->next = chains[chain];
+ chains[chain] = node;
+ node = next;
+ }
+ }
+
+ VG_(free)(table->chains);
+ table->chains = chains;
+}
+
+/* Puts a new, heap allocated VgHashNode, into the VgHashTable. Prepends
+ the node to the appropriate chain. */
+void VG_(HT_add_node) ( VgHashTable table, void* vnode )
+{
+ VgHashNode* node = (VgHashNode*)vnode;
+ UWord chain = CHAIN_NO(node->key, table);
+ node->next = table->chains[chain];
+ table->chains[chain] = node;
+ table->n_elements++;
+ if ( (1 * (ULong)table->n_elements) > (1 * (ULong)table->n_chains) ) {
+ resize(table);
+ }
+
+ /* Table has been modified; hence HT_Next should assert. */
+ table->iterOK = False;
+}
+
+/* Looks up a VgHashNode in the table. Returns NULL if not found. */
+void* VG_(HT_lookup) ( VgHashTable table, UWord key )
+{
+ VgHashNode* curr = table->chains[ CHAIN_NO(key, table) ];
+
+ while (curr) {
+ if (key == curr->key) {
+ return curr;
+ }
+ curr = curr->next;
+ }
+ return NULL;
+}
+
+/* Removes a VgHashNode from the table. Returns NULL if not found. */
+void* VG_(HT_remove) ( VgHashTable table, UWord key )
+{
+ UWord chain = CHAIN_NO(key, table);
+ VgHashNode* curr = table->chains[chain];
+ VgHashNode** prev_next_ptr = &(table->chains[chain]);
+
+ /* Table has been modified; hence HT_Next should assert. */
+ table->iterOK = False;
+
+ while (curr) {
+ if (key == curr->key) {
+ *prev_next_ptr = curr->next;
+ table->n_elements--;
+ return curr;
+ }
+ prev_next_ptr = &(curr->next);
+ curr = curr->next;
+ }
+ return NULL;
+}
+
+/* Allocates a suitably-sized array, copies all the hashtable elements
+ into it, then returns both the array and the size of it. This is
+ used by the memory-leak detector. The array must be freed with
+ VG_(free).
+*/
+VgHashNode** VG_(HT_to_array) ( VgHashTable table, /*OUT*/ UInt* n_elems )
+{
+ UInt i, j;
+ VgHashNode** arr;
+ VgHashNode* node;
+
+ *n_elems = 0;
+ for (i = 0; i < table->n_chains; i++) {
+ for (node = table->chains[i]; node != NULL; node = node->next) {
+ (*n_elems)++;
+ }
+ }
+ if (*n_elems == 0)
+ return NULL;
+
+ arr = VG_(malloc)( "hashtable.Hta.1", *n_elems * sizeof(VgHashNode*) );
+
+ j = 0;
+ for (i = 0; i < table->n_chains; i++) {
+ for (node = table->chains[i]; node != NULL; node = node->next) {
+ arr[j++] = node;
+ }
+ }
+ vg_assert(j == *n_elems);
+
+ return arr;
+}
+
+void VG_(HT_ResetIter)(VgHashTable table)
+{
+ vg_assert(table);
+ table->iterNode = NULL;
+ table->iterChain = 0;
+ table->iterOK = True;
+}
+
+void* VG_(HT_Next)(VgHashTable table)
+{
+ Int i;
+ vg_assert(table);
+ /* See long comment on HT_Next prototype in pub_tool_hashtable.h.
+ In short if this fails, it means the caller tried to modify the
+ table whilst iterating over it, which is a bug. */
+ vg_assert(table->iterOK);
+
+ if (table->iterNode && table->iterNode->next) {
+ table->iterNode = table->iterNode->next;
+ return table->iterNode;
+ }
+
+ for (i = table->iterChain; i < table->n_chains; i++) {
+ if (table->chains[i]) {
+ table->iterNode = table->chains[i];
+ table->iterChain = i + 1; // Next chain to be traversed
+ return table->iterNode;
+ }
+ }
+ return NULL;
+}
+
+void VG_(HT_destruct)(VgHashTable table)
+{
+ UInt i;
+ VgHashNode *node, *node_next;
+
+ for (i = 0; i < table->n_chains; i++) {
+ for (node = table->chains[i]; node != NULL; node = node_next) {
+ node_next = node->next;
+ VG_(free)(node);
+ }
+ }
+ VG_(free)(table->chains);
+ VG_(free)(table);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_libcassert.c.svn-base b/coregrind/.svn/text-base/m_libcassert.c.svn-base
new file mode 100644
index 0000000..1d7e5a7
--- /dev/null
+++ b/coregrind/.svn/text-base/m_libcassert.c.svn-base
@@ -0,0 +1,261 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Assertions and panics. m_libcassert.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h" // For VG_(gettid)()
+#include "pub_core_stacktrace.h"
+#include "pub_core_syscall.h"
+#include "pub_core_tooliface.h" // For VG_(details).{name,bug_reports_to}
+#include "pub_core_options.h" // For VG_(clo_xml)
+
+/* ---------------------------------------------------------------------
+ Assertery.
+ ------------------------------------------------------------------ */
+
+#if defined(VGP_x86_linux)
+# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
+ asm("call 0f;" \
+ "0: popl %0;" \
+ "movl %%esp, %1;" \
+ "movl %%ebp, %2;" \
+ : "=r" (pc),\
+ "=r" (sp),\
+ "=r" (fp));
+#elif defined(VGP_amd64_linux)
+# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
+ asm("leaq 0(%%rip), %0;" \
+ "movq %%rsp, %1;" \
+ "movq %%rbp, %2;" \
+ : "=r" (pc),\
+ "=r" (sp),\
+ "=r" (fp));
+#elif defined(VGP_ppc32_linux) || defined(VGP_ppc32_aix5)
+# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
+ asm("mflr 0;" /* r0 = lr */ \
+ "bl m_libcassert_get_ip;" /* lr = pc */ \
+ "m_libcassert_get_ip:\n" \
+ "mflr %0;" \
+ "mtlr 0;" /* restore lr */ \
+ "mr %1,1;" \
+ "mr %2,1;" \
+ : "=r" (pc), \
+ "=r" (sp), \
+ "=r" (fp) \
+ : /* reads none */ \
+ : "r0" /* trashed */ );
+#elif defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
+# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
+ asm("mflr 0;" /* r0 = lr */ \
+ "bl .m_libcassert_get_ip;" /* lr = pc */ \
+ ".m_libcassert_get_ip:\n" \
+ "mflr %0;" \
+ "mtlr 0;" /* restore lr */ \
+ "mr %1,1;" \
+ "mr %2,1;" \
+ : "=r" (pc), \
+ "=r" (sp), \
+ "=r" (fp) \
+ : /* reads none */ \
+ : "r0" /* trashed */ );
+#else
+# error Unknown platform
+#endif
+
+#define BACKTRACE_DEPTH 100 // nice and deep!
+
+/* Pull down the entire world */
+void VG_(exit)( Int status )
+{
+# if defined(VGO_linux)
+ (void)VG_(do_syscall1)(__NR_exit_group, status );
+# endif
+ (void)VG_(do_syscall1)(__NR_exit, status );
+ /* Why are we still alive here? */
+ /*NOTREACHED*/
+ *(volatile Int *)0 = 'x';
+ vg_assert(2+2 == 5);
+}
+
+// Print the scheduler status.
+void VG_(show_sched_status) ( void )
+{
+ Int i;
+ VG_(printf)("\nsched status:\n");
+ VG_(printf)(" running_tid=%d\n", VG_(get_running_tid)());
+ for (i = 1; i < VG_N_THREADS; i++) {
+ if (VG_(threads)[i].status == VgTs_Empty) continue;
+ VG_(printf)( "\nThread %d: status = %s\n", i,
+ VG_(name_of_ThreadStatus)(VG_(threads)[i].status) );
+ VG_(get_and_pp_StackTrace)( i, BACKTRACE_DEPTH );
+ }
+ VG_(printf)("\n");
+}
+
/* Print a stack trace (from the supplied register values, or from the
   real CPU state if ip/sp/fp are all zero), the scheduler status and
   bug-reporting advice, then exit Valgrind with status 1.  'report' is
   the address to which bug reports should be sent. */
__attribute__ ((noreturn))
static void report_and_quit ( const Char* report,
                              Addr ip, Addr sp, Addr fp, Addr lr )
{
   Addr stacktop;
   Addr ips[BACKTRACE_DEPTH];
   ThreadState *tst
      = VG_(get_ThreadState)( VG_(lwpid_to_vgtid)( VG_(gettid)() ) );

   // If necessary, fake up an ExeContext which is of our actual real CPU
   // state.  Could cause problems if we got the panic/exception within the
   // execontext/stack dump/symtab code.  But it's better than nothing.
   if (0 == ip && 0 == sp && 0 == fp) {
      GET_REAL_PC_SP_AND_FP(ip, sp, fp);
   }

   /* Top of the stack we are (or were) running on. */
   stacktop = tst->os_state.valgrind_stack_init_SP;

   VG_(get_StackTrace_wrk)(
      0/*tid is unknown*/,
      ips, BACKTRACE_DEPTH,
      NULL/*array to dump SP values in*/,
      NULL/*array to dump FP values in*/,
      ip, sp, fp, lr, sp, stacktop
   );
   VG_(pp_StackTrace) (ips, BACKTRACE_DEPTH);

   VG_(show_sched_status)();
   VG_(printf)(
      "\n"
      "Note: see also the FAQ in the source distribution.\n"
      "It contains workarounds to several common problems.\n"
      "In particular, if Valgrind aborted or crashed after\n"
      "identifying problems in your program, there's a good chance\n"
      "that fixing those problems will prevent Valgrind aborting or\n"
      "crashing, especially if it happened in m_mallocfree.c.\n"
      "\n"
      "If that doesn't help, please report this bug to: %s\n\n"
      "In the bug report, send all the above text, the valgrind\n"
      "version, and what OS and version you are using.  Thanks.\n\n",
      report);
   VG_(exit)(1);
}
+
/* Handler for vg_assert/tl_assert failures.  Prints the failing
   expression and optional printf-style message, then hands over to
   report_and_quit.  'isCore' selects whether the failure is blamed on
   Valgrind itself or on the tool in use. */
void VG_(assert_fail) ( Bool isCore, const Char* expr, const Char* file,
                        Int line, const Char* fn, const HChar* format, ... )
{
   va_list vargs;
   Char buf[256];   /* NOTE(review): fixed-size; assumes the formatted
                       message fits — verify callers keep it short. */
   Char* component;
   Char* bugs_to;

   /* Guard against recursive assertion failures: second entry just
      bails out hard. */
   static Bool entered = False;
   if (entered)
      VG_(exit)(2);
   entered = True;

   va_start(vargs, format);
   VG_(vsprintf) ( buf, format, vargs );
   va_end(vargs);

   if (isCore) {
      component = "valgrind";
      bugs_to   = VG_BUGS_TO;
   } else {
      component = VG_(details).name;
      bugs_to   = VG_(details).bug_reports_to;
   }

   /* Keep any partial XML output well-formed. */
   if (VG_(clo_xml))
      VG_UMSG("</valgrindoutput>\n");

   // Treat vg_assert2(0, "foo") specially, as a panicky abort
   if (VG_STREQ(expr, "0")) {
      VG_(printf)("\n%s: %s:%d (%s): the 'impossible' happened.\n",
                  component, file, line, fn );
   } else {
      VG_(printf)("\n%s: %s:%d (%s): Assertion '%s' failed.\n",
                  component, file, line, fn, expr );
   }
   if (!VG_STREQ(buf, ""))
      VG_(printf)("%s: %s\n", component, buf );

   report_and_quit(bugs_to, 0,0,0,0);
}
+
/* Common helper for core/tool panics: announce the 'impossible'
   happening on behalf of 'name', then report and exit. */
__attribute__ ((noreturn))
static void panic ( Char* name, Char* report, Char* str,
                    Addr ip, Addr sp, Addr fp, Addr lr )
{
   /* Keep any partial XML output well-formed. */
   if (VG_(clo_xml))
      VG_UMSG("</valgrindoutput>\n");
   VG_(printf)("\n%s: the 'impossible' happened:\n %s\n", name, str);
   report_and_quit(report, ip, sp, fp, lr);
}
+
/* Core panic with an explicit register state to derive the stack
   trace from. */
void VG_(core_panic_at) ( Char* str, Addr ip, Addr sp, Addr fp, Addr lr )
{
   panic("valgrind", VG_BUGS_TO, str, ip, sp, fp, lr);
}
+
/* Core panic; zero registers mean "use the real CPU state". */
void VG_(core_panic) ( Char* str )
{
   VG_(core_panic_at)(str, 0,0,0,0);
}
+
/* Panic attributed to the tool in use, directing reports to the
   tool's own bug address. */
void VG_(tool_panic) ( Char* str )
{
   panic(VG_(details).name, VG_(details).bug_reports_to, str, 0,0,0,0);
}
+
+/* Print some helpful-ish text about unimplemented things, and give up. */
+void VG_(unimplemented) ( Char* msg )
+{
+ if (VG_(clo_xml))
+ VG_UMSG("</valgrindoutput>\n");
+ VG_UMSG("");
+ VG_UMSG("Valgrind detected that your program requires");
+ VG_UMSG("the following unimplemented functionality:");
+ VG_UMSG(" %s", msg);
+ VG_UMSG("This may be because the functionality is hard to implement,");
+ VG_UMSG("or because no reasonable program would behave this way,");
+ VG_UMSG("or because nobody has yet needed it. In any case, let us know at");
+ VG_UMSG("%s and/or try to work around the problem, if you can.", VG_BUGS_TO);
+ VG_UMSG("");
+ VG_UMSG("Valgrind has to exit now. Sorry. Bye!");
+ VG_UMSG("");
+ VG_(show_sched_status)();
+ VG_(exit)(1);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/m_libcbase.c.svn-base b/coregrind/.svn/text-base/m_libcbase.c.svn-base
new file mode 100644
index 0000000..0d6a6b1
--- /dev/null
+++ b/coregrind/.svn/text-base/m_libcbase.c.svn-base
@@ -0,0 +1,627 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Entirely standalone libc stuff. m_libcbase.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_libcbase.h"
+
+/* ---------------------------------------------------------------------
+ Char functions.
+ ------------------------------------------------------------------ */
+
+Bool VG_(isspace) ( Char c )
+{
+ return (c == ' ' || c == '\n' || c == '\t' ||
+ c == '\f' || c == '\v' || c == '\r');
+}
+
+Bool VG_(isdigit) ( Char c )
+{
+ return (c >= '0' && c <= '9');
+}
+
+/* ---------------------------------------------------------------------
+ Converting strings to numbers
+ ------------------------------------------------------------------ */
+
+static Bool is_dec_digit(Char c, Long* digit)
+{
+ if (c >= '0' && c <= '9') { *digit = (Long)(c - '0'); return True; }
+ return False;
+}
+
+static Bool is_hex_digit(Char c, Long* digit)
+{
+ if (c >= '0' && c <= '9') { *digit = (Long)(c - '0'); return True; }
+ if (c >= 'A' && c <= 'F') { *digit = (Long)((c - 'A') + 10); return True; }
+ if (c >= 'a' && c <= 'f') { *digit = (Long)((c - 'a') + 10); return True; }
+ return False;
+}
+
+Long VG_(strtoll10) ( Char* str, Char** endptr )
+{
+ Bool neg = False, converted = False;
+ Long n = 0, digit = 0;
+ Char* str0 = str;
+
+ // Skip leading whitespace.
+ while (VG_(isspace)(*str)) str++;
+
+ // Allow a leading '-' or '+'.
+ if (*str == '-') { str++; neg = True; }
+ else if (*str == '+') { str++; }
+
+ while (is_dec_digit(*str, &digit)) {
+ converted = True; // Ok, we've actually converted a digit.
+ n = 10*n + digit;
+ str++;
+ }
+
+ if (!converted) str = str0; // If nothing converted, endptr points to
+ if (neg) n = -n; // the start of the string.
+ if (endptr) *endptr = str; // Record first failing character.
+ return n;
+}
+
+Long VG_(strtoll16) ( Char* str, Char** endptr )
+{
+ Bool neg = False, converted = False;
+ Long n = 0, digit = 0;
+ Char* str0 = str;
+
+ // Skip leading whitespace.
+ while (VG_(isspace)(*str)) str++;
+
+ // Allow a leading '-' or '+'.
+ if (*str == '-') { str++; neg = True; }
+ else if (*str == '+') { str++; }
+
+ // Allow leading "0x", but only if there's a hex digit
+ // following it.
+ if (*str == '0'
+ && (*(str+1) == 'x' || *(str+1) == 'X')
+ && is_hex_digit( *(str+2), &digit )) {
+ str += 2;
+ }
+
+ while (is_hex_digit(*str, &digit)) {
+ converted = True; // Ok, we've actually converted a digit.
+ n = 16*n + digit;
+ str++;
+ }
+
+ if (!converted) str = str0; // If nothing converted, endptr points to
+ if (neg) n = -n; // the start of the string.
+ if (endptr) *endptr = str; // Record first failing character.
+ return n;
+}
+
+double VG_(strtod) ( Char* str, Char** endptr )
+{
+ Bool neg = False;
+ Long digit;
+ double n = 0, frac = 0, x = 0.1;
+
+ // Skip leading whitespace.
+ while (VG_(isspace)(*str)) str++;
+
+ // Allow a leading '-' or '+'.
+ if (*str == '-') { str++; neg = True; }
+ else if (*str == '+') { str++; }
+
+ while (is_dec_digit(*str, &digit)) {
+ n = 10*n + digit;
+ str++;
+ }
+
+ if (*str == '.') {
+ str++;
+ while (is_dec_digit(*str, &digit)) {
+ frac += x*digit;
+ x /= 10;
+ str++;
+ }
+ }
+
+ n += frac;
+ if (neg) n = -n;
+ if (endptr) *endptr = str; // Record first failing character.
+ return n;
+}
+
+/* ---------------------------------------------------------------------
+ String functions
+ ------------------------------------------------------------------ */
+
+SizeT VG_(strlen) ( const Char* str )
+{
+ SizeT i = 0;
+ while (str[i] != 0) i++;
+ return i;
+}
+
+Char* VG_(strcat) ( Char* dest, const Char* src )
+{
+ Char* dest_orig = dest;
+ while (*dest) dest++;
+ while (*src) *dest++ = *src++;
+ *dest = 0;
+ return dest_orig;
+}
+
+Char* VG_(strncat) ( Char* dest, const Char* src, SizeT n )
+{
+ Char* dest_orig = dest;
+ while (*dest) dest++;
+ while (*src && n > 0) { *dest++ = *src++; n--; }
+ *dest = 0;
+ return dest_orig;
+}
+
+Char* VG_(strpbrk) ( const Char* s, const Char* accept )
+{
+ const Char* a;
+ while (*s) {
+ a = accept;
+ while (*a)
+ if (*a++ == *s)
+ return (Char *) s;
+ s++;
+ }
+ return NULL;
+}
+
+Char* VG_(strcpy) ( Char* dest, const Char* src )
+{
+ Char* dest_orig = dest;
+ while (*src) *dest++ = *src++;
+ *dest = 0;
+ return dest_orig;
+}
+
+/* Copy bytes, not overrunning the end of dest and always ensuring
+ zero termination. */
+void VG_(strncpy_safely) ( Char* dest, const Char* src, SizeT ndest )
+{
+ SizeT i = 0;
+ while (True) {
+ dest[i] = 0;
+ if (src[i] == 0) return;
+ if (i >= ndest-1) return;
+ dest[i] = src[i];
+ i++;
+ }
+}
+
+Char* VG_(strncpy) ( Char* dest, const Char* src, SizeT ndest )
+{
+ SizeT i = 0;
+ while (True) {
+ if (i >= ndest) return dest; /* reached limit */
+ dest[i] = src[i];
+ if (src[i++] == 0) {
+ /* reached NUL; pad rest with zeroes as required */
+ while (i < ndest) dest[i++] = 0;
+ return dest;
+ }
+ }
+}
+
+Int VG_(strcmp) ( const Char* s1, const Char* s2 )
+{
+ while (True) {
+ if (*s1 == 0 && *s2 == 0) return 0;
+ if (*s1 == 0) return -1;
+ if (*s2 == 0) return 1;
+
+ if (*(UChar*)s1 < *(UChar*)s2) return -1;
+ if (*(UChar*)s1 > *(UChar*)s2) return 1;
+
+ s1++; s2++;
+ }
+}
+
+Int VG_(strncmp) ( const Char* s1, const Char* s2, SizeT nmax )
+{
+ SizeT n = 0;
+ while (True) {
+ if (n >= nmax) return 0;
+ if (*s1 == 0 && *s2 == 0) return 0;
+ if (*s1 == 0) return -1;
+ if (*s2 == 0) return 1;
+
+ if (*(UChar*)s1 < *(UChar*)s2) return -1;
+ if (*(UChar*)s1 > *(UChar*)s2) return 1;
+
+ s1++; s2++; n++;
+ }
+}
+
+Char* VG_(strstr) ( const Char* haystack, Char* needle )
+{
+ SizeT n;
+ if (haystack == NULL)
+ return NULL;
+ n = VG_(strlen)(needle);
+ while (True) {
+ if (haystack[0] == 0)
+ return NULL;
+ if (VG_(strncmp)(haystack, needle, n) == 0)
+ return (Char*)haystack;
+ haystack++;
+ }
+}
+
+Char* VG_(strchr) ( const Char* s, Char c )
+{
+ while (True) {
+ if (*s == c) return (Char*)s;
+ if (*s == 0) return NULL;
+ s++;
+ }
+}
+
+Char* VG_(strrchr) ( const Char* s, Char c )
+{
+ Int n = VG_(strlen)(s);
+ while (--n > 0) {
+ if (s[n] == c) return (Char*)s + n;
+ }
+ return NULL;
+}
+
+SizeT VG_(strspn) ( const Char* s, const Char* accept )
+{
+ const Char *p, *a;
+ SizeT count = 0;
+ for (p = s; *p != '\0'; ++p) {
+ for (a = accept; *a != '\0'; ++a)
+ if (*p == *a)
+ break;
+ if (*a == '\0')
+ return count;
+ else
+ ++count;
+ }
+ return count;
+}
+
+SizeT VG_(strcspn) ( const Char* s, const char* reject )
+{
+ SizeT count = 0;
+ while (*s != '\0') {
+ if (VG_(strchr) (reject, *s++) == NULL)
+ ++count;
+ else
+ return count;
+ }
+ return count;
+}
+
+
+/* ---------------------------------------------------------------------
+ mem* functions
+ ------------------------------------------------------------------ */
+
/* Copy sz bytes from src to dest (non-overlapping).  Uses a word-wise
   fast path when both pointers are 4-aligned, falling back to a byte
   loop for the unaligned case and any tail. */
void* VG_(memcpy) ( void *dest, const void *src, SizeT sz )
{
   const UChar* s  = (const UChar*)src;
         UChar* d  =       (UChar*)dest;
   const UInt*  sI = (const UInt*)src;
         UInt*  dI =       (UInt*)dest;

   if (VG_IS_4_ALIGNED(dI) && VG_IS_4_ALIGNED(sI)) {
      /* 16 bytes (4 words) at a time. */
      while (sz >= 16) {
         dI[0] = sI[0];
         dI[1] = sI[1];
         dI[2] = sI[2];
         dI[3] = sI[3];
         sz -= 16;
         dI += 4;
         sI += 4;
      }
      if (sz == 0)
         return dest;
      /* Then one word at a time. */
      while (sz >= 4) {
         dI[0] = sI[0];
         sz -= 4;
         dI += 1;
         sI += 1;
      }
      if (sz == 0)
         return dest;
      /* Fall through to the byte loop for the remaining 1..3 bytes. */
      s = (const UChar*)sI;
      d = (UChar*)dI;
   }

   while (sz--)
      *d++ = *s++;

   return dest;
}
+
+void* VG_(memmove)(void *dest, const void *src, SizeT sz)
+{
+ SizeT i;
+ if (sz == 0)
+ return dest;
+ if (dest < src) {
+ for (i = 0; i < sz; i++) {
+ ((UChar*)dest)[i] = ((UChar*)src)[i];
+ }
+ }
+ else if (dest > src) {
+ for (i = sz - 1; i >= 0; i--) {
+ ((UChar*)dest)[i] = ((UChar*)src)[i];
+ }
+ }
+ return dest;
+}
+
/* Fill sz bytes at destV with (the low byte of) c.  Aligns to a
   4-byte boundary, then fills word-wise, then handles the tail. */
void* VG_(memset) ( void *destV, Int c, SizeT sz )
{
   Int   c4;
   Char* d = (Char*)destV;
   /* Byte-fill until d is 4-aligned (or we run out). */
   while ((!VG_IS_4_ALIGNED(d)) && sz >= 1) {
      d[0] = c;
      d++;
      sz--;
   }
   if (sz == 0)
      return destV;
   /* Replicate the fill byte into all four bytes of a word. */
   c4 = c & 0xFF;
   c4 |= (c4 << 8);
   c4 |= (c4 << 16);
   /* 16 bytes (4 words) at a time. */
   while (sz >= 16) {
      ((Int*)d)[0] = c4;
      ((Int*)d)[1] = c4;
      ((Int*)d)[2] = c4;
      ((Int*)d)[3] = c4;
      d += 16;
      sz -= 16;
   }
   /* One word at a time. */
   while (sz >= 4) {
      ((Int*)d)[0] = c4;
      d += 4;
      sz -= 4;
   }
   /* Remaining 1..3 bytes. */
   while (sz >= 1) {
      d[0] = c;
      d++;
      sz--;
   }
   return destV;
}
+
+Int VG_(memcmp) ( const void* s1, const void* s2, SizeT n )
+{
+ Int res;
+ const UChar *p1 = s1;
+ const UChar *p2 = s2;
+ UChar a0;
+ UChar b0;
+
+ while (n != 0) {
+ a0 = p1[0];
+ b0 = p2[0];
+ p1 += 1;
+ p2 += 1;
+ res = a0 - b0;
+ if (res != 0)
+ return res;
+ n -= 1;
+ }
+ return 0;
+}
+
+/* ---------------------------------------------------------------------
+ Misc useful functions
+ ------------------------------------------------------------------ */
+
+/////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////
+/// begin Bentley-McIlroy style quicksort
+/// See "Engineering a Sort Function". Jon L Bentley, M. Douglas
+/// McIlroy. Software Practice and Experience Vol 23(11), Nov 1993.
+
/* Smaller of a and b.  NB: the expansion is deliberately kept as in
   the paper; only used below with simple arguments. */
#define BM_MIN(a, b) \
   (a) < (b) ? a : b

/* Choose a swap strategy: 2 = byte-at-a-time (element misaligned or
   not a multiple of Word), 1 = multiple Words, 0 = exactly one Word
   (can be exchanged via a temporary). */
#define BM_SWAPINIT(a, es) \
   swaptype = ((a-(Char*)0) | es) % sizeof(Word) ? 2 \
            : es > (SizeT)sizeof(Word) ? 1 \
            : 0

/* Exchange a and b through temporary t. */
#define BM_EXCH(a, b, t) \
   (t = a, a = b, b = t)

/* Swap one element at a and b, using the strategy chosen above. */
#define BM_SWAP(a, b) \
   swaptype != 0 \
      ? bm_swapfunc(a, b, es, swaptype) \
      : (void)BM_EXCH(*(Word*)(a), *(Word*)(b), t)

/* Swap n bytes at a and b. */
#define BM_VECSWAP(a, b, n) \
   if (n > 0) bm_swapfunc(a, b, n, swaptype)

/* Set up pv to point at the pivot value: either in-place at a (after
   swapping the chosen pivot there), or in local Word v for the
   single-Word case. */
#define BM_PVINIT(pv, pm) \
   if (swaptype != 0) \
      pv = a, BM_SWAP(pv, pm); \
   else \
      pv = (Char*)&v, v = *(Word*)pm

/* Median of three elements under cmp. */
static Char* bm_med3 ( Char* a, Char* b, Char* c,
                       Int (*cmp)(void*,void*) ) {
   return cmp(a, b) < 0
          ? (cmp(b, c) < 0 ? b : cmp(a, c) < 0 ? c : a)
          : (cmp(b, c) > 0 ? b : cmp(a, c) > 0 ? c : a);
}

/* Swap n bytes between a and b, Word-wise when strategy allows,
   byte-wise otherwise. */
static void bm_swapfunc ( Char* a, Char* b, SizeT n, Int swaptype )
{
   if (swaptype <= 1) {
      Word t;
      for ( ; n > 0; a += sizeof(Word), b += sizeof(Word),
                                        n -= sizeof(Word))
         BM_EXCH(*(Word*)a, *(Word*)b, t);
   } else {
      Char t;
      for ( ; n > 0; a += 1, b += 1, n -= 1)
         BM_EXCH(*a, *b, t);
   }
}

/* Bentley-McIlroy three-way-partition quicksort of n elements of
   size es at a, ordered by cmp.  Insertion sort below 7 elements;
   median-of-3 (of-9 for large arrays) pivot selection. */
static void bm_qsort ( Char* a, SizeT n, SizeT es,
                       Int (*cmp)(void*,void*) )
{
   Char  *pa, *pb, *pc, *pd, *pl, *pm, *pn, *pv;
   Int   r, swaptype;
   Word  t, v;
   SizeT s, s1, s2;
  tailcall:
   BM_SWAPINIT(a, es);
   /* Small arrays: insertion sort. */
   if (n < 7) {
      for (pm = a + es; pm < a + n*es; pm += es)
         for (pl = pm; pl > a && cmp(pl-es, pl) > 0; pl -= es)
            BM_SWAP(pl, pl-es);
      return;
   }
   /* Pivot selection: middle element; median of 3 for n > 7;
      pseudo-median of 9 for n > 40. */
   pm = a + (n/2)*es;
   if (n > 7) {
      pl = a;
      pn = a + (n-1)*es;
      if (n > 40) {
         s = (n/8)*es;
         pl = bm_med3(pl, pl+s, pl+2*s, cmp);
         pm = bm_med3(pm-s, pm, pm+s, cmp);
         pn = bm_med3(pn-2*s, pn-s, pn, cmp);
      }
      pm = bm_med3(pl, pm, pn, cmp);
   }
   BM_PVINIT(pv, pm);
   /* Three-way partition: equal-to-pivot elements collect at both
      ends and are swapped into the middle afterwards. */
   pa = pb = a;
   pc = pd = a + (n-1)*es;
   for (;;) {
      while (pb <= pc && (r = cmp(pb, pv)) <= 0) {
         if (r == 0) { BM_SWAP(pa, pb); pa += es; }
         pb += es;
      }
      while (pc >= pb && (r = cmp(pc, pv)) >= 0) {
         if (r == 0) { BM_SWAP(pc, pd); pd -= es; }
         pc -= es;
      }
      if (pb > pc) break;
      BM_SWAP(pb, pc);
      pb += es;
      pc -= es;
   }
   /* Move the equal-to-pivot blocks into the middle. */
   pn = a + n*es;
   s = BM_MIN(pa-a,  pb-pa   ); BM_VECSWAP(a,  pb-s, s);
   s = BM_MIN(pd-pc, pn-pd-es); BM_VECSWAP(pb, pn-s, s);
   /* Now recurse.  Do the smaller partition first with an explicit
      recursion, then do the larger partition using a tail call.
      Except we can't rely on gcc to implement a tail call in any sane
      way, so simply jump back to the start.  This guarantees stack
      growth can never exceed O(log N) even in the worst case. */
   s1 = pb-pa;
   s2 = pd-pc;
   if (s1 < s2) {
      if (s1 > es) {
         bm_qsort(a, s1/es, es, cmp);
      }
      if (s2 > es) {
         /* bm_qsort(pn-s2, s2/es, es, cmp); */
         a = pn-s2; n = s2/es; es = es; cmp = cmp;
         goto tailcall;
      }
   } else {
      if (s2 > es) {
         bm_qsort(pn-s2, s2/es, es, cmp);
      }
      if (s1 > es) {
         /* bm_qsort(a, s1/es, es, cmp); */
         a = a; n = s1/es; es = es; cmp = cmp;
         goto tailcall;
      }
   }
}
+
+#undef BM_MIN
+#undef BM_SWAPINIT
+#undef BM_EXCH
+#undef BM_SWAP
+#undef BM_VECSWAP
+#undef BM_PVINIT
+
+/// end Bentley-McIlroy style quicksort
+/////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////
+
+/* Returns the base-2 logarithm of x. Returns -1 if x is not a power
+ of two. */
+Int VG_(log2) ( UInt x )
+{
+ Int i;
+ /* Any more than 32 and we overflow anyway... */
+ for (i = 0; i < 32; i++) {
+ if ((1U << i) == x) return i;
+ }
+ return -1;
+}
+
+
+// Generic quick sort.
// Generic quick sort: sorts nmemb elements of 'size' bytes each at
// 'base', ordered by 'compar' (same contract as the libc qsort
// comparator).  Not stable.
void VG_(ssort)( void* base, SizeT nmemb, SizeT size,
                 Int (*compar)(void*, void*) )
{
   bm_qsort(base,nmemb,size,compar);
}
+
+
+// This random number generator is based on the one suggested in Kernighan
+// and Ritchie's "The C Programming Language".
+
+// A pseudo-random number generator returning a random UInt. If pSeed
+// is NULL, it uses its own seed, which starts at zero. If pSeed is
+// non-NULL, it uses and updates whatever pSeed points at.
+
+static UInt seed = 0;
+
+UInt VG_(random)( /*MOD*/UInt* pSeed )
+{
+ if (pSeed == NULL)
+ pSeed = &seed;
+
+ *pSeed = (1103515245 * *pSeed + 12345);
+ return *pSeed;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/m_libcfile.c.svn-base b/coregrind/.svn/text-base/m_libcfile.c.svn-base
new file mode 100644
index 0000000..d60c552
--- /dev/null
+++ b/coregrind/.svn/text-base/m_libcfile.c.svn-base
@@ -0,0 +1,858 @@
+
+/*--------------------------------------------------------------------*/
+/*--- File- and socket-related libc stuff. m_libcfile.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h" // VG_(sprintf)
+#include "pub_core_libcproc.h" // VG_(getpid), VG_(getppid)
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h" // VG_(fd_hard_limit)
+#include "pub_core_syscall.h"
+
+/* ---------------------------------------------------------------------
+ File stuff
+ ------------------------------------------------------------------ */
+
+static inline Bool fd_exists(Int fd)
+{
+ struct vg_stat st;
+ return VG_(fstat)(fd, &st) == 0;
+}
+
+/* Move an fd into the Valgrind-safe range */
+Int VG_(safe_fd)(Int oldfd)
+{
+ Int newfd;
+
+ vg_assert(VG_(fd_hard_limit) != -1);
+
+ newfd = VG_(fcntl)(oldfd, VKI_F_DUPFD, VG_(fd_hard_limit));
+ if (newfd != -1)
+ VG_(close)(oldfd);
+
+ /* Set the close-on-exec flag for this fd. */
+ VG_(fcntl)(newfd, VKI_F_SETFD, VKI_FD_CLOEXEC);
+
+ vg_assert(newfd >= VG_(fd_hard_limit));
+ return newfd;
+}
+
+/* Given a file descriptor, attempt to deduce its filename. To do
+ this, we use /proc/self/fd/<FD>. If this doesn't point to a file,
+ or if it doesn't exist, we return False. */
/* Given a file descriptor, attempt to deduce its filename.  To do
   this, we use /proc/self/fd/<FD>.  If this doesn't point to a file,
   or if it doesn't exist, we return False.  On success buf holds an
   absolute path. */
Bool VG_(resolve_filename) ( Int fd, HChar* buf, Int n_buf )
{
#  if defined(VGO_linux)
   HChar tmp[64];
   VG_(sprintf)(tmp, "/proc/self/fd/%d", fd);
   VG_(memset)(buf, 0, n_buf);
   /* NOTE(review): readlink does not NUL-terminate; if the link text
      is exactly n_buf bytes the pre-zeroing above provides no
      terminator — verify callers pass a generous n_buf. */
   if (VG_(readlink)(tmp, buf, n_buf) > 0 && buf[0] == '/')
      return True;
   else
      return False;
#  elif defined(VGO_aix5)
   I_die_here; /* maybe just return False? */
   return False;
#  else
#    error "need fd-to-filename for this OS"
#  endif
}
+
/* Thin wrapper around the open syscall; result/error in the SysRes. */
SysRes VG_(open) ( const Char* pathname, Int flags, Int mode )
{
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
   return res;
}
+
/* Close fd; any error from the syscall is ignored. */
void VG_(close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}
+
+Int VG_(read) ( Int fd, void* buf, Int count)
+{
+ Int ret;
+ SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
+ if (res.isError) {
+ ret = - (Int)(Word)res.err;
+ vg_assert(ret < 0);
+ } else {
+ ret = (Int)(Word)res.res;
+ vg_assert(ret >= 0);
+ }
+ return ret;
+}
+
+Int VG_(write) ( Int fd, const void* buf, Int count)
+{
+ Int ret;
+ SysRes res = VG_(do_syscall3)(__NR_write, fd, (UWord)buf, count);
+ if (res.isError) {
+ ret = - (Int)(Word)res.err;
+ vg_assert(ret < 0);
+ } else {
+ ret = (Int)(Word)res.res;
+ vg_assert(ret >= 0);
+ }
+ return ret;
+}
+
/* pipe(2) wrapper: fills fd[0]/fd[1]; returns 0 on success, -1 on
   error. */
Int VG_(pipe) ( Int fd[2] )
{
   SysRes res = VG_(do_syscall1)(__NR_pipe, (UWord)fd);
   return res.isError ? -1 : 0;
}
+
/* lseek(2) wrapper: returns the resulting offset, or -1 on error. */
OffT VG_(lseek) ( Int fd, OffT offset, Int whence )
{
   SysRes res = VG_(do_syscall3)(__NR_lseek, fd, offset, whence);
   return res.isError ? (-1) : res.res;
   /* if you change the error-reporting conventions of this, also
      change VG_(pread) and all other usage points. */
}
+
+
+/* stat/fstat support. It's uggerly. We have impedance-match into a
+ 'struct vg_stat' in order to have a single structure that callers
+ can use consistently on all platforms. */
+
+#define TRANSLATE_TO_vg_stat(_p_vgstat, _p_vkistat) \
+ do { \
+ (_p_vgstat)->st_dev = (ULong)( (_p_vkistat)->st_dev ); \
+ (_p_vgstat)->st_ino = (ULong)( (_p_vkistat)->st_ino ); \
+ (_p_vgstat)->st_nlink = (ULong)( (_p_vkistat)->st_nlink ); \
+ (_p_vgstat)->st_mode = (UInt)( (_p_vkistat)->st_mode ); \
+ (_p_vgstat)->st_uid = (UInt)( (_p_vkistat)->st_uid ); \
+ (_p_vgstat)->st_gid = (UInt)( (_p_vkistat)->st_gid ); \
+ (_p_vgstat)->st_rdev = (ULong)( (_p_vkistat)->st_rdev ); \
+ (_p_vgstat)->st_size = (Long)( (_p_vkistat)->st_size ); \
+ (_p_vgstat)->st_blksize = (ULong)( (_p_vkistat)->st_blksize ); \
+ (_p_vgstat)->st_blocks = (ULong)( (_p_vkistat)->st_blocks ); \
+ (_p_vgstat)->st_atime = (ULong)( (_p_vkistat)->st_atime ); \
+ (_p_vgstat)->st_atime_nsec = (ULong)( (_p_vkistat)->st_atime_nsec ); \
+ (_p_vgstat)->st_mtime = (ULong)( (_p_vkistat)->st_mtime ); \
+ (_p_vgstat)->st_mtime_nsec = (ULong)( (_p_vkistat)->st_mtime_nsec ); \
+ (_p_vgstat)->st_ctime = (ULong)( (_p_vkistat)->st_ctime ); \
+ (_p_vgstat)->st_ctime_nsec = (ULong)( (_p_vkistat)->st_ctime_nsec ); \
+ } while (0)
+
/* stat() wrapper filling the platform-independent 'struct vg_stat'.
   On Linux, tries stat64 first (where available) and falls back to
   plain stat only if the kernel reports ENOSYS. */
SysRes VG_(stat) ( Char* file_name, struct vg_stat* vgbuf )
{
   SysRes res;
   VG_(memset)(vgbuf, 0, sizeof(*vgbuf));
#  if defined(VGO_linux)
#  if defined(__NR_stat64)
   { struct vki_stat64 buf64;
     res = VG_(do_syscall2)(__NR_stat64, (UWord)file_name, (UWord)&buf64);
     if (!(res.isError && res.err == VKI_ENOSYS)) {
        /* Success, or any failure except ENOSYS */
        if (!res.isError)
           TRANSLATE_TO_vg_stat(vgbuf, &buf64);
        return res;
     }
   }
#  endif /* if defined(__NR_stat64) */
   /* Fallback: the old 32-bit stat. */
   { struct vki_stat buf;
     res = VG_(do_syscall2)(__NR_stat, (UWord)file_name, (UWord)&buf);
     if (!res.isError)
        TRANSLATE_TO_vg_stat(vgbuf, &buf);
     return res;
   }
#  elif defined(VGO_aix5)
   /* AIX has no plain stat syscall; use statx and translate the
      fields we have by hand. */
   { struct vki_stat buf;
     res = VG_(do_syscall4)(__NR_AIX5_statx,
                            (UWord)file_name,
                            (UWord)&buf,
                            sizeof(struct vki_stat),
                            VKI_STX_NORMAL);
     if (!res.isError) {
        VG_(memset)(vgbuf, 0, sizeof(*vgbuf));
        vgbuf->st_dev  = (ULong)buf.st_dev;
        vgbuf->st_ino  = (ULong)buf.st_ino;
        vgbuf->st_mode = (UInt)buf.st_mode;
        vgbuf->st_uid  = (UInt)buf.st_uid;
        vgbuf->st_gid  = (UInt)buf.st_gid;
        vgbuf->st_size = (Long)buf.st_size;
     }
     return res;
   }
#  else
#    error Unknown OS
#  endif
}
+
/* fstat() wrapper filling 'struct vg_stat'.  Same fstat64-then-fstat
   fallback scheme as VG_(stat).  Returns 0 on success, -1 on error. */
Int VG_(fstat) ( Int fd, struct vg_stat* vgbuf )
{
   SysRes res;
   VG_(memset)(vgbuf, 0, sizeof(*vgbuf));
#  if defined(VGO_linux)
#  if defined(__NR_fstat64)
   { struct vki_stat64 buf64;
     res = VG_(do_syscall2)(__NR_fstat64, (UWord)fd, (UWord)&buf64);
     if (!(res.isError && res.err == VKI_ENOSYS)) {
        /* Success, or any failure except ENOSYS */
        if (!res.isError)
           TRANSLATE_TO_vg_stat(vgbuf, &buf64);
        return res.isError ? (-1) : 0;
     }
   }
#  endif /* if defined(__NR_fstat64) */
   /* Fallback: the old 32-bit fstat. */
   { struct vki_stat buf;
     res = VG_(do_syscall2)(__NR_fstat, (UWord)fd, (UWord)&buf);
     if (!res.isError)
        TRANSLATE_TO_vg_stat(vgbuf, &buf);
     return res.isError ? (-1) : 0;
   }
#  elif defined(VGO_aix5)
   I_die_here;
#  else
#    error Unknown OS
#  endif
}
+
+#undef TRANSLATE_TO_vg_stat
+
+
+Long VG_(fsize) ( Int fd )
+{
+ struct vg_stat buf;
+ Int res = VG_(fstat)( fd, &buf );
+ return (res == -1) ? (-1LL) : buf.st_size;
+}
+
+Bool VG_(is_dir) ( HChar* f )
+{
+ struct vg_stat buf;
+ SysRes res = VG_(stat)(f, &buf);
+ return res.isError ? False
+ : VKI_S_ISDIR(buf.st_mode) ? True : False;
+}
+
/* dup(2) wrapper; result/error in the SysRes. */
SysRes VG_(dup) ( Int oldfd )
{
   return VG_(do_syscall1)(__NR_dup, oldfd);
}
+
/* dup2(2) wrapper; Linux only (unimplemented on AIX5). */
SysRes VG_(dup2) ( Int oldfd, Int newfd )
{
#  if defined(VGO_linux)
   return VG_(do_syscall2)(__NR_dup2, oldfd, newfd);
#  elif defined(VGO_aix5)
   I_die_here;
#  else
#    error Unknown OS
#  endif
}
+
+/* Returns -1 on error. */
+Int VG_(fcntl) ( Int fd, Int cmd, Int arg )
+{
+ SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
+ return res.isError ? -1 : res.res;
+}
+
/* rename(2) wrapper: 0 on success, -1 on error. */
Int VG_(rename) ( Char* old_name, Char* new_name )
{
   SysRes res = VG_(do_syscall2)(__NR_rename, (UWord)old_name, (UWord)new_name);
   return res.isError ? (-1) : 0;
}
+
/* unlink(2) wrapper: 0 on success, -1 on error. */
Int VG_(unlink) ( Char* file_name )
{
   SysRes res = VG_(do_syscall1)(__NR_unlink, (UWord)file_name);
   return res.isError ? (-1) : 0;
}
+
+/* The working directory at startup. AIX doesn't provide an easy
+ system call to do getcwd, but fortunately we don't need arbitrary
+ getcwd support. All that is really needed is to note the cwd at
+ process startup. Hence VG_(record_startup_wd) notes it (in a
+ platform dependent way) and VG_(get_startup_wd) produces the noted
+ value. Hence: */
+static HChar startup_wd[VKI_PATH_MAX];
+static Bool startup_wd_acquired = False;
+
+/* Record the process' working directory at startup. Is intended to
+ be called exactly once, at startup, before the working directory
+ changes. Return True for success, False for failure, so that the
+ caller can bomb out suitably without creating module cycles if
+ there is a problem. */
/* Record the process' working directory at startup.  Is intended to
   be called exactly once, at startup, before the working directory
   changes.  Return True for success, False for failure, so that the
   caller can bomb out suitably without creating module cycles if
   there is a problem. */
Bool VG_(record_startup_wd) ( void )
{
   const Int szB = sizeof(startup_wd);
   vg_assert(!startup_wd_acquired);
   vg_assert(szB >= 512 && szB <= 16384/*let's say*/); /* stay sane */
   VG_(memset)(startup_wd, 0, szB);
#  if defined(VGO_linux)
   /* Simple: just ask the kernel */
   { SysRes res
        = VG_(do_syscall2)(__NR_getcwd, (UWord)startup_wd, szB-1);
     /* szB-1 leaves the final byte zeroed, guaranteeing termination. */
     vg_assert(startup_wd[szB-1] == 0);
     if (res.isError) {
        return False;
     } else {
        startup_wd_acquired = True;
        return True;
     }
   }
#  elif defined(VGO_aix5)
   /* We can't ask the kernel, so instead rely on launcher-aix5.c to
      tell us the startup path.  Note the env var is keyed to the
      parent's PID, not ours, since our parent is the launcher
      process. */
   { Char  envvar[100];
     Char* wd = NULL;
     VG_(memset)(envvar, 0, sizeof(envvar));
     VG_(sprintf)(envvar, "VALGRIND_STARTUP_PWD_%d_XYZZY",
                          (Int)VG_(getppid)());
     wd = VG_(getenv)( envvar );
     if (wd == NULL || (1+VG_(strlen)(wd) >= szB))
        return False;
     VG_(strncpy_safely)(startup_wd, wd, szB);
     vg_assert(startup_wd[szB-1] == 0);
     startup_wd_acquired = True;
     return True;
   }
#  else
#    error Unknown OS
#  endif
}
+
+/* Copy the previously acquired startup_wd into buf[0 .. size-1],
+ or return False if buf isn't big enough. */
+Bool VG_(get_startup_wd) ( Char* buf, SizeT size )
+{
+ vg_assert(startup_wd_acquired);
+ vg_assert(startup_wd[ sizeof(startup_wd)-1 ] == 0);
+ if (1+VG_(strlen)(startup_wd) >= size)
+ return False;
+ VG_(strncpy_safely)(buf, startup_wd, size);
+ return True;
+}
+
+Int VG_(readlink) (Char* path, Char* buf, UInt bufsiz)
+{
+ SysRes res;
+ /* res = readlink( path, buf, bufsiz ); */
+ res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+ return res.isError ? -1 : res.res;
+}
+
+Int VG_(getdents) (UInt fd, struct vki_dirent *dirp, UInt count)
+{
+ SysRes res;
+ /* res = getdents( fd, dirp, count ); */
+ res = VG_(do_syscall3)(__NR_getdents, fd, (UWord)dirp, count);
+ return res.isError ? -1 : res.res;
+}
+
+/* Check accessibility of a file. Returns zero for access granted,
+ nonzero otherwise. */
+Int VG_(access) ( HChar* path, Bool irusr, Bool iwusr, Bool ixusr )
+{
+# if defined(VGO_linux)
+ /* Very annoyingly, I cannot find any definition for R_OK et al in
+ the kernel interfaces. Therefore I reluctantly resort to
+ hardwiring in these magic numbers that I determined by
+ experimentation. */
+ UWord w = (irusr ? 4/*R_OK*/ : 0)
+ | (iwusr ? 2/*W_OK*/ : 0)
+ | (ixusr ? 1/*X_OK*/ : 0);
+ SysRes res = VG_(do_syscall2)(__NR_access, (UWord)path, w);
+ return res.isError ? 1 : 0;
+# elif defined(VGO_aix5)
+ UWord w = (irusr ? VKI_R_OK : 0)
+ | (iwusr ? VKI_W_OK : 0)
+ | (ixusr ? VKI_X_OK : 0);
+ SysRes res = VG_(do_syscall2)(__NR_access, (UWord)path, w);
+ return res.isError ? 1 : 0;
+# else
+# error "Don't know how to do VG_(access) on this OS"
+# endif
+}
+
+/*
+ Emulate the normal Unix permissions checking algorithm.
+
+ If owner matches, then use the owner permissions, else
+ if group matches, then use the group permissions, else
+ use other permissions.
+
+ Note that we can't deal properly with SUID/SGID. By default
+ (allow_setuid == False), we refuse to run them (otherwise the
+ executable may misbehave if it doesn't have the permissions it
+ thinks it does). However, the caller may indicate that setuid
+ executables are allowed, for example if we are going to exec them
+ but not trace into them (iow, client sys_execve when
+ clo_trace_children == False).
+
+ If VKI_EACCES is returned (iow, permission was refused), then
+ *is_setuid is set to True iff permission was refused because the
+ executable is setuid.
+*/
+/* returns: 0 = success, non-0 is failure */
+Int VG_(check_executable)(/*OUT*/Bool* is_setuid,
+ HChar* f, Bool allow_setuid)
+{
+ struct vg_stat st;
+ SysRes res = VG_(stat)(f, &st);
+
+ if (is_setuid)
+ *is_setuid = False;
+
+ if (res.isError) {
+ return res.err;
+ }
+
+ if ( (st.st_mode & (VKI_S_ISUID | VKI_S_ISGID)) && !allow_setuid ) {
+ if (is_setuid)
+ *is_setuid = True;
+ return VKI_EACCES;
+ }
+
+ if (VG_(geteuid)() == st.st_uid) {
+ if (!(st.st_mode & VKI_S_IXUSR))
+ return VKI_EACCES;
+ } else {
+ Int grpmatch = 0;
+
+ if (VG_(getegid)() == st.st_gid)
+ grpmatch = 1;
+ else {
+ UInt groups[32];
+ Int ngrp = VG_(getgroups)(32, groups);
+ Int i;
+ /* ngrp will be -1 if VG_(getgroups) failed. */
+ for (i = 0; i < ngrp; i++) {
+ if (groups[i] == st.st_gid) {
+ grpmatch = 1;
+ break;
+ }
+ }
+ }
+
+ if (grpmatch) {
+ if (!(st.st_mode & VKI_S_IXGRP)) {
+ return VKI_EACCES;
+ }
+ } else if (!(st.st_mode & VKI_S_IXOTH)) {
+ return VKI_EACCES;
+ }
+ }
+
+ return 0;
+}
+
+SysRes VG_(pread) ( Int fd, void* buf, Int count, OffT offset )
+{
+ OffT off = VG_(lseek)( fd, (OffT)offset, VKI_SEEK_SET);
+ if (off < 0)
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ return VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count );
+}
+
+/* Create and open (-rw------) a tmp file name incorporating said arg.
+ Returns -1 on failure, else the fd of the file. If fullname is
+ non-NULL, the file's name is written into it. The number of bytes
+ written is guaranteed not to exceed 64+strlen(part_of_name). */
+
+Int VG_(mkstemp) ( HChar* part_of_name, /*OUT*/HChar* fullname )
+{
+ HChar buf[200];
+ Int n, tries, fd;
+ UInt seed;
+ SysRes sres;
+
+ vg_assert(part_of_name);
+ n = VG_(strlen)(part_of_name);
+ vg_assert(n > 0 && n < 100);
+
+ seed = (VG_(getpid)() << 9) ^ VG_(getppid)();
+
+ tries = 0;
+ while (True) {
+ if (tries > 10)
+ return -1;
+ VG_(sprintf)( buf, "/tmp/valgrind_%s_%08x",
+ part_of_name, VG_(random)( &seed ));
+ if (0)
+ VG_(printf)("VG_(mkstemp): trying: %s\n", buf);
+
+ sres = VG_(open)(buf,
+ VKI_O_CREAT|VKI_O_RDWR|VKI_O_EXCL|VKI_O_TRUNC,
+ VKI_S_IRUSR|VKI_S_IWUSR);
+ if (sres.isError)
+ continue;
+ /* VG_(safe_fd) doesn't return if it fails. */
+ fd = VG_(safe_fd)( sres.res );
+ if (fullname)
+ VG_(strcpy)( fullname, buf );
+ return fd;
+ }
+ /* NOTREACHED */
+}
+
+
+/* ---------------------------------------------------------------------
+ Socket-related stuff. This is very Linux-kernel specific.
+ ------------------------------------------------------------------ */
+
+static
+Int parse_inet_addr_and_port ( UChar* str, UInt* ip_addr, UShort* port );
+
+static
+Int my_socket ( Int domain, Int type, Int protocol );
+
+static
+Int my_connect ( Int sockfd,
+# if defined(VGO_linux)
+ struct vki_sockaddr_in* serv_addr,
+# else
+ void* serv_addr,
+# endif
+ Int addrlen );
+
+UInt VG_(htonl) ( UInt x )
+{
+# if defined(VG_BIGENDIAN)
+ return x;
+# else
+ return
+ (((x >> 24) & 0xFF) << 0) | (((x >> 16) & 0xFF) << 8)
+ | (((x >> 8) & 0xFF) << 16) | (((x >> 0) & 0xFF) << 24);
+# endif
+}
+
+UInt VG_(ntohl) ( UInt x )
+{
+# if defined(VG_BIGENDIAN)
+ return x;
+# else
+ return
+ (((x >> 24) & 0xFF) << 0) | (((x >> 16) & 0xFF) << 8)
+ | (((x >> 8) & 0xFF) << 16) | (((x >> 0) & 0xFF) << 24);
+# endif
+}
+
+UShort VG_(htons) ( UShort x )
+{
+# if defined(VG_BIGENDIAN)
+ return x;
+# else
+ return
+ (((x >> 8) & 0xFF) << 0) | (((x >> 0) & 0xFF) << 8);
+# endif
+}
+
+UShort VG_(ntohs) ( UShort x )
+{
+# if defined(VG_BIGENDIAN)
+ return x;
+# else
+ return
+ (((x >> 8) & 0xFF) << 0) | (((x >> 0) & 0xFF) << 8);
+# endif
+}
+
+
+/* The main function.
+
+ Supplied string contains either an ip address "192.168.0.1" or
+ an ip address and port pair, "192.168.0.1:1500". Parse these,
+ and return:
+ -1 if there is a parse error
+ -2 if no parse error, but specified host:port cannot be opened
+ the relevant file (socket) descriptor, otherwise.
+ is used.
+*/
+Int VG_(connect_via_socket)( UChar* str )
+{
+#if defined(VGO_aix5)
+ I_die_here;
+#else /* Yay, Linux */
+ Int sd, res;
+ struct vki_sockaddr_in servAddr;
+ UInt ip = 0;
+ UShort port = VG_CLO_DEFAULT_LOGPORT;
+ Bool ok = parse_inet_addr_and_port(str, &ip, &port);
+ if (!ok)
+ return -1;
+
+ //if (0)
+ // VG_(printf)("ip = %d.%d.%d.%d, port %d\n",
+ // (ip >> 24) & 0xFF, (ip >> 16) & 0xFF,
+ // (ip >> 8) & 0xFF, ip & 0xFF,
+ // (UInt)port );
+
+ servAddr.sin_family = VKI_AF_INET;
+ servAddr.sin_addr.s_addr = VG_(htonl)(ip);
+ servAddr.sin_port = VG_(htons)(port);
+
+ /* create socket */
+ sd = my_socket(VKI_AF_INET, VKI_SOCK_STREAM, 0 /* IPPROTO_IP ? */);
+ if (sd < 0) {
+ /* this shouldn't happen ... nevertheless */
+ return -2;
+ }
+
+ /* connect to server */
+ res = my_connect(sd, (struct vki_sockaddr_in *) &servAddr,
+ sizeof(servAddr));
+ if (res < 0) {
+ /* connection failed */
+ return -2;
+ }
+
+ return sd;
+#endif
+}
+
+
+/* Let d = one or more digits. Accept either:
+ d.d.d.d or d.d.d.d:d
+*/
+static Int parse_inet_addr_and_port ( UChar* str, UInt* ip_addr, UShort* port )
+{
+# define GET_CH ((*str) ? (*str++) : 0)
+ UInt ipa, i, j, c, any;
+ ipa = 0;
+ for (i = 0; i < 4; i++) {
+ j = 0;
+ any = 0;
+ while (1) {
+ c = GET_CH;
+ if (c < '0' || c > '9') break;
+ j = 10 * j + (int)(c - '0');
+ any = 1;
+ }
+ if (any == 0 || j > 255) goto syntaxerr;
+ ipa = (ipa << 8) + j;
+ if (i <= 2 && c != '.') goto syntaxerr;
+ }
+ if (c == 0 || c == ':')
+ *ip_addr = ipa;
+ if (c == 0) goto ok;
+ if (c != ':') goto syntaxerr;
+ j = 0;
+ any = 0;
+ while (1) {
+ c = GET_CH;
+ if (c < '0' || c > '9') break;
+ j = j * 10 + (int)(c - '0');
+ any = 1;
+ if (j > 65535) goto syntaxerr;
+ }
+ if (any == 0 || c != 0) goto syntaxerr;
+ if (j < 1024) goto syntaxerr;
+ *port = (UShort)j;
+ ok:
+ return 1;
+ syntaxerr:
+ return 0;
+# undef GET_CH
+}
+
+static
+Int my_socket ( Int domain, Int type, Int protocol )
+{
+#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ SysRes res;
+ UWord args[3];
+ args[0] = domain;
+ args[1] = type;
+ args[2] = protocol;
+ res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_SOCKET, (UWord)&args);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_amd64_linux)
+ SysRes res;
+ res = VG_(do_syscall3)(__NR_socket, domain, type, protocol );
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+static
+Int my_connect ( Int sockfd,
+# if defined(VGO_linux)
+ struct vki_sockaddr_in* serv_addr,
+# else
+ void* serv_addr,
+# endif
+ Int addrlen )
+{
+#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ SysRes res;
+ UWord args[3];
+ args[0] = sockfd;
+ args[1] = (UWord)serv_addr;
+ args[2] = addrlen;
+ res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_CONNECT, (UWord)&args);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_amd64_linux)
+ SysRes res;
+ res = VG_(do_syscall3)(__NR_connect, sockfd, (UWord)serv_addr, addrlen);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+Int VG_(write_socket)( Int sd, void *msg, Int count )
+{
+ /* This is actually send(). */
+ /* For Linux, VKI_MSG_NOSIGNAL is a request not to send SIGPIPE on
+ errors on stream oriented sockets when the other end breaks the
+ connection. The EPIPE error is still returned. */
+
+#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ SysRes res;
+ UWord args[4];
+ args[0] = sd;
+ args[1] = (UWord)msg;
+ args[2] = count;
+ args[3] = VKI_MSG_NOSIGNAL;
+ res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_SEND, (UWord)&args);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_amd64_linux)
+ SysRes res;
+ res = VG_(do_syscall6)(__NR_sendto, sd, (UWord)msg,
+ count, VKI_MSG_NOSIGNAL, 0,0);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+Int VG_(getsockname) ( Int sd, struct vki_sockaddr *name, Int *namelen)
+{
+#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ SysRes res;
+ UWord args[3];
+ args[0] = sd;
+ args[1] = (UWord)name;
+ args[2] = (UWord)namelen;
+ res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_GETSOCKNAME, (UWord)&args);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_amd64_linux)
+ SysRes res;
+ res = VG_(do_syscall3)( __NR_getsockname,
+ (UWord)sd, (UWord)name, (UWord)namelen );
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+Int VG_(getpeername) ( Int sd, struct vki_sockaddr *name, Int *namelen)
+{
+#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ SysRes res;
+ UWord args[3];
+ args[0] = sd;
+ args[1] = (UWord)name;
+ args[2] = (UWord)namelen;
+ res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_GETPEERNAME, (UWord)&args);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_amd64_linux)
+ SysRes res;
+ res = VG_(do_syscall3)( __NR_getpeername,
+ (UWord)sd, (UWord)name, (UWord)namelen );
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+Int VG_(getsockopt) ( Int sd, Int level, Int optname, void *optval,
+ Int *optlen)
+{
+#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ SysRes res;
+ UWord args[5];
+ args[0] = sd;
+ args[1] = level;
+ args[2] = optname;
+ args[3] = (UWord)optval;
+ args[4] = (UWord)optlen;
+ res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_GETSOCKOPT, (UWord)&args);
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_amd64_linux)
+ SysRes res;
+ res = VG_(do_syscall5)( __NR_getsockopt,
+ (UWord)sd, (UWord)level, (UWord)optname,
+ (UWord)optval, (UWord)optlen );
+ return res.isError ? -1 : res.res;
+
+#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ I_die_here;
+
+#else
+# error Unknown arch
+#endif
+}
+
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/m_libcprint.c.svn-base b/coregrind/.svn/text-base/m_libcprint.c.svn-base
new file mode 100644
index 0000000..8783f9c
--- /dev/null
+++ b/coregrind/.svn/text-base/m_libcprint.c.svn-base
@@ -0,0 +1,393 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Libc printing. m_libcprint.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h" // VG_(write)(), VG_(write_socket)()
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h" // VG_(getpid)(), VG_(read_millisecond_timer()
+#include "pub_core_options.h"
+#include "valgrind.h" // For RUNNING_ON_VALGRIND
+
+
+
+/* ---------------------------------------------------------------------
+ Writing to file or a socket
+ ------------------------------------------------------------------ */
+
+/* Tell the logging mechanism whether we are logging to a file
+ descriptor or a socket descriptor. */
+Bool VG_(logging_to_socket) = False;
+
+/* Do the low-level send of a message to the logging sink. */
+static void send_bytes_to_logging_sink ( Char* msg, Int nbytes )
+{
+ if (!VG_(logging_to_socket)) {
+ /* VG_(clo_log_fd) could have been set to -1 in the various
+ sys-wrappers for sys_fork, if --child-silent-after-fork=yes
+ is in effect. That is a signal that we should not produce
+ any more output. */
+ if (VG_(clo_log_fd) >= 0)
+ VG_(write)( VG_(clo_log_fd), msg, nbytes );
+ } else {
+ Int rc = VG_(write_socket)( VG_(clo_log_fd), msg, nbytes );
+ if (rc == -1) {
+ // For example, the listener process died. Switch back to stderr.
+ VG_(logging_to_socket) = False;
+ VG_(clo_log_fd) = 2;
+ VG_(write)( VG_(clo_log_fd), msg, nbytes );
+ }
+ }
+}
+
+/* ---------------------------------------------------------------------
+ printf() and friends
+ ------------------------------------------------------------------ */
+
+typedef
+ struct {
+ HChar buf[128];
+ Int n;
+ }
+ printf_buf;
+
+static UInt vprintf_to_buf ( printf_buf *prbuf,
+ const HChar *format, va_list vargs );
+static UInt printf_to_buf ( printf_buf* prbuf, const HChar *format, ... );
+
+// Adds a single char to the buffer. When the buffer gets sufficiently
+// full, we write its contents to the logging sink.
+static void add_to_myprintf_buf ( HChar c, void *p )
+{
+ printf_buf *myprintf_buf = (printf_buf *)p;
+
+ if (myprintf_buf->n > sizeof(myprintf_buf->buf) - 2 ) {
+ send_bytes_to_logging_sink( myprintf_buf->buf, myprintf_buf->n );
+ myprintf_buf->n = 0;
+ }
+ myprintf_buf->buf[myprintf_buf->n++] = c;
+ myprintf_buf->buf[myprintf_buf->n] = 0;
+ tl_assert(myprintf_buf->n < sizeof(myprintf_buf->buf));
+}
+
+UInt VG_(vprintf) ( const HChar *format, va_list vargs )
+{
+ UInt ret = 0;
+ printf_buf myprintf_buf = {"",0};
+
+ ret = vprintf_to_buf(&myprintf_buf, format, vargs);
+ // Write out any chars left in the buffer.
+ if (myprintf_buf.n > 0) {
+ send_bytes_to_logging_sink( myprintf_buf.buf, myprintf_buf.n );
+ }
+ return ret;
+}
+
+static UInt vprintf_to_buf ( printf_buf *prbuf,
+ const HChar *format, va_list vargs )
+{
+ UInt ret = 0;
+
+ if (VG_(clo_log_fd) >= 0) {
+ ret = VG_(debugLog_vprintf)
+ ( add_to_myprintf_buf, prbuf, format, vargs );
+ }
+ return ret;
+}
+
+UInt VG_(printf) ( const HChar *format, ... )
+{
+ UInt ret;
+ va_list vargs;
+
+ va_start(vargs, format);
+ ret = VG_(vprintf)(format, vargs);
+ va_end(vargs);
+
+ return ret;
+}
+
+static UInt printf_to_buf ( printf_buf* prbuf, const HChar *format, ... )
+{
+ UInt ret;
+ va_list vargs;
+
+ va_start(vargs, format);
+ ret = vprintf_to_buf(prbuf, format, vargs);
+ va_end(vargs);
+
+ return ret;
+}
+
+/* A general replacement for sprintf(). */
+static void add_to_vg_sprintf_buf ( HChar c, void *p )
+{
+ char **vg_sprintf_ptr = p;
+ *(*vg_sprintf_ptr)++ = c;
+}
+
+UInt VG_(vsprintf) ( Char* buf, const HChar *format, va_list vargs )
+{
+ Int ret;
+ Char *vg_sprintf_ptr = buf;
+
+ ret = VG_(debugLog_vprintf)
+ ( add_to_vg_sprintf_buf, &vg_sprintf_ptr, format, vargs );
+ add_to_vg_sprintf_buf('\0', &vg_sprintf_ptr);
+
+ vg_assert(VG_(strlen)(buf) == ret);
+
+ return ret;
+}
+
+UInt VG_(sprintf) ( Char* buf, const HChar *format, ... )
+{
+ UInt ret;
+ va_list vargs;
+
+ va_start(vargs,format);
+ ret = VG_(vsprintf)(buf, format, vargs);
+ va_end(vargs);
+
+ return ret;
+}
+
+
+/* A replacement for snprintf. */
+typedef
+ struct {
+ HChar* buf;
+ Int buf_size;
+ Int buf_used;
+ }
+ snprintf_buf;
+
+static void add_to_vg_snprintf_buf ( HChar c, void* p )
+{
+ snprintf_buf* b = p;
+ if (b->buf_size > 0 && b->buf_used < b->buf_size) {
+ b->buf[b->buf_used++] = c;
+ if (b->buf_used < b->buf_size)
+ b->buf[b->buf_used] = 0;
+ else
+ b->buf[b->buf_size-1] = 0; /* pre: b->buf_size > 0 */
+ }
+}
+
+UInt VG_(vsnprintf) ( Char* buf, Int size, const HChar *format, va_list vargs )
+{
+ Int ret;
+ snprintf_buf b;
+ b.buf = buf;
+ b.buf_size = size < 0 ? 0 : size;
+ b.buf_used = 0;
+
+ ret = VG_(debugLog_vprintf)
+ ( add_to_vg_snprintf_buf, &b, format, vargs );
+
+ return b.buf_used;
+}
+
+UInt VG_(snprintf) ( Char* buf, Int size, const HChar *format, ... )
+{
+ UInt ret;
+ va_list vargs;
+
+ va_start(vargs,format);
+ ret = VG_(vsnprintf)(buf, size, format, vargs);
+ va_end(vargs);
+
+ return ret;
+}
+
+
+/* ---------------------------------------------------------------------
+ percentify()
+ ------------------------------------------------------------------ */
+
+// Percentify n/m with d decimal places. Includes the '%' symbol at the end.
+// Right justifies in 'buf'.
+void VG_(percentify)(ULong n, ULong m, UInt d, Int n_buf, char buf[])
+{
+ Int i, len, space;
+ ULong p1;
+ Char fmt[32];
+
+ if (m == 0) {
+ // Have to generate the format string in order to be flexible about
+ // the width of the field.
+ VG_(sprintf)(fmt, "%%-%ds", n_buf);
+ // fmt is now "%<n_buf>s" where <d> is 1,2,3...
+ VG_(sprintf)(buf, fmt, "--%");
+ return;
+ }
+
+ p1 = (100*n) / m;
+
+ if (d == 0) {
+ VG_(sprintf)(buf, "%lld%%", p1);
+ } else {
+ ULong p2;
+ UInt ex;
+ switch (d) {
+ case 1: ex = 10; break;
+ case 2: ex = 100; break;
+ case 3: ex = 1000; break;
+ default: VG_(tool_panic)("Currently can only handle 3 decimal places");
+ }
+ p2 = ((100*n*ex) / m) % ex;
+ // Have to generate the format string in order to be flexible about
+ // the width of the post-decimal-point part.
+ VG_(sprintf)(fmt, "%%lld.%%0%dlld%%%%", d);
+ // fmt is now "%lld.%0<d>lld%%" where <d> is 1,2,3...
+ VG_(sprintf)(buf, fmt, p1, p2);
+ }
+
+ len = VG_(strlen)(buf);
+ space = n_buf - len;
+ if (space < 0) space = 0; /* Allow for v. small field_width */
+ i = len;
+
+ /* Right justify in field */
+ for ( ; i >= 0; i--) buf[i + space] = buf[i];
+ for (i = 0; i < space; i++) buf[i] = ' ';
+}
+
+
+/* ---------------------------------------------------------------------
+ elapsed_wallclock_time()
+ ------------------------------------------------------------------ */
+
+/* Get the elapsed wallclock time since startup into buf, which must
+ 16 chars long. This is unchecked. It also relies on the
+ millisecond timer having been set to zero by an initial read in
+ m_main during startup. */
+
+void VG_(elapsed_wallclock_time) ( /*OUT*/HChar* buf )
+{
+ UInt t, ms, s, mins, hours, days;
+
+ t = VG_(read_millisecond_timer)(); /* milliseconds */
+
+ ms = t % 1000;
+ t /= 1000; /* now in seconds */
+
+ s = t % 60;
+ t /= 60; /* now in minutes */
+
+ mins = t % 60;
+ t /= 60; /* now in hours */
+
+ hours = t % 24;
+ t /= 24; /* now in days */
+
+ days = t;
+
+ VG_(sprintf)(buf, "%02u:%02u:%02u:%02u.%03u", days, hours, mins, s, ms);
+}
+
+
+/* ---------------------------------------------------------------------
+ message()
+ ------------------------------------------------------------------ */
+
+UInt VG_(vmessage) ( VgMsgKind kind, const HChar* format, va_list vargs )
+{
+ UInt count = 0;
+ Char c;
+ Int i, depth;
+ printf_buf myprintf_buf = {"",0};
+
+ switch (kind) {
+ case Vg_UserMsg: c = '='; break;
+ case Vg_DebugMsg: c = '-'; break;
+ case Vg_DebugExtraMsg: c = '+'; break;
+ case Vg_ClientMsg: c = '*'; break;
+ default: c = '?'; break;
+ }
+
+ // Print one '>' in front of the messages for each level of self-hosting
+ // being performed.
+ depth = RUNNING_ON_VALGRIND;
+ for (i = 0; i < depth; i++) {
+ count += printf_to_buf (&myprintf_buf, ">");
+ }
+
+ if (!VG_(clo_xml))
+ count += printf_to_buf (&myprintf_buf, "%c%c", c,c);
+
+ if (VG_(clo_time_stamp)) {
+ HChar buf[50];
+ VG_(elapsed_wallclock_time)(buf);
+ count += printf_to_buf(&myprintf_buf, "%s ", buf);
+ }
+
+ if (!VG_(clo_xml))
+ count += printf_to_buf (&myprintf_buf, "%d%c%c ", VG_(getpid)(), c,c);
+
+ count += vprintf_to_buf (&myprintf_buf, format, vargs);
+ count += printf_to_buf (&myprintf_buf, "\n");
+
+ if (myprintf_buf.n > 0) {
+ send_bytes_to_logging_sink( myprintf_buf.buf, myprintf_buf.n );
+ }
+
+ return count;
+}
+
+/* Send a simple single-part XML message. */
+UInt VG_(message_no_f_c) ( VgMsgKind kind, const HChar* format, ... )
+{
+ UInt count;
+ va_list vargs;
+ va_start(vargs,format);
+ count = VG_(vmessage) ( kind, format, vargs );
+ va_end(vargs);
+ return count;
+}
+
+/* Send a simple single-part message. */
+UInt VG_(message) ( VgMsgKind kind, const HChar* format, ... )
+{
+ UInt count;
+ va_list vargs;
+ va_start(vargs,format);
+ count = VG_(vmessage) ( kind, format, vargs );
+ va_end(vargs);
+ return count;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/m_libcproc.c.svn-base b/coregrind/.svn/text-base/m_libcproc.c.svn-base
new file mode 100644
index 0000000..0af540a
--- /dev/null
+++ b/coregrind/.svn/text-base/m_libcproc.c.svn-base
@@ -0,0 +1,644 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Process-related libc stuff. m_libcproc.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_seqmatch.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_syscall.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+
+/* ---------------------------------------------------------------------
+ Command line and environment stuff
+ ------------------------------------------------------------------ */
+
+/* As deduced from sp_at_startup, the client's argc, argv[] and
+ envp[] as extracted from the client's stack at startup-time. */
+Char** VG_(client_envp) = NULL;
+
+/* Path to library directory */
+const Char *VG_(libdir) = VG_LIBDIR;
+
+/* We do getenv without libc's help by snooping around in
+ VG_(client_envp) as determined at startup time. */
+Char *VG_(getenv)(Char *varname)
+{
+ Int i, n;
+ vg_assert( VG_(client_envp) );
+ n = VG_(strlen)(varname);
+ for (i = 0; VG_(client_envp)[i] != NULL; i++) {
+ Char* s = VG_(client_envp)[i];
+ if (VG_(strncmp)(varname, s, n) == 0 && s[n] == '=') {
+ return & s[n+1];
+ }
+ }
+ return NULL;
+}
+
+void VG_(env_unsetenv) ( Char **env, const Char *varname )
+{
+ Char **from;
+ Char **to = NULL;
+ Int len = VG_(strlen)(varname);
+
+ for (from = to = env; from && *from; from++) {
+ if (!(VG_(strncmp)(varname, *from, len) == 0 && (*from)[len] == '=')) {
+ *to = *from;
+ to++;
+ }
+ }
+ *to = *from;
+}
+
+/* set the environment; returns the old env if a new one was allocated */
+Char **VG_(env_setenv) ( Char ***envp, const Char* varname, const Char *val )
+{
+ Char **env = (*envp);
+ Char **cpp;
+ Int len = VG_(strlen)(varname);
+ Char *valstr = VG_(arena_malloc)(VG_AR_CORE, "libcproc.es.1",
+ len + VG_(strlen)(val) + 2);
+ Char **oldenv = NULL;
+
+ VG_(sprintf)(valstr, "%s=%s", varname, val);
+
+ for (cpp = env; cpp && *cpp; cpp++) {
+ if (VG_(strncmp)(varname, *cpp, len) == 0 && (*cpp)[len] == '=') {
+ *cpp = valstr;
+ return oldenv;
+ }
+ }
+
+ if (env == NULL) {
+ env = VG_(arena_malloc)(VG_AR_CORE, "libcproc.es.2", sizeof(Char **) * 2);
+ env[0] = valstr;
+ env[1] = NULL;
+
+ *envp = env;
+
+ } else {
+ Int envlen = (cpp-env) + 2;
+ Char **newenv = VG_(arena_malloc)(VG_AR_CORE, "libcproc.es.3",
+ envlen * sizeof(Char **));
+
+ for (cpp = newenv; *env; )
+ *cpp++ = *env++;
+ *cpp++ = valstr;
+ *cpp++ = NULL;
+
+ oldenv = *envp;
+
+ *envp = newenv;
+ }
+
+ return oldenv;
+}
+
+/* Walk through a colon-separated environment variable, and remove the
+ entries which match remove_pattern. It slides everything down over
+ the removed entries, and pads the remaining space with '\0'. It
+ modifies the entries in place (in the client address space), but it
+ shouldn't matter too much, since we only do this just before an
+ execve().
+
+ This is also careful to mop up any excess ':'s, since empty strings
+ delimited by ':' are considered to be '.' in a path.
+*/
+static void mash_colon_env(Char *varp, const Char *remove_pattern)
+{
+ Char *const start = varp;
+ Char *entry_start = varp;
+ Char *output = varp;
+
+ if (varp == NULL)
+ return;
+
+ while(*varp) {
+ if (*varp == ':') {
+ Char prev;
+ Bool match;
+
+ /* This is a bit subtle: we want to match against the entry
+ we just copied, because it may have overlapped with
+ itself, junking the original. */
+
+ prev = *output;
+ *output = '\0';
+
+ match = VG_(string_match)(remove_pattern, entry_start);
+
+ *output = prev;
+
+ if (match) {
+ output = entry_start;
+ varp++; /* skip ':' after removed entry */
+ } else
+ entry_start = output+1; /* entry starts after ':' */
+ }
+
+ *output++ = *varp++;
+ }
+
+ /* match against the last entry */
+ if (VG_(string_match)(remove_pattern, entry_start)) {
+ output = entry_start;
+ if (output > start) {
+ /* remove trailing ':' */
+ output--;
+ vg_assert(*output == ':');
+ }
+ }
+
+ /* pad out the left-overs with '\0' */
+ while(output < varp)
+ *output++ = '\0';
+}
+
+
+// Removes all the Valgrind-added stuff from the passed environment. Used
+// when starting child processes, so they don't see that added stuff.
+void VG_(env_remove_valgrind_env_stuff)(Char** envp)
+{
+ Int i;
+ Char* ld_preload_str = NULL;
+ Char* ld_library_path_str = NULL;
+ Char* buf;
+
+ // Find LD_* variables
+ for (i = 0; envp[i] != NULL; i++) {
+ if (VG_(strncmp)(envp[i], "LD_PRELOAD=", 11) == 0)
+ ld_preload_str = &envp[i][11];
+ if (VG_(strncmp)(envp[i], "LD_LIBRARY_PATH=", 16) == 0)
+ ld_library_path_str = &envp[i][16];
+ }
+
+ buf = VG_(arena_malloc)(VG_AR_CORE, "libcproc.erves.1",
+ VG_(strlen)(VG_(libdir)) + 20);
+
+ // Remove Valgrind-specific entries from LD_*.
+ VG_(sprintf)(buf, "%s*/vgpreload_*.so", VG_(libdir));
+ mash_colon_env(ld_preload_str, buf);
+ VG_(sprintf)(buf, "%s*", VG_(libdir));
+ mash_colon_env(ld_library_path_str, buf);
+
+ // Remove VALGRIND_LAUNCHER variable.
+ VG_(env_unsetenv)(envp, VALGRIND_LAUNCHER);
+
+ // XXX if variable becomes empty, remove it completely?
+
+ VG_(arena_free)(VG_AR_CORE, buf);
+}
+
+/* ---------------------------------------------------------------------
+ Various important syscall wrappers
+ ------------------------------------------------------------------ */
+
+Int VG_(waitpid)(Int pid, Int *status, Int options)
+{
+# if defined(VGO_linux)
+ SysRes res = VG_(do_syscall4)(__NR_wait4, pid, (UWord)status, options, 0);
+ return res.isError ? -1 : res.res;
+# elif defined(VGO_aix5)
+ /* magic number 4 obtained by truss-ing a C program doing
+ 'waitpid'. Note status and pid args opposite way round from
+ POSIX. */
+ SysRes res = VG_(do_syscall5)(__NR_AIX5_kwaitpid,
+ (UWord)status, pid, 4 | options,0,0);
+ if (0) VG_(printf)("waitpid: got 0x%lx 0x%lx\n", res.res, res.err);
+ return res.isError ? -1 : res.res;
+# else
+# error Unknown OS
+# endif
+}
+
+/* clone the environment */
+Char **VG_(env_clone) ( Char **oldenv )
+{
+ Char **oldenvp;
+ Char **newenvp;
+ Char **newenv;
+ Int envlen;
+
+ for (oldenvp = oldenv; oldenvp && *oldenvp; oldenvp++);
+
+ envlen = oldenvp - oldenv + 1;
+
+ newenv = VG_(arena_malloc)(VG_AR_CORE, "libcproc.ec.1",
+ envlen * sizeof(Char **));
+
+ oldenvp = oldenv;
+ newenvp = newenv;
+
+ while (oldenvp && *oldenvp) {
+ *newenvp++ = *oldenvp++;
+ }
+
+ *newenvp = *oldenvp;
+
+ return newenv;
+}
+
+/* Exec 'filename' with arguments 'argv', passing the child a cloned
+   copy of the client's environment with the Valgrind-specific entries
+   stripped out.  Only returns if the execve syscall failed, in which
+   case a diagnostic is printed. */
+void VG_(execv) ( Char* filename, Char** argv )
+{
+   Char** envp;
+   SysRes res;
+
+   /* restore the DATA rlimit for the child */
+   VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
+
+   envp = VG_(env_clone)(VG_(client_envp));
+   VG_(env_remove_valgrind_env_stuff)( envp );
+
+   res = VG_(do_syscall3)(__NR_execve,
+                          (UWord)filename, (UWord)argv, (UWord)envp);
+
+   /* Reaching here means execve failed. */
+   VG_(printf)("EXEC failed, errno = %ld\n", res.res);
+}
+
+/* Return -1 if error, else 0.  NOTE does not indicate return code of
+   child! */
+Int VG_(system) ( Char* cmd )
+{
+   Int pid;
+   /* NOTE(review): a NULL cmd yields 1, unlike the -1 used for every
+      other failure in this function -- confirm callers expect this. */
+   if (cmd == NULL)
+      return 1;
+   pid = VG_(fork)();
+   if (pid < 0)
+      return -1;
+   if (pid == 0) {
+      /* child: run the command through the shell */
+      Char* argv[4] = { "/bin/sh", "-c", cmd, 0 };
+      VG_(execv)(argv[0], argv);
+
+      /* If we're still alive here, execve failed. */
+      VG_(exit)(1);
+   } else {
+      /* parent */
+      Int ir, zzz;
+      /* We have to set SIGCHLD to its default behaviour in order that
+         VG_(waitpid) works (at least on AIX).  According to the Linux
+         man page for waitpid:
+
+         POSIX.1-2001 specifies that if the disposition of SIGCHLD is
+         set to SIG_IGN or the SA_NOCLDWAIT flag is set for SIGCHLD
+         (see sigaction(2)), then children that terminate do not
+         become zombies and a call to wait() or waitpid() will block
+         until all children have terminated, and then fail with errno
+         set to ECHILD.  (The original POSIX standard left the
+         behaviour of setting SIGCHLD to SIG_IGN unspecified.)
+      */
+      struct vki_sigaction sa, saved_sa;
+      VG_(memset)( &sa, 0, sizeof(struct vki_sigaction) );
+      VG_(sigemptyset)(&sa.sa_mask);
+      sa.ksa_handler = VKI_SIG_DFL;
+      sa.sa_flags = 0;
+      ir = VG_(sigaction)(VKI_SIGCHLD, &sa, &saved_sa);
+      vg_assert(ir == 0);
+
+      zzz = VG_(waitpid)(pid, NULL, 0);
+
+      /* Put the original SIGCHLD disposition back. */
+      ir = VG_(sigaction)(VKI_SIGCHLD, &saved_sa, NULL);
+      vg_assert(ir == 0);
+
+      return zzz == -1 ? -1 : 0;
+   }
+}
+
+/* ---------------------------------------------------------------------
+ Resource limits
+ ------------------------------------------------------------------ */
+
+/* Support for getrlimit.  Returns 0 on success, -1 on error.  Tries
+   the ugetrlimit syscall first (where it exists) and falls back to
+   plain getrlimit if the kernel reports ENOSYS. */
+Int VG_(getrlimit) (Int resource, struct vki_rlimit *rlim)
+{
+   SysRes res = VG_(mk_SysRes_Error)(VKI_ENOSYS);
+   /* res = getrlimit( resource, rlim ); */
+# ifdef __NR_ugetrlimit
+   res = VG_(do_syscall2)(__NR_ugetrlimit, resource, (UWord)rlim);
+# endif
+   if (res.isError && res.err == VKI_ENOSYS)
+      res = VG_(do_syscall2)(__NR_getrlimit, resource, (UWord)rlim);
+   return res.isError ? -1 : res.res;
+}
+
+
+/* Support for setrlimit.  Returns 0 on success, -1 on error. */
+Int VG_(setrlimit) (Int resource, const struct vki_rlimit *rlim)
+{
+   SysRes res;
+   /* res = setrlimit( resource, rlim ); */
+   res = VG_(do_syscall2)(__NR_setrlimit, resource, (UWord)rlim);
+   return res.isError ? -1 : res.res;
+}
+
+/* ---------------------------------------------------------------------
+ pids, etc
+ ------------------------------------------------------------------ */
+
+/* Thread id of the calling (host) thread. */
+Int VG_(gettid)(void)
+{
+# if defined(VGO_aix5)
+   SysRes res;
+   Int    r;
+   vg_assert(__NR_AIX5__thread_self != __NR_AIX5_UNKNOWN);
+   res = VG_(do_syscall0)(__NR_AIX5__thread_self);
+   r = res.res;
+   return r;
+
+# else
+   SysRes res = VG_(do_syscall0)(__NR_gettid);
+
+   /* NOTE(review): the sibling wrappers in this file (getrlimit,
+      tkill) test res.err for ENOSYS; this one tests res.res --
+      confirm which field carries the error code here. */
+   if (res.isError && res.res == VKI_ENOSYS) {
+      Char pid[16];
+      /*
+       * The gettid system call does not exist. The obvious assumption
+       * to make at this point would be that we are running on an older
+       * system where the getpid system call actually returns the ID of
+       * the current thread.
+       *
+       * Unfortunately it seems that there are some systems with a kernel
+       * where getpid has been changed to return the ID of the thread group
+       * leader but where the gettid system call has not yet been added.
+       *
+       * So instead of calling getpid here we use readlink to see where
+       * the /proc/self link is pointing...
+       */
+
+      res = VG_(do_syscall3)(__NR_readlink, (UWord)"/proc/self",
+                             (UWord)pid, sizeof(pid));
+      if (!res.isError && res.res > 0) {
+         Char* s;
+         pid[res.res] = '\0';
+         res.res = VG_(strtoll10)(pid, &s);
+         if (*s != '\0') {
+            VG_(message)(Vg_DebugMsg,
+               "Warning: invalid file name linked to by /proc/self: %s", pid);
+         }
+      }
+   }
+
+   return res.res;
+# endif
+}
+
+/* You'd be amazed how many places need to know the current pid. */
+Int VG_(getpid) ( void )
+{
+   /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+   return VG_(do_syscall0)(__NR_getpid) . res;
+}
+
+/* Process group id of the calling process. */
+Int VG_(getpgrp) ( void )
+{
+   /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+   return VG_(do_syscall0)(__NR_getpgrp) . res;
+}
+
+/* Parent process id of the calling process. */
+Int VG_(getppid) ( void )
+{
+   /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+   return VG_(do_syscall0)(__NR_getppid) . res;
+}
+
+/* Effective user id of the calling process. */
+Int VG_(geteuid) ( void )
+{
+   /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+   /* AIX: getuidx(1) gives the effective uid. */
+   return VG_(do_syscall1)(__NR_AIX5_getuidx, 1) . res;
+# elif defined(__NR_geteuid32)
+   // We use the 32-bit version if it's supported.  Otherwise, IDs greater
+   // than 65536 cause problems, as bug #151209 showed.
+   return VG_(do_syscall0)(__NR_geteuid32) . res;
+# else
+   return VG_(do_syscall0)(__NR_geteuid) . res;
+# endif
+}
+
+/* Effective group id of the calling process. */
+Int VG_(getegid) ( void )
+{
+   /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+   /* AIX: getgidx(1) gives the effective gid. */
+   return VG_(do_syscall1)(__NR_AIX5_getgidx, 1) . res;
+# elif defined(__NR_getegid32)
+   // We use the 32-bit version if it's supported.  Otherwise, IDs greater
+   // than 65536 cause problems, as bug #151209 showed.
+   return VG_(do_syscall0)(__NR_getegid32) . res;
+# else
+   return VG_(do_syscall0)(__NR_getegid) . res;
+# endif
+}
+
+/* Get supplementary groups into list[0 .. size-1].  Returns the
+   number of groups written, or -1 if error.  Note that in order to be
+   portable, the groups are 32-bit unsigned ints regardless of the
+   platform. */
+Int VG_(getgroups)( Int size, UInt* list )
+{
+# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux)
+   /* These platforms' getgroups syscall traffics in 16-bit gids, so
+      fetch into a 16-bit staging array (capped at 64 entries) and
+      widen each entry into the caller's 32-bit list. */
+   Int i;
+   SysRes sres;
+   UShort list16[64];
+   if (size < 0) return -1;
+   if (size > 64) size = 64;
+   sres = VG_(do_syscall2)(__NR_getgroups, size, (Addr)list16);
+   if (sres.isError)
+      return -1;
+   if (sres.res > size)
+      return -1;
+   for (i = 0; i < sres.res; i++)
+      list[i] = (UInt)list16[i];
+   return sres.res;
+
+# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
+       || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+   /* gids are already 32 bits wide here; fetch directly. */
+   SysRes sres;
+   sres = VG_(do_syscall2)(__NR_getgroups, size, (Addr)list);
+   if (sres.isError)
+      return -1;
+   return sres.res;
+
+# else
+# error "VG_(getgroups): needs implementation on this platform"
+# endif
+}
+
+/* ---------------------------------------------------------------------
+ Process tracing
+ ------------------------------------------------------------------ */
+
+/* Thin wrapper around the ptrace syscall.  Returns the syscall
+   result, or -1 on error. */
+Int VG_(ptrace) ( Int request, Int pid, void *addr, void *data )
+{
+   SysRes res;
+   res = VG_(do_syscall4)(__NR_ptrace, request, pid, (UWord)addr, (UWord)data);
+   if (res.isError)
+      return -1;
+   return res.res;
+}
+
+/* ---------------------------------------------------------------------
+ Fork
+ ------------------------------------------------------------------ */
+
+/* Fork the calling process.  Returns the syscall result (child pid in
+   the parent, 0 in the child) or -1 on error. */
+Int VG_(fork) ( void )
+{
+   SysRes res;
+   res = VG_(do_syscall0)(__NR_fork);
+   if (res.isError)
+      return -1;
+   return res.res;
+}
+
+/* ---------------------------------------------------------------------
+ Timing stuff
+ ------------------------------------------------------------------ */
+
+/* Milliseconds elapsed since the first call to this function (the
+   static 'base' is captured on first use, so the first call returns
+   0). */
+UInt VG_(read_millisecond_timer) ( void )
+{
+   /* 'now' and 'base' are in microseconds */
+   static ULong base = 0;
+   ULong now;
+
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+   /* AIX requires a totally different implementation since
+      sys_gettimeofday doesn't exist.  We use the POWER real-time
+      register facility.  This will SIGILL on PowerPC 970 on AIX,
+      since PowerPC doesn't support these instructions. */
+   UWord nsec, sec1, sec2;
+   /* Re-read until the upper (seconds) register is stable across the
+      lower read, guarding against a carry mid-sequence. */
+   while (1) {
+      __asm__ __volatile__ ("\n"
+         "\tmfspr %0,4\n" /* 4==RTCU */
+         "\tmfspr %1,5\n" /* 5==RTCL */
+         "\tmfspr %2,4\n" /* 4==RTCU */
+         : "=b" (sec1), "=b" (nsec), "=b" (sec2)
+      );
+      if (sec1 == sec2) break;
+   }
+   vg_assert(nsec < 1000*1000*1000);
+   now = ((ULong)sec1) * 1000000ULL;
+   now += (ULong)(nsec / 1000);
+# else
+
+   /* Prefer the monotonic clock; fall back to gettimeofday if
+      clock_gettime fails. */
+   struct vki_timespec ts_now;
+   SysRes res;
+   res = VG_(do_syscall2)(__NR_clock_gettime, VKI_CLOCK_MONOTONIC,
+                          (UWord)&ts_now);
+   if (res.isError == 0)
+   {
+      now = ts_now.tv_sec * 1000000ULL + ts_now.tv_nsec / 1000;
+   }
+   else
+   {
+      struct vki_timeval tv_now;
+      res = VG_(do_syscall2)(__NR_gettimeofday, (UWord)&tv_now, (UWord)NULL);
+      vg_assert(! res.isError);
+      now = tv_now.tv_sec * 1000000ULL + tv_now.tv_usec;
+   }
+# endif
+
+   if (base == 0)
+      base = now;
+
+   return (now - base) / 1000;
+}
+
+/* ---------------------------------------------------------------------
+ atfork()
+ ------------------------------------------------------------------ */
+
+/* One registered fork-handler trio.  Any member may be NULL, meaning
+   "no callback for that phase" (see VG_(do_atfork_*) below). */
+struct atfork {
+   vg_atfork_t pre;
+   vg_atfork_t parent;
+   vg_atfork_t child;
+};
+
+/* Fixed-capacity registry; VG_(atfork) panics if it fills up. */
+#define VG_MAX_ATFORK 10
+
+static struct atfork atforks[VG_MAX_ATFORK];
+static Int n_atfork = 0;
+
+/* Register a (pre, parent, child) fork-handler trio.  Registering an
+   identical trio twice is a harmless no-op; exceeding VG_MAX_ATFORK
+   slots is fatal. */
+void VG_(atfork)(vg_atfork_t pre, vg_atfork_t parent, vg_atfork_t child)
+{
+   Int i;
+
+   /* Ignore exact duplicates. */
+   for (i = 0; i < n_atfork; i++) {
+      if (atforks[i].pre == pre &&
+          atforks[i].parent == parent &&
+          atforks[i].child == child)
+         return;
+   }
+
+   if (n_atfork >= VG_MAX_ATFORK)
+      VG_(core_panic)(
+         "Too many VG_(atfork) handlers requested: raise VG_MAX_ATFORK");
+
+   atforks[n_atfork].pre    = pre;
+   atforks[n_atfork].parent = parent;
+   atforks[n_atfork].child  = child;
+
+   n_atfork++;
+}
+
+/* Run all registered pre-fork handlers, in registration order. */
+void VG_(do_atfork_pre)(ThreadId tid)
+{
+   Int i;
+
+   for (i = 0; i < n_atfork; i++)
+      if (atforks[i].pre != NULL)
+         (*atforks[i].pre)(tid);
+}
+
+/* Run all registered parent-side post-fork handlers. */
+void VG_(do_atfork_parent)(ThreadId tid)
+{
+   Int i;
+
+   for (i = 0; i < n_atfork; i++)
+      if (atforks[i].parent != NULL)
+         (*atforks[i].parent)(tid);
+}
+
+/* Run all registered child-side post-fork handlers. */
+void VG_(do_atfork_child)(ThreadId tid)
+{
+   Int i;
+
+   for (i = 0; i < n_atfork; i++)
+      if (atforks[i].child != NULL)
+         (*atforks[i].child)(tid);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_libcsignal.c.svn-base b/coregrind/.svn/text-base/m_libcsignal.c.svn-base
new file mode 100644
index 0000000..0fbc353
--- /dev/null
+++ b/coregrind/.svn/text-base/m_libcsignal.c.svn-base
@@ -0,0 +1,309 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Signal-related libc stuff. m_libcsignal.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_syscall.h"
+#include "pub_core_libcsignal.h" /* self */
+
+/* sigemptyset, sigfullset, sigaddset and sigdelset return 0 on
+ success and -1 on error. */
+/* I believe the indexing scheme in ->sig[] is also correct for
+ 32- and 64-bit AIX (verified 27 July 06). */
+
+/* Set every signal in 'set'. */
+Int VG_(sigfillset)( vki_sigset_t* set )
+{
+   Int i;
+   if (set == NULL)
+      return -1;
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      set->sig[i] = ~(UWord)0x0;
+   return 0;
+}
+
+/* Clear every signal in 'set'. */
+Int VG_(sigemptyset)( vki_sigset_t* set )
+{
+   Int i;
+   if (set == NULL)
+      return -1;
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      set->sig[i] = 0x0;
+   return 0;
+}
+
+/* True iff no signal is set in 'set'. */
+Bool VG_(isemptysigset)( const vki_sigset_t* set )
+{
+   Int i;
+   vg_assert(set != NULL);
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      if (set->sig[i] != 0x0) return False;
+   return True;
+}
+
+/* True iff every signal is set in 'set'. */
+Bool VG_(isfullsigset)( const vki_sigset_t* set )
+{
+   Int i;
+   vg_assert(set != NULL);
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      if (set->sig[i] != ~(UWord)0x0) return False;
+   return True;
+}
+
+/* True iff 'set1' and 'set2' contain exactly the same signals. */
+Bool VG_(iseqsigset)( const vki_sigset_t* set1, const vki_sigset_t* set2 )
+{
+   Int i;
+   vg_assert(set1 != NULL && set2 != NULL);
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      if (set1->sig[i] != set2->sig[i]) return False;
+   return True;
+}
+
+
+/* Add 'signum' (1-based, as for POSIX) to 'set'.  Returns 0 on
+   success, -1 on NULL set or out-of-range signal number. */
+Int VG_(sigaddset)( vki_sigset_t* set, Int signum )
+{
+   if (set == NULL)
+      return -1;
+   if (signum < 1 || signum > _VKI_NSIG)
+      return -1;
+   signum--;   /* convert to 0-based bit index */
+   set->sig[signum / _VKI_NSIG_BPW] |= (1UL << (signum % _VKI_NSIG_BPW));
+   return 0;
+}
+
+/* Remove 'signum' (1-based) from 'set'.  Returns 0 on success, -1 on
+   NULL set or out-of-range signal number. */
+Int VG_(sigdelset)( vki_sigset_t* set, Int signum )
+{
+   if (set == NULL)
+      return -1;
+   if (signum < 1 || signum > _VKI_NSIG)
+      return -1;
+   signum--;   /* convert to 0-based bit index */
+   set->sig[signum / _VKI_NSIG_BPW] &= ~(1UL << (signum % _VKI_NSIG_BPW));
+   return 0;
+}
+
+/* 1 iff 'signum' (1-based) is in 'set'; 0 otherwise, including for a
+   NULL set or an out-of-range signal number. */
+Int VG_(sigismember) ( const vki_sigset_t* set, Int signum )
+{
+   if (set == NULL)
+      return 0;
+   if (signum < 1 || signum > _VKI_NSIG)
+      return 0;
+   signum--;   /* convert to 0-based bit index */
+   if (1 & ((set->sig[signum / _VKI_NSIG_BPW]) >> (signum % _VKI_NSIG_BPW)))
+      return 1;
+   else
+      return 0;
+}
+
+/* Add all signals in src to dst (dst |= src). */
+void VG_(sigaddset_from_set)( vki_sigset_t* dst, vki_sigset_t* src )
+{
+   Int i;
+   vg_assert(dst != NULL && src != NULL);
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      dst->sig[i] |= src->sig[i];
+}
+
+/* Remove all signals in src from dst (dst &= ~src). */
+void VG_(sigdelset_from_set)( vki_sigset_t* dst, vki_sigset_t* src )
+{
+   Int i;
+   vg_assert(dst != NULL && src != NULL);
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      dst->sig[i] &= ~(src->sig[i]);
+}
+
+
+/* The functions sigaction, sigprocmask, sigpending and sigsuspend
+   return 0 on success and -1 on error.
+*/
+Int VG_(sigprocmask)( Int how, const vki_sigset_t* set, vki_sigset_t* oldset)
+{
+   /* The final argument is the kernel's sigsetsize, expressed in
+      bytes of the sig[] word array. */
+   SysRes res = VG_(do_syscall4)(__NR_rt_sigprocmask,
+                                 how, (UWord)set, (UWord)oldset,
+                                 _VKI_NSIG_WORDS * sizeof(UWord));
+   return res.isError ? -1 : 0;
+}
+
+
+/* Install/query the handler for 'signum'; 0 on success, -1 on error. */
+Int VG_(sigaction) ( Int signum, const struct vki_sigaction* act,
+                     struct vki_sigaction* oldact)
+{
+   SysRes res = VG_(do_syscall4)(__NR_rt_sigaction,
+                                 signum, (UWord)act, (UWord)oldact,
+                                 _VKI_NSIG_WORDS * sizeof(UWord));
+   return res.isError ? -1 : 0;
+}
+
+
+/* Send signal 'signo' to process 'pid'; 0 on success, -1 on error. */
+Int VG_(kill)( Int pid, Int signo )
+{
+   SysRes res = VG_(do_syscall2)(__NR_kill, pid, signo);
+   return res.isError ? -1 : 0;
+}
+
+
+/* Send signal 'signo' to thread 'tid' via tkill, falling back to kill
+   if the kernel lacks tkill.  Returns 0 on success, -1 on error. */
+Int VG_(tkill)( ThreadId tid, Int signo )
+{
+   /* NOTE(review): this initial ENOSYS value is dead -- it is
+      overwritten unconditionally on the next line. */
+   SysRes res = VG_(mk_SysRes_Error)(VKI_ENOSYS);
+   res = VG_(do_syscall2)(__NR_tkill, tid, signo);
+   if (res.isError && res.err == VKI_ENOSYS)
+      res = VG_(do_syscall2)(__NR_kill, tid, signo);
+   return res.isError ? -1 : 0;
+}
+
+
+/* A cut-down version of POSIX sigtimedwait: poll for pending signals
+ mentioned in the sigset_t, and if any are present, select one
+ arbitrarily, return its number (which must be > 0), and put
+ auxiliary info about it in the siginfo_t, and make it
+ not-pending-any-more. If none are pending, return zero. The _zero
+ refers to the fact that there is zero timeout, so if no signals are
+ pending it returns immediately. Perhaps a better name would be
+ 'sigpoll'. Returns -1 on error, 0 if no signals pending, and n > 0
+ if signal n was selected.
+
+ The Linux implementation is trivial: do the corresponding syscall.
+
+ The AIX implementation is horrible and probably broken in a dozen
+ obscure ways. I suspect it's only thread-safe because V forces
+ single-threadedness. */
+
+#if defined(VGO_linux)
+/* Zero-timeout poll for a pending signal in 'set'; see the big
+   comment above.  Maps directly onto rt_sigtimedwait with a zero
+   timespec. */
+Int VG_(sigtimedwait_zero)( const vki_sigset_t *set,
+                            vki_siginfo_t *info )
+{
+   static const struct vki_timespec zero = { 0, 0 };
+   SysRes res = VG_(do_syscall4)(__NR_rt_sigtimedwait, (UWord)set, (UWord)info,
+                                 (UWord)&zero, sizeof(*set));
+   return res.isError ? -1 : res.res;
+}
+
+#elif defined(VGO_aix5)
+/* The general idea is:
+ - use sigpending to find out which signals are pending
+ - choose one
+ - temporarily set its handler to sigtimedwait_zero_handler
+ - use sigsuspend atomically unblock it and wait for the signal.
+ Upon return, sigsuspend restores the signal mask to what it
+ was to start with.
+ - Restore the handler for the signal to whatever it was before.
+*/
+
+/* A signal handler which does nothing (it doesn't need to).  It does
+   however check that it's not handing a sync signal for which
+   returning is meaningless. */
+static void sigtimedwait_zero_handler ( Int sig )
+{
+   /* These are synchronous (fault-style) signals; simply returning
+      from the handler would be meaningless for them. */
+   vg_assert(sig != VKI_SIGILL);
+   vg_assert(sig != VKI_SIGSEGV);
+   vg_assert(sig != VKI_SIGBUS);
+   vg_assert(sig != VKI_SIGTRAP);
+   /* do nothing */
+}
+
+/* AIX implementation of the zero-timeout poll described above: find a
+   signal that is both pending and blocked and in 'set', briefly
+   install a do-nothing handler for it, and use sigsuspend to take
+   exactly that one signal. */
+Int VG_(sigtimedwait_zero)( const vki_sigset_t *set,
+                            vki_siginfo_t *info )
+{
+   Int i, ir;
+   SysRes sr;
+   vki_sigset_t pending, blocked, allbutone;
+   struct vki_sigaction sa, saved_sa;
+
+   /* Find out what's pending: AIX _sigpending */
+   sr = VG_(do_syscall1)(__NR__sigpending, (UWord)&pending);
+   vg_assert(!sr.isError);
+
+   /* don't try for signals not in 'set' */
+   /* pending = pending `intersect` set */
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      pending.sig[i] &= set->sig[i];
+
+   /* don't try for signals not blocked at the moment */
+   ir = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &blocked);
+   vg_assert(ir == 0);
+
+   /* pending = pending `intersect` blocked */
+   for (i = 0; i < _VKI_NSIG_WORDS; i++)
+      pending.sig[i] &= blocked.sig[i];
+
+   /* decide which signal we're going to snarf */
+   for (i = 1; i < _VKI_NSIG; i++)
+      if (VG_(sigismember)(&pending,i))
+         break;
+
+   /* Nothing suitable pending -> report "no signals". */
+   if (i == _VKI_NSIG)
+      return 0;
+
+   /* fetch signal i.
+      pre: i is blocked and pending
+      pre: we are the only thread running
+   */
+   /* Set up alternative signal handler */
+   VG_(sigfillset)(&allbutone);
+   VG_(sigdelset)(&allbutone, i);
+   sa.sa_mask     = allbutone;
+   sa.ksa_handler = &sigtimedwait_zero_handler;
+   sa.sa_flags    = 0;
+   ir = VG_(sigaction)(i, &sa, &saved_sa);
+   vg_assert(ir == 0);
+
+   /* Switch signal masks and wait for the signal.  This should happen
+      immediately, since we've already established it is pending and
+      blocked. */
+   sr = VG_(do_syscall1)(__NR__sigsuspend, (UWord)&allbutone);
+   /* sigsuspend only returns (as -1/"error") after a signal has been
+      delivered, hence these assertions. */
+   vg_assert(sr.isError);
+   if (0)
+      VG_(debugLog)(0, "libcsignal",
+                    "sigtimedwait_zero: sigsuspend got res %ld err %ld\n",
+                    sr.res, sr.err);
+   vg_assert(sr.res == (UWord)-1);
+
+   /* Restore signal's handler to whatever it was before */
+   ir = VG_(sigaction)(i, &saved_sa, NULL);
+   vg_assert(ir == 0);
+
+   /* This is bogus - we could get more info from the sighandler. */
+   VG_(memset)( info, 0, sizeof(*info) );
+   info->si_signo = i;
+
+   return i;
+}
+
+#else
+# error Unknown OS
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_machine.c.svn-base b/coregrind/.svn/text-base/m_machine.c.svn-base
new file mode 100644
index 0000000..8d5901f
--- /dev/null
+++ b/coregrind/.svn/text-base/m_machine.c.svn-base
@@ -0,0 +1,676 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Machine-related stuff. m_machine.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_machine.h"
+#include "pub_core_cpuid.h"
+#include "pub_core_libcsignal.h" // for ppc32 messing with SIGILL and SIGFPE
+#include "pub_core_debuglog.h"
+
+
+/* Shorthand for the arch-dependent guest register field names. */
+#define INSTR_PTR(regs)   ((regs).vex.VG_INSTR_PTR)
+#define STACK_PTR(regs)   ((regs).vex.VG_STACK_PTR)
+#define FRAME_PTR(regs)   ((regs).vex.VG_FRAME_PTR)
+
+/* Guest stack pointer of thread 'tid'. */
+Addr VG_(get_SP) ( ThreadId tid )
+{
+   return STACK_PTR( VG_(threads)[tid].arch );
+}
+
+/* Guest instruction pointer of thread 'tid'. */
+Addr VG_(get_IP) ( ThreadId tid )
+{
+   return INSTR_PTR( VG_(threads)[tid].arch );
+}
+
+/* Guest frame pointer of thread 'tid'. */
+Addr VG_(get_FP) ( ThreadId tid )
+{
+   return FRAME_PTR( VG_(threads)[tid].arch );
+}
+
+/* Guest link register of thread 'tid'.  x86/amd64 have no link
+   register, so 0 is returned there. */
+Addr VG_(get_LR) ( ThreadId tid )
+{
+# if defined(VGA_ppc32) || defined(VGA_ppc64)
+   return VG_(threads)[tid].arch.vex.guest_LR;
+# elif defined(VGA_x86) || defined(VGA_amd64)
+   return 0;
+# else
+#   error "Unknown arch"
+# endif
+}
+
+/* Write the guest stack pointer of thread 'tid'. */
+void VG_(set_SP) ( ThreadId tid, Addr sp )
+{
+   STACK_PTR( VG_(threads)[tid].arch ) = sp;
+}
+
+/* Write the guest instruction pointer of thread 'tid'. */
+void VG_(set_IP) ( ThreadId tid, Addr ip )
+{
+   INSTR_PTR( VG_(threads)[tid].arch ) = ip;
+}
+
+/* Install shadow values for a syscall's result into thread 'tid's two
+   shadow register states.  On the Linux platforms only the result
+   register gets shadows and s1err/s2err are ignored; on AIX the error
+   value lives in a second register (GPR4) which is shadowed too. */
+void VG_(set_syscall_return_shadows) ( ThreadId tid,
+                                       /* shadow vals for the result */
+                                       UWord s1res, UWord s2res,
+                                       /* shadow vals for the error val */
+                                       UWord s1err, UWord s2err )
+{
+# if defined(VGP_x86_linux)
+   VG_(threads)[tid].arch.vex_shadow1.guest_EAX = s1res;
+   VG_(threads)[tid].arch.vex_shadow2.guest_EAX = s2res;
+# elif defined(VGP_amd64_linux)
+   VG_(threads)[tid].arch.vex_shadow1.guest_RAX = s1res;
+   VG_(threads)[tid].arch.vex_shadow2.guest_RAX = s2res;
+# elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+   VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
+   VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
+# elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+   VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
+   VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
+   VG_(threads)[tid].arch.vex_shadow1.guest_GPR4 = s1err;
+   VG_(threads)[tid].arch.vex_shadow2.guest_GPR4 = s2err;
+# else
+#   error "Unknown plat"
+# endif
+}
+
+/* Copy 'size' bytes starting at 'offset' of thread 'tid's register
+   state number 'shadowNo' (0 = real guest state, 1/2 = the two shadow
+   states) into 'dst'. */
+void
+VG_(get_shadow_regs_area) ( ThreadId tid,
+                            /*DST*/UChar* dst,
+                            /*SRC*/Int shadowNo, PtrdiffT offset, SizeT size )
+{
+   void*        src;
+   ThreadState* tst;
+   vg_assert(shadowNo == 0 || shadowNo == 1 || shadowNo == 2);
+   vg_assert(VG_(is_valid_tid)(tid));
+   // Bounds check
+   vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
+   vg_assert(offset + size <= sizeof(VexGuestArchState));
+   // Copy
+   tst = & VG_(threads)[tid];
+   src = NULL;
+   switch (shadowNo) {
+      case 0: src = (void*)(((Addr)&(tst->arch.vex)) + offset); break;
+      case 1: src = (void*)(((Addr)&(tst->arch.vex_shadow1)) + offset); break;
+      case 2: src = (void*)(((Addr)&(tst->arch.vex_shadow2)) + offset); break;
+   }
+   /* NOTE(review): tl_assert in core code -- siblings here use
+      vg_assert; confirm tl_assert is available/intended in coregrind. */
+   tl_assert(src != NULL);
+   VG_(memcpy)( dst, src, size);
+}
+
+/* Copy 'size' bytes from 'src' into thread 'tid's register state
+   number 'shadowNo' (0 = real guest state, 1/2 = the two shadow
+   states), starting at 'offset'. */
+void
+VG_(set_shadow_regs_area) ( ThreadId tid,
+                            /*DST*/Int shadowNo, PtrdiffT offset, SizeT size,
+                            /*SRC*/const UChar* src )
+{
+   void*        dst;
+   ThreadState* tst;
+   vg_assert(shadowNo == 0 || shadowNo == 1 || shadowNo == 2);
+   vg_assert(VG_(is_valid_tid)(tid));
+   // Bounds check
+   vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
+   vg_assert(offset + size <= sizeof(VexGuestArchState));
+   // Copy
+   tst = & VG_(threads)[tid];
+   dst = NULL;
+   switch (shadowNo) {
+      case 0: dst = (void*)(((Addr)&(tst->arch.vex)) + offset); break;
+      case 1: dst = (void*)(((Addr)&(tst->arch.vex_shadow1)) + offset); break;
+      case 2: dst = (void*)(((Addr)&(tst->arch.vex_shadow2)) + offset); break;
+   }
+   /* NOTE(review): tl_assert in core code -- siblings here use
+      vg_assert; confirm tl_assert is available/intended in coregrind. */
+   tl_assert(dst != NULL);
+   VG_(memcpy)( dst, src, size);
+}
+
+
+/* Apply 'f' to the current *value* of each general-purpose register
+   (plus CTR/LR on ppc) in the given guest state.  Used below to scan
+   registers for pointers. */
+static void apply_to_GPs_of_tid(VexGuestArchState* vex, void (*f)(Addr))
+{
+#if defined(VGA_x86)
+   (*f)(vex->guest_EAX);
+   (*f)(vex->guest_ECX);
+   (*f)(vex->guest_EDX);
+   (*f)(vex->guest_EBX);
+   (*f)(vex->guest_ESI);
+   (*f)(vex->guest_EDI);
+   (*f)(vex->guest_ESP);
+   (*f)(vex->guest_EBP);
+#elif defined(VGA_amd64)
+   (*f)(vex->guest_RAX);
+   (*f)(vex->guest_RCX);
+   (*f)(vex->guest_RDX);
+   (*f)(vex->guest_RBX);
+   (*f)(vex->guest_RSI);
+   (*f)(vex->guest_RDI);
+   (*f)(vex->guest_RSP);
+   (*f)(vex->guest_RBP);
+   (*f)(vex->guest_R8);
+   (*f)(vex->guest_R9);
+   (*f)(vex->guest_R10);
+   (*f)(vex->guest_R11);
+   (*f)(vex->guest_R12);
+   (*f)(vex->guest_R13);
+   (*f)(vex->guest_R14);
+   (*f)(vex->guest_R15);
+#elif defined(VGA_ppc32) || defined(VGA_ppc64)
+   /* XXX ask tool about validity? */
+   (*f)(vex->guest_GPR0);
+   (*f)(vex->guest_GPR1);
+   (*f)(vex->guest_GPR2);
+   (*f)(vex->guest_GPR3);
+   (*f)(vex->guest_GPR4);
+   (*f)(vex->guest_GPR5);
+   (*f)(vex->guest_GPR6);
+   (*f)(vex->guest_GPR7);
+   (*f)(vex->guest_GPR8);
+   (*f)(vex->guest_GPR9);
+   (*f)(vex->guest_GPR10);
+   (*f)(vex->guest_GPR11);
+   (*f)(vex->guest_GPR12);
+   (*f)(vex->guest_GPR13);
+   (*f)(vex->guest_GPR14);
+   (*f)(vex->guest_GPR15);
+   (*f)(vex->guest_GPR16);
+   (*f)(vex->guest_GPR17);
+   (*f)(vex->guest_GPR18);
+   (*f)(vex->guest_GPR19);
+   (*f)(vex->guest_GPR20);
+   (*f)(vex->guest_GPR21);
+   (*f)(vex->guest_GPR22);
+   (*f)(vex->guest_GPR23);
+   (*f)(vex->guest_GPR24);
+   (*f)(vex->guest_GPR25);
+   (*f)(vex->guest_GPR26);
+   (*f)(vex->guest_GPR27);
+   (*f)(vex->guest_GPR28);
+   (*f)(vex->guest_GPR29);
+   (*f)(vex->guest_GPR30);
+   (*f)(vex->guest_GPR31);
+   (*f)(vex->guest_CTR);
+   (*f)(vex->guest_LR);
+
+#else
+#  error Unknown arch
+#endif
+}
+
+
+/* Apply 'f' to the general-purpose register values of every live
+   thread (tid 0 is skipped; it is not a valid thread slot).
+   NOTE(review): 'f' is declared here as taking UWord but the helper
+   expects void(*)(Addr) -- presumably Addr and UWord are the same
+   underlying type; confirm. */
+void VG_(apply_to_GP_regs)(void (*f)(UWord))
+{
+   ThreadId tid;
+
+   for (tid = 1; tid < VG_N_THREADS; tid++) {
+      if (VG_(is_valid_tid)(tid)) {
+         ThreadState* tst = VG_(get_ThreadState)(tid);
+         apply_to_GPs_of_tid(&(tst->arch.vex), f);
+      }
+   }
+}
+
+/* Start an iteration over all thread stacks: prime '*tid' for the
+   first call to VG_(thread_stack_next). */
+void VG_(thread_stack_reset_iter)(/*OUT*/ThreadId* tid)
+{
+   *tid = (ThreadId)(-1);
+}
+
+/* Advance the iteration: find the next non-empty thread slot after
+   '*tid', skipping VG_INVALID_THREADID.  On success returns True and
+   fills in the thread id and its stack range, where stack_min is the
+   thread's current SP and stack_max its recorded stack top.  Returns
+   False when no more threads remain. */
+Bool VG_(thread_stack_next)(/*MOD*/ThreadId* tid,
+                            /*OUT*/Addr* stack_min,
+                            /*OUT*/Addr* stack_max)
+{
+   ThreadId i;
+   for (i = (*tid)+1; i < VG_N_THREADS; i++) {
+      if (i == VG_INVALID_THREADID)
+         continue;
+      if (VG_(threads)[i].status != VgTs_Empty) {
+         *tid       = i;
+         *stack_min = VG_(get_SP)(i);
+         *stack_max = VG_(threads)[i].client_stack_highest_word;
+         return True;
+      }
+   }
+   return False;
+}
+
+/* Highest recorded client stack address of thread 'tid'; 'tid' must
+   be a valid, non-empty thread slot. */
+Addr VG_(thread_get_stack_max)(ThreadId tid)
+{
+   vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
+   vg_assert(VG_(threads)[tid].status != VgTs_Empty);
+   return VG_(threads)[tid].client_stack_highest_word;
+}
+
+/* Recorded client stack size (in bytes) of thread 'tid'; 'tid' must
+   be a valid, non-empty thread slot. */
+SizeT VG_(thread_get_stack_size)(ThreadId tid)
+{
+   vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
+   vg_assert(VG_(threads)[tid].status != VgTs_Empty);
+   return VG_(threads)[tid].client_stack_szB;
+}
+
+//-------------------------------------------------------------
+/* Details about the capabilities of the underlying (host) CPU. These
+ details are acquired by (1) enquiring with the CPU at startup, or
+ (2) from the AT_SYSINFO entries the kernel gave us (ppc32 cache
+ line size). It's a bit nasty in the sense that there's no obvious
+ way to stop uses of some of this info before it's ready to go.
+
+ Current dependencies are:
+
+ x86: initially: call VG_(machine_get_hwcaps)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ and VG_(machine_x86_have_mxcsr)
+ -------------
+ amd64: initially: call VG_(machine_get_hwcaps)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ -------------
+ ppc32: initially: call VG_(machine_get_hwcaps)
+ call VG_(machine_ppc32_set_clszB)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ and VG_(machine_ppc32_has_FP)
+ and VG_(machine_ppc32_has_VMX)
+ -------------
+ ppc64: initially: call VG_(machine_get_hwcaps)
+ call VG_(machine_ppc64_set_clszB)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ and VG_(machine_ppc64_has_VMX)
+
+ VG_(machine_get_hwcaps) may use signals (although it attempts to
+ leave signal state unchanged) and therefore should only be
+ called before m_main sets up the client's signal state.
+*/
+
+/* --------- State --------- */
+/* Guards against VG_(machine_get_hwcaps) being run more than once. */
+static Bool hwcaps_done = False;
+
+/* --- all archs --- */
+/* Host architecture and capability info, filled in by
+   VG_(machine_get_hwcaps). */
+static VexArch va;
+static VexArchInfo vai;
+
+#if defined(VGA_x86)
+UInt VG_(machine_x86_have_mxcsr) = 0;
+#endif
+#if defined(VGA_ppc32)
+UInt VG_(machine_ppc32_has_FP) = 0;
+UInt VG_(machine_ppc32_has_VMX) = 0;
+#endif
+#if defined(VGA_ppc64)
+ULong VG_(machine_ppc64_has_VMX) = 0;
+#endif
+
+
+/* Determine what insn set and insn set variant the host has, and
+ record it. To be called once at system startup. Returns False if
+ this a CPU incapable of running Valgrind. */
+
+#if defined(VGA_ppc32) || defined(VGA_ppc64)
+#include <setjmp.h> // For jmp_buf
+static jmp_buf env_unsup_insn;
+/* SIGILL/SIGFPE handler used while probing for instruction-set
+   support: just longjmps back to the probe site.  'x' (the signal
+   number) is unused. */
+static void handler_unsup_insn ( Int x ) { __builtin_longjmp(env_unsup_insn,1); }
+#endif
+
+Bool VG_(machine_get_hwcaps)( void )
+{
+ vg_assert(hwcaps_done == False);
+ hwcaps_done = True;
+
+ // Whack default settings into vai, so that we only need to fill in
+ // any interesting bits.
+ LibVEX_default_VexArchInfo(&vai);
+
+#if defined(VGA_x86)
+ { Bool have_sse1, have_sse2;
+ UInt eax, ebx, ecx, edx;
+
+ if (!VG_(has_cpuid)())
+ /* we can't do cpuid at all. Give up. */
+ return False;
+
+ VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
+ if (eax < 1)
+ /* we can't ask for cpuid(x) for x > 0. Give up. */
+ return False;
+
+ /* get capabilities bits into edx */
+ VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);
+
+ have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
+ have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */
+
+ if (have_sse2 && have_sse1) {
+ va = VexArchX86;
+ vai.hwcaps = VEX_HWCAPS_X86_SSE1;
+ vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
+ VG_(machine_x86_have_mxcsr) = 1;
+ return True;
+ }
+
+ if (have_sse1) {
+ va = VexArchX86;
+ vai.hwcaps = VEX_HWCAPS_X86_SSE1;
+ VG_(machine_x86_have_mxcsr) = 1;
+ return True;
+ }
+
+ va = VexArchX86;
+ vai.hwcaps = 0; /*baseline - no sse at all*/
+ VG_(machine_x86_have_mxcsr) = 0;
+ return True;
+ }
+
+#elif defined(VGA_amd64)
+ vg_assert(VG_(has_cpuid)());
+ va = VexArchAMD64;
+ vai.hwcaps = 0; /*baseline - SSE2 */
+ return True;
+
+#elif defined(VGA_ppc32)
+ {
+ /* Find out which subset of the ppc32 instruction set is supported by
+ verifying whether various ppc32 instructions generate a SIGILL
+ or a SIGFPE. An alternative approach is to check the AT_HWCAP and
+ AT_PLATFORM entries in the ELF auxiliary table -- see also
+ the_iifii.client_auxv in m_main.c.
+ */
+ vki_sigset_t saved_set, tmp_set;
+ struct vki_sigaction saved_sigill_act, tmp_sigill_act;
+ struct vki_sigaction saved_sigfpe_act, tmp_sigfpe_act;
+
+ volatile Bool have_F, have_V, have_FX, have_GX;
+ Int r;
+
+ VG_(sigemptyset)(&tmp_set);
+ VG_(sigaddset)(&tmp_set, VKI_SIGILL);
+ VG_(sigaddset)(&tmp_set, VKI_SIGFPE);
+
+ r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
+ vg_assert(r == 0);
+
+ r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
+ vg_assert(r == 0);
+ tmp_sigill_act = saved_sigill_act;
+
+ r = VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
+ vg_assert(r == 0);
+ tmp_sigfpe_act = saved_sigfpe_act;
+
+ /* NODEFER: signal handler does not return (from the kernel's point of
+ view), hence if it is to successfully catch a signal more than once,
+ we need the NODEFER flag. */
+ tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
+ tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
+ tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
+ tmp_sigill_act.ksa_handler = handler_unsup_insn;
+ r = VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
+ vg_assert(r == 0);
+
+ tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
+ tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
+ tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
+ tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
+ r = VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
+ vg_assert(r == 0);
+
+ /* standard FP insns */
+ have_F = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_F = False;
+ } else {
+ __asm__ __volatile__(".long 0xFC000090"); /*fmr 0,0 */
+ }
+
+ /* Altivec insns */
+ have_V = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_V = False;
+ } else {
+ /* Unfortunately some older assemblers don't speak Altivec (or
+ choose not to), so to be safe we directly emit the 32-bit
+ word corresponding to "vor 0,0,0". This fixes a build
+ problem that happens on Debian 3.1 (ppc32), and probably
+ various other places. */
+ __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
+ }
+
+ /* General-Purpose optional (fsqrt, fsqrts) */
+ have_FX = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_FX = False;
+ } else {
+ __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0 */
+ }
+
+ /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
+ have_GX = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_GX = False;
+ } else {
+ __asm__ __volatile__(".long 0xFC000034"); /* frsqrte 0,0 */
+ }
+
+ r = VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
+ vg_assert(r == 0);
+ r = VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
+ vg_assert(r == 0);
+ r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
+ vg_assert(r == 0);
+ VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d\n",
+ (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
+ /* Make FP a prerequisite for VMX (bogusly so), and for FX and GX. */
+ if (have_V && !have_F)
+ have_V = False;
+ if (have_FX && !have_F)
+ have_FX = False;
+ if (have_GX && !have_F)
+ have_GX = False;
+
+ VG_(machine_ppc32_has_FP) = have_F ? 1 : 0;
+ VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;
+
+ va = VexArchPPC32;
+
+ vai.hwcaps = 0;
+ if (have_F) vai.hwcaps |= VEX_HWCAPS_PPC32_F;
+ if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC32_V;
+ if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC32_FX;
+ if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC32_GX;
+
+ /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
+ called before we're ready to go. */
+ return True;
+ }
+
+#elif defined(VGA_ppc64)
+ {
+ /* Same instruction set detection algorithm as for ppc32. */
+ vki_sigset_t saved_set, tmp_set;
+ struct vki_sigaction saved_sigill_act, tmp_sigill_act;
+ struct vki_sigaction saved_sigfpe_act, tmp_sigfpe_act;
+
+ volatile Bool have_F, have_V, have_FX, have_GX;
+
+ VG_(sigemptyset)(&tmp_set);
+ VG_(sigaddset)(&tmp_set, VKI_SIGILL);
+ VG_(sigaddset)(&tmp_set, VKI_SIGFPE);
+
+ VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
+
+ VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
+ tmp_sigill_act = saved_sigill_act;
+
+ VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
+ tmp_sigfpe_act = saved_sigfpe_act;
+
+
+ /* NODEFER: signal handler does not return (from the kernel's point of
+ view), hence if it is to successfully catch a signal more than once,
+ we need the NODEFER flag. */
+ tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
+ tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
+ tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
+ tmp_sigill_act.ksa_handler = handler_unsup_insn;
+ VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
+
+ tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
+ tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
+ tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
+ tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
+ VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
+
+ /* standard FP insns */
+ have_F = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_F = False;
+ } else {
+ __asm__ __volatile__("fmr 0,0");
+ }
+
+ /* Altivec insns */
+ have_V = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_V = False;
+ } else {
+ __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
+ }
+
+ /* General-Purpose optional (fsqrt, fsqrts) */
+ have_FX = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_FX = False;
+ } else {
+ __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/
+ }
+
+ /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
+ have_GX = True;
+ if (__builtin_setjmp(env_unsup_insn)) {
+ have_GX = False;
+ } else {
+ __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/
+ }
+
+ VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
+ VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
+ VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d\n",
+ (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
+ /* on ppc64, if we don't even have FP, just give up. */
+ if (!have_F)
+ return False;
+
+ VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;
+
+ va = VexArchPPC64;
+
+ vai.hwcaps = 0;
+ if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC64_V;
+ if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC64_FX;
+ if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC64_GX;
+
+ /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
+ called before we're ready to go. */
+ return True;
+ }
+
+#else
+# error "Unknown arch"
+#endif
+}
+
+/* Notify host cpu cache line size. */
+#if defined(VGA_ppc32)
+void VG_(machine_ppc32_set_clszB)( Int szB )
+{
+ vg_assert(hwcaps_done);
+
+ /* Either the value must not have been set yet (zero) or we can
+ tolerate it being set to the same value multiple times, as the
+ stack scanning logic in m_main is a bit stupid. */
+ vg_assert(vai.ppc_cache_line_szB == 0
+ || vai.ppc_cache_line_szB == szB);
+
+ vg_assert(szB == 32 || szB == 64 || szB == 128);
+ vai.ppc_cache_line_szB = szB;
+}
+#endif
+
+
+/* Notify host cpu cache line size. */
+#if defined(VGA_ppc64)
+void VG_(machine_ppc64_set_clszB)( Int szB )
+{
+ vg_assert(hwcaps_done);
+
+ /* Either the value must not have been set yet (zero) or we can
+ tolerate it being set to the same value multiple times, as the
+ stack scanning logic in m_main is a bit stupid. */
+ vg_assert(vai.ppc_cache_line_szB == 0
+ || vai.ppc_cache_line_szB == szB);
+
+ vg_assert(szB == 32 || szB == 64 || szB == 128);
+ vai.ppc_cache_line_szB = szB;
+}
+#endif
+
+
+/* Fetch host cpu info, once established. */
+void VG_(machine_get_VexArchInfo)( /*OUT*/VexArch* pVa,
+ /*OUT*/VexArchInfo* pVai )
+{
+ vg_assert(hwcaps_done);
+ if (pVa) *pVa = va;
+ if (pVai) *pVai = vai;
+}
+
+
+// Given a pointer to a function as obtained by "& functionname" in C,
+// produce a pointer to the actual entry point for the function.
+void* VG_(fnptr_to_fnentry)( void* f )
+{
+#if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
+ || defined(VGP_ppc32_linux)
+ return f;
+#elif defined(VGP_ppc64_linux) || defined(VGP_ppc32_aix5) \
+ || defined(VGP_ppc64_aix5)
+ /* All other ppc variants use the AIX scheme, in which f is a
+ pointer to a 3-word function descriptor, of which the first word
+ is the entry address. */
+ UWord* descr = (UWord*)f;
+ return (void*)(descr[0]);
+#else
+# error "Unknown platform"
+#endif
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_main.c.svn-base b/coregrind/.svn/text-base/m_main.c.svn-base
new file mode 100644
index 0000000..0f9aac6
--- /dev/null
+++ b/coregrind/.svn/text-base/m_main.c.svn-base
@@ -0,0 +1,2623 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Startup: the real stuff m_main.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_commandline.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_errormgr.h"
+#include "pub_core_execontext.h"
+#include "pub_core_initimg.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_syscall.h" // VG_(strerror)
+#include "pub_core_machine.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_debuginfo.h"
+#include "pub_core_redir.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_signals.h"
+#include "pub_core_stacks.h" // For VG_(register_stack)
+#include "pub_core_syswrap.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_translate.h" // For VG_(translate)
+#include "pub_core_trampoline.h"
+#include "pub_core_transtab.h"
+
+/* Stuff for reading AIX5 /proc/<pid>/sysent files */
+#if defined(VGO_aix5)
+ /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+# include <sys/procfs.h> /* prsysent_t */
+ /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+# define VG_AIX5_SYSENT_SIZE 100000
+ static UChar aix5_sysent_buf[VG_AIX5_SYSENT_SIZE];
+#endif
+
+
+/*====================================================================*/
+/*=== Counters, for profiling purposes only ===*/
+/*====================================================================*/
+
+static void print_all_stats ( void )
+{
+ VG_(print_translation_stats)();
+ VG_(print_tt_tc_stats)();
+ VG_(print_scheduler_stats)();
+ VG_(print_ExeContext_stats)();
+ VG_(print_errormgr_stats)();
+
+ // Memory stats
+ if (VG_(clo_verbosity) > 2) {
+ VG_(message)(Vg_DebugMsg, "");
+ VG_(message)(Vg_DebugMsg,
+ "------ Valgrind's internal memory use stats follow ------" );
+ VG_(sanity_check_malloc_all)();
+ VG_(message)(Vg_DebugMsg, "------" );
+ VG_(print_all_arena_stats)();
+ VG_(message)(Vg_DebugMsg, "");
+ }
+}
+
+
+/*====================================================================*/
+/*=== Command-line: variables, processing, etc ===*/
+/*====================================================================*/
+
+// See pub_{core,tool}_options.h for explanations of all these.
+
+static void usage_NORETURN ( Bool debug_help )
+{
+ /* 'usage1' contains a %s for the name of the GDB executable, which
+ must be supplied when it is VG_(printf)'d. */
+ Char* usage1 =
+"usage: valgrind [options] prog-and-args\n"
+"\n"
+" common user options for all Valgrind tools, with defaults in [ ]:\n"
+" --tool=<name> use the Valgrind tool named <name> [memcheck]\n"
+" -h --help show this message\n"
+" --help-debug show this message, plus debugging options\n"
+" --version show version\n"
+" -q --quiet run silently; only print error msgs\n"
+" -v --verbose be more verbose, incl counts of errors\n"
+" --trace-children=no|yes Valgrind-ise child processes (follow execve)? [no]\n"
+" --child-silent-after-fork=no|yes omit child output between fork & exec? [no]\n"
+" --track-fds=no|yes track open file descriptors? [no]\n"
+" --time-stamp=no|yes add timestamps to log messages? [no]\n"
+" --log-fd=<number> log messages to file descriptor [2=stderr]\n"
+" --log-file=<file> log messages to <file>\n"
+" --log-socket=ipaddr:port log messages to socket ipaddr:port\n"
+"\n"
+" uncommon user options for all Valgrind tools:\n"
+" --run-libc-freeres=no|yes free up glibc memory at exit? [yes]\n"
+" --sim-hints=hint1,hint2,... known hints:\n"
+" lax-ioctls, enable-outer [none]\n"
+" --show-emwarns=no|yes show warnings about emulation limits? [no]\n"
+" --smc-check=none|stack|all checks for self-modifying code: none,\n"
+" only for code found in stacks, or all [stack]\n"
+" --kernel-variant=variant1,variant2,... known variants: bproc [none]\n"
+" handle non-standard kernel variants\n"
+" --read-var-info=yes|no read debug info on stack and global variables\n"
+" and use it to print better error messages in\n"
+" tools that make use of it (Memcheck, Helgrind)\n"
+"\n"
+" user options for Valgrind tools that report errors:\n"
+" --xml=yes all output is in XML (some tools only)\n"
+" --xml-user-comment=STR copy STR verbatim to XML output\n"
+" --demangle=no|yes automatically demangle C++ names? [yes]\n"
+" --num-callers=<number> show <number> callers in stack traces [12]\n"
+" --error-limit=no|yes stop showing new errors if too many? [yes]\n"
+" --error-exitcode=<number> exit code to return if errors found [0=disable]\n"
+" --show-below-main=no|yes continue stack traces below main() [no]\n"
+" --suppressions=<filename> suppress errors described in <filename>\n"
+" --gen-suppressions=no|yes|all print suppressions for errors? [no]\n"
+" --db-attach=no|yes start debugger when errors detected? [no]\n"
+" --db-command=<command> command to start debugger [%s -nw %%f %%p]\n"
+" --input-fd=<number> file descriptor for input [0=stdin]\n"
+" --max-stackframe=<number> assume stack switch for SP changes larger\n"
+" than <number> bytes [2000000]\n"
+" --main-stacksize=<number> set size of main thread's stack (in bytes)\n"
+" [use current 'ulimit' value]\n"
+"\n";
+
+ Char* usage2 =
+"\n"
+" debugging options for all Valgrind tools:\n"
+" --sanity-level=<number> level of sanity checking to do [1]\n"
+" --trace-flags=<XXXXXXXX> show generated code? (X = 0|1) [00000000]\n"
+" --profile-flags=<XXXXXXXX> ditto, but for profiling (X = 0|1) [00000000]\n"
+" --trace-notbelow=<number> only show BBs above <number> [999999999]\n"
+" --trace-syscalls=no|yes show all system calls? [no]\n"
+" --trace-signals=no|yes show signal handling details? [no]\n"
+" --trace-symtab=no|yes show symbol table details? [no]\n"
+" --trace-symtab-patt=<patt> limit debuginfo tracing to obj name <patt>\n"
+" --trace-cfi=no|yes show call-frame-info details? [no]\n"
+" --debug-dump=syms mimic /usr/bin/readelf --syms\n"
+" --debug-dump=line mimic /usr/bin/readelf --debug-dump=line\n"
+" --debug-dump=frames mimic /usr/bin/readelf --debug-dump=frames\n"
+" --trace-redir=no|yes show redirection details? [no]\n"
+" --trace-sched=no|yes show thread scheduler details? [no]\n"
+" --profile-heap=no|yes profile Valgrind's own space use\n"
+" --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n"
+" --sym-offsets=yes|no show syms in form 'name+offset' ? [no]\n"
+" --command-line-only=no|yes only use command line options [no]\n"
+"\n"
+" --vex-iropt-verbosity 0 .. 9 [0]\n"
+" --vex-iropt-level 0 .. 2 [2]\n"
+" --vex-iropt-precise-memory-exns [no]\n"
+" --vex-iropt-unroll-thresh 0 .. 400 [120]\n"
+" --vex-guest-max-insns 1 .. 100 [50]\n"
+" --vex-guest-chase-thresh 0 .. 99 [10]\n"
+"\n"
+" --trace-flags and --profile-flags values (omit the middle space):\n"
+" 1000 0000 show conversion into IR\n"
+" 0100 0000 show after initial opt\n"
+" 0010 0000 show after instrumentation\n"
+" 0001 0000 show after second opt\n"
+" 0000 1000 show after tree building\n"
+" 0000 0100 show selecting insns\n"
+" 0000 0010 show after reg-alloc\n"
+" 0000 0001 show final assembly\n"
+" (Nb: you need --trace-notbelow with --trace-flags for full details)\n"
+"\n"
+" debugging options for Valgrind tools that report errors\n"
+" --dump-error=<number> show translation for basic block associated\n"
+" with <number>'th error context [0=show none]\n"
+"\n";
+
+ Char* usage3 =
+"\n"
+" Extra options read from ~/.valgrindrc, $VALGRIND_OPTS, ./.valgrindrc\n"
+"\n"
+" Valgrind is Copyright (C) 2000-2009 Julian Seward et al.\n"
+" and licensed under the GNU General Public License, version 2.\n"
+" Bug reports, feedback, admiration, abuse, etc, to: %s.\n"
+"\n"
+" Tools are copyright and licensed by their authors. See each\n"
+" tool's start-up message for more information.\n"
+"\n";
+
+ Char* gdb_path = GDB_PATH;
+
+ // Ensure the message goes to stdout
+ VG_(clo_log_fd) = 1;
+ vg_assert( !VG_(logging_to_socket) );
+
+ /* 'usage1' expects one char* argument */
+ VG_(printf)(usage1, gdb_path);
+ if (VG_(details).name) {
+ VG_(printf)(" user options for %s:\n", VG_(details).name);
+ if (VG_(needs).command_line_options)
+ VG_TDICT_CALL(tool_print_usage);
+ else
+ VG_(printf)(" (none)\n");
+ }
+ if (debug_help) {
+ VG_(printf)("%s", usage2);
+
+ if (VG_(details).name) {
+ VG_(printf)(" debugging options for %s:\n", VG_(details).name);
+
+ if (VG_(needs).command_line_options)
+ VG_TDICT_CALL(tool_print_debug_usage);
+ else
+ VG_(printf)(" (none)\n");
+ }
+ }
+ VG_(printf)(usage3, VG_BUGS_TO);
+ VG_(exit)(0);
+}
+
+
+/* Peer at previously set up VG_(args_for_valgrind) and do some
+ minimal command line processing that must happen early on:
+
+ - show the version string, if requested (-v)
+ - extract any request for help (--help, -h, --help-debug)
+ - get the toolname (--tool=)
+ - set VG_(clo_max_stackframe) (--max-stackframe=)
+ - set VG_(clo_main_stacksize) (--main-stacksize=)
+
+ That's all it does. The main command line processing is done below
+ by main_process_cmd_line_options. Note that
+ main_process_cmd_line_options has to handle but ignore the ones we
+ have handled here.
+*/
+static void early_process_cmd_line_options ( /*OUT*/Int* need_help,
+ /*OUT*/HChar** tool )
+{
+ UInt i;
+ HChar* str;
+
+ vg_assert( VG_(args_for_valgrind) );
+
+ /* parse the options we have (only the options we care about now) */
+ for (i = 0; i < VG_(sizeXA)( VG_(args_for_valgrind) ); i++) {
+
+ str = * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i );
+ vg_assert(str);
+
+ // Nb: the version string goes to stdout.
+ if VG_XACT_CLO(str, "--version", VG_(clo_log_fd), 1) {
+ VG_(printf)("valgrind-" VERSION "\n");
+ VG_(exit)(0);
+ }
+ else if VG_XACT_CLO(str, "--help", *need_help, 1) {}
+ else if VG_XACT_CLO(str, "-h", *need_help, 1) {}
+
+ else if VG_XACT_CLO(str, "--help-debug", *need_help, 2) {}
+
+ // The tool has already been determined, but we need to know the name
+ // here.
+ else if VG_STR_CLO(str, "--tool", *tool) {}
+
+ // Set up VG_(clo_max_stackframe) and VG_(clo_main_stacksize).
+ // These are needed by VG_(ii_create_image), which happens
+ // before main_process_cmd_line_options().
+ else if VG_INT_CLO(str, "--max-stackframe", VG_(clo_max_stackframe)) {}
+ else if VG_INT_CLO(str, "--main-stacksize", VG_(clo_main_stacksize)) {}
+ }
+}
+
+/* The main processing for command line options. See comments above
+ on early_process_cmd_line_options.
+*/
+static Bool main_process_cmd_line_options( const HChar* toolname )
+{
+ // VG_(clo_log_fd) is used by all the messaging. It starts as 2 (stderr)
+ // and we cannot change it until we know what we are changing it to is
+ // ok. So we have tmp_log_fd to hold the tmp fd prior to that point.
+ SysRes sres;
+ Int i, tmp_log_fd;
+ Int toolname_len = VG_(strlen)(toolname);
+ Char* tmp_str; // Used in a couple of places.
+ enum {
+ VgLogTo_Fd,
+ VgLogTo_File,
+ VgLogTo_Socket
+ } log_to = VgLogTo_Fd; // Where is logging output to be sent?
+
+ /* log to stderr by default, but usage message goes to stdout */
+ tmp_log_fd = 2;
+
+ /* Check for sane path in ./configure --prefix=... */
+ if (VG_LIBDIR[0] != '/')
+ VG_(err_config_error)("Please use absolute paths in "
+ "./configure --prefix=... or --libdir=...");
+
+ vg_assert( VG_(args_for_valgrind) );
+
+ for (i = 0; i < VG_(sizeXA)( VG_(args_for_valgrind) ); i++) {
+
+ HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i );
+ HChar* colon = arg;
+
+ // Look for a colon in the option name.
+ while (*colon && *colon != ':' && *colon != '=')
+ colon++;
+
+ // Does it have the form "--toolname:foo"? We have to do it at the start
+ // in case someone has combined a prefix with a core-specific option,
+ // eg. "--memcheck:verbose".
+ if (*colon == ':') {
+ if (VG_STREQN(2, arg, "--") &&
+ VG_STREQN(toolname_len, arg+2, toolname) &&
+ VG_STREQN(1, arg+2+toolname_len, ":"))
+ {
+ // Prefix matches, convert "--toolname:foo" to "--foo".
+ // Two things to note:
+ // - We cannot modify the option in-place. If we did, and then
+ // a child was spawned with --trace-children=yes, the
+ // now-non-prefixed option would be passed and could screw up
+ // the child.
+ // - We create copies, and never free them. Why? Non-prefixed
+ // options hang around forever, so tools need not make copies
+ // of strings within them. We need to have the same behaviour
+ // for prefixed options. The pointer to the copy will be lost
+ // once we leave this function (although a tool may keep a
+ // pointer into it), but the space wasted is insignificant.
+ // (In bug #142197, the copies were being freed, which caused
+ // problems for tools that reasonably assumed that arguments
+ // wouldn't disappear on them.)
+ if (0)
+ VG_(printf)("tool-specific arg: %s\n", arg);
+ arg = VG_(strdup)("main.mpclo.1", arg + toolname_len + 1);
+ arg[0] = '-';
+ arg[1] = '-';
+
+ } else {
+ // prefix doesn't match, skip to next arg
+ continue;
+ }
+ }
+
+ /* Ignore these options - they've already been handled */
+ if VG_STREQN( 7, arg, "--tool=") {}
+ else if VG_STREQN(20, arg, "--command-line-only=") {}
+ else if VG_STREQ( arg, "--") {}
+ else if VG_STREQ( arg, "-d") {}
+ else if VG_STREQN(16, arg, "--max-stackframe") {}
+ else if VG_STREQN(16, arg, "--main-stacksize") {}
+ else if VG_STREQN(14, arg, "--profile-heap") {}
+
+ // These options are new.
+ else if (VG_STREQ(arg, "-v") ||
+ VG_STREQ(arg, "--verbose"))
+ VG_(clo_verbosity)++;
+
+ else if (VG_STREQ(arg, "-q") ||
+ VG_STREQ(arg, "--quiet"))
+ VG_(clo_verbosity)--;
+
+ else if VG_BOOL_CLO(arg, "--xml", VG_(clo_xml)) {}
+ else if VG_BOOL_CLO(arg, "--db-attach", VG_(clo_db_attach)) {}
+ else if VG_BOOL_CLO(arg, "--demangle", VG_(clo_demangle)) {}
+ else if VG_BOOL_CLO(arg, "--error-limit", VG_(clo_error_limit)) {}
+ else if VG_INT_CLO (arg, "--error-exitcode", VG_(clo_error_exitcode)) {}
+ else if VG_BOOL_CLO(arg, "--show-emwarns", VG_(clo_show_emwarns)) {}
+
+ else if VG_BOOL_CLO(arg, "--run-libc-freeres", VG_(clo_run_libc_freeres)) {}
+ else if VG_BOOL_CLO(arg, "--show-below-main", VG_(clo_show_below_main)) {}
+ else if VG_BOOL_CLO(arg, "--time-stamp", VG_(clo_time_stamp)) {}
+ else if VG_BOOL_CLO(arg, "--track-fds", VG_(clo_track_fds)) {}
+ else if VG_BOOL_CLO(arg, "--trace-children", VG_(clo_trace_children)) {}
+ else if VG_BOOL_CLO(arg, "--child-silent-after-fork",
+ VG_(clo_child_silent_after_fork)) {}
+ else if VG_BOOL_CLO(arg, "--trace-sched", VG_(clo_trace_sched)) {}
+ else if VG_BOOL_CLO(arg, "--trace-signals", VG_(clo_trace_signals)) {}
+ else if VG_BOOL_CLO(arg, "--trace-symtab", VG_(clo_trace_symtab)) {}
+ else if VG_STR_CLO (arg, "--trace-symtab-patt", VG_(clo_trace_symtab_patt)) {}
+ else if VG_BOOL_CLO(arg, "--trace-cfi", VG_(clo_trace_cfi)) {}
+ else if VG_XACT_CLO(arg, "--debug-dump=syms", VG_(clo_debug_dump_syms),
+ True) {}
+ else if VG_XACT_CLO(arg, "--debug-dump=line", VG_(clo_debug_dump_line),
+ True) {}
+ else if VG_XACT_CLO(arg, "--debug-dump=frames",
+ VG_(clo_debug_dump_frames), True) {}
+ else if VG_BOOL_CLO(arg, "--trace-redir", VG_(clo_trace_redir)) {}
+
+ else if VG_BOOL_CLO(arg, "--trace-syscalls", VG_(clo_trace_syscalls)) {}
+ else if VG_BOOL_CLO(arg, "--wait-for-gdb", VG_(clo_wait_for_gdb)) {}
+ else if VG_STR_CLO (arg, "--db-command", VG_(clo_db_command)) {}
+ else if VG_STR_CLO (arg, "--sim-hints", VG_(clo_sim_hints)) {}
+ else if VG_BOOL_CLO(arg, "--sym-offsets", VG_(clo_sym_offsets)) {}
+ else if VG_BOOL_CLO(arg, "--read-var-info", VG_(clo_read_var_info)) {}
+
+ else if VG_INT_CLO (arg, "--dump-error", VG_(clo_dump_error)) {}
+ else if VG_INT_CLO (arg, "--input-fd", VG_(clo_input_fd)) {}
+ else if VG_INT_CLO (arg, "--sanity-level", VG_(clo_sanity_level)) {}
+ else if VG_BINT_CLO(arg, "--num-callers", VG_(clo_backtrace_size), 1,
+ VG_DEEPEST_BACKTRACE) {}
+
+ else if VG_XACT_CLO(arg, "--smc-check=none", VG_(clo_smc_check),
+ Vg_SmcNone);
+ else if VG_XACT_CLO(arg, "--smc-check=stack", VG_(clo_smc_check),
+ Vg_SmcStack);
+ else if VG_XACT_CLO(arg, "--smc-check=all", VG_(clo_smc_check),
+ Vg_SmcAll);
+
+ else if VG_STR_CLO (arg, "--kernel-variant", VG_(clo_kernel_variant)) {}
+
+ else if VG_BINT_CLO(arg, "--vex-iropt-verbosity",
+ VG_(clo_vex_control).iropt_verbosity, 0, 10) {}
+ else if VG_BINT_CLO(arg, "--vex-iropt-level",
+ VG_(clo_vex_control).iropt_level, 0, 2) {}
+ else if VG_BOOL_CLO(arg, "--vex-iropt-precise-memory-exns",
+ VG_(clo_vex_control).iropt_precise_memory_exns) {}
+ else if VG_BINT_CLO(arg, "--vex-iropt-unroll-thresh",
+ VG_(clo_vex_control).iropt_unroll_thresh, 0, 400) {}
+ else if VG_BINT_CLO(arg, "--vex-guest-max-insns",
+ VG_(clo_vex_control).guest_max_insns, 1, 100) {}
+ else if VG_BINT_CLO(arg, "--vex-guest-chase-thresh",
+ VG_(clo_vex_control).guest_chase_thresh, 0, 99) {}
+
+ else if VG_INT_CLO(arg, "--log-fd", tmp_log_fd) {
+ log_to = VgLogTo_Fd;
+ VG_(clo_log_name) = NULL;
+ }
+
+ else if VG_STR_CLO(arg, "--log-file", VG_(clo_log_name)) {
+ log_to = VgLogTo_File;
+ }
+
+ else if VG_STR_CLO(arg, "--log-socket", VG_(clo_log_name)) {
+ log_to = VgLogTo_Socket;
+ }
+
+ else if VG_STR_CLO(arg, "--xml-user-comment",
+ VG_(clo_xml_user_comment)) {}
+
+ else if VG_STR_CLO(arg, "--suppressions", tmp_str) {
+ if (VG_(clo_n_suppressions) >= VG_CLO_MAX_SFILES) {
+ VG_(message)(Vg_UserMsg, "Too many suppression files specified.");
+ VG_(message)(Vg_UserMsg,
+ "Increase VG_CLO_MAX_SFILES and recompile.");
+ VG_(err_bad_option)(arg);
+ }
+ VG_(clo_suppressions)[VG_(clo_n_suppressions)] = tmp_str;
+ VG_(clo_n_suppressions)++;
+ }
+
+ /* "stuvwxyz" --> stuvwxyz (binary) */
+ else if VG_STR_CLO(arg, "--trace-flags", tmp_str) {
+ Int j;
+
+ if (8 != VG_(strlen)(tmp_str)) {
+ VG_(message)(Vg_UserMsg,
+ "--trace-flags argument must have 8 digits");
+ VG_(err_bad_option)(arg);
+ }
+ for (j = 0; j < 8; j++) {
+ if ('0' == tmp_str[j]) { /* do nothing */ }
+ else if ('1' == tmp_str[j]) VG_(clo_trace_flags) |= (1 << (7-j));
+ else {
+ VG_(message)(Vg_UserMsg, "--trace-flags argument can only "
+ "contain 0s and 1s");
+ VG_(err_bad_option)(arg);
+ }
+ }
+ }
+
+ /* "stuvwxyz" --> stuvwxyz (binary) */
+ else if VG_STR_CLO(arg, "--profile-flags", tmp_str) {
+ Int j;
+
+ if (8 != VG_(strlen)(tmp_str)) {
+ VG_(message)(Vg_UserMsg,
+ "--profile-flags argument must have 8 digits");
+ VG_(err_bad_option)(arg);
+ }
+ for (j = 0; j < 8; j++) {
+ if ('0' == tmp_str[j]) { /* do nothing */ }
+ else if ('1' == tmp_str[j]) VG_(clo_profile_flags) |= (1 << (7-j));
+ else {
+ VG_(message)(Vg_UserMsg, "--profile-flags argument can only "
+ "contain 0s and 1s");
+ VG_(err_bad_option)(arg);
+ }
+ }
+ }
+
+ else if VG_INT_CLO (arg, "--trace-notbelow", VG_(clo_trace_notbelow)) {}
+
+ else if VG_XACT_CLO(arg, "--gen-suppressions=no",
+ VG_(clo_gen_suppressions), 0) {}
+ else if VG_XACT_CLO(arg, "--gen-suppressions=yes",
+ VG_(clo_gen_suppressions), 1) {}
+ else if VG_XACT_CLO(arg, "--gen-suppressions=all",
+ VG_(clo_gen_suppressions), 2) {}
+
+ else if ( ! VG_(needs).command_line_options
+ || ! VG_TDICT_CALL(tool_process_cmd_line_option, arg) ) {
+ VG_(err_bad_option)(arg);
+ }
+ }
+
+ /* Make VEX control parameters sane */
+
+ if (VG_(clo_vex_control).guest_chase_thresh
+ >= VG_(clo_vex_control).guest_max_insns)
+ VG_(clo_vex_control).guest_chase_thresh
+ = VG_(clo_vex_control).guest_max_insns - 1;
+
+ if (VG_(clo_vex_control).guest_chase_thresh < 0)
+ VG_(clo_vex_control).guest_chase_thresh = 0;
+
+ /* Check various option values */
+
+ if (VG_(clo_verbosity) < 0)
+ VG_(clo_verbosity) = 0;
+
+ if (VG_(clo_gen_suppressions) > 0 &&
+ !VG_(needs).core_errors && !VG_(needs).tool_errors) {
+ VG_(message)(Vg_UserMsg,
+ "Can't use --gen-suppressions= with this tool,");
+ VG_(message)(Vg_UserMsg,
+ "as it doesn't generate errors.");
+ VG_(err_bad_option)("--gen-suppressions=");
+ }
+
+ /* If we've been asked to emit XML, mash around various other
+ options so as to constrain the output somewhat, and to remove
+ any need for user input during the run. */
+ if (VG_(clo_xml)) {
+ /* Disable suppression generation (requires user input) */
+ VG_(clo_gen_suppressions) = 0;
+ /* Disable attaching to GDB (requires user input) */
+ VG_(clo_db_attach) = False;
+ /* Set a known verbosity level */
+ VG_(clo_verbosity) = 1;
+ /* Disable error limits (this might be a bad idea!) */
+ VG_(clo_error_limit) = False;
+ /* Disable emulation warnings */
+ VG_(clo_show_emwarns) = False;
+ /* Disable waiting for GDB to debug Valgrind */
+ VG_(clo_wait_for_gdb) = False;
+ /* No file-descriptor leak checking yet */
+ VG_(clo_track_fds) = False;
+ /* Disable timestamped output */
+ VG_(clo_time_stamp) = False;
+ /* Disable heap profiling, since that prints lots of stuff. */
+ VG_(clo_profile_heap) = False;
+ /* Also, we want to set options for the leak checker, but that
+ will have to be done in Memcheck's flag-handling code, not
+ here. */
+ }
+
+ /* All non-logging-related options have been checked. If the logging
+ option specified is ok, we can switch to it, as we know we won't
+ have to generate any other command-line-related error messages.
+ (So far we should be still attached to stderr, so we can show on
+ the terminal any problems to do with processing command line
+ opts.)
+
+ So set up logging now. After this is done, VG_(clo_log_fd)
+ should be connected to whatever sink has been selected, and we
+ indiscriminately chuck stuff into it without worrying what the
+ nature of it is. Oh the wonder of Unix streams. */
+
+ vg_assert(VG_(clo_log_fd) == 2 /* stderr */);
+ vg_assert(VG_(logging_to_socket) == False);
+
+ switch (log_to) {
+
+ case VgLogTo_Fd:
+ vg_assert(VG_(clo_log_name) == NULL);
+ break;
+
+ case VgLogTo_File: {
+ Char* logfilename;
+
+ vg_assert(VG_(clo_log_name) != NULL);
+ vg_assert(VG_(strlen)(VG_(clo_log_name)) <= 900); /* paranoia */
+
+ // Nb: we overwrite an existing file of this name without asking
+ // any questions.
+ logfilename = VG_(expand_file_name)("--log-file", VG_(clo_log_name));
+ sres = VG_(open)(logfilename,
+ VKI_O_CREAT|VKI_O_WRONLY|VKI_O_TRUNC,
+ VKI_S_IRUSR|VKI_S_IWUSR);
+ if (!sres.isError) {
+ tmp_log_fd = sres.res;
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "Can't create log file '%s' (%s); giving up!",
+ logfilename, VG_(strerror)(sres.err));
+ VG_(err_bad_option)(
+ "--log-file=<file> (didn't work out for some reason.)");
+ /*NOTREACHED*/
+ }
+ break; /* switch (VG_(clo_log_to)) */
+ }
+
+ case VgLogTo_Socket: {
+ vg_assert(VG_(clo_log_name) != NULL);
+ vg_assert(VG_(strlen)(VG_(clo_log_name)) <= 900); /* paranoia */
+ tmp_log_fd = VG_(connect_via_socket)( VG_(clo_log_name) );
+ if (tmp_log_fd == -1) {
+ VG_(message)(Vg_UserMsg,
+ "Invalid --log-socket=ipaddr or --log-socket=ipaddr:port spec");
+ VG_(message)(Vg_UserMsg,
+ "of '%s'; giving up!", VG_(clo_log_name) );
+ VG_(err_bad_option)(
+ "--log-socket=");
+ /*NOTREACHED*/
+ }
+ if (tmp_log_fd == -2) {
+ VG_(message)(Vg_UserMsg,
+ "valgrind: failed to connect to logging server '%s'.",
+ VG_(clo_log_name) );
+ VG_(message)(Vg_UserMsg,
+ "Log messages will sent to stderr instead." );
+ VG_(message)(Vg_UserMsg,
+ "" );
+ /* We don't change anything here. */
+ vg_assert(VG_(clo_log_fd) == 2);
+ tmp_log_fd = 2;
+ } else {
+ vg_assert(tmp_log_fd > 0);
+ VG_(logging_to_socket) = True;
+ }
+ break;
+ }
+ }
+
+
+ /* Check that the requested tool actually supports XML output. */
+ if (VG_(clo_xml) && !VG_(needs).xml_output) {
+ VG_(clo_xml) = False;
+ VG_(message)(Vg_UserMsg,
+ "%s does not support XML output.", VG_(details).name);
+ VG_(err_bad_option)("--xml=yes");
+ /*NOTREACHED*/
+ }
+
+ if (tmp_log_fd >= 0) {
+ // Move log_fd into the safe range, so it doesn't conflict with any app fds.
+ tmp_log_fd = VG_(fcntl)(tmp_log_fd, VKI_F_DUPFD, VG_(fd_hard_limit));
+ if (tmp_log_fd < 0) {
+ VG_(message)(Vg_UserMsg, "valgrind: failed to move logfile fd into safe range, using stderr");
+ VG_(clo_log_fd) = 2; // stderr
+ } else {
+ VG_(clo_log_fd) = tmp_log_fd;
+ VG_(fcntl)(VG_(clo_log_fd), VKI_F_SETFD, VKI_FD_CLOEXEC);
+ }
+ } else {
+ // If they said --log-fd=-1, don't print anything. Plausible for use in
+ // regression testing suites that use client requests to count errors.
+ VG_(clo_log_fd) = tmp_log_fd;
+ }
+
+ if (VG_(clo_n_suppressions) < VG_CLO_MAX_SFILES-1 &&
+ (VG_(needs).core_errors || VG_(needs).tool_errors)) {
+ /* If we haven't reached the max number of suppressions, load
+ the default one. */
+ static const Char default_supp[] = "default.supp";
+ Int len = VG_(strlen)(VG_(libdir)) + 1 + sizeof(default_supp);
+ Char *buf = VG_(arena_malloc)(VG_AR_CORE, "main.mpclo.2", len);
+ VG_(sprintf)(buf, "%s/%s", VG_(libdir), default_supp);
+ VG_(clo_suppressions)[VG_(clo_n_suppressions)] = buf;
+ VG_(clo_n_suppressions)++;
+ }
+
+ return (log_to == VgLogTo_Fd);
+}
+
+// Write the name and value of log file qualifiers to the xml file.
+// Scans 'format' for "%q{VAR}" occurrences and, for each one, emits a
+// <logfilequalifier> element giving VAR and its environment value.
+// The closing '}' is temporarily overwritten with NUL so the variable
+// name can be passed to VG_(getenv) in place; it is restored before
+// continuing, so 'format' is unchanged on return.
+static void print_file_vars(Char* format)
+{
+   Int i = 0;
+
+   while (format[i]) {
+      if (format[i] == '%') {
+         // We saw a '%'.  What's next...
+         i++;
+         if ('q' == format[i]) {
+            i++;
+            if ('{' == format[i]) {
+               // Get the env var name, print its contents.
+               Char* qualname;
+               Char* qual;
+               i++;
+               qualname = &format[i];
+               while (True) {
+                  if (0 == format[i]) {
+                     // Fix: previously this scan did not check for the
+                     // string terminator, so a malformed "%q{VAR" with no
+                     // closing '}' walked past the end of the string.
+                     VG_(message)(Vg_UserMsg,
+                        "Missing '}' after '%%q{' in log file name");
+                     return;
+                  }
+                  if ('}' == format[i]) {
+                     // Temporarily replace the '}' with NUL to extract var
+                     // name.
+                     format[i] = 0;
+                     qual = VG_(getenv)(qualname);
+                     break;
+                  }
+                  i++;
+               }
+
+               // VG_(getenv) yields NULL when VAR is unset; print an
+               // empty value in that case rather than passing NULL to %t.
+               VG_(message_no_f_c)(Vg_UserMsg,
+                  "<logfilequalifier> <var>%t</var> "
+                  "<value>%t</value> </logfilequalifier>",
+                  qualname, qual ? qual : (Char*)"");
+               format[i] = '}';
+               i++;
+            }
+         }
+      } else {
+         i++;
+      }
+   }
+}
+
+
+/*====================================================================*/
+/*=== Printing the preamble ===*/
+/*====================================================================*/
+
+/* Ok, the logging sink is running now.  Print a suitable preamble.
+   If logging to file or a socket, write details of parent PID and
+   command line args, to help people trying to interpret the
+   results of a run which encompasses multiple processes.
+
+   'logging_to_fd' being True suppresses the PID/command-line trailer
+   (only needed when output may mix several processes); 'toolname' is
+   the name of the tool in use, e.g. "memcheck". */
+static void print_preamble(Bool logging_to_fd, const char* toolname)
+{
+   HChar* xpre  = VG_(clo_xml) ? " <line>" : "";
+   HChar* xpost = VG_(clo_xml) ? "</line>" : "";
+   Int i;
+
+   vg_assert( VG_(args_for_client) );
+   vg_assert( VG_(args_for_valgrind) );
+   vg_assert( toolname );
+
+   if (VG_(clo_xml)) {
+      VG_(message)(Vg_UserMsg, "<?xml version=\"1.0\"?>");
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "<valgrindoutput>");
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "<protocolversion>3</protocolversion>");
+      VG_(message)(Vg_UserMsg, "");
+   }
+
+   /* Fixed: was "VG_(clo_verbosity > 0)", which put the comparison
+      inside the VG_() name-mangling macro and only compiled by
+      accident of token pasting. */
+   if (VG_(clo_verbosity) > 0) {
+
+      if (VG_(clo_xml))
+         VG_(message)(Vg_UserMsg, "<preamble>");
+
+      /* Tool details */
+      VG_(message)(Vg_UserMsg, "%s%s%s%s, %s.%s",
+                   xpre,
+                   VG_(details).name,
+                   NULL == VG_(details).version ? "" : "-",
+                   NULL == VG_(details).version
+                      ? (Char*)"" : VG_(details).version,
+                   VG_(details).description,
+                   xpost);
+
+      /* Experimental tools are named "exp-*"; warn about their status. */
+      if (VG_(strlen)(toolname) >= 4
+          && 0 == VG_(strncmp)(toolname, "exp-", 4)) {
+         VG_(message)(
+            Vg_UserMsg,
+            "%sNOTE: This is an Experimental-Class Valgrind Tool.%s",
+            xpre, xpost
+         );
+      }
+
+      VG_(message)(Vg_UserMsg, "%s%s%s",
+                   xpre, VG_(details).copyright_author, xpost);
+
+      /* Core details */
+      VG_(message)(Vg_UserMsg,
+         "%sUsing LibVEX rev %s, a library for dynamic binary translation.%s",
+         xpre, LibVEX_Version(), xpost );
+      VG_(message)(Vg_UserMsg,
+         "%sCopyright (C) 2004-2009, and GNU GPL'd, by OpenWorks LLP.%s",
+         xpre, xpost );
+      VG_(message)(Vg_UserMsg,
+         "%sUsing valgrind-%s, a dynamic binary instrumentation framework.%s",
+         xpre, VERSION, xpost);
+      VG_(message)(Vg_UserMsg,
+         "%sCopyright (C) 2000-2009, and GNU GPL'd, by Julian Seward et al.%s",
+         xpre, xpost );
+
+      if (VG_(clo_verbosity) == 1 && !VG_(clo_xml))
+         VG_(message)(Vg_UserMsg, "For more details, rerun with: -v");
+
+      if (VG_(clo_xml))
+         VG_(message)(Vg_UserMsg, "</preamble>");
+   }
+
+   /* Non-XML, non-fd logging: identify this process and its args, since
+      the log may cover multiple processes. */
+   if (!VG_(clo_xml) && VG_(clo_verbosity) > 0 && !logging_to_fd) {
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg,
+         "My PID = %d, parent PID = %d.  Prog and args are:",
+         VG_(getpid)(), VG_(getppid)() );
+      if (VG_(args_the_exename))
+         VG_(message)(Vg_UserMsg, "   %s", VG_(args_the_exename));
+      for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++)
+         VG_(message)(Vg_UserMsg,
+                      "   %s",
+                      * (HChar**) VG_(indexXA)( VG_(args_for_client), i ));
+   }
+   else
+   if (VG_(clo_xml)) {
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "<pid>%d</pid>", VG_(getpid)());
+      VG_(message)(Vg_UserMsg, "<ppid>%d</ppid>", VG_(getppid)());
+      VG_(message_no_f_c)(Vg_UserMsg, "<tool>%t</tool>", toolname);
+      if (VG_(clo_log_name))
+         print_file_vars(VG_(clo_log_name));
+      if (VG_(clo_xml_user_comment)) {
+         /* Note: the user comment itself is XML and is therefore to
+            be passed through verbatim (%s) rather than escaped
+            (%t). */
+         VG_(message)(Vg_UserMsg, "<usercomment>%s</usercomment>",
+                                  VG_(clo_xml_user_comment));
+      }
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "<args>");
+
+      VG_(message)(Vg_UserMsg, "  <vargv>");
+      if (VG_(name_of_launcher))
+         VG_(message_no_f_c)(Vg_UserMsg, "    <exe>%t</exe>",
+                                         VG_(name_of_launcher));
+      else
+         VG_(message_no_f_c)(Vg_UserMsg, "    <exe>%t</exe>",
+                                         "(launcher name unknown)");
+      for (i = 0; i < VG_(sizeXA)( VG_(args_for_valgrind) ); i++) {
+         VG_(message_no_f_c)(Vg_UserMsg,
+                             "    <arg>%t</arg>",
+                             * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i ));
+      }
+      VG_(message)(Vg_UserMsg, "  </vargv>");
+
+      VG_(message)(Vg_UserMsg, "  <argv>");
+      if (VG_(args_the_exename))
+         VG_(message_no_f_c)(Vg_UserMsg, "    <exe>%t</exe>",
+                                         VG_(args_the_exename));
+      for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
+         VG_(message_no_f_c)(Vg_UserMsg,
+                             "    <arg>%t</arg>",
+                             * (HChar**) VG_(indexXA)( VG_(args_for_client), i ));
+      }
+      VG_(message)(Vg_UserMsg, "  </argv>");
+
+      VG_(message)(Vg_UserMsg, "</args>");
+   }
+
+   // Empty line after the preamble
+   if (VG_(clo_verbosity) > 0)
+      VG_(message)(Vg_UserMsg, "");
+
+   /* -v -v: dump command line, startup flags, kernel version and
+      CPU/page-size details for bug reports. */
+   if (VG_(clo_verbosity) > 1) {
+      SysRes fd;
+      VexArch vex_arch;
+      VexArchInfo vex_archinfo;
+      if (!logging_to_fd)
+         VG_(message)(Vg_DebugMsg, "");
+      VG_(message)(Vg_DebugMsg, "Command line");
+      if (VG_(args_the_exename))
+         VG_(message)(Vg_DebugMsg, "   %s", VG_(args_the_exename));
+      for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++)
+         VG_(message)(Vg_DebugMsg,
+                     "   %s",
+                     * (HChar**) VG_(indexXA)( VG_(args_for_client), i ));
+
+      VG_(message)(Vg_DebugMsg, "Startup, with flags:");
+      for (i = 0; i < VG_(sizeXA)( VG_(args_for_valgrind) ); i++) {
+         VG_(message)(Vg_DebugMsg,
+                     "   %s",
+                     * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i ));
+      }
+
+      VG_(message)(Vg_DebugMsg, "Contents of /proc/version:");
+      fd = VG_(open) ( "/proc/version", VKI_O_RDONLY, 0 );
+      if (fd.isError) {
+         VG_(message)(Vg_DebugMsg, "  can't open /proc/version");
+      } else {
+#        define BUF_LEN    256
+         Char version_buf[BUF_LEN];
+         Int n = VG_(read) ( fd.res, version_buf, BUF_LEN );
+         vg_assert(n <= BUF_LEN);
+         if (n > 0) {
+            version_buf[n-1] = '\0';
+            VG_(message)(Vg_DebugMsg, "  %s", version_buf);
+         } else {
+            VG_(message)(Vg_DebugMsg, "  (empty?)");
+         }
+         VG_(close)(fd.res);
+#        undef BUF_LEN
+      }
+
+      VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
+      VG_(message)(
+         Vg_DebugMsg,
+         "Arch and hwcaps: %s, %s",
+         LibVEX_ppVexArch   ( vex_arch ),
+         LibVEX_ppVexHwCaps ( vex_arch, vex_archinfo.hwcaps )
+      );
+      VG_(message)(
+         Vg_DebugMsg,
+         "Page sizes: currently %d, max supported %d",
+         (Int)VKI_PAGE_SIZE, (Int)VKI_MAX_PAGE_SIZE
+      );
+      VG_(message)(Vg_DebugMsg, "Valgrind library directory: %s", VG_(libdir));
+   }
+}
+
+
+/*====================================================================*/
+/*=== File descriptor setup ===*/
+/*====================================================================*/
+
+/* Number of file descriptors that Valgrind tries to reserve for
+ it's own use - just a small constant. */
+#define N_RESERVED_FDS (10)
+
+/* Reserve N_RESERVED_FDS file descriptors for Valgrind's own use by
+   raising the soft RLIMIT_NOFILE (within the hard limit) and recording
+   the boundary in VG_(fd_soft_limit)/VG_(fd_hard_limit).  Descriptors
+   at or above that boundary belong to V; everything below is left for
+   the client.  Finally move the client executable's fd, if any, into
+   the safe range. */
+static void setup_file_descriptors(void)
+{
+   struct vki_rlimit lim;
+   Bool trace = False;   /* flip on for limit-debugging output */
+
+   /* Query the host's fd limits; if that fails, assume a classic
+      1024/1024 configuration. */
+   if (VG_(getrlimit)(VKI_RLIMIT_NOFILE, &lim) < 0) {
+      lim.rlim_cur = 1024;
+      lim.rlim_max = 1024;
+   }
+
+   if (trace)
+      VG_(printf)("fd limits: host, before: cur %lu max %lu\n",
+                  (UWord)lim.rlim_cur, (UWord)lim.rlim_max);
+
+#  if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+   /* I don't know why this kludge is needed; however if lim.rlim_cur
+      is RLIM_INFINITY, then VG_(safe_fd)'s attempts using VG_(fcntl)
+      to lift V's file descriptors above the threshold RLIM_INFINITY -
+      N_RESERVED_FDS fail.  So just use a relatively conservative
+      value in this case. */
+   if (lim.rlim_cur > 1024)
+      lim.rlim_cur = 1024;
+#  endif
+
+   /* New soft limit: current soft limit plus the reservation, clamped
+      to the hard limit. */
+   lim.rlim_cur = (lim.rlim_cur + N_RESERVED_FDS <= lim.rlim_max)
+                     ? lim.rlim_cur + N_RESERVED_FDS
+                     : lim.rlim_max;
+
+   /* Everything from (new soft limit - N_RESERVED_FDS) upwards is
+      reserved for Valgrind. */
+   VG_(fd_soft_limit) = lim.rlim_cur - N_RESERVED_FDS;
+   VG_(fd_hard_limit) = lim.rlim_cur - N_RESERVED_FDS;
+
+   /* Install the raised soft limit. */
+   VG_(setrlimit)(VKI_RLIMIT_NOFILE, &lim);
+
+   if (trace) {
+      VG_(printf)("fd limits: host, after: cur %lu max %lu\n",
+                  (UWord)lim.rlim_cur, (UWord)lim.rlim_max);
+      VG_(printf)("fd limits: guest : cur %u max %u\n",
+                  VG_(fd_soft_limit), VG_(fd_hard_limit));
+   }
+
+   /* Relocate the client-executable fd into the reserved range. */
+   if (VG_(cl_exec_fd) != -1)
+      VG_(cl_exec_fd) = VG_(safe_fd)( VG_(cl_exec_fd) );
+}
+
+
+/*====================================================================*/
+/*=== BB profiling ===*/
+/*====================================================================*/
+
+/* Emit the BB-profile report in two passes over 'tops': first a
+   summary table of scores (absolute and cumulative percentages), then
+   a detail section which, for every ranked BB, discards its existing
+   translation and re-translates it so the generated code is printed.
+   Entries with a zero address are placeholders and are ignored. */
+static
+void show_BB_profile ( BBProfEntry tops[], UInt n_tops, ULong score_total )
+{
+   ULong cumul, here;
+   Char  buf_cumul[10], buf_here[10];
+   Char  fn_name[64];
+   Int   ix;
+
+   VG_(printf)("\n");
+   VG_(printf)("-----------------------------------------------------------\n");
+   VG_(printf)("--- BEGIN BB Profile (summary of scores) ---\n");
+   VG_(printf)("-----------------------------------------------------------\n");
+   VG_(printf)("\n");
+
+   VG_(printf)("Total score = %lld\n\n", score_total);
+
+   /* Pass 1: summary table. */
+   cumul = 0;
+   for (ix = 0; ix < n_tops; ix++) {
+      if (tops[ix].addr != 0) {
+         fn_name[0] = 0;
+         VG_(get_fnname_w_offset)(tops[ix].addr, fn_name, 64);
+         fn_name[63] = 0;   /* ensure NUL termination */
+         here   = tops[ix].score;
+         cumul += here;
+         VG_(percentify)(cumul, score_total, 2, 6, buf_cumul);
+         VG_(percentify)(here,  score_total, 2, 6, buf_here);
+         VG_(printf)("%3d: (%9lld %s) %9lld %s 0x%llx %s\n",
+                     ix,
+                     cumul, buf_cumul,
+                     here, buf_here, tops[ix].addr, fn_name );
+      }
+   }
+
+   VG_(printf)("\n");
+   VG_(printf)("-----------------------------------------------------------\n");
+   VG_(printf)("--- BB Profile (BB details) ---\n");
+   VG_(printf)("-----------------------------------------------------------\n");
+   VG_(printf)("\n");
+
+   /* Pass 2: per-BB detail, recomputing the same running totals. */
+   cumul = 0;
+   for (ix = 0; ix < n_tops; ix++) {
+      if (tops[ix].addr != 0) {
+         fn_name[0] = 0;
+         VG_(get_fnname_w_offset)(tops[ix].addr, fn_name, 64);
+         fn_name[63] = 0;
+         here   = tops[ix].score;
+         cumul += here;
+         VG_(percentify)(cumul, score_total, 2, 6, buf_cumul);
+         VG_(percentify)(here,  score_total, 2, 6, buf_here);
+         VG_(printf)("\n");
+         VG_(printf)("=-=-=-=-=-=-=-=-=-=-=-=-=-= begin BB rank %d "
+                     "=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n", ix);
+         VG_(printf)("%3d: (%9lld %s) %9lld %s 0x%llx %s\n",
+                     ix,
+                     cumul, buf_cumul,
+                     here, buf_here, tops[ix].addr, fn_name );
+         VG_(printf)("\n");
+         /* Force a fresh translation so its code gets dumped. */
+         VG_(discard_translations)(tops[ix].addr, 1, "bb profile");
+         VG_(translate)(0, tops[ix].addr, True, VG_(clo_profile_flags), 0, True);
+         VG_(printf)("=-=-=-=-=-=-=-=-=-=-=-=-=-= end BB rank %d "
+                     "=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n", ix);
+      }
+   }
+
+   VG_(printf)("\n");
+   VG_(printf)("-----------------------------------------------------------\n");
+   VG_(printf)("--- END BB Profile ---\n");
+   VG_(printf)("-----------------------------------------------------------\n");
+   VG_(printf)("\n");
+}
+
+
+/*====================================================================*/
+/*=== main() ===*/
+/*====================================================================*/
+
+/* When main() is entered, we should be on the following stack, not
+ the one the kernel gave us. We will run on this stack until
+ simulation of the root thread is started, at which point a transfer
+ is made to a dynamically allocated stack. This is for the sake of
+ uniform overflow detection for all Valgrind threads. This is
+ marked global even though it isn't, because assembly code below
+ needs to reference the name. */
+
+/*static*/ VgStack VG_(interim_stack);
+
+/* These are the structures used to hold info for creating the initial
+ client image.
+
+ 'iicii' mostly holds important register state present at system
+ startup (_start_valgrind). valgrind_main() then fills in the rest
+ of it and passes it to VG_(ii_create_image)(). That produces
+ 'iifii', which is later handed to VG_(ii_finalise_image). */
+
+/* In all OS-instantiations, the_iicii has a field .sp_at_startup.
+ This should get some address inside the stack on which we gained
+ control (eg, it could be the SP at startup). It doesn't matter
+ exactly where in the stack it is. This value is passed to the
+ address space manager at startup. On Linux, aspacem then uses it
+ to identify the initial stack segment and hence the upper end of
+ the usable address space. */
+
+static IICreateImageInfo the_iicii;
+static IIFinaliseImageInfo the_iifii;
+
+
+/* A simple pair structure, used for conveying debuginfo handles to
+ calls to VG_TRACK(new_mem_startup, ...). */
+typedef struct { Addr a; ULong ull; } Addr_n_ULong;
+
+
+/* --- Forwards decls to do with shutdown --- */
+
+static void final_tidyup(ThreadId tid);
+
+/* Do everything which needs doing when the last thread exits */
+static
+void shutdown_actions_NORETURN( ThreadId tid,
+ VgSchedReturnCode tids_schedretcode );
+
+/* --- end of Forwards decls to do with shutdown --- */
+
+
+/* TODO: GIVE THIS A PROPER HOME
+ TODO: MERGE THIS WITH DUPLICATE IN mc_leakcheck.c and coredump-elf.c.
+ Extract from aspacem a vector of the current segment start
+ addresses. The vector is dynamically allocated and should be freed
+ by the caller when done. REQUIRES m_mallocfree to be running.
+ Writes the number of addresses required into *n_acquired. */
+
+/* Obtain a malloc'd vector of all current segment start addresses from
+   aspacem, growing the buffer geometrically until everything fits.
+   On success *n_acquired holds the count and the caller frees the
+   vector; on allocation failure *n_acquired is 0 and NULL is
+   returned. */
+static Addr* get_seg_starts ( /*OUT*/Int* n_acquired )
+{
+   Addr* starts;
+   Int   capacity, got = 0;
+
+   /* Begin with room for a single address; double until aspacem
+      reports a non-negative (complete) count. */
+   for (capacity = 1; True; capacity *= 2) {
+      starts = VG_(malloc)( "main.gss.1", capacity * sizeof(Addr) );
+      if (starts == NULL)
+         break;                 /* out of memory */
+      got = VG_(am_get_segment_starts)( starts, capacity );
+      if (got >= 0)
+         break;                 /* everything fitted */
+      VG_(free)(starts);        /* too small: discard and retry bigger */
+   }
+
+   if (starts == NULL) {
+      *n_acquired = 0;
+      return NULL;
+   }
+
+   *n_acquired = got;
+   return starts;
+}
+
+
+/* By the time we get to valgrind_main, the_iicii should already have
+ been filled in with any important details as required by whatever
+ OS we have been built for.
+*/
+static
+Int valgrind_main ( Int argc, HChar **argv, HChar **envp )
+{
+ HChar* toolname = "memcheck"; // default to Memcheck
+ Int need_help = 0; // 0 = no, 1 = --help, 2 = --help-debug
+ ThreadId tid_main = VG_INVALID_THREADID;
+ Int loglevel, i;
+ Bool logging_to_fd;
+ struct vki_rlimit zero = { 0, 0 };
+ XArray* addr2dihandle = NULL;
+
+ //============================================================
+ //
+ // Nb: startup is complex. Prerequisites are shown at every step.
+ // *** Be very careful when messing with the order ***
+ //
+ // The first order of business is to get debug logging, the address
+ // space manager and the dynamic memory manager up and running.
+ // Once that's done, we can relax a bit.
+ //
+ //============================================================
+
+ /* This is needed to make VG_(getenv) usable early. */
+ VG_(client_envp) = (Char**)envp;
+
+ //--------------------------------------------------------------
+ // Start up the logging mechanism
+ // p: none
+ //--------------------------------------------------------------
+ /* Start the debugging-log system ASAP. First find out how many
+ "-d"s were specified. This is a pre-scan of the command line. Also
+ get --profile-heap=yes which is needed by the time we start up dynamic
+ memory management. */
+ loglevel = 0;
+ for (i = 1; i < argc; i++) {
+ if (argv[i][0] != '-') break;
+ if VG_STREQ(argv[i], "--") break;
+ if VG_STREQ(argv[i], "-d") loglevel++;
+ if VG_BOOL_CLO(argv[i], "--profile-heap", VG_(clo_profile_heap)) {}
+ }
+
+ /* ... and start the debug logger. Now we can safely emit logging
+ messages all through startup. */
+ VG_(debugLog_startup)(loglevel, "Stage 2 (main)");
+ VG_(debugLog)(1, "main", "Welcome to Valgrind version "
+ VERSION " debug logging\n");
+
+ //--------------------------------------------------------------
+ // AIX5 only: register the system call numbers
+ // p: logging
+ // p: that the initial few syscall numbers stated in the
+ // bootblock have been installed (else we can't
+ // open/read/close).
+ //--------------------------------------------------------------
+# if defined(VGO_aix5)
+ VG_(debugLog)(1, "main", "aix5: registering syscalls ..\n");
+ { UChar sysent_name[50];
+ SysRes fd;
+ Bool ok;
+ Int n_unregd, sysent_used = 0;
+ prsysent_t* sysent_hdr;
+
+ VG_(sprintf)(sysent_name, "/proc/%d/sysent", VG_(getpid)());
+ fd = VG_(open)(sysent_name, VKI_O_RDONLY, 0);
+ if (fd.isError)
+ VG_(err_config_error)("aix5: can't open /proc/<pid>/sysent");
+
+ sysent_used = VG_(read)(fd.res, aix5_sysent_buf, VG_AIX5_SYSENT_SIZE);
+ if (sysent_used < 0)
+ VG_(err_config_error)("aix5: error reading /proc/<pid>/sysent");
+ if (sysent_used >= VG_AIX5_SYSENT_SIZE)
+ VG_(err_config_error)("aix5: VG_AIX5_SYSENT_SIZE is too low; "
+ "increase and recompile");
+ VG_(close)(fd.res);
+
+ vg_assert(sysent_used > 0 && sysent_used < VG_AIX5_SYSENT_SIZE);
+
+ sysent_hdr = (prsysent_t*)&aix5_sysent_buf[0];
+
+ n_unregd = 0;
+ for (i = 0; i < sysent_hdr->pr_nsyscalls; i++) {
+ UChar* name = &aix5_sysent_buf[ sysent_hdr
+ ->pr_syscall[i].pr_nameoff ];
+ UInt nmbr = sysent_hdr->pr_syscall[i].pr_number;
+ VG_(debugLog)(3, "main", "aix5: bind syscall %d to \"%s\"\n",
+ nmbr, name);
+ ok = VG_(aix5_register_syscall)(nmbr, name);
+ if (!ok)
+ n_unregd++;
+ if (!ok)
+ VG_(debugLog)(3, "main",
+ "aix5: bind FAILED: %d to \"%s\"\n",
+ nmbr, name);
+ }
+ VG_(debugLog)(1, "main", "aix5: .. %d syscalls known, %d unknown\n",
+ sysent_hdr->pr_nsyscalls - n_unregd, n_unregd );
+ VG_(debugLog)(1, "main", "aix5: __NR_AIX5_FAKE_SIGRETURN = %d\n",
+ __NR_AIX5_FAKE_SIGRETURN );
+ }
+# endif
+
+ //--------------------------------------------------------------
+ // Ensure we're on a plausible stack.
+ // p: logging
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Checking current stack is plausible\n");
+ { HChar* limLo = (HChar*)(&VG_(interim_stack).bytes[0]);
+ HChar* limHi = limLo + sizeof(VG_(interim_stack));
+ HChar* aLocal = (HChar*)&zero; /* any auto local will do */
+ if (aLocal < limLo || aLocal >= limHi) {
+ /* something's wrong. Stop. */
+ VG_(debugLog)(0, "main", "Root stack %p to %p, a local %p\n",
+ limLo, limHi, aLocal );
+ VG_(debugLog)(0, "main", "Valgrind: FATAL: "
+ "Initial stack switched failed.\n");
+ VG_(debugLog)(0, "main", " Cannot continue. Sorry.\n");
+ VG_(exit)(1);
+ }
+ }
+
+ //--------------------------------------------------------------
+ // Ensure we have a plausible pointer to the stack on which
+ // we gained control (not the current stack!)
+ // p: logging
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Checking initial stack was noted\n");
+ if (the_iicii.sp_at_startup == 0) {
+ VG_(debugLog)(0, "main", "Valgrind: FATAL: "
+ "Initial stack was not noted.\n");
+ VG_(debugLog)(0, "main", " Cannot continue. Sorry.\n");
+ VG_(exit)(1);
+ }
+
+ //--------------------------------------------------------------
+ // Start up the address space manager, and determine the
+ // approximate location of the client's stack
+ // p: logging, plausible-stack
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Starting the address space manager\n");
+ vg_assert(VKI_PAGE_SIZE == 4096 || VKI_PAGE_SIZE == 65536);
+ vg_assert(VKI_MAX_PAGE_SIZE == 4096 || VKI_MAX_PAGE_SIZE == 65536);
+ vg_assert(VKI_PAGE_SIZE <= VKI_MAX_PAGE_SIZE);
+ vg_assert(VKI_PAGE_SIZE == (1 << VKI_PAGE_SHIFT));
+ vg_assert(VKI_MAX_PAGE_SIZE == (1 << VKI_MAX_PAGE_SHIFT));
+ the_iicii.clstack_top = VG_(am_startup)( the_iicii.sp_at_startup );
+ VG_(debugLog)(1, "main", "Address space manager is running\n");
+
+ //--------------------------------------------------------------
+ // Start up the dynamic memory manager
+ // p: address space management
+ // p: getting --profile-heap
+ // In fact m_mallocfree is self-initialising, so there's no
+ // initialisation call to do. Instead, try a simple malloc/
+ // free pair right now to check that nothing is broken.
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Starting the dynamic memory manager\n");
+ { void* p = VG_(malloc)( "main.vm.1", 12345 );
+ if (p) VG_(free)( p );
+ }
+ VG_(debugLog)(1, "main", "Dynamic memory manager is running\n");
+
+ //============================================================
+ //
+ // Dynamic memory management is now available.
+ //
+ //============================================================
+
+ //--------------------------------------------------------------
+ // Initialise m_debuginfo
+ // p: dynamic memory allocation
+ VG_(debugLog)(1, "main", "Initialise m_debuginfo\n");
+ VG_(di_initialise)();
+
+ //--------------------------------------------------------------
+ // Look for alternative libdir
+ { HChar *cp = VG_(getenv)(VALGRIND_LIB);
+ if (cp != NULL)
+ VG_(libdir) = cp;
+ }
+
+ //--------------------------------------------------------------
+ // Extract the launcher name from the environment.
+ VG_(debugLog)(1, "main", "Getting stage1's name\n");
+ VG_(name_of_launcher) = VG_(getenv)(VALGRIND_LAUNCHER);
+ if (VG_(name_of_launcher) == NULL) {
+ VG_(printf)("valgrind: You cannot run '%s' directly.\n", argv[0]);
+ VG_(printf)("valgrind: You should use $prefix/bin/valgrind.\n");
+ VG_(exit)(1);
+ }
+
+ //--------------------------------------------------------------
+ // Get the current process datasize rlimit, and set it to zero.
+ // This prevents any internal uses of brk() from having any effect.
+ // We remember the old value so we can restore it on exec, so that
+ // child processes will have a reasonable brk value.
+ VG_(getrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
+ zero.rlim_max = VG_(client_rlimit_data).rlim_max;
+ VG_(setrlimit)(VKI_RLIMIT_DATA, &zero);
+
+ // Get the current process stack rlimit.
+ VG_(getrlimit)(VKI_RLIMIT_STACK, &VG_(client_rlimit_stack));
+
+ //--------------------------------------------------------------
+ // Figure out what sort of CPU we're on, and whether it is
+ // able to run V.
+ VG_(debugLog)(1, "main", "Get hardware capabilities ...\n");
+ { VexArch vex_arch;
+ VexArchInfo vex_archinfo;
+ Bool ok = VG_(machine_get_hwcaps)();
+ if (!ok) {
+ VG_(printf)("\n");
+ VG_(printf)("valgrind: fatal error: unsupported CPU.\n");
+ VG_(printf)(" Supported CPUs are:\n");
+ VG_(printf)(" * x86 (practically any; Pentium-I or above), "
+ "AMD Athlon or above)\n");
+ VG_(printf)(" * AMD Athlon64/Opteron\n");
+ VG_(printf)(" * PowerPC (most; ppc405 and above)\n");
+ VG_(printf)("\n");
+ VG_(exit)(1);
+ }
+ VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
+ VG_(debugLog)(
+ 1, "main", "... arch = %s, hwcaps = %s\n",
+ LibVEX_ppVexArch ( vex_arch ),
+ LibVEX_ppVexHwCaps ( vex_arch, vex_archinfo.hwcaps )
+ );
+ }
+
+ //--------------------------------------------------------------
+ // Record the working directory at startup
+ // p: none (Linux), getenv and sys_getpid work (AIX)
+ VG_(debugLog)(1, "main", "Getting the working directory at startup\n");
+ { Bool ok = VG_(record_startup_wd)();
+ if (!ok)
+ VG_(err_config_error)( "Can't establish current working "
+ "directory at startup");
+ }
+ { Char buf[VKI_PATH_MAX+1];
+ Bool ok = VG_(get_startup_wd)( buf, sizeof(buf) );
+ vg_assert(ok);
+ buf[VKI_PATH_MAX] = 0;
+ VG_(debugLog)(1, "main", "... %s\n", buf );
+ }
+
+ //============================================================
+ // Command line argument handling order:
+ // * If --help/--help-debug are present, show usage message
+ // (including the tool-specific usage)
+ // * (If no --tool option given, default to Memcheck)
+ // * Then, if client is missing, abort with error msg
+ // * Then, if any cmdline args are bad, abort with error msg
+ //============================================================
+
+ //--------------------------------------------------------------
+ // Split up argv into: C args, V args, V extra args, and exename.
+ // p: dynamic memory allocation
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Split up command line\n");
+ VG_(split_up_argv)( argc, argv );
+ vg_assert( VG_(args_for_valgrind) );
+ vg_assert( VG_(args_for_client) );
+ if (0) {
+ for (i = 0; i < VG_(sizeXA)( VG_(args_for_valgrind) ); i++)
+ VG_(printf)(
+ "varg %s\n",
+ * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i )
+ );
+ VG_(printf)(" exe %s\n", VG_(args_the_exename));
+ for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++)
+ VG_(printf)(
+ "carg %s\n",
+ * (HChar**) VG_(indexXA)( VG_(args_for_client), i )
+ );
+ }
+
+# if defined(VGO_aix5)
+ /* Tolerate ptraced-based launchers. They can't run 'no program'
+ if the user types "valgrind --help", so they run a do-nothing
+ program $prefix/bin/no_op_client_for_valgrind, and we catch that
+ here and turn it the exe name back into NULL. Then --help,
+ --version etc work as they should. */
+ if (VG_(args_the_exename)
+ && VG_(strstr)( VG_(args_the_exename), "/no_op_client_for_valgrind" )) {
+ VG_(args_the_exename) = NULL;
+ }
+# endif
+
+ //--------------------------------------------------------------
+ // Extract tool name and whether help has been requested.
+ // Note we can't print the help message yet, even if requested,
+ // because the tool has not been initialised.
+ // p: split_up_argv [for VG_(args_for_valgrind)]
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main",
+ "(early_) Process Valgrind's command line options\n");
+ early_process_cmd_line_options(&need_help, &toolname);
+
+ // Set default vex control params
+ LibVEX_default_VexControl(& VG_(clo_vex_control));
+
+ //--------------------------------------------------------------
+ // Load client executable, finding in $PATH if necessary
+ // p: early_process_cmd_line_options() [for 'exec', 'need_help',
+ // clo_max_stackframe,
+ // clo_main_stacksize]
+ // p: layout_remaining_space [so there's space]
+ //
+ // Set up client's environment
+ // p: set-libdir [for VG_(libdir)]
+ // p: early_process_cmd_line_options [for toolname]
+ //
+ // Setup client stack, eip, and VG_(client_arg[cv])
+ // p: load_client() [for 'info']
+ // p: fix_environment() [for 'env']
+ //
+ // Setup client data (brk) segment. Initially a 1-page segment
+ // which abuts a shrinkable reservation.
+ // p: load_client() [for 'info' and hence VG_(brk_base)]
+ //
+ // p: _start_in_C (for zeroing out the_iicii and putting some
+ // initial values into it)
+ //--------------------------------------------------------------
+ if (!need_help) {
+ VG_(debugLog)(1, "main", "Create initial image\n");
+
+# if defined(VGO_linux)
+ the_iicii.argv = argv;
+ the_iicii.envp = envp;
+ the_iicii.toolname = toolname;
+# elif defined(VGO_aix5)
+ /* the_iicii.intregs37 already set up */
+ /* the_iicii.bootblock already set up */
+ /* the_iicii.adler32_exp already set up */
+ /* the_iicii.sp_at_startup is irrelevant */
+ /* the_iicii.clstack_top is irrelevant */
+ the_iicii.toolname = toolname;
+# else
+# error "Uknown platform"
+# endif
+
+ /* NOTE: this call reads VG_(clo_main_stacksize). */
+ the_iifii = VG_(ii_create_image)( the_iicii );
+
+# if defined(VGO_aix5)
+ /* Tell aspacem where the initial client stack is, so that it
+ can later produce a faked-up NSegment in response to
+ VG_(am_find_nsegment) for that address range, if asked. */
+ /* NOTE: this call reads VG_(clo_main_stacksize). */
+ VG_(am_aix5_set_initial_client_sp)( the_iifii.initial_client_SP );
+ /* Now have a look at said fake segment, so we can find out
+ the size of it. */
+ { SizeT sz;
+ NSegment const* seg
+ = VG_(am_find_nsegment)( the_iifii.initial_client_SP );
+ vg_assert(seg);
+ sz = seg->end - seg->start + 1;
+ vg_assert(sz >= 0 && sz <= (256+1)*1024*1024); /* stay sane */
+ the_iifii.clstack_max_size = sz;
+ }
+# endif
+ }
+
+ //==============================================================
+ //
+ // Finished loading/setting up the client address space.
+ //
+ //==============================================================
+
+ //--------------------------------------------------------------
+ // setup file descriptors
+ // p: n/a
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Setup file descriptors\n");
+ setup_file_descriptors();
+
+ //--------------------------------------------------------------
+ // create the fake /proc/<pid>/cmdline file and then unlink it,
+ // but hold onto the fd, so we can hand it out to the client
+ // when it tries to open /proc/<pid>/cmdline for itself.
+ // p: setup file descriptors
+ //--------------------------------------------------------------
+ if (!need_help) {
+ HChar buf[50], buf2[50+64];
+ HChar nul[1];
+ Int fd, r;
+ HChar* exename;
+
+ VG_(debugLog)(1, "main", "Create fake /proc/<pid>/cmdline\n");
+
+ VG_(sprintf)(buf, "proc_%d_cmdline", VG_(getpid)());
+ fd = VG_(mkstemp)( buf, buf2 );
+ if (fd == -1)
+ VG_(err_config_error)("Can't create client cmdline file in /tmp.");
+
+ nul[0] = 0;
+ exename = VG_(args_the_exename) ? VG_(args_the_exename)
+ : "unknown_exename";
+ VG_(write)(fd, VG_(args_the_exename),
+ VG_(strlen)( VG_(args_the_exename) ));
+ VG_(write)(fd, nul, 1);
+
+ for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
+ HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_client), i );
+ VG_(write)(fd, arg, VG_(strlen)( arg ));
+ VG_(write)(fd, nul, 1);
+ }
+
+ /* Don't bother to seek the file back to the start; instead do
+ it every time a copy of it is given out (by PRE(sys_open)).
+ That is probably more robust across fork() etc. */
+
+ /* Now delete it, but hang on to the fd. */
+ r = VG_(unlink)( buf2 );
+ if (r)
+ VG_(err_config_error)("Can't delete client cmdline file in /tmp.");
+
+ VG_(cl_cmdline_fd) = fd;
+ }
+
+ //--------------------------------------------------------------
+ // Init tool part 1: pre_clo_init
+ // p: setup_client_stack() [for 'VG_(client_arg[cv]']
+ // p: setup_file_descriptors() [for 'VG_(fd_xxx_limit)']
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Initialise the tool part 1 (pre_clo_init)\n");
+ VG_(tl_pre_clo_init)();
+
+ //--------------------------------------------------------------
+ // If --tool and --help/--help-debug was given, now give the core+tool
+ // help message
+ // p: early_process_cmd_line_options() [for 'need_help']
+ // p: tl_pre_clo_init [for 'VG_(tdict).usage']
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Print help and quit, if requested\n");
+ if (need_help) {
+ usage_NORETURN(/*--help-debug?*/2 == need_help);
+ }
+
+ //--------------------------------------------------------------
+ // Process command line options to Valgrind + tool
+ // p: setup_client_stack() [for 'VG_(client_arg[cv]']
+ // p: setup_file_descriptors() [for 'VG_(fd_xxx_limit)']
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main",
+ "(main_) Process Valgrind's command line options, "
+ "setup logging\n");
+ logging_to_fd = main_process_cmd_line_options(toolname);
+
+ //--------------------------------------------------------------
+ // Zeroise the millisecond counter by doing a first read of it.
+ // p: none
+ //--------------------------------------------------------------
+ (void) VG_(read_millisecond_timer)();
+
+ //--------------------------------------------------------------
+ // Print the preamble
+ // p: tl_pre_clo_init [for 'VG_(details).name' and friends]
+ // p: main_process_cmd_line_options() [for VG_(clo_verbosity),
+ // VG_(clo_xml),
+ // logging_to_fd]
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Print the preamble...\n");
+ print_preamble(logging_to_fd, toolname);
+ VG_(debugLog)(1, "main", "...finished the preamble\n");
+
+ //--------------------------------------------------------------
+ // Init tool part 2: post_clo_init
+ // p: setup_client_stack() [for 'VG_(client_arg[cv]']
+ // p: setup_file_descriptors() [for 'VG_(fd_xxx_limit)']
+ // p: print_preamble() [so any warnings printed in post_clo_init
+ // are shown after the preamble]
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Initialise the tool part 2 (post_clo_init)\n");
+ VG_TDICT_CALL(tool_post_clo_init);
+ {
+ /* The tool's "needs" will by now be finalised, since it has no
+ further opportunity to specify them. So now sanity check
+ them. */
+ Char* s;
+ Bool ok;
+ ok = VG_(sanity_check_needs)( &s );
+ if (!ok) {
+ VG_(tool_panic)(s);
+ }
+ }
+
+ //--------------------------------------------------------------
+ // Initialise translation table and translation cache
+ // p: aspacem [??]
+ // p: tl_pre_clo_init [for 'VG_(details).avg_translation_sizeB']
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Initialise TT/TC\n");
+ VG_(init_tt_tc)();
+
+ //--------------------------------------------------------------
+ // Initialise the redirect table.
+ // p: init_tt_tc [so it can call VG_(search_transtab) safely]
+ // p: aspacem [so can change ownership of sysinfo pages]
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Initialise redirects\n");
+ VG_(redir_initialise)();
+
+ //--------------------------------------------------------------
+ // Allow GDB attach
+ // p: main_process_cmd_line_options() [for VG_(clo_wait_for_gdb)]
+ //--------------------------------------------------------------
+ /* Hook to delay things long enough so we can get the pid and
+ attach GDB in another shell. */
+ if (VG_(clo_wait_for_gdb)) {
+ Long iters;
+ volatile Long q;
+ VG_(debugLog)(1, "main", "Wait for GDB\n");
+ VG_(printf)("pid=%d, entering delay loop\n", VG_(getpid)());
+
+# if defined(VGP_x86_linux)
+ iters = 5;
+# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux)
+ iters = 10;
+# elif defined(VGP_ppc32_linux)
+ iters = 5;
+# elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ iters = 4;
+# else
+# error "Unknown plat"
+# endif
+
+ iters *= 1000*1000*1000;
+ for (q = 0; q < iters; q++)
+ ;
+ }
+
+ //--------------------------------------------------------------
+ // Search for file descriptors that are inherited from our parent
+ // p: main_process_cmd_line_options [for VG_(clo_track_fds)]
+ //--------------------------------------------------------------
+ if (VG_(clo_track_fds)) {
+ VG_(debugLog)(1, "main", "Init preopened fds\n");
+ VG_(init_preopened_fds)();
+ }
+
+ //--------------------------------------------------------------
+ // Load debug info for the existing segments.
+ // p: setup_code_redirect_table [so that redirs can be recorded]
+ // p: mallocfree
+ // p: probably: setup fds and process CLOs, so that logging works
+ // p: initialise m_debuginfo
+ //
+ // While doing this, make a note of the debuginfo-handles that
+ // come back from VG_(di_notify_mmap)/VG_(di_aix5_notify_segchange).
+ // Later, in "Tell the tool about the initial client memory permissions"
+ // (just below) we can then hand these handles off to the tool in
+ // calls to VG_TRACK(new_mem_startup, ...). This gives the tool the
+ // opportunity to make further queries to m_debuginfo before the
+ // client is started, if it wants. We put this information into an
+ // XArray, each handle along with the associated segment start address,
+ // and search the XArray for the handles later, when calling
+ // VG_TRACK(new_mem_startup, ...).
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Load initial debug info\n");
+
+ tl_assert(!addr2dihandle);
+ addr2dihandle = VG_(newXA)( VG_(malloc), "main.vm.2",
+ VG_(free), sizeof(Addr_n_ULong) );
+ tl_assert(addr2dihandle);
+
+# if defined(VGO_linux)
+ { Addr* seg_starts;
+ Int n_seg_starts;
+ Addr_n_ULong anu;
+
+ seg_starts = get_seg_starts( &n_seg_starts );
+ vg_assert(seg_starts && n_seg_starts >= 0);
+
+ /* show them all to the debug info reader. allow_SkFileV has to
+ be True here so that we read info from the valgrind executable
+ itself. */
+ for (i = 0; i < n_seg_starts; i++) {
+ anu.ull = VG_(di_notify_mmap)( seg_starts[i], True/*allow_SkFileV*/ );
+ /* anu.ull holds the debuginfo handle returned by di_notify_mmap,
+ if any. */
+ if (anu.ull > 0) {
+ anu.a = seg_starts[i];
+ VG_(addToXA)( addr2dihandle, &anu );
+ }
+ }
+
+ VG_(free)( seg_starts );
+ }
+# elif defined(VGO_aix5)
+ { AixCodeSegChange* changes;
+ Int changes_size, changes_used;
+ Addr_n_ULong anu;
+
+ /* Find out how many AixCodeSegChange records we will need,
+ and acquire them. */
+ changes_size = VG_(am_aix5_reread_procmap_howmany_directives)();
+ changes = VG_(malloc)("main.vm.3", changes_size * sizeof(AixCodeSegChange));
+ vg_assert(changes);
+
+ /* Now re-read /proc/<pid>/map and acquire a change set */
+ VG_(am_aix5_reread_procmap)( changes, &changes_used );
+ vg_assert(changes_used >= 0 && changes_used <= changes_size);
+
+ /* And notify m_debuginfo of the changes. */
+ for (i = 0; i < changes_used; i++) {
+ anu.ull = VG_(di_aix5_notify_segchange)(
+ changes[i].code_start,
+ changes[i].code_len,
+ changes[i].data_start,
+ changes[i].data_len,
+ changes[i].file_name,
+ changes[i].mem_name,
+ changes[i].is_mainexe,
+ changes[i].acquire
+ );
+ if (anu.ull > 0) {
+ tl_assert(changes[i].acquire);
+ anu.a = changes[i].code_start; /* is this correct? */
+ VG_(addToXA)( addr2dihandle, &anu );
+ }
+ }
+
+ VG_(free)(changes);
+ }
+# else
+# error Unknown OS
+# endif
+
+ //--------------------------------------------------------------
+ // Tell aspacem of ownership change of the asm helpers, so that
+ // m_translate allows them to be translated. However, only do this
+ // after the initial debug info read, since making a hole in the
+ // address range for the stage2 binary confuses the debug info reader.
+ // p: aspacem
+ //--------------------------------------------------------------
+ { Bool change_ownership_v_c_OK;
+ Addr co_start = VG_PGROUNDDN( (Addr)&VG_(trampoline_stuff_start) );
+ Addr co_endPlus = VG_PGROUNDUP( (Addr)&VG_(trampoline_stuff_end) );
+ VG_(debugLog)(1,"redir",
+ "transfer ownership V -> C of 0x%llx .. 0x%llx\n",
+ (ULong)co_start, (ULong)co_endPlus-1 );
+
+ change_ownership_v_c_OK
+ = VG_(am_change_ownership_v_to_c)( co_start, co_endPlus - co_start );
+ vg_assert(change_ownership_v_c_OK);
+ }
+
+ //--------------------------------------------------------------
+ // Initialise the scheduler (phase 1) [generates tid_main]
+ // p: none, afaics
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Initialise scheduler (phase 1)\n");
+ tid_main = VG_(scheduler_init_phase1)();
+ vg_assert(tid_main >= 0 && tid_main < VG_N_THREADS
+ && tid_main != VG_INVALID_THREADID);
+ /* Tell the tool about tid_main */
+ VG_TRACK( pre_thread_ll_create, VG_INVALID_THREADID, tid_main );
+
+ //--------------------------------------------------------------
+ // Tell the tool about the initial client memory permissions
+ // p: aspacem
+ // p: mallocfree
+ // p: setup_client_stack
+ // p: setup_client_dataseg
+ //
+ // For each segment we tell the client about, look up in
+ // addr2dihandle as created above, to see if there's a debuginfo
+ // handle associated with the segment, that we can hand along
+ // to the tool, to be helpful.
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Tell tool about initial permissions\n");
+ { Addr* seg_starts;
+ Int n_seg_starts;
+
+ tl_assert(addr2dihandle);
+
+ /* Mark the main thread as running while we tell the tool about
+ the client memory so that the tool can associate that memory
+ with the main thread. */
+ tl_assert(VG_(running_tid) == VG_INVALID_THREADID);
+ VG_(running_tid) = tid_main;
+
+ seg_starts = get_seg_starts( &n_seg_starts );
+ vg_assert(seg_starts && n_seg_starts >= 0);
+
+ /* show interesting ones to the tool */
+ for (i = 0; i < n_seg_starts; i++) {
+ Word j, n;
+ NSegment const* seg
+ = VG_(am_find_nsegment)( seg_starts[i] );
+ vg_assert(seg);
+ if (seg->kind == SkFileC || seg->kind == SkAnonC) {
+ /* This next assertion is tricky. If it is placed
+ immediately before this 'if', it very occasionally fails.
+ Why? Because previous iterations of the loop may have
+ caused tools (via the new_mem_startup calls) to do
+ dynamic memory allocation, and that may affect the mapped
+ segments; in particular it may cause segment merging to
+ happen. Hence we cannot assume that seg_starts[i], which
+ reflects the state of the world before we started this
+ loop, is the same as seg->start, as the latter reflects
+ the state of the world (viz, mappings) at this particular
+ iteration of the loop.
+
+ Why does moving it inside the 'if' make it safe? Because
+ any dynamic memory allocation done by the tools will
+ affect only the state of Valgrind-owned segments, not of
+ Client-owned segments. And the 'if' guards against that
+ -- we only get in here for Client-owned segments.
+
+ In other words: the loop may change the state of
+ Valgrind-owned segments as it proceeds. But it should
+ not cause the Client-owned segments to change. */
+ vg_assert(seg->start == seg_starts[i]);
+ VG_(debugLog)(2, "main",
+ "tell tool about %010lx-%010lx %c%c%c\n",
+ seg->start, seg->end,
+ seg->hasR ? 'r' : '-',
+ seg->hasW ? 'w' : '-',
+ seg->hasX ? 'x' : '-' );
+ /* search addr2dihandle to see if we have an entry
+ matching seg->start. */
+ n = VG_(sizeXA)( addr2dihandle );
+ for (j = 0; j < n; j++) {
+ Addr_n_ULong* anl = VG_(indexXA)( addr2dihandle, j );
+ if (anl->a == seg->start) {
+ tl_assert(anl->ull > 0); /* check it's a valid handle */
+ break;
+ }
+ }
+ vg_assert(j >= 0 && j <= n);
+ VG_TRACK( new_mem_startup, seg->start, seg->end+1-seg->start,
+ seg->hasR, seg->hasW, seg->hasX,
+ /* and the retrieved debuginfo handle, if any */
+ j < n
+ ? ((Addr_n_ULong*)VG_(indexXA)( addr2dihandle, j ))->ull
+ : 0 );
+ }
+ }
+
+ VG_(free)( seg_starts );
+ VG_(deleteXA)( addr2dihandle );
+
+ /* Also do the initial stack permissions. */
+ { NSegment const* seg
+ = VG_(am_find_nsegment)( the_iifii.initial_client_SP );
+ vg_assert(seg);
+ vg_assert(seg->kind == SkAnonC);
+ vg_assert(the_iifii.initial_client_SP >= seg->start);
+ vg_assert(the_iifii.initial_client_SP <= seg->end);
+# if defined(VGO_aix5)
+ VG_(clstk_base) = seg->start;
+ VG_(clstk_end) = seg->end;
+# endif
+
+ /* Stuff below the initial SP is unaddressable. Take into
+ account any ABI-mandated space below the stack pointer that
+ is required (VG_STACK_REDZONE_SZB). setup_client_stack()
+ will have allocated an extra page if a red zone is required,
+ to be on the safe side. */
+ vg_assert(the_iifii.initial_client_SP - VG_STACK_REDZONE_SZB
+ >= seg->start);
+ VG_TRACK( die_mem_stack,
+ seg->start,
+ the_iifii.initial_client_SP - VG_STACK_REDZONE_SZB
+ - seg->start );
+ VG_(debugLog)(2, "main", "mark stack inaccessible %010lx-%010lx\n",
+ seg->start,
+ the_iifii.initial_client_SP-1 - VG_STACK_REDZONE_SZB);
+ }
+
+ /* Also the assembly helpers. */
+ VG_TRACK( new_mem_startup,
+ (Addr)&VG_(trampoline_stuff_start),
+ (Addr)&VG_(trampoline_stuff_end)
+ - (Addr)&VG_(trampoline_stuff_start),
+ False, /* readable? */
+ False, /* writable? */
+ True /* executable? */,
+ 0 /* di_handle: no associated debug info */ );
+
+ /* Clear the running thread indicator */
+ VG_(running_tid) = VG_INVALID_THREADID;
+ tl_assert(VG_(running_tid) == VG_INVALID_THREADID);
+ }
+
+ //--------------------------------------------------------------
+ // Initialise the scheduler (phase 2)
+ // p: Initialise the scheduler (phase 1) [for tid_main]
+ // p: setup_file_descriptors() [else VG_(safe_fd)() breaks]
+ // p: setup_client_stack
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Initialise scheduler (phase 2)\n");
+ { NSegment const* seg
+ = VG_(am_find_nsegment)( the_iifii.initial_client_SP );
+ vg_assert(seg);
+ vg_assert(seg->kind == SkAnonC);
+ vg_assert(the_iifii.initial_client_SP >= seg->start);
+ vg_assert(the_iifii.initial_client_SP <= seg->end);
+ VG_(scheduler_init_phase2)( tid_main,
+ seg->end, the_iifii.clstack_max_size );
+ }
+
+ //--------------------------------------------------------------
+ // Set up state for the root thread
+ // p: ?
+ // setup_scheduler() [for sched-specific thread 1 stuff]
+ // VG_(ii_create_image) [for 'the_iicii' initial info]
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "Finalise initial image\n");
+ VG_(ii_finalise_image)( the_iifii );
+
+ //--------------------------------------------------------------
+ // Initialise the signal handling subsystem
+ // p: n/a
+ //--------------------------------------------------------------
+ // Nb: temporarily parks the saved blocking-mask in saved_sigmask.
+ VG_(debugLog)(1, "main", "Initialise signal management\n");
+ VG_(sigstartup_actions)();
+
+ //--------------------------------------------------------------
+ // Read suppression file
+ // p: main_process_cmd_line_options() [for VG_(clo_suppressions)]
+ //--------------------------------------------------------------
+ if (VG_(needs).core_errors || VG_(needs).tool_errors) {
+ VG_(debugLog)(1, "main", "Load suppressions\n");
+ VG_(load_suppressions)();
+ }
+
+ //--------------------------------------------------------------
+ // register client stack
+ //--------------------------------------------------------------
+ VG_(clstk_id) = VG_(register_stack)(VG_(clstk_base), VG_(clstk_end));
+
+ //--------------------------------------------------------------
+ // Show the address space state so far
+ //--------------------------------------------------------------
+ VG_(debugLog)(1, "main", "\n");
+ VG_(debugLog)(1, "main", "\n");
+ VG_(am_show_nsegments)(1,"Memory layout at client startup");
+ VG_(debugLog)(1, "main", "\n");
+ VG_(debugLog)(1, "main", "\n");
+
+ //--------------------------------------------------------------
+ // Run!
+ //--------------------------------------------------------------
+ if (VG_(clo_xml)) {
+ HChar buf[50];
+ VG_(elapsed_wallclock_time)(buf);
+ VG_(message_no_f_c)(Vg_UserMsg,
+ "<status>\n"
+ " <state>RUNNING</state>\n"
+ " <time>%t</time>\n"
+ "</status>",
+ buf);
+ VG_(message)(Vg_UserMsg, "");
+ }
+
+ VG_(debugLog)(1, "main", "Running thread 1\n");
+
+ /* As a result of the following call, the last thread standing
+ eventually winds up running shutdown_actions_NORETURN
+ just below. Unfortunately, simply exporting said function
+ causes m_main to be part of a module cycle, which is pretty
+ nonsensical. So instead of doing that, the address of said
+ function is stored in a global variable 'owned' by m_syswrap,
+ and it uses that function pointer to get back here when it needs
+ to. */
+
+ /* Set continuation address. */
+ VG_(address_of_m_main_shutdown_actions_NORETURN)
+ = & shutdown_actions_NORETURN;
+
+ /* Run the first thread, eventually ending up at the continuation
+ address. */
+ VG_(main_thread_wrapper_NORETURN)(1);
+
+ /*NOTREACHED*/
+ vg_assert(0);
+}
+
+/* Do everything which needs doing when the last thread exits or when
+ a thread exits requesting a complete process exit (exit on AIX).
+
+ We enter here holding The Lock. For the case VgSrc_ExitProcess we
+ must never release it, because to do so would allow other threads
+ to continue after the system is ostensibly shut down. So we must
+ go to our grave, so to speak, holding the lock.
+
+ In fact, there is never any point in releasing the lock at this
+ point - we have it, we're shutting down the entire system, and
+ for the case VgSrc_ExitProcess doing so positively causes trouble.
+ So don't.
+
+ The final_tidyup call makes a bit of a nonsense of the ExitProcess
+ case, since it will run the libc_freeres function, thus allowing
+ other lurking threads to run again. Hmm. */
+
+static
+void shutdown_actions_NORETURN( ThreadId tid,
+                                VgSchedReturnCode tids_schedretcode )
+{
+   /* Entered holding The Lock (see comment above); it is never released. */
+   VG_(debugLog)(1, "main", "entering VG_(shutdown_actions_NORETURN)\n");
+   VG_(am_show_nsegments)(1,"Memory layout at client shutdown");
+
+   vg_assert(VG_(is_running_thread)(tid));
+
+   vg_assert(tids_schedretcode == VgSrc_ExitThread
+	     || tids_schedretcode == VgSrc_ExitProcess
+             || tids_schedretcode == VgSrc_FatalSig );
+
+   if (tids_schedretcode == VgSrc_ExitThread) {
+
+      // We are the last surviving thread. Right?
+      vg_assert( VG_(count_living_threads)() == 1 );
+
+      // Wait for all other threads to exit.
+      // jrs: Huh? but they surely are already gone
+      VG_(reap_threads)(tid);
+
+      // Clean the client up before the final report
+      // this causes the libc_freeres function to run
+      final_tidyup(tid);
+
+      /* be paranoid */
+      vg_assert(VG_(is_running_thread)(tid));
+      vg_assert(VG_(count_living_threads)() == 1);
+
+   } else {
+
+      // We may not be the last surviving thread. However, we
+      // want to shut down the entire process. We hold the lock
+      // and we need to keep hold of it all the way out, in order
+      // that none of the other threads ever run again.
+      vg_assert( VG_(count_living_threads)() >= 1 );
+
+      // Clean the client up before the final report
+      // this causes the libc_freeres function to run
+      // perhaps this is unsafe, as per comment above
+      final_tidyup(tid);
+
+      /* be paranoid */
+      vg_assert(VG_(is_running_thread)(tid));
+      vg_assert(VG_(count_living_threads)() >= 1);
+   }
+
+   /* This thread is done; mark its slot as no longer in use. */
+   VG_(threads)[tid].status = VgTs_Empty;
+   //--------------------------------------------------------------
+   // Finalisation: cleanup, messages, etc. Order not so important, only
+   // affects what order the messages come.
+   //--------------------------------------------------------------
+   if (VG_(clo_verbosity) > 0)
+      VG_(message)(Vg_UserMsg, "");
+
+   if (VG_(clo_xml)) {
+      HChar buf[50];
+      if (VG_(needs).core_errors || VG_(needs).tool_errors) {
+         VG_(show_error_counts_as_XML)();
+         VG_(message)(Vg_UserMsg, "");
+      }
+      /* Emit a FINISHED status record, mirroring the RUNNING one
+         emitted at startup. */
+      VG_(elapsed_wallclock_time)(buf);
+      VG_(message_no_f_c)(Vg_UserMsg,
+                          "<status>\n"
+                          "  <state>FINISHED</state>\n"
+                          "  <time>%t</time>\n"
+                          "</status>",
+                          buf);
+      VG_(message)(Vg_UserMsg, "");
+   }
+
+   /* Print out file descriptor summary and stats. */
+   if (VG_(clo_track_fds))
+      VG_(show_open_fds)();
+
+   if (VG_(needs).core_errors || VG_(needs).tool_errors)
+      VG_(show_all_errors)();
+
+   /* Give the tool its chance to produce a final report. */
+   VG_TDICT_CALL(tool_fini, 0/*exitcode*/);
+
+   if (VG_(clo_xml)) {
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "</valgrindoutput>");
+      VG_(message)(Vg_UserMsg, "");
+   }
+
+   VG_(sanity_check_general)( True /*include expensive checks*/ );
+
+   if (VG_(clo_verbosity) > 1)
+      print_all_stats();
+
+   /* Show a profile of the heap(s) at shutdown. Optionally, first
+      throw away all the debug info, as that makes it easy to spot
+      leaks in the debuginfo reader. */
+   if (VG_(clo_profile_heap)) {
+      if (0) VG_(di_discard_ALL_debuginfo)();
+      VG_(print_arena_cc_analysis)();
+   }
+
+   if (VG_(clo_profile_flags) > 0) {
+      #define N_MAX 200
+      BBProfEntry tops[N_MAX];
+      ULong score_total = VG_(get_BB_profile) (tops, N_MAX);
+      show_BB_profile(tops, N_MAX, score_total);
+   }
+
+   /* Print Vex storage stats */
+   if (0)
+      LibVEX_ShowAllocStats();
+
+   /* Ok, finally exit in the os-specific way, according to the scheduler's
+      return code. In short, if the (last) thread exited by calling
+      sys_exit, do likewise; if the (last) thread stopped due to a fatal
+      signal, terminate the entire system with that same fatal signal. */
+   VG_(debugLog)(1, "core_os",
+                 "VG_(terminate_NORETURN)(tid=%lld)\n", (ULong)tid);
+
+   switch (tids_schedretcode) {
+   case VgSrc_ExitThread:  /* the normal way out (Linux) */
+   case VgSrc_ExitProcess: /* the normal way out (AIX) */
+      /* Change the application return code to user's return code,
+         if an error was found */
+      if (VG_(clo_error_exitcode) > 0
+          && VG_(get_n_errs_found)() > 0) {
+         VG_(exit)( VG_(clo_error_exitcode) );
+      } else {
+         /* otherwise, return the client's exit code, in the normal
+            way. */
+         VG_(exit)( VG_(threads)[tid].os_state.exitcode );
+      }
+      /* NOT ALIVE HERE! */
+      VG_(core_panic)("entered the afterlife in main() -- ExitT/P");
+      break; /* what the hell :) */
+
+   case VgSrc_FatalSig:
+      /* We were killed by a fatal signal, so replicate the effect */
+      vg_assert(VG_(threads)[tid].os_state.fatalsig != 0);
+      VG_(kill_self)(VG_(threads)[tid].os_state.fatalsig);
+      VG_(core_panic)("main(): signal was supposed to be fatal");
+      break;
+
+   default:
+      VG_(core_panic)("main(): unexpected scheduler return code");
+   }
+}
+
+/* -------------------- */
+
+/* Final clean-up before terminating the process.
+ Clean up the client by calling __libc_freeres() (if requested)
+ This is Linux-specific?
+*/
+static void final_tidyup(ThreadId tid)
+{
+# if defined(VGP_ppc64_linux)
+   Addr r2;
+# endif
+   /* Address of the client's __libc_freeres wrapper as recorded by the
+      core; 0 means no wrapper is available. */
+   Addr __libc_freeres_wrapper = VG_(client___libc_freeres_wrapper);
+
+   vg_assert(VG_(is_running_thread)(tid));
+
+   if ( !VG_(needs).libc_freeres ||
+        !VG_(clo_run_libc_freeres) ||
+        0 == __libc_freeres_wrapper )
+      return;			/* can't/won't do it */
+# if defined(VGO_aix5)
+   return;			/* inapplicable on non-Linux platforms */
+# endif
+
+# if defined(VGP_ppc64_linux)
+   r2 = VG_(get_tocptr)( __libc_freeres_wrapper );
+   if (r2 == 0) {
+      VG_(message)(Vg_UserMsg,
+                   "Caught __NR_exit, but can't run __libc_freeres()");
+      VG_(message)(Vg_UserMsg,
+                   "   since cannot establish TOC pointer for it.");
+      return;
+   }
+# endif
+
+   if (VG_(clo_verbosity) > 2 ||
+       VG_(clo_trace_syscalls) ||
+       VG_(clo_trace_sched))
+      VG_(message)(Vg_DebugMsg,
+                   "Caught __NR_exit; running __libc_freeres()");
+
+   /* set thread context to point to libc_freeres_wrapper */
+   /* ppc64-linux note: __libc_freeres_wrapper gives us the real
+      function entry point, not a fn descriptor, so can use it
+      directly. However, we need to set R2 (the toc pointer)
+      appropriately. */
+   VG_(set_IP)(tid, __libc_freeres_wrapper);
+# if defined(VGP_ppc64_linux)
+   VG_(threads)[tid].arch.vex.guest_GPR2 = r2;
+# endif
+
+   /* Block all blockable signals by copying the real block state into
+      the thread's block state */
+   VG_(sigprocmask)(VKI_SIG_BLOCK, NULL, &VG_(threads)[tid].sig_mask);
+   VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
+
+   /* and restore handlers to default */
+   VG_(set_default_handler)(VKI_SIGSEGV);
+   VG_(set_default_handler)(VKI_SIGBUS);
+   VG_(set_default_handler)(VKI_SIGILL);
+   VG_(set_default_handler)(VKI_SIGFPE);
+
+   // We were exiting, so assert that...
+   vg_assert(VG_(is_exiting)(tid));
+   // ...but now we're not again
+   VG_(threads)[tid].exitreason = VgSrc_None;
+
+   // run until client thread exits - ideally with LIBC_FREERES_DONE,
+   // but exit/exitgroup/signal will do
+   VG_(scheduler)(tid);
+
+   vg_assert(VG_(is_exiting)(tid));
+}
+
+
+/*====================================================================*/
+/*=== Getting to main() alive: LINUX (for AIX5 see below) ===*/
+/*====================================================================*/
+
+#if defined(VGO_linux)
+
+/* If linking of the final executables is done with glibc present,
+ then Valgrind starts at main() above as usual, and all of the
+ following code is irrelevant.
+
+ However, this is not the intended mode of use. The plan is to
+ avoid linking against glibc, by giving gcc the flags
+ -nodefaultlibs -lgcc -nostartfiles at startup.
+
+ From this derive two requirements:
+
+ 1. gcc may emit calls to memcpy and memset to deal with structure
+ assignments etc. Since we have chosen to ignore all the
+ "normal" supporting libraries, we have to provide our own
+ implementations of them. No problem.
+
+ 2. We have to provide a symbol "_start", to which the kernel
+ hands control at startup. Hence the code below.
+*/
+
+/* ---------------- Requirement 1 ---------------- */
+
+/* gcc may emit implicit calls to memcpy (e.g. for struct assignment);
+   since we link with -nodefaultlibs (see comment above), supply one that
+   forwards to the core's own implementation. */
+void* memcpy(void *dest, const void *src, SizeT n);
+void* memcpy(void *dest, const void *src, SizeT n) {
+   return VG_(memcpy)(dest,src,n);
+}
+/* Likewise for implicit memset calls: forward to the core's version
+   because no libc is linked in. */
+void* memset(void *s, int c, SizeT n);
+void* memset(void *s, int c, SizeT n) {
+   return VG_(memset)(s,c,n);
+}
+
+/* ---------------- Requirement 2 ---------------- */
+
+/* Glibc's sysdeps/i386/elf/start.S has the following gem of a
+ comment, which explains how the stack looks right at process start
+ (when _start is jumped to). Hence _start passes %esp to
+ _start_in_C_linux, which extracts argc/argv/envp and starts up
+ correctly. */
+
+/* This is the canonical entry point, usually the first thing in the text
+ segment. The SVR4/i386 ABI (pages 3-31, 3-32) says that when the entry
+ point runs, most registers' values are unspecified, except for:
+
+ %edx Contains a function pointer to be registered with `atexit'.
+ This is how the dynamic linker arranges to have DT_FINI
+ functions called for shared libraries that have been loaded
+ before this code runs.
+
+ %esp The stack contains the arguments and environment:
+ 0(%esp) argc
+ 4(%esp) argv[0]
+ ...
+ (4*argc)(%esp) NULL
+ (4*(argc+1))(%esp) envp[0]
+ ...
+ NULL
+*/
+
+/* The kernel hands control to _start, which extracts the initial
+   stack pointer and calls onwards to _start_in_C_linux. This also
+   switches to the new stack. */
+#if defined(VGP_x86_linux)
+/* x86: compute the interim stack top in %eax, 16-align it, install it,
+   and pass the original kernel-supplied %esp to _start_in_C_linux. */
+asm("\n"
+    ".text\n"
+    "\t.globl _start\n"
+    "\t.type _start,@function\n"
+    "_start:\n"
+    /* set up the new stack in %eax */
+    "\tmovl $vgPlain_interim_stack, %eax\n"
+    "\taddl $"VG_STRINGIFY(VG_STACK_GUARD_SZB)", %eax\n"
+    "\taddl $"VG_STRINGIFY(VG_STACK_ACTIVE_SZB)", %eax\n"
+    "\tsubl $16, %eax\n"
+    "\tandl $~15, %eax\n"
+    /* install it, and collect the original one */
+    "\txchgl %eax, %esp\n"
+    /* call _start_in_C_linux, passing it the startup %esp */
+    "\tpushl %eax\n"
+    "\tcall _start_in_C_linux\n"
+    "\thlt\n"
+    ".previous\n"
+);
+#elif defined(VGP_amd64_linux)
+/* amd64: same scheme; the original %rsp travels in %rdi, the first
+   integer-argument register. */
+asm("\n"
+    ".text\n"
+    "\t.globl _start\n"
+    "\t.type _start,@function\n"
+    "_start:\n"
+    /* set up the new stack in %rdi */
+    "\tmovq $vgPlain_interim_stack, %rdi\n"
+    "\taddq $"VG_STRINGIFY(VG_STACK_GUARD_SZB)", %rdi\n"
+    "\taddq $"VG_STRINGIFY(VG_STACK_ACTIVE_SZB)", %rdi\n"
+    "\tandq $~15, %rdi\n"
+    /* install it, and collect the original one */
+    "\txchgq %rdi, %rsp\n"
+    /* call _start_in_C_linux, passing it the startup %rsp */
+    "\tcall _start_in_C_linux\n"
+    "\thlt\n"
+    ".previous\n"
+);
+#elif defined(VGP_ppc32_linux)
+/* ppc32: r1 is the SP; the original SP is handed over in r3, the first
+   argument register. */
+asm("\n"
+    ".text\n"
+    "\t.globl _start\n"
+    "\t.type _start,@function\n"
+    "_start:\n"
+    /* set up the new stack in r16 */
+    "\tlis 16,vgPlain_interim_stack@ha\n"
+    "\tla 16,vgPlain_interim_stack@l(16)\n"
+    "\tlis 17,("VG_STRINGIFY(VG_STACK_GUARD_SZB)" >> 16)\n"
+    "\tori 17,17,("VG_STRINGIFY(VG_STACK_GUARD_SZB)" & 0xFFFF)\n"
+    "\tlis 18,("VG_STRINGIFY(VG_STACK_ACTIVE_SZB)" >> 16)\n"
+    "\tori 18,18,("VG_STRINGIFY(VG_STACK_ACTIVE_SZB)" & 0xFFFF)\n"
+    "\tadd 16,17,16\n"
+    "\tadd 16,18,16\n"
+    "\trlwinm 16,16,0,0,27\n"
+    /* now r16 = &vgPlain_interim_stack + VG_STACK_GUARD_SZB +
+       VG_STACK_ACTIVE_SZB rounded down to the nearest 16-byte
+       boundary. And r1 is the original SP. Set the SP to r16 and
+       call _start_in_C_linux, passing it the initial SP. */
+    "\tmr 3,1\n"
+    "\tmr 1,16\n"
+    "\tbl _start_in_C_linux\n"
+    "\ttrap\n"
+    ".previous\n"
+);
+#elif defined(VGP_ppc64_linux)
+/* ppc64: as ppc32, but a function descriptor for _start must be placed
+   in .opd, and the 64-bit address is assembled in 16-bit pieces. */
+asm("\n"
+    /* PPC64 ELF ABI says '_start' points to a function descriptor.
+       So we must have one, and that is what goes into the .opd section. */
+    "\t.align 2\n"
+    "\t.global _start\n"
+    "\t.section \".opd\",\"aw\"\n"
+    "\t.align 3\n"
+    "_start:\n"
+    "\t.quad ._start,.TOC.@tocbase,0\n"
+    "\t.previous\n"
+    "\t.type ._start,@function\n"
+    "\t.global ._start\n"
+    "._start:\n"
+    /* set up the new stack in r16 */
+    "\tlis 16, vgPlain_interim_stack@highest\n"
+    "\tori 16,16,vgPlain_interim_stack@higher\n"
+    "\tsldi 16,16,32\n"
+    "\toris 16,16,vgPlain_interim_stack@h\n"
+    "\tori 16,16,vgPlain_interim_stack@l\n"
+    "\txor 17,17,17\n"
+    "\tlis 17,("VG_STRINGIFY(VG_STACK_GUARD_SZB)" >> 16)\n"
+    "\tori 17,17,("VG_STRINGIFY(VG_STACK_GUARD_SZB)" & 0xFFFF)\n"
+    "\txor 18,18,18\n"
+    "\tlis 18,("VG_STRINGIFY(VG_STACK_ACTIVE_SZB)" >> 16)\n"
+    "\tori 18,18,("VG_STRINGIFY(VG_STACK_ACTIVE_SZB)" & 0xFFFF)\n"
+    "\tadd 16,17,16\n"
+    "\tadd 16,18,16\n"
+    "\trldicr 16,16,0,59\n"
+    /* now r16 = &vgPlain_interim_stack + VG_STACK_GUARD_SZB +
+       VG_STACK_ACTIVE_SZB rounded down to the nearest 16-byte
+       boundary. And r1 is the original SP. Set the SP to r16 and
+       call _start_in_C_linux, passing it the initial SP. */
+    "\tmr 3,1\n"
+    "\tmr 1,16\n"
+    "\tbl ._start_in_C_linux\n"
+    "\tnop\n"
+    "\ttrap\n"
+);
+#else
+#error "_start: needs implementation on this platform"
+#endif
+
+/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+#define _GNU_SOURCE
+#define _FILE_OFFSET_BITS 64
+/* This is in order to get AT_NULL and AT_PAGESIZE. */
+#include <elf.h>
+/* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+
+/* Avoid compiler warnings: this fn _is_ used, but labelling it
+ 'static' causes gcc to complain it isn't. */
+void _start_in_C_linux ( UWord* pArgc );
+void _start_in_C_linux ( UWord* pArgc )
+{
+   Int r;
+   /* Initial stack layout (see ABI comment above): argc, then argv[]
+      terminated by NULL, then envp[] terminated by NULL. */
+   Word argc = pArgc[0];
+   HChar** argv = (HChar**)&pArgc[1];
+   HChar** envp = (HChar**)&pArgc[1+argc+1];
+
+   VG_(memset)( &the_iicii, 0, sizeof(the_iicii) );
+   VG_(memset)( &the_iifii, 0, sizeof(the_iifii) );
+
+   the_iicii.sp_at_startup = (Addr)pArgc;
+
+# if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+   {
+      /* ppc/ppc64 can be configured with different page sizes.
+         Determine this early. This is an ugly hack and really should
+         be moved into valgrind_main. */
+      UWord *sp = &pArgc[1+argc+1];
+      /* sp points at envp; skip past its terminating NULL to reach the
+         auxv, then scan the (type, value) pairs for AT_PAGESZ. */
+      while (*sp++ != 0)
+         ;
+      for (; *sp != AT_NULL && *sp != AT_PAGESZ; sp += 2);
+      if (*sp == AT_PAGESZ) {
+         VKI_PAGE_SIZE = sp[1];
+         /* Derive the matching shift; leaves VKI_PAGE_SHIFT past the
+            max if the size is not a recognised power of two. */
+         for (VKI_PAGE_SHIFT = 12;
+              VKI_PAGE_SHIFT <= VKI_MAX_PAGE_SHIFT; VKI_PAGE_SHIFT++)
+            if (VKI_PAGE_SIZE == (1UL << VKI_PAGE_SHIFT))
+               break;
+      }
+   }
+# endif
+
+   r = valgrind_main( (Int)argc, argv, envp );
+   /* NOTREACHED */
+   VG_(exit)(r);
+}
+
+#endif /* defined(VGO_linux) */
+
+
+/*====================================================================*/
+/*=== Getting to main() alive: AIX5 ===*/
+/*====================================================================*/
+
+#if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+
+/* This is somewhat simpler than the Linux case. _start_valgrind
+ receives control from the magic piece of code created in this
+ process' address space by the launcher, via use of ptrace(). At
+ the point of entry:
+
+ - the initial client process image is in memory and ready to roll,
+ except that we've partially trashed its integer register state
+ in order to get this far. So ..
+
+ - intregs37 holds the client's initial integer register state, so
+ we can restore it before starting the client on the VCPU.
+
+ - we're on the client's stack. This is not good; therefore the
+ first order of business is to switch to our temporary stack.
+
+ - the client's initial argc/v/envp is in r3/r4/r5 (32 bit mode) or
+ r14/r15/r16 (64 bit mode). They are pulled out of the stashed
+ integer register state and passed to our main().
+
+ The launcher will have played some games with argv. If the launcher
+ ($prefix/bin/valgrind) was started like this
+
+ valgrind [args-for-V] app [args-for-app]
+
+ then the launcher will have started the client as
+
+ app [args-for-V] app [args-for-app]
+
+ m_initimg will have to mess with the client's initial r4/r5
+ (32-bit) or r15/r16 (64-bit) so that it believes it was execd as
+ "app [args-for-app]". Well, that's no big deal.
+*/
+
+#include "launcher-aix5-bootblock.h"
+
+void _start_in_C_aix5 ( AIX5Bootblock* bootblock );
+void _start_in_C_aix5 ( AIX5Bootblock* bootblock )
+{
+   Int r;
+   ULong* intregs37;
+   UWord argc, argv, envp;
+   /* Make our syscall stubs usable: copy the syscall numbers carried in
+      the bootblock (set up by the launcher -- see comment above) into
+      our globals. */
+   __NR_getpid = bootblock->__NR_getpid;
+   __NR_write  = bootblock->__NR_write;
+   __NR_exit   = bootblock->__NR_exit;
+   __NR_open   = bootblock->__NR_open;
+   __NR_read   = bootblock->__NR_read;
+   __NR_close  = bootblock->__NR_close;
+
+   VG_(memset)( &the_iicii, 0, sizeof(the_iicii) );
+   VG_(memset)( &the_iifii, 0, sizeof(the_iifii) );
+
+   /* Stash the client's saved integer register state for later
+      restoration (see comment above). */
+   intregs37 = &bootblock->iregs_pc_cr_lr_ctr_xer[0];
+   the_iicii.intregs37   = intregs37;
+   the_iicii.bootblock   = (void*)bootblock;
+   the_iicii.adler32_exp = bootblock->adler32;
+
+   /* Not important on AIX. */
+   the_iicii.sp_at_startup = (Addr)0x31415927ULL;
+
+# if defined(VGP_ppc32_aix5)
+   argc = (UWord)intregs37[3];  /* client's r3 == argc */
+   argv = (UWord)intregs37[4];
+   envp = (UWord)intregs37[5];
+# else /* defined(VGP_ppc64_aix5) */
+   argc = (UWord)intregs37[14]; /* client's r14 == argc */
+   argv = (UWord)intregs37[15];
+   envp = (UWord)intregs37[16];
+# endif
+
+   r = valgrind_main( (Int)argc, (HChar**)argv, (HChar**)envp );
+
+   /* NOTREACHED */
+   VG_(exit)(r);
+}
+
+/* THE ENTRY POINT */
+void _start_valgrind ( AIX5Bootblock* bootblock );
+void _start_valgrind ( AIX5Bootblock* bootblock )
+{
+   /* Switch immediately to our temporary stack, and continue. This
+      is pretty dodgy in that it assumes that gcc does not place on
+      the stack, anything needed to form the _start_in_C_aix5 call,
+      since it will be on the old stack. */
+   register UWord new_r1;
+   new_r1  = (UWord)&VG_(interim_stack);
+   new_r1 += VG_STACK_GUARD_SZB;  /* step over lower guard page */
+   new_r1 += VG_STACK_ACTIVE_SZB; /* step to top of active area */
+   new_r1 -= 512; /* paranoia */
+   /* r1 is the PPC stack pointer; install the new value. */
+   __asm__ __volatile__("mr 1,%0" :/*wr*/
+                                  :/*rd*/ "b"(new_r1)
+                                  :/*trash*/"r1","memory");
+   _start_in_C_aix5(bootblock);
+   /*NOTREACHED*/
+   VG_(exit)(0);
+}
+
+/* At some point in Oct 2008, static linking appeared to stop working
+ on AIX 5.3. This breaks the build since we link statically. The
+ linking fails citing absence of the following five symbols as the
+ reason. In the absence of a better solution, here are stand-ins
+ for them. Kludge appears to work; presumably said functions,
+ assuming they are indeed functions, are never called. */
+/* Stand-ins only (see comment above); abort loudly if ever called. */
+void encrypted_pw_passlen ( void ) { vg_assert(0); }
+void crypt_r ( void )              { vg_assert(0); }
+void max_history_size ( void )     { vg_assert(0); }
+void getpass_auto ( void )         { vg_assert(0); }
+void max_pw_passlen ( void )       { vg_assert(0); }
+
+#endif /* defined(VGP_ppc{32,64}_aix5) */
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_mallocfree.c.svn-base b/coregrind/.svn/text-base/m_mallocfree.c.svn-base
new file mode 100644
index 0000000..966d965
--- /dev/null
+++ b/coregrind/.svn/text-base/m_mallocfree.c.svn-base
@@ -0,0 +1,1869 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An implementation of malloc/free which doesn't use sbrk. ---*/
+/*--- m_mallocfree.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_threadstate.h" // For VG_INVALID_THREADID
+#include "pub_core_tooliface.h"
+#include "valgrind.h"
+
+//zz#include "memcheck/memcheck.h"
+
+// #define DEBUG_MALLOC // turn on heavyweight debugging machinery
+// #define VERBOSE_MALLOC // make verbose, esp. in debugging machinery
+
+/* Number and total size of blocks in free queue. Used by mallinfo(). */
+Long VG_(free_queue_volume) = 0;
+Long VG_(free_queue_length) = 0;
+
+static void cc_analyse_alloc_arena ( ArenaId aid ); /* fwds */
+
+/*------------------------------------------------------------*/
+/*--- Main types ---*/
+/*------------------------------------------------------------*/
+
+#define N_MALLOC_LISTS 112 // do not change this
+
+// The amount you can ask for is limited only by sizeof(SizeT)...
+#define MAX_PSZB (~((SizeT)0x0))
+
+// Each arena has a sorted array of superblocks, which expands
+// dynamically. This is its initial size.
+#define SBLOCKS_SIZE_INITIAL 50
+
+typedef UChar UByte;
+
+/* Layout of an in-use block:
+
+ cost center (OPTIONAL) (sizeof(ULong) bytes, only when h-p enabled)
+ this block total szB (sizeof(SizeT) bytes)
+ red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
+ (payload bytes)
+ red zone bytes (depends on Arena.rz_szB, but >= sizeof(void*))
+ this block total szB (sizeof(SizeT) bytes)
+
+ Layout of a block on the free list:
+
+ cost center (OPTIONAL) (sizeof(ULong) bytes, only when h-p enabled)
+ this block total szB (sizeof(SizeT) bytes)
+ freelist previous ptr (sizeof(void*) bytes)
+ excess red zone bytes (if Arena.rz_szB > sizeof(void*))
+ (payload bytes)
+ excess red zone bytes (if Arena.rz_szB > sizeof(void*))
+ freelist next ptr (sizeof(void*) bytes)
+ this block total szB (sizeof(SizeT) bytes)
+
+ Total size in bytes (bszB) and payload size in bytes (pszB)
+ are related by:
+
+ bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB
+
+ when heap profiling is not enabled, and
+
+ bszB == pszB + 2*sizeof(SizeT) + 2*a->rz_szB + sizeof(ULong)
+
+ when it is enabled. It follows that the minimum overhead per heap
+ block for arenas used by the core is:
+
+ 32-bit platforms: 2*4 + 2*4 == 16 bytes
+ 64-bit platforms: 2*8 + 2*8 == 32 bytes
+
+ when heap profiling is not enabled, and
+
+ 32-bit platforms: 2*4 + 2*4 + 8 == 24 bytes
+ 64-bit platforms: 2*8 + 2*8 + 8 == 40 bytes
+
+ when it is enabled. In all cases, extra overhead may be incurred
+ when rounding the payload size up to VG_MIN_MALLOC_SZB.
+
+ Furthermore, both size fields in the block have their least-significant
+ bit set if the block is not in use, and unset if it is in use.
+ (The bottom 3 or so bits are always free for this because of alignment.)
+ A block size of zero is not possible, because a block always has at
+ least two SizeTs and two pointers of overhead.
+
+ Nb: All Block payloads must be VG_MIN_MALLOC_SZB-aligned. This is
+ achieved by ensuring that Superblocks are VG_MIN_MALLOC_SZB-aligned
+ (see newSuperblock() for how), and that the lengths of the following
+ things are a multiple of VG_MIN_MALLOC_SZB:
+ - Superblock admin section lengths (due to elastic padding)
+ - Block admin section (low and high) lengths (due to elastic redzones)
+ - Block payload lengths (due to req_pszB rounding up)
+
+ The heap-profile cost-center field is 8 bytes even on 32 bit
+ platforms. This is so as to keep the payload field 8-aligned. On
+ a 64-bit platform, this cc-field contains a pointer to a const
+ HChar*, which is the cost center name. On 32-bit platforms, the
+ pointer lives in the lower-addressed half of the field, regardless
+ of the endianness of the host.
+*/
+typedef
+ struct {
+ // No fields are actually used in this struct, because a Block has
+ // many variable sized fields and so can't be accessed
+ // meaningfully with normal fields. So we use access functions all
+ // the time. This struct gives us a type to use, though. Also, we
+ // make sizeof(Block) 1 byte so that we can do arithmetic with the
+ // Block* type in increments of 1!
+ UByte dummy;
+ }
+ Block;
+
+// A superblock. 'padding' is never used, it just ensures that if the
+// entire Superblock is aligned to VG_MIN_MALLOC_SZB, then payload_bytes[]
+// will be too. It can add small amounts of padding unnecessarily -- eg.
+// 8-bytes on 32-bit machines with an 8-byte VG_MIN_MALLOC_SZB -- because
+// it's too hard to make a constant expression that works perfectly in all
+// cases.
+// payload_bytes[] is made a single big Block when the Superblock is
+// created, and then can be split and the splittings remerged, but Blocks
+// always cover its entire length -- there's never any unused bytes at the
+// end, for example.
+typedef
+ struct _Superblock {
+ SizeT n_payload_bytes;
+ void* padding2;
+ UByte padding[ VG_MIN_MALLOC_SZB -
+ ((sizeof(struct _Superblock*) + sizeof(SizeT)) %
+ VG_MIN_MALLOC_SZB) ];
+ UByte payload_bytes[0];
+ }
+ Superblock;
+
+// An arena. 'freelist' is a circular, doubly-linked list. 'rz_szB' is
+// elastic, in that it can be bigger than asked-for to ensure alignment.
+typedef
+ struct {
+ Char* name;
+ Bool clientmem; // Allocates in the client address space?
+ SizeT rz_szB; // Red zone size in bytes
+ SizeT min_sblock_szB; // Minimum superblock size in bytes
+ Block* freelist[N_MALLOC_LISTS];
+ // A dynamically expanding, ordered array of (pointers to)
+ // superblocks in the arena. If this array is expanded, which
+ // is rare, the previous space it occupies is simply abandoned.
+ // To avoid having to get yet another block from m_aspacemgr for
+ // the first incarnation of this array, the first allocation of
+ // it is within this struct. If it has to be expanded then the
+ // new space is acquired from m_aspacemgr as you would expect.
+ Superblock** sblocks;
+ SizeT sblocks_size;
+ SizeT sblocks_used;
+ Superblock* sblocks_initial[SBLOCKS_SIZE_INITIAL];
+ // Stats only.
+ SizeT bytes_on_loan;
+ SizeT bytes_mmaped;
+ SizeT bytes_on_loan_max;
+ SizeT next_profile_at;
+ }
+ Arena;
+
+
+/*------------------------------------------------------------*/
+/*--- Low-level functions for working with Blocks. ---*/
+/*------------------------------------------------------------*/
+
+#define SIZE_T_0x1 ((SizeT)0x1)
+
+static char* probably_your_fault =
+ "This is probably caused by your program erroneously writing past the\n"
+ "end of a heap block and corrupting heap metadata. If you fix any\n"
+ "invalid writes reported by Memcheck, this assertion failure will\n"
+ "probably go away. Please try that before reporting this as a bug.\n";
+
+// Mark a bszB as in-use, and not in-use, and remove the in-use attribute.
+static __inline__
+SizeT mk_inuse_bszB ( SizeT bszB )
+{
+ vg_assert2(bszB != 0, probably_your_fault);
+ return bszB & (~SIZE_T_0x1);
+}
+static __inline__
+SizeT mk_free_bszB ( SizeT bszB )
+{
+ vg_assert2(bszB != 0, probably_your_fault);
+ return bszB | SIZE_T_0x1;
+}
+static __inline__
+SizeT mk_plain_bszB ( SizeT bszB )
+{
+ vg_assert2(bszB != 0, probably_your_fault);
+ return bszB & (~SIZE_T_0x1);
+}
+
+// return either 0 or sizeof(ULong) depending on whether or not
+// heap profiling is engaged
+static __inline__
+SizeT hp_overhead_szB ( void )
+{
+ return VG_(clo_profile_heap) ? sizeof(ULong) : 0;
+}
+
+//---------------------------------------------------------------------------
+
+// Get a block's size as stored, ie with the in-use/free attribute.
+static __inline__
+SizeT get_bszB_as_is ( Block* b )
+{
+ UByte* b2 = (UByte*)b;
+ SizeT bszB_lo = *(SizeT*)&b2[0 + hp_overhead_szB()];
+ SizeT bszB_hi = *(SizeT*)&b2[mk_plain_bszB(bszB_lo) - sizeof(SizeT)];
+ vg_assert2(bszB_lo == bszB_hi,
+ "Heap block lo/hi size mismatch: lo = %llu, hi = %llu.\n%s",
+ (ULong)bszB_lo, (ULong)bszB_hi, probably_your_fault);
+ return bszB_lo;
+}
+
+// Get a block's plain size, ie. remove the in-use/free attribute.
+static __inline__
+SizeT get_bszB ( Block* b )
+{
+ return mk_plain_bszB(get_bszB_as_is(b));
+}
+
+// Set the size fields of a block. bszB may have the in-use/free attribute.
+static __inline__
+void set_bszB ( Block* b, SizeT bszB )
+{
+ UByte* b2 = (UByte*)b;
+ *(SizeT*)&b2[0 + hp_overhead_szB()] = bszB;
+ *(SizeT*)&b2[mk_plain_bszB(bszB) - sizeof(SizeT)] = bszB;
+}
+
+//---------------------------------------------------------------------------
+
+// Does this block have the in-use attribute?
+static __inline__
+Bool is_inuse_block ( Block* b )
+{
+ SizeT bszB = get_bszB_as_is(b);
+ vg_assert2(bszB != 0, probably_your_fault);
+ return (0 != (bszB & SIZE_T_0x1)) ? False : True;
+}
+
+//---------------------------------------------------------------------------
+
+// Return the lower, upper and total overhead in bytes for a block.
+// These are determined purely by which arena the block lives in.
+static __inline__
+SizeT overhead_szB_lo ( Arena* a )
+{
+ return hp_overhead_szB() + sizeof(SizeT) + a->rz_szB;
+}
+static __inline__
+SizeT overhead_szB_hi ( Arena* a )
+{
+ return a->rz_szB + sizeof(SizeT);
+}
+static __inline__
+SizeT overhead_szB ( Arena* a )
+{
+ return overhead_szB_lo(a) + overhead_szB_hi(a);
+}
+
+//---------------------------------------------------------------------------
+
+// Return the minimum bszB for a block in this arena. Can have zero-length
+// payloads, so it's the size of the admin bytes.
+static __inline__
+SizeT min_useful_bszB ( Arena* a )
+{
+ return overhead_szB(a);
+}
+
+//---------------------------------------------------------------------------
+
+// Convert payload size <--> block size (both in bytes).
+static __inline__
+SizeT pszB_to_bszB ( Arena* a, SizeT pszB )
+{
+ return pszB + overhead_szB(a);
+}
+static __inline__
+SizeT bszB_to_pszB ( Arena* a, SizeT bszB )
+{
+ vg_assert2(bszB >= overhead_szB(a), probably_your_fault);
+ return bszB - overhead_szB(a);
+}
+
+//---------------------------------------------------------------------------
+
+// Get a block's payload size.
+static __inline__
+SizeT get_pszB ( Arena* a, Block* b )
+{
+ return bszB_to_pszB(a, get_bszB(b));
+}
+
+//---------------------------------------------------------------------------
+
+// Given the addr of a block, return the addr of its payload, and vice versa.
+static __inline__
+UByte* get_block_payload ( Arena* a, Block* b )
+{
+ UByte* b2 = (UByte*)b;
+ return & b2[ overhead_szB_lo(a) ];
+}
+// Given the addr of a block's payload, return the addr of the block itself.
+static __inline__
+Block* get_payload_block ( Arena* a, UByte* payload )
+{
+ return (Block*)&payload[ -overhead_szB_lo(a) ];
+}
+
+//---------------------------------------------------------------------------
+
+// Set and get the next and previous link fields of a block.
+static __inline__
+void set_prev_b ( Block* b, Block* prev_p )
+{
+ UByte* b2 = (UByte*)b;
+ *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)] = prev_p;
+}
+static __inline__
+void set_next_b ( Block* b, Block* next_p )
+{
+ UByte* b2 = (UByte*)b;
+ *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)] = next_p;
+}
+static __inline__
+Block* get_prev_b ( Block* b )
+{
+ UByte* b2 = (UByte*)b;
+ return *(Block**)&b2[hp_overhead_szB() + sizeof(SizeT)];
+}
+static __inline__
+Block* get_next_b ( Block* b )
+{
+ UByte* b2 = (UByte*)b;
+ return *(Block**)&b2[get_bszB(b) - sizeof(SizeT) - sizeof(void*)];
+}
+
+//---------------------------------------------------------------------------
+
+// Set and get the cost-center field of a block.
+static __inline__
+void set_cc ( Block* b, HChar* cc )
+{
+ UByte* b2 = (UByte*)b;
+ vg_assert( VG_(clo_profile_heap) );
+ *(HChar**)&b2[0] = cc;
+}
+static __inline__
+HChar* get_cc ( Block* b )
+{
+ UByte* b2 = (UByte*)b;
+ vg_assert( VG_(clo_profile_heap) );
+ return *(HChar**)&b2[0];
+}
+
+//---------------------------------------------------------------------------
+
+// Get the block immediately preceding this one in the Superblock.
+static __inline__
+Block* get_predecessor_block ( Block* b )
+{
+ UByte* b2 = (UByte*)b;
+ SizeT bszB = mk_plain_bszB( (*(SizeT*)&b2[-sizeof(SizeT)]) );
+ return (Block*)&b2[-bszB];
+}
+
+//---------------------------------------------------------------------------
+
+// Read and write the lower and upper red-zone bytes of a block.
+static __inline__
+void set_rz_lo_byte ( Block* b, UInt rz_byteno, UByte v )
+{
+ UByte* b2 = (UByte*)b;
+ b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno] = v;
+}
+static __inline__
+void set_rz_hi_byte ( Block* b, UInt rz_byteno, UByte v )
+{
+ UByte* b2 = (UByte*)b;
+ b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1] = v;
+}
+static __inline__
+UByte get_rz_lo_byte ( Block* b, UInt rz_byteno )
+{
+ UByte* b2 = (UByte*)b;
+ return b2[hp_overhead_szB() + sizeof(SizeT) + rz_byteno];
+}
+static __inline__
+UByte get_rz_hi_byte ( Block* b, UInt rz_byteno )
+{
+ UByte* b2 = (UByte*)b;
+ return b2[get_bszB(b) - sizeof(SizeT) - rz_byteno - 1];
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Arena management ---*/
+/*------------------------------------------------------------*/
+
+#define CORE_ARENA_MIN_SZB 1048576
+
+// The arena structures themselves.
+static Arena vg_arena[VG_N_ARENAS];
+
+// Functions external to this module identify arenas using ArenaIds,
+// not Arena*s. This fn converts the former to the latter.
+static Arena* arenaId_to_ArenaP ( ArenaId arena )
+{
+ vg_assert(arena >= 0 && arena < VG_N_ARENAS);
+ return & vg_arena[arena];
+}
+
+// Initialise an arena. rz_szB is the minimum redzone size; it might be
+// made bigger to ensure that VG_MIN_MALLOC_SZB is observed.
+static
+void arena_init ( ArenaId aid, Char* name, SizeT rz_szB, SizeT min_sblock_szB )
+{
+ SizeT i;
+ Arena* a = arenaId_to_ArenaP(aid);
+
+ // Ensure redzones are a reasonable size. They must always be at least
+ // the size of a pointer, for holding the prev/next pointer (see the layout
+ // details at the top of this file).
+ vg_assert(rz_szB < 128);
+ if (rz_szB < sizeof(void*)) rz_szB = sizeof(void*);
+
+ vg_assert((min_sblock_szB % VKI_PAGE_SIZE) == 0);
+ a->name = name;
+ a->clientmem = ( VG_AR_CLIENT == aid ? True : False );
+
+ // The size of the low and high admin sections in a block must be a
+ // multiple of VG_MIN_MALLOC_SZB. So we round up the asked-for
+ // redzone size if necessary to achieve this.
+ a->rz_szB = rz_szB;
+ while (0 != overhead_szB_lo(a) % VG_MIN_MALLOC_SZB) a->rz_szB++;
+ // vg_assert(overhead_szB_lo(a) == overhead_szB_hi(a));
+ vg_assert(0 == overhead_szB_lo(a) % VG_MIN_MALLOC_SZB);
+ vg_assert(0 == overhead_szB_hi(a) % VG_MIN_MALLOC_SZB);
+
+ a->min_sblock_szB = min_sblock_szB;
+ for (i = 0; i < N_MALLOC_LISTS; i++) a->freelist[i] = NULL;
+
+ a->sblocks = & a->sblocks_initial[0];
+ a->sblocks_size = SBLOCKS_SIZE_INITIAL;
+ a->sblocks_used = 0;
+ a->bytes_on_loan = 0;
+ a->bytes_mmaped = 0;
+ a->bytes_on_loan_max = 0;
+ a->next_profile_at = 25 * 1000 * 1000;
+ vg_assert(sizeof(a->sblocks_initial)
+ == SBLOCKS_SIZE_INITIAL * sizeof(Superblock*));
+}
+
+/* Print vital stats for an arena. */
+void VG_(print_all_arena_stats) ( void )
+{
+ UInt i;
+ for (i = 0; i < VG_N_ARENAS; i++) {
+ Arena* a = arenaId_to_ArenaP(i);
+ VG_(message)(Vg_DebugMsg,
+ "%8s: %8ld mmap'd, %8ld/%8ld max/curr",
+ a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
+ );
+ }
+}
+
+void VG_(print_arena_cc_analysis) ( void )
+{
+ UInt i;
+ vg_assert( VG_(clo_profile_heap) );
+ for (i = 0; i < VG_N_ARENAS; i++) {
+ cc_analyse_alloc_arena(i);
+ }
+}
+
+
+/* This library is self-initialising, as it makes this more self-contained,
+ less coupled with the outside world. Hence VG_(arena_malloc)() and
+ VG_(arena_free)() below always call ensure_mm_init() to ensure things are
+ correctly initialised.
+
+ We initialise the client arena separately (and later) because the core
+ must do non-client allocation before the tool has a chance to set the
+ client arena's redzone size.
+*/
+static Bool client_inited = False;
+static Bool nonclient_inited = False;
+
+static
+void ensure_mm_init ( ArenaId aid )
+{
+ static SizeT client_rz_szB = 8; // default: be paranoid
+
+ /* We use checked red zones (of various sizes) for our internal stuff,
+ and an unchecked zone of arbitrary size for the client. Of
+ course the client's red zone can be checked by the tool, eg.
+ by using addressibility maps, but not by the mechanism implemented
+ here, which merely checks at the time of freeing that the red
+ zone bytes are unchanged.
+
+ Nb: redzone sizes are *minimums*; they could be made bigger to ensure
+ alignment. Eg. with 8 byte alignment, on 32-bit machines 4 stays as
+ 4, but 16 becomes 20; but on 64-bit machines 4 becomes 8, and 16
+ stays as 16 --- the extra 4 bytes in both are accounted for by the
+ larger prev/next ptr.
+ */
+ if (VG_AR_CLIENT == aid) {
+ Int ar_client_sbszB;
+ if (client_inited) {
+ // This assertion ensures that a tool cannot try to change the client
+ // redzone size with VG_(needs_malloc_replacement)() after this module
+ // has done its first allocation from the client arena.
+ if (VG_(needs).malloc_replacement)
+ vg_assert(client_rz_szB == VG_(tdict).tool_client_redzone_szB);
+ return;
+ }
+
+ // Check and set the client arena redzone size
+ if (VG_(needs).malloc_replacement) {
+ client_rz_szB = VG_(tdict).tool_client_redzone_szB;
+ // 128 is no special figure, just something not too big
+ if (client_rz_szB > 128) {
+ VG_(printf)( "\nTool error:\n"
+ " specified redzone size is too big (%llu)\n",
+ (ULong)client_rz_szB);
+ VG_(exit)(1);
+ }
+ }
+ // Initialise the client arena. On AIX it's important to have
+ // relatively large client blocks so as not to cause excessively
+ // fine-grained interleaving of V and C address space. On Linux
+ // this is irrelevant since aspacem can keep the two spaces
+ // well apart, but not so on AIX. On all platforms though,
+ // increasing the superblock size reduces the number of superblocks
+ // in the client arena, which makes findSb cheaper.
+# if defined(VGO_aix5)
+ ar_client_sbszB = 16777216;
+# else
+ ar_client_sbszB = 4194304;
+# endif
+ arena_init ( VG_AR_CLIENT, "client", client_rz_szB, ar_client_sbszB );
+ client_inited = True;
+
+ } else {
+ if (nonclient_inited) {
+ return;
+ }
+ // Initialise the non-client arenas
+ arena_init ( VG_AR_CORE, "core", 4, 1048576 );
+ arena_init ( VG_AR_TOOL, "tool", 4, 4194304 );
+ arena_init ( VG_AR_DINFO, "dinfo", 4, 1048576 );
+ arena_init ( VG_AR_DEMANGLE, "demangle", 4, 65536 );
+ arena_init ( VG_AR_EXECTXT, "exectxt", 4, 1048576 );
+ arena_init ( VG_AR_ERRORS, "errors", 4, 65536 );
+ arena_init ( VG_AR_TTAUX, "ttaux", 4, 65536 );
+ nonclient_inited = True;
+ }
+
+# ifdef DEBUG_MALLOC
+ VG_(printf)("ZZZ1\n");
+ VG_(sanity_check_malloc_all)();
+ VG_(printf)("ZZZ2\n");
+# endif
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Superblock management ---*/
+/*------------------------------------------------------------*/
+
+__attribute__((noreturn))
+void VG_(out_of_memory_NORETURN) ( HChar* who, SizeT szB )
+{
+ static Bool alreadyCrashing = False;
+ ULong tot_alloc = VG_(am_get_anonsize_total)();
+ Char* s1 =
+ "\n"
+ " Valgrind's memory management: out of memory:\n"
+ " %s's request for %llu bytes failed.\n"
+ " %llu bytes have already been allocated.\n"
+ " Valgrind cannot continue. Sorry.\n\n"
+ " There are several possible reasons for this.\n"
+ " - You have some kind of memory limit in place. Look at the\n"
+ " output of 'ulimit -a'. Is there a limit on the size of\n"
+ " virtual memory or address space?\n"
+ " - You have run out of swap space.\n"
+ " - Valgrind has a bug. If you think this is the case or you are\n"
+ " not sure, please let us know and we'll try to fix it.\n"
+ " Please note that programs can take substantially more memory than\n"
+ " normal when running under Valgrind tools, eg. up to twice or\n"
+ " more, depending on the tool. On a 64-bit machine, Valgrind\n"
+ " should be able to make use of up 32GB memory. On a 32-bit\n"
+ " machine, Valgrind should be able to use all the memory available\n"
+ " to a single process, up to 4GB if that's how you have your\n"
+ " kernel configured. Most 32-bit Linux setups allow a maximum of\n"
+ " 3GB per process.\n\n"
+ " Whatever the reason, Valgrind cannot continue. Sorry.\n";
+
+ if (!alreadyCrashing) {
+ alreadyCrashing = True;
+ VG_(message)(Vg_UserMsg, s1, who, (ULong)szB, tot_alloc);
+ } else {
+ VG_(debugLog)(0,"mallocfree", s1, who, (ULong)szB, tot_alloc);
+ }
+ VG_(exit)(1);
+}
+
+
+// Align ptr p upwards to an align-sized boundary.
+static
+void* align_upwards ( void* p, SizeT align )
+{
+ Addr a = (Addr)p;
+ if ((a % align) == 0) return (void*)a;
+ return (void*)(a - (a % align) + align);
+}
+
+// If not enough memory available, either aborts (for non-client memory)
+// or returns 0 (for client memory).
+static
+Superblock* newSuperblock ( Arena* a, SizeT cszB )
+{
+ Superblock* sb;
+ SysRes sres;
+
+ // Take into account admin bytes in the Superblock.
+ cszB += sizeof(Superblock);
+
+ if (cszB < a->min_sblock_szB) cszB = a->min_sblock_szB;
+ cszB = VG_PGROUNDUP(cszB);
+
+ if (a->clientmem) {
+ // client allocation -- return 0 to client if it fails
+ sres = VG_(am_sbrk_anon_float_client)
+ ( cszB, VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC );
+ if (sres.isError)
+ return 0;
+ sb = (Superblock*)sres.res;
+ // Mark this segment as containing client heap. The leak
+ // checker needs to be able to identify such segments so as not
+ // to use them as sources of roots during leak checks.
+ VG_(am_set_segment_isCH_if_SkAnonC)(
+ (NSegment*) VG_(am_find_nsegment)( (Addr)sb )
+ );
+ } else {
+ // non-client allocation -- abort if it fails
+ sres = VG_(am_sbrk_anon_float_valgrind)( cszB );
+ if (sres.isError) {
+ VG_(out_of_memory_NORETURN)("newSuperblock", cszB);
+ /* NOTREACHED */
+ sb = NULL; /* keep gcc happy */
+ } else {
+ sb = (Superblock*)sres.res;
+ }
+ }
+ vg_assert(NULL != sb);
+ //zzVALGRIND_MAKE_MEM_UNDEFINED(sb, cszB);
+ vg_assert(0 == (Addr)sb % VG_MIN_MALLOC_SZB);
+ sb->n_payload_bytes = cszB - sizeof(Superblock);
+ a->bytes_mmaped += cszB;
+ VG_(debugLog)(1, "mallocfree",
+ "newSuperblock at %p (pszB %7ld) owner %s/%s\n",
+ sb, sb->n_payload_bytes,
+ a->clientmem ? "CLIENT" : "VALGRIND", a->name );
+ return sb;
+}
+
+// Find the superblock containing the given chunk.
+static
+Superblock* findSb ( Arena* a, Block* b )
+{
+ SizeT min = 0;
+ SizeT max = a->sblocks_used;
+
+ while (min <= max) {
+ Superblock * sb;
+ SizeT pos = min + (max - min)/2;
+
+ vg_assert(pos >= 0 && pos < a->sblocks_used);
+ sb = a->sblocks[pos];
+ if ((Block*)&sb->payload_bytes[0] <= b
+ && b < (Block*)&sb->payload_bytes[sb->n_payload_bytes])
+ {
+ return sb;
+ } else if ((Block*)&sb->payload_bytes[0] <= b) {
+ min = pos + 1;
+ } else {
+ max = pos - 1;
+ }
+ }
+ VG_(printf)("findSb: can't find pointer %p in arena '%s'\n",
+ b, a->name );
+ VG_(core_panic)("findSb: VG_(arena_free)() in wrong arena?");
+ return NULL; /*NOTREACHED*/
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Functions for working with freelists. ---*/
+/*------------------------------------------------------------*/
+
+// Nb: Determination of which freelist a block lives on is based on the
+// payload size, not block size.
+
+// Convert a payload size in bytes to a freelist number.
+static
+UInt pszB_to_listNo ( SizeT pszB )
+{
+ SizeT n = pszB / VG_MIN_MALLOC_SZB;
+ vg_assert(0 == pszB % VG_MIN_MALLOC_SZB);
+
+ // The first 64 lists hold blocks of size VG_MIN_MALLOC_SZB * list_num.
+ // The final 48 hold bigger blocks.
+ if (n < 64) return (UInt)n;
+ /* Exponential slope up, factor 1.05 */
+ if (n < 67) return 64;
+ if (n < 70) return 65;
+ if (n < 74) return 66;
+ if (n < 77) return 67;
+ if (n < 81) return 68;
+ if (n < 85) return 69;
+ if (n < 90) return 70;
+ if (n < 94) return 71;
+ if (n < 99) return 72;
+ if (n < 104) return 73;
+ if (n < 109) return 74;
+ if (n < 114) return 75;
+ if (n < 120) return 76;
+ if (n < 126) return 77;
+ if (n < 133) return 78;
+ if (n < 139) return 79;
+ /* Exponential slope up, factor 1.10 */
+ if (n < 153) return 80;
+ if (n < 169) return 81;
+ if (n < 185) return 82;
+ if (n < 204) return 83;
+ if (n < 224) return 84;
+ if (n < 247) return 85;
+ if (n < 272) return 86;
+ if (n < 299) return 87;
+ if (n < 329) return 88;
+ if (n < 362) return 89;
+ if (n < 398) return 90;
+ if (n < 438) return 91;
+ if (n < 482) return 92;
+ if (n < 530) return 93;
+ if (n < 583) return 94;
+ if (n < 641) return 95;
+ /* Exponential slope up, factor 1.20 */
+ if (n < 770) return 96;
+ if (n < 924) return 97;
+ if (n < 1109) return 98;
+ if (n < 1331) return 99;
+ if (n < 1597) return 100;
+ if (n < 1916) return 101;
+ if (n < 2300) return 102;
+ if (n < 2760) return 103;
+ if (n < 3312) return 104;
+ if (n < 3974) return 105;
+ if (n < 4769) return 106;
+ if (n < 5723) return 107;
+ if (n < 6868) return 108;
+ if (n < 8241) return 109;
+ if (n < 9890) return 110;
+ return 111;
+}
+
+// What is the minimum payload size for a given list?
+static
+SizeT listNo_to_pszB_min ( UInt listNo )
+{
+ /* Repeatedly computing this function at every request is
+ expensive. Hence at the first call just cache the result for
+ every possible argument. */
+ static SizeT cache[N_MALLOC_LISTS];
+ static Bool cache_valid = False;
+ if (!cache_valid) {
+ UInt i;
+ for (i = 0; i < N_MALLOC_LISTS; i++) {
+ SizeT pszB = 0;
+ while (pszB_to_listNo(pszB) < i)
+ pszB += VG_MIN_MALLOC_SZB;
+ cache[i] = pszB;
+ }
+ cache_valid = True;
+ }
+ /* Returned cached answer. */
+ vg_assert(listNo <= N_MALLOC_LISTS);
+ return cache[listNo];
+}
+
+// What is the maximum payload size for a given list?
+static
+SizeT listNo_to_pszB_max ( UInt listNo )
+{
+ vg_assert(listNo <= N_MALLOC_LISTS);
+ if (listNo == N_MALLOC_LISTS-1) {
+ return MAX_PSZB;
+ } else {
+ return listNo_to_pszB_min(listNo+1) - 1;
+ }
+}
+
+
+/* A nasty hack to try and reduce fragmentation. Try and replace
+ a->freelist[lno] with another block on the same list but with a
+ lower address, with the idea of attempting to recycle the same
+ blocks rather than cruise through the address space. */
+static
+void swizzle ( Arena* a, UInt lno )
+{
+ Block* p_best;
+ Block* pp;
+ Block* pn;
+ UInt i;
+
+ p_best = a->freelist[lno];
+ if (p_best == NULL) return;
+
+ pn = pp = p_best;
+
+ // This loop bound was 20 for a long time, but experiments showed that
+ // reducing it to 10 gave the same result in all the tests, and 5 got the
+ // same result in 85--100% of cases. And it's called often enough to be
+ // noticeable in programs that allocated a lot.
+ for (i = 0; i < 5; i++) {
+ pn = get_next_b(pn);
+ pp = get_prev_b(pp);
+ if (pn < p_best) p_best = pn;
+ if (pp < p_best) p_best = pp;
+ }
+ if (p_best < a->freelist[lno]) {
+# ifdef VERBOSE_MALLOC
+ VG_(printf)("retreat by %ld\n", (Word)(a->freelist[lno] - p_best));
+# endif
+ a->freelist[lno] = p_best;
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Sanity-check/debugging machinery. ---*/
+/*------------------------------------------------------------*/
+
+#define REDZONE_LO_MASK 0x31
+#define REDZONE_HI_MASK 0x7c
+
+// Do some crude sanity checks on a Block.
+static
+Bool blockSane ( Arena* a, Block* b )
+{
+# define BLEAT(str) VG_(printf)("blockSane: fail -- %s\n",str)
+ UInt i;
+ // The lo and hi size fields will be checked (indirectly) by the call
+ // to get_rz_hi_byte().
+ if (!a->clientmem && is_inuse_block(b)) {
+ for (i = 0; i < a->rz_szB; i++) {
+ if (get_rz_lo_byte(b, i) !=
+ (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK))
+ {BLEAT("redzone-lo");return False;}
+ if (get_rz_hi_byte(b, i) !=
+ (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK))
+ {BLEAT("redzone-hi");return False;}
+ }
+ }
+ return True;
+# undef BLEAT
+}
+
+// Print superblocks (only for debugging).
+static
+void ppSuperblocks ( Arena* a )
+{
+ UInt i, j, blockno = 1;
+ SizeT b_bszB;
+
+ for (j = 0; j < a->sblocks_used; ++j) {
+ Superblock * sb = a->sblocks[j];
+
+ VG_(printf)( "\n" );
+ VG_(printf)( "superblock %d at %p, sb->n_pl_bs = %lu\n",
+ blockno++, sb, sb->n_payload_bytes);
+ for (i = 0; i < sb->n_payload_bytes; i += b_bszB) {
+ Block* b = (Block*)&sb->payload_bytes[i];
+ b_bszB = get_bszB(b);
+ VG_(printf)( " block at %d, bszB %lu: ", i, b_bszB );
+ VG_(printf)( "%s, ", is_inuse_block(b) ? "inuse" : "free");
+ VG_(printf)( "%s\n", blockSane(a, b) ? "ok" : "BAD" );
+ }
+ vg_assert(i == sb->n_payload_bytes); // no overshoot at end of Sb
+ }
+ VG_(printf)( "end of superblocks\n\n" );
+}
+
+// Sanity check both the superblocks and the chains.
// Sanity-check a single arena.  Two passes:
//   1. Walk every block of every superblock, checking per-block sanity,
//      that adjacent free blocks were coalesced, and summing in-use bytes.
//   2. Walk every free list, checking linkage and per-list size bounds.
// The free-block count from pass 1 must match the list count from pass 2,
// and the in-use byte total must match a->bytes_on_loan.  Any failure
// panics via BOMB.
static void sanity_check_malloc_arena ( ArenaId aid )
{
   UInt i, j, superblockctr, blockctr_sb, blockctr_li;
   UInt blockctr_sb_free, listno;
   SizeT b_bszB, b_pszB, list_min_pszB, list_max_pszB;
   Bool thisFree, lastWasFree, sblockarrOK;
   Block* b;
   Block* b_prev;
   SizeT arena_bytes_on_loan;
   Arena* a;

#  define BOMB VG_(core_panic)("sanity_check_malloc_arena")

   a = arenaId_to_ArenaP(aid);

   // Check the superblock array.
   // Invariants: array present, never smaller than the initial capacity,
   // 'used' within bounds, and the array pointer refers to the embedded
   // initial array iff the capacity still equals the initial capacity.
   sblockarrOK
      = a->sblocks != NULL
        && a->sblocks_size >= SBLOCKS_SIZE_INITIAL
        && a->sblocks_used <= a->sblocks_size
        && (a->sblocks_size == SBLOCKS_SIZE_INITIAL
            ? (a->sblocks == &a->sblocks_initial[0])
            : (a->sblocks != &a->sblocks_initial[0]));
   if (!sblockarrOK) {
      VG_(printf)("sanity_check_malloc_arena: sblock array BAD\n");
      BOMB;
   }

   // First, traverse all the superblocks, inspecting the Blocks in each.
   superblockctr = blockctr_sb = blockctr_sb_free = 0;
   arena_bytes_on_loan = 0;
   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      superblockctr++;
      // Step through the payload area block by block; get_bszB_as_is is
      // used because the stored size word also carries the in-use flag.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         blockctr_sb++;
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
            BOMB;
         }
         thisFree = !is_inuse_block(b);
         // Adjacent free blocks should have been merged at free time.
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %d "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            BOMB;
         }
         if (thisFree) blockctr_sb_free++;
         if (!thisFree)
            arena_bytes_on_loan += bszB_to_pszB(a, b_bszB);
         lastWasFree = thisFree;
      }
      // The walk must land exactly on the end of the payload area.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         BOMB;
      }
   }

   if (arena_bytes_on_loan != a->bytes_on_loan) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: a->bytes_on_loan %ld, "
                   "arena_bytes_on_loan %ld: "
                   "MISMATCH\n", a->bytes_on_loan, arena_bytes_on_loan);
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   /* Second, traverse each list, checking that the back pointers make
      sense, counting blocks encountered, and checking that each block
      is an appropriate size for this list. */
   blockctr_li = 0;
   for (listno = 0; listno < N_MALLOC_LISTS; listno++) {
      list_min_pszB = listNo_to_pszB_min(listno);
      list_max_pszB = listNo_to_pszB_max(listno);
      b = a->freelist[listno];
      if (b == NULL) continue;
      // Free lists are circular and doubly linked; stop once we wrap
      // back around to the head.
      while (True) {
         b_prev = b;
         b = get_next_b(b);
         if (get_prev_b(b) != b_prev) {
            VG_(printf)( "sanity_check_malloc_arena: list %d at %p: "
                         "BAD LINKAGE\n",
                         listno, b );
            BOMB;
         }
         b_pszB = get_pszB(a, b);
         if (b_pszB < list_min_pszB || b_pszB > list_max_pszB) {
            VG_(printf)(
               "sanity_check_malloc_arena: list %d at %p: "
               "WRONG CHAIN SIZE %luB (%luB, %luB)\n",
               listno, b, b_pszB, list_min_pszB, list_max_pszB );
            BOMB;
         }
         blockctr_li++;
         if (b == a->freelist[listno]) break;
      }
   }

   // Cross-check: free blocks seen in superblocks vs. blocks on lists.
   if (blockctr_sb_free != blockctr_li) {
#     ifdef VERBOSE_MALLOC
      VG_(printf)( "sanity_check_malloc_arena: BLOCK COUNT MISMATCH "
                   "(via sbs %d, via lists %d)\n",
                   blockctr_sb_free, blockctr_li );
#     endif
      ppSuperblocks(a);
      BOMB;
   }

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "%8s: %2d sbs, %5d bs, %2d/%-2d free bs, "
                   "%7ld mmap, %7ld loan",
                   a->name,
                   superblockctr,
                   blockctr_sb, blockctr_sb_free, blockctr_li,
                   a->bytes_mmaped, a->bytes_on_loan);
#  undef BOMB
}
+
+
// Heap-profiling support: fixed-size accumulator table of per-cost-centre
// allocation totals, filled by cc_analyse_alloc_arena below.
#define N_AN_CCS 1000

// One record per distinct cost-centre string: live bytes and block count.
typedef struct { ULong nBytes; ULong nBlocks; HChar* cc; } AnCC;

static AnCC anCCs[N_AN_CCS];
+
+static Int cmp_AnCC_by_vol ( void* v1, void* v2 ) {
+ AnCC* ancc1 = (AnCC*)v1;
+ AnCC* ancc2 = (AnCC*)v2;
+ if (ancc1->nBytes < ancc2->nBytes) return -1;
+ if (ancc1->nBytes > ancc2->nBytes) return 1;
+ return 0;
+}
+
// Heap profiling: walk every block in arena 'aid', bucketing in-use
// blocks by their cost-centre string into anCCs[], then print per-centre
// totals sorted by volume.  Re-checks block-level sanity along the way.
static void cc_analyse_alloc_arena ( ArenaId aid )
{
   Word i, j, k;
   Arena* a;
   Block* b;
   Bool thisFree, lastWasFree;
   SizeT b_bszB;

   HChar* cc;
   UInt n_ccs = 0;
   //return;
   a = arenaId_to_ArenaP(aid);
   if (a->name == NULL) {
      /* arena is not in use, is not initialised and will fail the
         sanity check that follows. */
      return;
   }

   sanity_check_malloc_arena(aid);

   VG_(printf)(
      "-------- Arena \"%s\": %ld mmap'd, %ld/%ld max/curr --------\n",
      a->name, a->bytes_mmaped, a->bytes_on_loan_max, a->bytes_on_loan
   );

   for (j = 0; j < a->sblocks_used; ++j) {
      Superblock * sb = a->sblocks[j];
      lastWasFree = False;
      // Same block-by-block walk as sanity_check_malloc_arena.
      for (i = 0; i < sb->n_payload_bytes; i += mk_plain_bszB(b_bszB)) {
         b = (Block*)&sb->payload_bytes[i];
         b_bszB = get_bszB_as_is(b);
         if (!blockSane(a, b)) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): BAD\n", sb, i, b_bszB );
            tl_assert(0);
         }
         thisFree = !is_inuse_block(b);
         if (thisFree && lastWasFree) {
            VG_(printf)("sanity_check_malloc_arena: sb %p, block %ld "
                        "(bszB %lu): UNMERGED FREES\n", sb, i, b_bszB );
            tl_assert(0);
         }
         lastWasFree = thisFree;

         // Only in-use blocks are charged to a cost centre.
         if (thisFree) continue;

         if (0)
         VG_(printf)("block: inUse=%d pszB=%d cc=%s\n",
                     (Int)(!thisFree),
                     (Int)bszB_to_pszB(a, b_bszB),
                     get_cc(b));
         cc = get_cc(b);
         tl_assert(cc);
         // Linear search of the accumulator table for this cost centre.
         for (k = 0; k < n_ccs; k++) {
            tl_assert(anCCs[k].cc);
            if (0 == VG_(strcmp)(cc, anCCs[k].cc))
               break;
         }
         tl_assert(k >= 0 && k <= n_ccs);

         // Not seen before: start a new entry.
         if (k == n_ccs) {
            tl_assert(n_ccs < N_AN_CCS-1);
            n_ccs++;
            anCCs[k].nBytes = 0;
            anCCs[k].nBlocks = 0;
            anCCs[k].cc = cc;
         }

         tl_assert(k >= 0 && k < n_ccs && k < N_AN_CCS);
         anCCs[k].nBytes += (ULong)bszB_to_pszB(a, b_bszB);
         anCCs[k].nBlocks++;
      }
      // The walk must land exactly on the end of the payload area.
      if (i > sb->n_payload_bytes) {
         VG_(printf)( "sanity_check_malloc_arena: sb %p: last block "
                      "overshoots end\n", sb);
         tl_assert(0);
      }
   }

   // Sort ascending by volume and print one line per cost centre.
   VG_(ssort)( &anCCs[0], n_ccs, sizeof(anCCs[0]), cmp_AnCC_by_vol );

   for (k = 0; k < n_ccs; k++) {
      VG_(printf)("%'13llu in %'9llu: %s\n",
                  anCCs[k].nBytes, anCCs[k].nBlocks, anCCs[k].cc );
   }

   VG_(printf)("\n");
}
+
+
+void VG_(sanity_check_malloc_all) ( void )
+{
+ UInt i;
+ for (i = 0; i < VG_N_ARENAS; i++) {
+ if (i == VG_AR_CLIENT && !client_inited)
+ continue;
+ sanity_check_malloc_arena ( i );
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Creating and deleting blocks. ---*/
+/*------------------------------------------------------------*/
+
// Mark the bytes at b .. b+bszB-1 as not in use, and add them to the
// relevant free list.
// 'b_lno' must already be the correct list number for this size (the
// caller computes it, asserted below).

static
void mkFreeBlock ( Arena* a, Block* b, SizeT bszB, UInt b_lno )
{
   SizeT pszB = bszB_to_pszB(a, bszB);
   vg_assert(b_lno == pszB_to_listNo(pszB));
   //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
   // Set the size fields and indicate not-in-use.
   set_bszB(b, mk_free_bszB(bszB));

   // Add to the relevant list.
   if (a->freelist[b_lno] == NULL) {
      // Empty list: b becomes a one-element circular list.
      set_prev_b(b, b);
      set_next_b(b, b);
      a->freelist[b_lno] = b;
   } else {
      // Splice b in just before the current head, i.e. at the "tail"
      // of the circular doubly-linked list.
      Block* b_prev = get_prev_b(a->freelist[b_lno]);
      Block* b_next = a->freelist[b_lno];
      set_next_b(b_prev, b);
      set_prev_b(b_next, b);
      set_next_b(b, b_next);
      set_prev_b(b, b_prev);
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
+
// Mark the bytes at b .. b+bszB-1 as in use, and set up the block
// appropriately.
static
void mkInuseBlock ( Arena* a, Block* b, SizeT bszB )
{
   UInt i;
   vg_assert(bszB >= min_useful_bszB(a));
   //zzVALGRIND_MAKE_MEM_UNDEFINED(b, bszB);
   set_bszB(b, mk_inuse_bszB(bszB));
   set_prev_b(b, NULL);    // Take off freelist
   set_next_b(b, NULL);    // ditto
   // For V-internal arenas, fill both redzones with a pattern derived
   // from the block address so later corruption can be detected
   // (presumably checked by blockSane -- confirm).
   if (!a->clientmem) {
      for (i = 0; i < a->rz_szB; i++) {
         set_rz_lo_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_LO_MASK));
         set_rz_hi_byte(b, i, (UByte)(((Addr)b&0xff) ^ REDZONE_HI_MASK));
      }
   }
#  ifdef DEBUG_MALLOC
   (void)blockSane(a,b);
#  endif
}
+
// Remove a block from a given list.  Does no sanity checking.
static
void unlinkBlock ( Arena* a, Block* b, UInt listno )
{
   vg_assert(listno < N_MALLOC_LISTS);
   if (get_prev_b(b) == b) {
      // Only one element in the list; treat it specially.
      vg_assert(get_next_b(b) == b);
      a->freelist[listno] = NULL;
   } else {
      Block* b_prev = get_prev_b(b);
      Block* b_next = get_next_b(b);
      // Repoint the head at b's predecessor: keeps the head valid even
      // when b was the head itself.
      a->freelist[listno] = b_prev;
      set_next_b(b_prev, b_next);
      set_prev_b(b_next, b_prev);
      swizzle ( a, listno );
   }
   // b is now on no list.
   set_prev_b(b, NULL);
   set_next_b(b, NULL);
}
+
+
+/*------------------------------------------------------------*/
+/*--- Core-visible functions. ---*/
+/*------------------------------------------------------------*/
+
+// Align the request size.
+static __inline__
+SizeT align_req_pszB ( SizeT req_pszB )
+{
+ SizeT n = VG_MIN_MALLOC_SZB-1;
+ return ((req_pszB + n) & (~n));
+}
+
// Core allocator for arena 'aid'.  Satisfies a request for req_pszB
// payload bytes by searching the free lists, or failing that by mapping
// a new superblock; the chosen block is split if the remainder is big
// enough to be useful.  'cc' is the (mandatory) cost centre to charge.
// Returns NULL only for the client arena (other arenas abort on OOM
// inside newSuperblock).
void* VG_(arena_malloc) ( ArenaId aid, HChar* cc, SizeT req_pszB )
{
   SizeT req_bszB, frag_bszB, b_bszB;
   UInt lno, i;
   Superblock* new_sb;
   Block* b = NULL;
   Arena* a;
   void* v;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);
   req_pszB = align_req_pszB(req_pszB);
   req_bszB = pszB_to_bszB(a, req_pszB);

   // You must provide a cost-center name against which to charge
   // this allocation; it isn't optional.
   vg_assert(cc);

   // Scan through all the big-enough freelists for a block.
   //
   // Nb: this scanning might be expensive in some cases.  Eg. if you
   // allocate lots of small objects without freeing them, but no
   // medium-sized objects, it will repeatedly scanning through the whole
   // list, and each time not find any free blocks until the last element.
   //
   // If this becomes a noticeable problem... the loop answers the question
   // "where is the first nonempty list above me?"  And most of the time,
   // you ask the same question and get the same answer.  So it would be
   // good to somehow cache the results of previous searches.
   // One possibility is an array (with N_MALLOC_LISTS elements) of
   // shortcuts.  shortcut[i] would give the index number of the nearest
   // larger list above list i which is non-empty.  Then this loop isn't
   // necessary.  However, we'd have to modify some section [ .. i-1] of the
   // shortcut array every time a list [i] changes from empty to nonempty or
   // back.  This would require care to avoid pathological worst-case
   // behaviour.
   //
   for (lno = pszB_to_listNo(req_pszB); lno < N_MALLOC_LISTS; lno++) {
      b = a->freelist[lno];
      if (NULL == b) continue;   // If this list is empty, try the next one.
      while (True) {
         b_bszB = get_bszB(b);
         if (b_bszB >= req_bszB) goto obtained_block;    // success!
         b = get_next_b(b);
         if (b == a->freelist[lno]) break;   // traversed entire freelist
      }
   }

   // If we reach here, no suitable block found, allocate a new superblock
   vg_assert(lno == N_MALLOC_LISTS);
   new_sb = newSuperblock(a, req_bszB);
   if (NULL == new_sb) {
      // Should only fail if for client, otherwise, should have aborted
      // already.
      vg_assert(VG_AR_CLIENT == aid);
      return NULL;
   }

   // Grow the superblock array (by doubling) if it is full.
   vg_assert(a->sblocks_used <= a->sblocks_size);
   if (a->sblocks_used == a->sblocks_size) {
      Superblock ** array;
      SysRes sres = VG_(am_sbrk_anon_float_valgrind)(sizeof(Superblock *) *
                                                     a->sblocks_size * 2);
      if (sres.isError) {
         VG_(out_of_memory_NORETURN)("arena_init", sizeof(Superblock *) *
                                                   a->sblocks_size * 2);
         /* NOTREACHED */
      }
      array = (Superblock**) sres.res;
      for (i = 0; i < a->sblocks_used; ++i) array[i] = a->sblocks[i];

      a->sblocks_size *= 2;
      a->sblocks = array;
      VG_(debugLog)(1, "mallocfree",
                       "sblock array for arena `%s' resized to %ld\n",
                       a->name, a->sblocks_size);
   }

   vg_assert(a->sblocks_used < a->sblocks_size);

   // Insertion step: keep the superblock array sorted by address.
   i = a->sblocks_used;
   while (i > 0) {
      if (a->sblocks[i-1] > new_sb) {
         a->sblocks[i] = a->sblocks[i-1];
      } else {
         break;
      }
      --i;
   }
   a->sblocks[i] = new_sb;
   a->sblocks_used++;

   // The new superblock is one big free block; put it on a free list.
   b = (Block*)&new_sb->payload_bytes[0];
   lno = pszB_to_listNo(bszB_to_pszB(a, new_sb->n_payload_bytes));
   mkFreeBlock ( a, b, new_sb->n_payload_bytes, lno);
   if (VG_(clo_profile_heap))
      set_cc(b, "admin.free-new-sb-1");
   // fall through

  obtained_block:
   // Ok, we can allocate from b, which lives in list lno.
   vg_assert(b != NULL);
   vg_assert(lno < N_MALLOC_LISTS);
   vg_assert(a->freelist[lno] != NULL);
   b_bszB = get_bszB(b);
   // req_bszB is the size of the block we are after.  b_bszB is the
   // size of what we've actually got. */
   vg_assert(b_bszB >= req_bszB);

   // Could we split this block and still get a useful fragment?
   frag_bszB = b_bszB - req_bszB;
   if (frag_bszB >= min_useful_bszB(a)) {
      // Yes, split block in two, put the fragment on the appropriate free
      // list, and update b_bszB accordingly.
      // printf( "split %dB into %dB and %dB\n", b_bszB, req_bszB, frag_bszB );
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, req_bszB);
      if (VG_(clo_profile_heap))
         set_cc(b, cc);
      // NOTE(review): &b[req_bszB] relies on Block being a byte-wide
      // type so that this is byte arithmetic -- confirm the Block typedef.
      mkFreeBlock(a, &b[req_bszB], frag_bszB,
                     pszB_to_listNo(bszB_to_pszB(a, frag_bszB)));
      if (VG_(clo_profile_heap))
         set_cc(&b[req_bszB], "admin.fragmentation-1");
      b_bszB = get_bszB(b);
   } else {
      // No, mark as in use and use as-is.
      unlinkBlock(a, b, lno);
      mkInuseBlock(a, b, b_bszB);
      if (VG_(clo_profile_heap))
         set_cc(b, cc);
   }

   // Update stats
   a->bytes_on_loan += bszB_to_pszB(a, b_bszB);
   if (a->bytes_on_loan > a->bytes_on_loan_max) {
      a->bytes_on_loan_max = a->bytes_on_loan;
      if (a->bytes_on_loan_max >= a->next_profile_at) {
         /* next profile after 10% more growth */
         a->next_profile_at
            = (SizeT)(
                 (((ULong)a->bytes_on_loan_max) * 110ULL) / 100ULL );
         if (VG_(clo_profile_heap))
            cc_analyse_alloc_arena(aid);
      }
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   v = get_block_payload(a, b);
   vg_assert( (((Addr)v) & (VG_MIN_MALLOC_SZB-1)) == 0 );

   /* VALGRIND_MALLOCLIKE_BLOCK(v, req_pszB, 0, False); */

   /* For debugging/testing purposes, fill the newly allocated area
      with a definite value in an attempt to shake out any
      uninitialised uses of the data (by V core / V tools, not by the
      client).  Testing on 25 Nov 07 with the values 0x00, 0xFF, 0x55,
      0xAA showed no differences in the regression tests on
      amd64-linux.  Note, is disabled by default. */
   if (0 && aid != VG_AR_CLIENT)
      VG_(memset)(v, 0xAA, (SizeT)req_pszB);

   return v;
}
+
+
// Return the block owning 'ptr' to the free lists of arena 'aid',
// coalescing with free neighbours on both sides.  V-internal blocks are
// sanity-checked on entry and junk-filled on release.  free(NULL) is a
// no-op.
void VG_(arena_free) ( ArenaId aid, void* ptr )
{
   Superblock* sb;
   UByte* sb_start;
   UByte* sb_end;
   Block* other_b;
   Block* b;
   SizeT b_bszB, b_pszB, other_bszB;
   UInt b_listno;
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   if (ptr == NULL) {
      return;
   }

   b = get_payload_block(a, ptr);

   /* If this is one of V's areas, check carefully the block we're
      getting back.  This picks up simple block-end overruns. */
   if (aid != VG_AR_CLIENT)
      vg_assert(blockSane(a, b));

   b_bszB = get_bszB(b);
   b_pszB = bszB_to_pszB(a, b_bszB);
   sb = findSb( a, b );
   sb_start = &sb->payload_bytes[0];
   sb_end = &sb->payload_bytes[sb->n_payload_bytes - 1];

   a->bytes_on_loan -= b_pszB;

   /* If this is one of V's areas, fill it up with junk to enhance the
      chances of catching any later reads of it.  Note, 0xDD is
      carefully chosen junk :-), in that: (1) 0xDDDDDDDD is an invalid
      and non-word-aligned address on most systems, and (2) 0xDD is a
      value which is unlikely to be generated by the new compressed
      Vbits representation for memcheck. */
   if (aid != VG_AR_CLIENT)
      VG_(memset)(ptr, 0xDD, (SizeT)b_pszB);

   // Put this chunk back on a list somewhere.
   b_listno = pszB_to_listNo(b_pszB);
   mkFreeBlock( a, b, b_bszB, b_listno );
   if (VG_(clo_profile_heap))
      set_cc(b, "admin.free-1");

   // See if this block can be merged with its successor.
   // First test if we're far enough before the superblock's end to possibly
   // have a successor.
   // NOTE(review): 'b + b_bszB' relies on Block being byte-wide so this
   // is byte arithmetic -- confirm the Block typedef.
   other_b = b + b_bszB;
   if (other_b+min_useful_bszB(a)-1 <= (Block*)sb_end) {
      // Ok, we have a successor, merge if it's not in use.
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-successor\n");
#        ifdef DEBUG_MALLOC
         vg_assert(blockSane(a, other_b));
#        endif
         // Pull both blocks off their lists, re-free as one big block.
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a,other_bszB)) );
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-2");
      }
   } else {
      // Not enough space for successor: check that b is the last block
      // ie. there are no unused bytes at the end of the Superblock.
      vg_assert(other_b-1 == (Block*)sb_end);
   }

   // Then see if this block can be merged with its predecessor.
   // First test if we're far enough after the superblock's start to possibly
   // have a predecessor.
   if (b >= (Block*)sb_start + min_useful_bszB(a)) {
      // Ok, we have a predecessor, merge if it's not in use.
      other_b = get_predecessor_block( b );
      other_bszB = get_bszB(other_b);
      if (!is_inuse_block(other_b)) {
         // VG_(printf)( "merge-predecessor\n");
         unlinkBlock( a, b, b_listno );
         unlinkBlock( a, other_b, pszB_to_listNo(bszB_to_pszB(a, other_bszB)) );
         // The merged block starts at the predecessor's address.
         b = other_b;
         b_bszB += other_bszB;
         b_listno = pszB_to_listNo(bszB_to_pszB(a, b_bszB));
         mkFreeBlock( a, b, b_bszB, b_listno );
         if (VG_(clo_profile_heap))
            set_cc(b, "admin.free-3");
      }
   } else {
      // Not enough space for predecessor: check that b is the first block,
      // ie. there are no unused bytes at the start of the Superblock.
      vg_assert((Block*)sb_start == b);
   }

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   //zzVALGRIND_FREELIKE_BLOCK(ptr, 0);
}
+
+
+/*
+ The idea for malloc_aligned() is to allocate a big block, base, and
+ then split it into two parts: frag, which is returned to the the
+ free pool, and align, which is the bit we're really after. Here's
+ a picture. L and H denote the block lower and upper overheads, in
+ bytes. The details are gruesome. Note it is slightly complicated
+ because the initial request to generate base may return a bigger
+ block than we asked for, so it is important to distinguish the base
+ request size and the base actual size.
+
+ frag_b align_b
+ | |
+ | frag_p | align_p
+ | | | |
+ v v v v
+
+ +---+ +---+---+ +---+
+ | L |----------------| H | L |---------------| H |
+ +---+ +---+---+ +---+
+
+ ^ ^ ^
+ | | :
+ | base_p this addr must be aligned
+ |
+ base_b
+
+ . . . . . . .
+ <------ frag_bszB -------> . . .
+ . <------------- base_pszB_act -----------> .
+ . . . . . . .
+
+*/
// Allocate req_pszB payload bytes whose payload address is a multiple of
// req_alignB (a power of two in [VG_MIN_MALLOC_SZB .. 1MB]).  Works by
// over-allocating one big block and splitting it into a leading fragment
// (returned to the free lists) and the aligned block proper; see the
// diagram above for the layout.  Panics on a bad alignment; returns NULL
// on allocation failure (client arena only).
void* VG_(arena_memalign) ( ArenaId aid, HChar* cc,
                            SizeT req_alignB, SizeT req_pszB )
{
   SizeT base_pszB_req, base_pszB_act, frag_bszB;
   Block *base_b, *align_b;
   UByte *base_p, *align_p;
   SizeT saved_bytes_on_loan;
   Arena* a;

   ensure_mm_init(aid);
   a = arenaId_to_ArenaP(aid);

   vg_assert(req_pszB < MAX_PSZB);

   // You must provide a cost-center name against which to charge
   // this allocation; it isn't optional.
   vg_assert(cc);

   // Check that the requested alignment seems reasonable; that is, is
   // a power of 2.
   if (req_alignB < VG_MIN_MALLOC_SZB
       || req_alignB > 1048576
       || VG_(log2)( req_alignB ) == -1 /* not a power of 2 */) {
      VG_(printf)("VG_(arena_memalign)(%p, %lu, %lu)\n"
                  "bad alignment value %lu\n"
                  "(it is too small, too big, or not a power of two)",
                  a, req_alignB, req_pszB, req_alignB );
      VG_(core_panic)("VG_(arena_memalign)");
      /*NOTREACHED*/
   }
   // Paranoid
   vg_assert(req_alignB % VG_MIN_MALLOC_SZB == 0);

   /* Required payload size for the aligned chunk. */
   req_pszB = align_req_pszB(req_pszB);

   /* Payload size to request for the big block that we will split up. */
   base_pszB_req = req_pszB + min_useful_bszB(a) + req_alignB;

   /* Payload ptr for the block we are going to split.  Note this
      changes a->bytes_on_loan; we save and restore it ourselves. */
   saved_bytes_on_loan = a->bytes_on_loan;
   base_p = VG_(arena_malloc) ( aid, cc, base_pszB_req );
   a->bytes_on_loan = saved_bytes_on_loan;

   /* Give up if we couldn't allocate enough space */
   if (base_p == 0)
      return 0;

   /* Block ptr for the block we are going to split. */
   base_b = get_payload_block ( a, base_p );

   /* Pointer to the payload of the aligned block we are going to
      return.  This has to be suitably aligned. */
   align_p = align_upwards ( base_b + 2 * overhead_szB_lo(a)
                                    + overhead_szB_hi(a),
                             req_alignB );
   align_b = get_payload_block(a, align_p);

   /* The block size of the fragment we will create.  This must be big
      enough to actually create a fragment. */
   frag_bszB = align_b - base_b;

   vg_assert(frag_bszB >= min_useful_bszB(a));

   /* The actual payload size of the block we are going to split. */
   base_pszB_act = get_pszB(a, base_b);

   /* Create the fragment block, and put it back on the relevant free list. */
   mkFreeBlock ( a, base_b, frag_bszB,
                 pszB_to_listNo(bszB_to_pszB(a, frag_bszB)) );
   if (VG_(clo_profile_heap))
      set_cc(base_b, "admin.frag-memalign-1");

   /* Create the aligned block. */
   mkInuseBlock ( a, align_b,
                  base_p + base_pszB_act
                         + overhead_szB_hi(a) - (UByte*)align_b );
   if (VG_(clo_profile_heap))
      set_cc(align_b, cc);

   /* Final sanity checks. */
   vg_assert( is_inuse_block(get_payload_block(a, align_p)) );

   vg_assert(req_pszB <= get_pszB(a, get_payload_block(a, align_p)));

   // Account only the aligned block as on loan; the fragment stays free.
   a->bytes_on_loan += get_pszB(a, get_payload_block(a, align_p));
   if (a->bytes_on_loan > a->bytes_on_loan_max)
      a->bytes_on_loan_max = a->bytes_on_loan;

#  ifdef DEBUG_MALLOC
   sanity_check_malloc_arena(aid);
#  endif

   vg_assert( (((Addr)align_p) % req_alignB) == 0 );

   //zzVALGRIND_MALLOCLIKE_BLOCK(align_p, req_pszB, 0, False);

   return align_p;
}
+
+
+SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* ptr )
+{
+ Arena* a = arenaId_to_ArenaP(aid);
+ Block* b = get_payload_block(a, ptr);
+ return get_pszB(a, b);
+}
+
+
+// Implementation of mallinfo(). There is no recent standard that defines
+// the behavior of mallinfo(). The meaning of the fields in struct mallinfo
+// is as follows:
+//
+// struct mallinfo {
+// int arena; /* total space in arena */
+// int ordblks; /* number of ordinary blocks */
+// int smblks; /* number of small blocks */
+// int hblks; /* number of holding blocks */
+// int hblkhd; /* space in holding block headers */
+// int usmblks; /* space in small blocks in use */
+// int fsmblks; /* space in free small blocks */
+// int uordblks; /* space in ordinary blocks in use */
+// int fordblks; /* space in free ordinary blocks */
+// int keepcost; /* space penalty if keep option */
+// /* is used */
+// };
+//
+// The glibc documentation about mallinfo (which is somewhat outdated) can
+// be found here:
+// http://www.gnu.org/software/libtool/manual/libc/Statistics-of-Malloc.html
+//
+// See also http://bugs.kde.org/show_bug.cgi?id=160956.
+//
+// Regarding the implementation of VG_(mallinfo)(): we cannot return the
+// whole struct as the library function does, because this is called by a
+// client request. So instead we use a pointer to do call by reference.
+void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi )
+{
+ UWord i, free_blocks, free_blocks_size;
+ Arena* a = arenaId_to_ArenaP(VG_AR_CLIENT);
+
+ // Traverse free list and calculate free blocks statistics.
+ // This may seem slow but glibc works the same way.
+ free_blocks_size = free_blocks = 0;
+ for (i = 0; i < N_MALLOC_LISTS; i++) {
+ Block* b = a->freelist[i];
+ if (b == NULL) continue;
+ for (;;) {
+ free_blocks++;
+ free_blocks_size += (UWord)get_pszB(a, b);
+ b = get_next_b(b);
+ if (b == a->freelist[i]) break;
+ }
+ }
+
+ // We don't have fastbins so smblks & fsmblks are always 0. Also we don't
+ // have a separate mmap allocator so set hblks & hblkhd to 0.
+ mi->arena = a->bytes_mmaped;
+ mi->ordblks = free_blocks + VG_(free_queue_length);
+ mi->smblks = 0;
+ mi->hblks = 0;
+ mi->hblkhd = 0;
+ mi->usmblks = 0;
+ mi->fsmblks = 0;
+ mi->uordblks = a->bytes_on_loan - VG_(free_queue_volume);
+ mi->fordblks = free_blocks_size + VG_(free_queue_volume);
+ mi->keepcost = 0; // may want some value in here
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Services layered on top of malloc/free. ---*/
+/*------------------------------------------------------------*/
+
+void* VG_(arena_calloc) ( ArenaId aid, HChar* cc,
+ SizeT nmemb, SizeT bytes_per_memb )
+{
+ SizeT size;
+ UChar* p;
+
+ size = nmemb * bytes_per_memb;
+ vg_assert(size >= nmemb && size >= bytes_per_memb);// check against overflow
+
+ p = VG_(arena_malloc) ( aid, cc, size );
+
+ VG_(memset)(p, 0, size);
+
+ //zzVALGRIND_MALLOCLIKE_BLOCK(p, size, 0, True);
+
+ return p;
+}
+
+
+void* VG_(arena_realloc) ( ArenaId aid, HChar* cc,
+ void* ptr, SizeT req_pszB )
+{
+ Arena* a;
+ SizeT old_pszB;
+ UChar *p_new;
+ Block* b;
+
+ ensure_mm_init(aid);
+ a = arenaId_to_ArenaP(aid);
+
+ vg_assert(req_pszB < MAX_PSZB);
+
+ if (NULL == ptr) {
+ return VG_(arena_malloc)(aid, cc, req_pszB);
+ }
+
+ if (req_pszB == 0) {
+ VG_(arena_free)(aid, ptr);
+ return NULL;
+ }
+
+ b = get_payload_block(a, ptr);
+ vg_assert(blockSane(a, b));
+
+ vg_assert(is_inuse_block(b));
+ old_pszB = get_pszB(a, b);
+
+ if (req_pszB <= old_pszB) {
+ return ptr;
+ }
+
+ p_new = VG_(arena_malloc) ( aid, cc, req_pszB );
+
+ VG_(memcpy)(p_new, ptr, old_pszB);
+
+ VG_(arena_free)(aid, ptr);
+
+ return p_new;
+}
+
+
+/* Inline just for the wrapper VG_(strdup) below */
+__inline__ Char* VG_(arena_strdup) ( ArenaId aid, HChar* cc,
+ const Char* s )
+{
+ Int i;
+ Int len;
+ Char* res;
+
+ if (s == NULL)
+ return NULL;
+
+ len = VG_(strlen)(s) + 1;
+ res = VG_(arena_malloc) (aid, cc, len);
+
+ for (i = 0; i < len; i++)
+ res[i] = s[i];
+ return res;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Tool-visible functions. ---*/
+/*------------------------------------------------------------*/
+
+// All just wrappers to avoid exposing arenas to tools.
+
// Tool-visible malloc: tools never see ArenaIds, so charge VG_AR_TOOL.
void* VG_(malloc) ( HChar* cc, SizeT nbytes )
{
   return VG_(arena_malloc) ( VG_AR_TOOL, cc, nbytes );
}
+
// Tool-visible free: releases a block obtained via VG_(malloc) et al.
void VG_(free) ( void* ptr )
{
   VG_(arena_free) ( VG_AR_TOOL, ptr );
}
+
// Tool-visible calloc: zeroed allocation from the tool arena.
void* VG_(calloc) ( HChar* cc, SizeT nmemb, SizeT bytes_per_memb )
{
   return VG_(arena_calloc) ( VG_AR_TOOL, cc, nmemb, bytes_per_memb );
}
+
// Tool-visible realloc, in the tool arena.
void* VG_(realloc) ( HChar* cc, void* ptr, SizeT size )
{
   return VG_(arena_realloc) ( VG_AR_TOOL, cc, ptr, size );
}
+
// Tool-visible strdup, in the tool arena.
Char* VG_(strdup) ( HChar* cc, const Char* s )
{
   return VG_(arena_strdup) ( VG_AR_TOOL, cc, s );
}
+
// Useful for querying user blocks.
// NB: deliberately queries the CLIENT arena, unlike the tool-arena
// wrappers above.
SizeT VG_(malloc_usable_size) ( void* p )
{
   return VG_(arena_malloc_usable_size)(VG_AR_CLIENT, p);
}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_options.c.svn-base b/coregrind/.svn/text-base/m_options.c.svn-base
new file mode 100644
index 0000000..2a06fcd
--- /dev/null
+++ b/coregrind/.svn/text-base/m_options.c.svn-base
@@ -0,0 +1,256 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Command line options. ---*/
+/*--- m_options.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_options.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_mallocfree.h"
+
+// See pub_{core,tool}_options.h for explanations of all these.
+
+
/* Define, and set defaults.  Each variable here backs one command-line
   option; see pub_{core,tool}_options.h for per-option explanations.
   The defaults below are what an option-less run of Valgrind uses. */
VexControl VG_(clo_vex_control);
Bool VG_(clo_error_limit) = True;
Int VG_(clo_error_exitcode) = 0;
Bool VG_(clo_db_attach) = False;
Char* VG_(clo_db_command) = GDB_PATH " -nw %f %p";
Int VG_(clo_gen_suppressions) = 0;
Int VG_(clo_sanity_level) = 1;
Int VG_(clo_verbosity) = 1;
Bool VG_(clo_xml) = False;
HChar* VG_(clo_xml_user_comment) = NULL;
Bool VG_(clo_demangle) = True;
Bool VG_(clo_trace_children) = False;
Bool VG_(clo_child_silent_after_fork) = False;
Int VG_(clo_log_fd) = 2; /* must be signed, as -1 is possible. */
Char* VG_(clo_log_name) = NULL;
Bool VG_(clo_time_stamp) = False;
Int VG_(clo_input_fd) = 0; /* stdin */
Int VG_(clo_n_suppressions) = 0;
Char* VG_(clo_suppressions)[VG_CLO_MAX_SFILES];
UChar VG_(clo_trace_flags) = 0; // 00000000b
UChar VG_(clo_profile_flags) = 0; // 00000000b
Int VG_(clo_trace_notbelow) = 999999999;
Bool VG_(clo_trace_syscalls) = False;
Bool VG_(clo_trace_signals) = False;
Bool VG_(clo_trace_symtab) = False;
HChar* VG_(clo_trace_symtab_patt) = "*";
Bool VG_(clo_trace_cfi) = False;
Bool VG_(clo_debug_dump_syms) = False;
Bool VG_(clo_debug_dump_line) = False;
Bool VG_(clo_debug_dump_frames) = False;
Bool VG_(clo_trace_redir) = False;
Bool VG_(clo_trace_sched) = False;
Bool VG_(clo_profile_heap) = False;
Int VG_(clo_dump_error) = 0;
Int VG_(clo_backtrace_size) = 12;
Char* VG_(clo_sim_hints) = NULL;
Bool VG_(clo_sym_offsets) = False;
Bool VG_(clo_read_var_info) = False;
Bool VG_(clo_run_libc_freeres) = True;
Bool VG_(clo_track_fds) = False;
Bool VG_(clo_show_below_main)= False;
Bool VG_(clo_show_emwarns) = False;
Word VG_(clo_max_stackframe) = 2000000;
Word VG_(clo_main_stacksize) = 0; /* use client's rlimit.stack */
Bool VG_(clo_wait_for_gdb) = False;
VgSmc VG_(clo_smc_check) = Vg_SmcStack;
HChar* VG_(clo_kernel_variant) = NULL;
+
+
+/*====================================================================*/
+/*=== Command line errors ===*/
+/*====================================================================*/
+
// Redirect subsequent logging to stderr, so that a command-line error
// message is seen even when --log-fd/--log-file was given.  Must not be
// called while logging to a socket.
static void revert_to_stderr ( void )
{
   vg_assert( !VG_(logging_to_socket) );
   VG_(clo_log_fd) = 2; /* stderr */
}
+
// Report an unrecognised/invalid command-line option and exit(1).
__attribute__((noreturn))
void VG_(err_bad_option) ( Char* opt )
{
   revert_to_stderr();
   VG_(printf)("valgrind: Bad option '%s'; aborting.\n", opt);
   VG_(printf)("valgrind: Use --help for more information.\n");
   VG_(exit)(1);
}
+
// Report that no client program was named on the command line; exit(1).
__attribute__((noreturn))
void VG_(err_missing_prog) ( void )
{
   revert_to_stderr();
   VG_(printf)("valgrind: no program specified\n");
   VG_(printf)("valgrind: Use --help for more information.\n");
   VG_(exit)(1);
}
+
// Report a startup/configuration failure described by 'msg'; exit(1).
__attribute__((noreturn))
void VG_(err_config_error) ( Char* msg )
{
   revert_to_stderr();
   VG_(printf)("valgrind: Startup or configuration error:\n   %s\n", msg);
   VG_(printf)("valgrind: Unable to start up properly.  Giving up.\n");
   VG_(exit)(1);
}
+
+// Copies the string, prepending it with the startup working directory, and
+// expanding %p and %q entries. Returns a new, malloc'd string.
+Char* VG_(expand_file_name)(Char* option_name, Char* format)
+{
+ static Char base_dir[VKI_PATH_MAX];
+ Int len, i = 0, j = 0;
+ Char* out;
+
+ Bool ok = VG_(get_startup_wd)(base_dir, VKI_PATH_MAX);
+ tl_assert(ok);
+
+ if (VG_STREQ(format, "")) {
+ // Empty name, bad.
+ VG_(message)(Vg_UserMsg, "%s: filename is empty", option_name);
+ goto bad;
+ }
+
+ // If 'format' starts with a '/', do not prefix with startup dir.
+ if (format[0] != '/') {
+ j += VG_(strlen)(base_dir);
+ }
+
+ // The 10 is slop, it should be enough in most cases.
+ len = j + VG_(strlen)(format) + 10;
+ out = VG_(malloc)( "options.efn.1", len );
+ if (format[0] != '/') {
+ VG_(strcpy)(out, base_dir);
+ out[j++] = '/';
+ }
+
+#define ENSURE_THIS_MUCH_SPACE(x) \
+ if (j + x >= len) { \
+ len += (10 + x); \
+ out = VG_(realloc)("options.efn.2(multiple)", out, len); \
+ }
+
+ while (format[i]) {
+ if (format[i] != '%') {
+ ENSURE_THIS_MUCH_SPACE(1);
+ out[j++] = format[i++];
+
+ } else {
+ // We saw a '%'. What's next...
+ i++;
+ if ('%' == format[i]) {
+ // Replace '%%' with '%'.
+ ENSURE_THIS_MUCH_SPACE(1);
+ out[j++] = format[i++];
+ }
+ else if ('p' == format[i]) {
+ // Print the PID. Assume that it's not longer than 10 chars --
+ // reasonable since 'pid' is an Int (ie. 32 bits).
+ Int pid = VG_(getpid)();
+ ENSURE_THIS_MUCH_SPACE(10);
+ j += VG_(sprintf)(&out[j], "%d", pid);
+ i++;
+ }
+ else if ('q' == format[i]) {
+ i++;
+ if ('{' == format[i]) {
+ // Get the env var name, print its contents.
+ Char* qualname;
+ Char* qual;
+ i++;
+ qualname = &format[i];
+ while (True) {
+ if (0 == format[i]) {
+ VG_(message)(Vg_UserMsg, "%s: malformed %%q specifier",
+ option_name);
+ goto bad;
+ } else if ('}' == format[i]) {
+ // Temporarily replace the '}' with NUL to extract var
+ // name.
+ format[i] = 0;
+ qual = VG_(getenv)(qualname);
+ if (NULL == qual) {
+ VG_(message)(Vg_UserMsg,
+ "%s: environment variable %s is not set",
+ option_name, qualname);
+ format[i] = '}'; // Put the '}' back.
+ goto bad;
+ }
+ format[i] = '}'; // Put the '}' back.
+ i++;
+ break;
+ }
+ i++;
+ }
+ ENSURE_THIS_MUCH_SPACE(VG_(strlen)(qual));
+ j += VG_(sprintf)(&out[j], "%s", qual);
+ } else {
+ VG_(message)(Vg_UserMsg,
+ "%s: expected '{' after '%%q'", option_name);
+ goto bad;
+ }
+ }
+ else {
+ // Something else, abort.
+ VG_(message)(Vg_UserMsg,
+ "%s: expected 'p' or 'q' or '%%' after '%%'", option_name);
+ goto bad;
+ }
+ }
+ }
+ ENSURE_THIS_MUCH_SPACE(1);
+ out[j++] = 0;
+
+ return out;
+
+ bad: {
+ Char* opt = // 2: 1 for the '=', 1 for the NUL.
+ VG_(malloc)( "options.efn.3",
+ VG_(strlen)(option_name) + VG_(strlen)(format) + 2 );
+ VG_(strcpy)(opt, option_name);
+ VG_(strcat)(opt, "=");
+ VG_(strcat)(opt, format);
+ VG_(err_bad_option)(opt);
+ }
+}
+
+
+
+/*--------------------------------------------------------------------*/
+/*--- end m_options.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_oset.c.svn-base b/coregrind/.svn/text-base/m_oset.c.svn-base
new file mode 100644
index 0000000..4753418
--- /dev/null
+++ b/coregrind/.svn/text-base/m_oset.c.svn-base
@@ -0,0 +1,882 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An ordered set implemented using an AVL tree. m_oset.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+//----------------------------------------------------------------------
+// This file is based on:
+//
+// ANSI C Library for maintainance of AVL Balanced Trees
+// (C) 2000 Daniel Nagy, Budapest University of Technology and Economics
+// Released under GNU General Public License (GPL) version 2
+//----------------------------------------------------------------------
+
+// This file implements a generic ordered set using an AVL tree.
+//
+// Each node in the tree has two parts.
+// - First is the AVL metadata, which is three words: a left pointer, a
+// right pointer, and a word containing balancing information and a
+// "magic" value which provides some checking that the user has not
+// corrupted the metadata. So the overhead is 12 bytes on 32-bit
+// platforms and 24 bytes on 64-bit platforms.
+// - Second is the user's data. This can be anything. Note that because it
+// comes after the metadata, it will only be word-aligned, even if the
+// user data is a struct that would normally be doubleword-aligned.
+//
+// AvlNode* node -> +---------------+ V
+// | struct |
+// | AvlNode |
+// void* element -> +---------------+ ^
+// | element | |
+// keyOff -> | key | elemSize
+// +---------------+ v
+//
+// Users have to allocate AvlNodes with OSetGen_AllocNode(), which allocates
+// space for the metadata.
+//
+// The terminology used throughout this file:
+// - a "node", usually called "n", is a pointer to the metadata.
+// - an "element", usually called "e", is a pointer to the user data.
+// - a "key", usually called "k", is a pointer to a key.
+//
+// The helper functions elem_of_node and node_of_elem do the pointer
+// arithmetic to switch between the node and the element. The node magic is
+// checked after each operation to make sure that we're really operating on
+// an AvlNode.
+//
+// Each tree also has an iterator. Note that we cannot use the iterator
+// internally within this file (eg. we could implement OSetGen_Size() by
+// stepping through with the iterator and counting nodes) because it's
+// non-reentrant -- the user might be using it themselves, and the
+// concurrent uses would screw things up.
+
+#include "pub_core_basics.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_oset.h"
+
+/*--------------------------------------------------------------------*/
+/*--- Types and constants ---*/
+/*--------------------------------------------------------------------*/
+
typedef struct _OSetNode OSetNode;

// Internal names for the OSet types.
typedef OSet AvlTree;
typedef OSetNode AvlNode;

// The padding ensures that magic is right at the end of the node,
// regardless of the machine's word size, so that any overwrites will be
// detected earlier.
struct _OSetNode {
   AvlNode* left;       // left subtree, or NULL
   AvlNode* right;      // right subtree, or NULL
   Char balance;        // AVL balance factor for this subtree
   Char padding[sizeof(void*)-sizeof(Char)-sizeof(Short)];
   Short magic;         // must equal OSET_MAGIC; checked by node accessors
};

#define STACK_MAX 32 // At most 2**32 entries can be iterated over
#define OSET_MAGIC 0x5b1f

// An OSet (AVL tree). If cmp is NULL, the key must be a UWord, and must
// be the first word in the element. If cmp is set, arbitrary keys in
// arbitrary positions can be used.
struct _OSet {
   SizeT keyOff; // key offset
   OSetCmp_t cmp; // compare a key and an element, or NULL
   OSetAlloc_t alloc; // allocator
   HChar* cc; // cc for allocator
   OSetFree_t free; // deallocator
   Word nElems; // number of elements in the tree
   AvlNode* root; // root node

   AvlNode* nodeStack[STACK_MAX]; // Iterator node stack
   Int numStack[STACK_MAX]; // Iterator num stack
   Int stackTop; // Iterator stack pointer, one past end
};
+
+/*--------------------------------------------------------------------*/
+/*--- Helper operations ---*/
+/*--------------------------------------------------------------------*/
+
+// Given a pointer to the node's element, return the pointer to the AvlNode
+// structure. If the node has a bad magic number, it will die with an
+// assertion failure.
+static inline
+AvlNode* node_of_elem(const void *elem)
+{
+ AvlNode* n = (AvlNode*)((Addr)elem - sizeof(AvlNode));
+ vg_assert2(n->magic == OSET_MAGIC,
+ "bad magic on node %p = %x (expected %x)\n"
+ "possible causes:\n"
+ " - node not allocated with VG_(OSetGen_AllocNode)()?\n"
+ " - node metadata corrupted by underwriting start of element?\n",
+ n, n->magic, OSET_MAGIC);
+ return n;
+}
+
+// Given an AvlNode, return the pointer to the element.
+static inline
+void* elem_of_node(const AvlNode *n)
+{
+ vg_assert2(n->magic == OSET_MAGIC,
+ "bad magic on node %p = %x (expected %x)\n"
+ "possible causes:\n"
+ " - node metadata corrupted by overwriting end of element?\n",
+ n, n->magic, OSET_MAGIC);
+ return (void*)((Addr)n + sizeof(AvlNode));
+}
+
+// Like elem_of_node, but no magic checking.
+static inline
+void* elem_of_node_no_check(const AvlNode *n)
+{
+ return (void*)((Addr)n + sizeof(AvlNode));
+}
+
+static inline
+void* slow_key_of_node(AvlTree* t, AvlNode* n)
+{
+ return (void*)((Addr)elem_of_node(n) + t->keyOff);
+}
+
// Fast-track key lookup: when there is no client comparison function the
// key is, by contract, the first word of the element itself.
static inline
void* fast_key_of_node(AvlNode* n)
{
   return elem_of_node(n);
}
+
+// Compare the first word of each element. Inlining is *crucial*.
+static inline Word fast_cmp(const void* k, const AvlNode* n)
+{
+ UWord w1 = *(UWord*)k;
+ UWord w2 = *(UWord*)elem_of_node(n);
+ // In previous versions, we tried to do this faster by doing
+ // "return w1 - w2". But it didn't work reliably, because the
+ // complete result of subtracting two N-bit numbers is an N+1-bit
+ // number, and what the caller is interested in is the sign of
+ // the complete N+1-bit result. The branching version is slightly
+ // slower, but safer and easier to understand.
+ if (w1 > w2) return 1;
+ if (w1 < w2) return -1;
+ return 0;
+}
+
// Compare a key and an element. Inlining is *crucial*.
// Defers to the client-supplied comparison function (general-case trees).
static
inline Word slow_cmp(const AvlTree* t, const void* k, const AvlNode* n)
{
   return t->cmp(k, elem_of_node(n));
}
+
+
+// Swing to the left. Warning: no balance maintainance.
+static void avl_swl ( AvlNode** root )
+{
+ AvlNode* a = *root;
+ AvlNode* b = a->right;
+ *root = b;
+ a->right = b->left;
+ b->left = a;
+}
+
+// Swing to the right. Warning: no balance maintainance.
+static void avl_swr ( AvlNode** root )
+{
+ AvlNode* a = *root;
+ AvlNode* b = a->left;
+ *root = b;
+ a->left = b->right;
+ b->right = a;
+}
+
+// Balance maintainance after especially nasty swings.
+static void avl_nasty ( AvlNode* root )
+{
+ switch (root->balance) {
+ case -1:
+ root->left->balance = 0;
+ root->right->balance = 1;
+ break;
+ case 1:
+ root->left->balance =-1;
+ root->right->balance = 0;
+ break;
+ case 0:
+ root->left->balance = 0;
+ root->right->balance = 0;
+ }
+ root->balance = 0;
+}
+
+
+// Clear the iterator stack.
+static void stackClear(AvlTree* t)
+{
+ Int i;
+ vg_assert(t);
+ for (i = 0; i < STACK_MAX; i++) {
+ t->nodeStack[i] = NULL;
+ t->numStack[i] = 0;
+ }
+ t->stackTop = 0;
+}
+
+// Push onto the iterator stack.
+static inline void stackPush(AvlTree* t, AvlNode* n, Int i)
+{
+ vg_assert(t->stackTop < STACK_MAX);
+ vg_assert(1 <= i && i <= 3);
+ t->nodeStack[t->stackTop] = n;
+ t-> numStack[t->stackTop] = i;
+ t->stackTop++;
+}
+
+// Pop from the iterator stack.
+static inline Bool stackPop(AvlTree* t, AvlNode** n, Int* i)
+{
+ vg_assert(t->stackTop <= STACK_MAX);
+
+ if (t->stackTop > 0) {
+ t->stackTop--;
+ *n = t->nodeStack[t->stackTop];
+ *i = t-> numStack[t->stackTop];
+ vg_assert(1 <= *i && *i <= 3);
+ t->nodeStack[t->stackTop] = NULL;
+ t-> numStack[t->stackTop] = 0;
+ return True;
+ } else {
+ return False;
+ }
+}
+
+/*--------------------------------------------------------------------*/
+/*--- Creating and destroying AvlTrees and AvlNodes ---*/
+/*--------------------------------------------------------------------*/
+
+// The underscores avoid GCC complaints about overshadowing global names.
+AvlTree* VG_(OSetGen_Create)(PtrdiffT _keyOff, OSetCmp_t _cmp,
+ OSetAlloc_t _alloc, HChar* _cc,
+ OSetFree_t _free)
+{
+ AvlTree* t;
+
+ // Check the padding is right and the AvlNode is the expected size.
+ vg_assert(sizeof(AvlNode) == 3*sizeof(void*));
+
+ // Sanity check args
+ vg_assert(_alloc);
+ vg_assert(_free);
+ if (!_cmp) vg_assert(0 == _keyOff); // If no cmp, offset must be zero
+
+ t = _alloc(_cc, sizeof(AvlTree));
+ t->keyOff = _keyOff;
+ t->cmp = _cmp;
+ t->alloc = _alloc;
+ t->cc = _cc;
+ t->free = _free;
+ t->nElems = 0;
+ t->root = NULL;
+ stackClear(t);
+
+ return t;
+}
+
// Create a word-set: a generic OSet whose element is a single UWord which
// is also the key (hence keyOff 0 and NULL cmp, the fast-track case).
AvlTree* VG_(OSetWord_Create)(OSetAlloc_t _alloc, HChar* _cc,
                              OSetFree_t _free)
{
   return VG_(OSetGen_Create)(/*keyOff*/0, /*cmp*/NULL, _alloc, _cc, _free);
}
+
// Destructor, frees up all memory held by remaining nodes.
// Uses the iterator stack for an explicit (non-recursive) traversal;
// this is safe because the tree is being torn down, so no user
// iteration can be in progress.
void VG_(OSetGen_Destroy)(AvlTree* t)
{
   AvlNode* n = NULL;
   Int i = 0;
   Word sz = 0;   // count of freed nodes, cross-checked against nElems

   vg_assert(t);
   stackClear(t);
   if (t->root)
      stackPush(t, t->root, 1);

   /* Free all the AvlNodes. This is a post-order traversal, because we */
   /* must free all children of a node before the node itself. */
   while (stackPop(t, &n, &i)) {
      switch (i) {
      case 1:   // first visit: descend left
         stackPush(t, n, 2);
         if (n->left) stackPush(t, n->left, 1);
         break;
      case 2:   // second visit: descend right
         stackPush(t, n, 3);
         if (n->right) stackPush(t, n->right, 1);
         break;
      case 3:   // third visit: both subtrees done, free this node
         t->free(n);
         sz++;
         break;
      }
   }
   vg_assert(sz == t->nElems);

   /* Free the AvlTree itself. */
   t->free(t);
}
+
// Word-set destructor: identical to the generic one.
void VG_(OSetWord_Destroy)(AvlTree* t)
{
   VG_(OSetGen_Destroy)(t);
}
+
+// Allocate and initialise a new node.
+void* VG_(OSetGen_AllocNode)(AvlTree* t, SizeT elemSize)
+{
+ Int nodeSize = sizeof(AvlNode) + elemSize;
+ AvlNode* n = t->alloc( t->cc, nodeSize );
+ vg_assert(elemSize > 0);
+ VG_(memset)(n, 0, nodeSize);
+ n->magic = OSET_MAGIC;
+ return elem_of_node(n);
+}
+
// Free a node given a pointer to its element part; node_of_elem checks
// the magic, so a bogus pointer dies with an assertion rather than
// corrupting the heap.
void VG_(OSetGen_FreeNode)(AvlTree* t, void* e)
{
   t->free( node_of_elem(e) );
}
+
+/*--------------------------------------------------------------------*/
+/*--- Insertion ---*/
+/*--------------------------------------------------------------------*/
+
// Compare the key of node n against the tree's root element, dispatching
// to the client comparison (slow path) or first-word comparison (fast
// path) as appropriate.  Used by insert/remove to pick a subtree.
static inline Word cmp_key_root(AvlTree* t, AvlNode* n)
{
   return t->cmp
          ? slow_cmp(t, slow_key_of_node(t, n), t->root)
          : fast_cmp( fast_key_of_node( n), t->root);
}
+
// Insert node n into the non-empty AVL tree t.
// Returns True if the depth of the tree has grown.
// Note: when the recursive call returns True, no rotation happened at
// the child level, so the child's root pointer is unchanged and need not
// be copied back; the copy-back is only needed on the False path.
static Bool avl_insert(AvlTree* t, AvlNode* n)
{
   Word cmpres = cmp_key_root(t, n);

   if (cmpres < 0) {
      // Insert into the left subtree.
      if (t->root->left) {
         // Only need to set the used fields in the subtree.
         AvlTree left_subtree;
         left_subtree.root = t->root->left;
         left_subtree.cmp = t->cmp;
         left_subtree.keyOff = t->keyOff;
         if (avl_insert(&left_subtree, n)) {
            // Left subtree grew: adjust the balance factor, rebalancing
            // with a single or double rotation if it would reach -2.
            switch (t->root->balance--) {
            case 1: return False;
            case 0: return True;
            }
            if (t->root->left->balance < 0) {
               // Left-left case: single right rotation.
               avl_swr(&(t->root));
               t->root->balance = 0;
               t->root->right->balance = 0;
            } else {
               // Left-right case: double rotation.
               avl_swl(&(t->root->left));
               avl_swr(&(t->root));
               avl_nasty(t->root);
            }
         } else {
            t->root->left=left_subtree.root;
         }
         return False;
      } else {
         t->root->left = n;
         if (t->root->balance--) return False;
         return True;
      }

   } else if (cmpres > 0) {
      // Insert into the right subtree
      if (t->root->right) {
         // Only need to set the used fields in the subtree.
         AvlTree right_subtree;
         right_subtree.root = t->root->right;
         right_subtree.cmp = t->cmp;
         right_subtree.keyOff = t->keyOff;
         if (avl_insert(&right_subtree, n)) {
            // Right subtree grew: mirror image of the left-hand case.
            switch (t->root->balance++) {
            case -1: return False;
            case 0: return True;
            }
            if (t->root->right->balance > 0) {
               // Right-right case: single left rotation.
               avl_swl(&(t->root));
               t->root->balance = 0;
               t->root->left->balance = 0;
            } else {
               // Right-left case: double rotation.
               avl_swr(&(t->root->right));
               avl_swl(&(t->root));
               avl_nasty(t->root);
            }
         } else {
            t->root->right=right_subtree.root;
         }
         return False;
      } else {
         t->root->right = n;
         if (t->root->balance++) return False;
         return True;
      }

   } else {
      vg_assert2(0, "OSet{Word,Gen}_Insert: duplicate element added");
   }
}
+
// Insert element e into the AVL tree t. This is just a wrapper for
// avl_insert() which doesn't return a Bool.  Duplicate insertion asserts
// (in avl_insert).  Invalidates any in-progress iteration.
void VG_(OSetGen_Insert)(AvlTree* t, void* e)
{
   AvlNode* n;

   vg_assert(t);

   // Initialise. Even though OSetGen_AllocNode zeroes these fields,
   // we should do it again in case a node is removed and then
   // re-added to the tree.
   n = node_of_elem(e);
   n->left = 0;
   n->right = 0;
   n->balance = 0;

   // Insert into an empty tree
   if (!t->root) {
      t->root = n;
   } else {
      avl_insert(t, n);
   }

   t->nElems++;
   t->stackTop = 0; // So the iterator can't get out of sync
}
+
+void VG_(OSetWord_Insert)(AvlTree* t, UWord val)
+{
+ Word* node = VG_(OSetGen_AllocNode)(t, sizeof(UWord));
+ *node = val;
+ VG_(OSetGen_Insert)(t, node);
+}
+
+/*--------------------------------------------------------------------*/
+/*--- Lookup ---*/
+/*--------------------------------------------------------------------*/
+
+// Find the *node* in t matching k, or NULL if not found.
+static AvlNode* avl_lookup(const AvlTree* t, const void* k)
+{
+ Word cmpres;
+ AvlNode* curr = t->root;
+
+ if (t->cmp) {
+ // General case
+ while (True) {
+ if (curr == NULL) return NULL;
+ cmpres = slow_cmp(t, k, curr);
+ if (cmpres < 0) curr = curr->left;
+ else if (cmpres > 0) curr = curr->right;
+ else return curr;
+ }
+ } else {
+ // Fast-track special case. We use the no-check version of
+ // elem_of_node because it saves about 10% on lookup time. This
+ // shouldn't be very dangerous because each node will have been
+ // checked on insertion.
+ UWord w1 = *(UWord*)k;
+ UWord w2;
+ while (True) {
+ if (curr == NULL) return NULL;
+ w2 = *(UWord*)elem_of_node_no_check(curr);
+ if (w1 < w2) curr = curr->left;
+ else if (w1 > w2) curr = curr->right;
+ else return curr;
+ }
+ }
+}
+
+// Find the *element* in t matching k, or NULL if not found.
+void* VG_(OSetGen_Lookup)(const AvlTree* t, const void* k)
+{
+ AvlNode* n;
+ vg_assert(t);
+ n = avl_lookup(t, k);
+ return ( n ? elem_of_node(n) : NULL );
+}
+
// Find the *element* in t matching k, or NULL if not found; use the given
// comparison function rather than the standard one.
// NOTE: this temporarily mutates t->cmp, so it is not safe if the tree
// is shared across threads (consistent with the rest of this module,
// which assumes single-threaded use).
void* VG_(OSetGen_LookupWithCmp)(AvlTree* t, const void* k, OSetCmp_t cmp)
{
   // Save the normal one to the side, then restore once we're done.
   void* e;
   OSetCmp_t tmpcmp;
   vg_assert(t);
   tmpcmp = t->cmp;
   t->cmp = cmp;
   e = VG_(OSetGen_Lookup)(t, k);
   t->cmp = tmpcmp;
   return e;
}
+
+// Is there an element matching k?
+Bool VG_(OSetGen_Contains)(const AvlTree* t, const void* k)
+{
+ return (NULL != VG_(OSetGen_Lookup)(t, k));
+}
+
+Bool VG_(OSetWord_Contains)(AvlTree* t, UWord val)
+{
+ return (NULL != VG_(OSetGen_Lookup)(t, &val));
+}
+
+/*--------------------------------------------------------------------*/
+/*--- Deletion ---*/
+/*--------------------------------------------------------------------*/
+
+static Bool avl_removeroot(AvlTree* t);
+
// Remove an already-selected node n from the AVL tree t.
// Returns True if the depth of the tree has shrunk.
// Unlike avl_insert, the subtree root is copied back unconditionally,
// because removal can rotate the subtree in either outcome.
static Bool avl_remove(AvlTree* t, AvlNode* n)
{
   Bool ch;
   Word cmpres = cmp_key_root(t, n);

   if (cmpres < 0) {
      AvlTree left_subtree;
      // Remove from the left subtree
      vg_assert(t->root->left);
      // Only need to set the used fields in the subtree.
      left_subtree.root = t->root->left;
      left_subtree.cmp = t->cmp;
      left_subtree.keyOff = t->keyOff;
      ch = avl_remove(&left_subtree, n);
      t->root->left = left_subtree.root;
      if (ch) {
         // Left subtree shrank: the balance tilts right; rebalance if it
         // would reach +2, with the rotation chosen by the right child's
         // balance factor.
         switch (t->root->balance++) {
         case -1: return True;
         case 0: return False;
         }
         switch (t->root->right->balance) {
         case 0:
            avl_swl(&(t->root));
            t->root->balance = -1;
            t->root->left->balance = 1;
            return False;
         case 1:
            avl_swl(&(t->root));
            t->root->balance = 0;
            t->root->left->balance = 0;
            return True;
         }
         // Right child tilts left: double rotation.
         avl_swr(&(t->root->right));
         avl_swl(&(t->root));
         avl_nasty(t->root);
         return True;
      } else {
         return False;
      }

   } else if (cmpres > 0) {
      // Remove from the right subtree
      AvlTree right_subtree;
      vg_assert(t->root->right);
      // Only need to set the used fields in the subtree.
      right_subtree.root = t->root->right;
      right_subtree.cmp = t->cmp;
      right_subtree.keyOff = t->keyOff;
      ch = avl_remove(&right_subtree, n);
      t->root->right = right_subtree.root;
      if (ch) {
         // Right subtree shrank: mirror image of the left-hand case.
         switch (t->root->balance--) {
         case 1: return True;
         case 0: return False;
         }
         switch (t->root->left->balance) {
         case 0:
            avl_swr(&(t->root));
            t->root->balance = 1;
            t->root->right->balance = -1;
            return False;
         case -1:
            avl_swr(&(t->root));
            t->root->balance = 0;
            t->root->right->balance = 0;
            return True;
         }
         // Left child tilts right: double rotation.
         avl_swl(&(t->root->left));
         avl_swr(&(t->root));
         avl_nasty(t->root);
         return True;
      } else {
         return False;
      }

   } else {
      // Found the node to be removed.
      vg_assert(t->root == n);
      return avl_removeroot(t);
   }
}
+
// Remove the root of the AVL tree t.
// Returns True if the depth of the tree has shrunk.
// For a root with two children, the in-order predecessor or successor
// (from the taller side) is unlinked via avl_remove and then spliced in
// as the new root, inheriting the old root's children and balance.
static Bool avl_removeroot(AvlTree* t)
{
   Bool ch;
   AvlNode* n;

   // Zero- or one-child cases: the (single) child replaces the root.
   if (!t->root->left) {
      if (!t->root->right) {
         t->root = NULL;
         return True;
      }
      t->root = t->root->right;
      return True;
   }
   if (!t->root->right) {
      t->root = t->root->left;
      return True;
   }
   if (t->root->balance < 0) {
      // Remove from the left subtree
      n = t->root->left;
      while (n->right) n = n->right;   // in-order predecessor
   } else {
      // Remove from the right subtree
      n = t->root->right;
      while (n->left) n = n->left;     // in-order successor
   }
   ch = avl_remove(t, n);
   // Splice n in as the new root, taking over the old root's links.
   n->left = t->root->left;
   n->right = t->root->right;
   n->balance = t->root->balance;
   t->root = n;
   if (n->balance == 0) return ch;
   return False;
}
+
+// Remove and return the element matching the key 'k', or NULL
+// if not present.
+void* VG_(OSetGen_Remove)(AvlTree* t, const void* k)
+{
+ // Have to find the node first, then remove it.
+ AvlNode* n = avl_lookup(t, k);
+ if (n) {
+ avl_remove(t, n);
+ t->nElems--;
+ t->stackTop = 0; // So the iterator can't get out of sync
+ return elem_of_node(n);
+ } else {
+ return NULL;
+ }
+}
+
+Bool VG_(OSetWord_Remove)(AvlTree* t, UWord val)
+{
+ void* n = VG_(OSetGen_Remove)(t, &val);
+ if (n) {
+ VG_(OSetGen_FreeNode)(t, n);
+ return True;
+ } else {
+ return False;
+ }
+}
+
+/*--------------------------------------------------------------------*/
+/*--- Iterator ---*/
+/*--------------------------------------------------------------------*/
+
+// The iterator is implemented using in-order traversal with an explicit
+// stack, which lets us do the traversal one step at a time and remember
+// where we are between each call to OSetGen_Next().
+
// Restart the iterator at the smallest element: clear the stack and seed
// it with the root in state 1 (i.e. "not yet visited").
void VG_(OSetGen_ResetIter)(AvlTree* t)
{
   vg_assert(t);
   stackClear(t);
   if (t->root)
      stackPush(t, t->root, 1);
}
+
// Word-set iterator reset: identical to the generic one.
void VG_(OSetWord_ResetIter)(AvlTree* t)
{
   VG_(OSetGen_ResetIter)(t);
}
+
// Return the next element of the in-order traversal, or NULL when the
// iterator is exhausted.  State codes: 1 = descend left, 2 = yield this
// node, 3 = descend right.
void* VG_(OSetGen_Next)(AvlTree* t)
{
   Int i = 0;
   OSetNode* n = NULL;

   vg_assert(t);

   // This in-order traversal requires each node to be pushed and popped
   // three times. These could be avoided by updating nodes in-situ on the
   // top of the stack, but the push/pop cost is so small that it's worth
   // keeping this loop in this simpler form.
   while (stackPop(t, &n, &i)) {
      switch (i) {
      case 1: case_1:
         stackPush(t, n, 2);
         /* if (n->left) stackPush(t, n->left, 1); */
         // The goto replaces a push/pop pair for the left child.
         if (n->left) { n = n->left; goto case_1; }
         break;
      case 2:
         stackPush(t, n, 3);
         return elem_of_node(n);
      case 3:
         /* if (n->right) stackPush(t, n->right, 1); */
         if (n->right) { n = n->right; goto case_1; }
         break;
      }
   }

   // Stack empty, iterator is exhausted, return NULL
   return NULL;
}
+
+Bool VG_(OSetWord_Next)(AvlTree* t, UWord* val)
+{
+ UWord* n = VG_(OSetGen_Next)(t);
+ if (n) {
+ *val = *n;
+ return True;
+ } else {
+ return False;
+ }
+}
+
+// set up 'oset' for iteration so that the first key subsequently
+// produced VG_(OSetGen_Next) is the smallest key in the map
+// >= start_at. Naturally ">=" is defined by the comparison
+// function supplied to VG_(OSetGen_Create).
+void VG_(OSetGen_ResetIterAt)(AvlTree* oset, const void* k)
+{
+ Int i;
+ AvlNode *n, *t;
+ Word cmpresS; /* signed */
+ UWord cmpresU; /* unsigned */
+
+ vg_assert(oset);
+ stackClear(oset);
+
+ if (!oset->root)
+ return;
+
+ n = NULL;
+ // We need to do regular search and fill in the stack.
+ t = oset->root;
+
+ while (True) {
+ if (t == NULL) return;
+
+ if (oset->cmp) {
+ cmpresS = (Word)slow_cmp(oset, k, t);
+ } else {
+ cmpresS = fast_cmp(k, t);
+ }
+
+ /* Switch the sense of the comparison, since the comparison
+ order of args (k vs t) above is opposite to that of the
+ corresponding code in hg_wordfm.c. */
+ if (cmpresS < 0) { cmpresS = 1; }
+ else if (cmpresS > 0) { cmpresS = -1; }
+
+ if (cmpresS == 0) {
+ // We found the exact key -- we are done.
+ // The iteration should start with this node.
+ stackPush(oset, t, 2);
+ // The stack now looks like {2, 2, ... ,2, 2}
+ return;
+ }
+ cmpresU = (UWord)cmpresS;
+ cmpresU >>=/*unsigned*/ (8 * sizeof(cmpresU) - 1);
+ vg_assert(cmpresU == 0 || cmpresU == 1);
+ if (!cmpresU) {
+ // Push this node only if we go to the left child.
+ stackPush(oset, t, 2);
+ }
+ t = cmpresU==0 ? t->left : t->right;
+ }
+ if (stackPop(oset, &n, &i)) {
+ // If we've pushed something to stack and did not find the exact key,
+ // we must fix the top element of stack.
+ vg_assert(i == 2);
+ stackPush(oset, n, 3);
+ // the stack looks like {2, 2, ..., 2, 3}
+ }
+}
+
+/*--------------------------------------------------------------------*/
+/*--- Miscellaneous operations ---*/
+/*--------------------------------------------------------------------*/
+
// Return the number of elements in the tree (O(1); maintained by
// insert/remove).
Word VG_(OSetGen_Size)(const AvlTree* t)
{
   vg_assert(t);
   return t->nElems;
}
+
// Word-set size: identical to the generic one.
Word VG_(OSetWord_Size)(AvlTree* t)
{
   return VG_(OSetGen_Size)(t);
}
+
+static void OSet_Print2( AvlTree* t, AvlNode* n,
+ Char*(*strElem)(void *), Int p )
+{
+ // This is a recursive in-order traversal.
+ Int q = p;
+ if (NULL == n) return;
+ if (n->right) OSet_Print2(t, n->right, strElem, p+1);
+ while (q--) VG_(printf)(".. ");
+ VG_(printf)("%s\n", strElem(elem_of_node(n)));
+ if (n->left) OSet_Print2(t, n->left, strElem, p+1);
+}
+
// Debug helper: dump the whole tree between banner lines.  Kept compiled
// (but marked unused) so it can be called from a debugger or temporary
// instrumentation without warnings.
__attribute__((unused))
static void OSet_Print( AvlTree* t, const HChar *where, Char*(*strElem)(void *) )
{
   VG_(printf)("-- start %s ----------------\n", where);
   OSet_Print2(t, t->root, strElem, 0);
   VG_(printf)("-- end %s ----------------\n", where);
}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_redir.c.svn-base b/coregrind/.svn/text-base/m_redir.c.svn-base
new file mode 100644
index 0000000..d31d90f
--- /dev/null
+++ b/coregrind/.svn/text-base/m_redir.c.svn-base
@@ -0,0 +1,1115 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Function replacement and wrapping. m_redir.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+ Copyright (C) 2003-2009 Jeremy Fitzhardinge
+ jeremy@goop.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_debuginfo.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_seqmatch.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_oset.h"
+#include "pub_core_redir.h"
+#include "pub_core_trampoline.h"
+#include "pub_core_transtab.h"
+#include "pub_core_tooliface.h" // VG_(needs).malloc_replacement
+#include "pub_core_machine.h" // VG_(fnptr_to_fnentry)
+#include "pub_core_aspacemgr.h" // VG_(am_find_nsegment)
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h" // VG_(client___libc_freeres_wrapper)
+#include "pub_core_demangle.h" // VG_(maybe_Z_demangle)
+
+
+/* This module is a critical part of the redirection/intercept system.
+ It keeps track of the current intercept state, cleans up the
+ translation caches when that state changes, and finally, answers
+   queries about whether an address is currently redirected or
+ not. It doesn't do any of the control-flow trickery needed to put
+ the redirections into practice. That is the job of m_translate,
+ which calls here to find out which translations need to be
+ redirected.
+
+ The interface is simple. VG_(redir_initialise) initialises and
+ loads some hardwired redirects which never disappear; this is
+ platform-specific.
+
+ The module is notified of redirection state changes by m_debuginfo.
+ That calls VG_(redir_notify_new_SegInfo) when a new SegInfo (shared
+ object symbol table, basically) appears. Appearance of new symbols
+ can cause new (active) redirections to appear for two reasons: the
+ symbols in the new table may match existing redirection
+ specifications (see comments below), and because the symbols in the
+ new table may themselves supply new redirect specifications which
+ match existing symbols (or ones in the new table).
+
+ Redirect specifications are really symbols with "funny" prefixes
+ (_vgrZU_ and _vgrZZ_). These names tell m_redir that the
+ associated code should replace the standard entry point for some
+ set of functions. The set of functions is specified by a (soname
+ pattern, function name pattern) pair which is encoded in the symbol
+ name following the prefix. The names use a Z-encoding scheme so
+ that they may contain punctuation characters and wildcards (*).
+ The encoding scheme is described in pub_tool_redir.h and is decoded
+ by VG_(maybe_Z_demangle).
+
+ When a shared object is unloaded, this module learns of it via a
+ call to VG_(redir_notify_delete_SegInfo). It then removes from its
+ tables all active redirections in any way associated with that
+ object, and tidies up the translation caches accordingly.
+
+ That takes care of tracking the redirection state. When a
+ translation is actually to be made, m_translate calls to
+ VG_(redir_do_lookup) in this module to find out if the
+ translation's address should be redirected.
+*/
+
+/*------------------------------------------------------------*/
+/*--- Semantics ---*/
+/*------------------------------------------------------------*/
+
+/* The redirector holds two pieces of state:
+
+ Specs - a set of (soname pattern, fnname pattern) -> redir addr
+ Active - a set of orig addr -> (bool, redir addr)
+
+ Active is the currently active set of bindings that the translator
+ consults. Specs is the current set of specifications as harvested
+ from reading symbol tables of the currently loaded objects.
+
+ Active is a pure function of Specs and the current symbol table
+ state (maintained by m_debuginfo). Call the latter SyminfoState.
+
+ Therefore whenever either Specs or SyminfoState changes, Active
+ must be recomputed. [Inefficient if done naively, but this is a
+ spec].
+
+ Active is computed as follows:
+
+ Active = empty
+ for spec in Specs {
+ sopatt = spec.soname pattern
+ fnpatt = spec.fnname pattern
+ redir = spec.redir addr
+ for so matching sopatt in SyminfoState {
+ for fn matching fnpatt in fnnames_of(so) {
+ &fn -> redir is added to Active
+ }
+ }
+ }
+
+ [as an implementation detail, when a binding (orig -> redir) is
+ deleted from Active as a result of recomputing it, then all
+ translations intersecting redir must be deleted. However, this is
+ not part of the spec].
+
+ [Active also depends on where the aspacemgr has decided to put all
+ the pieces of code -- that affects the "orig addr" and "redir addr"
+ values.]
+
+ ---------------------
+
+ That completes the spec, apart from one difficult issue: duplicates.
+
+ Clearly we must impose the requirement that domain(Active) contains
+ no duplicates. The difficulty is how to constrain Specs enough to
+ avoid getting into that situation. It's easy to write specs which
+ could cause conflicting bindings in Active, eg:
+
+ (libpthread.so, pthread_mutex_lock) -> a1
+ (libpthread.so, pthread_*) -> a2
+
+ for a1 != a2. Or even hairier:
+
+ (libpthread.so, pthread_mutex_*) -> a1
+ (libpthread.so, pthread_*_lock) -> a2
+
+ I can't think of any sane way of detecting when an addition to
+ Specs would generate conflicts. However, considering we don't
+ actually want to have a system that allows this, I propose this:
+ all changes to Specs are acceptable. But, when recomputing Active
+ following the change, if the same orig is bound to more than one
+ redir, then the first binding for orig is retained, and all the
+ rest ignored.
+
+ ===========================================================
+ ===========================================================
+ Incremental implementation:
+
+ When a new SegInfo appears:
+ - it may be the source of new specs
+ - it may be the source of new matches for existing specs
+ Therefore:
+
+ - (new Specs x existing SegInfos): scan all symbols in the new
+ SegInfo to find new specs. Each of these needs to be compared
+ against all symbols in all the existing SegInfos to generate
+ new actives.
+
+ - (existing Specs x new SegInfo): scan all symbols in the SegInfo,
+ trying to match them to any existing specs, also generating
+ new actives.
+
+ - (new Specs x new SegInfo): scan all symbols in the new SegInfo,
+ trying to match them against the new specs, to generate new
+ actives.
+
+   - Finally, add the new specs to the current set of specs.
+
+ When adding a new active (s,d) to the Actives:
+ lookup s in Actives
+ if already bound to d, ignore
+ if already bound to something other than d, complain loudly and ignore
+ else add (s,d) to Actives
+ and discard (s,1) and (d,1) (maybe overly conservative)
+
+ When a SegInfo disappears:
+ - delete all specs acquired from the seginfo
+ - delete all actives derived from the just-deleted specs
+   - for each active (s,d) deleted, discard (s,1) and (d,1)
+*/
+
+
+/*------------------------------------------------------------*/
+/*--- REDIRECTION SPECIFICATIONS ---*/
+/*------------------------------------------------------------*/
+
+/* A specification of a redirection we want to do. Note that because
+ both the "from" soname and function name may contain wildcards, the
+ spec can match an arbitrary number of times.
+
+ 16 Nov 2007: Comments re .mandatory field: The initial motivation
+ for this is making Memcheck work sanely on glibc-2.6.X ppc32-linux.
+ We really need to intercept 'strlen' in ld.so right from startup.
+ If ld.so does not have a visible 'strlen' symbol, Memcheck
+ generates an impossible number of errors resulting from highly
+ tuned strlen implementation in ld.so, and is completely unusable
+ -- the resulting undefinedness eventually seeps everywhere. */
+typedef
+   struct _Spec {
+      struct _Spec* next; /* linked list */
+      /* FIXED PARTS -- set when created and not changed */
+      HChar* from_sopatt; /* from soname pattern; dinfo_strdup'd, freed
+                             when the owning TopSpec is deleted */
+      HChar* from_fnpatt; /* from fnname pattern; ownership as above */
+      Addr to_addr; /* where redirecting to */
+      Bool isWrap; /* wrap or replacement? */
+      const HChar* mandatory; /* non-NULL ==> abort V and print the
+                                 string if from_sopatt is loaded but
+                                 from_fnpatt cannot be found */
+      /* VARIABLE PARTS -- used transiently whilst processing redirections */
+      Bool mark; /* set if spec requires further processing */
+      Bool done; /* set if spec was successfully matched */
+   }
+   Spec;
+
+/* Top-level data structure. It contains a pointer to a SegInfo and
+ also a list of the specs harvested from that SegInfo. Note that
+ seginfo is allowed to be NULL, meaning that the specs are
+ pre-loaded ones at startup and are not associated with any
+ particular seginfo. */
+typedef
+   struct _TopSpec {
+      struct _TopSpec* next; /* linked list */
+      DebugInfo* seginfo; /* symbols etc; NULL for preloaded specs */
+      Spec* specs; /* specs pulled out of seginfo */
+      Bool mark; /* transient temporary used during deletion */
+   }
+   TopSpec;
+
+/* This is the top level list of redirections. m_debuginfo maintains
+ a list of SegInfos, and the idea here is to maintain a list with
+ the same number of elements (in fact, with one more element, so as
+ to record abovementioned preloaded specifications.) */
+static TopSpec* topSpecs = NULL;
+
+
+/*------------------------------------------------------------*/
+/*--- CURRENTLY ACTIVE REDIRECTIONS ---*/
+/*------------------------------------------------------------*/
+
+/* Represents a currently active binding. If either parent_spec or
+ parent_sym is NULL, then this binding was hardwired at startup and
+ should not be deleted. Same is true if either parent's seginfo
+ field is NULL. */
+typedef
+   struct {
+      /* old addr -- MUST BE THE FIRST WORD!  activeSet is created
+         with offsetof(Active, from_addr) as its fast-compare key. */
+      Addr from_addr;
+      Addr to_addr; /* where redirecting to */
+      TopSpec* parent_spec; /* the TopSpec which supplied the Spec */
+      TopSpec* parent_sym; /* the TopSpec which supplied the symbol */
+      Bool isWrap; /* wrap or replacement? */
+   }
+   Active;
+
+/* The active set is a fast lookup table */
+static OSet* activeSet = NULL;
+
+
+/*------------------------------------------------------------*/
+/*--- FWDses ---*/
+/*------------------------------------------------------------*/
+
+static void maybe_add_active ( Active /*by value; callee copies*/ );
+
+static void* dinfo_zalloc(HChar* ec, SizeT);
+static void dinfo_free(void*);
+static HChar* dinfo_strdup(HChar* ec, HChar*);
+static Bool is_plausible_guest_addr(Addr);
+static Bool is_aix5_glink_idiom(Addr);
+
+static void show_redir_state ( HChar* who );
+static void show_active ( HChar* left, Active* act );
+
+static void handle_maybe_load_notifier( const UChar* soname,
+ HChar* symbol, Addr addr );
+
+
+/*------------------------------------------------------------*/
+/*--- NOTIFICATIONS ---*/
+/*------------------------------------------------------------*/
+
+static
+void generate_and_add_actives (
+ /* spec list and the owning TopSpec */
+ Spec* specs,
+ TopSpec* parent_spec,
+ /* debuginfo and the owning TopSpec */
+ DebugInfo* di,
+ TopSpec* parent_sym
+ );
+
+/* Notify m_redir of the arrival of a new DebugInfo. This is fairly
+ complex, but the net effect is to (1) add a new entry to the
+ topspecs list, and (2) figure out what new binding are now active,
+ and, as a result, add them to the actives mapping. */
+
+#define N_DEMANGLED 256
+
+void VG_(redir_notify_new_DebugInfo)( DebugInfo* newsi )
+{
+   Bool ok, isWrap;
+   Int i, nsyms;
+   Spec* specList;
+   Spec* spec;
+   TopSpec* ts;
+   TopSpec* newts;
+   HChar* sym_name;
+   Addr sym_addr, sym_toc;
+   HChar demangled_sopatt[N_DEMANGLED];
+   HChar demangled_fnpatt[N_DEMANGLED];
+   Bool check_ppcTOCs = False;
+   Bool isText;
+   const UChar* newsi_soname;
+
+   /* On TOC-using platforms a redirect target must come with a TOC
+      pointer; remember to make a checking pass over the symbols. */
+# if defined(VG_PLAT_USES_PPCTOC)
+   check_ppcTOCs = True;
+# endif
+
+   vg_assert(newsi);
+   newsi_soname = VG_(seginfo_soname)(newsi);
+   vg_assert(newsi_soname != NULL);
+
+   /* stay sane: we don't already have this. */
+   for (ts = topSpecs; ts; ts = ts->next)
+      vg_assert(ts->seginfo != newsi);
+
+   /* scan this DebugInfo's symbol table, pulling out and demangling
+      any specs found */
+
+   specList = NULL; /* the spec list we're building up */
+
+   nsyms = VG_(seginfo_syms_howmany)( newsi );
+   for (i = 0; i < nsyms; i++) {
+      VG_(seginfo_syms_getidx)( newsi, i, &sym_addr, &sym_toc,
+                                NULL, &sym_name, &isText );
+      ok = VG_(maybe_Z_demangle)( sym_name, demangled_sopatt, N_DEMANGLED,
+                                  demangled_fnpatt, N_DEMANGLED, &isWrap );
+      /* ignore data symbols */
+      if (!isText)
+         continue;
+      if (!ok) {
+         /* It's not a full-scale redirect, but perhaps it is a load-notify
+            fn?  Let the load-notify department see it. */
+         handle_maybe_load_notifier( newsi_soname, sym_name, sym_addr );
+         continue;
+      }
+      if (check_ppcTOCs && sym_toc == 0) {
+         /* This platform uses toc pointers, but none could be found
+            for this symbol, so we can't safely redirect/wrap to it.
+            Just skip it; we'll make a second pass over the symbols in
+            the following loop, and complain at that point. */
+         continue;
+      }
+      /* Build a Spec from the demangled (sopatt, fnpatt) pair.  Note
+         'mandatory' is left NULL by dinfo_zalloc's zero-fill; only
+         hardwired specs are ever mandatory. */
+      spec = dinfo_zalloc("redir.rnnD.1", sizeof(Spec));
+      vg_assert(spec);
+      spec->from_sopatt = dinfo_strdup("redir.rnnD.2", demangled_sopatt);
+      spec->from_fnpatt = dinfo_strdup("redir.rnnD.3", demangled_fnpatt);
+      vg_assert(spec->from_sopatt);
+      vg_assert(spec->from_fnpatt);
+      spec->to_addr = sym_addr;
+      spec->isWrap = isWrap;
+      /* check we're not adding manifestly stupid destinations */
+      vg_assert(is_plausible_guest_addr(sym_addr));
+      spec->next = specList;
+      spec->mark = False; /* not significant */
+      spec->done = False; /* not significant */
+      specList = spec;
+   }
+
+   if (check_ppcTOCs) {
+      /* Second pass: warn about redirect symbols lacking a TOC entry,
+         unless another copy of the same symbol (with a TOC) already
+         produced a spec above. */
+      for (i = 0; i < nsyms; i++) {
+         VG_(seginfo_syms_getidx)( newsi, i, &sym_addr, &sym_toc,
+                                   NULL, &sym_name, &isText );
+         ok = isText
+              && VG_(maybe_Z_demangle)(
+                    sym_name, demangled_sopatt, N_DEMANGLED,
+                    demangled_fnpatt, N_DEMANGLED, &isWrap );
+         if (!ok)
+            /* not a redirect.  Ignore. */
+            continue;
+         if (sym_toc != 0)
+            /* has a valid toc pointer.  Ignore. */
+            continue;
+
+         for (spec = specList; spec; spec = spec->next)
+            if (0 == VG_(strcmp)(spec->from_sopatt, demangled_sopatt)
+                && 0 == VG_(strcmp)(spec->from_fnpatt, demangled_fnpatt))
+               break;
+         if (spec)
+            /* a redirect to some other copy of that symbol, which
+               does have a TOC value, already exists */
+            continue;
+
+         /* Complain */
+         VG_(message)(Vg_DebugMsg,
+                      "WARNING: no TOC ptr for redir/wrap to %s %s",
+                      demangled_sopatt, demangled_fnpatt);
+      }
+   }
+
+   /* Ok.  Now specList holds the list of specs from the DebugInfo.
+      Build a new TopSpec, but don't add it to topSpecs yet. */
+   newts = dinfo_zalloc("redir.rnnD.4", sizeof(TopSpec));
+   vg_assert(newts);
+   newts->next = NULL; /* not significant */
+   newts->seginfo = newsi;
+   newts->specs = specList;
+   newts->mark = False; /* not significant */
+
+   /* We now need to augment the active set with the following partial
+      cross product:
+
+      (1) actives formed by matching the new specs in specList against
+          all symbols currently listed in topSpecs
+
+      (2) actives formed by matching the new symbols in newsi against
+          all specs currently listed in topSpecs
+
+      (3) actives formed by matching the new symbols in newsi against
+          the new specs in specList
+
+      This is necessary in order to maintain the invariant that
+      Actives contains all bindings generated by matching ALL specs in
+      topSpecs against ALL symbols in topSpecs (that is, a cross
+      product of ALL known specs against ALL known symbols).
+   */
+   /* Case (1) */
+   for (ts = topSpecs; ts; ts = ts->next) {
+      if (ts->seginfo)
+         generate_and_add_actives( specList, newts,
+                                   ts->seginfo, ts );
+   }
+
+   /* Case (2) */
+   for (ts = topSpecs; ts; ts = ts->next) {
+      generate_and_add_actives( ts->specs, ts,
+                                newsi, newts );
+   }
+
+   /* Case (3) */
+   generate_and_add_actives( specList, newts,
+                             newsi, newts );
+
+   /* Finally, add the new TopSpec. */
+   newts->next = topSpecs;
+   topSpecs = newts;
+
+   if (VG_(clo_trace_redir))
+      show_redir_state("after VG_(redir_notify_new_DebugInfo)");
+}
+
+#undef N_DEMANGLED
+
+
+/* Do one element of the basic cross product: add to the active set,
+ all matches resulting from comparing all the given specs against
+ all the symbols in the given seginfo. If a conflicting binding
+ would thereby arise, don't add it, but do complain. */
+
+static
+void generate_and_add_actives (
+        /* spec list and the owning TopSpec */
+        Spec* specs,
+        TopSpec* parent_spec,
+        /* seginfo and the owning TopSpec */
+        DebugInfo* di,
+        TopSpec* parent_sym
+     )
+{
+   Spec* sp;
+   Bool anyMark, isText;
+   Active act;
+   Int nsyms, i;
+   Addr sym_addr;
+   HChar* sym_name;
+
+   /* First figure out which of the specs match the seginfo's soname.
+      Also clear the 'done' bits, so that after the main loop below we
+      can tell which of the Specs really did get done. */
+   anyMark = False;
+   for (sp = specs; sp; sp = sp->next) {
+      sp->done = False;
+      sp->mark = VG_(string_match)( sp->from_sopatt,
+                                    VG_(seginfo_soname)(di) );
+      anyMark = anyMark || sp->mark;
+   }
+
+   /* shortcut: if none of the sonames match, there will be no bindings. */
+   if (!anyMark)
+      return;
+
+   /* Iterate outermost over the symbols in the seginfo, in the hope
+      of trashing the caches less. */
+   nsyms = VG_(seginfo_syms_howmany)( di );
+   for (i = 0; i < nsyms; i++) {
+      VG_(seginfo_syms_getidx)( di, i,
+                                &sym_addr, NULL, NULL, &sym_name, &isText );
+
+      /* ignore data symbols */
+      if (!isText)
+         continue;
+
+      /* On AIX, we cannot redirect calls to a so-called glink
+         function for reasons which are not obvious - something to do
+         with saving r2 across the call.  Not a problem, as we don't
+         want to anyway; presumably it is the target of the glink we
+         need to redirect.  Hence just spot them and ignore them.
+         They are always of a very specific (more or less
+         ABI-mandated) form. */
+      if (is_aix5_glink_idiom(sym_addr))
+         continue;
+
+      /* Try this symbol against every soname-matching spec. */
+      for (sp = specs; sp; sp = sp->next) {
+         if (!sp->mark)
+            continue; /* soname doesn't match */
+         if (VG_(string_match)( sp->from_fnpatt, sym_name )) {
+            /* got a new binding.  Add to collection. */
+            act.from_addr = sym_addr;
+            act.to_addr = sp->to_addr;
+            act.parent_spec = parent_spec;
+            act.parent_sym = parent_sym;
+            act.isWrap = sp->isWrap;
+            sp->done = True;
+            maybe_add_active( act );
+         }
+      } /* for (sp = specs; sp; sp = sp->next) */
+   } /* for (i = 0; i < nsyms; i++) */
+
+   /* Now, finally, look for Specs which were marked to be done, but
+      didn't get matched.  If any such are mandatory we must abort the
+      system at this point. */
+   for (sp = specs; sp; sp = sp->next) {
+      if (!sp->mark)
+         continue;
+      if (sp->mark && (!sp->done) && sp->mandatory)
+         break;
+   }
+   if (sp) {
+      /* 'sp' is a mandatory spec whose soname matched but whose
+         fnname pattern matched no symbol: cannot continue. */
+      HChar* v = "valgrind: ";
+      vg_assert(sp->mark);
+      vg_assert(!sp->done);
+      vg_assert(sp->mandatory);
+      VG_(printf)("\n");
+      VG_(printf)(
+      "%sFatal error at startup: a function redirection\n", v);
+      VG_(printf)(
+      "%swhich is mandatory for this platform-tool combination\n", v);
+      VG_(printf)(
+      "%scannot be set up.  Details of the redirection are:\n", v);
+      VG_(printf)(
+      "%s\n", v);
+      VG_(printf)(
+      "%sA must-be-redirected function\n", v);
+      VG_(printf)(
+      "%swhose name matches the pattern:      %s\n", v, sp->from_fnpatt);
+      VG_(printf)(
+      "%sin an object with soname matching:   %s\n", v, sp->from_sopatt);
+      VG_(printf)(
+      "%swas not found whilst processing\n", v);
+      VG_(printf)(
+      "%ssymbols from the object with soname: %s\n", v, VG_(seginfo_soname)(di));
+      VG_(printf)(
+      "%s\n", v);
+      VG_(printf)(
+      "%s%s\n", v, sp->mandatory);
+      VG_(printf)(
+      "%s\n", v);
+      VG_(printf)(
+      "%sCannot continue -- exiting now.  Sorry.\n", v);
+      VG_(printf)("\n");
+      VG_(exit)(1);
+   }
+}
+
+
+/* Add an act (passed by value; is copied here) and deal with
+ conflicting bindings. */
+static void maybe_add_active ( Active act )
+{
+   HChar* what = NULL;
+   Active* old;
+
+   /* Complain and ignore manifestly bogus 'from' addresses.
+
+      Kludge: because this can get called before the trampoline area (a
+      bunch of magic 'to' addresses) has its ownership changed from V
+      to C, we can't check the 'to' address similarly.  Sigh.
+
+      amd64-linux hack: the vsysinfo pages appear to have no
+      permissions
+         ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0
+      so skip the check for them. */
+   if (!is_plausible_guest_addr(act.from_addr)
+# if defined(VGP_amd64_linux)
+       && act.from_addr != 0xFFFFFFFFFF600000ULL
+       && act.from_addr != 0xFFFFFFFFFF600400ULL
+# endif
+      ) {
+      what = "redirection from-address is in non-executable area";
+      goto bad;
+   }
+
+   old = VG_(OSetGen_Lookup)( activeSet, &act.from_addr );
+   if (old) {
+      /* Dodgy.  Conflicting binding. */
+      vg_assert(old->from_addr == act.from_addr);
+      if (old->to_addr != act.to_addr) {
+         /* we have to ignore it -- otherwise activeSet would contain
+            conflicting bindings. */
+         what = "new redirection conflicts with existing -- ignoring it";
+         goto bad;
+      } else {
+         /* This appears to be a duplicate of an existing binding.
+            Safe(ish) -- ignore. */
+         /* XXXXXXXXXXX COMPLAIN if new and old parents differ */
+      }
+   } else {
+      /* No binding for this 'from' yet: insert a copy of 'act'. */
+      Active* a = VG_(OSetGen_AllocNode)(activeSet, sizeof(Active));
+      vg_assert(a);
+      *a = act;
+      VG_(OSetGen_Insert)(activeSet, a);
+      /* Now that a new from->to redirection is in force, we need to
+         get rid of any translations intersecting 'from' in order that
+         they get redirected to 'to'.  So discard them.  Just for
+         paranoia (but, I believe, unnecessarily), discard 'to' as
+         well. */
+      VG_(discard_translations)( (Addr64)act.from_addr, 1,
+                                 "redir_new_DebugInfo(from_addr)");
+      VG_(discard_translations)( (Addr64)act.to_addr, 1,
+                                 "redir_new_DebugInfo(to_addr)");
+   }
+   return;
+
+  bad:
+   vg_assert(what);
+   if (VG_(clo_verbosity) > 1) {
+      VG_(message)(Vg_UserMsg, "WARNING: %s", what);
+      show_active( "     new: ", &act);
+   }
+}
+
+
+/* Notify m_redir of the deletion of a DebugInfo. This is relatively
+ simple -- just get rid of all actives derived from it, and free up
+ the associated list elements. */
+
+void VG_(redir_notify_delete_DebugInfo)( DebugInfo* delsi )
+{
+   TopSpec* ts;
+   TopSpec* tsPrev;
+   Spec* sp;
+   Spec* sp_next;
+   OSet* tmpSet;
+   Active* act;
+   Bool delMe;
+   Addr addr;
+
+   vg_assert(delsi);
+
+   /* Search for it, and make tsPrev point to the previous entry, if
+      any. */
+   tsPrev = NULL;
+   ts = topSpecs;
+   while (True) {
+      if (ts == NULL) break;
+      if (ts->seginfo == delsi) break;
+      tsPrev = ts;
+      ts = ts->next;
+   }
+
+   vg_assert(ts); /* else we don't have the deleted DebugInfo */
+   vg_assert(ts->seginfo == delsi);
+
+   /* Traverse the actives, copying the addresses of those we intend
+      to delete into tmpSet. */
+   tmpSet = VG_(OSetWord_Create)(dinfo_zalloc, "redir.rndD.1", dinfo_free);
+
+   /* Mark this TopSpec so the loop below can spot actives derived
+      from it (via either parent pointer). */
+   ts->mark = True;
+
+   VG_(OSetGen_ResetIter)( activeSet );
+   while ( (act = VG_(OSetGen_Next)(activeSet)) ) {
+      /* Deletable iff both parents exist, neither is a hardwired
+         (NULL-seginfo) entry, and at least one parent is the TopSpec
+         being deleted. */
+      delMe = act->parent_spec != NULL
+              && act->parent_sym != NULL
+              && act->parent_spec->seginfo != NULL
+              && act->parent_sym->seginfo != NULL
+              && (act->parent_spec->mark || act->parent_sym->mark);
+
+      /* While we're at it, a bit of paranoia: delete any actives
+         which don't have both feet in valid client executable areas.
+         But don't delete hardwired-at-startup ones; these are denoted
+         by having parent_spec or parent_sym being NULL. */
+      if ( (!delMe)
+           && act->parent_spec != NULL
+           && act->parent_sym != NULL ) {
+         if (!is_plausible_guest_addr(act->from_addr))
+            delMe = True;
+         if (!is_plausible_guest_addr(act->to_addr))
+            delMe = True;
+      }
+
+      if (delMe) {
+         VG_(OSetWord_Insert)( tmpSet, act->from_addr );
+         /* While we have our hands on both the 'from' and 'to'
+            of this Active, do paranoid stuff with tt/tc. */
+         VG_(discard_translations)( (Addr64)act->from_addr, 1,
+                                    "redir_del_DebugInfo(from_addr)");
+         VG_(discard_translations)( (Addr64)act->to_addr, 1,
+                                    "redir_del_DebugInfo(to_addr)");
+      }
+   }
+
+   /* Now traverse tmpSet, deleting corresponding elements in activeSet.
+      (Deletion couldn't be done inside the iteration above.) */
+   VG_(OSetWord_ResetIter)( tmpSet );
+   while ( VG_(OSetWord_Next)(tmpSet, &addr) ) {
+      act = VG_(OSetGen_Remove)( activeSet, &addr );
+      vg_assert(act);
+      VG_(OSetGen_FreeNode)( activeSet, act );
+   }
+
+   VG_(OSetWord_Destroy)( tmpSet );
+
+   /* The Actives set is now cleaned up.  Free up this TopSpec and
+      everything hanging off it. */
+   for (sp = ts->specs; sp; sp = sp_next) {
+      if (sp->from_sopatt) dinfo_free(sp->from_sopatt);
+      if (sp->from_fnpatt) dinfo_free(sp->from_fnpatt);
+      sp_next = sp->next;
+      dinfo_free(sp);
+   }
+
+   /* Unlink 'ts' from the topSpecs list and free it. */
+   if (tsPrev == NULL) {
+      /* first in list */
+      topSpecs = ts->next;
+   } else {
+      tsPrev->next = ts->next;
+   }
+   dinfo_free(ts);
+
+   if (VG_(clo_trace_redir))
+      show_redir_state("after VG_(redir_notify_delete_DebugInfo)");
+}
+
+
+/*------------------------------------------------------------*/
+/*--- QUERIES (really the whole point of this module) ---*/
+/*------------------------------------------------------------*/
+
+/* This is the crucial redirection function. It answers the question:
+ should this code address be redirected somewhere else? It's used
+ just before translating a basic block. */
+/* Answer the central query: is 'orig' currently redirected?  Returns
+   the redirection target if so, else 'orig' unchanged.  If 'isWrap'
+   is non-NULL and a redirection exists, *isWrap is set to say whether
+   it is a wrap (True) or a replacement (False). */
+Addr VG_(redir_do_lookup) ( Addr orig, Bool* isWrap )
+{
+   Active* r = VG_(OSetGen_Lookup)(activeSet, &orig);
+   if (r == NULL)
+      return orig;
+
+   vg_assert(r->to_addr != 0);
+   if (isWrap)
+      *isWrap = r->isWrap;
+   return r->to_addr;
+}
+
+
+/*------------------------------------------------------------*/
+/*--- INITIALISATION ---*/
+/*------------------------------------------------------------*/
+
+/* Add a never-delete-me Active. */
+
+__attribute__((unused)) /* only used on amd64 */
+static void add_hardwired_active ( Addr from, Addr to )
+{
+   /* NULL parents mark this Active as hardwired, so it is never
+      deleted by VG_(redir_notify_delete_DebugInfo). */
+   Active act;
+   act.from_addr = from;
+   act.to_addr = to;
+   act.parent_spec = NULL;
+   act.parent_sym = NULL;
+   act.isWrap = False;
+   maybe_add_active( act );
+}
+
+
+/* Add a never-delete-me Spec. This is a bit of a kludge. On the
+ assumption that this is called only at startup, only handle the
+ case where topSpecs is completely empty, or if it isn't, it has
+ just one entry and that is the one with NULL seginfo -- that is the
+ entry that holds these initial specs. */
+
+__attribute__((unused)) /* not used on all platforms */
+static void add_hardwired_spec ( HChar* sopatt, HChar* fnpatt,
+                                 Addr to_addr,
+                                 const HChar* const mandatory )
+{
+   Spec* spec = dinfo_zalloc("redir.ahs.1", sizeof(Spec));
+   vg_assert(spec);
+
+   if (topSpecs == NULL) {
+      topSpecs = dinfo_zalloc("redir.ahs.2", sizeof(TopSpec));
+      vg_assert(topSpecs);
+      /* dinfo_zalloc sets all fields to zero, so seginfo is NULL,
+         marking this as the preloaded-specs entry. */
+   }
+
+   vg_assert(topSpecs != NULL);
+   vg_assert(topSpecs->next == NULL);
+   vg_assert(topSpecs->seginfo == NULL);
+   /* FIXED PARTS.  Note: sopatt/fnpatt are not copied; callers pass
+      string literals which live forever. */
+   spec->from_sopatt = sopatt;
+   spec->from_fnpatt = fnpatt;
+   spec->to_addr = to_addr;
+   spec->isWrap = False;
+   spec->mandatory = mandatory;
+   /* VARIABLE PARTS */
+   spec->mark = False; /* not significant */
+   spec->done = False; /* not significant */
+
+   spec->next = topSpecs->specs;
+   topSpecs->specs = spec;
+}
+
+
+/* Initialise the redir system, and create the initial Spec list and
+ for amd64-linux a couple of permanent active mappings. The initial
+ Specs are not converted into Actives yet, on the (checked)
+ assumption that no DebugInfos have so far been created, and so when
+ they are created, that will happen. */
+
+void VG_(redir_initialise) ( void )
+{
+   // Assert that there are no DebugInfos so far
+   vg_assert( VG_(next_seginfo)(NULL) == NULL );
+
+   // Initialise active mapping.  The set is keyed directly on
+   // Active.from_addr (word compare), hence the offsetof.
+   activeSet = VG_(OSetGen_Create)(offsetof(Active, from_addr),
+                                   NULL,     // Use fast comparison
+                                   dinfo_zalloc,
+                                   "redir.ri.1",
+                                   dinfo_free);
+
+   // The rest of this function just adds initial Specs.
+
+# if defined(VGP_x86_linux)
+   /* If we're using memcheck, use this intercept right from the
+      start, otherwise ld.so (glibc-2.3.5) makes a lot of noise. */
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec(
+         "ld-linux.so.2", "index",
+         (Addr)&VG_(x86_linux_REDIR_FOR_index),
+         NULL
+      );
+   }
+
+# elif defined(VGP_amd64_linux)
+   /* Redirect vsyscalls to local versions */
+   add_hardwired_active(
+      0xFFFFFFFFFF600000ULL,
+      (Addr)&VG_(amd64_linux_REDIR_FOR_vgettimeofday)
+   );
+   add_hardwired_active(
+      0xFFFFFFFFFF600400ULL,
+      (Addr)&VG_(amd64_linux_REDIR_FOR_vtime)
+   );
+
+# elif defined(VGP_ppc32_linux)
+   {
+      static const HChar croakage[]
+         = "Possible fix: install glibc's debuginfo package on this machine.";
+
+      /* If we're using memcheck, use these intercepts right from
+         the start, otherwise ld.so makes a lot of noise. */
+      if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+
+         /* this is mandatory - can't sanely continue without it */
+         add_hardwired_spec(
+            "ld.so.1", "strlen",
+            (Addr)&VG_(ppc32_linux_REDIR_FOR_strlen),
+            croakage
+         );
+         add_hardwired_spec(
+            "ld.so.1", "strcmp",
+            (Addr)&VG_(ppc32_linux_REDIR_FOR_strcmp),
+            NULL /* not mandatory - so why bother at all? */
+            /* glibc-2.6.1 (openSUSE 10.3, ppc32) seems fine without it */
+         );
+         add_hardwired_spec(
+            "ld.so.1", "index",
+            (Addr)&VG_(ppc32_linux_REDIR_FOR_strchr),
+            NULL /* not mandatory - so why bother at all? */
+            /* glibc-2.6.1 (openSUSE 10.3, ppc32) seems fine without it */
+         );
+      }
+   }
+
+# elif defined(VGP_ppc64_linux)
+   {
+      static const HChar croakage[]
+         = "Possible fix: install glibc's debuginfo package on this machine.";
+
+      /* If we're using memcheck, use these intercepts right from
+         the start, otherwise ld.so makes a lot of noise. */
+      if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+
+         /* this is mandatory - can't sanely continue without it */
+         add_hardwired_spec(
+            "ld64.so.1", "strlen",
+            (Addr)VG_(fnptr_to_fnentry)( &VG_(ppc64_linux_REDIR_FOR_strlen) ),
+            croakage
+         );
+
+         add_hardwired_spec(
+            "ld64.so.1", "index",
+            (Addr)VG_(fnptr_to_fnentry)( &VG_(ppc64_linux_REDIR_FOR_strchr) ),
+            NULL /* not mandatory - so why bother at all? */
+            /* glibc-2.5 (FC6, ppc64) seems fine without it */
+         );
+      }
+   }
+
+# elif defined(VGP_ppc32_aix5)
+   /* nothing so far */
+
+# elif defined(VGP_ppc64_aix5)
+   /* nothing so far */
+
+# else
+#   error Unknown platform
+# endif
+
+   if (VG_(clo_trace_redir))
+      show_redir_state("after VG_(redir_initialise)");
+}
+
+
+/*------------------------------------------------------------*/
+/*--- MISC HELPERS ---*/
+/*------------------------------------------------------------*/
+
+/* Allocate 'n' zero-initialised bytes from the DebugInfo arena.  'ec'
+   is the cost-centre string for memory profiling.  Never returns NULL
+   (asserts on failure). */
+static void* dinfo_zalloc(HChar* ec, SizeT n) {
+   void* p;
+   vg_assert(n > 0);
+   p = VG_(arena_malloc)(VG_AR_DINFO, ec, n);
+   /* vg_assert, not tl_assert: this is core code, and every other
+      assertion in this file uses the core-side macro. */
+   vg_assert(p);
+   VG_(memset)(p, 0, n);
+   return p;
+}
+
+/* Free a block previously obtained via dinfo_zalloc/dinfo_strdup. */
+static void dinfo_free(void* p) {
+   /* vg_assert for consistency with the rest of this (core) file;
+      tl_assert is the tool-side macro. */
+   vg_assert(p);
+   /* No 'return expr;' here: returning a void expression from a void
+      function is a C constraint violation (it is only legal in C++). */
+   VG_(arena_free)(VG_AR_DINFO, p);
+}
+
+/* Duplicate 'str' into the DebugInfo arena; 'ec' is the cost-centre
+   string.  The copy must be released with dinfo_free. */
+static HChar* dinfo_strdup(HChar* ec, HChar* str)
+{
+   return VG_(arena_strdup)(VG_AR_DINFO, ec, str);
+}
+
+/* Really this should be merged with translations_allowable_from_seg
+ in m_translate. */
+/* Heuristic sanity check: does 'a' lie in a client-owned segment that
+   is at least potentially executable?  Used to reject manifestly
+   bogus redirection endpoints. */
+static Bool is_plausible_guest_addr(Addr a)
+{
+   NSegment const* seg = VG_(am_find_nsegment)(a);
+   return seg != NULL
+          && (seg->kind == SkAnonC || seg->kind == SkFileC)
+          && (seg->hasX || seg->hasR); /* crude x86-specific hack */
+}
+
+/* A function which spots AIX 'glink' functions. A 'glink' function
+ is a stub function which has something to do with AIX-style dynamic
+ linking, and jumps to the real target (with which it typically
+ shares the same name). See also comment where this function is
+ used (above). */
+static Bool is_aix5_glink_idiom ( Addr sym_addr )
+{
+#  if defined(VGP_ppc32_aix5)
+   UInt* w = (UInt*)sym_addr;
+   /* NB: this probes 7 words starting at sym_addr.  The two
+      is_plausible_guest_addr checks (on w+0 and w+6) bound the probe
+      so the loads stay inside mapped client memory. */
+   if (VG_IS_4_ALIGNED(w)
+       && is_plausible_guest_addr((Addr)(w+0))
+       && is_plausible_guest_addr((Addr)(w+6))
+       && (w[0] & 0xFFFF0000) == 0x81820000 /* lwz r12,func@toc(r2) */
+       && w[1] == 0x90410014 /* stw r2,20(r1) */
+       && w[2] == 0x800c0000 /* lwz r0,0(r12) */
+       && w[3] == 0x804c0004 /* lwz r2,4(r12) */
+       && w[4] == 0x7c0903a6 /* mtctr r0 */
+       && w[5] == 0x4e800420 /* bctr */
+       && w[6] == 0x00000000 /* illegal */)
+      return True;
+#  elif defined(VGP_ppc64_aix5)
+   UInt* w = (UInt*)sym_addr;
+   /* Same idiom as the 32-bit case, but matching the 64-bit
+      (ld/std) instruction encodings. */
+   if (VG_IS_4_ALIGNED(w)
+       && is_plausible_guest_addr((Addr)(w+0))
+       && is_plausible_guest_addr((Addr)(w+6))
+       && (w[0] & 0xFFFF0000) == 0xE9820000 /* ld r12,func@toc(r2) */
+       && w[1] == 0xF8410028 /* std r2,40(r1) */
+       && w[2] == 0xE80C0000 /* ld r0,0(r12) */
+       && w[3] == 0xE84C0008 /* ld r2,8(r12) */
+       && w[4] == 0x7c0903a6 /* mtctr r0 */
+       && w[5] == 0x4e800420 /* bctr */
+       && w[6] == 0x00000000 /* illegal */)
+      return True;
+#  endif
+   /* Not an AIX platform, or not the glink pattern. */
+   return False;
+}
+
+/*------------------------------------------------------------*/
+/*--- NOTIFY-ON-LOAD FUNCTIONS ---*/
+/*------------------------------------------------------------*/
+
+/* Called for symbols that may be load notifiers.  'soname' names the
+   object the symbol came from, 'symbol' is the symbol name and 'addr'
+   its address.  Records the libc freeres wrapper address (and, on
+   x86-linux, _dl_sysinfo_int80); asserts on any other symbol carrying
+   the notify-on-load prefix. */
+static
+void handle_maybe_load_notifier( const UChar* soname,
+                                 HChar* symbol, Addr addr )
+{
+   /* Robustness: the strcmp/strncmp calls below would dereference a
+      NULL symbol name; the original x86-only path already guarded
+      against this, so guard uniformly here. */
+   if (symbol == NULL)
+      return;
+
+# if defined(VGP_x86_linux)
+   /* x86-linux only: if we see _dl_sysinfo_int80, note its address.
+      See comment on declaration of VG_(client__dl_sysinfo_int80) for
+      the reason.  As far as I can tell, the relevant symbol is always
+      in object with soname "ld-linux.so.2". */
+   if (symbol[0] == '_'
+       && 0 == VG_(strcmp)(symbol, "_dl_sysinfo_int80")
+       && 0 == VG_(strcmp)(soname, "ld-linux.so.2")) {
+      if (VG_(client__dl_sysinfo_int80) == 0)
+         VG_(client__dl_sysinfo_int80) = addr;
+   }
+# endif
+
+   /* Normal load-notifier handling after here.  First, ignore all
+      symbols lacking the right prefix. */
+   if (0 != VG_(strncmp)(symbol, VG_NOTIFY_ON_LOAD_PREFIX,
+                         VG_NOTIFY_ON_LOAD_PREFIX_LEN))
+      /* Doesn't have the right prefix */
+      return;
+
+   if (VG_(strcmp)(symbol, VG_STRINGIFY(VG_NOTIFY_ON_LOAD(freeres))) == 0)
+      VG_(client___libc_freeres_wrapper) = addr;
+   else
+      vg_assert2(0, "unrecognised load notification function: %s", symbol);
+}
+
+
+/*------------------------------------------------------------*/
+/*--- SANITY/DEBUG ---*/
+/*------------------------------------------------------------*/
+
+/* Print one redirect Spec on a single debug-log line, prefixed by
+   'left'. */
+static void show_spec ( HChar* left, Spec* spec )
+{
+   HChar* kind = spec->isWrap ? "W" : "R";
+   VG_(message)(Vg_DebugMsg,
+                "%s%25s %30s %s-> 0x%08llx",
+                left,
+                spec->from_sopatt, spec->from_fnpatt,
+                kind,
+                (ULong)spec->to_addr );
+}
+
+/* Print one Active redirection, annotating both addresses with the
+   function names found in the debug info ("???" if unknown). */
+static void show_active ( HChar* left, Active* act )
+{
+   HChar fromName[64];
+   HChar toName[64];
+   fromName[0] = toName[0] = 0;
+   if (!VG_(get_fnname_w_offset)(act->from_addr, fromName, 64))
+      VG_(strcpy)(fromName, "???");
+   if (!VG_(get_fnname_w_offset)(act->to_addr, toName, 64))
+      VG_(strcpy)(toName, "???");
+
+   VG_(message)(Vg_DebugMsg, "%s0x%08llx (%20s) %s-> 0x%08llx %s",
+                             left,
+                             (ULong)act->from_addr, fromName,
+                             act->isWrap ? "W" : "R",
+                             (ULong)act->to_addr, toName );
+}
+
+/* Dump the complete redirection state (all TopSpecs and all Actives)
+   to the debug log.  'who' labels the dump. */
+static void show_redir_state ( HChar* who )
+{
+   TopSpec* topspec;
+   Spec*    spec;
+   Active*  active;
+   VG_(message)(Vg_DebugMsg, "<<");
+   VG_(message)(Vg_DebugMsg, " ------ REDIR STATE %s ------", who);
+   for (topspec = topSpecs; topspec != NULL; topspec = topspec->next) {
+      VG_(message)(Vg_DebugMsg,
+                   " TOPSPECS of soname %s",
+                   topspec->seginfo
+                      ? (HChar*)VG_(seginfo_soname)(topspec->seginfo)
+                      : "(hardwired)" );
+      for (spec = topspec->specs; spec != NULL; spec = spec->next)
+         show_spec(" ", spec);
+   }
+   VG_(message)(Vg_DebugMsg, " ------ ACTIVE ------");
+   VG_(OSetGen_ResetIter)( activeSet );
+   while ( (active = VG_(OSetGen_Next)(activeSet)) != NULL ) {
+      show_active(" ", active);
+   }
+
+   VG_(message)(Vg_DebugMsg, ">>");
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_seqmatch.c.svn-base b/coregrind/.svn/text-base/m_seqmatch.c.svn-base
new file mode 100644
index 0000000..27270ab
--- /dev/null
+++ b/coregrind/.svn/text-base/m_seqmatch.c.svn-base
@@ -0,0 +1,233 @@
+
+/*--------------------------------------------------------------------*/
+/*--- A simple sequence matching facility. ---*/
+/*--- m_seqmatch.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 OpenWorks Ltd
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h" // VG_(strlen)
+#include "pub_core_seqmatch.h" // self
+
+/* ---------------------------------------------------------------------
+ A simple sequence matching facility
+ ------------------------------------------------------------------ */
+
+/* See detailed comment in include/pub_tool_seqmatch.h about this. */
+Bool VG_(generic_match) (
+        Bool matchAll,
+        void* patt, SizeT szbPatt, UWord nPatt, UWord ixPatt,
+        void* input, SizeT szbInput, UWord nInput, UWord ixInput,
+        Bool (*pIsStar)(void*),
+        Bool (*pIsQuery)(void*),
+        Bool (*pattEQinp)(void*,void*)
+     )
+{
+   /* This is the spec, written in my favourite formal specification
+      language.  It specifies non-greedy matching of '*'s.
+
+      ma ('*':ps) (i:is) = ma ps (i:is) || ma ('*':ps) is
+      ma ('*':ps) []     = ma ps []
+
+      ma ('?':ps) (i:is) = ma ps is
+      ma ('?':ps) []     = False
+
+      ma (p:ps)   (i:is) = p == i && ma ps is
+
+      ma (p:ps)   []     = False
+      ma []       (i:is) = False -- m-all, True for m-prefix
+      ma []       []     = True
+   */
+   Bool havePatt, haveInput;
+   void *currPatt, *currInput;
+  tailcall:
+   /* The counts and indices are UWords (unsigned), so the former
+      '>= 0' tests were vacuously true and have been dropped; only
+      the upper bounds are meaningful. */
+   vg_assert(nPatt < 1000000);  /* arbitrary */
+   vg_assert(nInput < 1000000); /* arbitrary */
+   vg_assert(ixPatt <= nPatt);
+   vg_assert(ixInput <= nInput);
+
+   havePatt  = ixPatt < nPatt;
+   haveInput = ixInput < nInput;
+
+   /* No specific need to set NULL when !have{Patt,Input}, but guards
+      against inadvertently dereferencing an out of range pointer to
+      the pattern or input arrays. */
+   currPatt  = havePatt  ? ((Char*)patt) + szbPatt * ixPatt    : NULL;
+   currInput = haveInput ? ((Char*)input) + szbInput * ixInput : NULL;
+
+   // Deal with the complex case first: wildcards.  Do frugal
+   // matching.  When encountering a '*', first skip no characters
+   // at all, and see if the rest of the match still works.  Only if
+   // that fails do we then skip a character, and retry at the next
+   // position.
+   //
+   // ma ('*':ps) (i:is) = ma ps (i:is) || ma ('*':ps) is
+   //
+   // If we're out of input, check the rest of the pattern matches
+   // the empty input.  This really means it can only be empty or
+   // composed entirely of '*'s.
+   //
+   // ma ('*':ps) []     = ma ps []
+   //
+   if (havePatt && pIsStar(currPatt)) {
+      if (haveInput) {
+         // ma ('*':ps) (i:is) = ma ps (i:is) || ma ('*':ps) is
+         // we unavoidably have to make a real recursive call for the
+         // first half of the OR, since this isn't straight tail-recursion.
+         if (VG_(generic_match)( matchAll,
+                                 patt, szbPatt, nPatt, ixPatt+1,
+                                 input,szbInput,nInput, ixInput+0,
+                                 pIsStar,pIsQuery,pattEQinp) ) {
+            return True;
+         }
+         // but we can tail-recurse for the second call
+         ixInput++; goto tailcall;
+      } else {
+         // ma ('*':ps) []     = ma ps []
+         ixPatt++; goto tailcall;
+      }
+   }
+
+   // simpler cases now.  Deal with '?' wildcards.
+   //
+   // ma ('?':ps) (i:is) = ma ps is
+   // ma ('?':ps) []     = False
+   if (havePatt && pIsQuery(currPatt)) {
+      if (haveInput) {
+         ixPatt++; ixInput++; goto tailcall;
+      } else {
+         return False;
+      }
+   }
+
+   // obvious case with literal chars in the pattern
+   //
+   // ma (p:ps)   (i:is) = p == i && ma ps is
+   if (havePatt && haveInput) {
+      if (!pattEQinp(currPatt,currInput)) return False;
+      ixPatt++; ixInput++; goto tailcall;
+   }
+
+   // if we run out of input before we run out of pattern, we must fail
+   // ma (p:ps)   []     = False
+   if (havePatt && !haveInput) return False;
+
+   // if we run out of pattern before we run out of input, the
+   // verdict depends on the matching mode.  If we are trying to
+   // match exactly (the pattern must consume the entire input)
+   // then the outcome is failure.  However, if we're merely attempting
+   // to match some prefix of the input, then we have been successful.
+   //
+   // ma []       (i:is) = False -- m-all, True for m-prefix
+   if (!havePatt && haveInput) {
+      return matchAll ? False // match-all
+                      : True; // match-prefix
+   }
+
+   // finally, if both sequence and input are both completely
+   // consumed, then we were successful, regardless of matching mode.
+   if (!havePatt && !haveInput) return True;
+
+   // end of cases
+   vg_assert(0);
+}
+
+
+/* And a parameterization of the above, to make it do
+ string matching.
+*/
+/* Callbacks parameterising VG_(generic_match) for plain string
+   matching: '*' is the multi-char wildcard, '?' the single-char
+   wildcard. */
+static Bool charIsStar ( void* pV ) { return *(Char*)pV == '*'; }
+static Bool charIsQuery ( void* pV ) { return *(Char*)pV == '?'; }
+/* Literal character equality.  The matcher handles wildcards before
+   calling this, hence the assertion. */
+static Bool char_p_EQ_i ( void* pV, void* cV ) {
+   Char p = *(Char*)pV;
+   Char c = *(Char*)cV;
+   vg_assert(p != '*' && p != '?');
+   return p == c;
+}
+/* Match-all string matcher: does 'patt' (which may contain '*' and
+   '?' wildcards) match the whole of 'input'?  The (void*) casts
+   discard const; the callbacks above only read through the
+   pointers, so the strings are not modified. */
+Bool VG_(string_match) ( const Char* patt, const Char* input )
+{
+   return VG_(generic_match)(
+             True/* match-all */,
+             (void*)patt, sizeof(UChar), VG_(strlen)(patt), 0,
+             (void*)input, sizeof(UChar), VG_(strlen)(input), 0,
+             charIsStar, charIsQuery, char_p_EQ_i
+          );
+}
+
+
+// test cases for the matcher (in match-all mode)
+// typedef struct { char* patt; char* input; Bool xres; } Test;
+//
+//static Test tests[] =
+// {
+// { "" ,"" , True },
+// { "a" ,"" , False },
+// { "a" ,"b" , False },
+// { "a" ,"a" , True },
+// { "a" ,"aa" , False },
+// { "*" ,"" , True },
+// { "**" ,"" , True },
+// { "*" ,"abc", True },
+// { "*a" ,"abc", False },
+// { "*b" ,"abc", False },
+// { "*bc" ,"abc", True },
+// { "a*b" ,"abc", False },
+// { "a*c" ,"abc", True },
+// { "*c" ,"abc", True },
+// { "c*c" ,"abc", False },
+// { "abc*" ,"abc", True },
+// { "abc**" ,"abc", True },
+// { "**abc" ,"abc", True },
+// { "**a*b*c**" ,"abc", True },
+// { "**a*b*d**" ,"abc", False },
+// { "a?b" ,"abc", False },
+// { "a?c" ,"abc", True },
+// { "?" ,"" , False },
+// { "?" ,"a" , True },
+// { "?" ,"ab" , False },
+// { "abcd" ,"abc", False },
+// { "ab" ,"abc", False },
+// { NULL ,NULL , False }
+// };
+//
+//int main ( void )
+//{
+// Test* t;
+// for (t = tests; t->patt; t++) {
+// printf("%10s %6s %s\n",
+// t->patt, t->input,
+//            VG_(string_match)((Char*)t->patt,(Char*)t->input)
+// == t->xres
+// ? "pass" : "FAIL" );
+// }
+// return 0;
+//}
+
+/*--------------------------------------------------------------------*/
+/*--- end m_seqmatch.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_signals.c.svn-base b/coregrind/.svn/text-base/m_signals.c.svn-base
new file mode 100644
index 0000000..e4896e2
--- /dev/null
+++ b/coregrind/.svn/text-base/m_signals.c.svn-base
@@ -0,0 +1,2227 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Implementation of POSIX signals. m_signals.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+/*
+ Signal handling.
+
+ There are 4 distinct classes of signal:
+
+ 1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
+ TRAP): these are signals as a result of an instruction fault. If
+ we get one while running client code, then we just do the
+ appropriate thing. If it happens while running Valgrind code, then
+ it indicates a Valgrind bug. Note that we "manually" implement
+ automatic stack growth, such that if a fault happens near the
+ client process stack, it is extended in the same way the kernel
+ would, and the fault is never reported to the client program.
+
+   2. Asynchronous variants of the above signals: If the kernel tries
+ to deliver a sync signal while it is blocked, it just kills the
+ process. Therefore, we can't block those signals if we want to be
+ able to report on bugs in Valgrind. This means that we're also
+ open to receiving those signals from other processes, sent with
+ kill. We could get away with just dropping them, since they aren't
+ really signals that processes send to each other.
+
+ 3. Synchronous, general signals. If a thread/process sends itself
+   a signal with kill, it's expected to be synchronous: ie, the signal
+ will have been delivered by the time the syscall finishes.
+
+   4. Asynchronous, general signals.  All other signals, sent by
+ another process with kill. These are generally blocked, except for
+ two special cases: we poll for them each time we're about to run a
+ thread for a time quanta, and while running blocking syscalls.
+
+
+ In addition, we define two signals for internal use: SIGVGCHLD and
+ SIGVGKILL. SIGVGCHLD is used to indicate thread death to any
+ reaping thread (the master thread). It is always blocked and never
+ delivered as a signal; it is always polled with sigtimedwait.
+
+ SIGVGKILL is used to terminate threads. When one thread wants
+ another to exit, it will set its exitreason and send it SIGVGKILL
+ if it appears to be blocked in a syscall.
+
+
+ We use a kernel thread for each application thread. When the
+ thread allows itself to be open to signals, it sets the thread
+ signal mask to what the client application set it to. This means
+ that we get the kernel to do all signal routing: under Valgrind,
+ signals get delivered in the same way as in the non-Valgrind case
+ (the exception being for the sync signal set, since they're almost
+ always unblocked).
+ */
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debugger.h" // For VG_(start_debugger)
+#include "pub_core_errormgr.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_machine.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_signals.h"
+#include "pub_core_sigframe.h" // For VG_(sigframe_create)()
+#include "pub_core_stacks.h" // For VG_(change_stack)()
+#include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_coredump.h"
+
+
+/* ---------------------------------------------------------------------
+ Forwards decls.
+ ------------------------------------------------------------------ */
+
+static void sync_signalhandler ( Int sigNo, vki_siginfo_t *info,
+ struct vki_ucontext * );
+static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
+ struct vki_ucontext * );
+static void sigvgkill_handler ( Int sigNo, vki_siginfo_t *info,
+ struct vki_ucontext * );
+
+static const Char *signame(Int sigNo);
+
+/* Maximum usable signal. */
+Int VG_(max_signal) = _VKI_NSIG;
+
+#define N_QUEUED_SIGNALS 8
+
+typedef struct SigQueue {
+ Int next;
+ vki_siginfo_t sigs[N_QUEUED_SIGNALS];
+} SigQueue;
+
+/* ------ Macros for pulling stuff out of ucontexts ------ */
+
+/* Q: what does UCONTEXT_SYSCALL_SYSRES do? A: let's suppose the
+ machine context (uc) reflects the situation that a syscall had just
+ completed, quite literally -- that is, that the program counter was
+ now at the instruction following the syscall. (or we're slightly
+ downstream, but we're sure no relevant register has yet changed
+ value.) Then UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
+ the result of the syscall; it does this by fishing relevant bits of
+ the machine state out of the uc. Of course if the program counter
+ was somewhere else entirely then the result is likely to be
+ meaningless, so the caller of UCONTEXT_SYSCALL_SYSRES has to be
+ very careful to pay attention to the results only when it is sure
+ that the said constraint on the program counter is indeed valid. */
+#if defined(VGP_x86_linux)
+# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.eip)
+# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.esp)
+# define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.ebp)
+# define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.eax)
+# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
+ /* Convert the value in uc_mcontext.eax into a SysRes. */ \
+ VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
+# define VG_UCONTEXT_LINK_REG(uc) 0 /* Dude, where's my LR? */
+
+#elif defined(VGP_amd64_linux)
+# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.rip)
+# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.rsp)
+# define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.rbp)
+# define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.rax)
+# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
+ /* Convert the value in uc_mcontext.rax into a SysRes. */ \
+ VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
+# define VG_UCONTEXT_LINK_REG(uc) 0 /* No LR on amd64 either */
+
+#elif defined(VGP_ppc32_linux)
+/* Comments from Paul Mackerras 25 Nov 05:
+
+ > I'm tracking down a problem where V's signal handling doesn't
+ > work properly on a ppc440gx running 2.4.20. The problem is that
+ > the ucontext being presented to V's sighandler seems completely
+ > bogus.
+
+ > V's kernel headers and hence ucontext layout are derived from
+ > 2.6.9. I compared include/asm-ppc/ucontext.h from 2.4.20 and
+ > 2.6.13.
+
+ > Can I just check my interpretation: the 2.4.20 one contains the
+ > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
+ > to said struct? And so if V is using the 2.6.13 struct then a
+ > 2.4.20 one will make no sense to it.
+
+ Not quite... what is inline in the 2.4.20 version is a
+ sigcontext_struct, not an mcontext. The sigcontext looks like
+ this:
+
+ struct sigcontext_struct {
+ unsigned long _unused[4];
+ int signal;
+ unsigned long handler;
+ unsigned long oldmask;
+ struct pt_regs *regs;
+ };
+
+ The regs pointer of that struct ends up at the same offset as the
+ uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
+ same as the mc_gregs field of the mcontext. In fact the integer
+ regs are followed in memory by the floating point regs on 2.4.20.
+
+ Thus if you are using the 2.6 definitions, it should work on 2.4.20
+ provided that you go via uc->uc_regs rather than looking in
+ uc->uc_mcontext directly.
+
+ There is another subtlety: 2.4.20 doesn't save the vector regs when
+ delivering a signal, and 2.6.x only saves the vector regs if the
+ process has ever used an altivec instructions. If 2.6.x does save
+ the vector regs, it sets the MSR_VEC bit in
+ uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it. That bit
+ will always be clear under 2.4.20. So you can use that bit to tell
+ whether uc->uc_regs->mc_vregs is valid. */
+# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
+# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
+# define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
+# define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_R0])
+# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
+ /* Convert the values in uc_mcontext r3,cr into a SysRes. */ \
+ VG_(mk_SysRes_ppc32_linux)( \
+ (uc)->uc_regs->mc_gregs[VKI_PT_R3], \
+ (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1) \
+ )
+# define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_LNK])
+
+#elif defined(VGP_ppc64_linux)
+# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
+# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
+# define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
+# define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_R0])
+ /* Dubious hack: if there is an error, only consider the lowest 8
+ bits of r3. memcheck/tests/post-syscall shows a case where an
+ interrupted syscall should have produced a ucontext with 0x4
+ (VKI_EINTR) in r3 but is in fact producing 0x204. */
+ /* Awaiting clarification from PaulM. Evidently 0x204 is
+ ERESTART_RESTARTBLOCK, which shouldn't have made it into user
+ space. */
+  /* Build a SysRes from r3 (value) and the CR0.SO bit (error flag).
+     See the 'dubious hack' comment above: on error, only the low 8
+     bits of r3 are taken as the error value. */
+  static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
+  {
+     ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
+     ULong r3 = uc->uc_mcontext.gp_regs[VKI_PT_R3];
+     if (err) r3 &= 0xFF;
+     return VG_(mk_SysRes_ppc64_linux)( r3, err );
+  }
+# define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_LNK])
+
+#elif defined(VGP_ppc32_aix5)
+
+ /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+# include <ucontext.h>
+ /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+  /* Accessors for fishing guest register state out of an AIX5
+     ucontext.  On ppc32-aix5 the machine state lives in
+     uc->uc_mcontext.jmp_context (a 'struct mstsave'). */
+  static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct mstsave* jc = &mc->jmp_context;
+     return jc->iar;   /* instruction address register */
+  }
+  static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct mstsave* jc = &mc->jmp_context;
+     return jc->gpr[1];   /* r1 is used as the stack pointer */
+  }
+  static inline Addr VG_UCONTEXT_SYSCALL_NUM( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct mstsave* jc = &mc->jmp_context;
+     return jc->gpr[2];   /* syscall number is taken from r2 */
+  }
+  static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct mstsave* jc = &mc->jmp_context;
+     /* r3 and r4 are handed to the SysRes constructor (value, error) */
+     return VG_(mk_SysRes_ppc32_aix5)( jc->gpr[3], jc->gpr[4] );
+  }
+  static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct mstsave* jc = &mc->jmp_context;
+     return jc->lr;
+  }
+  static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
+     /* ppc has no dedicated frame pointer; reuse the stack pointer. */
+     return VG_UCONTEXT_STACK_PTR(ucV);
+  }
+
+#elif defined(VGP_ppc64_aix5)
+
+ /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
+# include <ucontext.h>
+ /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
+  /* As for ppc32-aix5 above, but on ppc64-aix5 the machine state in
+     uc->uc_mcontext.jmp_context is a 'struct __context64'. */
+  static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct __context64* jc = &mc->jmp_context;
+     return jc->iar;   /* instruction address register */
+  }
+  static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct __context64* jc = &mc->jmp_context;
+     return jc->gpr[1];   /* r1 is used as the stack pointer */
+  }
+  static inline Addr VG_UCONTEXT_SYSCALL_NUM( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct __context64* jc = &mc->jmp_context;
+     return jc->gpr[2];   /* syscall number is taken from r2 */
+  }
+ static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
+ ucontext_t* uc = (ucontext_t*)ucV;
+ struct __jmpbuf* mc = &(uc->uc_mcontext);
+ struct __context64* jc = &mc->jmp_context;
+ return VG_(mk_SysRes_ppc32_aix5)( jc->gpr[3], jc->gpr[4] );
+ }
+  static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
+     ucontext_t* uc = (ucontext_t*)ucV;
+     struct __jmpbuf* mc = &(uc->uc_mcontext);
+     struct __context64* jc = &mc->jmp_context;
+     return jc->lr;   /* link register */
+  }
+  static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
+     /* ppc has no dedicated frame pointer; reuse the stack pointer. */
+     return VG_UCONTEXT_STACK_PTR(ucV);
+  }
+
+#else
+# error Unknown platform
+#endif
+
+
+/* ------ Macros for pulling stuff out of siginfos ------ */
+
+/* These macros allow use of uniform names when working with
+ both the Linux and AIX vki definitions. */
+#if defined(VGO_linux)
+# define VKI_SIGINFO_si_addr _sifields._sigfault._addr
+# define VKI_SIGINFO_si_pid _sifields._kill._pid
+#elif defined(VGO_aix5)
+# define VKI_SIGINFO_si_addr si_addr
+# define VKI_SIGINFO_si_pid si_pid
+#else
+# error Unknown OS
+#endif
+
+
+/* ---------------------------------------------------------------------
+ HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
+ ------------------------------------------------------------------ */
+
+/* ---------------------------------------------------------------------
+ Signal state for this process.
+ ------------------------------------------------------------------ */
+
+
+/* Base-ment of these arrays[_VKI_NSIG].
+
+ Valid signal numbers are 1 .. _VKI_NSIG inclusive.
+ Rather than subtracting 1 for indexing these arrays, which
+ is tedious and error-prone, they are simply dimensioned 1 larger,
+ and entry [0] is not used.
+ */
+
+
+/* -----------------------------------------------------
+ Static client signal state (SCSS). This is the state
+ that the client thinks it has the kernel in.
+ SCSS records verbatim the client's settings. These
+ are mashed around only when SKSS is calculated from it.
+ -------------------------------------------------- */
+
+typedef
+ struct {
+ void* scss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
+ client's handler */
+ UInt scss_flags;
+ vki_sigset_t scss_mask;
+ void* scss_restorer; /* where sigreturn goes */
+ }
+ SCSS_Per_Signal;
+
+typedef
+ struct {
+ /* per-signal info */
+ SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];
+
+ /* Additional elements to SCSS not stored here:
+ - for each thread, the thread's blocking mask
+ - for each thread in WaitSIG, the set of waited-on sigs
+ */
+ }
+ SCSS;
+
+static SCSS scss;
+
+
+/* -----------------------------------------------------
+ Static kernel signal state (SKSS). This is the state
+ that we have the kernel in. It is computed from SCSS.
+ -------------------------------------------------- */
+
+/* Let's do:
+ sigprocmask assigns to all thread masks
+ so that at least everything is always consistent
+ Flags:
+ SA_SIGINFO -- we always set it, and honour it for the client
+ SA_NOCLDSTOP -- passed to kernel
+ SA_ONESHOT or SA_RESETHAND -- pass through
+ SA_RESTART -- we observe this but set our handlers to always restart
+ SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
+ SA_ONSTACK -- pass through
+ SA_NOCLDWAIT -- pass through
+*/
+
+
+typedef
+ struct {
+ void* skss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN
+ or ptr to our handler */
+ UInt skss_flags;
+ /* There is no skss_mask, since we know that we will always ask
+ for all signals to be blocked in our sighandlers. */
+ /* Also there is no skss_restorer. */
+ }
+ SKSS_Per_Signal;
+
+typedef
+ struct {
+ SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
+ }
+ SKSS;
+
+static SKSS skss;
+
+/* Does the client currently have signal 'sigNo' set to SIG_IGN? */
+static Bool is_sig_ign(Int sigNo)
+{
+   SCSS_Per_Signal* ent;
+   vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);
+   ent = &scss.scss_per_sig[sigNo];
+   return ent->scss_handler == VKI_SIG_IGN;
+}
+
+/* ---------------------------------------------------------------------
+ Compute the SKSS required by the current SCSS.
+ ------------------------------------------------------------------ */
+
+/* Debug aid: dump the computed kernel-side signal state (SKSS). */
+static
+void pp_SKSS ( void )
+{
+   Int signo;
+   VG_(printf)("\n\nSKSS:\n");
+   for (signo = 1; signo <= _VKI_NSIG; signo++) {
+      SKSS_Per_Signal* ent = &skss.skss_per_sig[signo];
+      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", signo,
+                  ent->skss_handler, ent->skss_flags );
+   }
+}
+
+/* This is the core, clever bit. Computation is as follows:
+
+ For each signal
+ handler = if client has a handler, then our handler
+ else if client is DFL, then our handler as well
+ else (client must be IGN)
+           then handler is IGN
+*/
+/* Compute 'dst' (the table we will install in the kernel) from the
+   global 'scss' (the client's view), per the policy described above.
+   Writes every entry of dst->skss_per_sig[1.._VKI_NSIG]. */
+static
+void calculate_SKSS_from_SCSS ( SKSS* dst )
+{
+   Int sig;
+   UInt scss_flags;
+   UInt skss_flags;
+
+   for (sig = 1; sig <= _VKI_NSIG; sig++) {
+      void *skss_handler;
+      void *scss_handler;
+
+      scss_handler = scss.scss_per_sig[sig].scss_handler;
+      scss_flags = scss.scss_per_sig[sig].scss_flags;
+
+      switch(sig) {
+      case VKI_SIGSEGV:
+      case VKI_SIGBUS:
+      case VKI_SIGFPE:
+      case VKI_SIGILL:
+      case VKI_SIGTRAP:
+         /* For these, we always want to catch them and report, even
+            if the client code doesn't. */
+         skss_handler = sync_signalhandler;
+         break;
+
+      case VKI_SIGCONT:
+         /* Let the kernel handle SIGCONT unless the client is actually
+            catching it. */
+      case VKI_SIGCHLD:
+      case VKI_SIGWINCH:
+      case VKI_SIGURG:
+         /* For signals which have a default action of Ignore,
+            only set a handler if the client has set a signal handler.
+            Otherwise the kernel will interrupt a syscall which
+            wouldn't have otherwise been interrupted. */
+         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
+            skss_handler = VKI_SIG_DFL;
+         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
+            skss_handler = VKI_SIG_IGN;
+         else
+            skss_handler = async_signalhandler;
+         break;
+
+      default:
+         // VKI_SIGVG* are runtime variables, so we can't make them
+         // cases in the switch, so we handle them in the 'default' case.
+         if (sig == VG_SIGVGKILL)
+            skss_handler = sigvgkill_handler;
+         else {
+            if (scss_handler == VKI_SIG_IGN)
+               skss_handler = VKI_SIG_IGN;
+            else
+               skss_handler = async_signalhandler;
+         }
+         break;
+      }
+
+      /* Flags */
+
+      skss_flags = 0;
+
+      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
+      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);
+
+      /* SA_ONESHOT: ignore client setting */
+
+      /* SA_RESTART: ignore client setting and always set it for us.
+         Though we never rely on the kernel to restart a
+         syscall, we observe whether it wanted to restart the syscall
+         or not, which is needed by
+         VG_(fixup_guest_state_after_syscall_interrupted) */
+      skss_flags |= VKI_SA_RESTART;
+
+      /* SA_NOMASK: ignore it */
+
+      /* SA_ONSTACK: client setting is irrelevant here */
+      /* We don't set a signal stack, so ignore */
+
+      /* always ask for SA_SIGINFO */
+      skss_flags |= VKI_SA_SIGINFO;
+
+      /* use our own restorer */
+      skss_flags |= VKI_SA_RESTORER;
+
+      /* Create SKSS entry for this signal. */
+      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
+         dst->skss_per_sig[sig].skss_handler = skss_handler;
+      else
+         /* SIGKILL/SIGSTOP can't be caught anyway; record DFL so the
+            sanity checks below hold. */
+         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;
+
+      dst->skss_per_sig[sig].skss_flags = skss_flags;
+   }
+
+   /* Sanity checks. */
+   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
+   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);
+
+   if (0)
+      pp_SKSS();
+}
+
+
+/* ---------------------------------------------------------------------
+ After a possible SCSS change, update SKSS and the kernel itself.
+ ------------------------------------------------------------------ */
+
+// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
+// to a number before converting it to a string... sigh.
+extern void my_sigreturn(void);
+
+#if defined(VGP_x86_linux)
+# define _MY_SIGRETURN(name) \
+ ".text\n" \
+ "my_sigreturn:\n" \
+ " movl $" #name ", %eax\n" \
+ " int $0x80\n" \
+ ".previous\n"
+#elif defined(VGP_amd64_linux)
+# define _MY_SIGRETURN(name) \
+ ".text\n" \
+ "my_sigreturn:\n" \
+ " movq $" #name ", %rax\n" \
+ " syscall\n" \
+ ".previous\n"
+#elif defined(VGP_ppc32_linux)
+# define _MY_SIGRETURN(name) \
+ ".text\n" \
+ "my_sigreturn:\n" \
+ " li 0, " #name "\n" \
+ " sc\n" \
+ ".previous\n"
+#elif defined(VGP_ppc64_linux)
+# define _MY_SIGRETURN(name) \
+ ".align 2\n" \
+ ".globl my_sigreturn\n" \
+ ".section \".opd\",\"aw\"\n" \
+ ".align 3\n" \
+ "my_sigreturn:\n" \
+ ".quad .my_sigreturn,.TOC.@tocbase,0\n" \
+ ".previous\n" \
+ ".type .my_sigreturn,@function\n" \
+ ".globl .my_sigreturn\n" \
+ ".my_sigreturn:\n" \
+ " li 0, " #name "\n" \
+ " sc\n"
+#elif defined(VGP_ppc32_aix5)
+# define _MY_SIGRETURN(name) \
+ ".globl my_sigreturn\n" \
+ "my_sigreturn:\n" \
+ ".long 0\n"
+#elif defined(VGP_ppc64_aix5)
+# define _MY_SIGRETURN(name) \
+ ".globl my_sigreturn\n" \
+ "my_sigreturn:\n" \
+ ".long 0\n"
+#else
+# error Unknown platform
+#endif
+
+#define MY_SIGRETURN(name) _MY_SIGRETURN(name)
+asm(
+ MY_SIGRETURN(__NR_rt_sigreturn)
+);
+
+
+static void handle_SCSS_change ( Bool force_update )
+{
+ Int res, sig;
+ SKSS skss_old;
+ struct vki_sigaction ksa, ksa_old;
+
+ /* Remember old SKSS and calculate new one. */
+ skss_old = skss;
+ calculate_SKSS_from_SCSS ( &skss );
+
+ /* Compare the new SKSS entries vs the old ones, and update kernel
+ where they differ. */
+ for (sig = 1; sig <= VG_(max_signal); sig++) {
+
+ /* Trying to do anything with SIGKILL is pointless; just ignore
+ it. */
+ if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
+ continue;
+
+ if (!force_update) {
+ if ((skss_old.skss_per_sig[sig].skss_handler
+ == skss.skss_per_sig[sig].skss_handler)
+ && (skss_old.skss_per_sig[sig].skss_flags
+ == skss.skss_per_sig[sig].skss_flags))
+ /* no difference */
+ continue;
+ }
+
+ ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
+ ksa.sa_flags = skss.skss_per_sig[sig].skss_flags;
+# if !defined(VGP_ppc32_linux) && !defined(VGP_ppc32_aix5) \
+ && !defined(VGP_ppc64_aix5)
+ ksa.sa_restorer = my_sigreturn;
+# endif
+ /* Re above ifdef (also the assertion below), PaulM says:
+ The sa_restorer field is not used at all on ppc. Glibc
+ converts the sigaction you give it into a kernel sigaction,
+ but it doesn't put anything in the sa_restorer field.
+ */
+
+ /* block all signals in handler */
+ VG_(sigfillset)( &ksa.sa_mask );
+ VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
+ VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );
+
+ if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
+ VG_DMSG("setting ksig %d to: hdlr %p, flags 0x%lx, "
+ "mask(63..0) 0x%lx 0x%lx",
+ sig, ksa.ksa_handler,
+ (UWord)ksa.sa_flags,
+ (UWord)ksa.sa_mask.sig[1],
+ (UWord)ksa.sa_mask.sig[0]);
+
+ res = VG_(sigaction)( sig, &ksa, &ksa_old );
+ vg_assert(res == 0);
+
+ /* Since we got the old sigaction more or less for free, might
+ as well extract the maximum sanity-check value from it. */
+ if (!force_update) {
+ vg_assert(ksa_old.ksa_handler
+ == skss_old.skss_per_sig[sig].skss_handler);
+ vg_assert(ksa_old.sa_flags
+ == skss_old.skss_per_sig[sig].skss_flags);
+# if !defined(VGP_ppc32_linux) && !defined(VGP_ppc32_aix5) \
+ && !defined(VGP_ppc64_aix5)
+ vg_assert(ksa_old.sa_restorer
+ == my_sigreturn);
+# endif
+ VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
+ VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
+ vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
+ }
+ }
+}
+
+
+/* ---------------------------------------------------------------------
+ Update/query SCSS in accordance with client requests.
+ ------------------------------------------------------------------ */
+
+/* Logic for this alt-stack stuff copied directly from do_sigaltstack
+ in kernel/signal.[ch] */
+
+/* True if we are on the alternate signal stack. */
+static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
+{
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+
+ return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
+}
+
+static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
+{
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+
+ return (tst->altstack.ss_size == 0
+ ? VKI_SS_DISABLE
+ : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
+}
+
+
+SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
+{
+ Addr m_SP;
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ m_SP = VG_(get_SP)(tid);
+
+ if (VG_(clo_trace_signals))
+ VG_EMSG("sys_sigaltstack: tid %d, "
+ "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)",
+ tid, (void*)ss,
+ ss ? ss->ss_sp : 0,
+ (ULong)(ss ? ss->ss_size : 0),
+ (ULong)(ss ? ss->ss_flags : 0),
+ (void*)oss, (void*)m_SP);
+
+ if (oss != NULL) {
+ oss->ss_sp = VG_(threads)[tid].altstack.ss_sp;
+ oss->ss_size = VG_(threads)[tid].altstack.ss_size;
+ oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
+ | sas_ss_flags(tid, m_SP);
+ }
+
+ if (ss != NULL) {
+ if (on_sig_stack(tid, VG_(get_SP)(tid))) {
+ return VG_(mk_SysRes_Error)( VKI_EPERM );
+ }
+ if (ss->ss_flags != VKI_SS_DISABLE
+ && ss->ss_flags != VKI_SS_ONSTACK
+ && ss->ss_flags != 0) {
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+ if (ss->ss_flags == VKI_SS_DISABLE) {
+ VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
+ } else {
+ if (ss->ss_size < VKI_MINSIGSTKSZ) {
+ return VG_(mk_SysRes_Error)( VKI_ENOMEM );
+ }
+
+ VG_(threads)[tid].altstack.ss_sp = ss->ss_sp;
+ VG_(threads)[tid].altstack.ss_size = ss->ss_size;
+ VG_(threads)[tid].altstack.ss_flags = 0;
+ }
+ }
+ return VG_(mk_SysRes_Success)( 0 );
+}
+
+
+SysRes VG_(do_sys_sigaction) ( Int signo,
+ const struct vki_sigaction *new_act,
+ struct vki_sigaction *old_act )
+{
+ if (VG_(clo_trace_signals))
+ VG_EMSG("sys_sigaction: sigNo %d, "
+ "new %#lx, old %#lx, new flags 0x%llx",
+ signo, (UWord)new_act, (UWord)old_act,
+ (ULong)(new_act ? new_act->sa_flags : 0));
+
+ /* Rule out various error conditions. The aim is to ensure that if
+ when the call is passed to the kernel it will definitely
+ succeed. */
+
+ /* Reject out-of-range signal numbers. */
+ if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;
+
+ /* don't let them use our signals */
+ if ( (signo > VG_SIGVGRTUSERMAX)
+ && new_act
+ && !(new_act->ksa_handler == VKI_SIG_DFL
+ || new_act->ksa_handler == VKI_SIG_IGN) )
+ goto bad_signo_reserved;
+
+ /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
+ if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
+ && new_act
+ && new_act->ksa_handler != VKI_SIG_DFL)
+ goto bad_sigkill_or_sigstop;
+
+ /* If the client supplied non-NULL old_act, copy the relevant SCSS
+ entry into it. */
+ if (old_act) {
+ old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
+ old_act->sa_flags = scss.scss_per_sig[signo].scss_flags;
+ old_act->sa_mask = scss.scss_per_sig[signo].scss_mask;
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
+# endif
+ }
+
+ /* And now copy new SCSS entry from new_act. */
+ if (new_act) {
+ scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
+ scss.scss_per_sig[signo].scss_flags = new_act->sa_flags;
+ scss.scss_per_sig[signo].scss_mask = new_act->sa_mask;
+
+ scss.scss_per_sig[signo].scss_restorer = 0;
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
+# endif
+
+ VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
+ VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
+ }
+
+ /* All happy bunnies ... */
+ if (new_act) {
+ handle_SCSS_change( False /* lazy update */ );
+ }
+ return VG_(mk_SysRes_Success)( 0 );
+
+ bad_signo:
+ if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
+ VG_UMSG("Warning: bad signal number %d in sigaction()", signo);
+ }
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ bad_signo_reserved:
+ if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
+ VG_UMSG("Warning: ignored attempt to set %s handler in sigaction();",
+ signame(signo));
+ VG_UMSG(" the %s signal is used internally by Valgrind",
+ signame(signo));
+ }
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+
+ bad_sigkill_or_sigstop:
+ if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
+ VG_UMSG("Warning: ignored attempt to set %s handler in sigaction();",
+ signame(signo));
+ VG_UMSG(" the %s signal is uncatchable",
+ signame(signo));
+ }
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+}
+
+
+static
+void do_sigprocmask_bitops ( Int vki_how,
+ vki_sigset_t* orig_set,
+ vki_sigset_t* modifier )
+{
+ switch (vki_how) {
+ case VKI_SIG_BLOCK:
+ VG_(sigaddset_from_set)( orig_set, modifier );
+ break;
+ case VKI_SIG_UNBLOCK:
+ VG_(sigdelset_from_set)( orig_set, modifier );
+ break;
+ case VKI_SIG_SETMASK:
+ *orig_set = *modifier;
+ break;
+ default:
+ VG_(core_panic)("do_sigprocmask_bitops");
+ break;
+ }
+}
+
+static
+HChar* format_sigset ( const vki_sigset_t* set )
+{
+ static HChar buf[128];
+ int w;
+
+ VG_(strcpy)(buf, "");
+
+ for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
+ {
+# if _VKI_NSIG_BPW == 32
+ VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
+ set ? (ULong)set->sig[w] : 0);
+# elif _VKI_NSIG_BPW == 64
+ VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
+ set ? (ULong)set->sig[w] : 0);
+# else
+# error "Unsupported value for _VKI_NSIG_BPW"
+# endif
+ }
+
+ return buf;
+}
+
+/*
+ This updates the thread's signal mask. There's no such thing as a
+ process-wide signal mask.
+
+ Note that the thread signal masks are an implicit part of SCSS,
+ which is why this routine is allowed to mess with them.
+*/
+static
+void do_setmask ( ThreadId tid,
+ Int how,
+ vki_sigset_t* newset,
+ vki_sigset_t* oldset )
+{
+ if (VG_(clo_trace_signals))
+ VG_EMSG("do_setmask: tid = %d how = %d (%s), newset = %p (%s)",
+ tid, how,
+ how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
+ how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
+ how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
+ newset, newset ? format_sigset(newset) : "NULL" );
+
+ /* Just do this thread. */
+ vg_assert(VG_(is_valid_tid)(tid));
+ if (oldset) {
+ *oldset = VG_(threads)[tid].sig_mask;
+ if (VG_(clo_trace_signals))
+ VG_EMSG("\toldset=%p %s", oldset, format_sigset(oldset));
+ }
+ if (newset) {
+ do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
+ VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
+ VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
+ VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
+ }
+}
+
+
+SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
+ Int how,
+ vki_sigset_t* set,
+ vki_sigset_t* oldset )
+{
+ switch(how) {
+ case VKI_SIG_BLOCK:
+ case VKI_SIG_UNBLOCK:
+ case VKI_SIG_SETMASK:
+ vg_assert(VG_(is_valid_tid)(tid));
+ do_setmask ( tid, how, set, oldset );
+ return VG_(mk_SysRes_Success)( 0 );
+
+ default:
+ VG_DMSG("sigprocmask: unknown 'how' field %d", how);
+ return VG_(mk_SysRes_Error)( VKI_EINVAL );
+ }
+}
+
+
+/* ---------------------------------------------------------------------
+ LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
+ ------------------------------------------------------------------ */
+
+/* ---------------------------------------------------------------------
+ Handy utilities to block/restore all host signals.
+ ------------------------------------------------------------------ */
+
+/* Block all host signals, dumping the old mask in *saved_mask. */
+static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
+{
+ Int ret;
+ vki_sigset_t block_procmask;
+ VG_(sigfillset)(&block_procmask);
+ ret = VG_(sigprocmask)
+ (VKI_SIG_SETMASK, &block_procmask, saved_mask);
+ vg_assert(ret == 0);
+}
+
+/* Restore the blocking mask using the supplied saved one. */
+static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
+{
+ Int ret;
+ ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
+ vg_assert(ret == 0);
+}
+
+void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
+{
+ block_all_host_signals(saved_mask);
+ if (VG_(threads)[tid].sig_queue != NULL) {
+ VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
+ VG_(threads)[tid].sig_queue = NULL;
+ }
+ restore_all_host_signals(saved_mask);
+}
+
+/* ---------------------------------------------------------------------
+ The signal simulation proper. A simplified version of what the
+ Linux kernel does.
+ ------------------------------------------------------------------ */
+
+/* Set up a stack frame (VgSigContext) for the client's signal
+ handler. */
+static
+void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *uc )
+{
+ Addr esp_top_of_frame;
+ ThreadState* tst;
+ Int sigNo = siginfo->si_signo;
+
+ vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = & VG_(threads)[tid];
+
+ if (VG_(clo_trace_signals)) {
+ VG_DMSG("push_signal_frame (thread %d): signal %d", tid, sigNo);
+ VG_(get_and_pp_StackTrace)(tid, 10);
+ }
+
+ if (/* this signal asked to run on an alt stack */
+ (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
+ && /* there is a defined and enabled alt stack, which we're not
+ already using. Logic from get_sigframe in
+ arch/i386/kernel/signal.c. */
+ sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
+ ) {
+ esp_top_of_frame
+ = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
+ if (VG_(clo_trace_signals))
+ VG_DMSG("delivering signal %d (%s) to thread %d: "
+ "on ALT STACK (%p-%p; %ld bytes)",
+ sigNo, signame(sigNo), tid, tst->altstack.ss_sp,
+ (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
+ (unsigned long)tst->altstack.ss_size );
+
+ /* Signal delivery to tools */
+ VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );
+
+ } else {
+ esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
+
+ /* Signal delivery to tools */
+ VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
+ }
+
+ vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
+ vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
+
+ /* This may fail if the client stack is busted; if that happens,
+ the whole process will exit rather than simply calling the
+ signal handler. */
+ VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
+ scss.scss_per_sig[sigNo].scss_handler,
+ scss.scss_per_sig[sigNo].scss_flags,
+ &tst->sig_mask,
+ scss.scss_per_sig[sigNo].scss_restorer);
+}
+
+
+static const Char *signame(Int sigNo)
+{
+ static Char buf[20];
+
+ switch(sigNo) {
+ case VKI_SIGHUP: return "SIGHUP";
+ case VKI_SIGINT: return "SIGINT";
+ case VKI_SIGQUIT: return "SIGQUIT";
+ case VKI_SIGILL: return "SIGILL";
+ case VKI_SIGTRAP: return "SIGTRAP";
+ case VKI_SIGABRT: return "SIGABRT";
+ case VKI_SIGBUS: return "SIGBUS";
+ case VKI_SIGFPE: return "SIGFPE";
+ case VKI_SIGKILL: return "SIGKILL";
+ case VKI_SIGUSR1: return "SIGUSR1";
+ case VKI_SIGUSR2: return "SIGUSR2";
+ case VKI_SIGSEGV: return "SIGSEGV";
+ case VKI_SIGPIPE: return "SIGPIPE";
+ case VKI_SIGALRM: return "SIGALRM";
+ case VKI_SIGTERM: return "SIGTERM";
+# if defined(VKI_SIGSTKFLT)
+ case VKI_SIGSTKFLT: return "SIGSTKFLT";
+# endif
+ case VKI_SIGCHLD: return "SIGCHLD";
+ case VKI_SIGCONT: return "SIGCONT";
+ case VKI_SIGSTOP: return "SIGSTOP";
+ case VKI_SIGTSTP: return "SIGTSTP";
+ case VKI_SIGTTIN: return "SIGTTIN";
+ case VKI_SIGTTOU: return "SIGTTOU";
+ case VKI_SIGURG: return "SIGURG";
+ case VKI_SIGXCPU: return "SIGXCPU";
+ case VKI_SIGXFSZ: return "SIGXFSZ";
+ case VKI_SIGVTALRM: return "SIGVTALRM";
+ case VKI_SIGPROF: return "SIGPROF";
+ case VKI_SIGWINCH: return "SIGWINCH";
+ case VKI_SIGIO: return "SIGIO";
+ case VKI_SIGPWR: return "SIGPWR";
+# if defined(VKI_SIGUNUSED)
+ case VKI_SIGUNUSED: return "SIGUNUSED";
+# endif
+
+ case VKI_SIGRTMIN ... VKI_SIGRTMAX:
+ VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
+ return buf;
+
+ default:
+ VG_(sprintf)(buf, "SIG%d", sigNo);
+ return buf;
+ }
+}
+
+/* Hit ourselves with a signal using the default handler */
+void VG_(kill_self)(Int sigNo)
+{
+ vki_sigset_t mask, origmask;
+ struct vki_sigaction sa, origsa;
+
+ sa.ksa_handler = VKI_SIG_DFL;
+ sa.sa_flags = 0;
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ sa.sa_restorer = 0;
+# endif
+ VG_(sigemptyset)(&sa.sa_mask);
+
+ VG_(sigaction)(sigNo, &sa, &origsa);
+
+ VG_(sigemptyset)(&mask);
+ VG_(sigaddset)(&mask, sigNo);
+ VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);
+
+ VG_(kill)(VG_(getpid)(), sigNo);
+
+ VG_(sigaction)(sigNo, &origsa, NULL);
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
+}
+
+// The si_code describes where the signal came from. Some come from the
+// kernel, eg.: seg faults, illegal opcodes. Some come from the user, eg.:
+// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
+// request (SI_ASYNCIO). There's lots of implementation-defined leeway in
+// POSIX, but the user vs. kernel distinction is what we want here.
+static Bool is_signal_from_kernel(int si_code)
+{
+#if defined(VGO_linux) || defined(VGO_aix5)
+ // On Linux, SI_USER is zero, negative values are from the user, positive
+ // values are from the kernel. There are SI_FROMUSER and SI_FROMKERNEL
+ // macros but we don't use them here because other platforms don't have
+ // them.
+ return ( si_code > VKI_SI_USER ? True : False );
+#else
+# error Unknown OS
+#endif
+}
+
+// This is an arbitrary si_code that we only use internally. It corresponds
+// to the value SI_KERNEL on Linux, but that's not really of any significance
+// as far as I can determine.
+#define VKI_SEGV_MADE_UP_GPF 0x80
+
+/*
+ Perform the default action of a signal. If the signal is fatal, it
+ marks all threads as needing to exit, but it doesn't actually kill
+ the process or thread.
+
+ If we're not being quiet, then print out some more detail about
+ fatal signals (esp. core dumping signals).
+ */
+static void default_action(const vki_siginfo_t *info, ThreadId tid)
+{
+ Int sigNo = info->si_signo;
+ Bool terminate = False; /* kills process */
+ Bool core = False; /* kills process w/ core */
+ struct vki_rlimit corelim;
+ Bool could_core;
+
+ vg_assert(VG_(is_running_thread)(tid));
+
+ switch(sigNo) {
+ case VKI_SIGQUIT: /* core */
+ case VKI_SIGILL: /* core */
+ case VKI_SIGABRT: /* core */
+ case VKI_SIGFPE: /* core */
+ case VKI_SIGSEGV: /* core */
+ case VKI_SIGBUS: /* core */
+ case VKI_SIGTRAP: /* core */
+ case VKI_SIGXCPU: /* core */
+ case VKI_SIGXFSZ: /* core */
+ terminate = True;
+ core = True;
+ break;
+
+ case VKI_SIGHUP: /* term */
+ case VKI_SIGINT: /* term */
+ case VKI_SIGKILL: /* term - we won't see this */
+ case VKI_SIGPIPE: /* term */
+ case VKI_SIGALRM: /* term */
+ case VKI_SIGTERM: /* term */
+ case VKI_SIGUSR1: /* term */
+ case VKI_SIGUSR2: /* term */
+ case VKI_SIGIO: /* term */
+ case VKI_SIGPWR: /* term */
+ case VKI_SIGSYS: /* term */
+ case VKI_SIGPROF: /* term */
+ case VKI_SIGVTALRM: /* term */
+ case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
+ terminate = True;
+ break;
+ }
+
+ vg_assert(!core || (core && terminate));
+
+ if (VG_(clo_trace_signals))
+ VG_DMSG("delivering %d (code %d) to default handler; action: %s%s",
+ sigNo, info->si_code, terminate ? "terminate" : "ignore",
+ core ? "+core" : "");
+
+ if (!terminate)
+ return; /* nothing to do */
+
+ could_core = core;
+
+ if (core) {
+ /* If they set the core-size limit to zero, don't generate a
+ core file */
+
+ VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);
+
+ if (corelim.rlim_cur == 0)
+ core = False;
+ }
+
+ if ( (VG_(clo_verbosity) > 1 ||
+ (could_core && is_signal_from_kernel(info->si_code))
+ ) &&
+ !VG_(clo_xml) ) {
+ VG_UMSG("");
+ VG_UMSG("Process terminating with default action of signal %d (%s)%s",
+ sigNo, signame(sigNo), core ? ": dumping core" : "");
+
+ /* Be helpful - decode some more details about this fault */
+ if (is_signal_from_kernel(info->si_code)) {
+ const Char *event = NULL;
+ Bool haveaddr = True;
+
+ switch(sigNo) {
+ case VKI_SIGSEGV:
+ switch(info->si_code) {
+ case VKI_SEGV_MAPERR: event = "Access not within mapped region"; break;
+ case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region"; break;
+ case VKI_SEGV_MADE_UP_GPF:
+ /* General Protection Fault: The CPU/kernel
+ isn't telling us anything useful, but this
+ is commonly the result of exceeding a
+ segment limit. */
+ event = "General Protection Fault";
+ haveaddr = False;
+ break;
+ }
+#if 0
+ {
+ HChar buf[110];
+ VG_(am_show_nsegments)(0,"post segfault");
+ VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
+ VG_(system)(buf);
+ }
+#endif
+ break;
+
+ case VKI_SIGILL:
+ switch(info->si_code) {
+ case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
+ case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
+ case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
+ case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
+ case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
+ case VKI_ILL_PRVREG: event = "Privileged register"; break;
+ case VKI_ILL_COPROC: event = "Coprocessor error"; break;
+ case VKI_ILL_BADSTK: event = "Internal stack error"; break;
+ }
+ break;
+
+ case VKI_SIGFPE:
+ switch (info->si_code) {
+ case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
+ case VKI_FPE_INTOVF: event = "Integer overflow"; break;
+ case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
+ case VKI_FPE_FLTOVF: event = "FP overflow"; break;
+ case VKI_FPE_FLTUND: event = "FP underflow"; break;
+ case VKI_FPE_FLTRES: event = "FP inexact"; break;
+ case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
+ case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
+ }
+ break;
+
+ case VKI_SIGBUS:
+ switch (info->si_code) {
+ case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
+ case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
+ case VKI_BUS_OBJERR: event = "Hardware error"; break;
+ }
+ break;
+ } /* switch (sigNo) */
+
+ if (event != NULL) {
+ if (haveaddr)
+ VG_UMSG(" %s at address %p", event, info->VKI_SIGINFO_si_addr);
+ else
+ VG_UMSG(" %s", event);
+ }
+ }
+ /* Print a stack trace. Be cautious if the thread's SP is in an
+ obviously stupid place (not mapped readable) that would
+ likely cause a segfault. */
+ if (VG_(is_valid_tid)(tid)) {
+ ExeContext* ec = VG_(am_is_valid_for_client)
+ (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
+ ? VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ )
+ : VG_(record_depth_1_ExeContext)( tid );
+ vg_assert(ec);
+ VG_(pp_ExeContext)( ec );
+ }
+ if (sigNo == VKI_SIGSEGV
+ && info && is_signal_from_kernel(info->si_code)
+ && info->si_code == VKI_SEGV_MAPERR) {
+ VG_UMSG(" If you believe this happened as a result of a stack" );
+ VG_UMSG(" overflow in your program's main thread (unlikely but");
+ VG_UMSG(" possible), you can try to increase the size of the" );
+ VG_UMSG(" main thread stack using the --main-stacksize= flag." );
+ // FIXME: assumes main ThreadId == 1
+ if (VG_(is_valid_tid)(1)) {
+ VG_UMSG(" The main thread stack size used in this run was %d.",
+ (Int)VG_(threads)[1].client_stack_szB);
+ }
+ }
+ }
+
+ if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
+ VG_(start_debugger)( tid );
+ }
+
+ if (core) {
+ const static struct vki_rlimit zero = { 0, 0 };
+
+ VG_(make_coredump)(tid, info, corelim.rlim_cur);
+
+ /* Make sure we don't get a confusing kernel-generated
+ coredump when we finally exit */
+ VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
+ }
+
+ /* stash fatal signal in main thread */
+ // what's this for?
+ //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;
+
+ /* everyone dies */
+ VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
+ VG_(threads)[tid].exitreason = VgSrc_FatalSig;
+ VG_(threads)[tid].os_state.fatalsig = sigNo;
+}
+
+/*
+ This does the business of delivering a signal to a thread. It may
+ be called from either a real signal handler, or from normal code to
+ cause the thread to enter the signal handler.
+
+ This updates the thread state, but it does not set it to be
+ Runnable.
+*/
+static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
+ const struct vki_ucontext *uc )
+{
+ Int sigNo = info->si_signo;
+ SCSS_Per_Signal *handler = &scss.scss_per_sig[sigNo];
+ void *handler_fn;
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+
+ if (VG_(clo_trace_signals))
+ VG_DMSG("delivering signal %d (%s):%d to thread %d",
+ sigNo, signame(sigNo), info->si_code, tid );
+
+ if (sigNo == VG_SIGVGKILL) {
+ /* If this is a SIGVGKILL, we're expecting it to interrupt any
+ blocked syscall. It doesn't matter whether the VCPU state is
+ set to restart or not, because we don't expect it will
+ execute any more client instructions. */
+ vg_assert(VG_(is_exiting)(tid));
+ return;
+ }
+
+ /* If the client specifies SIG_IGN, treat it as SIG_DFL.
+
+ If deliver_signal() is being called on a thread, we want
+ the signal to get through no matter what; if they're ignoring
+ it, then we do this override (this is so we can send it SIGSEGV,
+ etc). */
+ handler_fn = handler->scss_handler;
+ if (handler_fn == VKI_SIG_IGN)
+ handler_fn = VKI_SIG_DFL;
+
+ vg_assert(handler_fn != VKI_SIG_IGN);
+
+ if (handler_fn == VKI_SIG_DFL) {
+ default_action(info, tid);
+ } else {
+ /* Create a signal delivery frame, and set the client's %ESP and
+ %EIP so that when execution continues, we will enter the
+ signal handler with the frame on top of the client's stack,
+ as it expects.
+
+ Signal delivery can fail if the client stack is too small or
+ missing, and we can't push the frame. If that happens,
+ push_signal_frame will cause the whole process to exit when
+ we next hit the scheduler.
+ */
+ vg_assert(VG_(is_valid_tid)(tid));
+
+ push_signal_frame ( tid, info, uc );
+
+ if (handler->scss_flags & VKI_SA_ONESHOT) {
+ /* Do the ONESHOT thing. */
+ handler->scss_handler = VKI_SIG_DFL;
+
+ handle_SCSS_change( False /* lazy update */ );
+ }
+
+ /* At this point:
+ tst->sig_mask is the current signal mask
+ tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
+ handler->scss_mask is the mask set by the handler
+
+ Handler gets a mask of tmp_sig_mask|handler_mask|signo
+ */
+ tst->sig_mask = tst->tmp_sig_mask;
+ if (!(handler->scss_flags & VKI_SA_NOMASK)) {
+ VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
+ VG_(sigaddset)(&tst->sig_mask, sigNo);
+
+ tst->tmp_sig_mask = tst->sig_mask;
+ }
+ }
+
+ /* Thread state is ready to go - just add Runnable */
+}
+
+static void resume_scheduler(ThreadId tid)
+{
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+
+ vg_assert(tst->os_state.lwpid == VG_(gettid)());
+
+ if (tst->sched_jmpbuf_valid) {
+ /* Can't continue; must longjmp back to the scheduler and thus
+ enter the sighandler immediately. */
+ __builtin_longjmp(tst->sched_jmpbuf, True);
+ }
+}
+
+static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
+{
+ vki_siginfo_t info;
+
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
+
+ VG_(memset)(&info, 0, sizeof(info));
+ info.si_signo = VKI_SIGSEGV;
+ info.si_code = si_code;
+ info.VKI_SIGINFO_si_addr = (void*)addr;
+
+ /* If they're trying to block the signal, force it to be delivered */
+ if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
+ VG_(set_default_handler)(VKI_SIGSEGV);
+
+ deliver_signal(tid, &info, NULL);
+}
+
+// Synthesize a fault where the address is OK, but the page
+// permissions are bad.
+void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
+{
+ synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
+}
+
+// Synthesize a fault where there's nothing mapped at the address.
+void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
+{
+ synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
+}
+
+// Synthesize a misc memory fault.
+void VG_(synth_fault)(ThreadId tid)
+{
+ synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
+}
+
+// Synthesise a SIGILL.
+void VG_(synth_sigill)(ThreadId tid, Addr addr)
+{
+ vki_siginfo_t info;
+
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
+
+ VG_(memset)(&info, 0, sizeof(info));
+ info.si_signo = VKI_SIGILL;
+ info.si_code = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
+ info.VKI_SIGINFO_si_addr = (void*)addr;
+
+ resume_scheduler(tid);
+ deliver_signal(tid, &info, NULL);
+}
+
+// Synthesise a SIGTRAP.
+void VG_(synth_sigtrap)(ThreadId tid)
+{
+ vki_siginfo_t info;
+ struct vki_ucontext uc;
+
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
+
+ VG_(memset)(&info, 0, sizeof(info));
+ VG_(memset)(&uc, 0, sizeof(uc));
+ info.si_signo = VKI_SIGTRAP;
+ info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
+#if defined(VGA_x86) || defined(VGA_amd64)
+ uc.uc_mcontext.trapno = 3; /* tjh: this is the x86 trap number
+ for a breakpoint trap... */
+ uc.uc_mcontext.err = 0; /* tjh: no error code for x86
+ breakpoint trap... */
+#endif
+
+ resume_scheduler(tid);
+ deliver_signal(tid, &info, &uc);
+}
+
+/* Make a signal pending for a thread, for later delivery.
+ VG_(poll_signals) will arrange for it to be delivered at the right
+ time.
+
+ tid==0 means add it to the process-wide queue, and not sent it to a
+ specific thread.
+*/
+static
+void queue_signal(ThreadId tid, const vki_siginfo_t *si)
+{
+ ThreadState *tst;
+ SigQueue *sq;
+ vki_sigset_t savedmask;
+
+ tst = VG_(get_ThreadState)(tid);
+
+ /* Protect the signal queue against async deliveries */
+ block_all_host_signals(&savedmask);
+
+ if (tst->sig_queue == NULL) {
+ tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
+ sizeof(*tst->sig_queue));
+ VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
+ }
+ sq = tst->sig_queue;
+
+ if (VG_(clo_trace_signals))
+ VG_DMSG("Queueing signal %d (idx %d) to thread %d",
+ si->si_signo, sq->next, tid);
+
+ /* Add signal to the queue. If the queue gets overrun, then old
+ queued signals may get lost.
+
+ XXX We should also keep a sigset of pending signals, so that at
+      least a non-siginfo signal gets delivered.
+ */
+ if (sq->sigs[sq->next].si_signo != 0)
+ VG_UMSG("Signal %d being dropped from thread %d's queue",
+ sq->sigs[sq->next].si_signo, tid);
+
+ sq->sigs[sq->next] = *si;
+ sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
+
+ restore_all_host_signals(&savedmask);
+}
+
+/*
+ Returns the next queued signal for thread tid which is in "set".
+ tid==0 means process-wide signal. Set si_signo to 0 when the
+ signal has been delivered.
+
+ Must be called with all signals blocked, to protect against async
+ deliveries.
+*/
+static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
+{
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+ SigQueue *sq;
+ Int idx;
+ vki_siginfo_t *ret = NULL;
+
+ sq = tst->sig_queue;
+ if (sq == NULL)
+ goto out;
+
+ idx = sq->next;
+ do {
+ if (0)
+ VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
+ sq->sigs[idx].si_signo, VG_(sigismember)(set, sq->sigs[idx].si_signo));
+
+ if (sq->sigs[idx].si_signo != 0 && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
+ if (VG_(clo_trace_signals))
+ VG_DMSG("Returning queued signal %d (idx %d) for thread %d",
+ sq->sigs[idx].si_signo, idx, tid);
+ ret = &sq->sigs[idx];
+ goto out;
+ }
+
+ idx = (idx + 1) % N_QUEUED_SIGNALS;
+ } while(idx != sq->next);
+ out:
+ return ret;
+}
+
+static int sanitize_si_code(int si_code)
+{
+#if defined(VGO_linux)
+   /* The linux kernel uses the top 16 bits of si_code for its own
+ use and only exports the bottom 16 bits to user space - at least
+ that is the theory, but it turns out that there are some kernels
+ around that forget to mask out the top 16 bits so we do it here.
+
+ The kernel treats the bottom 16 bits as signed and (when it does
+ mask them off) sign extends them when exporting to user space so
+ we do the same thing here. */
+ return (Short)si_code;
+#elif defined(VGO_aix5)
+ return si_code;
+#else
+# error Unknown OS
+#endif
+}
+
+/*
+ Receive an async signal from the kernel.
+
+ This should only happen when the thread is blocked in a syscall,
+ since that's the only time this set of signals is unblocked.
+*/
+static
+void async_signalhandler ( Int sigNo,
+ vki_siginfo_t *info, struct vki_ucontext *uc )
+{
+ ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
+ ThreadState* tst = VG_(get_ThreadState)(tid);
+
+ /* The thread isn't currently running, make it so before going on */
+ vg_assert(tst->status == VgTs_WaitSys);
+ VG_(acquire_BigLock)(tid, "async_signalhandler");
+
+ info->si_code = sanitize_si_code(info->si_code);
+
+ if (VG_(clo_trace_signals))
+ VG_DMSG("async signal handler: signal=%d, tid=%d, si_code=%d",
+ sigNo, tid, info->si_code);
+
+ /* Update thread state properly: the interrupted syscall must be
+ either completed, restarted or failed with EINTR, depending on
+ how far it got and on whether SA_RESTART was requested for this
+ signal. */
+ VG_(fixup_guest_state_after_syscall_interrupted)(
+ tid,
+ VG_UCONTEXT_INSTR_PTR(uc),
+ VG_UCONTEXT_SYSCALL_NUM(uc),
+ VG_UCONTEXT_SYSCALL_SYSRES(uc),
+ !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
+ );
+
+ /* Set up the thread's state to deliver a signal.
+ Ignored signals are simply dropped here. */
+ if (!is_sig_ign(info->si_signo))
+ deliver_signal(tid, info, uc);
+
+ /* longjmp back to the thread's main loop to start executing the
+ handler. resume_scheduler does not return on success. */
+ resume_scheduler(tid);
+
+ VG_(core_panic)("async_signalhandler: got unexpected signal "
+ "while outside of scheduler");
+}
+
+/* Extend the stack to cover addr. maxsize is the limit the stack can grow to.
+
+ Returns True on success, False on failure.
+
+ Succeeds without doing anything if addr is already within a segment.
+
+ Failure could be caused by:
+ - addr not below a growable segment
+ - new stack size would exceed maxsize
+ - mmap failed for some other reason
+
+ NOTE(review): maxsize is not actually consulted in this body; the
+ size limit appears to be enforced by the reservation segment layout
+ instead — confirm against callers. */
+Bool VG_(extend_stack)(Addr addr, UInt maxsize)
+{
+ SizeT udelta;
+
+ /* Find the next Segment above addr */
+ NSegment const* seg
+ = VG_(am_find_nsegment)(addr);
+ NSegment const* seg_next
+ = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
+ : NULL;
+
+ if (seg && seg->kind == SkAnonC)
+ /* addr is already mapped. Nothing to do. */
+ return True;
+
+ /* Check that the requested new base is in a shrink-down
+ reservation section which abuts an anonymous mapping that
+ belongs to the client. */
+ if ( ! (seg
+ && seg->kind == SkResvn
+ && seg->smode == SmUpper
+ && seg_next
+ && seg_next->kind == SkAnonC
+ && seg->end+1 == seg_next->start))
+ return False;
+
+ /* Grow downwards by a whole number of pages, enough to cover addr. */
+ udelta = VG_PGROUNDUP(seg_next->start - addr);
+ VG_(debugLog)(1, "signals",
+ "extending a stack base 0x%llx down by %lld\n",
+ (ULong)seg_next->start, (ULong)udelta);
+ if (! VG_(am_extend_into_adjacent_reservation_client)
+ ( (NSegment*)seg_next, -(SSizeT)udelta )) {
+ VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
+ return False;
+ }
+
+ /* When we change the main stack, we have to let the stack handling
+ code know about it. */
+ VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
+
+ if (VG_(clo_sanity_level) > 2)
+ VG_(sanity_check_general)(False);
+
+ return True;
+}
+
+/* Optional hook called by sync_signalhandler_from_inside when a fault
+ occurs in Valgrind's own code; NULL when no catcher is installed. */
+static void (*fault_catcher)(Int sig, Addr addr) = NULL;
+
+/* Install (or, with NULL, remove) the fault catcher. At most one
+ catcher may be registered at a time. */
+void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
+{
+ if (0)
+ VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
+ vg_assert2(NULL == catcher || NULL == fault_catcher,
+ "Fault catcher is already registered");
+
+ fault_catcher = catcher;
+}
+
+/* Handle a synchronous signal (SIGSEGV etc.) that was sent by another
+ process or user rather than raised by a faulting instruction: either
+ deliver it immediately (thread blocked in a syscall) or queue it. */
+static
+void sync_signalhandler_from_outside ( ThreadId tid,
+ Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
+{
+ ThreadId qtid;
+
+ /* If some user-process sent us a sync signal (ie, they're not the result
+ of a faulting instruction), then how we treat it depends on when it
+ arrives... */
+
+ if (VG_(threads)[tid].status == VgTs_WaitSys) {
+ /* Signal arrived while we're blocked in a syscall. This means that
+ the client's signal mask was applied. In other words, we can't
+ get here unless the client wants this signal right now. This means
+ we can simply use the async_signalhandler. */
+ if (VG_(clo_trace_signals))
+ VG_DMSG("Delivering user-sent sync signal %d as async signal", sigNo);
+
+ async_signalhandler(sigNo, info, uc);
+ VG_(core_panic)("async_signalhandler returned!?\n");
+
+ } else {
+ /* Signal arrived while in generated client code, or while running
+ Valgrind core code. That means that every thread has these signals
+ unblocked, so we can't rely on the kernel to route them properly, so
+ we need to queue them manually. */
+ if (VG_(clo_trace_signals))
+ VG_DMSG("Routing user-sent sync signal %d via queue", sigNo);
+
+# if defined(VGO_linux)
+ /* On Linux, first we have to do a sanity check of the siginfo.
+ si_pid == 0 indicates the kernel dropped the siginfo payload. */
+ if (info->VKI_SIGINFO_si_pid == 0) {
+ /* There's a per-user limit of pending siginfo signals. If
+ you exceed this, by having more than that number of
+ pending signals with siginfo, then new signals are
+ delivered without siginfo. This condition can be caused
+ by any unrelated program you're running at the same time
+ as Valgrind, if it has a large number of pending siginfo
+ signals which it isn't taking delivery of.
+
+ Since we depend on siginfo to work out why we were sent a
+ signal and what we should do about it, we really can't
+ continue unless we get it. */
+ VG_UMSG("Signal %d (%s) appears to have lost its siginfo; "
+ "I can't go on.", sigNo, signame(sigNo));
+ VG_(printf)(
+" This may be because one of your programs has consumed your ration of\n"
+" siginfo structures. For more information, see:\n"
+" http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
+" Basically, some program on your system is building up a large queue of\n"
+" pending signals, and this causes the siginfo data for other signals to\n"
+" be dropped because it's exceeding a system limit. However, Valgrind\n"
+" absolutely needs siginfo for SIGSEGV. A workaround is to track down the\n"
+" offending program and avoid running it while using Valgrind, but there\n"
+" is no easy way to do this. Apparently the problem was fixed in kernel\n"
+" 2.6.12.\n");
+
+ /* It's a fatal signal, so we force the default handler. */
+ VG_(set_default_handler)(sigNo);
+ deliver_signal(tid, info, uc);
+ resume_scheduler(tid);
+ VG_(exit)(99); /* If we can't resume, then just exit */
+ }
+# endif
+
+ qtid = 0; /* shared pending by default */
+# if defined(VGO_linux)
+ if (info->si_code == VKI_SI_TKILL)
+ qtid = tid; /* directed to us specifically */
+# endif
+ queue_signal(qtid, info);
+ }
+}
+
+/* Returns True if the sync signal was due to the stack requiring extension
+ and the extension was successful.
+*/
+static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
+{
+ Addr fault;
+ Addr esp;
+ NSegment const* seg;
+ NSegment const* seg_next;
+
+ /* Only SIGSEGV can indicate a stack-growth fault. */
+ if (info->si_signo != VKI_SIGSEGV)
+ return False;
+
+ fault = (Addr)info->VKI_SIGINFO_si_addr;
+ esp = VG_(get_SP)(tid);
+ seg = VG_(am_find_nsegment)(fault);
+ seg_next = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
+ : NULL;
+
+ if (VG_(clo_trace_signals)) {
+ if (seg == NULL)
+ VG_DMSG("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
+ "seg=NULL",
+ info->si_code, fault, tid, esp);
+ else
+ VG_DMSG("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
+ "seg=%#lx-%#lx",
+ info->si_code, fault, tid, esp, seg->start, seg->end);
+ }
+
+ /* The fault must be a map error (not a permissions fault), land in
+ a shrink-down reservation abutting the client's stack mapping,
+ and be no lower than SP minus the redzone. */
+ if (info->si_code == VKI_SEGV_MAPERR
+ && seg
+ && seg->kind == SkResvn
+ && seg->smode == SmUpper
+ && seg_next
+ && seg_next->kind == SkAnonC
+ && seg->end+1 == seg_next->start
+ && fault >= (esp - VG_STACK_REDZONE_SZB)) {
+ /* If the fault address is above esp but below the current known
+ stack segment base, and it was a fault because there was
+ nothing mapped there (as opposed to a permissions fault),
+ then extend the stack segment.
+ */
+ Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
+ if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
+ if (VG_(clo_trace_signals))
+ VG_DMSG(" -> extended stack base to %#lx",
+ VG_PGROUNDDN(fault));
+ return True;
+ } else {
+ VG_UMSG("Stack overflow in thread %d: can't grow stack to %#lx",
+ tid, fault);
+ return False;
+ }
+ } else {
+ return False;
+ }
+}
+
+/* Handle a sync signal raised by a faulting instruction: try the fault
+ catcher, then stack extension, then delivery to the client; anything
+ left over is a fatal Valgrind-internal fault. */
+static
+void sync_signalhandler_from_inside ( ThreadId tid,
+ Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
+{
+ /* Check to see if some part of Valgrind itself is interested in faults.
+ The fault catcher should never be set whilst we're in generated code, so
+ check for that. AFAIK the only use of the catcher right now is
+ memcheck's leak detector. */
+ if (fault_catcher) {
+ vg_assert(VG_(in_generated_code) == False);
+
+ (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
+ /* If the catcher returns, then it didn't handle the fault,
+ so carry on panicking. */
+ }
+
+ if (extend_stack_if_appropriate(tid, info)) {
+ /* Stack extension occurred, so we don't need to do anything else; upon
+ returning from this function, we'll restart the host (hence guest)
+ instruction. */
+ } else {
+ /* OK, this is a signal we really have to deal with. If it came
+ from the client's code, then we can jump back into the scheduler
+ and have it delivered. Otherwise it's a Valgrind bug. */
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+
+ if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
+ /* signal is blocked, but they're not allowed to block faults */
+ VG_(set_default_handler)(sigNo);
+ }
+
+ if (VG_(in_generated_code)) {
+ /* Can't continue; must longjmp back to the scheduler and thus
+ enter the sighandler immediately. */
+ deliver_signal(tid, info, uc);
+ resume_scheduler(tid);
+ }
+
+ /* If resume_scheduler returns or it's our fault, it means we
+ don't have longjmp set up, implying that we weren't running
+ client code, and therefore it was actually generated by
+ Valgrind internally.
+ */
+ VG_DMSG("VALGRIND INTERNAL ERROR: Valgrind received "
+ "a signal %d (%s) - exiting",
+ sigNo, signame(sigNo));
+
+ VG_DMSG("si_code=%x; Faulting address: %p; sp: %#lx",
+ info->si_code, info->VKI_SIGINFO_si_addr,
+ VG_UCONTEXT_STACK_PTR(uc));
+
+ if (0)
+ VG_(kill_self)(sigNo); /* generate a core dump */
+
+ //if (tid == 0) /* could happen after everyone has exited */
+ // tid = VG_(master_tid);
+ vg_assert(tid != 0);
+
+ VG_(core_panic_at)("Killed by fatal signal",
+ VG_UCONTEXT_INSTR_PTR(uc),
+ VG_UCONTEXT_STACK_PTR(uc),
+ VG_UCONTEXT_FRAME_PTR(uc),
+ VG_UCONTEXT_LINK_REG(uc));
+ }
+}
+
+/*
+ Receive a sync signal from the host. Dispatches to
+ sync_signalhandler_from_outside (user-sent) or
+ sync_signalhandler_from_inside (raised by a faulting instruction),
+ based on si_code.
+*/
+static
+void sync_signalhandler ( Int sigNo,
+ vki_siginfo_t *info, struct vki_ucontext *uc )
+{
+ ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
+ Bool from_outside;
+
+ if (0)
+ VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);
+
+ vg_assert(info != NULL);
+ vg_assert(info->si_signo == sigNo);
+ /* Only the hardware-fault signals are routed here. */
+ vg_assert(sigNo == VKI_SIGSEGV ||
+ sigNo == VKI_SIGBUS ||
+ sigNo == VKI_SIGFPE ||
+ sigNo == VKI_SIGILL ||
+ sigNo == VKI_SIGTRAP);
+
+ info->si_code = sanitize_si_code(info->si_code);
+
+ from_outside = !is_signal_from_kernel(info->si_code);
+
+ if (VG_(clo_trace_signals)) {
+ VG_DMSG("sync signal handler: "
+ "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s",
+ sigNo, info->si_code, VG_(get_IP)(tid),
+ VG_UCONTEXT_INSTR_PTR(uc),
+ ( from_outside ? "outside" : "inside" ));
+ }
+ vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
+
+ /* // debug code:
+ if (0) {
+ VG_(printf)("info->si_signo %d\n", info->si_signo);
+ VG_(printf)("info->si_errno %d\n", info->si_errno);
+ VG_(printf)("info->si_code %d\n", info->si_code);
+ VG_(printf)("info->si_pid %d\n", info->si_pid);
+ VG_(printf)("info->si_uid %d\n", info->si_uid);
+ VG_(printf)("info->si_status %d\n", info->si_status);
+ VG_(printf)("info->si_addr %p\n", info->si_addr);
+ }
+ */
+
+ /* Figure out if the signal is being sent from outside the process.
+ (Why do we care?) If the signal is from the user rather than the
+ kernel, then treat it more like an async signal than a sync signal --
+ that is, merely queue it for later delivery. */
+ if (from_outside) {
+ sync_signalhandler_from_outside(tid, sigNo, info, uc);
+ } else {
+ sync_signalhandler_from_inside( tid, sigNo, info, uc);
+ }
+}
+
+
+/*
+ Kill this thread. Makes it leave any syscall it might be currently
+ blocked in, and return to the scheduler. This doesn't mark the thread
+ as exiting; that's the caller's job.
+ */
+static void sigvgkill_handler(int signo, vki_siginfo_t *si,
+ struct vki_ucontext *uc)
+{
+ ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
+ /* Snapshot the status before acquiring the lock, since we need to
+ know whether the thread was blocked in a syscall at signal time. */
+ ThreadStatus at_signal = VG_(threads)[tid].status;
+
+ if (VG_(clo_trace_signals))
+ VG_DMSG("sigvgkill for lwp %d tid %d", VG_(gettid)(), tid);
+
+ VG_(acquire_BigLock)(tid, "sigvgkill_handler");
+
+ vg_assert(signo == VG_SIGVGKILL);
+ vg_assert(si->si_signo == signo);
+
+ /* jrs 2006 August 3: the following assertion seems incorrect to
+ me, and fails on AIX. sigvgkill could be sent to a thread which
+ is runnable - see VG_(nuke_all_threads_except) in the scheduler.
+ Hence comment these out ..
+
+ vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
+ VG_(post_syscall)(tid);
+
+ and instead do:
+ */
+ if (at_signal == VgTs_WaitSys)
+ VG_(post_syscall)(tid);
+ /* jrs 2006 August 3 ends */
+
+ resume_scheduler(tid);
+
+ VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
+}
+
+/* Debug helper: pretty-print a vki_sigaction (handler, flags, restorer
+ and mask). Kept around even when unreferenced. */
+static __attribute((unused))
+void pp_ksigaction ( struct vki_sigaction* sa )
+{
+ Int i;
+ VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
+ sa->ksa_handler,
+ (UInt)sa->sa_flags,
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ sa->sa_restorer
+# else
+ (void*)0
+# endif
+ );
+ VG_(printf)("pp_ksigaction: { ");
+ for (i = 1; i <= VG_(max_signal); i++)
+ if (VG_(sigismember(&(sa->sa_mask),i)))
+ VG_(printf)("%d ", i);
+ VG_(printf)("}\n");
+}
+
+/*
+ Force signal handler to default (SIG_DFL), with no flags and an
+ empty blocking mask, via the client-visible sigaction machinery.
+ */
+void VG_(set_default_handler)(Int signo)
+{
+ struct vki_sigaction sa;
+
+ sa.ksa_handler = VKI_SIG_DFL;
+ sa.sa_flags = 0;
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ sa.sa_restorer = 0;
+# endif
+ VG_(sigemptyset)(&sa.sa_mask);
+
+ VG_(do_sys_sigaction)(signo, &sa, NULL);
+}
+
+/*
+ Poll for pending signals, and set the next one up for delivery.
+ Order of preference: thread-directed queued signals, process-wide
+ queued signals, then signals pending in the kernel.
+ */
+void VG_(poll_signals)(ThreadId tid)
+{
+ vki_siginfo_t si, *sip;
+ vki_sigset_t pollset;
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+ Int i;
+ vki_sigset_t saved_mask;
+
+ /* look for all the signals this thread isn't blocking */
+ for(i = 0; i < _VKI_NSIG_WORDS; i++)
+ pollset.sig[i] = ~tst->sig_mask.sig[i];
+
+ //VG_(printf)("tid %d pollset=%08x%08x\n", tid, pollset.sig[1], pollset.sig[0]);
+
+ block_all_host_signals(&saved_mask); // protect signal queue
+
+ /* First look for any queued pending signals */
+ sip = next_queued(tid, &pollset); /* this thread */
+
+ if (sip == NULL)
+ sip = next_queued(0, &pollset); /* process-wide */
+
+ /* If there was nothing queued, ask the kernel for a pending signal */
+ if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
+ if (VG_(clo_trace_signals))
+ VG_DMSG("poll_signals: got signal %d for thread %d",
+ si.si_signo, tid);
+ sip = &si;
+ }
+
+ if (sip != NULL) {
+ /* OK, something to do; deliver it */
+ if (VG_(clo_trace_signals))
+ VG_DMSG("Polling found signal %d for tid %d", sip->si_signo, tid);
+ if (!is_sig_ign(sip->si_signo))
+ deliver_signal(tid, sip, NULL);
+ else if (VG_(clo_trace_signals))
+ VG_DMSG(" signal %d ignored", sip->si_signo);
+
+ sip->si_signo = 0; /* remove from signal queue, if that's
+ where it came from */
+ }
+
+ restore_all_host_signals(&saved_mask);
+}
+
+/* At startup, copy the process' real signal state to the SCSS.
+ Whilst doing this, block all real signals. Then calculate SKSS and
+ set the kernel to that. Also initialise DCSS.
+*/
+void VG_(sigstartup_actions) ( void )
+{
+ Int i, ret;
+ vki_sigset_t saved_procmask;
+ struct vki_sigaction sa;
+
+ /* VG_(printf)("SIGSTARTUP\n"); */
+ /* Block all signals. saved_procmask remembers the previous mask,
+ which the first thread inherits.
+ */
+ block_all_host_signals( &saved_procmask );
+
+ /* Copy per-signal settings to SCSS. The loop also probes which
+ signal numbers the kernel actually supports; VG_(max_signal)
+ ends up as the highest usable number. */
+ for (i = 1; i <= _VKI_NSIG; i++) {
+ /* Get the old host action */
+ ret = VG_(sigaction)(i, NULL, &sa);
+
+ if (ret != 0)
+ break;
+
+ /* Try setting it back to see if this signal is really
+ available */
+ if (i >= VKI_SIGRTMIN) {
+ struct vki_sigaction tsa;
+
+ tsa.ksa_handler = (void *)sync_signalhandler;
+ tsa.sa_flags = VKI_SA_SIGINFO;
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ tsa.sa_restorer = 0;
+# endif
+ VG_(sigfillset)(&tsa.sa_mask);
+
+ /* try setting it to some arbitrary handler */
+ if (VG_(sigaction)(i, &tsa, NULL) != 0) {
+ /* failed - not really usable */
+ break;
+ }
+
+ /* Restore the original action we read above. */
+ ret = VG_(sigaction)(i, &sa, NULL);
+ vg_assert(ret == 0);
+ }
+
+ VG_(max_signal) = i;
+
+ if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
+ VG_(printf)("snaffling handler 0x%lx for signal %d\n",
+ (Addr)(sa.ksa_handler), i );
+
+ scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
+ scss.scss_per_sig[i].scss_flags = sa.sa_flags;
+ scss.scss_per_sig[i].scss_mask = sa.sa_mask;
+ scss.scss_per_sig[i].scss_restorer = 0;
+# if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5)
+ scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
+# endif
+ }
+
+ if (VG_(clo_trace_signals))
+ VG_DMSG("Max kernel-supported signal is %d", VG_(max_signal));
+
+ /* Our private internal signals are treated as ignored */
+ scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
+ scss.scss_per_sig[VG_SIGVGKILL].scss_flags = VKI_SA_SIGINFO;
+ VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);
+
+ /* Copy the process' signal mask into the root thread. */
+ vg_assert(VG_(threads)[1].status == VgTs_Init);
+ for (i = 2; i < VG_N_THREADS; i++)
+ vg_assert(VG_(threads)[i].status == VgTs_Empty);
+
+ VG_(threads)[1].sig_mask = saved_procmask;
+ VG_(threads)[1].tmp_sig_mask = saved_procmask;
+
+ /* Calculate SKSS and apply it. This also sets the initial kernel
+ mask we need to run with. */
+ handle_SCSS_change( True /* forced update */ );
+
+ /* Leave with all signals still blocked; the thread scheduler loop
+ will set the appropriate mask at the appropriate time. */
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_sparsewa.c.svn-base b/coregrind/.svn/text-base/m_sparsewa.c.svn-base
new file mode 100644
index 0000000..e8f4965
--- /dev/null
+++ b/coregrind/.svn/text-base/m_sparsewa.c.svn-base
@@ -0,0 +1,478 @@
+
+/*--------------------------------------------------------------------*/
+/*--- A sparse array (of words) implementation. ---*/
+/*--- m_sparsewa.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 OpenWorks Ltd
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_sparsewa.h" /* self */
+
+/////////////////////////////////////////////////////////
+// //
+// SparseWA: Implementation //
+// //
+/////////////////////////////////////////////////////////
+
+//////// SWA data structures
+
+// (UInt) `echo "Level Zero Byte Map" | md5sum`
+#define Level0_MAGIC 0x458ec222
+
+// (UInt) `echo "Level N Byte Map" | md5sum`
+#define LevelN_MAGIC 0x0a280a1a
+
+/* It's important that the .magic field appears at offset zero in both
+ structs, so that we can reliably distinguish between them. */
+
+/* Leaf node: 256 word-sized values, with a 256-bit occupancy map. */
+typedef
+ struct {
+ UWord magic;
+ UWord words[256];
+ Int nInUse;
+ UChar inUse[256/8];
+ }
+ Level0;
+
+/* Interior node: 256 children, each a LevelN (level > 1) or a
+ Level0 (when this node's level == 1). */
+typedef
+ struct {
+ UWord magic;
+ void* child[256]; /* either LevelN* or Level0* */
+ Int nInUse;
+ Int level; /* 3 .. 1 on 32-bit, 7 .. 1 on 64-bit */
+ }
+ LevelN;
+
+/* One saved frame of iteration state; see VG_(nextIterSWA). */
+typedef
+ struct {
+ UWord partial_key;
+ Int curr_ix;
+ void* curr_nd; /* LevelN* or Level0* */
+ Int resume_point; /* 1, 2 or 3 */
+ }
+ SWAStackElem;
+
+/* The sparse word array: a 256-ary radix tree over the bytes of the
+ key, with explicit iterator state (iterStack/isUsed). */
+struct _SparseWA {
+ void* (*alloc_nofail)(HChar*,SizeT);
+ HChar* cc;
+ void (*dealloc)(void*);
+ LevelN* root;
+ SWAStackElem iterStack[8];
+ Int isUsed;
+};
+
+//////// SWA helper functions (bitarray)
+
+/* Read bit ix of arr; returns 0 or 1. */
+static inline UWord swa_bitarray_read ( UChar* arr, UWord ix ) {
+ UWord bix = ix >> 3;
+ UWord off = ix & 7;
+ return (arr[bix] >> off) & 1;
+}
+
+/* Set bit ix of arr; returns its previous value (0 or 1). */
+static inline UWord swa_bitarray_read_then_set ( UChar* arr, UWord ix ) {
+ UWord bix = ix >> 3;
+ UWord off = ix & 7;
+ UChar old = arr[bix];
+ UChar nyu = old | (1 << off);
+ arr[bix] = nyu;
+ return (old >> off) & 1;
+}
+
+/* Clear bit ix of arr; returns its previous value (0 or 1). */
+static inline UWord swa_bitarray_read_then_clear ( UChar* arr, UWord ix ) {
+ UWord bix = ix >> 3;
+ UWord off = ix & 7;
+ UChar old = arr[bix];
+ UChar nyu = old & ~(1 << off);
+ arr[bix] = nyu;
+ return (old >> off) & 1;
+}
+
+//////// SWA helper functions (iteration)
+
+/* Push one frame of iterator state onto swa->iterStack. The stack is
+ bounded by the pointer width (one frame per key byte). */
+static void swa_PUSH ( SparseWA* swa, UWord partial_key, Int curr_ix,
+ void* curr_nd, Int resume_point )
+{
+ Int sp = swa->isUsed;
+ const Int _3_or_7 = sizeof(void*) - 1;
+ // if (0) VG_(printf)("PUSH, old sp = %d\n", sp);
+ vg_assert(sp >= 0 && sp <= _3_or_7);
+ swa->iterStack[sp].partial_key = partial_key;
+ swa->iterStack[sp].curr_ix = curr_ix;
+ swa->iterStack[sp].curr_nd = curr_nd;
+ swa->iterStack[sp].resume_point = resume_point;
+ swa->isUsed = sp+1;
+}
+
+/* Pop the top frame of iterator state into the out-parameters. */
+static void swa_POP ( SparseWA* swa,
+ UWord* partial_key, Int* curr_ix,
+ void** curr_nd, Int* resume_point )
+{
+ Int sp = swa->isUsed - 1;
+ const Int _3_or_7 = sizeof(void*) - 1;
+ // if (0) VG_(printf)("POP, old sp = %d\n", sp+1);
+ vg_assert(sp >= 0 && sp <= _3_or_7);
+ *partial_key = swa->iterStack[sp].partial_key;
+ *curr_ix = swa->iterStack[sp].curr_ix;
+ *curr_nd = swa->iterStack[sp].curr_nd;
+ *resume_point = swa->iterStack[sp].resume_point;
+ swa->isUsed = sp;
+}
+
+//////// SWA helper functions (allocation)
+
+/* Allocate and zero a new interior node at the given level. The
+ allocator never fails (alloc_nofail). */
+static LevelN* swa_new_LevelN ( SparseWA* swa, Int level )
+{
+ LevelN* levelN = swa->alloc_nofail( swa->cc, sizeof(LevelN) );
+ VG_(memset)(levelN, 0, sizeof(*levelN));
+ levelN->magic = LevelN_MAGIC;
+ levelN->level = level;
+ return levelN;
+}
+
+/* Allocate and zero a new leaf node. */
+static Level0* swa_new_Level0 ( SparseWA* swa )
+{
+ Level0* level0 = swa->alloc_nofail( swa->cc, sizeof(Level0) );
+ VG_(memset)(level0, 0, sizeof(*level0));
+ level0->magic = Level0_MAGIC;
+ return level0;
+}
+
+
+//////// SWA public interface
+
+/* Reset iteration state; subsequent VG_(nextIterSWA) calls will walk
+ the whole array in ascending key order. */
+void VG_(initIterSWA) ( SparseWA* swa )
+{
+ swa->isUsed = 0;
+ if (swa->root) swa_PUSH(swa, 0, 0, swa->root, 1/*start_new_node*/);
+}
+
+
+/* Produce the next (key, value) pair of the iteration, or return False
+ when exhausted. Implemented as an explicit-stack tree walk: each
+ swa_PUSH saves a continuation (resume_point) so the walk can be
+ suspended mid-loop after yielding a pair. The gotos jump back into
+ the loops at the saved resume labels — intricate but deliberate. */
+Bool VG_(nextIterSWA)( SparseWA* swa,
+ /*OUT*/UWord* keyP, /*OUT*/UWord* valP )
+{
+ UWord p_key;
+ Int curr_ix;
+ void* curr_nd;
+ Int resume_point;
+
+ /* dispatch whatever's on top of the stack; what that actually
+ means is to return to some previously-saved context. */
+ dispatch:
+
+ if (swa->isUsed == 0)
+ return False;
+
+ swa_POP(swa, &p_key, &curr_ix, &curr_nd, &resume_point);
+ switch (resume_point) {
+ case 1: goto start_new_node;
+ case 2: goto resume_leaf_node;
+ case 3: goto resume_nonleaf_node;
+ default: vg_assert(0);
+ }
+
+ start_new_node:
+ if (*(UWord*)curr_nd == Level0_MAGIC) {
+ /* curr_nd is a leaf node */
+ Level0* level0 = (Level0*)curr_nd;
+ for (curr_ix = 0; curr_ix < 256; curr_ix++) {
+ if (swa_bitarray_read(level0->inUse, curr_ix) == 1) {
+ swa_PUSH(swa, p_key, curr_ix, curr_nd, 2/*resume_leaf_node*/);
+ *keyP = (p_key << 8) + (UWord)curr_ix;
+ *valP = level0->words[curr_ix];
+ return True;
+ resume_leaf_node:
+ level0 = (Level0*)curr_nd;
+ }
+ }
+ } else {
+ /* curr_nd is a non-leaf node */
+ LevelN* levelN;
+ vg_assert(*(UWord*)curr_nd == LevelN_MAGIC);
+ levelN = (LevelN*)curr_nd;
+ for (curr_ix = 0; curr_ix < 256; curr_ix++) {
+ if (levelN->child[curr_ix]) {
+ swa_PUSH(swa, p_key, curr_ix, curr_nd, 3/*resume_nonleaf_node*/);
+ p_key = (p_key << 8) + (UWord)curr_ix;
+ curr_nd = levelN->child[curr_ix];
+ goto start_new_node;
+ resume_nonleaf_node:
+ levelN = (LevelN*)curr_nd;
+ }
+ }
+ }
+
+ goto dispatch;
+}
+
+
+/* Create an empty SparseWA. alloc_nofail/dealloc are the memory hooks
+ (alloc must not fail); cc is the cost-centre string passed to the
+ allocator. The root is created lazily on first insertion. */
+SparseWA* VG_(newSWA) ( void*(*alloc_nofail)(HChar* cc, SizeT),
+ HChar* cc,
+ void(*dealloc)(void*) )
+{
+ SparseWA* swa;
+ vg_assert(alloc_nofail);
+ vg_assert(cc);
+ vg_assert(dealloc);
+ swa = alloc_nofail( cc, sizeof(SparseWA) );
+ VG_(memset)(swa, 0, sizeof(*swa));
+ swa->alloc_nofail = alloc_nofail;
+ swa->cc = cc;
+ swa->dealloc = dealloc;
+ swa->root = NULL;
+ return swa;
+}
+
+
+/* Recursively free the subtree rooted at nd (post-order), using the
+ node's magic word to tell interior from leaf nodes. */
+static void swa_deleteSWA_wrk ( void(*dealloc)(void*), void* nd )
+{
+ Int i;
+ vg_assert(nd);
+ if (*(UWord*)nd == LevelN_MAGIC) {
+ LevelN* levelN = (LevelN*)nd;
+ for (i = 0; i < 256; i++) {
+ if (levelN->child[i]) {
+ swa_deleteSWA_wrk( dealloc, levelN->child[i] );
+ }
+ }
+ } else {
+ vg_assert(*(UWord*)nd == Level0_MAGIC);
+ }
+ dealloc(nd);
+}
+/* Free the whole array, including the SparseWA header itself. */
+void VG_(deleteSWA) ( SparseWA* swa )
+{
+ if (swa->root)
+ swa_deleteSWA_wrk( swa->dealloc, swa->root );
+ swa->dealloc(swa);
+}
+
+
+/* Look up key; on success write the key/value to *keyP/*valP and
+ return True, else return False. Descends one tree level per key
+ byte, most-significant byte first. */
+Bool VG_(lookupSWA) ( SparseWA* swa,
+ /*OUT*/UWord* keyP, /*OUT*/UWord* valP,
+ UWord key )
+{
+ Int i;
+ UWord ix;
+ Level0* level0;
+ LevelN* levelN;
+ const Int _3_or_7 = sizeof(void*) - 1;
+
+ vg_assert(swa);
+ levelN = swa->root;
+
+ /* levels 3/7 .. 1 */
+ for (i = _3_or_7; i >= 1; i--) {
+ if (!levelN) return False;
+ vg_assert(levelN->level == i);
+ vg_assert(levelN->nInUse > 0);
+ ix = (key >> (i*8)) & 0xFF;
+ levelN = levelN->child[ix];
+ }
+
+ /* level0 */
+ level0 = (Level0*)levelN;
+ if (!level0) return False;
+ vg_assert(level0->magic == Level0_MAGIC);
+ vg_assert(level0->nInUse > 0);
+ ix = key & 0xFF;
+ if (swa_bitarray_read(level0->inUse, ix) == 0) return False;
+ *keyP = key; /* this is stupid. only here to make it look like WordFM */
+ *valP = level0->words[ix];
+ return True;
+}
+
+
+/* Insert (key, val), overwriting any existing binding. Returns True
+ iff the key was already present. Missing interior/leaf nodes are
+ created on the way down. */
+Bool VG_(addToSWA) ( SparseWA* swa, UWord key, UWord val )
+{
+ Int i;
+ UWord ix;
+ Level0* level0;
+ LevelN* levelN;
+ Bool already_present;
+ const Int _3_or_7 = sizeof(void*) - 1;
+
+ vg_assert(swa);
+
+ if (!swa->root)
+ swa->root = swa_new_LevelN(swa, _3_or_7);
+ levelN = swa->root;
+
+ /* levels 3/7 .. 2 */
+ for (i = _3_or_7; i >= 2; i--) {
+ /* levelN is the level-i map */
+ vg_assert(levelN);
+ vg_assert(levelN->level == i);
+ ix = (key >> (i*8)) & 0xFF;
+ if (levelN->child[ix] == NULL) {
+ levelN->child[ix] = swa_new_LevelN(swa, i-1);
+ levelN->nInUse++;
+ }
+ vg_assert(levelN->nInUse >= 1 && levelN->nInUse <= 256);
+ levelN = levelN->child[ix];
+ }
+
+ /* levelN is the level-1 map */
+ vg_assert(levelN);
+ vg_assert(levelN->level == 1);
+ ix = (key >> (1*8)) & 0xFF;
+ if (levelN->child[ix] == NULL) {
+ levelN->child[ix] = swa_new_Level0(swa);
+ levelN->nInUse++;
+ }
+ vg_assert(levelN->nInUse >= 1 && levelN->nInUse <= 256);
+ level0 = levelN->child[ix];
+
+ /* level0 is the level-0 map */
+ vg_assert(level0);
+ vg_assert(level0->magic == Level0_MAGIC);
+ ix = key & 0xFF;
+ /* read_then_set tells us whether the slot was previously occupied. */
+ if (swa_bitarray_read_then_set(level0->inUse, ix) == 0) {
+ level0->nInUse++;
+ already_present = False;
+ } else {
+ already_present = True;
+ }
+ vg_assert(level0->nInUse >= 1 && level0->nInUse <= 256);
+ level0->words[ix] = val;
+
+ return already_present;
+}
+
+
+/* Delete key, writing the old binding to *oldK/*oldV. Returns True
+ iff the key was present. Nodes left empty by the deletion are freed
+ bottom-up, using the visited[] path recorded on the way down. */
+Bool VG_(delFromSWA) ( SparseWA* swa,
+ /*OUT*/UWord* oldK, /*OUT*/UWord* oldV, UWord key )
+{
+ Int i;
+ UWord ix;
+ Level0* level0;
+ LevelN* levelN;
+ const Int _3_or_7 = sizeof(void*) - 1;
+
+ /* NOTE(review): _3_or_7 is a const-qualified variable, not a
+ constant expression, so these are C99 VLAs (size 3 or 7). */
+ LevelN* visited[_3_or_7];
+ UWord visitedIx[_3_or_7];
+ Int nVisited = 0;
+
+ vg_assert(swa);
+ levelN = swa->root;
+
+ /* levels 3/7 .. 1 */
+ for (i = _3_or_7; i >= 1; i--) {
+ /* level i */
+ if (!levelN) return False;
+ vg_assert(levelN->level == i);
+ vg_assert(levelN->nInUse > 0);
+ ix = (key >> (i*8)) & 0xFF;
+ visited[nVisited] = levelN;
+ visitedIx[nVisited++] = ix;
+ levelN = levelN->child[ix];
+ }
+
+ /* level 0 */
+ level0 = (Level0*)levelN;
+ if (!level0) return False;
+ vg_assert(level0->magic == Level0_MAGIC);
+ vg_assert(level0->nInUse > 0);
+ ix = key & 0xFF;
+
+ if (swa_bitarray_read_then_clear(level0->inUse, ix) == 0)
+ return False;
+
+ *oldK = key; /* this is silly */
+ *oldV = level0->words[ix];
+
+ level0->nInUse--;
+ if (level0->nInUse > 0)
+ return True;
+
+ /* The leaf is now empty; free it, then prune empty ancestors. */
+ vg_assert(nVisited == _3_or_7);
+ swa->dealloc( level0 );
+
+ /* levels 1 .. 3/7 */
+ for (i = 1; i <= _3_or_7; i++) {
+ /* level i */
+ nVisited--;
+ vg_assert(visited[nVisited]->child[ visitedIx[nVisited] ]);
+ visited[nVisited]->child[ visitedIx[nVisited] ] = NULL;
+ visited[nVisited]->nInUse--;
+ vg_assert(visited[nVisited]->nInUse >= 0);
+ if (visited[nVisited]->nInUse > 0)
+ return True;
+ swa->dealloc(visited[nVisited]);
+ }
+
+ /* Every node on the path was freed, so the array is now empty. */
+ vg_assert(nVisited == 0);
+ swa->root = NULL;
+ return True;
+}
+
+
+/* Count the bindings in the subtree rooted at nd, by summing the
+ population counts of the leaf occupancy bitmaps. */
+static UWord swa_sizeSWA_wrk ( void* nd )
+{
+ Int i;
+ UWord sum = 0;
+ if (*(UWord*)nd == LevelN_MAGIC) {
+ LevelN* levelN = (LevelN*)nd;
+ for (i = 0; i < 256; i++) {
+ if (levelN->child[i]) {
+ sum += swa_sizeSWA_wrk( levelN->child[i] );
+ }
+ }
+ } else {
+ Level0* level0;
+ vg_assert(*(UWord*)nd == Level0_MAGIC);
+ level0 = (Level0*)nd;
+ for (i = 0; i < 256/8; i += 2) {
+ UWord x = level0->inUse[i+0]; /* assume zero-extend */
+ UWord y = level0->inUse[i+1]; /* assume zero-extend */
+ /* do 'sum += popcount(x) + popcount(y)' for byte-sized x, y */
+ /* unroll the loop twice so as to expose more ILP */
+ x = (x & 0x55) + ((x >> 1) & 0x55);
+ y = (y & 0x55) + ((y >> 1) & 0x55);
+ x = (x & 0x33) + ((x >> 2) & 0x33);
+ y = (y & 0x33) + ((y >> 2) & 0x33);
+ x = (x & 0x0F) + ((x >> 4) & 0x0F);
+ y = (y & 0x0F) + ((y >> 4) & 0x0F);
+ sum += x + y;
+ }
+ }
+ return sum;
+}
+/* Number of (key, value) bindings currently in the array. */
+UWord VG_(sizeSWA) ( SparseWA* swa )
+{
+ if (swa->root)
+ return swa_sizeSWA_wrk ( swa->root );
+ else
+ return 0;
+}
+
+
+
+/*--------------------------------------------------------------------*/
+/*--- end m_sparsewa.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_stacks.c.svn-base b/coregrind/.svn/text-base/m_stacks.c.svn-base
new file mode 100644
index 0000000..db16239
--- /dev/null
+++ b/coregrind/.svn/text-base/m_stacks.c.svn-base
@@ -0,0 +1,329 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Stack management. m_stacks.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_stacks.h"
+#include "pub_core_tooliface.h"
+
+/*
+ The stack
+ ~~~~~~~~~
+ The stack's segment seems to be dynamically extended downwards by
+ the kernel as the stack pointer moves down. Initially, a 1-page
+ (4k) stack is allocated. When SP moves below that for the first
+ time, presumably a page fault occurs. The kernel detects that the
+ faulting address is in the range from SP - VG_STACK_REDZONE_SZB
+ upwards to the current valid stack. It then extends the stack
+ segment downwards for enough to cover the faulting address, and
+ resumes the process (invisibly). The process is unaware of any of
+ this.
+
+ That means that Valgrind can't spot when the stack segment is being
+ extended. Fortunately, we want to precisely and continuously
+ update stack permissions around SP, so we need to spot all writes
+ to SP anyway.
+
+ The deal is: when SP is assigned a lower value, the stack is being
+ extended. Create suitably-permissioned pages to fill in any holes
+ between the old stack ptr and this one, if necessary. Then mark
+ all bytes in the area just "uncovered" by this SP change as
+ write-only.
+
+ When SP goes back up, mark the area receded over as unreadable and
+ unwritable.
+
+ Just to record the SP boundary conditions somewhere convenient:
+ SP - VG_STACK_REDZONE_SZB always points to the lowest live byte in
+ the stack. All addresses below SP - VG_STACK_REDZONE_SZB are not
+ live; those at and above it are.
+
+ We do not concern ourselves here with the VG_STACK_REDZONE_SZB
+ bias; that is handled by new_mem_stack/die_mem_stack.
+*/
+
+/*
+ * This structure holds information about the start and end addresses of
+ * registered stacks. There's always at least one stack registered:
+ * the main process stack. It will be the first stack registered and
+ * so will have a stack id of 0. The user does not need to register
+ * this stack: Valgrind does it automatically right before it starts
+ * running the client. No other stacks are automatically registered by
+ * Valgrind, however.
+ */
+typedef struct _Stack {
+   UWord id;             /* handle returned by VG_(register_stack) */
+   Addr start;           /* lowest address of the stack (inclusive) */
+   Addr end;             /* highest address of the stack (inclusive) */
+   struct _Stack *next;  /* next entry in the singly-linked list */
+} Stack;
+
+/* Head of the singly-linked list of all registered stacks. */
+static Stack *stacks;
+static UWord next_id;  /* Next id we hand out to a newly registered stack */
+
+/*
+ * These are the id, start and end values of the current stack.  If the
+ * stack pointer falls outside the range of the current stack, we search
+ * the stacks list above for a matching stack.
+ */
+static Stack *current_stack;
+
+/* Find 'st' in the stacks list and move it one step closer to the
+   front of the list, so as to make subsequent searches for it
+   cheaper. */
+static void move_Stack_one_step_forward ( Stack* st )
+{
+   Stack *st0, *st1, *st2;
+   if (st == stacks)
+      return; /* already at head of list */
+   vg_assert(st != NULL);
+   /* Walk the list until st0 lands on 'st'; st1 trails one node
+      behind st0, and st2 trails two behind. */
+   st0 = stacks;
+   st1 = NULL;
+   st2 = NULL;
+   while (True) {
+      if (st0 == NULL || st0 == st) break;
+      st2 = st1;
+      st1 = st0;
+      st0 = st0->next;
+   }
+   /* 'st' must be somewhere in the list, since it isn't the head. */
+   vg_assert(st0 == st);
+   if (st0 != NULL && st1 != NULL && st2 != NULL) {
+      Stack* tmp;
+      /* st0 points to st, st1 to its predecessor, and st2 to st1's
+         predecessor.  Swap st0 and st1, that is, move st0 one step
+         closer to the start of the list. */
+      vg_assert(st2->next == st1);
+      vg_assert(st1->next == st0);
+      tmp = st0->next;
+      st2->next = st0;
+      st0->next = st1;
+      st1->next = tmp;
+   }
+   else
+   if (st0 != NULL && st1 != NULL && st2 == NULL) {
+      /* it's second in the list; swapping makes it the new head. */
+      vg_assert(stacks == st1);
+      vg_assert(st1->next == st0);
+      st1->next = st0->next;
+      st0->next = st1;
+      stacks = st0;
+   }
+}
+
+/* Find what stack an address falls into. */
+static Stack* find_stack_by_addr(Addr sp)
+{
+   /* Search statistics, kept across calls, feeding the disabled
+      debug printout below. */
+   static UWord n_fails = 0;
+   static UWord n_searches = 0;
+   static UWord n_steps = 0;
+   Stack *i = stacks;
+   n_searches++;
+   /* Disabled ('0 &&') periodic statistics dump. */
+   if (0 && 0 == (n_searches % 10000))
+      VG_(printf)("(hgdev) %lu searches, %lu steps, %lu fails\n",
+                  n_searches, n_steps+1, n_fails);
+   /* fast track common case: sp is within the list-head stack. */
+   if (i && sp >= i->start && sp <= i->end)
+      return i;
+   /* else search the list */
+   while (i) {
+      n_steps++;
+      if (sp >= i->start && sp <= i->end) {
+         /* On every 64th search, nudge the found entry one step
+            towards the list head, so frequently-hit stacks migrate
+            forward and future searches get cheaper. */
+         if (1 && (n_searches & 0x3F) == 0) {
+            move_Stack_one_step_forward( i );
+         }
+         return i;
+      }
+      i = i->next;
+   }
+   n_fails++;
+   return NULL;
+}
+
+/*
+ * Register a new stack from start - end. This is invoked from the
+ * VALGRIND_STACK_REGISTER client request, and is also called just before
+ * we start the client running, to register the main process stack.
+ */
+/* Register the address range [start, end] as a client stack and return
+   its newly allocated id.  The bounds are normalised so that
+   start <= end before being recorded. */
+UWord VG_(register_stack)(Addr start, Addr end)
+{
+   Stack *sta;
+
+   /* Normalise so that start is the lower bound. */
+   if (start > end) {
+      Addr tmp = start;
+      start = end;
+      end = tmp;
+   }
+
+   sta = (Stack *)VG_(arena_malloc)(VG_AR_CORE, "stacks.rs.1", sizeof(Stack));
+   sta->start = start;
+   sta->end = end;
+   sta->id = next_id++;
+
+   /* Push onto the front of the list of known stacks. */
+   sta->next = stacks;
+   stacks = sta;
+
+   /* Stack 0 is the main process stack; it becomes current at once. */
+   if (sta->id == 0)
+      current_stack = sta;
+
+   VG_(debugLog)(2, "stacks", "register %p-%p as stack %lu\n",
+                 (void*)start, (void*)end, sta->id);
+
+   return sta->id;
+}
+
+/*
+ * Deregister a stack. This is invoked from the VALGRIND_STACK_DEREGISTER
+ * client request.
+ */
+/* Remove the stack with the given id from the list and free its
+   record.  Unknown ids are silently ignored. */
+void VG_(deregister_stack)(UWord id)
+{
+   Stack **link = &stacks;
+
+   VG_(debugLog)(2, "stacks", "deregister stack %lu\n", id);
+
+   /* The deregistered stack can no longer be the current one. */
+   if (current_stack && current_stack->id == id) {
+      current_stack = NULL;
+   }
+
+   /* Walk the list through a pointer-to-link, so unlinking the head
+      needs no special case. */
+   while (*link) {
+      Stack *sta = *link;
+      if (sta->id == id) {
+         *link = sta->next;
+         VG_(arena_free)(VG_AR_CORE, sta);
+         return;
+      }
+      link = &sta->next;
+   }
+}
+
+/*
+ * Change a stack.  This is invoked from the VALGRIND_STACK_CHANGE client
+ * request and from the stack growth code in the signals module when
+ * extending the main process stack.
+ */
+/* Update the recorded bounds of the stack with the given id.
+   An unknown id makes this a silent no-op. */
+void VG_(change_stack)(UWord id, Addr start, Addr end)
+{
+   Stack *sta;
+
+   for (sta = stacks; sta != NULL; sta = sta->next) {
+      if (sta->id != id)
+         continue;
+      VG_(debugLog)(2, "stacks", "change stack %lu from %p-%p to %p-%p\n",
+                    id, (void*)sta->start, (void*)sta->end,
+                    (void*)start, (void*)end);
+      sta->start = start;
+      sta->end = end;
+      return;
+   }
+}
+
+/*
+ * Find the bounds of the stack (if any) which includes the
+ * specified stack pointer.
+ */
+/* Report the bounds of the registered stack containing SP, if any.
+   When SP lies in no registered stack, *start and *end are left
+   untouched, so callers' preset defaults survive. */
+void VG_(stack_limits)(Addr SP, Addr *start, Addr *end )
+{
+   Stack* sta = find_stack_by_addr(SP);
+
+   if (sta == NULL)
+      return;
+
+   *start = sta->start;
+   *end   = sta->end;
+}
+
+/* This function gets called if new_mem_stack and/or die_mem_stack are
+ tracked by the tool, and one of the specialised cases
+ (eg. new_mem_stack_4) isn't used in preference.
+*/
+VG_REGPARM(3)
+void VG_(unknown_SP_update)( Addr old_SP, Addr new_SP, UInt ecu )
+{
+   /* Rate-limiter: at most 3 "switching stacks?" warnings per run. */
+   static Int moans = 3;
+   Word delta = (Word)new_SP - (Word)old_SP;
+
+   /* Check if the stack pointer is still in the same stack as before. */
+   if (current_stack == NULL ||
+       new_SP < current_stack->start || new_SP > current_stack->end) {
+      Stack* new_stack = find_stack_by_addr(new_SP);
+      if (new_stack
+          && (current_stack == NULL || new_stack->id != current_stack->id)) {
+         /* The stack pointer is now in another stack.  Update the current
+            stack information and return without doing anything else. */
+         current_stack = new_stack;
+         return;
+      }
+   }
+
+   if (delta < -VG_(clo_max_stackframe) || VG_(clo_max_stackframe) < delta) {
+      /* SP has changed by more than some threshold amount (by
+         default, 2MB).  We take this to mean that the application is
+         switching to a new stack, for whatever reason.
+
+         JRS 20021001: following discussions with John Regehr, if a stack
+         switch happens, it seems best not to mess at all with memory
+         permissions.  Seems to work well with Netscape 4.X.  Really the
+         only remaining difficulty is knowing exactly when a stack switch is
+         happening. */
+      if (VG_(clo_verbosity) > 0 && moans > 0 && !VG_(clo_xml)) {
+         moans--;
+         VG_(message)(Vg_UserMsg,
+            "Warning: client switching stacks?  "
+            "SP change: 0x%lx --> 0x%lx", old_SP, new_SP);
+         VG_(message)(Vg_UserMsg,
+            "         to suppress, use: --max-stackframe=%ld or greater",
+            (delta < 0 ? -delta : delta));
+         if (moans == 0)
+            VG_(message)(Vg_UserMsg,
+                "         further instances of this message "
+                "will not be shown.");
+      }
+   } else if (delta < 0) {
+      /* SP moved down: the uncovered area becomes live stack.  'ecu'
+         is passed through to the ECU-aware tool callback. */
+      VG_TRACK( new_mem_stack_w_ECU, new_SP, -delta, ecu );
+      VG_TRACK( new_mem_stack, new_SP, -delta );
+
+   } else if (delta > 0) {
+      /* SP moved up: the receded-over area is dead stack. */
+      VG_TRACK( die_mem_stack, old_SP, delta );
+   }
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/m_stacktrace.c.svn-base b/coregrind/.svn/text-base/m_stacktrace.c.svn-base
new file mode 100644
index 0000000..aeb98ad
--- /dev/null
+++ b/coregrind/.svn/text-base/m_stacktrace.c.svn-base
@@ -0,0 +1,580 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Take snapshots of client stacks. m_stacktrace.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_debuginfo.h" // XXX: circular dependency
+#include "pub_core_aspacemgr.h" // For VG_(is_addressable)()
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_machine.h"
+#include "pub_core_options.h"
+#include "pub_core_stacks.h" // VG_(stack_limits)
+#include "pub_core_stacktrace.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h" // VG_(client__dl_sysinfo_int80)
+#include "pub_core_trampoline.h"
+
+/*------------------------------------------------------------*/
+/*--- Exported functions. ---*/
+/*------------------------------------------------------------*/
+
+/* Take a snapshot of the client's stack, putting up to 'n_ips'
+   IPs into 'ips'.  In order to be thread-safe, we pass in the
+   thread's IP, SP, FP if that's meaningful, and LR if that's
+   meaningful.  Returns number of IPs put in 'ips'.
+
+   If you know what the thread ID for this stack is, send that as the
+   first parameter, else send zero.  This helps generate better stack
+   traces on ppc64-linux and has no effect on other platforms.
+*/
+UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+                               /*OUT*/Addr* ips, UInt n_ips,
+                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
+                               Addr ip, Addr sp, Addr fp, Addr lr,
+                               Addr fp_min, Addr fp_max_orig )
+{
+   /* Platform-conditional locals: the ppc ports need to know whether
+      LR holds the innermost return address, and how much of the
+      hidden redirection stack has been consumed while unwinding. */
+# if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
+     || defined(VGP_ppc32_aix5) \
+     || defined(VGP_ppc64_aix5)
+   Bool lr_is_first_RA = False;
+# endif
+# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5) \
+     || defined(VGP_ppc32_aix5)
+   Word redir_stack_size = 0;
+   Word redirs_used = 0;
+# endif
+
+   Bool debug = False;
+   Int i;
+   Addr fp_max;
+   UInt n_found = 0;
+
+   vg_assert(sizeof(Addr) == sizeof(UWord));
+   vg_assert(sizeof(Addr) == sizeof(void*));
+
+   /* Snaffle IPs from the client's stack into ips[0 .. n_ips-1],
+      stopping when the trail goes cold, which we guess to be
+      when FP is not a reasonable stack location. */
+
+   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
+   // current page, at least.  Dunno if it helps.
+   // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
+   fp_max = VG_PGROUNDUP(fp_max_orig);
+   if (fp_max >= sizeof(Addr))
+      fp_max -= sizeof(Addr);
+
+   if (debug)
+      VG_(printf)("n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
+                  "fp_max=0x%lx ip=0x%lx fp=0x%lx\n",
+                  n_ips, fp_min, fp_max_orig, fp_max, ip, fp);
+
+   /* Assertion broken before main() is reached in pthreaded programs; the
+    * offending stack traces only have one item.  --njn, 2002-aug-16 */
+   /* vg_assert(fp_min <= fp_max);*/
+   if (fp_min + 512 >= fp_max) {
+      /* If the stack limits look bogus, don't poke around ... but
+         don't bomb out either. */
+      if (sps) sps[0] = sp;
+      if (fps) fps[0] = fp;
+      ips[0] = ip;
+      return 1;
+   }
+
+   /* Otherwise unwind the stack in a platform-specific way.  Trying
+      to merge the x86, amd64, ppc32 and ppc64 logic into a single
+      piece of code is just too confusing and difficult to
+      performance-tune.  */
+
+# if defined(VGP_x86_linux)
+
+   /*--------------------- x86 ---------------------*/
+
+   /* fp is %ebp.  sp is %esp.  ip is %eip. */
+
+   if (sps) sps[0] = sp;
+   if (fps) fps[0] = fp;
+   ips[0] = ip;
+   i = 1;
+
+   /* Loop unwinding the stack. Note that the IP value we get on
+    * each pass (whether from CFI info or a stack frame) is a
+    * return address so is actually after the calling instruction
+    * in the calling function.
+    *
+    * Because of this we subtract one from the IP after each pass
+    * of the loop so that we find the right CFI block on the next
+    * pass - otherwise we can find the wrong CFI info if it happens
+    * to change after the calling instruction and that will mean
+    * that we will fail to unwind the next step.
+    *
+    * This most frequently happens at the end of a function when
+    * a tail call occurs and we wind up using the CFI info for the
+    * next function which is completely wrong.
+    */
+   while (True) {
+
+      if (i >= n_ips)
+         break;
+
+      /* Try to derive a new (ip,sp,fp) triple from the current
+         set. */
+
+      /* On x86, first try the old-fashioned method of following the
+         %ebp-chain.  Code which doesn't use this (that is, compiled
+         with -fomit-frame-pointer) is not ABI compliant and so
+         relatively rare.  Besides, trying the CFI first almost always
+         fails, and is expensive. */
+      /* Deal with frames resulting from functions which begin "pushl%
+         ebp ; movl %esp, %ebp" which is the ABI-mandated preamble. */
+      if (fp_min <= fp && fp <= fp_max
+                          - 1 * sizeof(UWord)/*see comment below*/) {
+         /* fp looks sane, so use it. */
+         ip = (((UWord*)fp)[1]);
+         sp = fp + sizeof(Addr) /*saved %ebp*/
+                 + sizeof(Addr) /*ra*/;
+         fp = (((UWord*)fp)[0]);
+         if (sps) sps[i] = sp;
+         if (fps) fps[i] = fp;
+         ips[i++] = ip - 1; /* -1: refer to calling insn, not the RA */
+         if (debug)
+            VG_(printf)("     ipsF[%d]=0x%08lx\n", i-1, ips[i-1]);
+         ip = ip - 1; /* as per comment at the head of this loop */
+         continue;
+      }
+
+      /* That didn't work out, so see if there is any CF info to hand
+         which can be used. */
+      if ( VG_(use_CF_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
+         if (sps) sps[i] = sp;
+         if (fps) fps[i] = fp;
+         ips[i++] = ip - 1; /* -1: refer to calling insn, not the RA */
+         if (debug)
+            VG_(printf)("     ipsC[%d]=0x%08lx\n", i-1, ips[i-1]);
+         ip = ip - 1; /* as per comment at the head of this loop */
+         continue;
+      }
+
+      /* And, similarly, try for MSVC FPO unwind info. */
+      if ( VG_(use_FPO_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
+         if (sps) sps[i] = sp;
+         if (fps) fps[i] = fp;
+         ips[i++] = ip;
+         if (debug)
+            VG_(printf)("     ipsC[%d]=0x%08lx\n", i-1, ips[i-1]);
+         ip = ip - 1;
+         continue;
+      }
+
+      /* No luck.  We have to give up. */
+      break;
+   }
+
+# elif defined(VGP_amd64_linux)
+
+   /*--------------------- amd64 ---------------------*/
+
+   /* fp is %rbp.  sp is %rsp.  ip is %rip. */
+
+   ips[0] = ip;
+   if (sps) sps[0] = sp;
+   if (fps) fps[0] = fp;
+   i = 1;
+
+   /* Loop unwinding the stack. Note that the IP value we get on
+    * each pass (whether from CFI info or a stack frame) is a
+    * return address so is actually after the calling instruction
+    * in the calling function.
+    *
+    * Because of this we subtract one from the IP after each pass
+    * of the loop so that we find the right CFI block on the next
+    * pass - otherwise we can find the wrong CFI info if it happens
+    * to change after the calling instruction and that will mean
+    * that we will fail to unwind the next step.
+    *
+    * This most frequently happens at the end of a function when
+    * a tail call occurs and we wind up using the CFI info for the
+    * next function which is completely wrong.
+    */
+   while (True) {
+
+      if (i >= n_ips)
+         break;
+
+      /* Try to derive a new (ip,sp,fp) triple from the current
+         set. */
+
+      /* First off, see if there is any CFI info to hand which can
+         be used. */
+      if ( VG_(use_CF_info)( &ip, &sp, &fp, fp_min, fp_max ) ) {
+         if (sps) sps[i] = sp;
+         if (fps) fps[i] = fp;
+         ips[i++] = ip - 1; /* -1: refer to calling insn, not the RA */
+         if (debug)
+            VG_(printf)("     ipsC[%d]=%#08lx\n", i-1, ips[i-1]);
+         ip = ip - 1; /* as per comment at the head of this loop */
+         continue;
+      }
+
+      /* If VG_(use_CF_info) fails, it won't modify ip/sp/fp, so
+         we can safely try the old-fashioned method. */
+      /* This bit is supposed to deal with frames resulting from
+         functions which begin "pushq %rbp ; movq %rsp, %rbp".
+         Unfortunately, since we can't (easily) look at the insns at
+         the start of the fn, like GDB does, there's no reliable way
+         to tell.  Hence the hack of first trying out CFI, and if that
+         fails, then use this as a fallback. */
+      /* Note: re "- 1 * sizeof(UWord)", need to take account of the
+         fact that we are prodding at & ((UWord*)fp)[1] and so need to
+         adjust the limit check accordingly.  Omitting this has been
+         observed to cause segfaults on rare occasions. */
+      if (fp_min <= fp && fp <= fp_max - 1 * sizeof(UWord)) {
+         /* fp looks sane, so use it. */
+         ip = (((UWord*)fp)[1]);
+         sp = fp + sizeof(Addr) /*saved %rbp*/
+                 + sizeof(Addr) /*ra*/;
+         fp = (((UWord*)fp)[0]);
+         if (sps) sps[i] = sp;
+         if (fps) fps[i] = fp;
+         ips[i++] = ip - 1; /* -1: refer to calling insn, not the RA */
+         if (debug)
+            VG_(printf)("     ipsF[%d]=%#08lx\n", i-1, ips[i-1]);
+         ip = ip - 1; /* as per comment at the head of this loop */
+         continue;
+      }
+
+      /* Last-ditch hack (evidently GDB does something similar).  We
+         are in the middle of nowhere and we have a nonsense value for
+         the frame pointer.  If the stack pointer is still valid,
+         assume that what it points at is a return address.  Yes,
+         desperate measures.  Could do better here:
+         - check that the supposed return address is in
+           an executable page
+         - check that the supposed return address is just after a call insn
+         - given those two checks, don't just consider *sp as the return
+           address; instead scan a likely section of stack (eg sp .. sp+256)
+           and use suitable values found there.
+      */
+      if (fp_min <= sp && sp < fp_max) {
+         ip = ((UWord*)sp)[0];
+         if (sps) sps[i] = sp;
+         if (fps) fps[i] = fp;
+         ips[i++] = ip == 0
+                    ? 0 /* sp[0] == 0 ==> stuck at the bottom of a
+                           thread stack */
+                    : ip - 1; /* -1: refer to calling insn, not the RA */
+         if (debug)
+            VG_(printf)("     ipsH[%d]=%#08lx\n", i-1, ips[i-1]);
+         ip = ip - 1; /* as per comment at the head of this loop */
+         sp += 8;
+         continue;
+      }
+
+      /* No luck at all.  We have to give up. */
+      break;
+   }
+
+# elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
+       || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+
+   /*--------------------- ppc32/64 ---------------------*/
+
+   /* fp is %r1.  ip is %cia.  Note, ppc uses r1 as both the stack and
+      frame pointers. */
+
+# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
+   redir_stack_size = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
+   redirs_used      = 0;
+# elif defined(VGP_ppc32_aix5)
+   redir_stack_size = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
+   redirs_used      = 0;
+# endif
+
+# if defined(VG_PLAT_USES_PPCTOC)
+   /* Deal with bogus LR values caused by function
+      interception/wrapping on ppc-TOC platforms; see comment on
+      similar code a few lines further down. */
+   if (ULong_to_Ptr(lr) == (void*)&VG_(ppctoc_magic_redirect_return_stub)
+       && VG_(is_valid_tid)(tid_if_known)) {
+      Word hsp = VG_(threads)[tid_if_known].arch.vex.guest_REDIR_SP;
+      redirs_used++;
+      if (hsp >= 1 && hsp < redir_stack_size)
+         lr = VG_(threads)[tid_if_known]
+                 .arch.vex.guest_REDIR_STACK[hsp-1];
+   }
+# endif
+
+   /* We have to determine whether or not LR currently holds this fn
+      (call it F)'s return address.  It might not if F has previously
+      called some other function, hence overwriting LR with a pointer
+      to some part of F.  Hence if LR and IP point to the same
+      function then we conclude LR does not hold this function's
+      return address; instead the LR at entry must have been saved in
+      the stack by F's prologue and so we must get it from there
+      instead.  Note all this guff only applies to the innermost
+      frame. */
+   lr_is_first_RA = False;
+   {
+#     define M_VG_ERRTXT 1000
+      UChar buf_lr[M_VG_ERRTXT], buf_ip[M_VG_ERRTXT];
+      /* The following conditional looks grossly inefficient and
+         surely could be majorly improved, with not much effort. */
+      if (VG_(get_fnname_raw) (lr, buf_lr, M_VG_ERRTXT))
+         if (VG_(get_fnname_raw) (ip, buf_ip, M_VG_ERRTXT))
+            if (VG_(strncmp)(buf_lr, buf_ip, M_VG_ERRTXT))
+               lr_is_first_RA = True;
+#     undef M_VG_ERRTXT
+   }
+
+   if (sps) sps[0] = fp; /* NB. not sp */
+   if (fps) fps[0] = fp;
+   ips[0] = ip;
+   i = 1;
+
+   if (fp_min <= fp && fp < fp_max-VG_WORDSIZE+1) {
+
+      /* initial FP is sane; keep going */
+      fp = (((UWord*)fp)[0]);
+
+      while (True) {
+
+         /* On ppc64-linux (ppc64-elf, really), and on AIX, the lr save
+            slot is 2 words back from sp, whereas on ppc32-elf(?) it's
+            only one word back. */
+#        if defined(VGP_ppc64_linux) \
+            || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+         const Int lr_offset = 2;
+#        else
+         const Int lr_offset = 1;
+#        endif
+
+         if (i >= n_ips)
+            break;
+
+         /* Try to derive a new (ip,fp) pair from the current set. */
+
+         if (fp_min <= fp && fp <= fp_max - lr_offset * sizeof(UWord)) {
+            /* fp looks sane, so use it. */
+
+            if (i == 1 && lr_is_first_RA)
+               ip = lr;
+            else
+               ip = (((UWord*)fp)[lr_offset]);
+
+#           if defined(VG_PLAT_USES_PPCTOC)
+            /* Nasty hack to do with function replacement/wrapping on
+               ppc64-linux/ppc64-aix/ppc32-aix.  If LR points to our
+               magic return stub, then we are in a wrapped or
+               intercepted function, in which LR has been messed with.
+               The original LR will have been pushed onto the thread's
+               hidden REDIR stack one down from the top (top element
+               is the saved R2) and so we should restore the value
+               from there instead.  Since nested redirections can and
+               do happen, we keep track of the number of nested LRs
+               used by the unwinding so far with 'redirs_used'. */
+            if (ip == (Addr)&VG_(ppctoc_magic_redirect_return_stub)
+                && VG_(is_valid_tid)(tid_if_known)) {
+               Word hsp = VG_(threads)[tid_if_known]
+                             .arch.vex.guest_REDIR_SP;
+               hsp -= 2 * redirs_used;
+               redirs_used ++;
+               if (hsp >= 1 && hsp < redir_stack_size)
+                  ip = VG_(threads)[tid_if_known]
+                          .arch.vex.guest_REDIR_STACK[hsp-1];
+            }
+#           endif
+
+            fp = (((UWord*)fp)[0]);
+            if (sps) sps[i] = fp; /* NB. not sp */
+            if (fps) fps[i] = fp;
+            ips[i++] = ip - 1; /* -1: refer to calling insn, not the RA */
+            if (debug)
+               VG_(printf)("     ipsF[%d]=%#08lx\n", i-1, ips[i-1]);
+            ip = ip - 1; /* ip is probably dead at this point, but
+                            play safe, a la x86/amd64 above.  See
+                            extensive comments above. */
+            continue;
+         }
+
+         /* No luck there.  We have to give up. */
+         break;
+      }
+   }
+
+# else
+#   error "Unknown platform"
+# endif
+
+   n_found = i;
+   return n_found;
+}
+
+/* Public entry point: snapshot the stack of thread 'tid' into 'ips'
+   (and optionally 'sps'/'fps'), after adjusting the starting IP by
+   'first_ip_delta'.  Delegates the actual unwinding to
+   VG_(get_StackTrace_wrk). */
+UInt VG_(get_StackTrace) ( ThreadId tid,
+                           /*OUT*/StackTrace ips, UInt n_ips,
+                           /*OUT*/StackTrace sps,
+                           /*OUT*/StackTrace fps,
+                           Word first_ip_delta )
+{
+   /* thread in thread table */
+   Addr ip = VG_(get_IP)(tid);
+   Addr fp = VG_(get_FP)(tid);
+   Addr sp = VG_(get_SP)(tid);
+   Addr lr = VG_(get_LR)(tid);
+   Addr stack_highest_word = VG_(threads)[tid].client_stack_highest_word;
+   Addr stack_lowest_word = 0;
+
+# if defined(VGP_x86_linux)
+   /* Nasty little hack to deal with syscalls - if libc is using its
+      _dl_sysinfo_int80 function for syscalls (the TLS version does),
+      then ip will always appear to be in that function when doing a
+      syscall, not the actual libc function doing the syscall.  This
+      check sees if IP is within that function, and pops the return
+      address off the stack so that ip is placed within the library
+      function calling the syscall.  This makes stack backtraces much
+      more useful.
+
+      The function is assumed to look like this (from glibc-2.3.6 sources):
+         _dl_sysinfo_int80:
+            int $0x80
+            ret
+      That is 3 (2+1) bytes long.  We could be more thorough and check
+      the 3 bytes of the function are as expected, but I can't be
+      bothered.
+   */
+   if (VG_(client__dl_sysinfo_int80) != 0 /* we know its address */
+       && ip >= VG_(client__dl_sysinfo_int80)
+       && ip < VG_(client__dl_sysinfo_int80)+3
+       && VG_(am_is_valid_for_client)(sp, sizeof(Addr), VKI_PROT_READ)) {
+      ip = *(Addr *)sp;
+      sp += sizeof(Addr);
+   }
+# endif
+
+   /* See if we can get a better idea of the stack limits */
+   VG_(stack_limits)(sp, &stack_lowest_word, &stack_highest_word);
+
+   /* Take into account the first_ip_delta. */
+   vg_assert( sizeof(Addr) == sizeof(Word) );
+   ip += first_ip_delta;
+
+   /* Disabled debug printout. */
+   if (0)
+      VG_(printf)("tid %d: stack_highest=0x%08lx ip=0x%08lx "
+                  "sp=0x%08lx fp=0x%08lx\n",
+                  tid, stack_highest_word, ip, sp, fp);
+
+   return VG_(get_StackTrace_wrk)(tid, ips, n_ips,
+                                  sps, fps,
+                                  ip, sp, fp, lr, sp,
+                                  stack_highest_word);
+}
+
+/* Describe one stack-trace entry and print it, in XML or plain form
+   depending on the --xml setting. */
+static void printIpDesc(UInt n, Addr ip)
+{
+   #define BUF_LEN 4096
+
+   static UChar buf[BUF_LEN];
+
+   VG_(describe_IP)(ip, buf, BUF_LEN);
+
+   if (VG_(clo_xml)) {
+      VG_(message)(Vg_UserMsg, " %s", buf);
+      return;
+   }
+
+   /* Frame 0 is the fault site ("at"); all callers are "by". */
+   VG_(message)(Vg_UserMsg, " %s %s", ( n == 0 ? "at" : "by" ), buf);
+}
+
+/* Print a StackTrace. */
+/* Print a StackTrace, one line per IP, wrapped in <stack> tags when
+   XML output is selected. */
+void VG_(pp_StackTrace) ( StackTrace ips, UInt n_ips )
+{
+   Bool xml = VG_(clo_xml);
+
+   vg_assert( n_ips > 0 );
+
+   if (xml)
+      VG_(message)(Vg_UserMsg, " <stack>");
+
+   VG_(apply_StackTrace)( printIpDesc, ips, n_ips );
+
+   if (xml)
+      VG_(message)(Vg_UserMsg, " </stack>");
+}
+
+/* Get and immediately print a StackTrace. */
+/* Convenience: snapshot at most n_ips frames of 'tid's stack and
+   print them straight away. */
+void VG_(get_and_pp_StackTrace) ( ThreadId tid, UInt n_ips )
+{
+   Addr frames[n_ips];
+   UInt got;
+
+   got = VG_(get_StackTrace)(tid, frames, n_ips,
+                             NULL/*array to dump SP values in*/,
+                             NULL/*array to dump FP values in*/,
+                             0/*first_ip_delta*/);
+   VG_(pp_StackTrace)(frames, got);
+}
+
+
+/* Apply 'action' to each IP in the trace, in order.  Stops early on a
+   0 or ~0 sentinel value, or — unless --show-below-main=yes — after
+   the first frame recognised as main or below main has been acted on.
+   Fix: dropped a stray '#undef MYBUF_LEN' at the end of the function;
+   no such macro is defined anywhere in this file, so the directive was
+   dead leftover code (a no-op per C99 6.10.3.5). */
+void VG_(apply_StackTrace)( void(*action)(UInt n, Addr ip),
+                            StackTrace ips, UInt n_ips )
+{
+   Bool main_done = False;
+   Int i = 0;
+
+   vg_assert(n_ips > 0);
+   do {
+      Addr ip = ips[i];
+
+      // Stop after the first appearance of "main" or one of the other names
+      // (the appearance of which is a pretty good sign that we've gone past
+      // main without seeing it, for whatever reason)
+      if ( ! VG_(clo_show_below_main) ) {
+         Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(ip);
+         if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
+            main_done = True;
+         }
+      }
+
+      // Act on the ip
+      action(i, ip);
+
+      i++;
+      // re 'while' condition: stop if we hit a zero value (the traditional
+      // end-of-stack marker) or a ~0 value.  The latter because r8818
+      // (in this file) changes the meaning of entries [1] and above in a
+      // stack trace, by subtracting 1 from them.  Hence stacks that used
+      // to end with a zero value now end in -1 and so we must detect
+      // that too.
+   } while (i < n_ips && ips[i] != 0 && ips[i] != ~(Addr)0 && !main_done);
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_syscall.c.svn-base b/coregrind/.svn/text-base/m_syscall.c.svn-base
new file mode 100644
index 0000000..676254a
--- /dev/null
+++ b/coregrind/.svn/text-base/m_syscall.c.svn-base
@@ -0,0 +1,567 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Doing syscalls. m_syscall.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_syscall.h"
+
+/* ---------------------------------------------------------------------
+ Building syscall return values.
+ ------------------------------------------------------------------ */
+
+/* Make a SysRes value from an syscall return value. This is
+ Linux-specific.
+
+ From:
+ http://sources.redhat.com/cgi-bin/cvsweb.cgi/libc/sysdeps/unix/sysv/
+ linux/i386/sysdep.h?
+ rev=1.28&content-type=text/x-cvsweb-markup&cvsroot=glibc
+
+ Linux uses a negative return value to indicate syscall errors,
+ unlike most Unices, which use the condition codes' carry flag.
+
+ Since version 2.1 the return value of a system call might be
+ negative even if the call succeeded. E.g., the 'lseek' system call
+ might return a large offset. Therefore we must not anymore test
+ for < 0, but test for a real error by making sure the value in %eax
+ is a real error number. Linus said he will make sure the no
+ syscall returns a value in -1 .. -4095 as a valid result so we can
+ safely test with -4095.
+*/
+SysRes VG_(mk_SysRes_x86_linux) ( UInt val ) {
+ SysRes res;
+ res.isError = val >= -4095 && val <= -1;
+ if (res.isError) {
+ res.err = -val;
+ res.res = 0;
+ } else {
+ res.err = 0;
+ res.res = val;
+ }
+ return res;
+}
+
+/* Similarly .. */
+SysRes VG_(mk_SysRes_amd64_linux) ( ULong val ) {
+ SysRes res;
+ res.isError = val >= -4095 && val <= -1;
+ if (res.isError) {
+ res.err = -val;
+ res.res = 0;
+ } else {
+ res.err = 0;
+ res.res = val;
+ }
+ return res;
+}
+
+/* PPC uses the CR7.SO bit to flag an error (CR0 in IBM-speak) */
+/* Note this must be in the bottom bit of the second arg */
+SysRes VG_(mk_SysRes_ppc32_linux) ( UInt val, UInt cr0so ) {
+ SysRes res;
+ res.isError = (cr0so & 1) != 0;
+ if (res.isError) {
+ res.err = val;
+ res.res = 0;
+ } else {
+ res.err = 0;
+ res.res = val;
+ }
+ return res;
+}
+
+/* As per ppc32 version, cr0.so must be in l.s.b. of 2nd arg */
+SysRes VG_(mk_SysRes_ppc64_linux) ( ULong val, ULong cr0so ) {
+ SysRes res;
+ res.isError = (cr0so & 1) != 0;
+ if (res.isError) {
+ res.err = val;
+ res.res = 0;
+ } else {
+ res.err = 0;
+ res.res = val;
+ }
+ return res;
+}
+
+/* AIX scheme: we have to record both 'res' (r3) and 'err' (r4). If
+ 'err' is nonzero then the call has failed, but it could still be
+ that AIX userspace will ignore 'err' and instead consult 'res' to
+ determine if the call failed. So we have to record both. */
+SysRes VG_(mk_SysRes_ppc32_aix5) ( UInt res, UInt err ) {
+ SysRes r;
+ r.res = res;
+ r.err = err;
+ r.isError = r.err != 0;
+ return r;
+}
+
+SysRes VG_(mk_SysRes_ppc64_aix5) ( ULong res, ULong err ) {
+ SysRes r;
+ r.res = res;
+ r.err = err;
+ r.isError = r.err != 0;
+ return r;
+}
+
+/* Generic constructors. */
+SysRes VG_(mk_SysRes_Error) ( UWord err ) {
+ SysRes r;
+ r.res = 0;
+ r.err = err;
+ r.isError = True;
+ return r;
+}
+
+SysRes VG_(mk_SysRes_Success) ( UWord res ) {
+ SysRes r;
+ r.res = res;
+ r.err = 0;
+ r.isError = False;
+ return r;
+}
+
+
+/* ---------------------------------------------------------------------
+ A function for doing syscalls.
+ ------------------------------------------------------------------ */
+
+#if defined(VGP_x86_linux)
+/* Incoming args (syscall number + up to 6 args) come on the stack.
+ (ie. the C calling convention).
+
+ The syscall number goes in %eax. The args are passed to the syscall in
+ the regs %ebx, %ecx, %edx, %esi, %edi, %ebp, ie. the kernel's syscall
+ calling convention.
+
+ %eax gets the return value. Not sure which registers the kernel
+ clobbers, so we preserve all the callee-save regs (%esi, %edi, %ebx,
+ %ebp).
+*/
+extern UWord do_syscall_WRK (
+ UWord syscall_no,
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6
+ );
+asm(
+".text\n"
+"do_syscall_WRK:\n"
+" push %esi\n"
+" push %edi\n"
+" push %ebx\n"
+" push %ebp\n"
+" movl 16+ 4(%esp),%eax\n"
+" movl 16+ 8(%esp),%ebx\n"
+" movl 16+12(%esp),%ecx\n"
+" movl 16+16(%esp),%edx\n"
+" movl 16+20(%esp),%esi\n"
+" movl 16+24(%esp),%edi\n"
+" movl 16+28(%esp),%ebp\n"
+" int $0x80\n"
+" popl %ebp\n"
+" popl %ebx\n"
+" popl %edi\n"
+" popl %esi\n"
+" ret\n"
+".previous\n"
+);
+
+#elif defined(VGP_amd64_linux)
+/* Incoming args (syscall number + up to 6 args) come in %rdi, %rsi,
+ %rdx, %rcx, %r8, %r9, and the last one on the stack (ie. the C
+ calling convention).
+
+ The syscall number goes in %rax. The args are passed to the syscall in
+ the regs %rdi, %rsi, %rdx, %r10, %r8, %r9 (yes, really %r10, not %rcx),
+ ie. the kernel's syscall calling convention.
+
+ %rax gets the return value. %rcx and %r11 are clobbered by the syscall;
+ no matter, they are caller-save (the syscall clobbers no callee-save
+ regs, so we don't have to do any register saving/restoring).
+*/
+extern UWord do_syscall_WRK (
+ UWord syscall_no,
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6
+ );
+asm(
+".text\n"
+"do_syscall_WRK:\n"
+ /* Convert function calling convention --> syscall calling
+ convention */
+" movq %rdi, %rax\n"
+" movq %rsi, %rdi\n"
+" movq %rdx, %rsi\n"
+" movq %rcx, %rdx\n"
+" movq %r8, %r10\n"
+" movq %r9, %r8\n"
+" movq 8(%rsp), %r9\n" /* last arg from stack */
+" syscall\n"
+" ret\n"
+".previous\n"
+);
+
+#elif defined(VGP_ppc32_linux)
+/* Incoming args (syscall number + up to 6 args) come in %r3:%r9.
+
+ The syscall number goes in %r0. The args are passed to the syscall in
+ the regs %r3:%r8, i.e. the kernel's syscall calling convention.
+
+ The %cr0.so bit flags an error.
+ We return the syscall return value in %r3, and the %cr0.so in
+ the lowest bit of %r4.
+ We return a ULong, of which %r3 is the high word, and %r4 the low.
+ No callee-save regs are clobbered, so no saving/restoring is needed.
+*/
+extern ULong do_syscall_WRK (
+ UWord syscall_no,
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6
+ );
+asm(
+".text\n"
+"do_syscall_WRK:\n"
+" mr 0,3\n"
+" mr 3,4\n"
+" mr 4,5\n"
+" mr 5,6\n"
+" mr 6,7\n"
+" mr 7,8\n"
+" mr 8,9\n"
+" sc\n" /* syscall: sets %cr0.so on error */
+" mfcr 4\n" /* %cr -> low word of return var */
+" rlwinm 4,4,4,31,31\n" /* rotate flag bit so to lsb, and mask it */
+" blr\n" /* and return */
+".previous\n"
+);
+
+#elif defined(VGP_ppc64_linux)
+/* Due to the need to return 65 bits of result, this is completely
+ different from the ppc32 case. The single arg register points to a
+ 7-word block containing the syscall # and the 6 args. The syscall
+ result proper is put in [0] of the block, and %cr0.so is in the
+ bottom but of [1]. */
+extern void do_syscall_WRK ( ULong* argblock );
+asm(
+".align 2\n"
+".globl do_syscall_WRK\n"
+".section \".opd\",\"aw\"\n"
+".align 3\n"
+"do_syscall_WRK:\n"
+".quad .do_syscall_WRK,.TOC.@tocbase,0\n"
+".previous\n"
+".type .do_syscall_WRK,@function\n"
+".globl .do_syscall_WRK\n"
+".do_syscall_WRK:\n"
+" std 3,-16(1)\n" /* stash arg */
+" ld 8, 48(3)\n" /* sc arg 6 */
+" ld 7, 40(3)\n" /* sc arg 5 */
+" ld 6, 32(3)\n" /* sc arg 4 */
+" ld 5, 24(3)\n" /* sc arg 3 */
+" ld 4, 16(3)\n" /* sc arg 2 */
+" ld 0, 0(3)\n" /* sc number */
+" ld 3, 8(3)\n" /* sc arg 1 */
+" sc\n" /* result in r3 and cr0.so */
+" ld 5,-16(1)\n" /* reacquire argblock ptr (r5 is caller-save) */
+" std 3,0(5)\n" /* argblock[0] = r3 */
+" mfcr 3\n"
+" srwi 3,3,28\n"
+" andi. 3,3,1\n"
+" std 3,8(5)\n" /* argblock[1] = cr0.s0 & 1 */
+" blr\n"
+);
+
+#elif defined(VGP_ppc32_aix5)
+static void do_syscall_WRK ( UWord* res_r3, UWord* res_r4,
+ UWord sysno,
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6,
+ UWord a7, UWord a8 )
+{
+ /* Syscalls on AIX are very similar to function calls:
+ - up to 8 args in r3-r10
+ - syscall number in r2
+ - kernel resumes at 'lr', so must set it appropriately beforehand
+ - r3 holds the result and r4 any applicable error code
+ See http://www.cs.utexas.edu/users/cart/publications/tr00-04.ps
+ and also 'man truss'.
+ */
+ /* For some reason gcc-3.3.2 doesn't preserve r31 across the asm
+ even though we state it to be trashed. So use r27 instead. */
+ UWord args[9];
+ args[0] = sysno;
+ args[1] = a1; args[2] = a2;
+ args[3] = a3; args[4] = a4;
+ args[5] = a5; args[6] = a6;
+ args[7] = a7; args[8] = a8;
+
+ __asm__ __volatile__(
+
+ // establish base ptr
+ "mr 28,%0\n\t"
+
+ // save r2, lr
+ "mr 27,2\n\t" // save r2 in r27
+ "mflr 30\n\t" // save lr in r30
+
+ // set syscall number and args
+ "lwz 2, 0(28)\n\t"
+ "lwz 3, 4(28)\n\t"
+ "lwz 4, 8(28)\n\t"
+ "lwz 5, 12(28)\n\t"
+ "lwz 6, 16(28)\n\t"
+ "lwz 7, 20(28)\n\t"
+ "lwz 8, 24(28)\n\t"
+ "lwz 9, 28(28)\n\t"
+ "lwz 10, 32(28)\n\t"
+
+ // set bit 3 of CR1 otherwise AIX 5.1 returns to the
+ // wrong address after the sc instruction
+ "crorc 6,6,6\n\t"
+
+ // set up LR to point just after the sc insn
+ ".long 0x48000005\n\t" // "bl here+4" -- lr := & next insn
+ "mflr 29\n\t"
+ "addi 29,29,16\n\t"
+ "mtlr 29\n\t"
+
+ // do it!
+ "sc\n\t"
+
+ // result is now in r3; save it in args[0]
+ "stw 3,0(28)\n\t"
+ // error code in r4; save it in args[1]
+ "stw 4,4(28)\n\t"
+
+ // restore
+ "mr 2,27\n\t"
+ "mtlr 30\n\t"
+
+ : /*out*/
+ : /*in*/ "b" (&args[0])
+ : /*trash*/
+ /*temps*/ "r31","r30","r29","r28","r27",
+ /*args*/ "r3","r4","r5","r6","r7","r8","r9","r10",
+ /*paranoia*/ "memory","cc","r0","r1","r11","r12","r13",
+ "xer","ctr","cr0","cr1","cr2","cr3",
+ "cr4","cr5","cr6","cr7"
+ );
+
+ *res_r3 = args[0];
+ *res_r4 = args[1];
+}
+
+#elif defined(VGP_ppc64_aix5)
+static void do_syscall_WRK ( UWord* res_r3, UWord* res_r4,
+ UWord sysno,
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6,
+ UWord a7, UWord a8 )
+{
+ /* Same scheme as ppc32-aix5. */
+ UWord args[9];
+ args[0] = sysno;
+ args[1] = a1; args[2] = a2;
+ args[3] = a3; args[4] = a4;
+ args[5] = a5; args[6] = a6;
+ args[7] = a7; args[8] = a8;
+
+ __asm__ __volatile__(
+
+ // establish base ptr
+ "mr 28,%0\n\t"
+
+ // save r2, lr
+ "mr 27,2\n\t" // save r2 in r27
+ "mflr 30\n\t" // save lr in r30
+
+ // set syscall number and args
+ "ld 2, 0(28)\n\t"
+ "ld 3, 8(28)\n\t"
+ "ld 4, 16(28)\n\t"
+ "ld 5, 24(28)\n\t"
+ "ld 6, 32(28)\n\t"
+ "ld 7, 40(28)\n\t"
+ "ld 8, 48(28)\n\t"
+ "ld 9, 56(28)\n\t"
+ "ld 10, 64(28)\n\t"
+
+ // set bit 3 of CR1 otherwise AIX 5.1 returns to the
+ // wrong address after the sc instruction
+ "crorc 6,6,6\n\t"
+
+ // set up LR to point just after the sc insn
+ ".long 0x48000005\n\t" // "bl here+4" -- lr := & next insn
+ "mflr 29\n\t"
+ "addi 29,29,16\n\t"
+ "mtlr 29\n\t"
+
+ // do it!
+ "sc\n\t"
+
+ // result is now in r3; save it in args[0]
+ "std 3,0(28)\n\t"
+ // error code in r4; save it in args[1]
+ "std 4,8(28)\n\t"
+
+ // restore
+ "mr 2,27\n\t"
+ "mtlr 30\n\t"
+
+ : /*out*/
+ : /*in*/ "b" (&args[0])
+ : /*trash*/
+ /*temps*/ "r31","r30","r29","r28","r27",
+ /*args*/ "r3","r4","r5","r6","r7","r8","r9","r10",
+ /*paranoia*/ "memory","cc","r0","r1","r11","r12","r13",
+ "xer","ctr","cr0","cr1","cr2","cr3",
+ "cr4","cr5","cr6","cr7"
+ );
+
+ *res_r3 = args[0];
+ *res_r4 = args[1];
+}
+
+#else
+# error Unknown platform
+#endif
+
+
+SysRes VG_(do_syscall) ( UWord sysno, UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6,
+ UWord a7, UWord a8 )
+{
+#if defined(VGP_x86_linux)
+ UWord val = do_syscall_WRK(sysno,a1,a2,a3,a4,a5,a6);
+ return VG_(mk_SysRes_x86_linux)( val );
+
+#elif defined(VGP_amd64_linux)
+ UWord val = do_syscall_WRK(sysno,a1,a2,a3,a4,a5,a6);
+ return VG_(mk_SysRes_amd64_linux)( val );
+
+#elif defined(VGP_ppc32_linux)
+ ULong ret = do_syscall_WRK(sysno,a1,a2,a3,a4,a5,a6);
+ UInt val = (UInt)(ret>>32);
+ UInt cr0so = (UInt)(ret);
+ return VG_(mk_SysRes_ppc32_linux)( val, cr0so );
+
+#elif defined(VGP_ppc64_linux)
+ ULong argblock[7];
+ argblock[0] = sysno;
+ argblock[1] = a1;
+ argblock[2] = a2;
+ argblock[3] = a3;
+ argblock[4] = a4;
+ argblock[5] = a5;
+ argblock[6] = a6;
+ do_syscall_WRK( &argblock[0] );
+ return VG_(mk_SysRes_ppc64_linux)( argblock[0], argblock[1] );
+
+#elif defined(VGP_ppc32_aix5)
+ UWord res;
+ UWord err;
+ do_syscall_WRK( &res, &err,
+ sysno, a1, a2, a3, a4, a5, a6, a7, a8);
+ /* Try to set the error number to zero if the syscall hasn't
+ really failed. */
+ if (sysno == __NR_AIX5_kread
+ || sysno == __NR_AIX5_kwrite) {
+ if (res != (UWord)-1L)
+ err = 0;
+ }
+ else if (sysno == __NR_AIX5_sigprocmask
+ || sysno == __NR_AIX5__sigpending) {
+ if (res == 0)
+ err = 0;
+ }
+
+ return VG_(mk_SysRes_ppc32_aix5)( res, err );
+
+#elif defined(VGP_ppc64_aix5)
+ UWord res;
+ UWord err;
+ do_syscall_WRK( &res, &err,
+ sysno, a1, a2, a3, a4, a5, a6, a7, a8);
+ /* Try to set the error number to zero if the syscall hasn't
+ really failed. */
+ if (sysno == __NR_AIX5_kread
+ || sysno == __NR_AIX5_kwrite) {
+ if (res != (UWord)-1L)
+ err = 0;
+ }
+ else if (sysno == __NR_AIX5_sigprocmask
+ || sysno == __NR_AIX5__sigpending) {
+ if (res == 0)
+ err = 0;
+ }
+
+ return VG_(mk_SysRes_ppc64_aix5)( res, err );
+
+#else
+# error Unknown platform
+#endif
+}
+
+/* ---------------------------------------------------------------------
+ Names of errors.
+ ------------------------------------------------------------------ */
+
+/* Return a string which gives the name of an error value. Note,
+ unlike the standard C syserror fn, the returned string is not
+ malloc-allocated or writable -- treat it as a constant.
+ TODO: implement this properly. */
+
+const HChar* VG_(strerror) ( UWord errnum )
+{
+ switch (errnum) {
+ case VKI_EPERM: return "Operation not permitted";
+ case VKI_ENOENT: return "No such file or directory";
+ case VKI_ESRCH: return "No such process";
+ case VKI_EINTR: return "Interrupted system call";
+ case VKI_EBADF: return "Bad file number";
+ case VKI_EAGAIN: return "Try again";
+ case VKI_ENOMEM: return "Out of memory";
+ case VKI_EACCES: return "Permission denied";
+ case VKI_EFAULT: return "Bad address";
+ case VKI_EEXIST: return "File exists";
+ case VKI_EINVAL: return "Invalid argument";
+ case VKI_EMFILE: return "Too many open files";
+ case VKI_ENOSYS: return "Function not implemented";
+ case VKI_EOVERFLOW: return "Value too large for defined data type";
+ case VKI_ERESTARTSYS: return "ERESTARTSYS";
+ default: return "VG_(strerror): unknown error";
+ }
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_threadstate.c.svn-base b/coregrind/.svn/text-base/m_threadstate.c.svn-base
new file mode 100644
index 0000000..30a2cf8
--- /dev/null
+++ b/coregrind/.svn/text-base/m_threadstate.c.svn-base
@@ -0,0 +1,143 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The thread state. m_threadstate.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_libcassert.h"
+
+/*------------------------------------------------------------*/
+/*--- Data structures. ---*/
+/*------------------------------------------------------------*/
+
+ThreadId VG_(running_tid) = VG_INVALID_THREADID;
+
+ThreadState VG_(threads)[VG_N_THREADS];
+
+/*------------------------------------------------------------*/
+/*--- Operations. ---*/
+/*------------------------------------------------------------*/
+
+const HChar* VG_(name_of_ThreadStatus) ( ThreadStatus status )
+{
+ switch (status) {
+ case VgTs_Empty: return "VgTs_Empty";
+ case VgTs_Init: return "VgTs_Init";
+ case VgTs_Runnable: return "VgTs_Runnable";
+ case VgTs_WaitSys: return "VgTs_WaitSys";
+ case VgTs_Yielding: return "VgTs_Yielding";
+ case VgTs_Zombie: return "VgTs_Zombie";
+ default: return "VgTs_???";
+ }
+}
+
+ThreadState *VG_(get_ThreadState)(ThreadId tid)
+{
+ vg_assert(tid >= 0 && tid < VG_N_THREADS);
+ vg_assert(VG_(threads)[tid].tid == tid);
+ return &VG_(threads)[tid];
+}
+
+Bool VG_(is_valid_tid) ( ThreadId tid )
+{
+ /* tid is unsigned, hence no < 0 test. */
+ if (tid == 0) return False;
+ if (tid >= VG_N_THREADS) return False;
+ if (VG_(threads)[tid].status == VgTs_Empty) return False;
+ return True;
+}
+
+// This function is for tools to call.
+ThreadId VG_(get_running_tid)(void)
+{
+ return VG_(running_tid);
+}
+
+Bool VG_(is_running_thread)(ThreadId tid)
+{
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+
+ return
+// tst->os_state.lwpid == VG_(gettid)() && // check we're this tid
+ VG_(running_tid) == tid && // and that we've got the lock
+ tst->status == VgTs_Runnable; // and we're runnable
+}
+
+/* Return true if the thread is still alive but in the process of exiting. */
+inline Bool VG_(is_exiting)(ThreadId tid)
+{
+ vg_assert(VG_(is_valid_tid)(tid));
+ return VG_(threads)[tid].exitreason != VgSrc_None;
+}
+
+/* Return the number of non-dead Threads */
+Int VG_(count_living_threads)(void)
+{
+ Int count = 0;
+ ThreadId tid;
+
+ for(tid = 1; tid < VG_N_THREADS; tid++)
+ if (VG_(threads)[tid].status != VgTs_Empty &&
+ VG_(threads)[tid].status != VgTs_Zombie)
+ count++;
+
+ return count;
+}
+
+/* Return the number of threads in VgTs_Runnable state */
+Int VG_(count_runnable_threads)(void)
+{
+ Int count = 0;
+ ThreadId tid;
+
+ for(tid = 1; tid < VG_N_THREADS; tid++)
+ if (VG_(threads)[tid].status == VgTs_Runnable)
+ count++;
+
+ return count;
+}
+
+/* Given an LWP id (ie, real kernel thread id), find the corresponding
+ ThreadId */
+ThreadId VG_(lwpid_to_vgtid)(Int lwp)
+{
+ ThreadId tid;
+
+ for(tid = 1; tid < VG_N_THREADS; tid++)
+ if (VG_(threads)[tid].status != VgTs_Empty
+ && VG_(threads)[tid].os_state.lwpid == lwp)
+ return tid;
+
+ return VG_INVALID_THREADID;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_tooliface.c.svn-base b/coregrind/.svn/text-base/m_tooliface.c.svn-base
new file mode 100644
index 0000000..79a5e4b
--- /dev/null
+++ b/coregrind/.svn/text-base/m_tooliface.c.svn-base
@@ -0,0 +1,413 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Stuff relating to tool data structures. ---*/
+/*--- m_tooliface.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_tooliface.h"
+
+// The core/tool dictionary of functions (initially zeroed, as we want it)
+VgToolInterface VG_(tdict);
+
+/*--------------------------------------------------------------------*/
+/* Setting basic functions */
+
+void VG_(basic_tool_funcs)(
+ void(*post_clo_init)(void),
+ IRSB*(*instrument)(VgCallbackClosure*, IRSB*,
+ VexGuestLayout*, VexGuestExtents*, IRType, IRType),
+ void(*fini)(Int)
+)
+{
+ VG_(tdict).tool_post_clo_init = post_clo_init;
+ VG_(tdict).tool_instrument = instrument;
+ VG_(tdict).tool_fini = fini;
+}
+
+
+/*--------------------------------------------------------------------*/
+/* Setting details */
+
+/* Init with default values. */
+VgDetails VG_(details) = {
+ .name = NULL,
+ .version = NULL,
+ .description = NULL,
+ .copyright_author = NULL,
+ .bug_reports_to = NULL,
+ .avg_translation_sizeB = VG_DEFAULT_TRANS_SIZEB,
+};
+
+/* Use macro because they're so repetitive */
+#define DETAILS(type, detail) \
+ extern void VG_(details_##detail)(type detail) \
+ { \
+ VG_(details).detail = detail; \
+ }
+
+DETAILS(Char*, name)
+DETAILS(Char*, version)
+DETAILS(Char*, description)
+DETAILS(Char*, copyright_author)
+DETAILS(Char*, bug_reports_to)
+DETAILS(UInt, avg_translation_sizeB)
+
+
+/*--------------------------------------------------------------------*/
+/* Setting needs */
+
+VgNeeds VG_(needs) = {
+ .core_errors = False,
+ .tool_errors = False,
+ .libc_freeres = False,
+ .superblock_discards = False,
+ .command_line_options = False,
+ .client_requests = False,
+ .syscall_wrapper = False,
+ .sanity_checks = False,
+ .var_info = False,
+ .malloc_replacement = False,
+ .xml_output = False,
+ .final_IR_tidy_pass = False
+};
+
+/* static */
+Bool VG_(sanity_check_needs)(Char** failmsg)
+{
+ Bool any_new_mem_stack_N, any_new_mem_stack_N_w_ECU;
+ Bool any_new_mem_stack_w_conflicting_otags;
+ Bool any_die_mem_stack_N;
+
+#define CHECK_NOT(var, value) \
+ if ((var)==(value)) { \
+ *failmsg = "Tool error: '" #var "' not initialised\n"; \
+ return False; \
+ }
+
+ /* Ones that must be set */
+ CHECK_NOT(VG_(details).name, NULL);
+ /* Nb: .version can be NULL */
+ CHECK_NOT(VG_(details).description, NULL);
+ CHECK_NOT(VG_(details).copyright_author, NULL);
+ CHECK_NOT(VG_(details).bug_reports_to, NULL);
+
+ /* Check that new_mem_stack is defined if any new_mem_stack_N
+ are. */
+ any_new_mem_stack_N
+ = VG_(tdict).track_new_mem_stack_4 ||
+ VG_(tdict).track_new_mem_stack_8 ||
+ VG_(tdict).track_new_mem_stack_12 ||
+ VG_(tdict).track_new_mem_stack_16 ||
+ VG_(tdict).track_new_mem_stack_32 ||
+ VG_(tdict).track_new_mem_stack_112 ||
+ VG_(tdict).track_new_mem_stack_128 ||
+ VG_(tdict).track_new_mem_stack_144 ||
+ VG_(tdict).track_new_mem_stack_160;
+
+ if (any_new_mem_stack_N && ! VG_(tdict).track_new_mem_stack) {
+ *failmsg = "Tool error: one of the specialised 'new_mem_stack_N'\n"
+ " events tracked, but not the generic 'new_mem_stack' one.\n"
+ " 'new_mem_stack' should be defined\n";
+ return False;
+ }
+
+ /* Check that new_mem_stack_w_ECU is defined if any
+ new_mem_stack_N_w_ECU are. */
+ any_new_mem_stack_N_w_ECU
+ = VG_(tdict).track_new_mem_stack_4_w_ECU ||
+ VG_(tdict).track_new_mem_stack_8_w_ECU ||
+ VG_(tdict).track_new_mem_stack_12_w_ECU ||
+ VG_(tdict).track_new_mem_stack_16_w_ECU ||
+ VG_(tdict).track_new_mem_stack_32_w_ECU ||
+ VG_(tdict).track_new_mem_stack_112_w_ECU ||
+ VG_(tdict).track_new_mem_stack_128_w_ECU ||
+ VG_(tdict).track_new_mem_stack_144_w_ECU ||
+ VG_(tdict).track_new_mem_stack_160_w_ECU;
+
+ if (any_new_mem_stack_N_w_ECU && ! VG_(tdict).track_new_mem_stack_w_ECU) {
+ *failmsg = "Tool error: one of the specialised 'new_mem_stack_N_w_ECU'\n"
+ " events tracked, but not the generic 'new_mem_stack_w_ECU' one.\n"
+ " 'new_mem_stack_w_ECU' should be defined\n";
+ return False;
+ }
+
+ /* Check that in no cases are both with- and without-otag versions of the
+ same new_mem_stack_ function defined. */
+ any_new_mem_stack_w_conflicting_otags
+ = (VG_(tdict).track_new_mem_stack_4 && VG_(tdict).track_new_mem_stack_4_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_8 && VG_(tdict).track_new_mem_stack_8_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_12 && VG_(tdict).track_new_mem_stack_12_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_16 && VG_(tdict).track_new_mem_stack_16_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_32 && VG_(tdict).track_new_mem_stack_32_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_112 && VG_(tdict).track_new_mem_stack_112_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_128 && VG_(tdict).track_new_mem_stack_128_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_144 && VG_(tdict).track_new_mem_stack_144_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack_160 && VG_(tdict).track_new_mem_stack_160_w_ECU) ||
+ (VG_(tdict).track_new_mem_stack && VG_(tdict).track_new_mem_stack_w_ECU);
+
+ if (any_new_mem_stack_w_conflicting_otags) {
+ *failmsg = "Tool error: tool supplies both a 'new_mem_stack_N' and a\n"
+ " 'new_mem_stack_N_w_ECU' function for some N (or none),\n"
+ " but you can only have one or the other (not both)\n";
+ return False;
+ }
+
+ /* Check that die_mem_stack is defined if any die_mem_stack_N
+ are. */
+ any_die_mem_stack_N
+ = VG_(tdict).track_die_mem_stack_4 ||
+ VG_(tdict).track_die_mem_stack_8 ||
+ VG_(tdict).track_die_mem_stack_12 ||
+ VG_(tdict).track_die_mem_stack_16 ||
+ VG_(tdict).track_die_mem_stack_32 ||
+ VG_(tdict).track_die_mem_stack_112 ||
+ VG_(tdict).track_die_mem_stack_128 ||
+ VG_(tdict).track_die_mem_stack_144 ||
+ VG_(tdict).track_die_mem_stack_160;
+
+ if (any_die_mem_stack_N && ! VG_(tdict).track_die_mem_stack) {
+ *failmsg = "Tool error: one of the specialised 'die_mem_stack_N'\n"
+ " events tracked, but not the generic 'die_mem_stack' one.\n"
+ " 'die_mem_stack' should be defined\n";
+ return False;
+ }
+
+ return True;
+
+#undef CHECK_NOT
+}
+
+/* Use macro because they're so repetitive */
+#define NEEDS(need) \
+ extern void VG_(needs_##need)(void) \
+ { \
+ VG_(needs).need = True; \
+ }
+
+// These ones don't require any tool-supplied functions
+NEEDS(libc_freeres)
+NEEDS(core_errors)
+NEEDS(var_info)
+NEEDS(xml_output)
+
+void VG_(needs_superblock_discards)(
+ void (*discard)(Addr64, VexGuestExtents)
+)
+{
+ VG_(needs).superblock_discards = True;
+ VG_(tdict).tool_discard_superblock_info = discard;
+}
+
+void VG_(needs_tool_errors)(
+ Bool (*eq) (VgRes, Error*, Error*),
+ void (*pp) (Error*),
+ Bool show_TIDs,
+ UInt (*update) (Error*),
+ Bool (*recog) (Char*, Supp*),
+ Bool (*read_extra) (Int, Char*, Int, Supp*),
+ Bool (*matches) (Error*, Supp*),
+ Char* (*name) (Error*),
+ void (*print_extra)(Error*)
+)
+{
+ VG_(needs).tool_errors = True;
+ VG_(tdict).tool_eq_Error = eq;
+ VG_(tdict).tool_pp_Error = pp;
+ VG_(tdict).tool_show_ThreadIDs_for_errors = show_TIDs;
+ VG_(tdict).tool_update_extra = update;
+ VG_(tdict).tool_recognised_suppression = recog;
+ VG_(tdict).tool_read_extra_suppression_info = read_extra;
+ VG_(tdict).tool_error_matches_suppression = matches;
+ VG_(tdict).tool_get_error_name = name;
+ VG_(tdict).tool_print_extra_suppression_info = print_extra;
+}
+
+void VG_(needs_command_line_options)(
+ Bool (*process)(Char*),
+ void (*usage)(void),
+ void (*debug_usage)(void)
+)
+{
+ VG_(needs).command_line_options = True;
+ VG_(tdict).tool_process_cmd_line_option = process;
+ VG_(tdict).tool_print_usage = usage;
+ VG_(tdict).tool_print_debug_usage = debug_usage;
+}
+
+void VG_(needs_client_requests)(
+ Bool (*handle)(ThreadId, UWord*, UWord*)
+)
+{
+ VG_(needs).client_requests = True;
+ VG_(tdict).tool_handle_client_request = handle;
+}
+
+void VG_(needs_syscall_wrapper)(
+ void(*pre) (ThreadId, UInt),
+ void(*post)(ThreadId, UInt, SysRes res)
+)
+{
+ VG_(needs).syscall_wrapper = True;
+ VG_(tdict).tool_pre_syscall = pre;
+ VG_(tdict).tool_post_syscall = post;
+}
+
+void VG_(needs_sanity_checks)(
+ Bool(*cheap)(void),
+ Bool(*expen)(void)
+)
+{
+ VG_(needs).sanity_checks = True;
+ VG_(tdict).tool_cheap_sanity_check = cheap;
+ VG_(tdict).tool_expensive_sanity_check = expen;
+}
+
+void VG_(needs_malloc_replacement)(
+ void* (*malloc) ( ThreadId, SizeT ),
+ void* (*__builtin_new) ( ThreadId, SizeT ),
+ void* (*__builtin_vec_new) ( ThreadId, SizeT ),
+ void* (*memalign) ( ThreadId, SizeT, SizeT ),
+ void* (*calloc) ( ThreadId, SizeT, SizeT ),
+ void (*free) ( ThreadId, void* ),
+ void (*__builtin_delete) ( ThreadId, void* ),
+ void (*__builtin_vec_delete) ( ThreadId, void* ),
+ void* (*realloc) ( ThreadId, void*, SizeT ),
+ SizeT (*malloc_usable_size) ( ThreadId, void* ),
+ SizeT client_malloc_redzone_szB
+)
+{
+ VG_(needs).malloc_replacement = True;
+ VG_(tdict).tool_malloc = malloc;
+ VG_(tdict).tool___builtin_new = __builtin_new;
+ VG_(tdict).tool___builtin_vec_new = __builtin_vec_new;
+ VG_(tdict).tool_memalign = memalign;
+ VG_(tdict).tool_calloc = calloc;
+ VG_(tdict).tool_free = free;
+ VG_(tdict).tool___builtin_delete = __builtin_delete;
+ VG_(tdict).tool___builtin_vec_delete = __builtin_vec_delete;
+ VG_(tdict).tool_realloc = realloc;
+ VG_(tdict).tool_malloc_usable_size = malloc_usable_size;
+ VG_(tdict).tool_client_redzone_szB = client_malloc_redzone_szB;
+}
+
+void VG_(needs_final_IR_tidy_pass)(
+ IRSB*(*final_tidy)(IRSB*)
+)
+{
+ VG_(needs).final_IR_tidy_pass = True;
+ VG_(tdict).tool_final_IR_tidy_pass = final_tidy;
+}
+
+/*--------------------------------------------------------------------*/
+/* Tracked events. Digit 'n' on DEFn is the REGPARMness. */
+
+#define DEF0(fn, args...) \
+void VG_(fn)(void(*f)(args)) { \
+ VG_(tdict).fn = f; \
+}
+
+#define DEF1(fn, args...) \
+void VG_(fn)(VG_REGPARM(1) void(*f)(args)) { \
+ VG_(tdict).fn = f; \
+}
+
+#define DEF2(fn, args...) \
+void VG_(fn)(VG_REGPARM(2) void(*f)(args)) { \
+ VG_(tdict).fn = f; \
+}
+
+DEF0(track_new_mem_startup, Addr, SizeT, Bool, Bool, Bool, ULong)
+DEF0(track_new_mem_stack_signal, Addr, SizeT, UInt)
+DEF0(track_new_mem_brk, Addr, SizeT, UInt)
+DEF0(track_new_mem_mmap, Addr, SizeT, Bool, Bool, Bool, ULong)
+
+DEF0(track_copy_mem_remap, Addr, Addr, SizeT)
+DEF0(track_change_mem_mprotect, Addr, SizeT, Bool, Bool, Bool)
+DEF0(track_die_mem_stack_signal, Addr, SizeT)
+DEF0(track_die_mem_brk, Addr, SizeT)
+DEF0(track_die_mem_munmap, Addr, SizeT)
+
+DEF2(track_new_mem_stack_4_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_8_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_12_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_16_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_32_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_112_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_128_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_144_w_ECU, Addr, UInt)
+DEF2(track_new_mem_stack_160_w_ECU, Addr, UInt)
+DEF0(track_new_mem_stack_w_ECU, Addr, SizeT, UInt)
+
+DEF1(track_new_mem_stack_4, Addr)
+DEF1(track_new_mem_stack_8, Addr)
+DEF1(track_new_mem_stack_12, Addr)
+DEF1(track_new_mem_stack_16, Addr)
+DEF1(track_new_mem_stack_32, Addr)
+DEF1(track_new_mem_stack_112, Addr)
+DEF1(track_new_mem_stack_128, Addr)
+DEF1(track_new_mem_stack_144, Addr)
+DEF1(track_new_mem_stack_160, Addr)
+DEF0(track_new_mem_stack, Addr, SizeT)
+
+DEF1(track_die_mem_stack_4, Addr)
+DEF1(track_die_mem_stack_8, Addr)
+DEF1(track_die_mem_stack_12, Addr)
+DEF1(track_die_mem_stack_16, Addr)
+DEF1(track_die_mem_stack_32, Addr)
+DEF1(track_die_mem_stack_112, Addr)
+DEF1(track_die_mem_stack_128, Addr)
+DEF1(track_die_mem_stack_144, Addr)
+DEF1(track_die_mem_stack_160, Addr)
+DEF0(track_die_mem_stack, Addr, SizeT)
+
+DEF0(track_ban_mem_stack, Addr, SizeT)
+
+DEF0(track_pre_mem_read, CorePart, ThreadId, Char*, Addr, SizeT)
+DEF0(track_pre_mem_read_asciiz, CorePart, ThreadId, Char*, Addr)
+DEF0(track_pre_mem_write, CorePart, ThreadId, Char*, Addr, SizeT)
+DEF0(track_post_mem_write, CorePart, ThreadId, Addr, SizeT)
+
+DEF0(track_pre_reg_read, CorePart, ThreadId, Char*, PtrdiffT, SizeT)
+DEF0(track_post_reg_write, CorePart, ThreadId, PtrdiffT, SizeT)
+
+DEF0(track_post_reg_write_clientcall_return, ThreadId, PtrdiffT, SizeT, Addr)
+
+DEF0(track_start_client_code, ThreadId, ULong)
+DEF0(track_stop_client_code, ThreadId, ULong)
+
+DEF0(track_pre_thread_ll_create, ThreadId, ThreadId)
+DEF0(track_pre_thread_first_insn, ThreadId)
+DEF0(track_pre_thread_ll_exit, ThreadId)
+
+DEF0(track_pre_deliver_signal, ThreadId, Int sigNo, Bool)
+DEF0(track_post_deliver_signal, ThreadId, Int sigNo)
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_trampoline.S.svn-base b/coregrind/.svn/text-base/m_trampoline.S.svn-base
new file mode 100644
index 0000000..d267172
--- /dev/null
+++ b/coregrind/.svn/text-base/m_trampoline.S.svn-base
@@ -0,0 +1,757 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Trampoline code page stuff. m_trampoline.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics_asm.h"
+
+/* We need pub_core_vkiscnums.h, but the AIX5 formulation
+ brings in a load of C declarations. Defining this macro
+ makes them invisible. Yes, a nasty hack. */
+#define VG_IN_ASSEMBLY_SOURCE
+# include "pub_core_vkiscnums.h"
+#undef VG_IN_ASSEMBLY_SOURCE
+
+/* ------------------ SIMULATED CPU HELPERS ------------------ */
+/*
+ Replacements for some functions to do with vsyscalls and signals.
+ This code runs on the simulated CPU.
+*/
+
+/*---------------------- x86-linux ----------------------*/
+#if defined(VGP_x86_linux)
+
+# define UD2_16 ud2 ; ud2 ; ud2 ; ud2 ;ud2 ; ud2 ; ud2 ; ud2
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.global VG_(x86_linux_SUBST_FOR_sigreturn)
+VG_(x86_linux_SUBST_FOR_sigreturn):
+ /* This is a very specific sequence which GDB uses to
+ recognize signal handler frames. Also gcc: see
+ x86_fallback_frame_state() in
+ gcc-4.1.0/gcc/config/i386/linux-unwind.h */
+ popl %eax
+ movl $ __NR_sigreturn, %eax
+ int $0x80
+ ud2
+
+.global VG_(x86_linux_SUBST_FOR_rt_sigreturn)
+VG_(x86_linux_SUBST_FOR_rt_sigreturn):
+ /* Likewise for rt signal frames */
+ movl $ __NR_rt_sigreturn, %eax
+ int $0x80
+ ud2
+
+/* There's no particular reason that this needs to be handwritten
+ assembly, but since that's what this file contains, here's a
+ simple index implementation (written in C and compiled by gcc.)
+
+ unsigned char* REDIR_FOR_index ( const char* s, int c )
+ {
+ unsigned char ch = (unsigned char)((unsigned int)c);
+ unsigned char* p = (unsigned char*)s;
+ while (1) {
+ if (*p == ch) return p;
+ if (*p == 0) return 0;
+ p++;
+ }
+ }
+*/
+.global VG_(x86_linux_REDIR_FOR_index)
+.type VG_(x86_linux_REDIR_FOR_index), @function
+VG_(x86_linux_REDIR_FOR_index):
+ pushl %ebp
+ movl %esp, %ebp
+ movl 8(%ebp), %eax
+ movzbl 12(%ebp), %ecx
+ movzbl (%eax), %edx
+ cmpb %dl, %cl
+ jne .L9
+ jmp .L2
+.L11:
+ addl $1, %eax
+ movzbl (%eax), %edx
+ cmpb %dl, %cl
+ je .L2
+.L9:
+ testb %dl, %dl
+ jne .L11
+ xorl %eax, %eax
+.L2:
+ popl %ebp
+ ret
+.size VG_(x86_linux_REDIR_FOR_index), .-VG_(x86_linux_REDIR_FOR_index)
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
+/*---------------------- amd64-linux ----------------------*/
+#else
+#if defined(VGP_amd64_linux)
+
+# define UD2_16 ud2 ; ud2 ; ud2 ; ud2 ;ud2 ; ud2 ; ud2 ; ud2
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.global VG_(amd64_linux_SUBST_FOR_rt_sigreturn)
+VG_(amd64_linux_SUBST_FOR_rt_sigreturn):
+ /* This is a very specific sequence which GDB uses to
+ recognize signal handler frames. */
+ movq $__NR_rt_sigreturn, %rax
+ syscall
+ ud2
+
+.global VG_(amd64_linux_REDIR_FOR_vgettimeofday)
+.type VG_(amd64_linux_REDIR_FOR_vgettimeofday), @function
+VG_(amd64_linux_REDIR_FOR_vgettimeofday):
+.LfnB2:
+ movq $__NR_gettimeofday, %rax
+ syscall
+ ret
+.LfnE2:
+.size VG_(amd64_linux_REDIR_FOR_vgettimeofday), .-.LfnB2
+
+.global VG_(amd64_linux_REDIR_FOR_vtime)
+.type VG_(amd64_linux_REDIR_FOR_vtime), @function
+VG_(amd64_linux_REDIR_FOR_vtime):
+.LfnB3:
+ movq $__NR_time, %rax
+ syscall
+ ret
+.LfnE3:
+.size VG_(amd64_linux_REDIR_FOR_vtime), .-.LfnB3
+
+/* A CIE for the above two functions, followed by their FDEs */
+ .section .eh_frame,"a",@progbits
+.Lframe1:
+ .long .LEcie1-.LScie1
+.LScie1:
+ .long 0x0
+ .byte 0x1
+ .string "zR"
+ .uleb128 0x1
+ .sleb128 -8
+ .byte 0x10
+ .uleb128 0x1
+ .byte 0x3
+ .byte 0xc
+ .uleb128 0x7
+ .uleb128 0x8
+ .byte 0x90
+ .uleb128 0x1
+ .align 8
+.LEcie1:
+.LSfde2:
+ .long .LEfde2-.LASfde2
+.LASfde2:
+ .long .LASfde2-.Lframe1
+ .long .LfnB2
+ .long .LfnE2-.LfnB2
+ .uleb128 0x0
+ .align 8
+.LEfde2:
+.LSfde3:
+ .long .LEfde3-.LASfde3
+.LASfde3:
+ .long .LASfde3-.Lframe1
+ .long .LfnB3
+ .long .LfnE3-.LfnB3
+ .uleb128 0x0
+ .align 8
+.LEfde3:
+ .previous
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
+/*---------------- ppc32-linux ----------------*/
+#else
+#if defined(VGP_ppc32_linux)
+
+# define UD2_16 trap ; trap ; trap; trap
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.global VG_(ppc32_linux_SUBST_FOR_sigreturn)
+VG_(ppc32_linux_SUBST_FOR_sigreturn):
+ li 0,__NR_sigreturn
+ sc
+ .long 0 /*illegal insn*/
+
+.global VG_(ppc32_linux_SUBST_FOR_rt_sigreturn)
+VG_(ppc32_linux_SUBST_FOR_rt_sigreturn):
+ li 0,__NR_rt_sigreturn
+ sc
+ .long 0 /*illegal insn*/
+
+/* There's no particular reason that this needs to be handwritten
+ assembly, but since that's what this file contains, here's a
+ simple strlen implementation (written in C and compiled by gcc.)
+*/
+.global VG_(ppc32_linux_REDIR_FOR_strlen)
+.type VG_(ppc32_linux_REDIR_FOR_strlen), @function
+VG_(ppc32_linux_REDIR_FOR_strlen):
+ lbz 4,0(3)
+ li 9,0
+ cmpwi 0,4,0
+ beq- 0,.L18
+.L19:
+ lbzu 5,1(3)
+ addi 9,9,1
+ cmpwi 0,5,0
+ bne+ 0,.L19
+.L18:
+ mr 3,9
+ blr
+.size VG_(ppc32_linux_REDIR_FOR_strlen), .-VG_(ppc32_linux_REDIR_FOR_strlen)
+
+/* Ditto strcmp */
+.global VG_(ppc32_linux_REDIR_FOR_strcmp)
+.type VG_(ppc32_linux_REDIR_FOR_strcmp), @function
+VG_(ppc32_linux_REDIR_FOR_strcmp):
+.L20:
+ lbz 0,0(3)
+ cmpwi 7,0,0
+ bne- 7,.L21
+ lbz 0,0(4)
+ li 11,0
+ cmpwi 7,0,0
+ beq- 7,.L22
+.L21:
+ lbz 0,0(3)
+ li 11,-1
+ cmpwi 7,0,0
+ beq- 7,.L22
+ lbz 0,0(4)
+ li 11,1
+ cmpwi 7,0,0
+ beq- 7,.L22
+ lbz 9,0(3)
+ lbz 0,0(4)
+ li 11,-1
+ cmplw 7,9,0
+ blt- 7,.L22
+ lbz 9,0(3)
+ lbz 0,0(4)
+ li 11,1
+ addi 3,3,1
+ addi 4,4,1
+ cmplw 7,9,0
+ ble+ 7,.L20
+.L22:
+ mr 3,11
+ blr
+.size VG_(ppc32_linux_REDIR_FOR_strcmp), .-VG_(ppc32_linux_REDIR_FOR_strcmp)
+
+/* Ditto index/strchr */
+.global VG_(ppc32_linux_REDIR_FOR_strchr)
+.type VG_(ppc32_linux_REDIR_FOR_strchr), @function
+VG_(ppc32_linux_REDIR_FOR_strchr):
+ lbz 0,0(3)
+ rlwinm 4,4,0,0xff
+ cmpw 7,4,0
+ beqlr 7
+ cmpwi 7,0,0
+ bne 7,.L308
+ b .L304
+.L309:
+ beq 6,.L304
+.L308:
+ lbzu 0,1(3)
+ cmpw 7,4,0
+ cmpwi 6,0,0
+ bne 7,.L309
+ blr
+.L304:
+ li 3,0
+ blr
+.size VG_(ppc32_linux_REDIR_FOR_strchr),.-VG_(ppc32_linux_REDIR_FOR_strchr)
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
+/*---------------- ppc64-linux ----------------*/
+#else
+#if defined(VGP_ppc64_linux)
+
+# define UD2_16 trap ; trap ; trap; trap
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.global VG_(ppc64_linux_SUBST_FOR_rt_sigreturn)
+VG_(ppc64_linux_SUBST_FOR_rt_sigreturn):
+ li 0,__NR_rt_sigreturn
+ sc
+ .long 0 /*illegal insn*/
+
+ /* See comment in pub_core_trampoline.h for what this is for */
+.global VG_(ppctoc_magic_redirect_return_stub)
+VG_(ppctoc_magic_redirect_return_stub):
+ trap
+
+ /* this function is written using the "dotless" ABI convention */
+ .align 2
+ .globl VG_(ppc64_linux_REDIR_FOR_strlen)
+ .section ".opd","aw"
+ .align 3
+VG_(ppc64_linux_REDIR_FOR_strlen):
+ .quad .L.VG_(ppc64_linux_REDIR_FOR_strlen),.TOC.@tocbase,0
+ .previous
+ .size VG_(ppc64_linux_REDIR_FOR_strlen), \
+ .L0end-.L.VG_(ppc64_linux_REDIR_FOR_strlen)
+ .type VG_(ppc64_linux_REDIR_FOR_strlen), @function
+
+.L.VG_(ppc64_linux_REDIR_FOR_strlen):
+ mr 9,3
+ lbz 0,0(3)
+ li 3,0
+ cmpwi 7,0,0
+ beqlr 7
+ li 3,0
+.L01:
+ addi 0,3,1
+ extsw 3,0
+ lbzx 0,9,3
+ cmpwi 7,0,0
+ bne 7,.L01
+ blr
+ .long 0
+ .byte 0,0,0,0,0,0,0,0
+.L0end:
+
+ /* this function is written using the "dotless" ABI convention */
+ .align 2
+ .globl VG_(ppc64_linux_REDIR_FOR_strchr)
+ .section ".opd","aw"
+ .align 3
+VG_(ppc64_linux_REDIR_FOR_strchr):
+ .quad .L.VG_(ppc64_linux_REDIR_FOR_strchr),.TOC.@tocbase,0
+ .previous
+ .size VG_(ppc64_linux_REDIR_FOR_strchr), \
+ .L1end-.L.VG_(ppc64_linux_REDIR_FOR_strchr)
+ .type VG_(ppc64_linux_REDIR_FOR_strchr),@function
+
+.L.VG_(ppc64_linux_REDIR_FOR_strchr):
+ lbz 0,0(3)
+ rldicl 4,4,0,56
+ cmpw 7,4,0
+ beqlr 7
+ cmpdi 7,0,0
+ bne 7,.L18
+ b .L14
+.L19:
+ beq 6,.L14
+.L18:
+ lbzu 0,1(3)
+ cmpw 7,4,0
+ cmpdi 6,0,0
+ bne 7,.L19
+ blr
+.L14:
+ li 3,0
+ blr
+ .long 0
+ .byte 0,0,0,0,0,0,0,0
+.L1end:
+
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
+/*---------------- ppc32-aix5 ----------------*/
+#else
+#if defined(VGP_ppc32_aix5)
+
+# define UD2_16 trap ; trap ; trap; trap
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ .csect .text[PR]
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.globl VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+/* See pub_core_trampoline.h for an explanation of this.  Also
+ see pub_core_initimg.h, struct AIX5PreloadPage. On entry, r3
+ points to an AIX5PreloadPage structure. Note we can only
+ use r2-r10 as scratch registers here since those are the
+ only ones restored from the preload page when finally
+ starting the client. */
+.globl VG_(ppc32_aix5_do_preloads_then_start_client)
+VG_(ppc32_aix5_do_preloads_then_start_client):
+ stwu 1,-1024(1)
+ stw 3,512(1) /* stash r3 512 bytes up stack */
+
+ /* Try to load .../vgpreload_core.so */
+ lwz 2,0(3) /* r2 = __NR___loadx */
+ lwz 5,20(3) /* r5 = off_preloadcorename */
+ add 6,3,5 /* r6 = preloadcorename */
+ addis 1,1,-4
+ bl do___loadx
+ addis 1,1,4
+ cmpwi 0,3,0
+ beq .Lfailed
+
+ /* Try to load .../vgpreload_tool.so, if it exists */
+ lwz 3,512(1) /* restore r3 */
+ lwz 2,0(3) /* r2 = __NR___loadx */
+ lwz 5,24(3) /* r5 = off_preloadtoolname */
+ cmpwi 0,5,0 /* skip tool preload if */
+ beq .Ltry_preload /* name not present */
+ add 6,3,5 /* r6 = preloadtoolname */
+ addis 1,1,-4
+ bl do___loadx
+ addis 1,1,4
+ cmpwi 0,3,0
+ beq .Lfailed
+
+.Ltry_preload:
+ /* Try to load the LD_PRELOAD= file, if it exists */
+ lwz 3,512(1) /* restore r3 */
+ lwz 2,0(3) /* r2 = __NR___loadx */
+ lwz 5,28(3) /* r5 = off_ld_preloadname */
+ cmpwi 0,5,0 /* skip ld_preload if */
+ beq .Lstart_client /* name not present */
+ add 6,3,5 /* r6 = ld_preloadname */
+ addis 1,1,-4
+ bl do___loadx
+ addis 1,1,4
+ cmpwi 0,3,0
+ beq .Lfailed
+
+.Lstart_client:
+ /* Success. Restore r2-r10 from preloadpage-> and start
+ the client. */
+ lwz 3,512(1) /* restore r3 */
+ addi 1,1,1024
+ lwz 2,32+4(3) /* preloadpage->client_start */
+ mtctr 2
+ lwz 2,40+4(3) /* preloadpage->r2 */
+ lwz 4,56+4(3) /* preloadpage->r4 */
+ lwz 5,64+4(3) /* preloadpage->r5 */
+ lwz 6,72+4(3) /* preloadpage->r6 */
+ lwz 7,80+4(3) /* preloadpage->r7 */
+ lwz 8,88+4(3) /* preloadpage->r8 */
+ lwz 9,96+4(3) /* preloadpage->r9 */
+ lwz 10,104+4(3) /* preloadpage->r10 */
+ lwz 3,48+4(3) /* preloadpage->r3 */
+ bctr
+ /*NOTREACHED*/
+ trap
+
+.Lfailed:
+ /* __loadx barfed for some reason. Print the error
+ message and get out. */
+ /* First the error msg */
+ lwz 3,512(1) /* restore r3 */
+ lwz 2,4(3) /* r2 = __NR_kwrite */
+ lwz 4,12(3) /* r4 = offset of err msg */
+ add 4,4,3 /* r4 = err msg */
+ lwz 5,16(3) /* r5 = length err msg */
+ li 3,2 /* r3 = stderr */
+ bl do_syscall
+ /* now call the diagnosis fn */
+ lwz 3,512(1) /* restore r3 */
+ lwz 4,112(3) /* preloadpage->p_diagnose_load_failure */
+ lwz 2,4(4) /* get its TOC ptr */
+ lwz 4,0(4) /* get its entry point */
+ mtlr 4
+ blrl
+ /* Now do _exit(1) */
+ lwz 3,512(1) /* restore r3 */
+ lwz 2,8(3) /* r2 = __NR_exit */
+ li 3,1 /* doing _exit(1) */
+ addi 1,1,1024 /* fix stack pointer */
+ bl do_syscall
+ /*NOTREACHED*/
+ trap
+
+do___loadx:
+ /* On entry: r2 = __NR___loadx, r6 = name of module */
+ li 3,1
+ slwi 3,3,24 /* r3 = 0x1000000 = VKI_DL_LOAD */
+ mr 4,1
+ lis 5,3
+ li 7,0
+ li 8,0
+ li 9,0
+ li 10,0
+do_syscall:
+ crorc 6,6,6
+ sc
+ trap
+ /* sc continues at 'lr', hence this
+ constitutes an automatic return */
+
+
+ /* See comment in pub_core_trampoline.h for what this is for */
+.globl VG_(ppctoc_magic_redirect_return_stub)
+VG_(ppctoc_magic_redirect_return_stub):
+ trap
+
+.globl VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
+/*---------------- ppc64-aix5 ----------------*/
+#else
+#if defined(VGP_ppc64_aix5)
+
+# define UD2_16 trap ; trap ; trap; trap
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+.globl VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+/* See pub_core_trampoline.h for an explanation of this.  Also
+ see pub_core_initimg.h, struct AIX5PreloadPage. On entry, r3
+ points to an AIX5PreloadPage structure. Note we can only
+ use r2-r10 as scratch registers here since those are the
+ only ones restored from the preload page when finally
+ starting the client. */
+.globl VG_(ppc64_aix5_do_preloads_then_start_client)
+VG_(ppc64_aix5_do_preloads_then_start_client):
+ stdu 1,-1024(1)
+ std 3,512(1) /* stash r3 512 bytes up stack */
+
+ /* Try to load .../vgpreload_core.so */
+ lwz 2,0(3) /* r2 = __NR_kload */
+ lwz 5,20(3) /* r5 = off_preloadcorename */
+	add	3,3,5		 /* r3 = preloadcorename */
+ bl do_kload
+ cmpdi 0,3,0
+ beq .Lfailed
+
+ /* Try to load .../vgpreload_tool.so, if it exists */
+ ld 3,512(1) /* restore r3 */
+ lwz 2,0(3) /* r2 = __NR_kload */
+ lwz 5,24(3) /* r5 = off_preloadtoolname */
+ cmpwi 0,5,0 /* skip tool preload if */
+ beq .Ltry_preload /* name not present */
+	add	3,3,5		 /* r3 = preloadtoolname */
+ bl do_kload
+ cmpdi 0,3,0
+ beq .Lfailed
+
+.Ltry_preload:
+ /* Try to load the LD_PRELOAD= file, if it exists */
+ ld 3,512(1) /* restore r3 */
+ lwz 2,0(3) /* r2 = __NR_kload */
+ lwz 5,28(3) /* r5 = off_ld_preloadname */
+ cmpwi 0,5,0 /* skip ld_preload if */
+ beq .Lstart_client /* name not present */
+	add	3,3,5		/* r3 = ld_preloadname */
+ bl do_kload
+ cmpdi 0,3,0
+ beq .Lfailed
+
+.Lstart_client:
+ /* Success. Restore r2-r10 from preloadpage-> and start
+ the client. */
+ ld 3,512(1) /* restore r3 */
+ addi 1,1,1024
+ ld 2,32+0(3) /* preloadpage->client_start */
+ mtctr 2
+ ld 2,40+0(3) /* preloadpage->r2 */
+ ld 4,56+0(3) /* preloadpage->r4 */
+ ld 5,64+0(3) /* preloadpage->r5 */
+ ld 6,72+0(3) /* preloadpage->r6 */
+ ld 7,80+0(3) /* preloadpage->r7 */
+ ld 8,88+0(3) /* preloadpage->r8 */
+ ld 9,96+0(3) /* preloadpage->r9 */
+ ld 10,104+0(3) /* preloadpage->r10 */
+ ld 3,48+0(3) /* preloadpage->r3 */
+ bctr
+ /*NOTREACHED*/
+ trap
+
+.Lfailed:
+	/* kload barfed for some reason.  Print the error
+ message and get out. */
+ /* First the error msg */
+ ld 3,512(1) /* restore r3 */
+ lwz 2,4(3) /* r2 = __NR_kwrite */
+ lwz 4,12(3) /* r4 = offset of err msg */
+ add 4,4,3 /* r4 = err msg */
+ lwz 5,16(3) /* r5 = length err msg */
+ li 3,2 /* r3 = stderr */
+ bl do_syscall
+ /* now call the diagnosis fn */
+ ld 3,512(1) /* restore r3 */
+ ld 4,112(3) /* preloadpage->p_diagnose_load_failure */
+ ld 11,16(4)
+ ld 2,8(4) /* get its TOC ptr */
+ ld 4,0(4) /* get its entry point */
+ mtlr 4
+ blrl
+ /* Now do _exit(1) */
+ lwz 3,512(1) /* restore r3 */
+ lwz 2,8(3) /* r2 = __NR_exit */
+ li 3,1 /* doing _exit(1) */
+ addi 1,1,1024 /* fix stack pointer */
+ bl do_syscall
+ /*NOTREACHED*/
+ trap
+
+do_kload:
+ /* On entry: r2 = __NR_kload, r3 = name of module */
+ li 4,0
+ li 5,0
+ li 6,0
+ li 7,0
+ li 8,0
+ li 9,0
+ li 10,0
+do_syscall:
+ crorc 6,6,6
+ sc
+ /* sc continues at 'lr', hence this
+ constitutes an automatic return */
+
+ /* See comment in pub_core_trampoline.h for what this is for */
+.globl VG_(ppctoc_magic_redirect_return_stub)
+VG_(ppctoc_magic_redirect_return_stub):
+ trap
+
+.globl VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
+/*---------------- unknown ----------------*/
+#else
+# error Unknown platform
+
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#if defined(VGO_linux)
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",@progbits
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_translate.c.svn-base b/coregrind/.svn/text-base/m_translate.c.svn-base
new file mode 100644
index 0000000..ea7ec46
--- /dev/null
+++ b/coregrind/.svn/text-base/m_translate.c.svn-base
@@ -0,0 +1,1559 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Interface to LibVEX_Translate, and the SP-update pass ---*/
+/*--- m_translate.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_aspacemgr.h"
+
+#include "pub_core_machine.h" // VG_(fnptr_to_fnentry)
+ // VG_(get_SP)
+ // VG_(machine_get_VexArchInfo)
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_options.h"
+
+#include "pub_core_debuginfo.h" // VG_(get_fnname_w_offset)
+#include "pub_core_redir.h" // VG_(redir_do_lookup)
+
+#include "pub_core_signals.h" // VG_(synth_fault_{perms,mapping}
+#include "pub_core_stacks.h" // VG_(unknown_SP_update)()
+#include "pub_core_tooliface.h" // VG_(tdict)
+
+#include "pub_core_translate.h"
+#include "pub_core_transtab.h"
+#include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
+ // VG_(run_a_noredir_translation__return_point)
+
+#include "pub_core_threadstate.h" // VexGuestArchState
+#include "pub_core_trampoline.h" // VG_(ppctoc_magic_redirect_return_stub)
+
+#include "pub_core_execontext.h" // VG_(make_depth_1_ExeContext_from_Addr)
+
+
+/*------------------------------------------------------------*/
+/*--- Stats ---*/
+/*------------------------------------------------------------*/
+
+static UInt n_SP_updates_fast = 0;
+static UInt n_SP_updates_generic_known = 0;
+static UInt n_SP_updates_generic_unknown = 0;
+
+void VG_(print_translation_stats) ( void )
+{
+ Char buf[6];
+ UInt n_SP_updates = n_SP_updates_fast + n_SP_updates_generic_known
+ + n_SP_updates_generic_unknown;
+ VG_(percentify)(n_SP_updates_fast, n_SP_updates, 1, 6, buf);
+ VG_(message)(Vg_DebugMsg,
+ "translate: fast SP updates identified: %'u (%s)",
+ n_SP_updates_fast, buf );
+
+ VG_(percentify)(n_SP_updates_generic_known, n_SP_updates, 1, 6, buf);
+ VG_(message)(Vg_DebugMsg,
+ "translate: generic_known SP updates identified: %'u (%s)",
+ n_SP_updates_generic_known, buf );
+
+ VG_(percentify)(n_SP_updates_generic_unknown, n_SP_updates, 1, 6, buf);
+ VG_(message)(Vg_DebugMsg,
+ "translate: generic_unknown SP updates identified: %'u (%s)",
+ n_SP_updates_generic_unknown, buf );
+}
+
+/*------------------------------------------------------------*/
+/*--- %SP-update pass ---*/
+/*------------------------------------------------------------*/
+
+static Bool need_to_handle_SP_assignment(void)
+{
+ return ( VG_(tdict).track_new_mem_stack_4 ||
+ VG_(tdict).track_die_mem_stack_4 ||
+ VG_(tdict).track_new_mem_stack_8 ||
+ VG_(tdict).track_die_mem_stack_8 ||
+ VG_(tdict).track_new_mem_stack_12 ||
+ VG_(tdict).track_die_mem_stack_12 ||
+ VG_(tdict).track_new_mem_stack_16 ||
+ VG_(tdict).track_die_mem_stack_16 ||
+ VG_(tdict).track_new_mem_stack_32 ||
+ VG_(tdict).track_die_mem_stack_32 ||
+ VG_(tdict).track_new_mem_stack_112 ||
+ VG_(tdict).track_die_mem_stack_112 ||
+ VG_(tdict).track_new_mem_stack_128 ||
+ VG_(tdict).track_die_mem_stack_128 ||
+ VG_(tdict).track_new_mem_stack_144 ||
+ VG_(tdict).track_die_mem_stack_144 ||
+ VG_(tdict).track_new_mem_stack_160 ||
+ VG_(tdict).track_die_mem_stack_160 ||
+ VG_(tdict).track_new_mem_stack ||
+ VG_(tdict).track_die_mem_stack );
+}
+
+// - The SP aliases are held in an array which is used as a circular buffer.
+// This misses very few constant updates of SP (ie. < 0.1%) while using a
+// small, constant structure that will also never fill up and cause
+// execution to abort.
+// - Unused slots have a .temp value of 'IRTemp_INVALID'.
+// - 'next_SP_alias_slot' is the index where the next alias will be stored.
+// - If the buffer fills, we circle around and start over-writing
+// non-IRTemp_INVALID values. This is rare, and the overwriting of a
+// value that would have subsequently be used is even rarer.
+// - Every slot below next_SP_alias_slot holds a non-IRTemp_INVALID value.
+// The rest either all won't (if we haven't yet circled around) or all
+// will (if we have circled around).
+
+typedef
+ struct {
+ IRTemp temp;
+ Long delta;
+ }
+ SP_Alias;
+
+// With 32 slots the buffer fills very rarely -- eg. once in a run of GCC.
+// And I've tested with smaller values and the wrap-around case works ok.
+#define N_ALIASES 32
+static SP_Alias SP_aliases[N_ALIASES];
+static Int next_SP_alias_slot = 0;
+
+static void clear_SP_aliases(void)
+{
+ Int i;
+ for (i = 0; i < N_ALIASES; i++) {
+ SP_aliases[i].temp = IRTemp_INVALID;
+ SP_aliases[i].delta = 0;
+ }
+ next_SP_alias_slot = 0;
+}
+
+static void add_SP_alias(IRTemp temp, Long delta)
+{
+ vg_assert(temp != IRTemp_INVALID);
+ SP_aliases[ next_SP_alias_slot ].temp = temp;
+ SP_aliases[ next_SP_alias_slot ].delta = delta;
+ next_SP_alias_slot++;
+ if (N_ALIASES == next_SP_alias_slot) next_SP_alias_slot = 0;
+}
+
+static Bool get_SP_delta(IRTemp temp, ULong* delta)
+{
+ Int i; // i must be signed!
+ vg_assert(IRTemp_INVALID != temp);
+ // Search backwards between current buffer position and the start.
+ for (i = next_SP_alias_slot-1; i >= 0; i--) {
+ if (temp == SP_aliases[i].temp) {
+ *delta = SP_aliases[i].delta;
+ return True;
+ }
+ }
+ // Search backwards between the end and the current buffer position.
+ for (i = N_ALIASES-1; i >= next_SP_alias_slot; i--) {
+ if (temp == SP_aliases[i].temp) {
+ *delta = SP_aliases[i].delta;
+ return True;
+ }
+ }
+ return False;
+}
+
+static void update_SP_aliases(Long delta)
+{
+ Int i;
+ for (i = 0; i < N_ALIASES; i++) {
+ if (SP_aliases[i].temp == IRTemp_INVALID) {
+ return;
+ }
+ SP_aliases[i].delta += delta;
+ }
+}
+
+/* Given a guest IP, get an origin tag for a 1-element stack trace,
+ and wrap it up in an IR atom that can be passed as the origin-tag
+ value for a stack-adjustment helper function. */
+static IRExpr* mk_ecu_Expr ( Addr64 guest_IP )
+{
+ UInt ecu;
+ ExeContext* ec
+ = VG_(make_depth_1_ExeContext_from_Addr)( (Addr)guest_IP );
+ vg_assert(ec);
+ ecu = VG_(get_ECU_from_ExeContext)( ec );
+ vg_assert(VG_(is_plausible_ECU)(ecu));
+ /* This is always safe to do, since ecu is only 32 bits, and
+ HWord is 32 or 64. */
+ return mkIRExpr_HWord( (HWord)ecu );
+}
+
+
+/* For tools that want to know about SP changes, this pass adds
+ in the appropriate hooks. We have to do it after the tool's
+ instrumentation, so the tool doesn't have to worry about the C calls
+ it adds in, and we must do it before register allocation because
+ spilled temps make it much harder to work out the SP deltas.
+ This it is done with Vex's "second instrumentation" pass.
+
+ Basically, we look for GET(SP)/PUT(SP) pairs and track constant
+ increments/decrements of SP between them. (This requires tracking one or
+ more "aliases", which are not exact aliases but instead are tempregs
+ whose value is equal to the SP's plus or minus a known constant.)
+ If all the changes to SP leading up to a PUT(SP) are by known, small
+ constants, we can do a specific call to eg. new_mem_stack_4, otherwise
+ we fall back to the case that handles an unknown SP change.
+
+ There is some extra complexity to deal correctly with updates to
+ only parts of SP. Bizarre, but it has been known to happen.
+*/
+static
+IRSB* vg_SP_update_pass ( void* closureV,
+ IRSB* sb_in,
+ VexGuestLayout* layout,
+ VexGuestExtents* vge,
+ IRType gWordTy,
+ IRType hWordTy )
+{
+ Int i, j, minoff_ST, maxoff_ST, sizeof_SP, offset_SP;
+ Int first_SP, last_SP, first_Put, last_Put;
+ IRDirty *dcall, *d;
+ IRStmt* st;
+ IRExpr* e;
+ IRRegArray* descr;
+ IRType typeof_SP;
+ Long delta, con;
+
+ /* Set up stuff for tracking the guest IP */
+ Bool curr_IP_known = False;
+ Addr64 curr_IP = 0;
+
+ /* Set up BB */
+ IRSB* bb = emptyIRSB();
+ bb->tyenv = deepCopyIRTypeEnv(sb_in->tyenv);
+ bb->next = deepCopyIRExpr(sb_in->next);
+ bb->jumpkind = sb_in->jumpkind;
+
+ delta = 0;
+
+ sizeof_SP = layout->sizeof_SP;
+ offset_SP = layout->offset_SP;
+ typeof_SP = sizeof_SP==4 ? Ity_I32 : Ity_I64;
+ vg_assert(sizeof_SP == 4 || sizeof_SP == 8);
+
+ /* --- Start of #defines --- */
+
+# define IS_ADD(op) (sizeof_SP==4 ? ((op)==Iop_Add32) : ((op)==Iop_Add64))
+# define IS_SUB(op) (sizeof_SP==4 ? ((op)==Iop_Sub32) : ((op)==Iop_Sub64))
+
+# define IS_ADD_OR_SUB(op) (IS_ADD(op) || IS_SUB(op))
+
+# define GET_CONST(con) \
+ (sizeof_SP==4 ? (Long)(Int)(con->Ico.U32) \
+ : (Long)(con->Ico.U64))
+
+# define DO_NEW(syze, tmpp) \
+ do { \
+ Bool vanilla, w_ecu; \
+ vg_assert(curr_IP_known); \
+ vanilla = NULL != VG_(tdict).track_new_mem_stack_##syze; \
+ w_ecu = NULL != VG_(tdict).track_new_mem_stack_##syze##_w_ECU; \
+ vg_assert(!(vanilla && w_ecu)); /* can't have both */ \
+ if (!(vanilla || w_ecu)) \
+ goto generic; \
+ \
+ /* I don't know if it's really necessary to say that the */ \
+ /* call reads the stack pointer. But anyway, we do. */ \
+ if (w_ecu) { \
+ dcall = unsafeIRDirty_0_N( \
+ 2/*regparms*/, \
+ "track_new_mem_stack_" #syze "_w_ECU", \
+ VG_(fnptr_to_fnentry)( \
+ VG_(tdict).track_new_mem_stack_##syze##_w_ECU ), \
+ mkIRExprVec_2(IRExpr_RdTmp(tmpp), \
+ mk_ecu_Expr(curr_IP)) \
+ ); \
+ } else { \
+ dcall = unsafeIRDirty_0_N( \
+ 1/*regparms*/, \
+ "track_new_mem_stack_" #syze , \
+ VG_(fnptr_to_fnentry)( \
+ VG_(tdict).track_new_mem_stack_##syze ), \
+ mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
+ ); \
+ } \
+ dcall->nFxState = 1; \
+ dcall->fxState[0].fx = Ifx_Read; \
+ dcall->fxState[0].offset = layout->offset_SP; \
+ dcall->fxState[0].size = layout->sizeof_SP; \
+ \
+ addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
+ \
+ tl_assert(syze > 0); \
+ update_SP_aliases(syze); \
+ \
+ n_SP_updates_fast++; \
+ \
+ } while (0)
+
+# define DO_DIE(syze, tmpp) \
+ do { \
+ if (!VG_(tdict).track_die_mem_stack_##syze) \
+ goto generic; \
+ \
+ /* I don't know if it's really necessary to say that the */ \
+ /* call reads the stack pointer. But anyway, we do. */ \
+ dcall = unsafeIRDirty_0_N( \
+ 1/*regparms*/, \
+ "track_die_mem_stack_" #syze, \
+ VG_(fnptr_to_fnentry)( \
+ VG_(tdict).track_die_mem_stack_##syze ), \
+ mkIRExprVec_1(IRExpr_RdTmp(tmpp)) \
+ ); \
+ dcall->nFxState = 1; \
+ dcall->fxState[0].fx = Ifx_Read; \
+ dcall->fxState[0].offset = layout->offset_SP; \
+ dcall->fxState[0].size = layout->sizeof_SP; \
+ \
+ addStmtToIRSB( bb, IRStmt_Dirty(dcall) ); \
+ \
+ tl_assert(syze > 0); \
+ update_SP_aliases(-(syze)); \
+ \
+ n_SP_updates_fast++; \
+ \
+ } while (0)
+
+ /* --- End of #defines --- */
+
+ clear_SP_aliases();
+
+ for (i = 0; i < sb_in->stmts_used; i++) {
+
+ st = sb_in->stmts[i];
+
+ if (st->tag == Ist_IMark) {
+ curr_IP_known = True;
+ curr_IP = st->Ist.IMark.addr;
+ }
+
+ /* t = Get(sp): curr = t, delta = 0 */
+ if (st->tag != Ist_WrTmp) goto case2;
+ e = st->Ist.WrTmp.data;
+ if (e->tag != Iex_Get) goto case2;
+ if (e->Iex.Get.offset != offset_SP) goto case2;
+ if (e->Iex.Get.ty != typeof_SP) goto case2;
+ vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
+ add_SP_alias(st->Ist.WrTmp.tmp, 0);
+ addStmtToIRSB( bb, st );
+ continue;
+
+ case2:
+ /* t' = curr +/- const: curr = t', delta +=/-= const */
+ if (st->tag != Ist_WrTmp) goto case3;
+ e = st->Ist.WrTmp.data;
+ if (e->tag != Iex_Binop) goto case3;
+ if (e->Iex.Binop.arg1->tag != Iex_RdTmp) goto case3;
+ if (!get_SP_delta(e->Iex.Binop.arg1->Iex.RdTmp.tmp, &delta)) goto case3;
+ if (e->Iex.Binop.arg2->tag != Iex_Const) goto case3;
+ if (!IS_ADD_OR_SUB(e->Iex.Binop.op)) goto case3;
+ con = GET_CONST(e->Iex.Binop.arg2->Iex.Const.con);
+ vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
+ if (IS_ADD(e->Iex.Binop.op)) {
+ add_SP_alias(st->Ist.WrTmp.tmp, delta + con);
+ } else {
+ add_SP_alias(st->Ist.WrTmp.tmp, delta - con);
+ }
+ addStmtToIRSB( bb, st );
+ continue;
+
+ case3:
+ /* t' = curr: curr = t' */
+ if (st->tag != Ist_WrTmp) goto case4;
+ e = st->Ist.WrTmp.data;
+ if (e->tag != Iex_RdTmp) goto case4;
+ if (!get_SP_delta(e->Iex.RdTmp.tmp, &delta)) goto case4;
+ vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.WrTmp.tmp) == typeof_SP );
+ add_SP_alias(st->Ist.WrTmp.tmp, delta);
+ addStmtToIRSB( bb, st );
+ continue;
+
+ case4:
+ /* Put(sp) = curr */
+ /* More generally, we must correctly handle a Put which writes
+ any part of SP, not just the case where all of SP is
+ written. */
+ if (st->tag != Ist_Put) goto case5;
+ first_SP = offset_SP;
+ last_SP = first_SP + sizeof_SP - 1;
+ first_Put = st->Ist.Put.offset;
+ last_Put = first_Put
+ + sizeofIRType( typeOfIRExpr( bb->tyenv, st->Ist.Put.data ))
+ - 1;
+ vg_assert(first_SP <= last_SP);
+ vg_assert(first_Put <= last_Put);
+
+ if (last_Put < first_SP || last_SP < first_Put)
+ goto case5; /* no overlap */
+
+ if (st->Ist.Put.data->tag == Iex_RdTmp
+ && get_SP_delta(st->Ist.Put.data->Iex.RdTmp.tmp, &delta)) {
+ IRTemp tttmp = st->Ist.Put.data->Iex.RdTmp.tmp;
+ /* Why should the following assertion hold? Because any
+ alias added by put_SP_alias must be of a temporary which
+ has the same type as typeof_SP, and whose value is a Get
+ at exactly offset_SP of size typeof_SP. Each call to
+ put_SP_alias is immediately preceded by an assertion that
+ we are putting in a binding for a correctly-typed
+ temporary. */
+ vg_assert( typeOfIRTemp(bb->tyenv, tttmp) == typeof_SP );
+         /* From the same type-and-offset-correctness argument, if
+            we found a usable alias, it must be for an "exact" write of SP. */
+ vg_assert(first_SP == first_Put);
+ vg_assert(last_SP == last_Put);
+ switch (delta) {
+ case 0: addStmtToIRSB(bb,st); continue;
+ case 4: DO_DIE( 4, tttmp); addStmtToIRSB(bb,st); continue;
+ case -4: DO_NEW( 4, tttmp); addStmtToIRSB(bb,st); continue;
+ case 8: DO_DIE( 8, tttmp); addStmtToIRSB(bb,st); continue;
+ case -8: DO_NEW( 8, tttmp); addStmtToIRSB(bb,st); continue;
+ case 12: DO_DIE( 12, tttmp); addStmtToIRSB(bb,st); continue;
+ case -12: DO_NEW( 12, tttmp); addStmtToIRSB(bb,st); continue;
+ case 16: DO_DIE( 16, tttmp); addStmtToIRSB(bb,st); continue;
+ case -16: DO_NEW( 16, tttmp); addStmtToIRSB(bb,st); continue;
+ case 32: DO_DIE( 32, tttmp); addStmtToIRSB(bb,st); continue;
+ case -32: DO_NEW( 32, tttmp); addStmtToIRSB(bb,st); continue;
+ case 112: DO_DIE( 112, tttmp); addStmtToIRSB(bb,st); continue;
+ case -112: DO_NEW( 112, tttmp); addStmtToIRSB(bb,st); continue;
+ case 128: DO_DIE( 128, tttmp); addStmtToIRSB(bb,st); continue;
+ case -128: DO_NEW( 128, tttmp); addStmtToIRSB(bb,st); continue;
+ case 144: DO_DIE( 144, tttmp); addStmtToIRSB(bb,st); continue;
+ case -144: DO_NEW( 144, tttmp); addStmtToIRSB(bb,st); continue;
+ case 160: DO_DIE( 160, tttmp); addStmtToIRSB(bb,st); continue;
+ case -160: DO_NEW( 160, tttmp); addStmtToIRSB(bb,st); continue;
+ default:
+ /* common values for ppc64: 144 128 160 112 176 */
+ n_SP_updates_generic_known++;
+ goto generic;
+ }
+ } else {
+ /* Deal with an unknown update to SP. We're here because
+ either:
+ (1) the Put does not exactly cover SP; it is a partial update.
+ Highly unlikely, but has been known to happen for 16-bit
+ Windows apps running on Wine, doing 16-bit adjustments to
+ %sp.
+ (2) the Put does exactly cover SP, but we are unable to
+ determine how the value relates to the old SP. In any
+ case, we cannot assume that the Put.data value is a tmp;
+ we must assume it can be anything allowed in flat IR (tmp
+ or const).
+ */
+ IRTemp old_SP;
+ n_SP_updates_generic_unknown++;
+
+ // Nb: if all is well, this generic case will typically be
+ // called something like every 1000th SP update. If it's more than
+ // that, the above code may be missing some cases.
+ generic:
+ /* Pass both the old and new SP values to this helper. Also,
+ pass an origin tag, even if it isn't needed. */
+ old_SP = newIRTemp(bb->tyenv, typeof_SP);
+ addStmtToIRSB(
+ bb,
+ IRStmt_WrTmp( old_SP, IRExpr_Get(offset_SP, typeof_SP) )
+ );
+
+ /* Now we know what the old value of SP is. But knowing the new
+ value is a bit tricky if there is a partial write. */
+ if (first_Put == first_SP && last_Put == last_SP) {
+ /* The common case, an exact write to SP. So st->Ist.Put.data
+ does hold the new value; simple. */
+ vg_assert(curr_IP_known);
+ dcall = unsafeIRDirty_0_N(
+ 3/*regparms*/,
+ "VG_(unknown_SP_update)",
+ VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
+ mkIRExprVec_3( IRExpr_RdTmp(old_SP), st->Ist.Put.data,
+ mk_ecu_Expr(curr_IP) )
+ );
+ addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
+ /* don't forget the original assignment */
+ addStmtToIRSB( bb, st );
+ } else {
+ /* We have a partial update to SP. We need to know what
+ the new SP will be, and hand that to the helper call,
+ but when the helper call happens, SP must hold the
+ value it had before the update. Tricky.
+ Therefore use the following kludge:
+ 1. do the partial SP update (Put)
+ 2. Get the new SP value into a tmp, new_SP
+ 3. Put old_SP
+ 4. Call the helper
+ 5. Put new_SP
+ */
+ IRTemp new_SP;
+ /* 1 */
+ addStmtToIRSB( bb, st );
+ /* 2 */
+ new_SP = newIRTemp(bb->tyenv, typeof_SP);
+ addStmtToIRSB(
+ bb,
+ IRStmt_WrTmp( new_SP, IRExpr_Get(offset_SP, typeof_SP) )
+ );
+ /* 3 */
+ addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(old_SP) ));
+ /* 4 */
+ vg_assert(curr_IP_known);
+ dcall = unsafeIRDirty_0_N(
+ 3/*regparms*/,
+ "VG_(unknown_SP_update)",
+ VG_(fnptr_to_fnentry)( &VG_(unknown_SP_update) ),
+ mkIRExprVec_3( IRExpr_RdTmp(old_SP),
+ IRExpr_RdTmp(new_SP),
+ mk_ecu_Expr(curr_IP) )
+ );
+ addStmtToIRSB( bb, IRStmt_Dirty(dcall) );
+ /* 5 */
+ addStmtToIRSB( bb, IRStmt_Put(offset_SP, IRExpr_RdTmp(new_SP) ));
+ }
+
+ /* Forget what we already know. */
+ clear_SP_aliases();
+
+ /* If this is a Put of a tmp that exactly updates SP,
+ start tracking aliases against this tmp. */
+
+ if (first_Put == first_SP && last_Put == last_SP
+ && st->Ist.Put.data->tag == Iex_RdTmp) {
+ vg_assert( typeOfIRTemp(bb->tyenv, st->Ist.Put.data->Iex.RdTmp.tmp)
+ == typeof_SP );
+ add_SP_alias(st->Ist.Put.data->Iex.RdTmp.tmp, 0);
+ }
+ continue;
+ }
+
+ case5:
+ /* PutI or Dirty call which overlaps SP: complain. We can't
+ deal with SP changing in weird ways (well, we can, but not at
+ this time of night). */
+ if (st->tag == Ist_PutI) {
+ descr = st->Ist.PutI.descr;
+ minoff_ST = descr->base;
+ maxoff_ST = descr->base
+ + descr->nElems * sizeofIRType(descr->elemTy) - 1;
+ if (!(offset_SP > maxoff_ST
+ || (offset_SP + sizeof_SP - 1) < minoff_ST))
+ goto complain;
+ }
+ if (st->tag == Ist_Dirty) {
+ d = st->Ist.Dirty.details;
+ for (j = 0; j < d->nFxState; j++) {
+ minoff_ST = d->fxState[j].offset;
+ maxoff_ST = d->fxState[j].offset + d->fxState[j].size - 1;
+ if (d->fxState[j].fx == Ifx_Read || d->fxState[j].fx == Ifx_None)
+ continue;
+ if (!(offset_SP > maxoff_ST
+ || (offset_SP + sizeof_SP - 1) < minoff_ST))
+ goto complain;
+ }
+ }
+
+ /* well, not interesting. Just copy and keep going. */
+ addStmtToIRSB( bb, st );
+
+ } /* for (i = 0; i < sb_in->stmts_used; i++) */
+
+ return bb;
+
+ complain:
+ VG_(core_panic)("vg_SP_update_pass: PutI or Dirty which overlaps SP");
+
+#undef IS_ADD
+#undef IS_SUB
+#undef IS_ADD_OR_SUB
+#undef GET_CONST
+#undef DO_NEW
+#undef DO_DIE
+}
+
+/*------------------------------------------------------------*/
+/*--- Main entry point for the JITter. ---*/
+/*------------------------------------------------------------*/
+
+/* Extra comments re self-checking translations and self-modifying
+ code. (JRS 14 Oct 05).
+
+ There are 3 modes:
+ (1) no checking: all code assumed to be not self-modifying
+ (2) partial: known-problematic situations get a self-check
+ (3) full checking: all translations get a self-check
+
+ As currently implemented, the default is (2). (3) is always safe,
+ but very slow. (1) works mostly, but fails for gcc nested-function
+ code which uses trampolines on the stack; this situation is
+ detected and handled by (2).
+
+ ----------
+
+ A more robust and transparent solution, which is not currently
+ implemented, is a variant of (2): if a translation is made from an
+ area which aspacem says does not have 'w' permission, then it can
+ be non-self-checking. Otherwise, it needs a self-check.
+
+ This is complicated by Vex's basic-block chasing. If a self-check
+ is requested, then Vex will not chase over basic block boundaries
+ (it's too complex). However there is still a problem if it chases
+ from a non-'w' area into a 'w' area.
+
+ I think the right thing to do is:
+
+ - if a translation request starts in a 'w' area, ask for a
+ self-checking translation, and do not allow any chasing (make
+ chase_into_ok return False). Note that the latter is redundant
+ in the sense that Vex won't chase anyway in this situation.
+
+ - if a translation request starts in a non-'w' area, do not ask for
+ a self-checking translation. However, do not allow chasing (as
+ determined by chase_into_ok) to go into a 'w' area.
+
+ The result of this is that all code inside 'w' areas is self
+ checking.
+
+ To complete the trick, there is a caveat: we must watch the
+ client's mprotect calls. If pages are changed from non-'w' to 'w'
+ then we should throw away all translations which intersect the
+ affected area, so as to force them to be redone with self-checks.
+
+ ----------
+
+ The above outlines the conditions under which bb chasing is allowed
+ from a self-modifying-code point of view. There are other
+ situations pertaining to function redirection in which it is
+ necessary to disallow chasing, but those fall outside the scope of
+ this comment.
+*/
+
+
+/* Vex dumps the final code in here. Then we can copy it off
+ wherever we like. */
+/* 60000: should agree with assertion in VG_(add_to_transtab) in
+ m_transtab.c. */
+#define N_TMPBUF 60000
+static UChar tmpbuf[N_TMPBUF];
+
+
+/* Function pointers we must supply to LibVEX in order that it
+ can bomb out and emit messages under Valgrind's control. */
+__attribute__ ((noreturn))
+static
+void failure_exit ( void )
+{
+   /* Handed to LibVEX_Init; called by Vex on an unrecoverable
+      internal error.  Dump Vex's allocation statistics (may help
+      diagnosis) and then panic.  VG_(core_panic) does not return,
+      which satisfies the 'noreturn' attribute. */
+   LibVEX_ShowAllocStats();
+   VG_(core_panic)("LibVEX called failure_exit().");
+}
+
+static
+void log_bytes ( HChar* bytes, Int nbytes )
+{
+   /* Handed to LibVEX_Init; lets Vex print through Valgrind's
+      printf.  Emit the bulk of the data four characters at a time,
+      then mop up the trailing 0..3 characters individually. */
+   Int k = 0;
+   while (k + 3 < nbytes) {
+      VG_(printf)("%c%c%c%c",
+                  bytes[k], bytes[k+1], bytes[k+2], bytes[k+3]);
+      k += 4;
+   }
+   while (k < nbytes) {
+      VG_(printf)("%c", bytes[k]);
+      k++;
+   }
+}
+
+
+/* --------- Various helper functions for translation --------- */
+
+/* Look for reasons to disallow making translations from the given
+ segment. */
+
+static Bool translations_allowable_from_seg ( NSegment const* seg )
+{
+   /* A translation may only be made from a client segment (anonymous
+      or file-backed) that is executable.  On x86 a merely-readable
+      segment is additionally acceptable. */
+#  if defined(VGA_x86)
+   const Bool readableOK = True;
+#  else
+   const Bool readableOK = False;
+#  endif
+   if (seg == NULL)
+      return False;
+   if (seg->kind != SkAnonC && seg->kind != SkFileC)
+      return False;
+   return seg->hasX || (readableOK && seg->hasR);
+}
+
+
+/* Is a self-check required for a translation of a guest address
+ inside segment SEG when requested by thread TID ? */
+
+static Bool self_check_required ( NSegment const* seg, ThreadId tid )
+{
+   /* Decide per the --smc-check= setting.  For the 'stack' policy a
+      check is required only when SEG contains this thread's current
+      stack pointer (a whole word of it).
+      Note: the original had an unreachable 'break' after the
+      Vg_SmcStack return, and fell off the end of the function after
+      the default arm; both cleaned up here.  vg_assert2 aborts, so
+      the trailing return is never executed - it only silences
+      "control reaches end of non-void function" diagnostics. */
+   switch (VG_(clo_smc_check)) {
+      case Vg_SmcNone: return False;
+      case Vg_SmcAll:  return True;
+      case Vg_SmcStack:
+         return seg
+                ? (seg->start <= VG_(get_SP)(tid)
+                   && VG_(get_SP)(tid)+sizeof(Word)-1 <= seg->end)
+                : False;
+      default:
+         vg_assert2(0, "unknown VG_(clo_smc_check) value");
+   }
+   /*NOTREACHED*/
+   return False;
+}
+
+
+/* This is a callback passed to LibVEX_Translate. It stops Vex from
+ chasing into function entry points that we wish to redirect.
+ Chasing across them obviously defeats the redirect mechanism, with
+ bad effects for Memcheck, Addrcheck, and possibly others.
+
+   Also, we must stop Vex chasing into blocks for which we might want
+   to request a self-check.
+*/
+static Bool chase_into_ok ( void* closureV, Addr64 addr64 )
+{
+   Addr addr = (Addr)addr64;
+   NSegment const* seg = VG_(am_find_nsegment)(addr);
+   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
+
+   /* Work through a list of possibilities why we might not want to
+      allow a chase. */
+
+   /* Destination not in a plausible segment? */
+   if (!translations_allowable_from_seg(seg))
+      goto dontchase;
+
+   /* Destination requires a self-check? */
+   if (self_check_required(seg, closure->tid))
+      goto dontchase;
+
+   /* Destination is redirected?  (chasing across a redirected entry
+      point would bypass the redirection) */
+   if (addr != VG_(redir_do_lookup)(addr, NULL))
+      goto dontchase;
+
+#  if defined(VG_PLAT_USES_PPCTOC)
+   /* This needs to be at the start of its own block.  Don't chase. Re
+      ULong_to_Ptr, be careful to ensure we only compare 32 bits on a
+      32-bit target.*/
+   if (ULong_to_Ptr(addr64)
+       == (void*)&VG_(ppctoc_magic_redirect_return_stub))
+      goto dontchase;
+#  endif
+
+   /* overly conservative, but .. don't chase into the distinguished
+      address that m_transtab uses as an empty-slot marker for
+      VG_(tt_fast). */
+   if (addr == TRANSTAB_BOGUS_GUEST_ADDR)
+      goto dontchase;
+
+   /* well, ok then.  go on and chase. */
+   return True;
+
+   /* NOTE(review): defensive only - every path above ends in a
+      return or a goto, so this assertion is unreachable. */
+   vg_assert(0);
+   /*NOTREACHED*/
+
+  dontchase:
+   if (0) VG_(printf)("not chasing into 0x%lx\n", addr);
+   return False;
+}
+
+
+/* --------------- helpers for with-TOC platforms --------------- */
+
+/* NOTE: with-TOC platforms are: ppc64-linux, ppc32-aix5, ppc64-aix5. */
+
+/* Build an IR constant expression holding the 64-bit value N. */
+static IRExpr* mkU64 ( ULong n ) {
+   return IRExpr_Const(IRConst_U64(n));
+}
+/* Build an IR constant expression holding the 32-bit value N. */
+static IRExpr* mkU32 ( UInt n ) {
+   return IRExpr_Const(IRConst_U32(n));
+}
+
+#if defined(VG_PLAT_USES_PPCTOC)
+/* Build an IR constant expression holding the 8-bit value N.
+   Only needed on TOC-using platforms (shift amounts below). */
+static IRExpr* mkU8 ( UChar n ) {
+   return IRExpr_Const(IRConst_U8(n));
+}
+/* Coerce expression E to 32 bits: an I32 passes through unchanged,
+   an I64 is truncated; any other type is an assertion failure. */
+static IRExpr* narrowTo32 ( IRTypeEnv* tyenv, IRExpr* e ) {
+   IRType ty = typeOfIRExpr(tyenv, e);
+   if (ty == Ity_I32)
+      return e;
+   vg_assert(ty == Ity_I64);
+   return IRExpr_Unop(Iop_64to32, e);
+}
+
+/* Generate code to push word-typed expression 'e' onto this thread's
+ redir stack, checking for stack overflow and generating code to
+ bomb out if so. */
+
+static void gen_PUSH ( IRSB* bb, IRExpr* e )
+{
+   IRRegArray* descr;
+   IRTemp      t1;
+   IRExpr*     one;
+
+   /* Select word-size-specific guest-state offsets, IR ops and the
+      constant-builder for the target at hand. */
+#  if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
+   Int stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
+   Int offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
+   Int offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
+   Int offB_EMWARN      = offsetof(VexGuestPPC64State,guest_EMWARN);
+   Bool is64            = True;
+   IRType ty_Word       = Ity_I64;
+   IROp op_CmpNE        = Iop_CmpNE64;
+   IROp op_Sar          = Iop_Sar64;
+   IROp op_Sub          = Iop_Sub64;
+   IROp op_Add          = Iop_Add64;
+   IRExpr*(*mkU)(ULong) = mkU64;
+   vg_assert(VG_WORDSIZE == 8);
+#  else
+   Int stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
+   Int offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
+   Int offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
+   Int offB_EMWARN      = offsetof(VexGuestPPC32State,guest_EMWARN);
+   Bool is64            = False;
+   IRType ty_Word       = Ity_I32;
+   IROp op_CmpNE        = Iop_CmpNE32;
+   IROp op_Sar          = Iop_Sar32;
+   IROp op_Sub          = Iop_Sub32;
+   IROp op_Add          = Iop_Add32;
+   IRExpr*(*mkU)(UInt)  = mkU32;
+   vg_assert(VG_WORDSIZE == 4);
+#  endif
+
+   vg_assert(sizeof(void*) == VG_WORDSIZE);
+   vg_assert(sizeof(Word)  == VG_WORDSIZE);
+   vg_assert(sizeof(Addr)  == VG_WORDSIZE);
+
+   descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
+   t1    = newIRTemp( bb->tyenv, ty_Word );
+   one   = mkU(1);
+
+   vg_assert(typeOfIRExpr(bb->tyenv, e) == ty_Word);
+
+   /* t1 = guest_REDIR_SP + 1 */
+   addStmtToIRSB(
+      bb,
+      IRStmt_WrTmp(
+         t1,
+         IRExpr_Binop(op_Add, IRExpr_Get( offB_REDIR_SP, ty_Word ), one)
+      )
+   );
+
+   /* Bomb out if t1 >=s stack_size, that is, (stack_size-1)-t1 <s 0.
+      The destination (0) is a bit bogus but it doesn't matter since
+      this is an unrecoverable error and will lead to Valgrind
+      shutting down.  _EMWARN is set regardless - that's harmless
+      since it only has a meaning if the exit is taken. */
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_overflow))
+   );
+   /* The sign bit of (stack_size-1)-t1, extracted via an arithmetic
+      shift, is nonzero exactly when t1 >=s stack_size. */
+   addStmtToIRSB(
+      bb,
+      IRStmt_Exit(
+         IRExpr_Binop(
+            op_CmpNE,
+            IRExpr_Binop(
+               op_Sar,
+               IRExpr_Binop(op_Sub,mkU(stack_size-1),IRExpr_RdTmp(t1)),
+               mkU8(8 * VG_WORDSIZE - 1)
+            ),
+            mkU(0)
+         ),
+         Ijk_EmFail,
+         is64 ? IRConst_U64(0) : IRConst_U32(0)
+      )
+   );
+
+   /* guest_REDIR_SP = t1 */
+   addStmtToIRSB(bb, IRStmt_Put(offB_REDIR_SP, IRExpr_RdTmp(t1)));
+
+   /* guest_REDIR_STACK[t1+0] = e */
+   /* PutI/GetI have I32-typed indexes regardless of guest word size */
+   addStmtToIRSB(
+      bb,
+      IRStmt_PutI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0, e)
+   );
+}
+
+
+/* Generate code to pop a word-sized value from this thread's redir
+ stack, binding it to a new temporary, which is returned. As with
+ gen_PUSH, an overflow check is also performed. */
+
+static IRTemp gen_POP ( IRSB* bb )
+{
+   /* Select word-size-specific guest-state offsets, IR ops and the
+      constant-builder for the target at hand. */
+#  if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
+   Int stack_size       = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
+   Int offB_REDIR_SP    = offsetof(VexGuestPPC64State,guest_REDIR_SP);
+   Int offB_REDIR_STACK = offsetof(VexGuestPPC64State,guest_REDIR_STACK);
+   Int offB_EMWARN      = offsetof(VexGuestPPC64State,guest_EMWARN);
+   Bool is64            = True;
+   IRType ty_Word       = Ity_I64;
+   IROp op_CmpNE        = Iop_CmpNE64;
+   IROp op_Sar          = Iop_Sar64;
+   IROp op_Sub          = Iop_Sub64;
+   IRExpr*(*mkU)(ULong) = mkU64;
+#  else
+   Int stack_size       = VEX_GUEST_PPC32_REDIR_STACK_SIZE;
+   Int offB_REDIR_SP    = offsetof(VexGuestPPC32State,guest_REDIR_SP);
+   Int offB_REDIR_STACK = offsetof(VexGuestPPC32State,guest_REDIR_STACK);
+   Int offB_EMWARN      = offsetof(VexGuestPPC32State,guest_EMWARN);
+   Bool is64            = False;
+   IRType ty_Word       = Ity_I32;
+   IROp op_CmpNE        = Iop_CmpNE32;
+   IROp op_Sar          = Iop_Sar32;
+   IROp op_Sub          = Iop_Sub32;
+   IRExpr*(*mkU)(UInt)  = mkU32;
+#  endif
+
+   IRRegArray* descr = mkIRRegArray( offB_REDIR_STACK, ty_Word, stack_size );
+   IRTemp      t1    = newIRTemp( bb->tyenv, ty_Word );
+   IRTemp      res   = newIRTemp( bb->tyenv, ty_Word );
+   IRExpr*     one   = mkU(1);
+
+   vg_assert(sizeof(void*) == VG_WORDSIZE);
+   vg_assert(sizeof(Word)  == VG_WORDSIZE);
+   vg_assert(sizeof(Addr)  == VG_WORDSIZE);
+
+   /* t1 = guest_REDIR_SP */
+   /* gen_PUSH leaves REDIR_SP pointing at the most recently pushed
+      word, so read at the current index before decrementing. */
+   addStmtToIRSB(
+      bb,
+      IRStmt_WrTmp( t1, IRExpr_Get( offB_REDIR_SP, ty_Word ) )
+   );
+
+   /* Bomb out if t1 < 0.  Same comments as gen_PUSH apply. */
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(offB_EMWARN, mkU32(EmWarn_PPC64_redir_underflow))
+   );
+   addStmtToIRSB(
+      bb,
+      IRStmt_Exit(
+         IRExpr_Binop(
+            op_CmpNE,
+            IRExpr_Binop(
+               op_Sar,
+               IRExpr_RdTmp(t1),
+               mkU8(8 * VG_WORDSIZE - 1)
+            ),
+            mkU(0)
+         ),
+         Ijk_EmFail,
+         is64 ? IRConst_U64(0) : IRConst_U32(0)
+      )
+   );
+
+   /* res = guest_REDIR_STACK[t1+0] */
+   /* PutI/GetI have I32-typed indexes regardless of guest word size */
+   addStmtToIRSB(
+      bb,
+      IRStmt_WrTmp(
+         res,
+         IRExpr_GetI(descr, narrowTo32(bb->tyenv,IRExpr_RdTmp(t1)), 0)
+      )
+   );
+
+   /* guest_REDIR_SP = t1-1 */
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(offB_REDIR_SP, IRExpr_Binop(op_Sub, IRExpr_RdTmp(t1), one))
+   );
+
+   return res;
+}
+
+/* Generate code to push LR and R2 onto this thread's redir stack,
+ then set R2 to the new value (which is the TOC pointer to be used
+ for the duration of the replacement function, as determined by
+ m_debuginfo), and set LR to the magic return stub, so we get to
+   intercept the return and restore R2 and LR to the values saved
+ here. */
+
+static void gen_push_and_set_LR_R2 ( IRSB* bb, Addr64 new_R2_value )
+{
+#  if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
+   /* Push order is LR then R2; gen_pop_R2_LR_then_bLR pops them in
+      the reverse order. */
+   Addr64 bogus_RA  = (Addr64)&VG_(ppctoc_magic_redirect_return_stub);
+   Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
+   Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
+   gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I64) );
+   gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I64) );
+   /* Divert the eventual return through the magic stub, and install
+      the replacement function's TOC pointer. */
+   addStmtToIRSB( bb, IRStmt_Put( offB_LR,   mkU64( bogus_RA )) );
+   addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU64( new_R2_value )) );
+
+#  elif defined(VGP_ppc32_aix5)
+   /* Same sequence for the 32-bit guest state. */
+   Addr32 bogus_RA  = (Addr32)&VG_(ppctoc_magic_redirect_return_stub);
+   Int    offB_GPR2 = offsetof(VexGuestPPC32State,guest_GPR2);
+   Int    offB_LR   = offsetof(VexGuestPPC32State,guest_LR);
+   gen_PUSH( bb, IRExpr_Get(offB_LR,   Ity_I32) );
+   gen_PUSH( bb, IRExpr_Get(offB_GPR2, Ity_I32) );
+   addStmtToIRSB( bb, IRStmt_Put( offB_LR,   mkU32( bogus_RA )) );
+   addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, mkU32( new_R2_value )) );
+
+#  else
+#    error Platform is not TOC-afflicted, fortunately
+#  endif
+}
+
+static void gen_pop_R2_LR_then_bLR ( IRSB* bb )
+{
+   /* Undo gen_push_and_set_LR_R2: pop the saved GPR2 and LR off the
+      thread's hidden redirection stack (most recently pushed first),
+      write them back into the guest state, and finish the block with
+      a branch to the restored LR.
+      Fix vs. original: old_R2/old_LR were initialised with
+      newIRTemp() and then immediately overwritten by gen_POP() -
+      which allocates its own temporaries - so two tyenv entries per
+      invocation were created and never used.  Declare them
+      uninitialised instead. */
+#  if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
+   Int    offB_GPR2 = offsetof(VexGuestPPC64State,guest_GPR2);
+   Int    offB_LR   = offsetof(VexGuestPPC64State,guest_LR);
+   IRTemp old_R2;
+   IRTemp old_LR;
+   /* Restore R2 */
+   old_R2 = gen_POP( bb );
+   addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
+   /* Restore LR */
+   old_LR = gen_POP( bb );
+   addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
+   /* Branch to LR */
+   /* re boring, we arrived here precisely because a wrapped fn did a
+      blr (hence Ijk_Ret); so we should just mark this jump as Boring,
+      else one _Call will have resulted in two _Rets. */
+   bb->jumpkind = Ijk_Boring;
+   /* Mask out the bottom two bits of the target (instructions are
+      4-aligned on ppc). */
+   bb->next = IRExpr_Binop(Iop_And64, IRExpr_RdTmp(old_LR), mkU64(~(3ULL)));
+
+#  elif defined(VGP_ppc32_aix5)
+   Int    offB_GPR2 = offsetof(VexGuestPPC32State,guest_GPR2);
+   Int    offB_LR   = offsetof(VexGuestPPC32State,guest_LR);
+   IRTemp old_R2;
+   IRTemp old_LR;
+   /* Restore R2 */
+   old_R2 = gen_POP( bb );
+   addStmtToIRSB( bb, IRStmt_Put( offB_GPR2, IRExpr_RdTmp(old_R2)) );
+   /* Restore LR */
+   old_LR = gen_POP( bb );
+   addStmtToIRSB( bb, IRStmt_Put( offB_LR, IRExpr_RdTmp(old_LR)) );
+
+   /* Branch to LR */
+   /* re boring, we arrived here precisely because a wrapped fn did a
+      blr (hence Ijk_Ret); so we should just mark this jump as Boring,
+      else one _Call will have resulted in two _Rets. */
+   bb->jumpkind = Ijk_Boring;
+   bb->next = IRExpr_Binop(Iop_And32, IRExpr_RdTmp(old_LR), mkU32(~3));
+
+#  else
+#    error Platform is not TOC-afflicted, fortunately
+#  endif
+}
+
+static
+Bool mk_preamble__ppctoc_magic_return_stub ( void* closureV, IRSB* bb )
+{
+   /* Build the entire translation for the magic return stub: a
+      wrapped/redirected function is returning, so restore R2 and LR
+      from the hidden redir stack and branch to the restored LR. */
+   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
+   /* Since we're creating the entire IRSB right here, give it a
+      proper IMark, as it won't get one any other way, and cachegrind
+      will barf if it doesn't have one (fair enough really). */
+   addStmtToIRSB( bb, IRStmt_IMark( closure->readdr, 4 ) );
+   /* Generate the magic sequence:
+         pop R2 from hidden stack
+         pop LR from hidden stack
+         goto LR
+   */
+   gen_pop_R2_LR_then_bLR(bb);
+   return True; /* True == this is the entire BB; don't disassemble any
+                   real insns into it - just hand it directly to
+                   optimiser/instrumenter/backend. */
+}
+#endif
+
+/* --------------- END helpers for with-TOC platforms --------------- */
+
+
+/* This is the IR preamble generator used for replacement
+ functions. It adds code to set the guest_NRADDR{_GPR2} to zero
+ (technically not necessary, but facilitates detecting mixups in
+ which a replacement function has been erroneously declared using
+ VG_REPLACE_FUNCTION_Z{U,Z} when instead it should have been written
+ using VG_WRAP_FUNCTION_Z{U,Z}).
+
+   On with-TOC platforms the following hacks are also done: LR and R2 are
+ pushed onto a hidden stack, R2 is set to the correct value for the
+ replacement function, and LR is set to point at the magic
+ return-stub address. Setting LR causes the return of the
+ wrapped/redirected function to lead to our magic return stub, which
+ restores LR and R2 from said stack and returns for real.
+
+ VG_(get_StackTrace_wrk) understands that the LR value may point to
+ the return stub address, and that in that case it can get the real
+ LR value from the hidden stack instead. */
<br/>
+static
+Bool mk_preamble__set_NRADDR_to_zero ( void* closureV, IRSB* bb )
+{
+   /* Preamble for replacement-style redirections: zero guest_NRADDR
+      (and, on TOC platforms, guest_NRADDR_GPR2), then do the TOC
+      save-and-divert sequence.  See the comment block above for the
+      full rationale. */
+   Int nraddr_szB
+      = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
+   vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
+   vg_assert(nraddr_szB == VG_WORDSIZE);
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(
+         offsetof(VexGuestArchState,guest_NRADDR),
+         nraddr_szB == 8 ? mkU64(0) : mkU32(0)
+      )
+   );
+#  if defined(VG_PLAT_USES_PPCTOC)
+   { VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(
+         offsetof(VexGuestArchState,guest_NRADDR_GPR2),
+         VG_WORDSIZE==8 ? mkU64(0) : mkU32(0)
+      )
+   );
+   gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
+   }
+#  endif
+   return False; /* False == not the entire BB; real insns follow */
+}
+
+/* Ditto, except set guest_NRADDR to nraddr (the un-redirected guest
+ address). This is needed for function wrapping - so the wrapper
+ can read _NRADDR and find the address of the function being
+ wrapped. On toc-afflicted platforms we must also snarf r2. */
+static
+Bool mk_preamble__set_NRADDR_to_nraddr ( void* closureV, IRSB* bb )
+{
+   /* Preamble for wrap-style redirections: store the non-redirected
+      guest address in guest_NRADDR so the wrapper can find the
+      address of the function it wraps.  On TOC platforms, also snarf
+      the current R2 into guest_NRADDR_GPR2 and do the TOC
+      save-and-divert sequence. */
+   VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
+   Int nraddr_szB
+      = sizeof(((VexGuestArchState*)0)->guest_NRADDR);
+   vg_assert(nraddr_szB == 4 || nraddr_szB == 8);
+   vg_assert(nraddr_szB == VG_WORDSIZE);
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(
+         offsetof(VexGuestArchState,guest_NRADDR),
+         nraddr_szB == 8
+            ? IRExpr_Const(IRConst_U64( closure->nraddr ))
+            : IRExpr_Const(IRConst_U32( (UInt)closure->nraddr ))
+      )
+   );
+   /* Consistency: use the same guard as mk_preamble__set_NRADDR_to_zero.
+      VG_PLAT_USES_PPCTOC covers exactly ppc64-linux, ppc32-aix5 and
+      ppc64-aix5 - the with-TOC platforms listed earlier in this file -
+      so this replaces the previous explicit VGP_* list. */
+#  if defined(VG_PLAT_USES_PPCTOC)
+   addStmtToIRSB(
+      bb,
+      IRStmt_Put(
+         offsetof(VexGuestArchState,guest_NRADDR_GPR2),
+         IRExpr_Get(offsetof(VexGuestArchState,guest_GPR2),
+                    VG_WORDSIZE==8 ? Ity_I64 : Ity_I32)
+      )
+   );
+   gen_push_and_set_LR_R2 ( bb, VG_(get_tocptr)( closure->readdr ) );
+#  endif
+   return False; /* False == not the entire BB; real insns follow */
+}
+
+/* --- Helpers to do with PPC related stack redzones. --- */
+
+__attribute__((unused))
+static Bool const_True ( Addr64 guest_addr )
+{
+   /* Trivial always-True predicate, used as a
+      guest_ppc_zap_RZ_at_bl callback on platforms where redzone
+      zapping at 'bl' is unconditionally OK. */
+   return True;
+}
+
+__attribute__((unused))
+static Bool bl_RZ_zap_ok_for_AIX ( Addr64 bl_target )
+{
+   /* Decide whether it is OK to zap the stack redzone across a 'bl'
+      to BL_TARGET.  Refuse for calls to millicode (very low
+      addresses) and for calls to the AIX floating-point save helpers
+      .$SAVEF14 .. .$SAVEF31, recognised by their first instruction.
+      Behaviourally identical to the original switch-based version. */
+   static const UInt savef_insns[18] = {
+      0xd9c1ff70, /* stfd f14,-144(r1) */
+      0xd9e1ff78, /* stfd f15,-136(r1) */
+      0xda01ff80, /* stfd f16,-128(r1) */
+      0xda21ff88, /* stfd f17,-120(r1) */
+      0xda41ff90, /* stfd f18,-112(r1) */
+      0xda61ff98, /* stfd f19,-104(r1) */
+      0xda81ffa0, /* stfd f20,-96(r1) */
+      0xdaa1ffa8, /* stfd f21,-88(r1) */
+      0xdac1ffb0, /* stfd f22,-80(r1) */
+      0xdae1ffb8, /* stfd f23,-72(r1) */
+      0xdb01ffc0, /* stfd f24,-64(r1) */
+      0xdb21ffc8, /* stfd f25,-56(r1) */
+      0xdb41ffd0, /* stfd f26,-48(r1) */
+      0xdb61ffd8, /* stfd f27,-40(r1) */
+      0xdb81ffe0, /* stfd f28,-32(r1) */
+      0xdba1ffe8, /* stfd f29,-24(r1) */
+      0xdbc1fff0, /* stfd f30,-16(r1) */
+      0xdbe1fff8  /* stfd f31,-8(r1) */
+   };
+   Int k;
+   NSegment const* seg;
+
+   /* paranoia: on a 32-bit host only the low word is meaningful */
+   if (sizeof(void*) == 4)
+      bl_target &= 0xFFFFFFFFULL;
+
+   /* don't zap the redzone for calls to millicode. */
+   if (bl_target < 0x10000ULL)
+      return False;
+
+   /* Only inspect the target insn if doing so cannot segfault. */
+   seg = VG_(am_find_nsegment)( (Addr)bl_target );
+   if (seg && seg->hasR) {
+      UInt insn = *(UInt*)(Addr)bl_target;
+      for (k = 0; k < 18; k++)
+         if (insn == savef_insns[k])
+            return False;
+   }
+   return True;
+}
+
+/* --------------- main translation function --------------- */
+
+/* Note: see comments at top of m_redir.c for the Big Picture on how
+ redirections are managed. */
+
+/* The four kinds of translation request, as regards function
+   redirection; decided at the top of VG_(translate). */
+typedef
+   enum {
+      /* normal translation, redir neither requested nor inhibited */
+      T_Normal,
+      /* redir translation, function-wrap (set _NRADDR) style */
+      T_Redir_Wrap,
+      /* redir translation, replacement (don't set _NRADDR) style */
+      T_Redir_Replace,
+      /* a translation in which redir is specifically disallowed */
+      T_NoRedir
+   }
+   T_Kind;
+
+/* Translate the basic block beginning at NRADDR, and add it to the
+ translation cache & translation table. Unless
+ DEBUGGING_TRANSLATION is true, in which case the call is being done
+ for debugging purposes, so (a) throw away the translation once it
+ is made, and (b) produce a load of debugging output. If
+ ALLOW_REDIRECTION is False, do not attempt redirection of NRADDR,
+ and also, put the resulting translation into the no-redirect tt/tc
+ instead of the normal one.
+
+ TID is the identity of the thread requesting this translation.
+*/
+
+Bool VG_(translate) ( ThreadId tid,
+ Addr64 nraddr,
+ Bool debugging_translation,
+ Int debugging_verbosity,
+ ULong bbs_done,
+ Bool allow_redirection )
+{
+ Addr64 addr;
+ T_Kind kind;
+ Int tmpbuf_used, verbosity, i;
+ Bool notrace_until_done, do_self_check;
+ UInt notrace_until_limit = 0;
+ Bool (*preamble_fn)(void*,IRSB*);
+ VexArch vex_arch;
+ VexArchInfo vex_archinfo;
+ VexAbiInfo vex_abiinfo;
+ VexGuestExtents vge;
+ VexTranslateArgs vta;
+ VexTranslateResult tres;
+ VgCallbackClosure closure;
+
+ /* Make sure Vex is initialised right. */
+
+ static Bool vex_init_done = False;
+
+ if (!vex_init_done) {
+ LibVEX_Init ( &failure_exit, &log_bytes,
+ 1, /* debug_paranoia */
+ False, /* valgrind support */
+ &VG_(clo_vex_control) );
+ vex_init_done = True;
+ }
+
+ /* Establish the translation kind and actual guest address to
+ start from. Sets (addr,kind). */
+ if (allow_redirection) {
+ Bool isWrap;
+ Addr64 tmp = VG_(redir_do_lookup)( nraddr, &isWrap );
+ if (tmp == nraddr) {
+ /* no redirection found */
+ addr = nraddr;
+ kind = T_Normal;
+ } else {
+ /* found a redirect */
+ addr = tmp;
+ kind = isWrap ? T_Redir_Wrap : T_Redir_Replace;
+ }
+ } else {
+ addr = nraddr;
+ kind = T_NoRedir;
+ }
+
+ /* Established: (nraddr, addr, kind) */
+
+ /* Printing redirection info. */
+
+ if ((kind == T_Redir_Wrap || kind == T_Redir_Replace)
+ && (VG_(clo_verbosity) >= 2 || VG_(clo_trace_redir))) {
+ Bool ok;
+ Char name1[64] = "";
+ Char name2[64] = "";
+ name1[0] = name2[0] = 0;
+ ok = VG_(get_fnname_w_offset)(nraddr, name1, 64);
+ if (!ok) VG_(strcpy)(name1, "???");
+ ok = VG_(get_fnname_w_offset)(addr, name2, 64);
+ if (!ok) VG_(strcpy)(name2, "???");
+ VG_(message)(Vg_DebugMsg,
+ "REDIR: 0x%llx (%s) redirected to 0x%llx (%s)",
+ nraddr, name1,
+ addr, name2 );
+ }
+
+ /* If codegen tracing, don't start tracing until
+ notrace_until_limit blocks have gone by. This avoids printing
+ huge amounts of useless junk when all we want to see is the last
+ few blocks translated prior to a failure. Set
+ notrace_until_limit to be the number of translations to be made
+ before --trace-codegen= style printing takes effect. */
+ notrace_until_done
+ = VG_(get_bbs_translated)() >= notrace_until_limit;
+
+ if (!debugging_translation)
+ VG_TRACK( pre_mem_read, Vg_CoreTranslate,
+ tid, "(translator)", addr, 1 );
+
+ /* If doing any code printing, print a basic block start marker */
+ if (VG_(clo_trace_flags) || debugging_translation) {
+ Char fnname[64] = "";
+ VG_(get_fnname_w_offset)(addr, fnname, 64);
+ VG_(printf)(
+ "==== BB %d %s(0x%llx) BBs exec'd %lld ====\n",
+ VG_(get_bbs_translated)(), fnname, addr,
+ bbs_done);
+ }
+
+ /* Are we allowed to translate here? */
+
+ { /* BEGIN new scope specially for 'seg' */
+ NSegment const* seg = VG_(am_find_nsegment)(addr);
+
+ if ( (!translations_allowable_from_seg(seg))
+ || addr == TRANSTAB_BOGUS_GUEST_ADDR ) {
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg, "translations not allowed here (0x%llx)"
+ " - throwing SEGV", addr);
+ /* U R busted, sonny. Place your hands on your head and step
+ away from the orig_addr. */
+ /* Code address is bad - deliver a signal instead */
+ if (seg != NULL) {
+ /* There's some kind of segment at the requested place, but we
+ aren't allowed to execute code here. */
+ VG_(synth_fault_perms)(tid, addr);
+ } else {
+ /* There is no segment at all; we are attempting to execute in
+ the middle of nowhere. */
+ VG_(synth_fault_mapping)(tid, addr);
+ }
+ return False;
+ }
+
+ /* Do we want a self-checking translation? */
+ do_self_check = self_check_required( seg, tid );
+
+ /* True if a debug trans., or if bit N set in VG_(clo_trace_codegen). */
+ verbosity = 0;
+ if (debugging_translation) {
+ verbosity = debugging_verbosity;
+ }
+ else
+ if ( (VG_(clo_trace_flags) > 0
+ && VG_(get_bbs_translated)() >= VG_(clo_trace_notbelow) )) {
+ verbosity = VG_(clo_trace_flags);
+ }
+
+ /* Figure out which preamble-mangling callback to send. */
+ preamble_fn = NULL;
+ if (kind == T_Redir_Replace)
+ preamble_fn = mk_preamble__set_NRADDR_to_zero;
+ else
+ if (kind == T_Redir_Wrap)
+ preamble_fn = mk_preamble__set_NRADDR_to_nraddr;
+
+# if defined(VG_PLAT_USES_PPCTOC)
+ if (ULong_to_Ptr(nraddr)
+ == (void*)&VG_(ppctoc_magic_redirect_return_stub)) {
+ /* If entering the special return stub, this means a wrapped or
+ redirected function is returning. Make this translation one
+ which restores R2 and LR from the thread's hidden redir
+ stack, and branch to the (restored) link register, thereby
+ really causing the function to return. */
+ vg_assert(kind == T_Normal);
+ vg_assert(nraddr == addr);
+ preamble_fn = mk_preamble__ppctoc_magic_return_stub;
+ }
+# endif
+
+ /* ------ Actually do the translation. ------ */
+ tl_assert2(VG_(tdict).tool_instrument,
+ "you forgot to set VgToolInterface function 'tool_instrument'");
+
+ /* Get the CPU info established at startup. */
+ VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
+
+ /* Set up 'abiinfo' structure with stuff Vex needs to know about
+ the guest and host ABIs. */
+
+ LibVEX_default_VexAbiInfo( &vex_abiinfo );
+ vex_abiinfo.guest_stack_redzone_size = VG_STACK_REDZONE_SZB;
+
+# if defined(VGP_amd64_linux)
+ vex_abiinfo.guest_amd64_assume_fs_is_zero = True;
+# endif
+# if defined(VGP_ppc32_linux)
+ vex_abiinfo.guest_ppc_zap_RZ_at_blr = False;
+ vex_abiinfo.guest_ppc_zap_RZ_at_bl = NULL;
+ vex_abiinfo.host_ppc32_regalign_int64_args = True;
+# endif
+# if defined(VGP_ppc64_linux)
+ vex_abiinfo.guest_ppc_zap_RZ_at_blr = True;
+ vex_abiinfo.guest_ppc_zap_RZ_at_bl = const_True;
+ vex_abiinfo.host_ppc_calls_use_fndescrs = True;
+# endif
+# if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ vex_abiinfo.guest_ppc_zap_RZ_at_blr = False;
+ vex_abiinfo.guest_ppc_zap_RZ_at_bl = bl_RZ_zap_ok_for_AIX;
+ vex_abiinfo.guest_ppc_sc_continues_at_LR = True;
+ vex_abiinfo.host_ppc_calls_use_fndescrs = True;
+# endif
+
+ /* Set up closure args. */
+ closure.tid = tid;
+ closure.nraddr = nraddr;
+ closure.readdr = addr;
+
+ /* Set up args for LibVEX_Translate. */
+ vta.arch_guest = vex_arch;
+ vta.archinfo_guest = vex_archinfo;
+ vta.arch_host = vex_arch;
+ vta.archinfo_host = vex_archinfo;
+ vta.abiinfo_both = vex_abiinfo;
+ vta.guest_bytes = (UChar*)ULong_to_Ptr(addr);
+ vta.guest_bytes_addr = (Addr64)addr;
+ vta.callback_opaque = (void*)&closure;
+ vta.chase_into_ok = chase_into_ok;
+ vta.preamble_function = preamble_fn;
+ vta.guest_extents = &vge;
+ vta.host_bytes = tmpbuf;
+ vta.host_bytes_size = N_TMPBUF;
+ vta.host_bytes_used = &tmpbuf_used;
+ { /* At this point we have to reconcile Vex's view of the
+ instrumentation callback - which takes a void* first argument
+ - with Valgrind's view, in which the first arg is a
+ VgCallbackClosure*. Hence the following longwinded casts.
+ They are entirely legal but longwinded so as to maximise the
+ chance of the C typechecker picking up any type snafus. */
+ IRSB*(*f)(VgCallbackClosure*,
+ IRSB*,VexGuestLayout*,VexGuestExtents*,
+ IRType,IRType)
+ = VG_(tdict).tool_instrument;
+ IRSB*(*g)(void*,
+ IRSB*,VexGuestLayout*,VexGuestExtents*,
+ IRType,IRType)
+ = (IRSB*(*)(void*,IRSB*,VexGuestLayout*,VexGuestExtents*,IRType,IRType))f;
+ vta.instrument1 = g;
+ }
+ /* No need for type kludgery here. */
+ vta.instrument2 = need_to_handle_SP_assignment()
+ ? vg_SP_update_pass
+ : NULL;
+ vta.finaltidy = VG_(needs).final_IR_tidy_pass
+ ? VG_(tdict).tool_final_IR_tidy_pass
+ : NULL;
+ vta.do_self_check = do_self_check;
+ vta.traceflags = verbosity;
+
+ /* Set up the dispatch-return info. For archs without a link
+ register, vex generates a jump back to the specified dispatch
+ address. Else, it just generates a branch-to-LR. */
+# if defined(VGA_x86) || defined(VGA_amd64)
+ vta.dispatch
+ = (!allow_redirection)
+ ? /* It's a no-redir translation. Will be run with the nonstandard
+ dispatcher VG_(run_a_noredir_translation)
+ and so needs a nonstandard return point. */
+ (void*) &VG_(run_a_noredir_translation__return_point)
+
+ : /* normal translation. Uses VG_(run_innerloop). Return
+ point depends on whether we're profiling bbs or not. */
+ VG_(clo_profile_flags) > 0
+ ? (void*) &VG_(run_innerloop__dispatch_profiled)
+ : (void*) &VG_(run_innerloop__dispatch_unprofiled);
+# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+ vta.dispatch = NULL;
+# else
+# error "Unknown arch"
+# endif
+
+ /* Sheesh. Finally, actually _do_ the translation! */
+ tres = LibVEX_Translate ( &vta );
+
+ vg_assert(tres == VexTransOK);
+ vg_assert(tmpbuf_used <= N_TMPBUF);
+ vg_assert(tmpbuf_used > 0);
+
+ /* Tell aspacem of all segments that have had translations taken
+ from them. Optimisation: don't re-look up vge.base[0] since seg
+ should already point to it. */
+
+ vg_assert( vge.base[0] == (Addr64)addr );
+ /* set 'translations taken from this segment' flag */
+ VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( (NSegment*)seg );
+ } /* END new scope specially for 'seg' */
+
+ for (i = 1; i < vge.n_used; i++) {
+ NSegment const* seg
+ = VG_(am_find_nsegment)( vge.base[i] );
+ /* set 'translations taken from this segment' flag */
+ VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( (NSegment*)seg );
+ }
+
+ /* Copy data at trans_addr into the translation cache. */
+ vg_assert(tmpbuf_used > 0 && tmpbuf_used < 65536);
+
+ // If debugging, don't do anything with the translated block; we
+ // only did this for the debugging output produced along the way.
+ if (!debugging_translation) {
+
+ if (kind != T_NoRedir) {
+ // Put it into the normal TT/TC structures. This is the
+ // normal case.
+
+ // Note that we use nraddr (the non-redirected address), not
+ // addr, which might have been changed by the redirection
+ VG_(add_to_transtab)( &vge,
+ nraddr,
+ (Addr)(&tmpbuf[0]),
+ tmpbuf_used,
+ do_self_check );
+ } else {
+ VG_(add_to_unredir_transtab)( &vge,
+ nraddr,
+ (Addr)(&tmpbuf[0]),
+ tmpbuf_used );
+ }
+ }
+
+ return True;
+}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_transtab.c.svn-base b/coregrind/.svn/text-base/m_transtab.c.svn-base
new file mode 100644
index 0000000..956e299
--- /dev/null
+++ b/coregrind/.svn/text-base/m_transtab.c.svn-base
@@ -0,0 +1,1630 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Management of the translation table and cache. ---*/
+/*--- m_transtab.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_machine.h" // For VG(machine_get_VexArchInfo)
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_options.h"
+#include "pub_core_tooliface.h" // For VG_(details).avg_translation_sizeB
+#include "pub_core_transtab.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_mallocfree.h" // VG_(out_of_memory_NORETURN)
+
+/* #define DEBUG_TRANSTAB */
+
+
+/*-------------------------------------------------------------*/
+/*--- Management of the FIFO-based translation table+cache. ---*/
+/*-------------------------------------------------------------*/
+
+/*------------------ CONSTANTS ------------------*/
+
+/* Number of sectors the TC is divided into. If you need a larger
+ overall translation cache, increase this value. */
+#define N_SECTORS 8
+
+/* Number of TC entries in each sector. This needs to be a prime
+ number to work properly, it must be <= 65535 (so that a TT index
+ fits in a UShort, leaving room for 0xFFFF(EC2TTE_DELETED) to denote
+ 'deleted') and it is strongly recommended not to change this.
+ 65521 is the largest prime <= 65535. */
+#define N_TTES_PER_SECTOR /*30011*/ /*40009*/ 65521
+
+/* Because each sector contains a hash table of TTEntries, we need to
+ specify the maximum allowable loading, after which the sector is
+ deemed full. */
+#define SECTOR_TT_LIMIT_PERCENT 80
+
+/* The sector is deemed full when this many entries are in it. */
+#define N_TTES_PER_SECTOR_USABLE \
+ ((N_TTES_PER_SECTOR * SECTOR_TT_LIMIT_PERCENT) / 100)
+
+/* Equivalence classes for fast address range deletion. There are 1 +
+ 2^ECLASS_WIDTH bins. The highest one, ECLASS_MISC, describes an
+ address range which does not fall cleanly within any specific bin.
+ Note that ECLASS_SHIFT + ECLASS_WIDTH must be < 32. */
+#define ECLASS_SHIFT 11
+#define ECLASS_WIDTH 8
+#define ECLASS_MISC (1 << ECLASS_WIDTH)
+#define ECLASS_N (1 + ECLASS_MISC)
+
+#define EC2TTE_DELETED 0xFFFF /* 16-bit special value */
+
+
+/*------------------ TYPES ------------------*/
+
+/* A translation-table entry. This indicates precisely which areas of
+ guest code are included in the translation, and contains all other
+ auxiliary info too. */
+typedef
+ struct {
+ /* Profiling only: the count and weight (arbitrary meaning) for
+ this translation. Weight is a property of the translation
+ itself and computed once when the translation is created.
+ Count is an entry count for the translation and is
+ incremented by 1 every time the translation is used, if we
+ are profiling. */
+ UInt count;
+ UShort weight;
+
+ /* Status of the slot. Note, we need to be able to do lazy
+ deletion, hence the Deleted state. */
+ enum { InUse, Deleted, Empty } status;
+
+ /* 64-bit aligned pointer to one or more 64-bit words containing
+ the corresponding host code (must be in the same sector!)
+ This is a pointer into the sector's tc (code) area. */
+ ULong* tcptr;
+
+ /* This is the original guest address that purportedly is the
+ entry point of the translation. You might think that .entry
+ should be the same as .vge->base[0], and most of the time it
+ is. However, when doing redirections, that is not the case.
+ .vge must always correctly describe the guest code sections
+ from which this translation was made. However, .entry may or
+ may not be a lie, depending on whether or not we're doing
+ redirection. */
+ Addr64 entry;
+
+ /* This structure describes precisely what ranges of guest code
+ the translation covers, so we can decide whether or not to
+ delete it when translations of a given address range are
+ invalidated. */
+ VexGuestExtents vge;
+
+ /* Address range summary info: these are pointers back to
+ eclass[] entries in the containing Sector. Those entries in
+ turn point back here -- the two structures are mutually
+ redundant but both necessary to make fast deletions work.
+ The eclass info is similar to, and derived from, this entry's
+ 'vge' field, but it is not the same */
+ UShort n_tte2ec; // # tte2ec pointers (1 to 3)
+ UShort tte2ec_ec[3]; // for each, the eclass #
+ UInt tte2ec_ix[3]; // and the index within the eclass.
+ // for i in 0 .. n_tte2ec-1
+ // sec->ec2tte[ tte2ec_ec[i] ][ tte2ec_ix[i] ]
+ // should be the index
+ // of this TTEntry in the containing Sector's tt array.
+ }
+ TTEntry;
+
+
+/* Finally, a sector itself. Each sector contains an array of
+ TCEntries, which hold code, and an array of TTEntries, containing
+ all required administrative info. Profiling is supported using the
+ TTEntry .count and .weight fields, if required. Each sector is
+ independent in that no cross-sector references are allowed.
+
+ If the sector is not in use, all three pointers are NULL and
+ tt_n_inuse is zero.
+*/
+typedef
+ struct {
+ /* The TCEntry area. Size of this depends on the average
+ translation size. We try and size it so it becomes full
+ precisely when this sector's translation table (tt) reaches
+ its load limit (SECTOR_TT_LIMIT_PERCENT). */
+ ULong* tc;
+
+ /* The TTEntry array. This is a fixed size, always containing
+ exactly N_TTES_PER_SECTOR entries. */
+ TTEntry* tt;
+
+ /* This points to the current allocation point in tc. */
+ ULong* tc_next;
+
+ /* The count of tt entries with state InUse. */
+ Int tt_n_inuse;
+
+ /* Expandable arrays of tt indices for each of the ECLASS_N
+ address range equivalence classes. These hold indices into
+ the containing sector's tt array, which in turn should point
+ back here. */
+ Int ec2tte_size[ECLASS_N];
+ Int ec2tte_used[ECLASS_N];
+ UShort* ec2tte[ECLASS_N];
+ }
+ Sector;
+
+
+/*------------------ DECLS ------------------*/
+
+/* The root data structure is an array of sectors. The index of the
+ youngest sector is recorded, and new translations are put into that
+ sector. When it fills up, we move along to the next sector and
+ start to fill that up, wrapping around at the end of the array.
+ That way, once all N_TC_SECTORS have been bought into use for the
+ first time, and are full, we then re-use the oldest sector,
+ endlessly.
+
+ When running, youngest sector should be between >= 0 and <
+ N_TC_SECTORS. The initial -1 value indicates the TT/TC system is
+ not yet initialised.
+*/
+static Sector sectors[N_SECTORS];
+static Int youngest_sector = -1;
+
+/* The number of ULongs in each TCEntry area. This is computed once
+ at startup and does not change. */
+static Int tc_sector_szQ;
+
+
+/* Fast helper for the TC. A direct-mapped cache which holds a set of
+ recently used (guest address, host address) pairs. This array is
+ referred to directly from m_dispatch/dispatch-<platform>.S.
+
+ Entries in tt_fast may refer to any valid TC entry, regardless of
+ which sector it's in. Consequently we must be very careful to
+ invalidate this cache when TC entries are changed or disappear.
+
+ A special .guest address - TRANSTAB_BOGUS_GUEST_ADDR -- must be
+ pointed at to cause that cache entry to miss. This relies on the
+ assumption that no guest code actually has that address, hence a
+ value 0x1 seems good. m_translate gives the client a synthetic
+ segfault if it tries to execute at this address.
+*/
+/*
+typedef
+ struct {
+ Addr guest;
+ Addr host;
+ }
+ FastCacheEntry;
+*/
+/*global*/ __attribute__((aligned(16)))
+ FastCacheEntry VG_(tt_fast)[VG_TT_FAST_SIZE];
+/*
+#define TRANSTAB_BOGUS_GUEST_ADDR ((Addr)1)
+*/
+
+/* For profiling, we have a parallel array of pointers to .count
+ fields in TT entries. Again, these pointers must be invalidated
+ when translations disappear. A NULL pointer suffices to indicate
+ an unused slot.
+
+ When not profiling (the normal case, VG_(clo_profile_flags) == 0),
+ all tt_fastN entries are set to NULL at startup and never read nor
+ written after that.
+
+ When profiling (VG_(clo_profile_flags) > 0), tt_fast and tt_fastN
+ change together: if tt_fast[i].guest is TRANSTAB_BOGUS_GUEST_ADDR
+ then the corresponding tt_fastN[i] must be null. If
+ tt_fast[i].guest is any other value, then tt_fastN[i] *must* point
+ to the .count field of the corresponding TT entry.
+
+ tt_fast and tt_fastN are referred to from assembly code
+ (dispatch.S).
+*/
+/*global*/ UInt* VG_(tt_fastN)[VG_TT_FAST_SIZE];
+
+
+/* Make sure we're not used before initialisation. */
+static Bool init_done = False;
+
+
+/*------------------ STATS DECLS ------------------*/
+
+/* Number of fast-cache updates and flushes done. */
+ULong n_fast_flushes = 0;
+ULong n_fast_updates = 0;
+
+/* Number of full lookups done. */
+ULong n_full_lookups = 0;
+ULong n_lookup_probes = 0;
+
+/* Number/osize/tsize of translations entered; also the number of
+ those for which self-checking was requested. */
+ULong n_in_count = 0;
+ULong n_in_osize = 0;
+ULong n_in_tsize = 0;
+ULong n_in_sc_count = 0;
+
+/* Number/osize of translations discarded due to lack of space. */
+ULong n_dump_count = 0;
+ULong n_dump_osize = 0;
+
+/* Number/osize of translations discarded due to requests to do so. */
+ULong n_disc_count = 0;
+ULong n_disc_osize = 0;
+
+
+/*-------------------------------------------------------------*/
+/*--- Address-range equivalence class stuff ---*/
+/*-------------------------------------------------------------*/
+
+/* Return equivalence class number for a range. */
+
+static Int range_to_eclass ( Addr64 start, UInt len )
+{
+ UInt mask = (1 << ECLASS_WIDTH) - 1;
+ UInt lo = (UInt)start;
+ UInt hi = lo + len - 1;
+ UInt loBits = (lo >> ECLASS_SHIFT) & mask;
+ UInt hiBits = (hi >> ECLASS_SHIFT) & mask;
+ if (loBits == hiBits) {
+ vg_assert(loBits < ECLASS_N-1);
+ return loBits;
+ } else {
+ return ECLASS_MISC;
+ }
+}
+
+
+/* Calculates the equivalence class numbers for any VexGuestExtent.
+ These are written in *eclasses, which must be big enough to hold 3
+ Ints. The number written, between 1 and 3, is returned. The
+ eclasses are presented in order, and any duplicates are removed.
+*/
+
+static
+Int vexGuestExtents_to_eclasses ( /*OUT*/Int* eclasses,
+ VexGuestExtents* vge )
+{
+# define SWAP(_lv1,_lv2) \
+ do { Int t = _lv1; _lv1 = _lv2; _lv2 = t; } while (0)
+
+ Int i, j, n_ec, r;
+
+ vg_assert(vge->n_used >= 1 && vge->n_used <= 3);
+
+ n_ec = 0;
+ for (i = 0; i < vge->n_used; i++) {
+ r = range_to_eclass( vge->base[i], (UInt)vge->len[i] );
+ if (r == ECLASS_MISC)
+ goto bad;
+ /* only add if we haven't already seen it */
+ for (j = 0; j < n_ec; j++)
+ if (eclasses[j] == r)
+ break;
+ if (j == n_ec)
+ eclasses[n_ec++] = r;
+ }
+
+ if (n_ec == 1)
+ return 1;
+
+ if (n_ec == 2) {
+ /* sort */
+ if (eclasses[0] > eclasses[1])
+ SWAP(eclasses[0], eclasses[1]);
+ return 2;
+ }
+
+ if (n_ec == 3) {
+ /* sort */
+ if (eclasses[0] > eclasses[2])
+ SWAP(eclasses[0], eclasses[2]);
+ if (eclasses[0] > eclasses[1])
+ SWAP(eclasses[0], eclasses[1]);
+ if (eclasses[1] > eclasses[2])
+ SWAP(eclasses[1], eclasses[2]);
+ return 3;
+ }
+
+ /* NOTREACHED */
+ vg_assert(0);
+
+ bad:
+ eclasses[0] = ECLASS_MISC;
+ return 1;
+
+# undef SWAP
+}
+
+
+/* Add tteno to the set of entries listed for equivalence class ec in
+ this sector. Returns used location in eclass array. */
+
+static
+UInt addEClassNo ( /*MOD*/Sector* sec, Int ec, UShort tteno )
+{
+ Int old_sz, new_sz, i, r;
+ UShort *old_ar, *new_ar;
+
+ vg_assert(ec >= 0 && ec < ECLASS_N);
+ vg_assert(tteno < N_TTES_PER_SECTOR);
+
+ if (0) VG_(printf)("ec %d gets %d\n", ec, (Int)tteno);
+
+ if (sec->ec2tte_used[ec] >= sec->ec2tte_size[ec]) {
+
+ vg_assert(sec->ec2tte_used[ec] == sec->ec2tte_size[ec]);
+
+ old_sz = sec->ec2tte_size[ec];
+ old_ar = sec->ec2tte[ec];
+ new_sz = old_sz==0 ? 8 : old_sz<64 ? 2*old_sz : (3*old_sz)/2;
+ new_ar = VG_(arena_malloc)(VG_AR_TTAUX, "transtab.aECN.1",
+ new_sz * sizeof(UShort));
+ for (i = 0; i < old_sz; i++)
+ new_ar[i] = old_ar[i];
+ if (old_ar)
+ VG_(arena_free)(VG_AR_TTAUX, old_ar);
+ sec->ec2tte_size[ec] = new_sz;
+ sec->ec2tte[ec] = new_ar;
+
+ if (0) VG_(printf)("expand ec %d to %d\n", ec, new_sz);
+ }
+
+ /* Common case */
+ r = sec->ec2tte_used[ec]++;
+ vg_assert(r >= 0 && r < sec->ec2tte_size[ec]);
+ sec->ec2tte[ec][r] = tteno;
+ return (UInt)r;
+}
+
+
+/* 'vge' is being added to 'sec' at TT entry 'tteno'. Add appropriate
+ eclass entries to 'sec'. */
+
+static
+void upd_eclasses_after_add ( /*MOD*/Sector* sec, Int tteno )
+{
+ Int i, r, eclasses[3];
+ TTEntry* tte;
+ vg_assert(tteno >= 0 && tteno < N_TTES_PER_SECTOR);
+
+ tte = &sec->tt[tteno];
+ r = vexGuestExtents_to_eclasses( eclasses, &tte->vge );
+
+ vg_assert(r >= 1 && r <= 3);
+ tte->n_tte2ec = r;
+
+ for (i = 0; i < r; i++) {
+ tte->tte2ec_ec[i] = eclasses[i];
+ tte->tte2ec_ix[i] = addEClassNo( sec, eclasses[i], (UShort)tteno );
+ }
+}
+
+
+/* Check the eclass info in 'sec' to ensure it is consistent. Returns
+ True if OK, False if something's not right. Expensive. */
+
+static Bool sanity_check_eclasses_in_sector ( Sector* sec )
+{
+# define BAD(_str) do { whassup = (_str); goto bad; } while (0)
+
+ HChar* whassup = NULL;
+ Int i, j, k, n, ec_num, ec_idx;
+ TTEntry* tte;
+ UShort tteno;
+ ULong* tce;
+
+ /* Basic checks on this sector */
+ if (sec->tt_n_inuse < 0 || sec->tt_n_inuse > N_TTES_PER_SECTOR_USABLE)
+ BAD("invalid sec->tt_n_inuse");
+ tce = sec->tc_next;
+ if (tce < &sec->tc[0] || tce > &sec->tc[tc_sector_szQ])
+ BAD("sec->tc_next points outside tc");
+
+ /* For each eclass ... */
+ for (i = 0; i < ECLASS_N; i++) {
+ if (sec->ec2tte_size[i] == 0 && sec->ec2tte[i] != NULL)
+ BAD("ec2tte_size/ec2tte mismatch(1)");
+ if (sec->ec2tte_size[i] != 0 && sec->ec2tte[i] == NULL)
+ BAD("ec2tte_size/ec2tte mismatch(2)");
+ if (sec->ec2tte_used[i] < 0
+ || sec->ec2tte_used[i] > sec->ec2tte_size[i])
+ BAD("implausible ec2tte_used");
+ if (sec->ec2tte_used[i] == 0)
+ continue;
+
+ /* For each tt reference in each eclass .. ensure the reference
+ is to a valid tt entry, and that the entry's address ranges
+ really include this eclass. */
+
+ for (j = 0; j < sec->ec2tte_used[i]; j++) {
+ tteno = sec->ec2tte[i][j];
+ if (tteno == EC2TTE_DELETED)
+ continue;
+ if (tteno >= N_TTES_PER_SECTOR)
+ BAD("implausible tteno");
+ tte = &sec->tt[tteno];
+ if (tte->status != InUse)
+ BAD("tteno points to non-inuse tte");
+ if (tte->n_tte2ec < 1 || tte->n_tte2ec > 3)
+ BAD("tte->n_tte2ec out of range");
+ /* Exactly least one of tte->eclasses[0 .. tte->n_eclasses-1]
+ must equal i. Inspect tte's eclass info. */
+ n = 0;
+ for (k = 0; k < tte->n_tte2ec; k++) {
+ if (k < tte->n_tte2ec-1
+ && tte->tte2ec_ec[k] >= tte->tte2ec_ec[k+1])
+ BAD("tte->tte2ec_ec[..] out of order");
+ ec_num = tte->tte2ec_ec[k];
+ if (ec_num < 0 || ec_num >= ECLASS_N)
+ BAD("tte->tte2ec_ec[..] out of range");
+ if (ec_num != i)
+ continue;
+ ec_idx = tte->tte2ec_ix[k];
+ if (ec_idx < 0 || ec_idx >= sec->ec2tte_used[i])
+ BAD("tte->tte2ec_ix[..] out of range");
+ if (ec_idx == j)
+ n++;
+ }
+ if (n != 1)
+ BAD("tteno does not point back at eclass");
+ }
+ }
+
+ /* That establishes that for each forward pointer from TTEntrys
+ there is a corresponding backward pointer from the eclass[]
+ arrays. However, it doesn't rule out the possibility of other,
+ bogus pointers in the eclass[] arrays. So do those similarly:
+ scan through them and check the TTEntryies they point at point
+ back. */
+
+ for (i = 0; i < N_TTES_PER_SECTOR_USABLE; i++) {
+
+ tte = &sec->tt[i];
+ if (tte->status == Empty || tte->status == Deleted) {
+ if (tte->n_tte2ec != 0)
+ BAD("tte->n_eclasses nonzero for unused tte");
+ continue;
+ }
+
+ vg_assert(tte->status == InUse);
+
+ if (tte->n_tte2ec < 1 || tte->n_tte2ec > 3)
+ BAD("tte->n_eclasses out of range(2)");
+
+ for (j = 0; j < tte->n_tte2ec; j++) {
+ ec_num = tte->tte2ec_ec[j];
+ if (ec_num < 0 || ec_num >= ECLASS_N)
+ BAD("tte->eclass[..] out of range");
+ ec_idx = tte->tte2ec_ix[j];
+ if (ec_idx < 0 || ec_idx >= sec->ec2tte_used[ec_num])
+ BAD("tte->ec_idx[..] out of range(2)");
+ if (sec->ec2tte[ec_num][ec_idx] != i)
+ BAD("ec2tte does not point back to tte");
+ }
+ }
+
+ return True;
+
+ bad:
+ if (whassup)
+ VG_(debugLog)(0, "transtab", "eclass sanity fail: %s\n", whassup);
+
+# if 0
+ VG_(printf)("eclass = %d\n", i);
+ VG_(printf)("tteno = %d\n", (Int)tteno);
+ switch (tte->status) {
+ case InUse: VG_(printf)("InUse\n"); break;
+ case Deleted: VG_(printf)("Deleted\n"); break;
+ case Empty: VG_(printf)("Empty\n"); break;
+ }
+ if (tte->status != Empty) {
+ for (k = 0; k < tte->vge.n_used; k++)
+ VG_(printf)("0x%llx %d\n", tte->vge.base[k],
+ (Int)tte->vge.len[k]);
+ }
+# endif
+
+ return False;
+
+# undef BAD
+}
+
+
+/* Sanity check absolutely everything. True == check passed. */
+
+/* forwards */
+static Bool sanity_check_redir_tt_tc ( void );
+static Bool sanity_check_fastcache ( void );
+
+static Bool sanity_check_all_sectors ( void )
+{
+ Int sno;
+ Bool sane;
+ Sector* sec;
+ for (sno = 0; sno < N_SECTORS; sno++) {
+ sec = &sectors[sno];
+ if (sec->tc == NULL)
+ continue;
+ sane = sanity_check_eclasses_in_sector( sec );
+ if (!sane)
+ return False;
+ }
+ if ( !sanity_check_redir_tt_tc() )
+ return False;
+ if ( !sanity_check_fastcache() )
+ return False;
+ return True;
+}
+
+
+/*-------------------------------------------------------------*/
+/*--- Add/find translations ---*/
+/*-------------------------------------------------------------*/
+
+static UInt vge_osize ( VexGuestExtents* vge )
+{
+ UInt i, n = 0;
+ for (i = 0; i < vge->n_used; i++)
+ n += (UInt)vge->len[i];
+ return n;
+}
+
+static Bool isValidSector ( Int sector )
+{
+ if (sector < 0 || sector >= N_SECTORS)
+ return False;
+ return True;
+}
+
+static inline UInt HASH_TT ( Addr64 key )
+{
+ UInt kHi = (UInt)(key >> 32);
+ UInt kLo = (UInt)key;
+ UInt k32 = kHi ^ kLo;
+ UInt ror = 7;
+ if (ror > 0)
+ k32 = (k32 >> ror) | (k32 << (32-ror));
+ return k32 % N_TTES_PER_SECTOR;
+}
+
+static void setFastCacheEntry ( Addr64 key, ULong* tcptr, UInt* count )
+{
+ UInt cno = (UInt)VG_TT_FAST_HASH(key);
+ VG_(tt_fast)[cno].guest = (Addr)key;
+ VG_(tt_fast)[cno].host = (Addr)tcptr;
+ if (VG_(clo_profile_flags) > 0)
+ VG_(tt_fastN)[cno] = count;
+ n_fast_updates++;
+ /* This shouldn't fail. It should be assured by m_translate
+ which should reject any attempt to make translation of code
+ starting at TRANSTAB_BOGUS_GUEST_ADDR. */
+ vg_assert(VG_(tt_fast)[cno].guest != TRANSTAB_BOGUS_GUEST_ADDR);
+}
+
+/* Invalidate the fast cache's counter array, VG_(tt_fastN). */
+static void invalidateFastNCache ( void )
+{
+ UInt j;
+ vg_assert(VG_TT_FAST_SIZE > 0 && (VG_TT_FAST_SIZE % 4) == 0);
+ for (j = 0; j < VG_TT_FAST_SIZE; j += 4) {
+ VG_(tt_fastN)[j+0] = NULL;
+ VG_(tt_fastN)[j+1] = NULL;
+ VG_(tt_fastN)[j+2] = NULL;
+ VG_(tt_fastN)[j+3] = NULL;
+ }
+ vg_assert(j == VG_TT_FAST_SIZE);
+}
+
+/* Invalidate the fast cache VG_(tt_fast). If profiling, also
+ invalidate the fast cache's counter array VG_(tt_fastN), otherwise
+ don't touch it. */
+static void invalidateFastCache ( void )
+{
+ UInt j;
+ /* This loop is popular enough to make it worth unrolling a
+ bit, at least on ppc32. */
+ vg_assert(VG_TT_FAST_SIZE > 0 && (VG_TT_FAST_SIZE % 4) == 0);
+ for (j = 0; j < VG_TT_FAST_SIZE; j += 4) {
+ VG_(tt_fast)[j+0].guest = TRANSTAB_BOGUS_GUEST_ADDR;
+ VG_(tt_fast)[j+1].guest = TRANSTAB_BOGUS_GUEST_ADDR;
+ VG_(tt_fast)[j+2].guest = TRANSTAB_BOGUS_GUEST_ADDR;
+ VG_(tt_fast)[j+3].guest = TRANSTAB_BOGUS_GUEST_ADDR;
+ }
+
+ if (VG_(clo_profile_flags) > 0)
+ invalidateFastNCache();
+
+ vg_assert(j == VG_TT_FAST_SIZE);
+ n_fast_flushes++;
+}
+
+static Bool sanity_check_fastcache ( void )
+{
+ UInt j;
+ if (0) VG_(printf)("sanity check fastcache\n");
+ if (VG_(clo_profile_flags) > 0) {
+ /* profiling */
+ for (j = 0; j < VG_TT_FAST_SIZE; j++) {
+ if (VG_(tt_fastN)[j] == NULL
+ && VG_(tt_fast)[j].guest != TRANSTAB_BOGUS_GUEST_ADDR)
+ return False;
+ if (VG_(tt_fastN)[j] != NULL
+ && VG_(tt_fast)[j].guest == TRANSTAB_BOGUS_GUEST_ADDR)
+ return False;
+ }
+ } else {
+ /* not profiling */
+ for (j = 0; j < VG_TT_FAST_SIZE; j++) {
+ if (VG_(tt_fastN)[j] != NULL)
+ return False;
+ }
+ }
+ return True;
+}
+
+static void initialiseSector ( Int sno )
+{
+ Int i;
+ SysRes sres;
+ Sector* sec;
+ vg_assert(isValidSector(sno));
+
+ sec = &sectors[sno];
+
+ if (sec->tc == NULL) {
+
+ /* Sector has never been used before. Need to allocate tt and
+ tc. */
+ vg_assert(sec->tt == NULL);
+ vg_assert(sec->tc_next == NULL);
+ vg_assert(sec->tt_n_inuse == 0);
+ for (i = 0; i < ECLASS_N; i++) {
+ vg_assert(sec->ec2tte_size[i] == 0);
+ vg_assert(sec->ec2tte_used[i] == 0);
+ vg_assert(sec->ec2tte[i] == NULL);
+ }
+
+ VG_(debugLog)(1,"transtab", "allocate sector %d\n", sno);
+
+ sres = VG_(am_mmap_anon_float_valgrind)( 8 * tc_sector_szQ );
+ if (sres.isError) {
+ VG_(out_of_memory_NORETURN)("initialiseSector(TC)",
+ 8 * tc_sector_szQ );
+ /*NOTREACHED*/
+ }
+ sec->tc = (ULong*)sres.res;
+
+ sres = VG_(am_mmap_anon_float_valgrind)
+ ( N_TTES_PER_SECTOR * sizeof(TTEntry) );
+ if (sres.isError) {
+ VG_(out_of_memory_NORETURN)("initialiseSector(TT)",
+ N_TTES_PER_SECTOR * sizeof(TTEntry) );
+ /*NOTREACHED*/
+ }
+ sec->tt = (TTEntry*)sres.res;
+
+ for (i = 0; i < N_TTES_PER_SECTOR; i++) {
+ sec->tt[i].status = Empty;
+ sec->tt[i].n_tte2ec = 0;
+ }
+
+ if (VG_(clo_verbosity) > 2)
+ VG_(message)(Vg_DebugMsg, "TT/TC: initialise sector %d", sno);
+
+ } else {
+
+ /* Sector has been used before. Dump the old contents. */
+ VG_(debugLog)(1,"transtab", "recycle sector %d\n", sno);
+ vg_assert(sec->tt != NULL);
+ vg_assert(sec->tc_next != NULL);
+ n_dump_count += sec->tt_n_inuse;
+
+ /* Visit each just-about-to-be-abandoned translation. */
+ for (i = 0; i < N_TTES_PER_SECTOR; i++) {
+ if (sec->tt[i].status == InUse) {
+ vg_assert(sec->tt[i].n_tte2ec >= 1);
+ vg_assert(sec->tt[i].n_tte2ec <= 3);
+ n_dump_osize += vge_osize(&sec->tt[i].vge);
+ /* Tell the tool too. */
+ if (VG_(needs).superblock_discards) {
+ VG_TDICT_CALL( tool_discard_superblock_info,
+ sec->tt[i].entry,
+ sec->tt[i].vge );
+ }
+ } else {
+ vg_assert(sec->tt[i].n_tte2ec == 0);
+ }
+ sec->tt[i].status = Empty;
+ sec->tt[i].n_tte2ec = 0;
+ }
+
+ /* Free up the eclass structures. */
+ for (i = 0; i < ECLASS_N; i++) {
+ if (sec->ec2tte_size[i] == 0) {
+ vg_assert(sec->ec2tte_used[i] == 0);
+ vg_assert(sec->ec2tte[i] == NULL);
+ } else {
+ vg_assert(sec->ec2tte[i] != NULL);
+ VG_(arena_free)(VG_AR_TTAUX, sec->ec2tte[i]);
+ sec->ec2tte[i] = NULL;
+ sec->ec2tte_size[i] = 0;
+ sec->ec2tte_used[i] = 0;
+ }
+ }
+
+ if (VG_(clo_verbosity) > 2)
+ VG_(message)(Vg_DebugMsg, "TT/TC: recycle sector %d", sno);
+ }
+
+ sec->tc_next = sec->tc;
+ sec->tt_n_inuse = 0;
+
+ invalidateFastCache();
+}
+
+static void invalidate_icache ( void *ptr, Int nbytes )
+{
+# if defined(VGA_ppc32) || defined(VGA_ppc64)
+ Addr startaddr = (Addr) ptr;
+ Addr endaddr = startaddr + nbytes;
+ Addr cls;
+ Addr addr;
+ VexArchInfo vai;
+
+ if (nbytes == 0) return;
+ vg_assert(nbytes > 0);
+
+ VG_(machine_get_VexArchInfo)( NULL, &vai );
+ cls = vai.ppc_cache_line_szB;
+
+ /* Stay sane .. */
+ vg_assert(cls == 32 || cls == 64 || cls == 128);
+
+ startaddr &= ~(cls - 1);
+ for (addr = startaddr; addr < endaddr; addr += cls)
+ asm volatile("dcbst 0,%0" : : "r" (addr));
+ asm volatile("sync");
+ for (addr = startaddr; addr < endaddr; addr += cls)
+ asm volatile("icbi 0,%0" : : "r" (addr));
+ asm volatile("sync; isync");
+
+# elif defined(VGA_x86)
+ /* no need to do anything, hardware provides coherence */
+
+# elif defined(VGA_amd64)
+ /* no need to do anything, hardware provides coherence */
+
+# else
+# error "Unknown ARCH"
+# endif
+}
+
+
+/* Add a translation of vge to TT/TC. The translation is temporarily
+ in code[0 .. code_len-1].
+
+ pre: youngest_sector points to a valid (although possibly full)
+ sector.
+*/
void VG_(add_to_transtab)( VexGuestExtents* vge,
                           Addr64 entry,
                           AddrH code,
                           UInt code_len,
                           Bool is_self_checking )
{
   Int tcAvailQ, reqdQ, y, i;
   ULong *tcptr, *tcptr2;
   UChar* srcP;
   UChar* dstP;

   vg_assert(init_done);
   /* A VexGuestExtents describes 1 .. 3 chunks of guest code. */
   vg_assert(vge->n_used >= 1 && vge->n_used <= 3);

   /* 60000: should agree with N_TMPBUF in m_translate.c. */
   vg_assert(code_len > 0 && code_len < 60000);

   if (0)
      VG_(printf)("add_to_transtab(entry = 0x%llx, len = %d)\n",
                  entry, code_len);

   /* Stats: count this translation and its guest/host sizes. */
   n_in_count++;
   n_in_tsize += code_len;
   n_in_osize += vge_osize(vge);
   if (is_self_checking)
      n_in_sc_count++;

   y = youngest_sector;
   vg_assert(isValidSector(y));

   /* Lazily allocate the sector's storage on first use. */
   if (sectors[y].tc == NULL)
      initialiseSector(y);

   /* Try putting the translation in this sector.  reqdQ is the
      required space in 8-byte (ULong) units, rounded up. */
   reqdQ = (code_len + 7) >> 3;

   /* Will it fit in tc? */
   tcAvailQ = ((ULong*)(&sectors[y].tc[tc_sector_szQ]))
              - ((ULong*)(sectors[y].tc_next));
   vg_assert(tcAvailQ >= 0);
   vg_assert(tcAvailQ <= tc_sector_szQ);

   if (tcAvailQ < reqdQ
       || sectors[y].tt_n_inuse >= N_TTES_PER_SECTOR_USABLE) {
      /* No.  So move on to the next sector.  Either it's never been
         used before, in which case it will get its tt/tc allocated
         now, or it has been used before, in which case it is set to be
         empty, hence throwing out the oldest sector. */
      vg_assert(tc_sector_szQ > 0);
      VG_(debugLog)(1,"transtab",
                    "declare sector %d full "
                    "(TT loading %2d%%, TC loading %2d%%)\n",
                    y,
                    (100 * sectors[y].tt_n_inuse)
                       / N_TTES_PER_SECTOR,
                    (100 * (tc_sector_szQ - tcAvailQ))
                       / tc_sector_szQ);
      /* Advance circularly to the next (oldest) sector. */
      youngest_sector++;
      if (youngest_sector >= N_SECTORS)
         youngest_sector = 0;
      y = youngest_sector;
      initialiseSector(y);
   }

   /* Be sure ... the (possibly new) sector must now have room. */
   tcAvailQ = ((ULong*)(&sectors[y].tc[tc_sector_szQ]))
              - ((ULong*)(sectors[y].tc_next));
   vg_assert(tcAvailQ >= 0);
   vg_assert(tcAvailQ <= tc_sector_szQ);
   vg_assert(tcAvailQ >= reqdQ);
   vg_assert(sectors[y].tt_n_inuse < N_TTES_PER_SECTOR_USABLE);
   vg_assert(sectors[y].tt_n_inuse >= 0);

   /* Copy into tc. */
   tcptr = sectors[y].tc_next;
   vg_assert(tcptr >= &sectors[y].tc[0]);
   vg_assert(tcptr <= &sectors[y].tc[tc_sector_szQ]);

   dstP = (UChar*)tcptr;
   srcP = (UChar*)code;
   for (i = 0; i < code_len; i++)
      dstP[i] = srcP[i];
   sectors[y].tc_next += reqdQ;
   sectors[y].tt_n_inuse++;

   /* The copied code will be executed by the host; flush i-cache on
      architectures without hardware i/d coherence. */
   invalidate_icache( dstP, code_len );

   /* more paranoia */
   tcptr2 = sectors[y].tc_next;
   vg_assert(tcptr2 >= &sectors[y].tc[0]);
   vg_assert(tcptr2 <= &sectors[y].tc[tc_sector_szQ]);

   /* Find an empty tt slot, and use it.  There must be such a slot
      since tt is never allowed to get completely full.  Linear probe
      from the hash of the guest address, wrapping at the end. */
   i = HASH_TT(entry);
   vg_assert(i >= 0 && i < N_TTES_PER_SECTOR);
   while (True) {
      if (sectors[y].tt[i].status == Empty
          || sectors[y].tt[i].status == Deleted)
         break;
      i++;
      if (i >= N_TTES_PER_SECTOR)
         i = 0;
   }

   sectors[y].tt[i].status = InUse;
   sectors[y].tt[i].tcptr = tcptr;
   sectors[y].tt[i].count = 0;
   sectors[y].tt[i].weight = 1;
   sectors[y].tt[i].vge = *vge;
   sectors[y].tt[i].entry = entry;

   /* Update the fast-cache. */
   setFastCacheEntry( entry, tcptr, &sectors[y].tt[i].count );

   /* Note the eclass numbers for this translation. */
   upd_eclasses_after_add( &sectors[y], i );
}
+
+
+/* Search for the translation of the given guest address. If
+ requested, a successful search can also cause the fast-caches to be
+ updated.
+*/
Bool VG_(search_transtab) ( /*OUT*/AddrH* result,
                            Addr64 guest_addr,
                            Bool upd_cache )
{
   Int i, j, k, kstart, sno;

   vg_assert(init_done);
   /* Find the initial probe point just once.  It will be the same in
      all sectors and avoids multiple expensive % operations. */
   n_full_lookups++;
   k = -1;
   kstart = HASH_TT(guest_addr);
   vg_assert(kstart >= 0 && kstart < N_TTES_PER_SECTOR);

   /* Search in all the sectors.  Although the order should not matter,
      it might be most efficient to search in the order youngest to
      oldest. */
   sno = youngest_sector;
   for (i = 0; i < N_SECTORS; i++) {

      if (sectors[sno].tc == NULL)
         goto notfound; /* sector not in use. */

      /* Linear-probe this sector's tt, starting at the hash point
         and wrapping at the end of the table. */
      k = kstart;
      for (j = 0; j < N_TTES_PER_SECTOR; j++) {
         n_lookup_probes++;
         if (sectors[sno].tt[k].status == InUse
             && sectors[sno].tt[k].entry == guest_addr) {
            /* found it */
            if (upd_cache)
               setFastCacheEntry(
                  guest_addr, sectors[sno].tt[k].tcptr,
                  &sectors[sno].tt[k].count );
            if (result)
               *result = (AddrH)sectors[sno].tt[k].tcptr;
            return True;
         }
         /* An Empty slot terminates the probe chain: the entry, had
            it been inserted, would have been placed no later than
            here.  (Deleted slots do NOT terminate the chain.) */
         if (sectors[sno].tt[k].status == Empty)
            break; /* not found in this sector */
         k++;
         if (k == N_TTES_PER_SECTOR)
            k = 0;
      }

      /* If we fall off the end, all entries are InUse and not
         matching, or Deleted.  In any case we did not find it in this
         sector. */

     notfound:
      /* move to the next oldest sector */
      sno = sno==0 ? (N_SECTORS-1) : (sno-1);
   }

   /* Not found in any sector. */
   return False;
}
+
+
+/*-------------------------------------------------------------*/
+/*--- Delete translations. ---*/
+/*-------------------------------------------------------------*/
+
+/* forward */
+static void unredir_discard_translations( Addr64, ULong );
+
+/* Stuff for deleting translations which intersect with a given
+ address range. Unfortunately, to make this run at a reasonable
+ speed, it is complex. */
+
+static inline
+Bool overlap1 ( Addr64 s1, ULong r1, Addr64 s2, ULong r2 )
+{
+ Addr64 e1 = s1 + r1 - 1ULL;
+ Addr64 e2 = s2 + r2 - 1ULL;
+ if (e1 < s2 || e2 < s1)
+ return False;
+ return True;
+}
+
+static inline
+Bool overlaps ( Addr64 start, ULong range, VexGuestExtents* vge )
+{
+ if (overlap1(start, range, vge->base[0], (UInt)vge->len[0]))
+ return True;
+ if (vge->n_used < 2)
+ return False;
+ if (overlap1(start, range, vge->base[1], (UInt)vge->len[1]))
+ return True;
+ if (vge->n_used < 3)
+ return False;
+ if (overlap1(start, range, vge->base[2], (UInt)vge->len[2]))
+ return True;
+ return False;
+}
+
+
+/* Delete a tt entry, and update all the eclass data accordingly. */
+
static void delete_tte ( /*MOD*/Sector* sec, Int tteno )
{
   Int i, ec_num, ec_idx;
   TTEntry* tte;

   vg_assert(tteno >= 0 && tteno < N_TTES_PER_SECTOR);
   tte = &sec->tt[tteno];
   vg_assert(tte->status == InUse);
   /* An InUse entry belongs to 1 .. 3 equivalence classes. */
   vg_assert(tte->n_tte2ec >= 1 && tte->n_tte2ec <= 3);

   /* Deal with the ec-to-tte links first: unhook every eclass record
      that points back at this entry. */
   for (i = 0; i < tte->n_tte2ec; i++) {
      ec_num = (Int)tte->tte2ec_ec[i];
      ec_idx = tte->tte2ec_ix[i];
      vg_assert(ec_num >= 0 && ec_num < ECLASS_N);
      vg_assert(ec_idx >= 0);
      vg_assert(ec_idx < sec->ec2tte_used[ec_num]);
      /* Assert that the two links point at each other. */
      vg_assert(sec->ec2tte[ec_num][ec_idx] == (UShort)tteno);
      /* "delete" the pointer back to here. */
      sec->ec2tte[ec_num][ec_idx] = EC2TTE_DELETED;
   }

   /* Now fix up this TTEntry. */
   tte->status = Deleted;
   tte->n_tte2ec = 0;

   /* Stats .. */
   sec->tt_n_inuse--;
   n_disc_count++;
   n_disc_osize += vge_osize(&tte->vge);

   /* Tell the tool too, if it asked to be notified of discards. */
   if (VG_(needs).superblock_discards) {
      VG_TDICT_CALL( tool_discard_superblock_info,
                     tte->entry,
                     tte->vge );
   }
}
+
+
+/* Delete translations from sec which intersect specified range, but
+ only consider translations in the specified eclass. */
+
+static
+Bool delete_translations_in_sector_eclass ( /*MOD*/Sector* sec,
+ Addr64 guest_start, ULong range,
+ Int ec )
+{
+ Int i;
+ UShort tteno;
+ Bool anyDeld = False;
+ TTEntry* tte;
+
+ vg_assert(ec >= 0 && ec < ECLASS_N);
+
+ for (i = 0; i < sec->ec2tte_used[ec]; i++) {
+
+ tteno = sec->ec2tte[ec][i];
+ if (tteno == EC2TTE_DELETED) {
+ /* already deleted */
+ continue;
+ }
+
+ vg_assert(tteno < N_TTES_PER_SECTOR);
+
+ tte = &sec->tt[tteno];
+ vg_assert(tte->status == InUse);
+
+ if (overlaps( guest_start, range, &tte->vge )) {
+ anyDeld = True;
+ delete_tte( sec, (Int)tteno );
+ }
+
+ }
+
+ return anyDeld;
+}
+
+
+/* Delete translations from sec which intersect specified range, the
+ slow way, by inspecting all translations in sec. */
+
+static
+Bool delete_translations_in_sector ( /*MOD*/Sector* sec,
+ Addr64 guest_start, ULong range )
+{
+ Int i;
+ Bool anyDeld = False;
+
+ for (i = 0; i < N_TTES_PER_SECTOR; i++) {
+ if (sec->tt[i].status == InUse
+ && overlaps( guest_start, range, &sec->tt[i].vge )) {
+ anyDeld = True;
+ delete_tte( sec, i );
+ }
+ }
+
+ return anyDeld;
+}
+
+
+void VG_(discard_translations) ( Addr64 guest_start, ULong range,
+ HChar* who )
+{
+ Sector* sec;
+ Int sno, ec;
+ Bool anyDeleted = False;
+
+ vg_assert(init_done);
+
+ VG_(debugLog)(2, "transtab",
+ "discard_translations(0x%llx, %lld) req by %s\n",
+ guest_start, range, who );
+
+ /* Pre-deletion sanity check */
+ if (VG_(clo_sanity_level >= 4)) {
+ Bool sane = sanity_check_all_sectors();
+ vg_assert(sane);
+ }
+
+ if (range == 0)
+ return;
+
+ /* There are two different ways to do this.
+
+ If the range fits within a single address-range equivalence
+ class, as will be the case for a cache line sized invalidation,
+ then we only have to inspect the set of translations listed in
+ that equivalence class, and also in the "sin-bin" equivalence
+ class ECLASS_MISC.
+
+ Otherwise, the invalidation is of a larger range and probably
+ results from munmap. In this case it's (probably!) faster just
+ to inspect all translations, dump those we don't want, and
+ regenerate the equivalence class information (since modifying it
+ in-situ is even more expensive).
+ */
+
+ /* First off, figure out if the range falls within a single class,
+ and if so which one. */
+
+ ec = ECLASS_MISC;
+ if (range < (1ULL << ECLASS_SHIFT))
+ ec = range_to_eclass( guest_start, (UInt)range );
+
+ /* if ec is ECLASS_MISC then we aren't looking at just a single
+ class, so use the slow scheme. Else use the fast scheme,
+ examining 'ec' and ECLASS_MISC. */
+
+ if (ec != ECLASS_MISC) {
+
+ VG_(debugLog)(2, "transtab",
+ " FAST, ec = %d\n", ec);
+
+ /* Fast scheme */
+ vg_assert(ec >= 0 && ec < ECLASS_MISC);
+
+ for (sno = 0; sno < N_SECTORS; sno++) {
+ sec = &sectors[sno];
+ if (sec->tc == NULL)
+ continue;
+ anyDeleted |= delete_translations_in_sector_eclass(
+ sec, guest_start, range, ec );
+ anyDeleted |= delete_translations_in_sector_eclass(
+ sec, guest_start, range, ECLASS_MISC );
+ }
+
+ } else {
+
+ /* slow scheme */
+
+ VG_(debugLog)(2, "transtab",
+ " SLOW, ec = %d\n", ec);
+
+ for (sno = 0; sno < N_SECTORS; sno++) {
+ sec = &sectors[sno];
+ if (sec->tc == NULL)
+ continue;
+ anyDeleted |= delete_translations_in_sector(
+ sec, guest_start, range );
+ }
+
+ }
+
+ if (anyDeleted)
+ invalidateFastCache();
+
+ /* don't forget the no-redir cache */
+ unredir_discard_translations( guest_start, range );
+
+ /* Post-deletion sanity check */
+ if (VG_(clo_sanity_level >= 4)) {
+ Int i;
+ TTEntry* tte;
+ Bool sane = sanity_check_all_sectors();
+ vg_assert(sane);
+ /* But now, also check the requested address range isn't
+ present anywhere. */
+ for (sno = 0; sno < N_SECTORS; sno++) {
+ sec = &sectors[sno];
+ if (sec->tc == NULL)
+ continue;
+ for (i = 0; i < N_TTES_PER_SECTOR; i++) {
+ tte = &sec->tt[i];
+ if (tte->status != InUse)
+ continue;
+ vg_assert(!overlaps( guest_start, range, &tte->vge ));
+ }
+ }
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- AUXILIARY: the unredirected TT/TC ---*/
+/*------------------------------------------------------------*/
+
+/* A very simple translation cache which holds a small number of
+ unredirected translations. This is completely independent of the
+ main tt/tc structures. When unredir_tc or unredir_tt becomes full,
+ both structures are simply dumped and we start over.
+
+ Since these translations are unredirected, the search key is (by
+ definition) the first address entry in the .vge field. */
+
+/* Sized to hold 500 translations of average size 1000 bytes. */
+
+#define UNREDIR_SZB 1000
+
+#define N_UNREDIR_TT 500
+#define N_UNREDIR_TCQ (N_UNREDIR_TT * UNREDIR_SZB / sizeof(ULong))
+
+typedef
+ struct {
+ VexGuestExtents vge;
+ Addr hcode;
+ Bool inUse;
+ }
+ UTCEntry;
+
+/* We just allocate forwards in _tc, never deleting. */
+static ULong *unredir_tc;
+static Int unredir_tc_used = N_UNREDIR_TCQ;
+
+/* Slots in _tt can come into use and out again (.inUse).
+ Nevertheless _tt_highwater is maintained so that invalidations
+ don't have to scan all the slots when only a few are in use.
+ _tt_highwater holds the index of the highest ever allocated
+ slot. */
+static UTCEntry unredir_tt[N_UNREDIR_TT];
+static Int unredir_tt_highwater;
+
+
/* (Re-)initialise the unredirected TT/TC.  The code cache is mmap'd
   on first call and reused thereafter; all TT slots are marked free.
   Called at startup and whenever the structures fill up, since they
   are simply dumped wholesale rather than evicted piecemeal. */
static void init_unredir_tt_tc ( void )
{
   Int i;
   if (unredir_tc == NULL) {
      SysRes sres = VG_(am_mmap_anon_float_valgrind)( N_UNREDIR_TT * UNREDIR_SZB );
      if (sres.isError) {
         /* Cannot run without the cache; this call never returns. */
         VG_(out_of_memory_NORETURN)("init_unredir_tt_tc", N_UNREDIR_TT * UNREDIR_SZB);
         /*NOTREACHED*/
      }
      unredir_tc = (ULong *)sres.res;
   }
   unredir_tc_used = 0;
   for (i = 0; i < N_UNREDIR_TT; i++)
      unredir_tt[i].inUse = False;
   /* -1 == no slot has ever been allocated. */
   unredir_tt_highwater = -1;
}
+
+/* Do a sanity check; return False on failure. */
+static Bool sanity_check_redir_tt_tc ( void )
+{
+ Int i;
+ if (unredir_tt_highwater < -1) return False;
+ if (unredir_tt_highwater >= N_UNREDIR_TT) return False;
+
+ for (i = unredir_tt_highwater+1; i < N_UNREDIR_TT; i++)
+ if (unredir_tt[i].inUse)
+ return False;
+
+ if (unredir_tc_used < 0) return False;
+ if (unredir_tc_used > N_UNREDIR_TCQ) return False;
+
+ return True;
+}
+
+
+/* Add an UNREDIRECTED translation of vge to TT/TC. The translation
+ is temporarily in code[0 .. code_len-1].
+*/
void VG_(add_to_unredir_transtab)( VexGuestExtents* vge,
                                   Addr64 entry,
                                   AddrH code,
                                   UInt code_len )
{
   Int i, j, code_szQ;
   HChar *srcP, *dstP;

   vg_assert(sanity_check_redir_tt_tc());

   /* This is the whole point: it's not redirected! */
   vg_assert(entry == vge->base[0]);

   /* How many unredir_tc slots (ULongs) are needed: code_len rounded
      up to a multiple of 8.  (Original comment said "unredir_tt",
      which was swapped with the one below.) */
   code_szQ = (code_len + 7) / 8;

   /* Look for an empty unredir_tt slot */
   for (i = 0; i < N_UNREDIR_TT; i++)
      if (!unredir_tt[i].inUse)
         break;

   if (i >= N_UNREDIR_TT || code_szQ > (N_UNREDIR_TCQ - unredir_tc_used)) {
      /* It's full; dump everything we currently have */
      init_unredir_tt_tc();
      i = 0;
   }

   vg_assert(unredir_tc_used >= 0);
   vg_assert(unredir_tc_used <= N_UNREDIR_TCQ);
   vg_assert(code_szQ > 0);
   vg_assert(code_szQ + unredir_tc_used <= N_UNREDIR_TCQ);
   vg_assert(i >= 0 && i < N_UNREDIR_TT);
   vg_assert(unredir_tt[i].inUse == False);

   /* Track the highest slot index ever allocated, so invalidation
      scans can stop early. */
   if (i > unredir_tt_highwater)
      unredir_tt_highwater = i;

   /* Copy the code into the next free region of unredir_tc. */
   dstP = (HChar*)&unredir_tc[unredir_tc_used];
   srcP = (HChar*)code;
   for (j = 0; j < code_len; j++)
      dstP[j] = srcP[j];

   /* The copied code will be executed; keep the i-cache coherent. */
   invalidate_icache( dstP, code_len );

   unredir_tt[i].inUse = True;
   unredir_tt[i].vge = *vge;
   unredir_tt[i].hcode = (Addr)dstP;

   unredir_tc_used += code_szQ;
   vg_assert(unredir_tc_used >= 0);
   vg_assert(unredir_tc_used <= N_UNREDIR_TCQ);

   vg_assert(&dstP[code_len] <= (HChar*)&unredir_tc[unredir_tc_used]);
}
+
+Bool VG_(search_unredir_transtab) ( /*OUT*/AddrH* result,
+ Addr64 guest_addr )
+{
+ Int i;
+ for (i = 0; i < N_UNREDIR_TT; i++) {
+ if (!unredir_tt[i].inUse)
+ continue;
+ if (unredir_tt[i].vge.base[0] == guest_addr) {
+ *result = (AddrH)unredir_tt[i].hcode;
+ return True;
+ }
+ }
+ return False;
+}
+
+static void unredir_discard_translations( Addr64 guest_start, ULong range )
+{
+ Int i;
+
+ vg_assert(sanity_check_redir_tt_tc());
+
+ for (i = 0; i <= unredir_tt_highwater; i++) {
+ if (unredir_tt[i].inUse
+ && overlaps( guest_start, range, &unredir_tt[i].vge))
+ unredir_tt[i].inUse = False;
+ }
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Initialisation. ---*/
+/*------------------------------------------------------------*/
+
void VG_(init_tt_tc) ( void )
{
   Int i, j, avg_codeszQ;

   /* One-shot initialisation only. */
   vg_assert(!init_done);
   init_done = True;

   /* Otherwise lots of things go wrong... */
   vg_assert(sizeof(ULong) == 8);
   vg_assert(sizeof(Addr64) == 8);
   /* check fast cache entries really are 2 words long */
   vg_assert(sizeof(Addr) == sizeof(void*));
   vg_assert(sizeof(FastCacheEntry) == 2 * sizeof(Addr));
   /* check fast cache entries are packed back-to-back with no spaces */
   vg_assert(sizeof( VG_(tt_fast) ) == VG_TT_FAST_SIZE * sizeof(FastCacheEntry));
   /* check fast cache is aligned as we requested.  Not fatal if it
      isn't, but we might as well make sure. */
   vg_assert(VG_IS_16_ALIGNED( ((Addr) & VG_(tt_fast)[0]) ));

   if (VG_(clo_verbosity) > 2)
      VG_(message)(Vg_DebugMsg,
                   "TT/TC: VG_(init_tt_tc) "
                   "(startup of code management)");

   /* Figure out how big each tc area should be, sized off the tool's
      declared average translation size (in ULong units). */
   avg_codeszQ = (VG_(details).avg_translation_sizeB + 7) / 8;
   tc_sector_szQ = N_TTES_PER_SECTOR_USABLE * (1 + avg_codeszQ);

   /* Ensure the calculated value is not way crazy. */
   vg_assert(tc_sector_szQ >= 2 * N_TTES_PER_SECTOR_USABLE);
   vg_assert(tc_sector_szQ <= 80 * N_TTES_PER_SECTOR_USABLE);

   /* Initialise the sectors.  All start unallocated (tc == NULL);
      storage is created on first use by initialiseSector(). */
   youngest_sector = 0;
   for (i = 0; i < N_SECTORS; i++) {
      sectors[i].tc = NULL;
      sectors[i].tt = NULL;
      sectors[i].tc_next = NULL;
      sectors[i].tt_n_inuse = 0;
      for (j = 0; j < ECLASS_N; j++) {
         sectors[i].ec2tte_size[j] = 0;
         sectors[i].ec2tte_used[j] = 0;
         sectors[i].ec2tte[j] = NULL;
      }
   }

   /* Initialise the fast caches.  If not profiling (the usual case),
      we have to explicitly invalidate the fastN cache as
      invalidateFastCache() won't do that for us. */
   invalidateFastCache();
   if (VG_(clo_profile_flags) == 0)
      invalidateFastNCache();

   /* and the unredir tt/tc */
   init_unredir_tt_tc();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_DebugMsg,
         "TT/TC: cache: %d sectors of %d bytes each = %d total",
         N_SECTORS, 8 * tc_sector_szQ,
         N_SECTORS * 8 * tc_sector_szQ );
      VG_(message)(Vg_DebugMsg,
         "TT/TC: table: %d total entries, max occupancy %d (%d%%)",
         N_SECTORS * N_TTES_PER_SECTOR,
         N_SECTORS * N_TTES_PER_SECTOR_USABLE,
         SECTOR_TT_LIMIT_PERCENT );
   }

   VG_(debugLog)(2, "transtab",
      "cache: %d sectors of %d bytes each = %d total\n",
      N_SECTORS, 8 * tc_sector_szQ,
      N_SECTORS * 8 * tc_sector_szQ );
   VG_(debugLog)(2, "transtab",
      "table: %d total entries, max occupancy %d (%d%%)\n",
      N_SECTORS * N_TTES_PER_SECTOR,
      N_SECTORS * N_TTES_PER_SECTOR_USABLE,
      SECTOR_TT_LIMIT_PERCENT );
}
+
+
+/*------------------------------------------------------------*/
+/*--- Printing out statistics. ---*/
+/*------------------------------------------------------------*/
+
+static ULong safe_idiv( ULong a, ULong b )
+{
+ return (b == 0 ? 0 : a / b);
+}
+
/* Total number of translations added so far (n_in_count is bumped on
   every VG_(add_to_transtab) call).  NOTE(review): n_in_count is
   printed elsewhere with a 64-bit format specifier; assumed to fit
   the UInt return type in practice — confirm. */
UInt VG_(get_bbs_translated) ( void )
{
   return n_in_count;
}
+
/* Print summary TT/TC statistics to the debug-message channel. */
void VG_(print_tt_tc_stats) ( void )
{
   VG_(message)(Vg_DebugMsg,
      " tt/tc: %'llu tt lookups requiring %'llu probes",
      n_full_lookups, n_lookup_probes );
   VG_(message)(Vg_DebugMsg,
      " tt/tc: %'llu fast-cache updates, %'llu flushes",
      n_fast_updates, n_fast_flushes );

   VG_(message)(Vg_DebugMsg,
      " transtab: new %'lld "
      "(%'llu -> %'llu; ratio %'llu:10) [%'llu scs]",
      n_in_count, n_in_osize, n_in_tsize,
      safe_idiv(10*n_in_tsize, n_in_osize),
      n_in_sc_count);
   /* The "?" "?" split avoids forming the "??)" trigraph. */
   VG_(message)(Vg_DebugMsg,
      " transtab: dumped %'llu (%'llu -> ?" "?)",
      n_dump_count, n_dump_osize );
   VG_(message)(Vg_DebugMsg,
      " transtab: discarded %'llu (%'llu -> ?" "?)",
      n_disc_count, n_disc_osize );

   if (0) {
      /* Debug aid (disabled): dump sector 0's eclass occupancy,
         16 counts per row. */
      Int i;
      VG_(printf)("\n");
      for (i = 0; i < ECLASS_N; i++) {
         VG_(printf)(" %4d", sectors[0].ec2tte_used[i]);
         if (i % 16 == 15)
            VG_(printf)("\n");
      }
      VG_(printf)("\n\n");
   }
}
+
+/*------------------------------------------------------------*/
+/*--- Printing out of profiling results. ---*/
+/*------------------------------------------------------------*/
+
+static ULong score ( TTEntry* tte )
+{
+ return ((ULong)tte->weight) * ((ULong)tte->count);
+}
+
/* Fill tops[0 .. n_tops-1] with the highest-scoring translations in
   descending score order, and return the total score summed over all
   InUse translations.  NOTE(review): the signed loop counters are
   compared against the unsigned n_tops; assumed n_tops is small —
   confirm callers. */
ULong VG_(get_BB_profile) ( BBProfEntry tops[], UInt n_tops )
{
   Int sno, i, r, s;
   ULong score_total;

   /* First, compute the total weighted count, and find the top N
      ttes.  tops contains pointers to the most-used n_tops blocks, in
      descending order (viz, tops[0] is the highest scorer). */
   for (i = 0; i < n_tops; i++) {
      tops[i].addr = 0;
      tops[i].score = 0;
   }

   score_total = 0;

   for (sno = 0; sno < N_SECTORS; sno++) {
      if (sectors[sno].tc == NULL)
         continue;
      for (i = 0; i < N_TTES_PER_SECTOR; i++) {
         if (sectors[sno].tt[i].status != InUse)
            continue;
         score_total += score(&sectors[sno].tt[i]);
         /* Find the rank for sectors[sno].tt[i]: starting from the
            bottom of the table, step upwards past unused slots
            (addr == 0) and lower-scoring entries. */
         r = n_tops-1;
         while (True) {
            if (r == -1)
               break;
            if (tops[r].addr == 0) {
               r--;
               continue;
            }
            if ( score(&sectors[sno].tt[i]) > tops[r].score ) {
               r--;
               continue;
            }
            break;
         }
         /* r now indexes the last entry that outranks this one (or
            -1); the insertion point is one below that. */
         r++;
         vg_assert(r >= 0 && r <= n_tops);
         /* This bb should be placed at r, and bbs above it shifted
            upwards one slot. */
         if (r < n_tops) {
            for (s = n_tops-1; s > r; s--)
               tops[s] = tops[s-1];
            tops[r].addr = sectors[sno].tt[i].entry;
            tops[r].score = score( &sectors[sno].tt[i] );
         }
      }
   }

   return score_total;
}
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_vki.c.svn-base b/coregrind/.svn/text-base/m_vki.c.svn-base
new file mode 100644
index 0000000..6d0631c
--- /dev/null
+++ b/coregrind/.svn/text-base/m_vki.c.svn-base
@@ -0,0 +1,52 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Notional "implementation" for m_vki. ---*/
+/*--- m_vki.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h" /* self */
+
+/* We have pub_{core,tool}_vki.h. This is the matching implementation
+ for that interface. In fact there is no implementation, as the
+ sole purpose of the module is to export types and constants
+ describing the kernel interface, so this file is nearly empty. */
+
+
+/* ppc32/64-linux determines page size at startup, hence m_vki is
+ the logical place to store that info. */
+
+#if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+unsigned long VKI_PAGE_SHIFT = 12;
+unsigned long VKI_PAGE_SIZE = 1UL << 12;
+#endif
+
+
+/*--------------------------------------------------------------------*/
+/*--- end m_vki.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_vkiscnums.c.svn-base b/coregrind/.svn/text-base/m_vkiscnums.c.svn-base
new file mode 100644
index 0000000..a191496
--- /dev/null
+++ b/coregrind/.svn/text-base/m_vkiscnums.c.svn-base
@@ -0,0 +1,1129 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Notional "implementation" for m_vkiscnums. ---*/
+/*--- m_vkiscnums.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_vkiscnums.h" /* self */
+
+/* We have pub_{core,tool}_vkiscnums.h. This is the matching implementation
+ for that interface.
+
+ On Linux, the interface exports a bunch of "#define __NR_foo 42" style
+ definitions, so there is no implementation.
+
+ On AIX, syscall numbers are not fixed ahead of time; in principle
+ each process can have its own assignment of numbers to actual
+ syscalls. As a result we have a bunch of global variables to store
+ the number for each syscall, which are assigned to at system
+ startup, and a bunch of #defines which map "__NR_foo" names to
+ these global variables. Initially, when we don't know what a
+ syscall's number is, it is set to __NR_AIX5_UNKNOWN.
+
+ Therefore, on AIX, this module provides a home for those variables.
+
+ It also provides VG_(aix5_register_syscall) to assign numbers to
+ those variables.
+*/
+
+#if defined(VGO_aix5)
+/* These ones are for AIX 5.2. */
+Int VG_(aix5_NR_utrchook_sc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_create) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kfork) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kra_fork) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_execve) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ra_execve) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__load) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___unload) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_loadbind) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___loadx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_bindprocessor) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trcgent) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trcgen) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trchk) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trchkt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trchkl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trchklt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trchkg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_trchkgt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kill) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__addcpucosts) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mycpu) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_adjtime) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_checkpnt_block) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__checkpnt_kill) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__checkpnt_fail) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__checkpnt_commit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__checkpnt_register) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__checkpnt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setcrid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getcrid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mkcrid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_checkpnt_wait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_checkpnt_deliver) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_gencore) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_terminate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__exit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kwaitpid64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kwaitpid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_yield) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getprocs64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getevars) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getargs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getthrds64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getthrds) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getprocs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sigcleanup) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__setpri) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__getpri) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_profil) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_reboot) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appgetrlimit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appsetrlimit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__setpriority) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__getpriority) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setrlimit64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getrlimit64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appgetrusage) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getrusage64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getvtid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getrtid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getrpid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_restart_wait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_restart) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__rmcpucosts) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__clock_getcpuclockid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__clock_settime) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__clock_gettime) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__clock_getres) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__timer_settime) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__timer_gettime) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__timer_getoverrun) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__timer_delete) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__timer_create) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sigqueue) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sigsuspend) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sigaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sigprocmask) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_siglocalmask) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_count_event_waiters) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_waitact) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_waitlock_local) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_waitlock) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_wait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_unlock) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_twakeup_unlock) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_twakeup_event) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_twakeup) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_tsleep_event) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_tsleep_chkpnt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_tsleep) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_post_many) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_post) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ue_proc_unregister) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ue_proc_register) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kthread_ctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__thread_setsched) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_threads_runnable) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_getregs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_terminate_unlock) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_terminate_ack) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_setstate_fast) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_setstate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_setmymask_fast) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_setmystate_fast) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_setmystate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_init) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_times) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__nsleep) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_reltimerid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appresinc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_apprestimer) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appresabs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appsettimer) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appgettimer) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_gettimerid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_incinterval) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_absinterval) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getinterval) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_upfget) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_wait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_post) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_event_init) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_set_tag) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_set) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ptrace64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ptracex) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ptrace) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ksetcontext_sigreturn) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ksetcontext) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kgetcontext) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sigreturn) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_get_bio_stats) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_splice) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rmsock) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_nrecvmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_socket_aio_dequeue) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getkerninfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getpeereid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getpeername) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ngetpeername) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getsockname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ngetsockname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getsockopt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setsockopt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shutdown) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_recvmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_recv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_nrecvfrom) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_recvfrom) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_nsendmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sendmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_send) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sendto) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_socketpair) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_accept) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_naccept) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_listen) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_bind) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_socket) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_connext) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setdomainname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getdomainname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sethostname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sethostid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_gethostid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_gethostname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_send_file) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__rmlmbcost) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___rs_pickmcm) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getsystem) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getassociativity) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_setpartition) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getpartition) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ra_getrset) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getrad) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_numrads) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___kdb_format_print_rele) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___kdb_format_print_init) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_close) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kfsync_range) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fsync) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kpwrite) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kwritev) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kwrite) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kpread) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kreadv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kread) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_klseek) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__lseek) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_lseek) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__setsid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__setpgid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__setpgrp) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__getpgrpx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__getpgrp) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__getppid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__thread_self) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__getpid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kgetpgidx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setuid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setuidx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getuidx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_seteuid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setreuid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chdir) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fchdir) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chroot) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fchmod) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chmod) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chown) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_lchown) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fchown) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fchownx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chownx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kfclear) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fclear) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ffinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_finfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fscntl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ktruncate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kftruncate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_truncate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ftruncate) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getdirent64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getdirent) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kioctl32) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kioctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_link) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_klockf) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_lockf) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mkdir) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mknod) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mntctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_vmount) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_creat) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_openx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_open) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_quotactl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rename) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rmdir) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fstatx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_statx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_symlink) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_readlink) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_syncvfs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sync) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_umask) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_uvmount) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_umount) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_unameu) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_unamex) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_uname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_unlink) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ustat) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_utimes) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___msgxrcv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___msgrcv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___msgsnd) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_msgctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_msgget) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getgidx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___semop) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_semget) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_semctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shmctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shmdt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shmat) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shmget) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ra_shmgetv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ra_shmget) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_privcheck) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_disclaim) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sem_destroy_unnamed) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sem_wait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sem_close) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sem_open) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sem_unlink) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sem_post) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sem_init) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sem_getvalue) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sem_destroy) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__mq_notify) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__mq_open) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mq_unlink) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mq_setattr) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mq_send) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mq_receive) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mq_getattr) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mq_close) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shm_unlink) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_shm_open) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__poll) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__select) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sysconfig) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sys_parm) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_loadquery) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_knlist) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_brk) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fjfs_sys_call) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_jfs_sys_call) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_acct) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__dr_unregister) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__dr_notify) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__dr_register) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getlparload) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_dr_reconfig) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_projctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sbrk) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__sigpending) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__pause) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_kill) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sigstack) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sigaltstack) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_appulimit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ras_service) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_class_descr2key) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_get_procinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_get_info) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_getclassname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_unload_classes) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_load) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_tune) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_assign) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__wlm_classify) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fp_cpusync) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__fp_trapstate_ker) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__ewlm_classify_correlator) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_stop_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_destroy_application) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_stop_application) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_generate_correlator) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_discard_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_unbind_thread) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_bind_thread) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_unblock_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_block_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_update_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_register_metric) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_report_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_start_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_register_transaction) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_start_application) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__arm_register_application) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__lsarm_getinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__ewlm_init) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__ewlm_query) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_verify_policy) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_abort_policy) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_commit_policy) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_prepare_policy) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_get_completions) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_get_activedata) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_get_appldata) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_collect_samples) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_disconnect) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ewlm_connect) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_auditlog) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_auditproc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getgroups) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setgid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setgidx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setgroups) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_frevoke) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_revoke) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___pag_setvalue) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___pag_getvalue) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___pag_getid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___pag_getname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___pag_setname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kcap_set_proc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kcap_get_proc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_pipe) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mwakeup) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___msleep) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kmmap) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_msem_remove) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mincore) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_madvise) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_munmap) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_msync) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mprotect) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_mmap) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_swapqry) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_swapon) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_swapoff) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_psdanger) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_vmgetinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_admregistername) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_discardname) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_setnameattr) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_registername) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getnamedrset) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getnameattr) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_rs_getrsetnames) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ra_attachrset) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_ra_detachrset) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_dmapi_init) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kdm_ioctl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_access) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_accessx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kfcntl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___pfcntl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fstatfs64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_statfs64) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fstatfs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_statfs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_probe) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_cmp_swap) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__validate_pag) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kgetsidx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kgetsid) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_plock) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_upfput) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_usrinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_audit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_auditobj) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_auditbin) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_auditevents) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_faccessx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___fchaclx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___chaclx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fchacl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chacl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___fstataclx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR___stataclx) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fstatacl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_statacl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_setpriv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getpriv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fstatpriv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_statpriv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_fchpriv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_chpriv) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_i_int2cpu_pal) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_hd_cfg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_putpmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_putmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getpmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_getmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_strinfo) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_strreset) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_dupmsg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__kgrantpt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_aixgsc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smaccept) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smconnect) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smlisten) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smbind) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smsocket) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smdetatt) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smattach) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smselect) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smwait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smsetthresh) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smsendbuff) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smfreebuff) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smrcvbuff) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smgetbuff) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smversion) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_smtcheckinit) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_aio_nwait_timeout) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kaio_stats) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_aio_cntl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_listio) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_acancel) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_iosuspend) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kaio_rdwr) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_aio_nwait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_iofsync) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_aio_nwait_timeout) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_kaio_stats) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_listio) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_acancel) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_iosuspend) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_kaio_rdwr) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_aio_cntl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__posix_aio_nwait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_nfs_cntl) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_nfssvc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_nfs_getfh) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_exportfs) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_lm_svc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_pw_config) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_pw_post) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_pw_wait) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_pw_loadavg) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_pw_debug) = __NR_AIX5_UNKNOWN;
+
+/* Extras for AIX 5.3 */
+Int VG_(aix5_NR___libc_sbrk) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_thread_waitlock_) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__fp_fpscrx_sc) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_sched_get_priority_max) = __NR_AIX5_UNKNOWN;
+
+/* Extras for AIX 5.3 64-bit mode. */
+Int VG_(aix5_NR_kload) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR__fp_fpscrx64_) = __NR_AIX5_UNKNOWN;
+Int VG_(aix5_NR_kunload64) = __NR_AIX5_UNKNOWN;
+
+/* We need an extra fake syscall to denote signal handler returns, as
+ used in sigframe-ppc{32,64}-aix5.c. Since we don't know what
+ number we can assign to it, monitor the numbers passed to
+ VG_(aix5_register_syscall), and set it to 10000+the largest syscall
+   number seen. */
+Int VG_(aix5_NR_FAKE_SIGRETURN) = __NR_AIX5_UNKNOWN;
+
+
+
+/* Also make a record of the registered syscalls, so we can print the
+ name in bad_before() (syswrap-main.c) if needed. The obvious
+ approach would be to dump them in an XArray, but that requires
+ dynamic memory allocation, and syscall registration is done before
+ dynamic memory allocation is available. So just use a fixed size
+ array and hope it doesn't fill up. */
+#define N_BINDINGS 2000
+static Int bindings_used = 0; /* number of entries of the parallel arrays below currently in use */
+static Int bindings_sysno[N_BINDINGS]; /* syscall number of binding i ... */
+static UChar* bindings_sysname[N_BINDINGS]; /* ... and its name; pointer is retained, not copied -- caller's string must outlive us */
+
+UChar* VG_(aix5_sysno_to_sysname)( Int sysno ) { /* map a registered syscall number back to its name; linear scan over at most N_BINDINGS entries */
+   Int i;
+   for (i = 0; i < bindings_used; i++)
+      if (bindings_sysno[i] == sysno)
+         return bindings_sysname[i]; /* first registration with this number wins */
+   return "(unknown name)"; /* sysno was never registered (or the bindings array overflowed) */
+}
+
+static Bool local_streq ( UChar* s1, UChar* s2 ); /* fwds */
+
+Bool VG_(aix5_register_syscall)( Int sysno, UChar* sysname )
+{
+ /* Establish the FAKE_SIGRETURN number. */
+ if (VG_(aix5_NR_FAKE_SIGRETURN) == __NR_AIX5_UNKNOWN)
+ VG_(aix5_NR_FAKE_SIGRETURN) = sysno + 10000;
+ else
+ if (sysno + 10000 > VG_(aix5_NR_FAKE_SIGRETURN))
+ VG_(aix5_NR_FAKE_SIGRETURN) = sysno + 10000;
+
+ /* Note the name, just in case bad_before() needs to complain. */
+ if (bindings_used < N_BINDINGS) {
+ bindings_sysno[bindings_used] = sysno;
+ bindings_sysname[bindings_used] = sysname;
+ bindings_used++;
+ }
+
+ /* Now do the normal name-to-number binding checks. */
+# define XXX(name) \
+ if (local_streq(sysname, #name)) { \
+ VG_(aix5_NR_##name) = sysno; \
+ return True; \
+ }
+ /* AIX 5.2 */
+ XXX(utrchook_sc)
+ XXX(thread_create)
+ XXX(kfork)
+ XXX(kra_fork)
+ XXX(execve)
+ XXX(ra_execve)
+ XXX(_load)
+ XXX(__unload)
+ XXX(loadbind)
+ XXX(__loadx)
+ XXX(bindprocessor)
+ XXX(trcgent)
+ XXX(trcgen)
+ XXX(trchk)
+ XXX(trchkt)
+ XXX(trchkl)
+ XXX(trchklt)
+ XXX(trchkg)
+ XXX(trchkgt)
+ XXX(kill)
+ XXX(_addcpucosts)
+ XXX(mycpu)
+ XXX(adjtime)
+ XXX(checkpnt_block)
+ XXX(_checkpnt_kill)
+ XXX(_checkpnt_fail)
+ XXX(_checkpnt_commit)
+ XXX(_checkpnt_register)
+ XXX(_checkpnt)
+ XXX(setcrid)
+ XXX(getcrid)
+ XXX(mkcrid)
+ XXX(checkpnt_wait)
+ XXX(checkpnt_deliver)
+ XXX(gencore)
+ XXX(thread_terminate)
+ XXX(_exit)
+ XXX(kwaitpid64)
+ XXX(kwaitpid)
+ XXX(yield)
+ XXX(getprocs64)
+ XXX(getevars)
+ XXX(getargs)
+ XXX(getthrds64)
+ XXX(getthrds)
+ XXX(getprocs)
+ XXX(sigcleanup)
+ XXX(_setpri)
+ XXX(_getpri)
+ XXX(profil)
+ XXX(reboot)
+ XXX(appgetrlimit)
+ XXX(appsetrlimit)
+ XXX(_setpriority)
+ XXX(_getpriority)
+ XXX(setrlimit64)
+ XXX(getrlimit64)
+ XXX(appgetrusage)
+ XXX(getrusage64)
+ XXX(getvtid)
+ XXX(getrtid)
+ XXX(getrpid)
+ XXX(restart_wait)
+ XXX(restart)
+ XXX(_rmcpucosts)
+ XXX(_clock_getcpuclockid)
+ XXX(_clock_settime)
+ XXX(_clock_gettime)
+ XXX(_clock_getres)
+ XXX(_timer_settime)
+ XXX(_timer_gettime)
+ XXX(_timer_getoverrun)
+ XXX(_timer_delete)
+ XXX(_timer_create)
+ XXX(_sigqueue)
+ XXX(_sigsuspend)
+ XXX(_sigaction)
+ XXX(sigprocmask)
+ XXX(siglocalmask)
+ XXX(count_event_waiters)
+ XXX(thread_waitact)
+ XXX(thread_waitlock_local)
+ XXX(thread_waitlock)
+ XXX(thread_wait)
+ XXX(thread_unlock)
+ XXX(thread_twakeup_unlock)
+ XXX(thread_twakeup_event)
+ XXX(thread_twakeup)
+ XXX(thread_tsleep_event)
+ XXX(thread_tsleep_chkpnt)
+ XXX(thread_tsleep)
+ XXX(thread_post_many)
+ XXX(thread_post)
+ XXX(ue_proc_unregister)
+ XXX(ue_proc_register)
+ XXX(kthread_ctl)
+ XXX(_thread_setsched)
+ XXX(threads_runnable)
+ XXX(thread_getregs)
+ XXX(thread_terminate_unlock)
+ XXX(thread_terminate_ack)
+ XXX(thread_setstate_fast)
+ XXX(thread_setstate)
+ XXX(thread_setmymask_fast)
+ XXX(thread_setmystate_fast)
+ XXX(thread_setmystate)
+ XXX(thread_init)
+ XXX(times)
+ XXX(_nsleep)
+ XXX(reltimerid)
+ XXX(appresinc)
+ XXX(apprestimer)
+ XXX(appresabs)
+ XXX(appsettimer)
+ XXX(appgettimer)
+ XXX(gettimerid)
+ XXX(incinterval)
+ XXX(absinterval)
+ XXX(getinterval)
+ XXX(upfget)
+ XXX(_wlm_wait)
+ XXX(_wlm_post)
+ XXX(_wlm_event_init)
+ XXX(_wlm_set_tag)
+ XXX(_wlm_set)
+ XXX(ptrace64)
+ XXX(ptracex)
+ XXX(ptrace)
+ XXX(ksetcontext_sigreturn)
+ XXX(ksetcontext)
+ XXX(kgetcontext)
+ XXX(sigreturn)
+ XXX(_wlm_get_bio_stats)
+ XXX(splice)
+ XXX(rmsock)
+ XXX(nrecvmsg)
+ XXX(socket_aio_dequeue)
+ XXX(getkerninfo)
+ XXX(getpeereid)
+ XXX(getpeername)
+ XXX(ngetpeername)
+ XXX(getsockname)
+ XXX(ngetsockname)
+ XXX(getsockopt)
+ XXX(setsockopt)
+ XXX(shutdown)
+ XXX(recvmsg)
+ XXX(recv)
+ XXX(nrecvfrom)
+ XXX(recvfrom)
+ XXX(nsendmsg)
+ XXX(sendmsg)
+ XXX(send)
+ XXX(sendto)
+ XXX(socketpair)
+ XXX(accept)
+ XXX(naccept)
+ XXX(listen)
+ XXX(bind)
+ XXX(socket)
+ XXX(connext)
+ XXX(setdomainname)
+ XXX(getdomainname)
+ XXX(sethostname)
+ XXX(sethostid)
+ XXX(gethostid)
+ XXX(gethostname)
+ XXX(send_file)
+ XXX(_rmlmbcost)
+ XXX(__rs_pickmcm)
+ XXX(rs_getsystem)
+ XXX(rs_getassociativity)
+ XXX(rs_setpartition)
+ XXX(rs_getpartition)
+ XXX(ra_getrset)
+ XXX(rs_getinfo)
+ XXX(rs_getrad)
+ XXX(rs_numrads)
+ XXX(__kdb_format_print_rele)
+ XXX(__kdb_format_print_init)
+ XXX(close)
+ XXX(kfsync_range)
+ XXX(fsync)
+ XXX(kpwrite)
+ XXX(kwritev)
+ XXX(kwrite)
+ XXX(kpread)
+ XXX(kreadv)
+ XXX(kread)
+ XXX(klseek)
+ XXX(_lseek)
+ XXX(lseek)
+ XXX(_setsid)
+ XXX(_setpgid)
+ XXX(_setpgrp)
+ XXX(_getpgrpx)
+ XXX(_getpgrp)
+ XXX(_getppid)
+ XXX(_thread_self)
+ XXX(_getpid)
+ XXX(kgetpgidx)
+ XXX(setuid)
+ XXX(setuidx)
+ XXX(getuidx)
+ XXX(seteuid)
+ XXX(setreuid)
+ XXX(chdir)
+ XXX(fchdir)
+ XXX(chroot)
+ XXX(fchmod)
+ XXX(chmod)
+ XXX(chown)
+ XXX(lchown)
+ XXX(fchown)
+ XXX(fchownx)
+ XXX(chownx)
+ XXX(kfclear)
+ XXX(fclear)
+ XXX(ffinfo)
+ XXX(finfo)
+ XXX(fscntl)
+ XXX(ktruncate)
+ XXX(kftruncate)
+ XXX(truncate)
+ XXX(ftruncate)
+ XXX(getdirent64)
+ XXX(getdirent)
+ XXX(kioctl32)
+ XXX(kioctl)
+ XXX(link)
+ XXX(klockf)
+ XXX(lockf)
+ XXX(mkdir)
+ XXX(mknod)
+ XXX(mntctl)
+ XXX(vmount)
+ XXX(creat)
+ XXX(openx)
+ XXX(open)
+ XXX(quotactl)
+ XXX(rename)
+ XXX(rmdir)
+ XXX(fstatx)
+ XXX(statx)
+ XXX(symlink)
+ XXX(readlink)
+ XXX(syncvfs)
+ XXX(sync)
+ XXX(umask)
+ XXX(uvmount)
+ XXX(umount)
+ XXX(unameu)
+ XXX(unamex)
+ XXX(uname)
+ XXX(unlink)
+ XXX(ustat)
+ XXX(utimes)
+ XXX(__msgxrcv)
+ XXX(__msgrcv)
+ XXX(__msgsnd)
+ XXX(msgctl)
+ XXX(msgget)
+ XXX(getgidx)
+ XXX(__semop)
+ XXX(semget)
+ XXX(semctl)
+ XXX(shmctl)
+ XXX(shmdt)
+ XXX(shmat)
+ XXX(shmget)
+ XXX(ra_shmgetv)
+ XXX(ra_shmget)
+ XXX(privcheck)
+ XXX(disclaim)
+ XXX(_sem_destroy_unnamed)
+ XXX(_sem_wait)
+ XXX(_sem_close)
+ XXX(_sem_open)
+ XXX(sem_unlink)
+ XXX(sem_post)
+ XXX(sem_init)
+ XXX(sem_getvalue)
+ XXX(sem_destroy)
+ XXX(_mq_notify)
+ XXX(_mq_open)
+ XXX(mq_unlink)
+ XXX(mq_setattr)
+ XXX(mq_send)
+ XXX(mq_receive)
+ XXX(mq_getattr)
+ XXX(mq_close)
+ XXX(shm_unlink)
+ XXX(shm_open)
+ XXX(_poll)
+ XXX(_select)
+ XXX(sysconfig)
+ XXX(sys_parm)
+ XXX(loadquery)
+ XXX(knlist)
+ XXX(brk)
+ XXX(fjfs_sys_call)
+ XXX(jfs_sys_call)
+ XXX(acct)
+ XXX(_dr_unregister)
+ XXX(_dr_notify)
+ XXX(_dr_register)
+ XXX(getlparload)
+ XXX(dr_reconfig)
+ XXX(projctl)
+ XXX(sbrk)
+ XXX(_sigpending)
+ XXX(_pause)
+ XXX(thread_kill)
+ XXX(sigstack)
+ XXX(sigaltstack)
+ XXX(appulimit)
+ XXX(ras_service)
+ XXX(_wlm_class_descr2key)
+ XXX(_wlm_get_procinfo)
+ XXX(_wlm_get_info)
+ XXX(_wlm_getclassname)
+ XXX(_wlm_unload_classes)
+ XXX(_wlm_load)
+ XXX(_wlm_tune)
+ XXX(_wlm_assign)
+ XXX(_wlm_classify)
+ XXX(fp_cpusync)
+ XXX(_fp_trapstate_ker)
+ XXX(_ewlm_classify_correlator)
+ XXX(_arm_stop_transaction)
+ XXX(_arm_destroy_application)
+ XXX(_arm_stop_application)
+ XXX(_arm_generate_correlator)
+ XXX(_arm_discard_transaction)
+ XXX(_arm_unbind_thread)
+ XXX(_arm_bind_thread)
+ XXX(_arm_unblock_transaction)
+ XXX(_arm_block_transaction)
+ XXX(_arm_update_transaction)
+ XXX(_arm_register_metric)
+ XXX(_arm_report_transaction)
+ XXX(_arm_start_transaction)
+ XXX(_arm_register_transaction)
+ XXX(_arm_start_application)
+ XXX(_arm_register_application)
+ XXX(_lsarm_getinfo)
+ XXX(_ewlm_init)
+ XXX(_ewlm_query)
+ XXX(ewlm_verify_policy)
+ XXX(ewlm_abort_policy)
+ XXX(ewlm_commit_policy)
+ XXX(ewlm_prepare_policy)
+ XXX(ewlm_get_completions)
+ XXX(ewlm_get_activedata)
+ XXX(ewlm_get_appldata)
+ XXX(ewlm_collect_samples)
+ XXX(ewlm_disconnect)
+ XXX(ewlm_connect)
+ XXX(auditlog)
+ XXX(auditproc)
+ XXX(getgroups)
+ XXX(setgid)
+ XXX(setgidx)
+ XXX(setgroups)
+ XXX(frevoke)
+ XXX(revoke)
+ XXX(__pag_setvalue)
+ XXX(__pag_getvalue)
+ XXX(__pag_getid)
+ XXX(__pag_getname)
+ XXX(__pag_setname)
+ XXX(kcap_set_proc)
+ XXX(kcap_get_proc)
+ XXX(pipe)
+ XXX(mwakeup)
+ XXX(__msleep)
+ XXX(kmmap)
+ XXX(msem_remove)
+ XXX(mincore)
+ XXX(madvise)
+ XXX(munmap)
+ XXX(msync)
+ XXX(mprotect)
+ XXX(mmap)
+ XXX(swapqry)
+ XXX(swapon)
+ XXX(swapoff)
+ XXX(psdanger)
+ XXX(vmgetinfo)
+ XXX(rs_admregistername)
+ XXX(rs_discardname)
+ XXX(rs_setnameattr)
+ XXX(rs_registername)
+ XXX(rs_getnamedrset)
+ XXX(rs_getnameattr)
+ XXX(rs_getrsetnames)
+ XXX(ra_attachrset)
+ XXX(ra_detachrset)
+ XXX(dmapi_init)
+ XXX(kdm_ioctl)
+ XXX(access)
+ XXX(accessx)
+ XXX(kfcntl)
+ XXX(__pfcntl)
+ XXX(fstatfs64)
+ XXX(statfs64)
+ XXX(fstatfs)
+ XXX(statfs)
+ XXX(probe)
+ XXX(cmp_swap)
+ XXX(_validate_pag)
+ XXX(kgetsidx)
+ XXX(kgetsid)
+ XXX(plock)
+ XXX(upfput)
+ XXX(usrinfo)
+ XXX(audit)
+ XXX(auditobj)
+ XXX(auditbin)
+ XXX(auditevents)
+ XXX(faccessx)
+ XXX(__fchaclx)
+ XXX(__chaclx)
+ XXX(fchacl)
+ XXX(chacl)
+ XXX(__fstataclx)
+ XXX(__stataclx)
+ XXX(fstatacl)
+ XXX(statacl)
+ XXX(setpriv)
+ XXX(getpriv)
+ XXX(fstatpriv)
+ XXX(statpriv)
+ XXX(fchpriv)
+ XXX(chpriv)
+ XXX(i_int2cpu_pal)
+ XXX(hd_cfg)
+ XXX(putpmsg)
+ XXX(putmsg)
+ XXX(getpmsg)
+ XXX(getmsg)
+ XXX(strinfo)
+ XXX(strreset)
+ XXX(dupmsg)
+ XXX(_kgrantpt)
+ XXX(aixgsc)
+ XXX(smaccept)
+ XXX(smconnect)
+ XXX(smlisten)
+ XXX(smbind)
+ XXX(smsocket)
+ XXX(smdetatt)
+ XXX(smattach)
+ XXX(smselect)
+ XXX(smwait)
+ XXX(smsetthresh)
+ XXX(smsendbuff)
+ XXX(smfreebuff)
+ XXX(smrcvbuff)
+ XXX(smgetbuff)
+ XXX(smversion)
+ XXX(smtcheckinit)
+ XXX(aio_nwait_timeout)
+ XXX(kaio_stats)
+ XXX(aio_cntl)
+ XXX(listio)
+ XXX(acancel)
+ XXX(iosuspend)
+ XXX(kaio_rdwr)
+ XXX(aio_nwait)
+ XXX(_posix_iofsync)
+ XXX(_posix_aio_nwait_timeout)
+ XXX(_posix_kaio_stats)
+ XXX(_posix_listio)
+ XXX(_posix_acancel)
+ XXX(_posix_iosuspend)
+ XXX(_posix_kaio_rdwr)
+ XXX(_posix_aio_cntl)
+ XXX(_posix_aio_nwait)
+ XXX(nfs_cntl)
+ XXX(nfssvc)
+ XXX(nfs_getfh)
+ XXX(exportfs)
+ XXX(lm_svc)
+ XXX(pw_config)
+ XXX(pw_post)
+ XXX(pw_wait)
+ XXX(pw_loadavg)
+ XXX(pw_debug)
+ /* Extras for AIX 5.3 */
+ XXX(__libc_sbrk)
+ XXX(thread_waitlock_)
+ XXX(_fp_fpscrx_sc)
+ XXX(sched_get_priority_max)
+ /* Extras for AIX 5.3 64-bit */
+ XXX(kload)
+ XXX(_fp_fpscrx64_)
+ XXX(kunload64)
+# undef XXX
+ return False;
+}
+
+
+/* Return True iff the NUL-terminated strings s1 and s2 are identical.
+   A local strcmp-for-equality, so this file needs no libc. */
+static Bool local_streq ( UChar* s1, UChar* s2 )
+{
+   while (*s1 == *s2) {
+      if (*s1 == 0)
+         return True;   /* both strings ended together: equal */
+      s1++;
+      s2++;
+   }
+   return False;         /* first difference (or one-sided end) found */
+}
+
+#endif /* defined(VGO_aix5) */
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_wordfm.c.svn-base b/coregrind/.svn/text-base/m_wordfm.c.svn-base
new file mode 100644
index 0000000..a10ea90
--- /dev/null
+++ b/coregrind/.svn/text-base/m_wordfm.c.svn-base
@@ -0,0 +1,984 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An AVL tree based finite map for word keys and word values. ---*/
+/*--- Inspired by Haskell's "FiniteMap" library. ---*/
+/*--- m_wordfm.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2007-2009 Julian Seward
+ jseward@acm.org
+
+ This code is based on previous work by Nicholas Nethercote
+ (coregrind/m_oset.c) which is
+
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ which in turn was derived partially from:
+
+ AVL C library
+ Copyright (C) 2000,2002 Daniel Nagy
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
+ [...]
+
+ (taken from libavl-0.4/debian/copyright)
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_wordfm.h" /* self */
+
+
+//------------------------------------------------------------------//
+//--- WordFM ---//
+//--- Implementation ---//
+//------------------------------------------------------------------//
+
+/* One element of the AVL tree */
+typedef
+   struct _AvlNode {
+      UWord key; /* lookup key; boxed or plain unsigned word */
+      UWord val; /* payload word bound to this key */
+      struct _AvlNode* child[2]; /* [0] is left subtree, [1] is right */
+      Char balance; /* do not make this unsigned */
+   }
+   AvlNode;
+
+/* An optional word: 'w' is meaningful only when 'b' is True. */
+typedef
+   struct {
+      UWord w;
+      Bool b;
+   }
+   MaybeWord;
+
+#define WFM_STKMAX 32 // At most 2**32 entries can be iterated over
+
+/* A finite map from UWord keys to UWord values, implemented as an AVL
+   tree.  Each WordFM also embeds the state for one in-order iteration
+   (the node/num stacks below), so at most one iteration per map can be
+   in progress at a time. */
+struct _WordFM {
+   AvlNode* root; // root of the AVL tree; NULL means the map is empty
+   void* (*alloc_nofail)( HChar*, SizeT ); // allocator; may not return NULL
+   HChar* cc; // cost-centre string handed to the allocator
+   void (*dealloc)(void*); // deallocator paired with alloc_nofail
+   Word (*kCmp)(UWord,UWord); // key compare; NULL == unsigned word compare
+   AvlNode* nodeStack[WFM_STKMAX]; // Iterator node stack
+   Int numStack[WFM_STKMAX]; // Iterator num stack
+   Int stackTop; // Iterator stack pointer, one past end
+};
+
+/* forward */
+static Bool avl_removeroot_wrk(AvlNode** t, Word(*kCmp)(UWord,UWord));
+
+/* Rotate the subtree at *root leftwards: the right child becomes the
+   new subtree root.  NB: balance fields are NOT adjusted here; callers
+   fix them up afterwards. */
+static void avl_swl ( AvlNode** root )
+{
+   AvlNode* oldroot = *root;
+   AvlNode* pivot   = oldroot->child[1];
+   oldroot->child[1] = pivot->child[0];
+   pivot->child[0]   = oldroot;
+   *root = pivot;
+}
+
+/* Rotate the subtree at *root rightwards: the left child becomes the
+   new subtree root.  NB: balance fields are NOT adjusted here; callers
+   fix them up afterwards. */
+static void avl_swr ( AvlNode** root )
+{
+   AvlNode* oldroot = *root;
+   AvlNode* pivot   = oldroot->child[0];
+   oldroot->child[0] = pivot->child[1];
+   pivot->child[1]   = oldroot;
+   *root = pivot;
+}
+
+/* Balance maintenance after especially nasty swings (double
+   rotations).  The new balances of the two children are a function of
+   the pre-rotation balance of 'root'; 'root' itself always ends up
+   perfectly balanced. */
+static void avl_nasty ( AvlNode* root )
+{
+   AvlNode* l = root->child[0];
+   AvlNode* r = root->child[1];
+   if (root->balance == -1) {
+      l->balance = 0;
+      r->balance = 1;
+   }
+   else if (root->balance == 1) {
+      l->balance = -1;
+      r->balance = 0;
+   }
+   else if (root->balance == 0) {
+      l->balance = 0;
+      r->balance = 0;
+   }
+   else {
+      tl_assert(0);
+   }
+   root->balance = 0;
+}
+
+/* Count the nodes of a non-NULL subtree, recursively. */
+static UWord size_avl_nonNull ( AvlNode* nd )
+{
+   UWord n = 1;
+   if (nd->child[0]) n += size_avl_nonNull(nd->child[0]);
+   if (nd->child[1]) n += size_avl_nonNull(nd->child[1]);
+   return n;
+}
+
+/* Three-way unsigned comparison: negative if w1 < w2, positive if
+   w1 > w2, zero if equal. */
+static inline Word cmp_unsigned_Words ( UWord w1, UWord w2 ) {
+   if (w1 == w2) return 0;
+   return w1 < w2 ? -1 : 1;
+}
+
+/* Insert element a into the AVL tree t. Returns True if the depth of
+   the tree has grown. If element with that key is already present,
+   just copy a->val to existing node, first returning old ->val field
+   of existing node in *oldV, so that the caller can finalize it
+   however it wants.
+*/
+static
+Bool avl_insert_wrk ( AvlNode** rootp,
+                      /*OUT*/MaybeWord* oldV,
+                      AvlNode* a,
+                      Word (*kCmp)(UWord,UWord) )
+{
+   Word cmpres;
+
+   /* initialize */
+   a->child[0] = 0;
+   a->child[1] = 0;
+   a->balance = 0;
+   oldV->b = False;
+
+   /* insert into an empty tree? */
+   if (!(*rootp)) {
+      (*rootp) = a;
+      return True;
+   }
+
+   /* kCmp == NULL means the keys are compared as plain unsigned words. */
+   cmpres = kCmp ? /*boxed*/ kCmp( (*rootp)->key, a->key )
+                 : /*unboxed*/ cmp_unsigned_Words( (UWord)(*rootp)->key,
+                                                   (UWord)a->key );
+
+   if (cmpres > 0) {
+      /* insert into the left subtree */
+      if ((*rootp)->child[0]) {
+         AvlNode* left_subtree = (*rootp)->child[0];
+         if (avl_insert_wrk(&left_subtree, oldV, a, kCmp)) {
+            /* The left subtree got one level deeper.  Tip this node's
+               balance leftwards; only a previous balance of -1 (now
+               effectively -2) requires rotation. */
+            switch ((*rootp)->balance--) {
+               case 1: return False;
+               case 0: return True;
+               case -1: break;
+               default: tl_assert(0);
+            }
+            /* Rebalance: single right rotation for the left-left case,
+               double (left-then-right) rotation for left-right. */
+            if ((*rootp)->child[0]->balance < 0) {
+               avl_swr( rootp );
+               (*rootp)->balance = 0;
+               (*rootp)->child[1]->balance = 0;
+            } else {
+               avl_swl( &((*rootp)->child[0]) );
+               avl_swr( rootp );
+               avl_nasty( *rootp );
+            }
+         } else {
+            (*rootp)->child[0] = left_subtree;
+         }
+         return False;
+      } else {
+         /* no left child: 'a' becomes the new left leaf */
+         (*rootp)->child[0] = a;
+         if ((*rootp)->balance--)
+            return False;
+         return True;
+      }
+      tl_assert(0);/*NOTREACHED*/
+   }
+   else
+   if (cmpres < 0) {
+      /* insert into the right subtree; mirror image of the above */
+      if ((*rootp)->child[1]) {
+         AvlNode* right_subtree = (*rootp)->child[1];
+         if (avl_insert_wrk(&right_subtree, oldV, a, kCmp)) {
+            /* Right subtree got deeper: tip balance rightwards and
+               rotate if it reaches +2. */
+            switch((*rootp)->balance++){
+               case -1: return False;
+               case 0: return True;
+               case 1: break;
+               default: tl_assert(0);
+            }
+            if ((*rootp)->child[1]->balance > 0) {
+               avl_swl( rootp );
+               (*rootp)->balance = 0;
+               (*rootp)->child[0]->balance = 0;
+            } else {
+               avl_swr( &((*rootp)->child[1]) );
+               avl_swl( rootp );
+               avl_nasty( *rootp );
+            }
+         } else {
+            (*rootp)->child[1] = right_subtree;
+         }
+         return False;
+      } else {
+         /* no right child: 'a' becomes the new right leaf */
+         (*rootp)->child[1] = a;
+         if ((*rootp)->balance++)
+            return False;
+         return True;
+      }
+      tl_assert(0);/*NOTREACHED*/
+   }
+   else {
+      /* cmpres == 0, a duplicate - replace the val, but don't
+         incorporate the node in the tree.  NB: caller still owns 'a'
+         and is responsible for freeing it (see VG_(addToFM)). */
+      oldV->b = True;
+      oldV->w = (*rootp)->val;
+      (*rootp)->val = a->val;
+      return False;
+   }
+}
+
+/* Remove an element a from the AVL tree t. a must be part of
+   the tree. Returns True if the depth of the tree has shrunk.
+*/
+static
+Bool avl_remove_wrk ( AvlNode** rootp,
+                      AvlNode* a,
+                      Word(*kCmp)(UWord,UWord) )
+{
+   Bool ch;
+   Word cmpres;
+   /* kCmp == NULL means the keys are compared as plain unsigned words. */
+   cmpres = kCmp ? /*boxed*/ kCmp( (*rootp)->key, a->key )
+                 : /*unboxed*/ cmp_unsigned_Words( (UWord)(*rootp)->key,
+                                                   (UWord)a->key );
+
+   if (cmpres > 0){
+      /* remove from the left subtree */
+      AvlNode* left_subtree = (*rootp)->child[0];
+      tl_assert(left_subtree);
+      ch = avl_remove_wrk(&left_subtree, a, kCmp);
+      (*rootp)->child[0]=left_subtree;
+      if (ch) {
+         /* Left subtree got shallower: tip balance rightwards; only a
+            previous balance of +1 (now effectively +2) needs rotation. */
+         switch ((*rootp)->balance++) {
+            case -1: return True;
+            case 0: return False;
+            case 1: break;
+            default: tl_assert(0);
+         }
+         /* Choose single or double left rotation depending on the
+            right child's balance. */
+         switch ((*rootp)->child[1]->balance) {
+            case 0:
+               avl_swl( rootp );
+               (*rootp)->balance = -1;
+               (*rootp)->child[0]->balance = 1;
+               return False;
+            case 1:
+               avl_swl( rootp );
+               (*rootp)->balance = 0;
+               (*rootp)->child[0]->balance = 0;
+               return True;
+            case -1:
+               break;
+            default:
+               tl_assert(0);
+         }
+         /* right-left case: double rotation */
+         avl_swr( &((*rootp)->child[1]) );
+         avl_swl( rootp );
+         avl_nasty( *rootp );
+         return True;
+      }
+   }
+   else
+   if (cmpres < 0) {
+      /* remove from the right subtree; mirror image of the above */
+      AvlNode* right_subtree = (*rootp)->child[1];
+      tl_assert(right_subtree);
+      ch = avl_remove_wrk(&right_subtree, a, kCmp);
+      (*rootp)->child[1] = right_subtree;
+      if (ch) {
+         switch ((*rootp)->balance--) {
+            case 1: return True;
+            case 0: return False;
+            case -1: break;
+            default: tl_assert(0);
+         }
+         switch ((*rootp)->child[0]->balance) {
+            case 0:
+               avl_swr( rootp );
+               (*rootp)->balance = 1;
+               (*rootp)->child[1]->balance = -1;
+               return False;
+            case -1:
+               avl_swr( rootp );
+               (*rootp)->balance = 0;
+               (*rootp)->child[1]->balance = 0;
+               return True;
+            case 1:
+               break;
+            default:
+               tl_assert(0);
+         }
+         /* left-right case: double rotation */
+         avl_swl( &((*rootp)->child[0]) );
+         avl_swr( rootp );
+         avl_nasty( *rootp );
+         return True;
+      }
+   }
+   else {
+      /* found the node to remove: it must be exactly 'a' */
+      tl_assert(cmpres == 0);
+      tl_assert((*rootp)==a);
+      return avl_removeroot_wrk(rootp, kCmp);
+   }
+   /* subtree depth unchanged */
+   return 0;
+}
+
+/* Remove the root of the AVL tree *rootp.
+ * Warning: dumps core if *rootp is empty
+ */
+static
+Bool avl_removeroot_wrk ( AvlNode** rootp,
+                          Word(*kCmp)(UWord,UWord) )
+{
+   Bool     ch;
+   AvlNode* a;
+   /* Zero or one child: the child (or NULL) simply replaces the root,
+      and the tree is one level shallower. */
+   if (!(*rootp)->child[0]) {
+      if (!(*rootp)->child[1]) {
+         (*rootp) = 0;
+         return True;
+      }
+      (*rootp) = (*rootp)->child[1];
+      return True;
+   }
+   if (!(*rootp)->child[1]) {
+      (*rootp) = (*rootp)->child[0];
+      return True;
+   }
+   /* Two children: find the in-order neighbour on the taller side
+      (predecessor if left-heavy, else successor), remove it from the
+      tree, then splice it in where the root was. */
+   if ((*rootp)->balance < 0) {
+      /* remove from the left subtree */
+      a = (*rootp)->child[0];
+      while (a->child[1]) a = a->child[1];
+   } else {
+      /* remove from the right subtree */
+      a = (*rootp)->child[1];
+      while (a->child[0]) a = a->child[0];
+   }
+   ch = avl_remove_wrk(rootp, a, kCmp);
+   a->child[0] = (*rootp)->child[0];
+   a->child[1] = (*rootp)->child[1];
+   a->balance = (*rootp)->balance;
+   (*rootp) = a;
+   if(a->balance == 0) return ch;
+   return False;
+}
+
+/* Standard binary search: return the node of tree t whose key equals
+   k, or NULL if there is no such node. */
+static
+AvlNode* avl_find_node ( AvlNode* t, Word k, Word(*kCmp)(UWord,UWord) )
+{
+   if (kCmp) {
+      /* Boxed comparisons */
+      Word cmpresS;
+      while (True) {
+         if (t == NULL) return NULL;
+         cmpresS = kCmp(t->key, k);
+         if (cmpresS > 0) t = t->child[0]; else
+         if (cmpresS < 0) t = t->child[1]; else
+         return t;
+      }
+   } else {
+      /* Unboxed comparisons */
+      Word  cmpresS; /* signed */
+      UWord cmpresU; /* unsigned */
+      while (True) {
+         if (t == NULL) return NULL; /* unlikely ==> predictable */
+         cmpresS = cmp_unsigned_Words( (UWord)t->key, (UWord)k );
+         if (cmpresS == 0) return t; /* unlikely ==> predictable */
+         /* Branch-free child selection: extract the sign bit of the
+            compare result, giving 0 (key > k, go left) or 1 (key < k,
+            go right) as the child index. */
+         cmpresU = (UWord)cmpresS;
+         cmpresU >>=/*unsigned*/ (8 * sizeof(cmpresU) - 1);
+         t = t->child[cmpresU];
+      }
+   }
+}
+
+/* Find the nearest neighbours of 'key' in the tree: the largest
+   (key,val) pair strictly below it and the smallest strictly above
+   it.  minKey/minVal and maxKey/maxVal act as defaults, returned when
+   no tree element bounds 'key' on that side.  Returns False -- with
+   nothing written to the OUT parameters -- if 'key' itself is present
+   in the tree, which callers are required not to allow. */
+static
+Bool avl_find_bounds ( AvlNode* t,
+                       /*OUT*/UWord* kMinP, /*OUT*/UWord* vMinP,
+                       /*OUT*/UWord* kMaxP, /*OUT*/UWord* vMaxP,
+                       UWord minKey, UWord minVal,
+                       UWord maxKey, UWord maxVal,
+                       UWord key,
+                       Word(*kCmp)(UWord,UWord) )
+{
+   UWord kLowerBound = minKey;
+   UWord vLowerBound = minVal;
+   UWord kUpperBound = maxKey;
+   UWord vUpperBound = maxVal;
+   /* Walk from the root towards 'key', tightening one bound at every
+      step depending on which side of the current node 'key' lies. */
+   while (t) {
+      Word cmpresS = kCmp ? kCmp(t->key, key)
+                          : cmp_unsigned_Words(t->key, key);
+      if (cmpresS < 0) {
+         kLowerBound = t->key;
+         vLowerBound = t->val;
+         t = t->child[1];
+         continue;
+      }
+      if (cmpresS > 0) {
+         kUpperBound = t->key;
+         vUpperBound = t->val;
+         t = t->child[0];
+         continue;
+      }
+      /* We should never get here.  If we do, it means the given key
+         is actually present in the tree, which means the original
+         call was invalid -- an error on the caller's part, and we
+         cannot give any meaningful values for the bounds.  (Well,
+         maybe we could, but we're not gonna.  Ner!) */
+      return False;
+   }
+   if (kMinP) *kMinP = kLowerBound;
+   if (vMinP) *vMinP = vLowerBound;
+   if (kMaxP) *kMaxP = kUpperBound;
+   if (vMaxP) *vMaxP = vUpperBound;
+   return True;
+}
+
+// Reset the iterator stack to the empty state.
+static void stackClear(WordFM* fm)
+{
+   Int j = 0;
+   tl_assert(fm);
+   while (j < WFM_STKMAX) {
+      fm->nodeStack[j] = NULL;
+      fm->numStack[j] = 0;
+      j++;
+   }
+   fm->stackTop = 0;
+}
+
+// Push the pair (n, i) onto the iterator stack.
+static inline void stackPush(WordFM* fm, AvlNode* n, Int i)
+{
+   Int top = fm->stackTop;
+   tl_assert(top < WFM_STKMAX);
+   tl_assert(i >= 1 && i <= 3);
+   fm->nodeStack[top] = n;
+   fm->numStack[top] = i;
+   fm->stackTop = top + 1;
+}
+
+// Pop the top (node, num) pair off the iterator stack into (*n, *i),
+// clearing the vacated slot.  Returns False if the stack is empty.
+static inline Bool stackPop(WordFM* fm, AvlNode** n, Int* i)
+{
+   Int top;
+   tl_assert(fm->stackTop <= WFM_STKMAX);
+   if (fm->stackTop == 0)
+      return False;
+   fm->stackTop--;
+   top = fm->stackTop;
+   *n = fm->nodeStack[top];
+   *i = fm->numStack[top];
+   tl_assert(*i >= 1 && *i <= 3);
+   fm->nodeStack[top] = NULL;
+   fm->numStack[top] = 0;
+   return True;
+}
+
+/* Duplicate the subtree rooted at nd.  dopyK/dopyV, if non-NULL, are
+   used to deep-copy boxed keys/vals; otherwise keys/vals are copied
+   bitwise.  Space comes from alloc_nofail/cc.  Returns NULL if a dopy
+   callback signals failure by mapping a nonzero word to zero.
+   NOTE(review): on such a failure, nodes copied so far are not freed
+   -- apparent leak on the OOM path; presumably callers treat dcopy
+   OOM as fatal.  Confirm before relying on this in a loop. */
+static
+AvlNode* avl_dopy ( AvlNode* nd,
+                    UWord(*dopyK)(UWord),
+                    UWord(*dopyV)(UWord),
+                    void*(alloc_nofail)(HChar*,SizeT),
+                    HChar* cc )
+{
+   AvlNode* nyu;
+   if (! nd)
+      return NULL;
+   nyu = alloc_nofail(cc, sizeof(AvlNode));
+   tl_assert(nyu);
+
+   /* child pointers still reference the ORIGINAL subtrees here; they
+      are replaced by copies further down. */
+   nyu->child[0] = nd->child[0];
+   nyu->child[1] = nd->child[1];
+   nyu->balance = nd->balance;
+
+   /* Copy key */
+   if (dopyK) {
+      nyu->key = dopyK( nd->key );
+      if (nd->key != 0 && nyu->key == 0)
+         return NULL; /* oom in key dcopy */
+   } else {
+      /* copying assumedly unboxed keys */
+      nyu->key = nd->key;
+   }
+
+   /* Copy val */
+   if (dopyV) {
+      nyu->val = dopyV( nd->val );
+      if (nd->val != 0 && nyu->val == 0)
+         return NULL; /* oom in val dcopy */
+   } else {
+      /* copying assumedly unboxed vals */
+      nyu->val = nd->val;
+   }
+
+   /* Copy subtrees */
+   if (nyu->child[0]) {
+      nyu->child[0] = avl_dopy( nyu->child[0], dopyK, dopyV,
+                                alloc_nofail, cc );
+      if (! nyu->child[0])
+         return NULL;
+   }
+   if (nyu->child[1]) {
+      nyu->child[1] = avl_dopy( nyu->child[1], dopyK, dopyV,
+                                alloc_nofail, cc );
+      if (! nyu->child[1])
+         return NULL;
+   }
+
+   return nyu;
+}
+
+/* Fill in a freshly allocated WordFM: empty tree, the given
+   allocator/deallocator, cost centre and key comparator, and an
+   empty iterator stack. */
+static void initFM ( WordFM* fm,
+                     void* (*alloc_nofail)( HChar*, SizeT ),
+                     HChar* cc,
+                     void (*dealloc)(void*),
+                     Word (*kCmp)(UWord,UWord) )
+{
+   fm->root         = NULL;
+   fm->alloc_nofail = alloc_nofail;
+   fm->cc           = cc;
+   fm->dealloc      = dealloc;
+   fm->kCmp         = kCmp;
+   fm->stackTop     = 0;
+}
+
+/* --- Public interface functions --- */
+
+/* Allocate and initialise a WordFM.  If kCmp is non-NULL, elements in
+   the set are ordered according to the ordering specified by kCmp,
+   which becomes obvious if you use VG_(initIterFM),
+   VG_(initIterAtFM), VG_(nextIterFM), VG_(doneIterFM) to iterate over
+   sections of the map, or the whole thing.  If kCmp is NULL then the
+   ordering used is unsigned word ordering (UWord) on the key
+   values. */
+WordFM* VG_(newFM) ( void* (*alloc_nofail)( HChar*, SizeT ),
+                     HChar* cc,
+                     void (*dealloc)(void*),
+                     Word (*kCmp)(UWord,UWord) )
+{
+   WordFM* map = alloc_nofail( cc, sizeof(WordFM) );
+   tl_assert(map != NULL);
+   initFM( map, alloc_nofail, cc, dealloc, kCmp );
+   return map;
+}
+
+/* Post-order free of an entire subtree.  kFin/vFin, if supplied, are
+   applied to each key/val before its node is scrubbed and released. */
+static void avl_free ( AvlNode* nd,
+                       void(*kFin)(UWord),
+                       void(*vFin)(UWord),
+                       void(*dealloc)(void*) )
+{
+   if (nd == NULL)
+      return;
+   avl_free( nd->child[0], kFin, vFin, dealloc );
+   avl_free( nd->child[1], kFin, vFin, dealloc );
+   if (kFin)
+      kFin( nd->key );
+   if (vFin)
+      vFin( nd->val );
+   /* scrub before freeing, to catch use-after-free sooner */
+   VG_(memset)(nd, 0, sizeof(AvlNode));
+   dealloc(nd);
+}
+
+/* Free up the FM.  If kFin is non-NULL, it is applied to keys
+   before the FM is deleted; ditto with vFin for vals. */
+void VG_(deleteFM) ( WordFM* fm, void(*kFin)(UWord), void(*vFin)(UWord) )
+{
+   /* capture the deallocator first -- the memset below wipes it */
+   void(*release)(void*) = fm->dealloc;
+   avl_free( fm->root, kFin, vFin, release );
+   VG_(memset)( fm, 0, sizeof(WordFM) );
+   release(fm);
+}
+
+/* Add (k,v) to fm.  Returns True if a binding for k was already
+   present (in which case its value is updated in place), False if a
+   new binding was created. */
+Bool VG_(addToFM) ( WordFM* fm, UWord k, UWord v )
+{
+   MaybeWord oldV;
+   AvlNode* node;
+   node = fm->alloc_nofail( fm->cc, sizeof(AvlNode) );
+   node->key = k;
+   node->val = v;
+   oldV.b = False;
+   oldV.w = 0;
+   avl_insert_wrk( &fm->root, &oldV, node, fm->kCmp );
+   //if (oldV.b && fm->vFin)
+   //   fm->vFin( oldV.w );
+   /* If k was already bound, avl_insert_wrk copied node->val into the
+      existing tree node and left 'node' outside the tree, so release
+      it here.  NOTE(review): the displaced old value oldV.w is simply
+      dropped; the commented-out vFin hook above suggests finalisation
+      was once intended -- confirm callers of boxed maps don't leak. */
+   if (oldV.b)
+      fm->dealloc(node);
+   return oldV.b;
+}
+
+// Delete key from fm.  If found, the bound key and value are written
+// through oldK/oldV (each of which may be NULL), the node is freed,
+// and True is returned; otherwise False.
+Bool VG_(delFromFM) ( WordFM* fm,
+                      /*OUT*/UWord* oldK, /*OUT*/UWord* oldV, UWord key )
+{
+   AvlNode* nd = avl_find_node( fm->root, key, fm->kCmp );
+   if (nd == NULL)
+      return False;
+   avl_remove_wrk( &fm->root, nd, fm->kCmp );
+   if (oldK)
+      *oldK = nd->key;
+   if (oldV)
+      *oldV = nd->val;
+   fm->dealloc(nd);
+   return True;
+}
+
+// Look up key in fm.  If found, the bound key and value are written
+// through keyP/valP (each of which may be NULL) and True is returned;
+// otherwise False, with nothing written.
+Bool VG_(lookupFM) ( WordFM* fm,
+                     /*OUT*/UWord* keyP, /*OUT*/UWord* valP, UWord key )
+{
+   AvlNode* nd = avl_find_node( fm->root, key, fm->kCmp );
+   if (nd == NULL)
+      return False;
+   if (keyP)
+      *keyP = nd->key;
+   if (valP)
+      *valP = nd->val;
+   return True;
+}
+
+// See comment in pub_tool_wordfm.h for explanation
+Bool VG_(findBoundsFM)( WordFM* fm,
+                        /*OUT*/UWord* kMinP, /*OUT*/UWord* vMinP,
+                        /*OUT*/UWord* kMaxP, /*OUT*/UWord* vMaxP,
+                        UWord minKey, UWord minVal,
+                        UWord maxKey, UWord maxVal,
+                        UWord key )
+{
+   /* really we should assert that minKey <= key <= maxKey,
+      where <= is as defined by fm->kCmp. */
+   return avl_find_bounds( fm->root,
+                           kMinP, vMinP,
+                           kMaxP, vMaxP,
+                           minKey, minVal,
+                           maxKey, maxVal,
+                           key, fm->kCmp );
+}
+
+// See comment in pub_tool_wordfm.h for performance warning
+UWord VG_(sizeFM) ( WordFM* fm )
+{
+   // O(n): no size field is maintained, so the whole tree is walked.
+   if (fm->root == NULL)
+      return 0;
+   return size_avl_nonNull( fm->root );
+}
+
+// NB UNTESTED! TEST BEFORE USE!
+//Bool VG_(isEmptyFM)( WordFM* fm )
+//{
+// return fm->root ? False : True;
+//}
+
+// Prepare fm for a full in-order iteration starting at its smallest
+// key: reset the stack and seed it with the root (num == 1, meaning
+// "left subtree not yet visited").
+void VG_(initIterFM) ( WordFM* fm )
+{
+   tl_assert(fm);
+   stackClear(fm);
+   if (fm->root != NULL)
+      stackPush(fm, fm->root, 1);
+}
+
+// set up FM for iteration so that the first key subsequently produced
+// by VG_(nextIterFM) is the smallest key in the map >= start_at.
+// Naturally ">=" is defined by the comparison function supplied to
+// VG_(newFM), as documented above.
+void VG_(initIterAtFM) ( WordFM* fm, UWord start_at )
+{
+   AvlNode* t;
+   Word     cmpresS; /* signed */
+   UWord    cmpresU; /* unsigned */
+
+   tl_assert(fm);
+   stackClear(fm);
+
+   if (!fm->root)
+      return;
+
+   // Regular binary search from the root, pushing onto the iterator
+   // stack every node at which we descend leftwards (i.e. every node
+   // whose key is > start_at).  Those are exactly the nodes the
+   // iteration must still visit; they are pushed with num == 2,
+   // meaning "yield this node, then do its right subtree".
+   t = fm->root;
+
+   while (True) {
+      if (t == NULL) return;
+
+      cmpresS
+         = fm->kCmp ? /*boxed*/ fm->kCmp( t->key, start_at )
+                    : /*unboxed*/ cmp_unsigned_Words( t->key, start_at );
+
+      if (cmpresS == 0) {
+         // We found the exact key -- we are done.
+         // The iteration should start with this node.
+         stackPush(fm, t, 2);
+         // The stack now looks like {2, 2, ... ,2, 2}
+         return;
+      }
+      // Sign-bit trick: cmpresU is 0 if t->key > start_at (descend
+      // left), 1 if t->key < start_at (descend right).
+      cmpresU = (UWord)cmpresS;
+      cmpresU >>=/*unsigned*/ (8 * sizeof(cmpresU) - 1);
+      if (!cmpresU) {
+         // Push this node only if we go to the left child.
+         stackPush(fm, t, 2);
+      }
+      t = t->child[cmpresU];
+   }
+   // NOTE: an earlier revision had a stackPop/stackPush "fixup" of the
+   // stack top here, but it was unreachable -- the loop above can only
+   // exit via 'return' -- so it (and its n/i locals) has been removed.
+}
+
+// get next key/val pair.  Will tl_assert if fm has been modified
+// or looked up in since initIter{,At}FM was called.
+//
+// Stack protocol: each (node, num) pair on the iterator stack records
+// how much of that node remains to be visited.  num == 1: left
+// subtree, the node itself, then right subtree; num == 2: the node
+// itself, then right subtree; num == 3: right subtree only.
+Bool VG_(nextIterFM) ( WordFM* fm, /*OUT*/UWord* pKey, /*OUT*/UWord* pVal )
+{
+   Int i = 0;
+   AvlNode* n = NULL;
+
+   tl_assert(fm);
+
+   // This in-order traversal requires each node to be pushed and popped
+   // three times.  These could be avoided by updating nodes in-situ on the
+   // top of the stack, but the push/pop cost is so small that it's worth
+   // keeping this loop in this simpler form.
+   while (stackPop(fm, &n, &i)) {
+      switch (i) {
+         // 'goto case_1' acts as a tail-call into a child node,
+         // avoiding an immediate push-then-pop of (child, 1).
+         case 1: case_1:
+            stackPush(fm, n, 2);
+            /* if (n->child[0]) stackPush(fm, n->child[0], 1); */
+            if (n->child[0]) { n = n->child[0]; goto case_1; }
+            break;
+         case 2:
+            stackPush(fm, n, 3);
+            if (pKey) *pKey = n->key;
+            if (pVal) *pVal = n->val;
+            return True;
+         case 3:
+            /* if (n->child[1]) stackPush(fm, n->child[1], 1); */
+            if (n->child[1]) { n = n->child[1]; goto case_1; }
+            break;
+         default:
+            tl_assert(0);
+      }
+   }
+
+   // Stack empty, iterator is exhausted, return NULL
+   return False;
+}
+
+// clear the I'm iterating flag
+void VG_(doneIterFM) ( WordFM* fm )
+{
+   /* Currently a no-op: all iteration state lives in the node/num
+      stacks, which are reset by the next initIter{,At}FM call.  Kept
+      so callers can bracket iterations symmetrically. */
+}
+
+/* Make a structural copy of fm, using dopyK/dopyV (if non-NULL) to
+   deep-copy each boxed key/val.  Returns NULL if a dopy callback
+   reports failure (by returning 0 for a nonzero input).
+   NOTE(review): on such a failure the partially built copy (and the
+   new WordFM struct) is not freed -- apparent leak, presumably
+   tolerable because dcopy OOM is treated as fatal; confirm.  Note
+   also that the stack-clearing below targets 'fm', not 'nyu'; the
+   stackTop == 0 assertion is what guarantees both are already clean
+   after the struct copy. */
+WordFM* VG_(dopyFM) ( WordFM* fm, UWord(*dopyK)(UWord), UWord(*dopyV)(UWord) )
+{
+   WordFM* nyu;
+
+   /* can't clone the fm whilst iterating on it */
+   tl_assert(fm->stackTop == 0);
+
+   nyu = fm->alloc_nofail( fm->cc, sizeof(WordFM) );
+   tl_assert(nyu);
+
+   *nyu = *fm;
+
+   fm->stackTop = 0;
+   VG_(memset)(fm->nodeStack, 0, sizeof(fm->nodeStack));
+   VG_(memset)(fm->numStack, 0,  sizeof(fm->numStack));
+
+   if (nyu->root) {
+      nyu->root = avl_dopy( nyu->root, dopyK, dopyV,
+                            fm->alloc_nofail, fm->cc );
+      if (! nyu->root)
+         return NULL;
+   }
+
+   return nyu;
+}
+
+// admin: what's the 'common' allocation size (for tree nodes?)
+SizeT VG_(getNodeSizeFM)( void )
+{
+   // every map entry costs exactly one AvlNode
+   return sizeof(AvlNode);
+}
+
+//------------------------------------------------------------------//
+//--- end WordFM ---//
+//--- Implementation ---//
+//------------------------------------------------------------------//
+
+//------------------------------------------------------------------//
+//--- WordBag (unboxed words only) ---//
+//--- Implementation ---//
+//------------------------------------------------------------------//
+
+/* A trivial container, to make it opaque.  A bag is represented as a
+   map from element to its occurrence count (always >= 1). */
+struct _WordBag {
+   WordFM* fm; /* element -> count; unboxed (unsigned) key compares */
+};
+
+/* Create an empty bag: a WordFM mapping each element to its count,
+   ordered by plain unsigned comparison (kCmp == NULL). */
+WordBag* VG_(newBag) ( void* (*alloc_nofail)( HChar*, SizeT ),
+                       HChar* cc,
+                       void (*dealloc)(void*) )
+{
+   WordBag* b = alloc_nofail( cc, sizeof(WordBag) );
+   b->fm = VG_(newFM)( alloc_nofail, cc, dealloc, NULL );
+   return b;
+}
+
+/* Tear down a bag and everything in it. */
+void VG_(deleteBag) ( WordBag* bag )
+{
+   /* capture the deallocator first -- the memset below wipes bag->fm */
+   void (*release)(void*) = bag->fm->dealloc;
+   VG_(deleteFM)( bag->fm, NULL, NULL );
+   VG_(memset)( bag, 0, sizeof(WordBag) );
+   release( bag );
+}
+
+/* Add one occurrence of w to the bag. */
+void VG_(addToBag)( WordBag* bag, UWord w )
+{
+   UWord k, n;
+   if (!VG_(lookupFM)(bag->fm, &k, &n, w)) {
+      /* first occurrence */
+      VG_(addToFM)(bag->fm, w, 1);
+      return;
+   }
+   tl_assert(k == w);
+   tl_assert(n >= 1);
+   VG_(addToFM)(bag->fm, w, n+1);
+}
+
+/* How many occurrences of w does the bag contain?  Zero if absent. */
+UWord VG_(elemBag) ( WordBag* bag, UWord w )
+{
+   UWord k, n;
+   if (!VG_(lookupFM)( bag->fm, &k, &n, w ))
+      return 0;
+   tl_assert(k == w);
+   tl_assert(n >= 1);
+   return n;
+}
+
+/* Number of distinct elements in the bag (ignoring multiplicity). */
+UWord VG_(sizeUniqueBag) ( WordBag* bag )
+{
+   return VG_(sizeFM)( bag->fm );
+}
+
+/* Sum the occurrence counts over the subtree rooted at nd.
+   Unchecked precondition: nd is non-NULL. */
+static UWord sizeTotalBag_wrk ( AvlNode* nd )
+{
+   UWord total = nd->val;
+   tl_assert(total >= 1);
+   if (nd->child[0]) total += sizeTotalBag_wrk(nd->child[0]);
+   if (nd->child[1]) total += sizeTotalBag_wrk(nd->child[1]);
+   return total;
+}
+/* Total number of elements in the bag, counting multiplicity. */
+UWord VG_(sizeTotalBag)( WordBag* bag )
+{
+   AvlNode* root = bag->fm->root;
+   return root ? sizeTotalBag_wrk(root) : 0;
+}
+
+/* Remove one occurrence of w from the bag.  Returns False if w was
+   not present at all. */
+Bool VG_(delFromBag)( WordBag* bag, UWord w )
+{
+   UWord k, n;
+   if (!VG_(lookupFM)(bag->fm, &k, &n, w))
+      return False;
+   tl_assert(k == w);
+   tl_assert(n >= 1);
+   if (n == 1) {
+      /* last occurrence: drop the mapping entirely */
+      VG_(delFromFM)( bag->fm, NULL, NULL, w );
+   } else {
+      VG_(addToFM)(bag->fm, w, n-1);
+   }
+   return True;
+}
+
+/* Does the bag contain no elements at all? */
+Bool VG_(isEmptyBag)( WordBag* bag )
+{
+   return VG_(sizeFM)(bag->fm) == 0 ? True : False;
+}
+
+/* Does the bag contain exactly one element, with count exactly 1? */
+Bool VG_(isSingletonTotalBag)( WordBag* bag )
+{
+   AvlNode* root;
+   if (VG_(sizeFM)(bag->fm) != 1)
+      return False;
+   root = bag->fm->root;
+   tl_assert(root);
+   tl_assert(!root->child[0]);
+   tl_assert(!root->child[1]);
+   return root->val == 1 ? True : False;
+}
+
+UWord VG_(anyElementOfBag)( WordBag* bag )
+{
+   /* Return an arbitrarily chosen element.  The root of the
+      underlying AVL tree is the cheapest one to reach.  Asserts if
+      the bag is empty -- that is a caller error. */
+   AvlNode* root = bag->fm->root;
+   tl_assert(root);
+   tl_assert(root->val >= 1);
+   return root->key;
+}
+
+/* Iteration over a bag: thin wrappers around the WordFM iterator.
+   VG_(nextIterBag) yields (element, count) pairs in increasing
+   element order. */
+void VG_(initIterBag)( WordBag* bag )
+{
+   VG_(initIterFM)(bag->fm);
+}
+
+Bool VG_(nextIterBag)( WordBag* bag, /*OUT*/UWord* pVal, /*OUT*/UWord* pCount )
+{
+   return VG_(nextIterFM)( bag->fm, pVal, pCount );
+}
+
+void VG_(doneIterBag)( WordBag* bag )
+{
+   VG_(doneIterFM)( bag->fm );
+}
+
+//------------------------------------------------------------------//
+//--- end WordBag (unboxed words only) ---//
+//--- Implementation ---//
+//------------------------------------------------------------------//
+
+/*--------------------------------------------------------------------*/
+/*--- end m_wordfm.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/m_xarray.c.svn-base b/coregrind/.svn/text-base/m_xarray.c.svn-base
new file mode 100644
index 0000000..45a1e59
--- /dev/null
+++ b/coregrind/.svn/text-base/m_xarray.c.svn-base
@@ -0,0 +1,281 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An expandable array implementation. m_xarray.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2007-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_xarray.h" /* self */
+
+
+/* See pub_tool_xarray.h for details of what this is all about. */
+
+struct _XArray {
+ void* (*alloc) ( HChar*, SizeT ); /* alloc fn (nofail) */
+ HChar* cc; /* cost centre for alloc */
+ void (*free) ( void* ); /* free fn */
+ Int (*cmpFn) ( void*, void* ); /* cmp fn (may be NULL) */
+ Word elemSzB; /* element size in bytes */
+ void* arr; /* pointer to elements */
+ Word usedsizeE; /* # used elements in arr */
+ Word totsizeE; /* max size of arr, in elements */
+ Bool sorted; /* is it sorted? */
+};
+
+
+XArray* VG_(newXA) ( void*(*alloc_fn)(HChar*,SizeT),
+ HChar* cc,
+ void(*free_fn)(void*),
+ Word elemSzB )
+{
+ struct _XArray* xa;
+ /* Implementation relies on Word being signed and (possibly)
+ on SizeT being unsigned. */
+ vg_assert( sizeof(Word) == sizeof(void*) );
+ vg_assert( ((Word)(-1)) < ((Word)(0)) );
+ vg_assert( ((SizeT)(-1)) > ((SizeT)(0)) );
+ /* check user-supplied info .. */
+ vg_assert(alloc_fn);
+ vg_assert(free_fn);
+ vg_assert(elemSzB > 0);
+ xa = alloc_fn( cc, sizeof(struct _XArray) );
+ vg_assert(xa);
+ xa->alloc = alloc_fn;
+ xa->cc = cc;
+ xa->free = free_fn;
+ xa->cmpFn = NULL;
+ xa->elemSzB = elemSzB;
+ xa->usedsizeE = 0;
+ xa->totsizeE = 0;
+ xa->sorted = False;
+ xa->arr = NULL;
+ return xa;
+}
+
+XArray* VG_(cloneXA)( HChar* cc, XArray* xao )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ struct _XArray* nyu;
+ HChar* nyu_cc;
+ vg_assert(xa);
+ vg_assert(xa->alloc);
+ vg_assert(xa->free);
+ vg_assert(xa->elemSzB >= 1);
+ nyu_cc = cc ? cc : xa->cc;
+ nyu = xa->alloc( nyu_cc, sizeof(struct _XArray) );
+ if (!nyu)
+ return NULL;
+ /* Copy everything verbatim ... */
+ *nyu = *xa;
+ nyu->cc = nyu_cc;
+ /* ... except we have to clone the contents-array */
+ if (nyu->arr) {
+ /* Restrict the total size of the new array to its current
+ actual size. That means we don't waste space copying the
+ unused tail of the original. The tradeoff is that it
+ guarantees we will have to resize the child if even one more
+ element is later added to it, unfortunately. */
+ nyu->totsizeE = nyu->usedsizeE;
+ /* and allocate .. */
+ nyu->arr = nyu->alloc( nyu->cc, nyu->totsizeE * nyu->elemSzB );
+ if (!nyu->arr) {
+ nyu->free(nyu);
+ return NULL;
+ }
+ VG_(memcpy)( nyu->arr, xa->arr, nyu->totsizeE * nyu->elemSzB );
+ }
+ /* We're done! */
+ return nyu;
+}
+
+void VG_(deleteXA) ( XArray* xao )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(xa->free);
+ if (xa->arr)
+ xa->free(xa->arr);
+ xa->free(xa);
+}
+
+void VG_(setCmpFnXA) ( XArray* xao, Int (*compar)(void*,void*) )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(compar);
+ xa->cmpFn = compar;
+ xa->sorted = False;
+}
+
+inline void* VG_(indexXA) ( XArray* xao, Word n )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(n >= 0);
+ vg_assert(n < xa->usedsizeE);
+ return ((char*)xa->arr) + n * xa->elemSzB;
+}
+
+static inline void ensureSpaceXA ( struct _XArray* xa )
+{
+ if (xa->usedsizeE == xa->totsizeE) {
+ void* tmp;
+ Word newsz;
+ if (xa->totsizeE == 0)
+ vg_assert(!xa->arr);
+ if (xa->totsizeE > 0)
+ vg_assert(xa->arr);
+ if (xa->totsizeE == 0) {
+ /* No point in having tiny (eg) 2-byte allocations for the
+ element array, since all allocs are rounded up to 8 anyway.
+ Hence increase the initial array size for tiny elements in
+ an attempt to avoid reallocations of size 2, 4, 8 if the
+ array does start to fill up. */
+ if (xa->elemSzB == 1) newsz = 8;
+ else if (xa->elemSzB == 2) newsz = 4;
+ else newsz = 2;
+ } else {
+ newsz = 2 + (3 * xa->totsizeE) / 2; /* 2 * xa->totsizeE; */
+ }
+ if (0 && xa->totsizeE >= 10000)
+ VG_(printf)("addToXA: increasing from %ld to %ld\n",
+ xa->totsizeE, newsz);
+ tmp = xa->alloc(xa->cc, newsz * xa->elemSzB);
+ vg_assert(tmp);
+ if (xa->usedsizeE > 0)
+ VG_(memcpy)(tmp, xa->arr, xa->usedsizeE * xa->elemSzB);
+ if (xa->arr)
+ xa->free(xa->arr);
+ xa->arr = tmp;
+ xa->totsizeE = newsz;
+ }
+}
+
+Word VG_(addToXA) ( XArray* xao, void* elem )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(elem);
+ vg_assert(xa->totsizeE >= 0);
+ vg_assert(xa->usedsizeE >= 0 && xa->usedsizeE <= xa->totsizeE);
+ ensureSpaceXA( xa );
+ vg_assert(xa->usedsizeE < xa->totsizeE);
+ vg_assert(xa->arr);
+ VG_(memcpy)( ((UChar*)xa->arr) + xa->usedsizeE * xa->elemSzB,
+ elem, xa->elemSzB );
+ xa->usedsizeE++;
+ xa->sorted = False;
+ return xa->usedsizeE-1;
+}
+
+Word VG_(addBytesToXA) ( XArray* xao, void* bytesV, Word nbytes )
+{
+ Word r, i;
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(xa->elemSzB == 1);
+ vg_assert(nbytes >= 0);
+ vg_assert(xa->totsizeE >= 0);
+ vg_assert(xa->usedsizeE >= 0 && xa->usedsizeE <= xa->totsizeE);
+ r = xa->usedsizeE;
+ for (i = 0; i < nbytes; i++) {
+ ensureSpaceXA( xa );
+ vg_assert(xa->usedsizeE < xa->totsizeE);
+ vg_assert(xa->arr);
+ * (((UChar*)xa->arr) + xa->usedsizeE) = ((UChar*)bytesV)[i];
+ xa->usedsizeE++;
+ }
+ xa->sorted = False;
+ return r;
+}
+
+void VG_(sortXA) ( XArray* xao )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(xa->cmpFn);
+ VG_(ssort)( xa->arr, xa->usedsizeE, xa->elemSzB, xa->cmpFn );
+ xa->sorted = True;
+}
+
+Bool VG_(lookupXA) ( XArray* xao, void* key, Word* first, Word* last )
+{
+ Word lo, mid, hi, cres;
+ void* midv;
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(xa->cmpFn);
+ vg_assert(xa->sorted);
+ vg_assert(first);
+ vg_assert(last);
+ lo = 0;
+ hi = xa->usedsizeE-1;
+ while (True) {
+ /* current unsearched space is from lo to hi, inclusive. */
+ if (lo > hi) return False; /* not found */
+ mid = (lo + hi) / 2;
+ midv = VG_(indexXA)( xa, mid );
+ cres = xa->cmpFn( key, midv );
+ if (cres < 0) { hi = mid-1; continue; }
+ if (cres > 0) { lo = mid+1; continue; }
+ /* Found it, at mid. See how far we can expand this. */
+ vg_assert(xa->cmpFn( key, VG_(indexXA)(xa, lo) ) >= 0);
+ vg_assert(xa->cmpFn( key, VG_(indexXA)(xa, hi) ) <= 0);
+ *first = *last = mid;
+ while (*first > 0
+ && 0 == xa->cmpFn( key, VG_(indexXA)(xa, (*first)-1)))
+ (*first)--;
+ while (*last < xa->usedsizeE-1
+ && 0 == xa->cmpFn( key, VG_(indexXA)(xa, (*last)+1)))
+ (*last)++;
+ return True;
+ }
+}
+
+Word VG_(sizeXA) ( XArray* xao )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ return xa->usedsizeE;
+}
+
+void VG_(dropTailXA) ( XArray* xao, Word n )
+{
+ struct _XArray* xa = (struct _XArray*)xao;
+ vg_assert(xa);
+ vg_assert(n >= 0);
+ vg_assert(n <= xa->usedsizeE);
+ xa->usedsizeE -= n;
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end m_xarray.c ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/no_op_client_for_valgrind.c.svn-base b/coregrind/.svn/text-base/no_op_client_for_valgrind.c.svn-base
new file mode 100644
index 0000000..5956607
--- /dev/null
+++ b/coregrind/.svn/text-base/no_op_client_for_valgrind.c.svn-base
@@ -0,0 +1,16 @@
+
+/* This program doesn't do anything. So why is it here? It's a
+ helper for ptraced-based launchers (eg aix5). They can't run 'no
+ program' if the user types "valgrind --help", so they run this
+ do-nothing program. m_main notices that and turns the exe name
+ back into NULL. Then --help, --version etc work as they should. */
+
+#include <stdio.h>
+int main ( void )
+{
+ fprintf(stderr,
+ "This program (part of Valgrind) does nothing except print\n"
+ "this text. You should not see this text. If you do, some\n"
+ "part of valgrind's launch mechanism is not working correctly.\n");
+ return 0;
+}
diff --git a/coregrind/.svn/text-base/pub_core_aspacemgr.h.svn-base b/coregrind/.svn/text-base/pub_core_aspacemgr.h.svn-base
new file mode 100644
index 0000000..066d1cf
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_aspacemgr.h.svn-base
@@ -0,0 +1,408 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The address space manager. pub_core_aspacemgr.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_ASPACEMGR_H
+#define __PUB_CORE_ASPACEMGR_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module deals with management of the entire process
+// address space. Almost everything depends upon it, including dynamic
+// memory management. Hence this module is almost completely
+// standalone; the only module it uses is m_debuglog. DO NOT CHANGE
+// THIS.
+//--------------------------------------------------------------------
+
+#include "pub_tool_aspacemgr.h"
+
+//--------------------------------------------------------------
+// Definition of address-space segments
+
+/* types SegKind, ShrinkMode and NSegment are described in
+ the tool-visible header file, not here. */
+
+
+//--------------------------------------------------------------
+// Initialisation
+
+/* Initialise the address space manager, setting up the initial
+ segment list, and reading /proc/self/maps into it. This must
+ be called before any other function.
+
+ Takes a pointer to the SP at the time V gained control. This is
+ taken to be the highest usable address (more or less). Based on
+ that (and general consultation of tea leaves, etc) return a
+ suggested end address for the client's stack. */
+extern Addr VG_(am_startup) ( Addr sp_at_startup );
+
+
+//--------------------------------------------------------------
+// Querying current status
+
+/* Finds the segment containing 'a'. Only returns file/anon/resvn
+ segments. This returns a 'NSegment const *' - a pointer to
+ readonly data. */
+// Is in tool-visible header file.
+// extern NSegment const * VG_(am_find_nsegment) ( Addr a );
+
+/* Find the next segment along from 'here', if it is a file/anon/resvn
+ segment. */
+extern NSegment const* VG_(am_next_nsegment) ( NSegment* here, Bool fwds );
+
+/* Is the area [start .. start+len-1] validly accessible by the
+ client with at least the permissions 'prot' ? To find out
+ simply if said area merely belongs to the client, pass
+ VKI_PROT_NONE as 'prot'. Will return False if any part of the
+ area does not belong to the client or does not have at least
+ the stated permissions. */
+// Is in tool-visible header file.
+// extern Bool VG_(am_is_valid_for_client)
+// ( Addr start, SizeT len, UInt prot );
+
+/* Variant of VG_(am_is_valid_for_client) which allows free areas to
+ be consider part of the client's addressable space. It also
+ considers reservations to be allowable, since from the client's
+ point of view they don't exist. */
+extern Bool VG_(am_is_valid_for_client_or_free_or_resvn)
+ ( Addr start, SizeT len, UInt prot );
+
+/* Trivial fn: return the total amount of space in anonymous mappings,
+ both for V and the client. Is used for printing stats in
+ out-of-memory messages. */
+extern ULong VG_(am_get_anonsize_total)( void );
+
+/* Show the segment array on the debug log, at given loglevel. */
+extern void VG_(am_show_nsegments) ( Int logLevel, HChar* who );
+
+/* Get the filename corresponding to this segment, if known and if it
+ has one. The returned name's storage cannot be assumed to be
+ persistent, so the caller should immediately copy the name
+ elsewhere. This may return NULL if the file name is not known or
+ for arbitrary other implementation-dependent reasons, so callers
+ need to be able to handle a NULL return value. */
+// Is in tool-visible header file.
+// extern HChar* VG_(am_get_filename)( NSegment* );
+
+/* VG_(am_get_segment_starts) is also part of this section, but its
+ prototype is tool-visible, hence not in this header file. */
+
+/* Sanity check: check that Valgrind and the kernel agree on the
+ address space layout. Prints offending segments and call point if
+ a discrepancy is detected, but does not abort the system. Returned
+ Bool is False if a discrepancy was found. */
+
+extern Bool VG_(am_do_sync_check) ( const HChar* fn,
+ const HChar* file, Int line );
+
+
+//--------------------------------------------------------------
+// Functions pertaining to the central query-notify mechanism
+// used to handle mmap/munmap/mprotect resulting from client
+// syscalls.
+
+/* Describes a request for VG_(am_get_advisory). */
+typedef
+ struct {
+ enum { MFixed, MHint, MAny } rkind;
+ Addr start;
+ Addr len;
+ }
+ MapRequest;
+
+/* Query aspacem to ask where a mapping should go. On success, the
+ advised placement is returned, and *ok is set to True. On failure,
+ zero is returned and *ok is set to False. Note that *ok must be
+ consulted by the caller to establish success or failure; that
+ cannot be established reliably from the returned value. If *ok is
+ set to False, it means aspacem has vetoed the mapping, and so the
+ caller should not proceed with it. */
+extern Addr VG_(am_get_advisory)
+ ( MapRequest* req, Bool forClient, /*OUT*/Bool* ok );
+
+/* Convenience wrapper for VG_(am_get_advisory) for client floating or
+ fixed requests. If start is zero, a floating request is issued; if
+ nonzero, a fixed request at that address is issued. Same comments
+ about return values apply. */
+extern Addr VG_(am_get_advisory_client_simple)
+ ( Addr start, SizeT len, /*OUT*/Bool* ok );
+
+/* Notifies aspacem that the client completed an mmap successfully.
+ The segment array is updated accordingly. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+extern Bool VG_(am_notify_client_mmap)
+ ( Addr a, SizeT len, UInt prot, UInt flags, Int fd, Off64T offset );
+
+/* Notifies aspacem that the client completed a shmat successfully.
+ The segment array is updated accordingly. If the returned Bool is
+ True, the caller should immediately discard translations from the
+ specified address range. */
+extern Bool VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot );
+
+/* Notifies aspacem that an mprotect was completed successfully. The
+ segment array is updated accordingly. Note, as with
+ VG_(am_notify_munmap), it is not the job of this function to reject
+ stupid mprotects, for example the client doing mprotect of
+ non-client areas. Such requests should be intercepted earlier, by
+ the syscall wrapper for mprotect. This function merely records
+ whatever it is told. If the returned Bool is True, the caller
+ should immediately discard translations from the specified address
+ range. */
+extern Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot );
+
+/* Notifies aspacem that an munmap completed successfully. The
+ segment array is updated accordingly. As with
+ VG_(am_notify_munmap), we merely record the given info, and don't
+ check it for sensibleness. If the returned Bool is True, the
+ caller should immediately discard translations from the specified
+ address range. */
+extern Bool VG_(am_notify_munmap)( Addr start, SizeT len );
+
+/* Hand a raw mmap to the kernel, without aspacem updating the segment
+ array. THIS FUNCTION IS DANGEROUS -- it will cause aspacem's view
+ of the address space to diverge from that of the kernel. DO NOT
+ USE IT UNLESS YOU UNDERSTAND the request-notify model used by
+ aspacem. In short, DO NOT USE THIS FUNCTION. */
+extern SysRes VG_(am_do_mmap_NO_NOTIFY)
+ ( Addr start, SizeT length, UInt prot, UInt flags, UInt fd, Off64T offset);
+
+
+//--------------------------------------------------------------
+// Functions pertaining to AIX5-specific notifications.
+
+/* Describes followup actions that need to be done following a call to
+ VG_(am_aix5_reread_procmap). When acquire==True, the specified
+ code and data segments have been mapped into the process, and so
+ m_debuginfo needs to read info for it; also m_redir needs to know,
+ and the tool needs to be told. When acquire==False, the specified
+ segments have been unloaded and m_debuginfo, m_redir and the tool
+ (and m_transtab?) need to notified appropriately. */
+typedef
+ struct {
+ Addr code_start;
+ Word code_len;
+ Addr data_start;
+ Word data_len;
+ UChar* file_name;
+ UChar* mem_name;
+ Bool is_mainexe;
+ Bool acquire;
+ }
+ AixCodeSegChange;
+
+/* Tell aspacem that /proc/<pid>/map may have changed (eg following
+ __loadx) and so it should be re-read, and the code/data segment
+ list updated accordingly. The resulting array of AixCodeChangeSeg
+ directives are written to 'directives', and the number of entries
+ to *ndirectives. */
+extern void VG_(am_aix5_reread_procmap)
+ ( /*OUT*/AixCodeSegChange* directives, /*OUT*/Int* ndirectives );
+
+/* Find out the size of the AixCodeSegChange that must be
+ presented to VG_(am_aix5_reread_procmap). */
+extern Int VG_(am_aix5_reread_procmap_howmany_directives)(void);
+
+/* Tell aspacem where the initial client stack is, so that it
+ can later produce a faked-up NSegment in response to
+ VG_(am_find_nsegment) for athat address, if asked. */
+extern void VG_(am_aix5_set_initial_client_sp)( Addr );
+
+/* The AIX5 aspacem implementation needs to be told when it is and
+ isn't allowed to use sbrk to allocate memory. Hence: */
+extern Bool VG_(am_aix5_sbrk_allowed);
+
+
+//--------------------------------------------------------------
+// Dealing with mappings which do not arise directly from the
+// simulation of the client. These are typically used for
+// loading the client and building its stack/data segment, before
+// execution begins. Also for V's own administrative use.
+
+/* --- --- --- map, unmap, protect --- --- --- */
+
+/* Map a file at a fixed address for the client, and update the
+ segment array accordingly. */
+extern SysRes VG_(am_mmap_file_fixed_client)
+ ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset );
+
+/* Map anonymously at a fixed address for the client, and update
+ the segment array accordingly. */
+extern SysRes VG_(am_mmap_anon_fixed_client)
+ ( Addr start, SizeT length, UInt prot );
+
+
+/* Map anonymously at an unconstrained address for the client, and
+ update the segment array accordingly. */
+extern SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot );
+
+/* Similarly, acquire new address space for the client but with
+ considerable restrictions on what can be done with it: (1) the
+ actual protections may exceed those stated in 'prot', (2) the
+ area's protections cannot be later changed using any form of
+ mprotect, and (3) the area cannot be freed using any form of
+ munmap. On Linux this behaves the same as
+ VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
+ by using sbrk, so as to make use of large pages on AIX. */
+extern SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot );
+
+
+/* Map anonymously at an unconstrained address for V, and update the
+ segment array accordingly. This is fundamentally how V allocates
+ itself more address space when needed. */
+extern SysRes VG_(am_mmap_anon_float_valgrind)( SizeT cszB );
+
+/* Same comments apply as per VG_(am_sbrk_anon_float_client). On
+ Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
+extern SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB );
+
+
+/* Map a file at an unconstrained address for V, and update the
+ segment array accordingly. This is used by V for transiently
+ mapping in object files to read their debug info. */
+extern SysRes VG_(am_mmap_file_float_valgrind)
+ ( SizeT length, UInt prot, Int fd, Off64T offset );
+
+/* Unmap the given address range and update the segment array
+ accordingly. This fails if the range isn't valid for the client.
+ If *need_discard is True after a successful return, the caller
+ should immediately discard translations from the specified address
+ range. */
+extern SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
+ Addr start, SizeT length );
+
+/* Let (start,len) denote an area within a single Valgrind-owned
+ segment (anon or file). Change the ownership of [start, start+len)
+ to the client instead. Fails if (start,len) does not denote a
+ suitable segment. */
+extern Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len );
+
+/* 'seg' must be NULL or have been obtained from
+ VG_(am_find_nsegment), and still valid. If non-NULL, and if it
+ denotes a SkAnonC (anonymous client mapping) area, set the .isCH
+ (is-client-heap) flag for that area. Otherwise do nothing.
+ (Bizarre interface so that the same code works for both Linux and
+ AIX and does not impose inefficiencies on the Linux version.) */
+extern void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg );
+
+/* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
+ segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
+ segment. */
+extern void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* );
+
+/* --- --- --- reservations --- --- --- */
+
+/* Create a reservation from START .. START+LENGTH-1, with the given
+ ShrinkMode. When checking whether the reservation can be created,
+ also ensure that at least abs(EXTRA) extra free bytes will remain
+ above (> 0) or below (< 0) the reservation.
+
+ The reservation will only be created if it, plus the extra-zone,
+ falls entirely within a single free segment. The returned Bool
+ indicates whether the creation succeeded. */
+extern Bool VG_(am_create_reservation)
+ ( Addr start, SizeT length, ShrinkMode smode, SSizeT extra );
+
+/* Let SEG be an anonymous client mapping. This fn extends the
+ mapping by DELTA bytes, taking the space from a reservation section
+ which must be adjacent. If DELTA is positive, the segment is
+ extended forwards in the address space, and the reservation must be
+ the next one along. If DELTA is negative, the segment is extended
+ backwards in the address space and the reservation must be the
+ previous one. DELTA must be page aligned. abs(DELTA) must not
+ exceed the size of the reservation segment minus one page, that is,
+ the reservation segment after the operation must be at least one
+ page long. */
+extern Bool VG_(am_extend_into_adjacent_reservation_client)
+ ( NSegment* seg, SSizeT delta );
+
+/* --- --- --- resizing/move a mapping --- --- --- */
+
+/* Let SEG be a client mapping (anonymous or file). This fn extends
+ the mapping forwards only by DELTA bytes, and trashes whatever was
+ in the new area. Fails if SEG is not a single client mapping or if
+ the new area is not accessible to the client. Fails if DELTA is
+ not page aligned. *seg is invalid after a successful return. If
+ *need_discard is True after a successful return, the caller should
+ immediately discard translations from the new area. */
+extern Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
+ NSegment* seg, SizeT delta );
+
+/* Remap the old address range to the new address range. Fails if any
+ parameter is not page aligned, if the either size is zero, if any
+ wraparound is implied, if the old address range does not fall
+ entirely within a single segment, if the new address range overlaps
+ with the old one, or if the old address range is not a valid client
+ mapping. If *need_discard is True after a successful return, the
+ caller should immediately discard translations from both specified
+ address ranges. */
+extern Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
+ Addr old_addr, SizeT old_len,
+ Addr new_addr, SizeT new_len );
+
+//--------------------------------------------------------------
+// Valgrind (non-client) thread stacks. V itself runs on such
+// stacks. The address space manager provides and suitably
+// protects such stacks.
+
+#if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+# define VG_STACK_GUARD_SZB 65536 // 1 or 16 pages
+# define VG_STACK_ACTIVE_SZB 131072 // 2 or 32 pages
+#else
+# define VG_STACK_GUARD_SZB 8192 // 2 pages
+# define VG_STACK_ACTIVE_SZB 65536 // 16 pages
+#endif
+
+typedef
+ struct {
+ HChar bytes[VG_STACK_GUARD_SZB
+ + VG_STACK_ACTIVE_SZB
+ + VG_STACK_GUARD_SZB];
+ }
+ VgStack;
+
+
+/* Allocate and initialise a VgStack (anonymous client space).
+ Protect the stack active area and the guard areas appropriately.
+ Returns NULL on failure, else the address of the bottom of the
+ stack. On success, also sets *initial_sp to what the stack pointer
+ should be set to. */
+
+extern VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp );
+
+/* Figure out how many bytes of the stack's active area have not
+ been used. Used for estimating if we are close to overflowing it. */
+
+extern Int VG_(am_get_VgStack_unused_szB)( VgStack* stack );
+
+
+#endif // __PUB_CORE_ASPACEMGR_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_basics.h.svn-base b/coregrind/.svn/text-base/pub_core_basics.h.svn-base
new file mode 100644
index 0000000..eb38f6b
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_basics.h.svn-base
@@ -0,0 +1,70 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Header included by every core C file. pub_core_basics.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_BASICS_H
+#define __PUB_CORE_BASICS_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This header should be imported by every single C file
+// in the core. It contains the basic types and other things needed
+// everywhere.
+//--------------------------------------------------------------------
+
+#include "pub_tool_basics.h"
+
+/* ---------------------------------------------------------------------
+ Other headers to include
+ ------------------------------------------------------------------ */
+
+// Might as well have the following two in here, their contents are used so
+// broadly (eg. in pub_core_threadstate.h).
+
+#include "libvex.h"
+
+#if defined(VGA_x86)
+# include "libvex_guest_x86.h"
+#elif defined(VGA_amd64)
+# include "libvex_guest_amd64.h"
+#elif defined(VGA_ppc32)
+# include "libvex_guest_ppc32.h"
+#elif defined(VGA_ppc64)
+# include "libvex_guest_ppc64.h"
+#else
+# error Unknown arch
+#endif
+
+// For jmp_buf
+#include <setjmp.h>
+
+#endif // __PUB_CORE_BASICS_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_basics_asm.h.svn-base b/coregrind/.svn/text-base/pub_core_basics_asm.h.svn-base
new file mode 100644
index 0000000..a71e8ff
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_basics_asm.h.svn-base
@@ -0,0 +1,50 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Header imported directly by every core asm file. ---*/
+/*--- pub_core_basics_asm.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_BASICS_ASM_H
+#define __PUB_CORE_BASICS_ASM_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This header should be imported by every single asm file
+// in the core. It contains really basic things needed everywhere.
+//--------------------------------------------------------------------
+
+#include "pub_tool_basics_asm.h"
+
+// Autoconf-generated settings
+#include "config.h"
+
+#endif /* __PUB_CORE_BASICS_ASM_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/pub_core_clientstate.h.svn-base b/coregrind/.svn/text-base/pub_core_clientstate.h.svn-base
new file mode 100644
index 0000000..1809074
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_clientstate.h.svn-base
@@ -0,0 +1,93 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Misc client state info pub_core_clientstate.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_CLIENTSTATE_H
+#define __PUB_CORE_CLIENTSTATE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module holds various bits of client state which don't
+// live comfortably anywhere else. Note that the ThreadStates for the
+// client don't live here; they instead live in m_threadstate.h. Most
+// of these values are set once at startup and not changed later.
+//--------------------------------------------------------------------
+
+#include "pub_tool_clientstate.h"
+
+// Address space globals
+
+extern Addr VG_(clstk_base); // client stack range
+extern Addr VG_(clstk_end);
+extern UWord VG_(clstk_id); // client stack id
+
+extern Addr VG_(brk_base); // start of brk
+extern Addr VG_(brk_limit); // current brk
+
+/* A fd which refers to the client executable. */
+extern Int VG_(cl_exec_fd);
+
+/* A fd which refers to the fake /proc/<pid>/cmdline in /tmp. The
+ idea is: make up the /proc/<pid>/cmdline file the client would
+ expect to see if it was running natively. Copy into a file in
+ /tmp. When the client then does an open of /proc/<pid>/cmdline or
+ /proc/self/cmdline, instead give it a file handle to the file in
+ /tmp. The problem of deleting said file when Valgrind exits is
+ neatly sidestepped by unlinking it as soon as it has been created,
+ but holding on to the file handle. That causes the kernel to keep
+ the file contents alive exactly until the process exits. */
+extern Int VG_(cl_cmdline_fd);
+
+// Client's original rlimit data and rlimit stack
+extern struct vki_rlimit VG_(client_rlimit_data);
+extern struct vki_rlimit VG_(client_rlimit_stack);
+
+// Name of the launcher, as extracted from VALGRIND_LAUNCHER at
+// startup.
+extern HChar* VG_(name_of_launcher);
+
+/* Application-visible file descriptor limits */
+extern Int VG_(fd_soft_limit);
+extern Int VG_(fd_hard_limit);
+
+/* Useful addresses extracted from the client */
+/* Where is the __libc_freeres_wrapper routine we made? */
+extern Addr VG_(client___libc_freeres_wrapper);
+
+/* x86-linux only: where is ld.so's _dl_sysinfo_int80 function?
+ Finding it isn't essential, but knowing where it is does sometimes
+ help produce better back traces. See big comment in
+ VG_(get_StackTrace) in m_stacktrace.c for further info. */
+extern Addr VG_(client__dl_sysinfo_int80);
+
+
+#endif // __PUB_CORE_CLIENTSTATE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_clreq.h.svn-base b/coregrind/.svn/text-base/pub_core_clreq.h.svn-base
new file mode 100644
index 0000000..179dc0c
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_clreq.h.svn-base
@@ -0,0 +1,80 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Internal client requests. pub_core_clreq.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_CLREQ_H
+#define __PUB_CORE_CLREQ_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module defined client requests.
+//--------------------------------------------------------------------
+
+#include "pub_tool_clreq.h"
+
+// The strange choice of values here is due to historical reasons -- there
+// used to be many more internal client requests.
+typedef
+ enum {
+ /* Denote the finish of __libc_freeres_wrapper(). Also causes exit. */
+ VG_USERREQ__LIBC_FREERES_DONE = 0x3029,
+
+ /* Get the tool's malloc-wrapping functions */
+ VG_USERREQ__GET_MALLOCFUNCS = 0x3030,
+
+ /* Internal equivalent of VALGRIND_PRINTF . */
+ VG_USERREQ__INTERNAL_PRINTF = 0x3103,
+
+ } Vg_InternalClientRequest;
+
+// Function for printing from code within Valgrind, but which runs on the
+// sim'd CPU. Must be a function rather than macros so that va_list can
+// be used.
+
+int VALGRIND_INTERNAL_PRINTF(char *format, ...);
+__attribute__((format(__printf__, 1, 2)))
+__attribute__((weak))
+int VALGRIND_INTERNAL_PRINTF(char *format, ...)
+{
+ unsigned long _qzz_res = 0;
+ va_list vargs;
+ va_start(vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST(
+ _qzz_res, 0, VG_USERREQ__INTERNAL_PRINTF,
+ (unsigned long)format, (unsigned long)vargs, 0, 0, 0
+ );
+ va_end(vargs);
+ return _qzz_res;
+}
+
+
+#endif // __PUB_CORE_CLREQ_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_commandline.h.svn-base b/coregrind/.svn/text-base/pub_core_commandline.h.svn-base
new file mode 100644
index 0000000..aebfe61
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_commandline.h.svn-base
@@ -0,0 +1,47 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Command line handling. pub_core_commandline.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_COMMANDLINE_H
+#define __PUB_CORE_COMMANDLINE_H
+
+
+/* Split up the args presented by the launcher to m_main.main(), and
+ park them in VG_(args_for_client), VG_(args_for_valgrind) and
+ VG_(args_for_valgrind_extras). The latter are acquired from
+ $VALGRIND_OPTS, ./.valgrindrc and ~/.valgrindrc. */
+
+extern void VG_(split_up_argv)( Int argc, HChar** argv );
+
+
+#endif // __PUB_CORE_COMMANDLINE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_coredump.h.svn-base b/coregrind/.svn/text-base/pub_core_coredump.h.svn-base
new file mode 100644
index 0000000..e325a42
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_coredump.h.svn-base
@@ -0,0 +1,45 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Dumping core. pub_core_coredump.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_COREDUMP_H
+#define __PUB_CORE_COREDUMP_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module produces a core dump when asked.
+//--------------------------------------------------------------------
+
+extern void VG_(make_coredump) ( ThreadId tid, const vki_siginfo_t *si,
+ UInt max_size );
+
+#endif // __PUB_CORE_COREDUMP_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_cpuid.h.svn-base b/coregrind/.svn/text-base/pub_core_cpuid.h.svn-base
new file mode 100644
index 0000000..4712e71
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_cpuid.h.svn-base
@@ -0,0 +1,45 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Interface to CPUID. pub_core_cpuid.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_CPUID_H
+#define __PUB_CORE_CPUID_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module provides Valgrind's interface to the x86/amd64
+// CPUID instruction.
+//--------------------------------------------------------------------
+
+#include "pub_tool_cpuid.h"
+
+#endif // __PUB_CORE_CPUID_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_debugger.h.svn-base b/coregrind/.svn/text-base/pub_core_debugger.h.svn-base
new file mode 100644
index 0000000..9c224de
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_debugger.h.svn-base
@@ -0,0 +1,45 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Attaching a debugger. pub_core_debugger.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_DEBUGGER_H
+#define __PUB_CORE_DEBUGGER_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This simple module just deals with attaching a debugger to the
+// running program.
+//--------------------------------------------------------------------
+
+extern void VG_(start_debugger) ( ThreadId tid );
+
+#endif // __PUB_CORE_DEBUGGER_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_debuginfo.h.svn-base b/coregrind/.svn/text-base/pub_core_debuginfo.h.svn-base
new file mode 100644
index 0000000..6f1c944
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_debuginfo.h.svn-base
@@ -0,0 +1,141 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Debug info. pub_core_debuginfo.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_DEBUGINFO_H
+#define __PUB_CORE_DEBUGINFO_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module deals with reading debug info and symbol tables
+// to get file and function names, line numbers, variable types, and
+// to help stack unwinding.
+//--------------------------------------------------------------------
+
+#include "pub_tool_debuginfo.h"
+
+/* Initialise the entire module. Must be called first of all. */
+extern void VG_(di_initialise) ( void );
+
+/* LINUX: Notify the debuginfo system about a new mapping, or the
+ disappearance of such, or a permissions change on an existing
+ mapping. This is the way new debug information gets loaded. If
+ allow_SkFileV is True, it will try load debug info if the mapping
+ at 'a' belongs to Valgrind; whereas normally (False) it will not do
+ that. This allows us to carefully control when the thing will read
+ symbols from the Valgrind executable itself.
+
+ If a call to VG_(di_notify_mmap) causes debug info to be read, then
+ the returned ULong is an abstract handle which can later be used to
+ refer to the debuginfo read as a result of this specific mapping,
+ in later queries to m_debuginfo. In this case the handle value
+ will be one or above. If the returned value is zero, no debug info
+ was read. */
+#if defined(VGO_linux)
+extern ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV );
+
+extern void VG_(di_notify_munmap)( Addr a, SizeT len );
+
+extern void VG_(di_notify_mprotect)( Addr a, SizeT len, UInt prot );
+
+/* this should really return ULong, as per VG_(di_notify_mmap). */
+extern void VG_(di_notify_pdb_debuginfo)( Int fd, Addr avma,
+ SizeT total_size,
+ PtrdiffT unknown_purpose__reloc );
+#endif
+
+#if defined(VGO_aix5)
+/* AIX5: Very similar, except packaged more neatly. The supplied
+ parameters describe a code segment and its associated data segment,
+ that have recently been mapped in -- so we need to read debug info
+ for it -- or conversely, have recently been dumped, in which case
+ the relevant debug info has to be unloaded.
+
+ The returned ULong has the same meaning as documented for
+ VG_(di_notify_mmap) just above. */
+extern ULong VG_(di_aix5_notify_segchange)(
+ Addr code_start,
+ Word code_len,
+ Addr data_start,
+ Word data_len,
+ UChar* file_name,
+ UChar* mem_name,
+ Bool is_mainexe,
+ Bool acquire
+ );
+#endif
+
+extern void VG_(di_discard_ALL_debuginfo)( void );
+
+/* Like VG_(get_fnname), but it does not do C++ demangling nor Z-demangling
+ * nor below-main renaming.
+ * It should not be used for any names that will be shown to users.
+ * It should only be used in cases where the names of interest will have
+ * particular (ie. non-mangled) forms, or the mangled form is acceptable. */
+extern
+Bool VG_(get_fnname_raw) ( Addr a, Char* buf, Int nbuf );
+
+/* Like VG_(get_fnname), but without C++ demangling. (But it does
+ * Z-demangling and below-main renaming.) */
+extern
+Bool VG_(get_fnname_no_cxx_demangle) ( Addr a, Char* buf, Int nbuf );
+
+/* Use DWARF2/3 CFA information to do one step of stack unwinding. */
+extern Bool VG_(use_CF_info) ( /*MOD*/Addr* ipP,
+ /*MOD*/Addr* spP,
+ /*MOD*/Addr* fpP,
+ Addr min_accessible,
+ Addr max_accessible );
+
+/* Use MSVC FPO data to do one step of stack unwinding. */
+extern Bool VG_(use_FPO_info) ( /*MOD*/Addr* ipP,
+ /*MOD*/Addr* spP,
+ /*MOD*/Addr* fpP,
+ Addr min_accessible,
+ Addr max_accessible );
+
+/* ppc64-linux only: find the TOC pointer (R2 value) that should be in
+ force at the entry point address of the function containing
+ guest_code_addr. Returns 0 if not known. */
+extern Addr VG_(get_tocptr) ( Addr guest_code_addr );
+
+/* Map a function name to its entry point and toc pointer. Is done by
+ sequential search of all symbol tables, so is very slow. To
+ mitigate the worst performance effects, you may specify a soname
+ pattern, and only objects matching that pattern are searched.
+ Therefore specify "*" to search all the objects. On TOC-afflicted
+ platforms, a symbol is deemed to be found only if it has a nonzero
+ TOC pointer. */
+extern
+Bool VG_(lookup_symbol_SLOW)(UChar* sopatt, UChar* name, Addr* pEnt, Addr* pToc);
+
+#endif // __PUB_CORE_DEBUGINFO_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_debuglog.h.svn-base b/coregrind/.svn/text-base/pub_core_debuglog.h.svn-base
new file mode 100644
index 0000000..3a34fd7
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_debuglog.h.svn-base
@@ -0,0 +1,92 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Debug (not-for-user) logging. pub_core_debuglog.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_DEBUGLOG_H
+#define __PUB_CORE_DEBUGLOG_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module provides a low-level debug logging facility
+// that works through all the twists and turns of program startup. Is
+// is completely independent of everything, including all memory
+// facilities, and emits the debug log on file descriptor 2 (stderr).
+// This module is the first to be initialised at system startup.
+//
+// Because VG_(debugLog) does printf-style formatting, and because
+// this module depends on NO OTHERS, this module contains Valgrind's
+// vfprintf implementation too.
+//--------------------------------------------------------------------
+
+/* Gaaah! We don't want glibc dependencies, but there is no easy,
+ portable way to avoid using stdarg.h. */
+#include <stdarg.h>
+
+#include "pub_tool_basics.h" /* For definition of VG_ macro */
+
+/* There are no tool-visible exports from m_debuglog, hence no header
+ file for it. */
+/* #include "pub_tool_debuglog.h" */
+
+
+/* Module startup. */
+extern
+void VG_(debugLog_startup) ( Int level, HChar* who );
+
+
+/* Get the logging threshold level, as set by the most recent call to
+ VG_(debugLog_startup), or zero if there have been no such calls so
+ far. */
+extern
+Int VG_(debugLog_getLevel) ( void );
+
+
+/* Send debugging output. Nothing happens unless 'level'
+ does not exceed the logging threshold level. */
+extern
+__attribute__((format(__printf__, 3, 4)))
+void VG_(debugLog) ( Int level, const HChar* modulename,
+ const HChar* format, ... );
+
+
+/* A simple vprintf(). For each emitted byte, (*send) is called with
+ that byte, and 'send_arg2' as its second param. */
+extern
+UInt VG_(debugLog_vprintf) (
+ void (*send)(HChar,void*), /* byte sink */
+ void* send_arg2, /* 2nd arg for byte sink */
+ const HChar *format,
+ va_list vargs
+ );
+
+
+#endif // __PUB_CORE_DEBUGLOG_H
+
+/*--------------------------------------------------------------------*/
+/*--- end pub_core_debuglog.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_demangle.h.svn-base b/coregrind/.svn/text-base/pub_core_demangle.h.svn-base
new file mode 100644
index 0000000..bac72c0
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_demangle.h.svn-base
@@ -0,0 +1,68 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The C++ name demangler. pub_core_demangle.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_DEMANGLE_H
+#define __PUB_CORE_DEMANGLE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module exports functions for demangling C++ and
+// Z-encoded names.
+//--------------------------------------------------------------------
+
+/* This is the main, standard demangler entry point. It does two things:
+ * (1) undoes Z-encoding, if "do_z_demangle" is True;
+ * (2) undoes C++ demangling, if 'do_cxx_demangle' is True. */
+extern
+void VG_(demangle) ( Bool do_cxx_demangling, Bool do_z_demangling,
+ Char* orig, Char* result, Int result_size );
+
+/* Demangle a Z-encoded name as described in pub_tool_redir.h.
+ Z-encoded names are used by Valgrind for doing function
+ interception/wrapping.
+
+ Demangle 'sym' into its soname and fnname parts, putting them in
+ the specified buffers. Returns a Bool indicating whether the
+ demangled failed or not. A failure can occur because the prefix
+ isn't recognised, the internal Z-escaping is wrong, or because one
+ or the other (or both) of the output buffers becomes full. Passing
+ 'so' as NULL is acceptable if the caller is only interested in the
+ function name part. */
+
+extern
+Bool VG_(maybe_Z_demangle) ( const HChar* sym,
+ /*OUT*/HChar* so, Int soLen,
+ /*OUT*/HChar* fn, Int fnLen,
+ /*OUT*/Bool* isWrap );
+
+#endif // __PUB_CORE_DEMANGLE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_dispatch.h.svn-base b/coregrind/.svn/text-base/pub_core_dispatch.h.svn-base
new file mode 100644
index 0000000..e4aa11c
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_dispatch.h.svn-base
@@ -0,0 +1,97 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The dispatcher. pub_core_dispatch.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_DISPATCH_H
+#define __PUB_CORE_DISPATCH_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains the inner loop of the execution
+// mechanism, which is: find next basic block, execute it, repeat until
+// the next bb isn't found in the fast-cache; or if the current bb
+// exited with a request for some special action before continuing; or
+// if the current thread has used up its scheduling quantum.
+//--------------------------------------------------------------------
+
+#include "pub_core_dispatch_asm.h"
+
+/* This subroutine is called from the C world. It is passed
+ a pointer to the VEX guest state (arch.vex). It must run code
+ from the instruction pointer in the guest state, and exit when
+ VG_(dispatch_ctr) reaches zero, or we need to defer to the scheduler.
+ The return value must indicate why it returned back to the scheduler.
+ It can also be exited if the executing code throws a non-resumable
+ signal, for example SIGSEGV, in which case control longjmp()s back past
+ here.
+
+ If do_profiling is nonzero, the profile counters arrays should be
+ updated for each translation run.
+
+ This code simply handles the common case fast -- when the translation
+ address is found in the translation cache. For anything else, the
+ scheduler does the work.
+
+ NOTE, VG_(run_innerloop) MUST NOT BE USED for noredir translations.
+ Instead use VG_(run_a_noredir_translation).
+*/
+extern
+UWord VG_(run_innerloop) ( void* guest_state, UWord do_profiling );
+#if defined(VGA_x86) || defined(VGA_amd64)
+/* We need to locate a couple of labels inside VG_(run_innerloop), so
+ that Vex can add branches to them from generated code. Hence the
+ following somewhat bogus decls. At least on x86 and amd64. ppc32
+ and ppc64 use straightforward bl-blr to get from dispatcher to
+ translation and back and so do not need these labels. */
+extern void VG_(run_innerloop__dispatch_unprofiled);
+extern void VG_(run_innerloop__dispatch_profiled);
+#endif
+
+
+/* Run a no-redir translation. argblock points to 4 UWords, 2 to carry args
+ and 2 to carry results:
+ 0: input: ptr to translation
+ 1: input: ptr to guest state
+ 2: output: next guest PC
+ 3: output: guest state pointer afterwards (== thread return code)
+ MUST NOT BE USED for non-noredir (normal) translations.
+*/
+extern void VG_(run_a_noredir_translation) ( volatile UWord* argblock );
+#if defined(VGA_x86) || defined(VGA_amd64)
+/* We need to a label inside VG_(run_a_noredir_translation), so that
+ Vex can add branches to them from generated code. Hence the
+ following somewhat bogus decl. */
+extern void VG_(run_a_noredir_translation__return_point);
+#endif
+
+
+#endif // __PUB_CORE_DISPATCH_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_dispatch_asm.h.svn-base b/coregrind/.svn/text-base/pub_core_dispatch_asm.h.svn-base
new file mode 100644
index 0000000..c1af70a
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_dispatch_asm.h.svn-base
@@ -0,0 +1,55 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Asm-only dispatcher stuff. pub_core_dispatch_asm.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_DISPATCH_ASM_H
+#define __PUB_CORE_DISPATCH_ASM_H
+
+/* Magic values that the guest state might be set to when returning to the
+ dispatcher. The only other legitimate value is to point to the
+ start of the thread's VEX guest state. These also are return values from
+ from VG_(run_innerloop) to the scheduler.
+*/
+/* Defines values for JMP_EMWARN, JMP_SYSCALL, JMP_CLIENTREQ and
+ JMP_YIELD */
+#include "libvex_trc_values.h"
+
+/* And some more of our own. These must not have the same values as
+ those from libvex_trc_values.h. (viz, 60 or below is safe). */
+#define VG_TRC_BORING 29 /* no event; just keep going */
+#define VG_TRC_INNER_FASTMISS 37 /* TRC only; means fast-cache miss. */
+#define VG_TRC_INNER_COUNTERZERO 41 /* TRC only; means bb ctr == 0 */
+#define VG_TRC_FAULT_SIGNAL 43 /* TRC only; got sigsegv/sigbus */
+#define VG_TRC_INVARIANT_FAILED 47 /* TRC only; invariant violation */
+
+#endif // __PUB_CORE_DISPATCH_ASM_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_errormgr.h.svn-base b/coregrind/.svn/text-base/pub_core_errormgr.h.svn-base
new file mode 100644
index 0000000..6ceeb9b
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_errormgr.h.svn-base
@@ -0,0 +1,70 @@
+
+/*--------------------------------------------------------------------*/
+/*--- ErrorMgr: management of errors and suppressions. ---*/
+/*--- pub_core_errormgr.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_ERRORMGR_H
+#define __PUB_CORE_ERRORMGR_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module manages errors recording and printing,
+// which includes suppression reading and writing.
+//--------------------------------------------------------------------
+
+#include "pub_tool_errormgr.h"
+
+// These must be negative, so as to not overlap with tool error kinds.
+typedef
+ enum {
+ // Nb: thread errors are a relic of the time when Valgrind's core
+ // could detect them. This example is left as an example should new
+ // core errors ever be added.
+ ThreadErr = -1,
+ }
+ CoreErrorKind;
+
+extern void VG_(load_suppressions) ( void );
+
+extern void VG_(show_all_errors) ( void );
+
+extern void VG_(show_error_counts_as_XML) ( void );
+
+extern Bool VG_(is_action_requested) ( Char* action, Bool* clo );
+
+extern Bool VG_(showing_core_errors) ( void );
+
+extern UInt VG_(get_n_errs_found) ( void );
+
+extern void VG_(print_errormgr_stats) ( void );
+
+#endif // __PUB_CORE_ERRORMGR_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_execontext.h.svn-base b/coregrind/.svn/text-base/pub_core_execontext.h.svn-base
new file mode 100644
index 0000000..3cb2ae0
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_execontext.h.svn-base
@@ -0,0 +1,61 @@
+
+/*--------------------------------------------------------------------*/
+/*--- ExeContexts: long-lived, non-dup'd stack traces. ---*/
+/*--- pub_core_execontext.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_EXECONTEXT_H
+#define __PUB_CORE_EXECONTEXT_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module provides an abstract data type, ExeContext,
+// which is a stack trace stored in such a way that duplicates are
+// avoided. This also facilitates fast comparisons if necessary.
+//--------------------------------------------------------------------
+
+#include "pub_tool_execontext.h"
+
+/* The maximum number of calls we're prepared to save in an ExeContext. */
+#define VG_DEEPEST_BACKTRACE 50
+
+// Print stats (informational only).
+extern void VG_(print_ExeContext_stats) ( void );
+
+// Extract the StackTrace from an ExeContext.
+// (Minor hack: we use Addr* as the return type instead of StackTrace so
+// that modules #including this file don't also have to #include
+// pub_core_stacktrace.h also.)
+extern
+/*StackTrace*/Addr* VG_(get_ExeContext_StackTrace) ( ExeContext* e );
+
+
+#endif // __PUB_CORE_EXECONTEXT_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_hashtable.h.svn-base b/coregrind/.svn/text-base/pub_core_hashtable.h.svn-base
new file mode 100644
index 0000000..f0d2ab2
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_hashtable.h.svn-base
@@ -0,0 +1,48 @@
+
+/*--------------------------------------------------------------------*/
+/*--- A separately-chained hash table. pub_core_hashtable.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_HASHTABLE_H
+#define __PUB_CORE_HASHTABLE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: A generic data structure with fairly fast lookup for not too
+// many elements, eg. up to a few thousand.
+//--------------------------------------------------------------------
+
+#include "pub_tool_hashtable.h"
+
+// No core-only exports; everything in this module is visible to both
+// the core and tools.
+
+#endif // __PUB_CORE_HASHTABLE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_initimg.h.svn-base b/coregrind/.svn/text-base/pub_core_initimg.h.svn-base
new file mode 100644
index 0000000..9e9416b
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_initimg.h.svn-base
@@ -0,0 +1,181 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Create initial process image on for the client ---*/
+/*--- pub_core_initimg.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_INITIMG_H
+#define __PUB_CORE_INITIMG_H
+
+
+//--------------------------------------------------------------------
+// PURPOSE: Map the client executable into memory, then set up its
+// stack, environment and data section, ready for execution. Quite a
+// lot of work on Linux (ELF) but nearly a no-op on AIX (XCOFF) since
+// the AIX kernel does most of the work for us.
+//--------------------------------------------------------------------
+
+/* These are OS-specific and defined below. */
+typedef struct _IICreateImageInfo IICreateImageInfo;
+typedef struct _IIFinaliseImageInfo IIFinaliseImageInfo;
+
+/* This is a two stage process. The first stage, which is most of the
+ work, creates the initial image in memory to the extent possible.
+ To do this it takes a bundle of information in an IICreateImageInfo
+ structure, which is gathered in an OS-specific way at startup.
+ This returns an IIFinaliseImageInfo structure: */
+extern
+IIFinaliseImageInfo VG_(ii_create_image)( IICreateImageInfo );
+
+/* Just before starting the client, we may need to make final
+ adjustments to its initial image. Also we need to set up the VEX
+ guest state for thread 1 (the root thread) and copy in essential
+ starting values. This is handed the IIFinaliseImageInfo created by
+ VG_(ii_create_image). */
+extern
+void VG_(ii_finalise_image)( IIFinaliseImageInfo );
+
+
+/* Note that both IICreateImageInfo and IIFinaliseImageInfo are
+ OS-specific. We now go on to give instantiations of them
+ for supported OSes. */
+
+/* ------------------------- Linux ------------------------- */
+
+#if defined(VGO_linux)
+
+struct _IICreateImageInfo {
+ /* ------ Mandatory fields ------ */
+ HChar* toolname;
+ Addr sp_at_startup;
+ Addr clstack_top;
+ /* ------ Per-OS fields ------ */
+ HChar** argv;
+ HChar** envp;
+};
+
+struct _IIFinaliseImageInfo {
+ /* ------ Mandatory fields ------ */
+ SizeT clstack_max_size;
+ Addr initial_client_SP;
+ /* ------ Per-OS fields ------ */
+ Addr initial_client_IP;
+ Addr initial_client_TOC;
+ UInt* client_auxv;
+};
+
+
+/* ------------------------- AIX5 ------------------------- */
+
+#elif defined(VGO_aix5)
+
+/* First we need to define this auxiliary structure. */
+typedef
+ struct {
+ /* NOTE: VG_(ppc32/64_aix5_do_preloads_then_start_client) has
+ these offsets hardwired in. Do not change them without
+ changing it too. */
+ /* system call numbers */
+ /* 0 */ UInt nr_load; /* is __NR___loadx for 32-bit,
+ __NR_kload for 64 */
+ /* 4 */ UInt nr_kwrite;
+ /* 8 */ UInt nr__exit;
+ /* offset/length of error message, if the preloads fail */
+ /* 12 */ UInt off_errmsg;
+ /* 16 */ UInt len_errmsg;
+ /* offsets from start of this struct to the the preload file
+ names */
+ /* 20 */ UInt off_preloadcorename;
+ /* 24 */ UInt off_preloadtoolname;
+ /* 28 */ UInt off_ld_preloadname;
+ /* Once the preloading is done, we'll need to restore the guest
+ state to what it needs to be at client startup. Here's the
+ relevant info. Are ULongs; for 32-bit the data is at the
+ lsb (high addressed) end. */
+ /* 32 */ ULong client_start;
+ /* 40 */ ULong r2;
+ /* 48 */ ULong r3;
+ /* 56 */ ULong r4;
+ /* 64 */ ULong r5;
+ /* 72 */ ULong r6;
+ /* 80 */ ULong r7;
+ /* 88 */ ULong r8;
+ /* 96 */ ULong r9;
+ /* 104 */ ULong r10;
+ /* If the loading fails, we'll want to call a diagnostic
+ function in C to figure out what happened. Here's it's
+ function descriptor. Note, this runs on the simd cpu
+ (a kludge, and will segfault in 64-bit mode). */
+ /* 112 */ void* p_diagnose_load_failure;
+ }
+ AIX5PreloadPage;
+
+struct _IICreateImageInfo {
+ /* ------ Mandatory fields ------ */
+ HChar* toolname;
+ Addr sp_at_startup; /* Not used on AIX. */
+ Addr clstack_top; /* Not used on AIX. */
+ /* ------ Per-OS fields ------ */
+ /* Initial values for guest int registers (GPR0 .. GPR31, PC, CR,
+ LR, CTR, XER). Passed to us from the launcher. */
+ ULong* intregs37;
+ /* AIX5Bootblock*, really */
+ void* bootblock;
+ /* Adler32 checksum of uncompressed data of compressed page. */
+ UInt adler32_exp;
+};
+
+struct _IIFinaliseImageInfo {
+ /* ------ Mandatory fields ------ */
+ SizeT clstack_max_size;
+ /* Initial value for SP (which is merely a copy of r1's value,
+ intregs37[1]). */
+ Addr initial_client_SP;
+ /* ------ Per-OS fields ------ */
+ /* Pointer to the preload page. The preload page and this pointer
+ to it are set up by VG_(ii_create_image). */
+ AIX5PreloadPage* preloadpage;
+ /* Initial values for guest int registers (GPR0 .. GPR31, PC,
+ CR, LR, CTR, XER). Copied from the CII. */
+ ULong* intregs37;
+ /* Address of the page compressed by the launcher. */
+ Addr compressed_page;
+ /* Adler32 checksum of uncompressed data of said page. */
+ UInt adler32_exp;
+};
+
+#else
+# error "Unknown OS"
+#endif
+
+
+#endif // __PUB_CORE_INITIMG_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_libcassert.h.svn-base b/coregrind/.svn/text-base/pub_core_libcassert.h.svn-base
new file mode 100644
index 0000000..5be3516
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_libcassert.h.svn-base
@@ -0,0 +1,81 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Assertions, etc. pub_core_libcassert.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_LIBCASSERT_H
+#define __PUB_CORE_LIBCASSERT_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains all the libc code related to assertions,
+// panics and aborting.
+//--------------------------------------------------------------------
+
+#include "pub_tool_libcassert.h"
+
+// Useful for making failing stubs, when certain things haven't yet been
+// implemented.
+#define I_die_here \
+ VG_(assert_fail) (/*isCore*//*BOGUS*/True, \
+ "Unimplemented functionality", \
+ __FILE__, __LINE__, __PRETTY_FUNCTION__, \
+ "valgrind", VG_BUGS_TO, "")
+
+#define vg_assert(expr) \
+ ((void) ((expr) ? 0 : \
+ (VG_(assert_fail) (/*isCore*/True, #expr, \
+ __FILE__, __LINE__, __PRETTY_FUNCTION__, \
+ ""), \
+ 0)))
+
+#define vg_assert2(expr, format, args...) \
+ ((void) ((expr) ? 0 : \
+ (VG_(assert_fail) (/*isCore*/True, #expr, \
+ __FILE__, __LINE__, __PRETTY_FUNCTION__, \
+ format, ##args), \
+ 0)))
+
+__attribute__ ((__noreturn__))
+extern void VG_(core_panic) ( Char* str );
+__attribute__ ((__noreturn__))
+extern void VG_(core_panic_at) ( Char* str,
+ Addr ip, Addr sp, Addr fp, Addr lr );
+
+/* Called when some unhandleable client behaviour is detected.
+ Prints a msg and aborts. */
+extern void VG_(unimplemented) ( Char* msg )
+ __attribute__((__noreturn__));
+
+/* Show the state of all threads. Mostly for debugging V. */
+extern void VG_(show_sched_status) ( void );
+
+#endif // __PUB_CORE_LIBCASSERT_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_libcbase.h.svn-base b/coregrind/.svn/text-base/pub_core_libcbase.h.svn-base
new file mode 100644
index 0000000..d8d5f17
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_libcbase.h.svn-base
@@ -0,0 +1,46 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Standalone libc stuff. pub_core_libcbase.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_LIBCBASE_H
+#define __PUB_CORE_LIBCBASE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains all the libc code that is entirely
+// standalone (other than the VG_() macro and some types defined
+// elsewhere): string functions, char functions, and a few other things.
+//--------------------------------------------------------------------
+
+#include "pub_tool_libcbase.h"
+
+#endif // __PUB_CORE_LIBCBASE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_libcfile.h.svn-base b/coregrind/.svn/text-base/pub_core_libcfile.h.svn-base
new file mode 100644
index 0000000..310a5d7
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_libcfile.h.svn-base
@@ -0,0 +1,97 @@
+
+/*--------------------------------------------------------------------*/
+/*--- File/socket-related libc stuff. pub_core_libcfile.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_LIBCFILE_H
+#define __PUB_CORE_LIBCFILE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains all the libc code that relates to
+// files and sockets: opening, reading, writing, etc.
+// To use, you must first include: pub_core_vki.h
+//--------------------------------------------------------------------
+
+#include "pub_tool_libcfile.h"
+
+/* Move an fd into the Valgrind-safe range */
+extern Int VG_(safe_fd) ( Int oldfd );
+extern Int VG_(fcntl) ( Int fd, Int cmd, Int arg );
+
+/* Convert an fd into a filename */
+extern Bool VG_(resolve_filename) ( Int fd, HChar* buf, Int n_buf );
+
+/* Return the size of a file, or -1 in case of error */
+extern Long VG_(fsize) ( Int fd );
+
+/* Is the file a directory? */
+extern Bool VG_(is_dir) ( HChar* f );
+
+/* Default destination port to be used in logging over a network, if
+ none specified. */
+#define VG_CLO_DEFAULT_LOGPORT 1500
+
+extern UInt VG_(htonl) ( UInt x );
+extern UInt VG_(ntohl) ( UInt x );
+extern UShort VG_(htons) ( UShort x );
+extern UShort VG_(ntohs) ( UShort x );
+
+extern Int VG_(write_socket)( Int sd, void *msg, Int count );
+extern Int VG_(connect_via_socket)( UChar* str );
+extern Int VG_(getsockname) ( Int sd, struct vki_sockaddr *name, Int *namelen );
+extern Int VG_(getpeername) ( Int sd, struct vki_sockaddr *name, Int *namelen );
+extern Int VG_(getsockopt) ( Int sd, Int level, Int optname, void *optval,
+ Int *optlen );
+
+extern Int VG_(access) ( HChar* path, Bool irusr, Bool iwusr, Bool ixusr );
+
+/* Is the file executable? Returns: 0 = success, non-0 is failure */
+extern Int VG_(check_executable)(/*OUT*/Bool* is_setuid,
+ HChar* f, Bool allow_setuid);
+
+extern SysRes VG_(pread) ( Int fd, void* buf, Int count, OffT offset );
+
+/* Create and open (-rw------) a tmp file name incorporating said arg.
+ Returns -1 on failure, else the fd of the file. If fullname is
+ non-NULL, the file's name is written into it. The number of bytes
+ written is guaranteed not to exceed 64+strlen(part_of_name). */
+extern Int VG_(mkstemp) ( HChar* part_of_name, /*OUT*/HChar* fullname );
+
+/* Record the process' working directory at startup. Is intended to
+ be called exactly once, at startup, before the working directory
+ changes. Return True for success, False for failure, so that the
+ caller can bomb out suitably without creating module cycles if
+ there is a problem. The saved value can later be acquired by
+ calling VG_(get_startup_wd) (in pub_tool_libcfile.h). */
+extern Bool VG_(record_startup_wd) ( void );
+
+#endif // __PUB_CORE_LIBCFILE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_libcprint.h.svn-base b/coregrind/.svn/text-base/pub_core_libcprint.h.svn-base
new file mode 100644
index 0000000..3b3460c
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_libcprint.h.svn-base
@@ -0,0 +1,55 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Printing libc stuff. pub_core_libcprint.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_LIBCPRINT_H
+#define __PUB_CORE_LIBCPRINT_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains all the libc code that is related to
+// higher-level (ie. higher than DebugLog) printing, eg. VG_(printf)().
+//--------------------------------------------------------------------
+
+#include "pub_tool_libcprint.h"
+
+/* Tell the logging mechanism whether we are logging to a file
+ descriptor or a socket descriptor. */
+extern Bool VG_(logging_to_socket);
+
+/* Get the elapsed wallclock time since startup into buf, which must
+ 16 chars long. This is unchecked. It also relies on the
+ millisecond timer having been set to zero by an initial read in
+ m_main during startup. */
+void VG_(elapsed_wallclock_time) ( /*OUT*/HChar* buf );
+
+#endif // __PUB_CORE_LIBCPRINT_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_libcproc.h.svn-base b/coregrind/.svn/text-base/pub_core_libcproc.h.svn-base
new file mode 100644
index 0000000..68f8958
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_libcproc.h.svn-base
@@ -0,0 +1,91 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Process-related libc stuff. pub_core_libcproc.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_LIBCPROC_H
+#define __PUB_CORE_LIBCPROC_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains libc code related to the process.
+// It's a bit of a mixed bag.
+//--------------------------------------------------------------------
+
+#include "config.h" // Crucial: ensure we get ENABLE_INNER
+#include "pub_tool_libcproc.h"
+
+/* The directory we look for all our auxillary files in. Useful for
+ running Valgrind out of a build tree without having to do "make
+ install". Inner valgrinds require a different lib variable, else
+ they end up picking up .so's etc intended for the outer
+ valgrind. */
+#ifdef ENABLE_INNER
+# define VALGRIND_LIB "VALGRIND_LIB_INNER"
+#else
+# define VALGRIND_LIB "VALGRIND_LIB"
+#endif
+
+/* Additional command-line arguments; they are overridden by actual
+ command-line option. Each argument is separated by spaces. There
+ is no quoting mechanism. */
+#define VALGRIND_OPTS "VALGRIND_OPTS"
+
+/* The full name of Valgrind's stage1 (launcher) executable. This is
+ set by stage1 and read by stage2, and is used for recursive
+ invocations of Valgrind on child processes.
+
+ For self-hosting, the inner and outer Valgrinds must use different
+ names to avoid collisions. */
+#ifdef ENABLE_INNER
+# define VALGRIND_LAUNCHER "VALGRIND_LAUNCHER_INNER"
+#else
+# define VALGRIND_LAUNCHER "VALGRIND_LAUNCHER"
+#endif
+
+
+// Environment manipulations
+extern Char **VG_(env_setenv) ( Char ***envp, const Char* varname,
+ const Char *val );
+extern void VG_(env_unsetenv) ( Char **env, const Char *varname );
+extern void VG_(env_remove_valgrind_env_stuff) ( Char** env );
+extern Char **VG_(env_clone) ( Char **env_clone );
+
+// misc
+extern Int VG_(getgroups)( Int size, UInt* list );
+extern Int VG_(ptrace)( Int request, Int pid, void *addr, void *data );
+
+// atfork
+extern void VG_(do_atfork_pre) ( ThreadId tid );
+extern void VG_(do_atfork_parent) ( ThreadId tid );
+extern void VG_(do_atfork_child) ( ThreadId tid );
+
+#endif // __PUB_CORE_LIBCPROC_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_libcsignal.h.svn-base b/coregrind/.svn/text-base/pub_core_libcsignal.h.svn-base
new file mode 100644
index 0000000..9e8ebe7
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_libcsignal.h.svn-base
@@ -0,0 +1,86 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Signal-related libc stuff. pub_core_libcsignal.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_LIBCSIGNAL_H
+#define __PUB_CORE_LIBCSIGNAL_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains all the libc code related to signals.
+//--------------------------------------------------------------------
+
+#include "pub_tool_libcsignal.h"
+
+/* Note that these use the vki_ (kernel) structure
+ definitions, which are different in places from those that glibc
+ defines. Since we're operating right at the kernel interface, glibc's view
+ of the world is entirely irrelevant. */
+
+/* --- Signal set ops --- */
+extern Int VG_(sigfillset) ( vki_sigset_t* set );
+extern Int VG_(sigemptyset) ( vki_sigset_t* set );
+
+extern Bool VG_(isfullsigset) ( const vki_sigset_t* set );
+extern Bool VG_(isemptysigset) ( const vki_sigset_t* set );
+extern Bool VG_(iseqsigset) ( const vki_sigset_t* set1,
+ const vki_sigset_t* set2 );
+
+extern Int VG_(sigaddset) ( vki_sigset_t* set, Int signum );
+extern Int VG_(sigdelset) ( vki_sigset_t* set, Int signum );
+extern Int VG_(sigismember) ( const vki_sigset_t* set, Int signum );
+
+extern void VG_(sigaddset_from_set) ( vki_sigset_t* dst, vki_sigset_t* src );
+extern void VG_(sigdelset_from_set) ( vki_sigset_t* dst, vki_sigset_t* src );
+
+/* --- Mess with the kernel's sig state --- */
+/* VG_(sigprocmask) is in pub_tool_libcsignal.h. */
+
+extern Int VG_(sigaction) ( Int signum,
+ const struct vki_sigaction* act,
+ struct vki_sigaction* oldact );
+
+extern Int VG_(kill) ( Int pid, Int signo );
+extern Int VG_(tkill) ( ThreadId tid, Int signo );
+
+/* A cut-down version of POSIX sigtimedwait: poll for pending signals
+ mentioned in the sigset_t, and if any are present, select one
+ arbitrarily, return its number (which must be > 0), and put
+ auxiliary info about it in the siginfo_t, and make it
+ not-pending-any-more. If none are pending, return zero. The _zero
+ refers to the fact that there is zero timeout, so if no signals are
+ pending it returns immediately. Perhaps a better name would be
+ 'sigpoll'. Returns -1 on error, 0 if no signals pending, and n > 0
+ if signal n was selected. */
+extern Int VG_(sigtimedwait_zero)( const vki_sigset_t *, vki_siginfo_t * );
+
+#endif // __PUB_CORE_LIBCSIGNAL_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_machine.h.svn-base b/coregrind/.svn/text-base/pub_core_machine.h.svn-base
new file mode 100644
index 0000000..5da26dc
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_machine.h.svn-base
@@ -0,0 +1,182 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Machine-related things. pub_core_machine.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_MACHINE_H
+#define __PUB_CORE_MACHINE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains code related to the particular
+// architecture, things like accessing guest state, endianness, word size,
+// etc.
+//--------------------------------------------------------------------
+
+#include "pub_tool_machine.h"
+
+#if defined(VGP_x86_linux)
+# define VG_ELF_DATA2XXX ELFDATA2LSB
+# define VG_ELF_MACHINE EM_386
+# define VG_ELF_CLASS ELFCLASS32
+# undef VG_PLAT_USES_PPCTOC
+#elif defined(VGP_amd64_linux)
+# define VG_ELF_DATA2XXX ELFDATA2LSB
+# define VG_ELF_MACHINE EM_X86_64
+# define VG_ELF_CLASS ELFCLASS64
+# undef VG_PLAT_USES_PPCTOC
+#elif defined(VGP_ppc32_linux)
+# define VG_ELF_DATA2XXX ELFDATA2MSB
+# define VG_ELF_MACHINE EM_PPC
+# define VG_ELF_CLASS ELFCLASS32
+# undef VG_PLAT_USES_PPCTOC
+#elif defined(VGP_ppc64_linux)
+# define VG_ELF_DATA2XXX ELFDATA2MSB
+# define VG_ELF_MACHINE EM_PPC64
+# define VG_ELF_CLASS ELFCLASS64
+# define VG_PLAT_USES_PPCTOC 1
+#elif defined(VGO_aix5)
+# undef VG_ELF_DATA2XXX
+# undef VG_ELF_MACHINE
+# undef VG_ELF_CLASS
+# define VG_PLAT_USES_PPCTOC 1
+#else
+# error Unknown platform
+#endif
+
+#if defined(VGA_x86)
+# define VG_INSTR_PTR guest_EIP
+# define VG_STACK_PTR guest_ESP
+# define VG_FRAME_PTR guest_EBP
+#elif defined(VGA_amd64)
+# define VG_INSTR_PTR guest_RIP
+# define VG_STACK_PTR guest_RSP
+# define VG_FRAME_PTR guest_RBP
+#elif defined(VGA_ppc32)
+# define VG_INSTR_PTR guest_CIA
+# define VG_STACK_PTR guest_GPR1
+# define VG_FRAME_PTR guest_GPR1 // No frame ptr for PPC
+#elif defined(VGA_ppc64)
+# define VG_INSTR_PTR guest_CIA
+# define VG_STACK_PTR guest_GPR1
+# define VG_FRAME_PTR guest_GPR1 // No frame ptr for PPC
+#else
+# error Unknown arch
+#endif
+
+
+// Offsets for the Vex state
+#define VG_O_STACK_PTR (offsetof(VexGuestArchState, VG_STACK_PTR))
+
+
+//-------------------------------------------------------------
+/* Details about the capabilities of the underlying (host) CPU. These
+ details are acquired by (1) enquiring with the CPU at startup, or
+ (2) from the AT_SYSINFO entries the kernel gave us (ppc cache
+ line size). It's a bit nasty in the sense that there's no obvious
+ way to stop uses of some of this info before it's ready to go.
+
+ Current dependencies are:
+
+ x86: initially: call VG_(machine_get_hwcaps)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ and VG_(machine_x86_have_mxcsr)
+ -------------
+ amd64: initially: call VG_(machine_get_hwcaps)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ -------------
+ ppc32: initially: call VG_(machine_get_hwcaps)
+ call VG_(machine_ppc32_set_clszB)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ and VG_(machine_ppc32_has_FP)
+ and VG_(machine_ppc32_has_VMX)
+ -------------
+ ppc64: initially: call VG_(machine_get_hwcaps)
+ call VG_(machine_ppc64_set_clszB)
+
+ then safe to use VG_(machine_get_VexArchInfo)
+ and VG_(machine_ppc64_has_VMX)
+
+ VG_(machine_get_hwcaps) may use signals (although it attempts to
+ leave signal state unchanged) and therefore should only be
+ called before m_main sets up the client's signal state.
+*/
+
+/* Determine what insn set and insn set variant the host has, and
+ record it. To be called once at system startup. Returns False if
+ this a CPU incapable of running Valgrind. */
+extern Bool VG_(machine_get_hwcaps)( void );
+
+/* Fetch host cpu info, as per above comment. */
+extern void VG_(machine_get_VexArchInfo)( /*OUT*/VexArch*,
+ /*OUT*/VexArchInfo* );
+
+/* Notify host cpu cache line size, as per above comment. */
+#if defined(VGA_ppc32)
+extern void VG_(machine_ppc32_set_clszB)( Int );
+#endif
+
+#if defined(VGA_ppc64)
+extern void VG_(machine_ppc64_set_clszB)( Int );
+#endif
+
+/* X86: set to 1 if the host is able to do {ld,st}mxcsr (load/store
+ the SSE control/status register), else zero. Is referenced from
+ assembly code, so do not change from a 32-bit int. */
+#if defined(VGA_x86)
+extern UInt VG_(machine_x86_have_mxcsr);
+#endif
+
+/* PPC32: set to 1 if FP instructions are supported in user-space,
+ else 0. Is referenced from assembly code, so do not change from a
+ 32-bit int. */
+#if defined(VGA_ppc32)
+extern UInt VG_(machine_ppc32_has_FP);
+#endif
+
+/* PPC32: set to 1 if Altivec instructions are supported in
+ user-space, else 0. Is referenced from assembly code, so do not
+ change from a 32-bit int. */
+#if defined(VGA_ppc32)
+extern UInt VG_(machine_ppc32_has_VMX);
+#endif
+
+/* PPC64: set to 1 if Altivec instructions are supported in
+ user-space, else 0. Is referenced from assembly code, so do not
+ change from a 64-bit int. */
+#if defined(VGA_ppc64)
+extern ULong VG_(machine_ppc64_has_VMX);
+#endif
+
+#endif // __PUB_CORE_MACHINE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_mallocfree.h.svn-base b/coregrind/.svn/text-base/pub_core_mallocfree.h.svn-base
new file mode 100644
index 0000000..8281e4d
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_mallocfree.h.svn-base
@@ -0,0 +1,117 @@
+
+/*--------------------------------------------------------------------*/
+/*--- High-level memory management. pub_core_mallocfree.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_MALLOCFREE_H
+#define __PUB_CORE_MALLOCFREE_H
+
+#include "pub_tool_mallocfree.h"
+
+//--------------------------------------------------------------------
+// PURPOSE: high-level memory allocation (malloc/free), for the core and
+// tools.
+//--------------------------------------------------------------------
+
+/* Allocation arenas.
+
+ CORE for the core's general use.
+ TOOL for the tool to use (and the only one it uses).
+ DINFO for debug info (symbols, line #s, CFI, etc) storage.
+ CLIENT for the client's mallocs/frees, if the tool replaces glibc's
+ malloc() et al -- redzone size is chosen by the tool.
+ DEMANGLE for the C++ demangler.
+ EXECTXT for storing ExeContexts.
+ ERRORS for storing CoreErrors.
+ TTAUX for storing TT/TC auxiliary structures (address range
+ equivalence classes).
+
+ When adding a new arena, remember also to add it to ensure_mm_init().
+*/
+typedef Int ArenaId;
+
+#define VG_N_ARENAS 8
+
+#define VG_AR_CORE 0
+#define VG_AR_TOOL 1
+#define VG_AR_DINFO 2
+#define VG_AR_CLIENT 3
+#define VG_AR_DEMANGLE 4
+#define VG_AR_EXECTXT 5
+#define VG_AR_ERRORS 6
+#define VG_AR_TTAUX 7
+
+// This is both the minimum payload size of a malloc'd block, and its
+// minimum alignment. Must be a power of 2 greater than 4, and should be
+// greater than 8.
+#define VG_MIN_MALLOC_SZB 8
+
+/* This struct definition MUST match the system one. */
+/* SVID2/XPG mallinfo structure */
+struct vg_mallinfo {
+ int arena; /* total space allocated from system */
+ int ordblks; /* number of non-inuse chunks */
+ int smblks; /* unused -- always zero */
+ int hblks; /* number of mmapped regions */
+ int hblkhd; /* total space in mmapped regions */
+ int usmblks; /* unused -- always zero */
+ int fsmblks; /* unused -- always zero */
+ int uordblks; /* total allocated space */
+ int fordblks; /* total non-inuse space */
+ int keepcost; /* top-most, releasable (via malloc_trim) space */
+};
+
+extern void* VG_(arena_malloc) ( ArenaId arena, HChar* cc, SizeT nbytes );
+extern void VG_(arena_free) ( ArenaId arena, void* ptr );
+extern void* VG_(arena_calloc) ( ArenaId arena, HChar* cc,
+ SizeT nmemb, SizeT bytes_per_memb );
+extern void* VG_(arena_realloc) ( ArenaId arena, HChar* cc,
+ void* ptr, SizeT size );
+extern void* VG_(arena_memalign)( ArenaId aid, HChar* cc,
+ SizeT req_alignB, SizeT req_pszB );
+extern Char* VG_(arena_strdup) ( ArenaId aid, HChar* cc,
+ const Char* s);
+
+// Nb: The ThreadId doesn't matter, it's not used.
+extern SizeT VG_(arena_payload_szB) ( ThreadId tid, ArenaId aid, void* payload );
+
+extern SizeT VG_(arena_malloc_usable_size) ( ArenaId aid, void* payload );
+
+extern void VG_(mallinfo) ( ThreadId tid, struct vg_mallinfo* mi );
+
+extern void VG_(sanity_check_malloc_all) ( void );
+
+extern void VG_(print_all_arena_stats) ( void );
+
+extern void VG_(print_arena_cc_analysis) ( void );
+
+#endif // __PUB_CORE_MALLOCFREE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_options.h.svn-base b/coregrind/.svn/text-base/pub_core_options.h.svn-base
new file mode 100644
index 0000000..486cf94
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_options.h.svn-base
@@ -0,0 +1,202 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Command line options. pub_core_options.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_OPTIONS_H
+#define __PUB_CORE_OPTIONS_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module holds the variables for all command line options,
+// plus some functions and macros for manipulating them. Almost every
+// other module imports this one, if only for VG_(clo_verbosity).
+//--------------------------------------------------------------------
+
+#include "pub_tool_options.h"
+
+/* The max number of suppression files. */
+#define VG_CLO_MAX_SFILES 100
+
+/* Should we stop collecting errors if too many appear? default: YES */
+extern Bool VG_(clo_error_limit);
+/* Alternative exit code to hand to parent if errors were found.
+ default: 0 (no, return the application's exit code in the normal
+ way.) */
+extern Int VG_(clo_error_exitcode);
+/* Enquire about whether to attach to a debugger at errors? default: NO */
+extern Bool VG_(clo_db_attach);
+/* The debugger command? default: whatever gdb ./configure found */
+extern Char* VG_(clo_db_command);
+/* Generating a suppression for each error? default: 0 (NO)
+ Other values: 1 (yes, but ask user), 2 (yes, don't ask user) */
+extern Int VG_(clo_gen_suppressions);
+/* Sanity-check level: 0 = none, 1 (default), > 1 = expensive. */
+extern Int VG_(clo_sanity_level);
+/* Automatically attempt to demangle C++ names? default: YES */
+extern Bool VG_(clo_demangle);
+/* Simulate child processes? default: NO */
+extern Bool VG_(clo_trace_children);
+/* After a fork, the child's output can become confusingly
+ intermingled with the parent's output. This is especially
+ problematic when VG_(clo_xml) is True. Setting
+ VG_(clo_child_silent_after_fork) causes children to fall silent
+ after fork() calls. */
+extern Bool VG_(clo_child_silent_after_fork);
+
+/* Where logging output is to be sent to.
+
+ With --log-fd (and by default), clo_log_fd holds the file id, and is
+ taken from the command line. (fd 2, stderr, is the default.)
+ clo_log_name is irrelevant.
+
+ With --log-file, clo_log_name holds the log-file name, and is taken from
+ the command line (and possibly has process ID/env var contents in it, if
+ the %p or %q format specifiers are used). clo_log_fd is then made to
+ hold the relevant file id, by opening clo_log_name (concatenated with the
+ process ID) for writing.
+
+ With --log-socket, clo_log_name holds the hostname:portnumber pair,
+ and is taken from the command line. clo_log_fd is then made to hold
+ the relevant file handle, by opening a connection to that
+ hostname:portnumber pair.
+
+ Global default is to set log_to == VgLogTo_Fd and log_fd == 2
+ (stderr). */
+extern Int VG_(clo_log_fd);
+extern Char* VG_(clo_log_name);
+
+/* Add timestamps to log messages? default: NO */
+extern Bool VG_(clo_time_stamp);
+
+/* The file descriptor to read for input. default: 0 == stdin */
+extern Int VG_(clo_input_fd);
+/* The number of suppression files specified. */
+extern Int VG_(clo_n_suppressions);
+/* The names of the suppression files. */
+extern Char* VG_(clo_suppressions)[VG_CLO_MAX_SFILES];
+
+/* DEBUG: print generated code? default: 00000000 ( == NO ) */
+extern UChar VG_(clo_trace_flags);
+/* DEBUG: do bb profiling? default: 00000000 ( == NO ) */
+extern UChar VG_(clo_profile_flags);
+/* DEBUG: if tracing codegen, be quiet until after this bb ( 0 ) */
+extern Int VG_(clo_trace_notbelow);
+/* DEBUG: print system calls? default: NO */
+extern Bool VG_(clo_trace_syscalls);
+/* DEBUG: print signal details? default: NO */
+extern Bool VG_(clo_trace_signals);
+/* DEBUG: print symtab details? default: NO */
+extern Bool VG_(clo_trace_symtab);
+/* DEBUG: restrict symtab etc details to object name pattern. Default: "*" */
+extern HChar* VG_(clo_trace_symtab_patt);
+/* DEBUG: print call-frame-info details? default: NO */
+extern Bool VG_(clo_trace_cfi);
+/* DEBUG: mimic /usr/bin/readelf --syms? default: NO */
+extern Bool VG_(clo_debug_dump_syms);
+/* DEBUG: mimic /usr/bin/readelf --debug-dump=line? default: NO */
+extern Bool VG_(clo_debug_dump_line);
+/* DEBUG: mimic /usr/bin/readelf --debug-dump=frames? default: NO */
+extern Bool VG_(clo_debug_dump_frames);
+/* DEBUG: print redirection details? default: NO */
+extern Bool VG_(clo_trace_redir);
+/* DEBUG: print thread scheduling events? default: NO */
+extern Bool VG_(clo_trace_sched);
+/* DEBUG: do heap profiling? default: NO */
+extern Bool VG_(clo_profile_heap);
+/* DEBUG: display gory details for the k'th most popular error.
+ default: Infinity. */
+extern Int VG_(clo_dump_error);
+/* Engage miscellaneous weird hacks needed for some progs. */
+extern Char* VG_(clo_sim_hints);
+/* Show symbols in the form 'name+offset' ? Default: NO */
+extern Bool VG_(clo_sym_offsets);
+/* Read DWARF3 variable info even if tool doesn't ask for it? */
+extern Bool VG_(clo_read_var_info);
+
+/* Track open file descriptors? */
+extern Bool VG_(clo_track_fds);
+
+/* Should we run __libc_freeres at exit? Sometimes causes crashes.
+ Default: YES. Note this is subservient to VG_(needs).libc_freeres;
+ if the latter says False, then the setting of VG_(clo_run_libc_freeres)
+ is ignored. Ie if a tool says no, I don't want this to run, that
+ cannot be overridden from the command line. */
+extern Bool VG_(clo_run_libc_freeres);
+
+/* Should we show VEX emulation warnings? Default: NO */
+extern Bool VG_(clo_show_emwarns);
+
+/* How much does the stack pointer have to change before tools
+ consider a stack switch to have happened? Default: 2000000 bytes
+ NB: must be host-word-sized to be correct (hence Word). */
+extern Word VG_(clo_max_stackframe);
+/* How large should Valgrind allow the primary thread's guest stack to
+ be? */
+extern Word VG_(clo_main_stacksize);
+
+/* Delay startup to allow GDB to be attached? Default: NO */
+extern Bool VG_(clo_wait_for_gdb);
+
+/* To what extent should self-checking translations be made? These
+ are needed to deal with self-modifying code on uncooperative
+ platforms. */
+typedef
+ enum {
+ Vg_SmcNone, // never generate self-checking translations
+ Vg_SmcStack, // generate s-c-t's for code found in stacks
+ // (this is the default)
+ Vg_SmcAll // make all translations self-checking.
+ }
+ VgSmc;
+
+/* Describe extent to which self-modifying-code should be
+ auto-detected. */
+extern VgSmc VG_(clo_smc_check);
+
+/* String containing comma-separated names of minor kernel variants,
+ so they can be properly handled by m_syswrap. */
+extern HChar* VG_(clo_kernel_variant);
+
+/* --------- Functions --------- */
+
+/* Call this if the executable is missing. This function prints an
+ error message, then shuts down the entire system. */
+__attribute__((noreturn))
+extern void VG_(err_missing_prog) ( void );
+
+/* Similarly - complain and stop if there is some kind of config
+ error. */
+__attribute__((noreturn))
+extern void VG_(err_config_error) ( Char* msg );
+
+
+#endif // __PUB_CORE_OPTIONS_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_oset.h.svn-base b/coregrind/.svn/text-base/pub_core_oset.h.svn-base
new file mode 100644
index 0000000..d178b7c
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_oset.h.svn-base
@@ -0,0 +1,49 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An ordered set implementation. pub_core_oset.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_OSET_H
+#define __PUB_CORE_OSET_H
+
+//--------------------------------------------------------------------
+// PURPOSE: A generic data structure with fast (eg. amortised log(n) or
+// better) insertion, lookup and deletion of elements. It does not allow
+// duplicates.
+//--------------------------------------------------------------------
+
+#include "pub_tool_oset.h"
+
+// No core-only exports; everything in this module is visible to both
+// the core and tools.
+
+#endif // __PUB_CORE_OSET_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_redir.h.svn-base b/coregrind/.svn/text-base/pub_core_redir.h.svn-base
new file mode 100644
index 0000000..c993c27
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_redir.h.svn-base
@@ -0,0 +1,125 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Function replacement and wrapping. pub_core_redir.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_REDIR_H
+#define __PUB_CORE_REDIR_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module deals with:
+// - code replacement: intercepting calls to client functions, and
+// pointing them to a different piece of code.
+// - loading notification: telling the core where certain client-space
+// functions are when they get loaded.
+// - function wrapping: add calls to code before and after client
+// functions execute, for inspection and/or modification.
+//--------------------------------------------------------------------
+
+#include "pub_tool_redir.h"
+
+
+//--------------------------------------------------------------------
+// Notifications - by which we are told of state changes
+//--------------------------------------------------------------------
+
+/* Notify the module of a new DebugInfo (called from m_debuginfo). */
+extern void VG_(redir_notify_new_DebugInfo)( DebugInfo* );
+
+/* Notify the module of the disappearance of a DebugInfo (also called
+ from m_debuginfo). */
+extern void VG_(redir_notify_delete_DebugInfo)( DebugInfo* );
+
+/* Initialise the module, and load initial "hardwired" redirects. */
+extern void VG_(redir_initialise)( void );
+
+
+//--------------------------------------------------------------------
+// Queries
+//--------------------------------------------------------------------
+
+/* This is the crucial redirection function. It answers the question:
+ should this code address be redirected somewhere else? It's used
+ just before translating a basic block. If a redir is found,
+ *isWrap allows one to distinguish wrap- from replace- style
+ redirections. */
+extern Addr VG_(redir_do_lookup) ( Addr orig, Bool* isWrap );
+
+
+//--------------------------------------------------------------------
+// Loading notification
+//--------------------------------------------------------------------
+
+/* Functions named with this macro have the property that the core will
+ be told what their address is when they are loaded. This can be useful
+ if the core wants to call them at some point, and so needs to know their
+ address. This is a weaker but more general mechanism than code
+ replacement.
+
+ Functions named with this macro should be in client space, ie. in
+ vgpreload_<tool>.h or vgpreload_core.h. */
+
+#define VG_NOTIFY_ON_LOAD(name) _vgnU_##name
+#define VG_NOTIFY_ON_LOAD_PREFIX "_vgnU_"
+#define VG_NOTIFY_ON_LOAD_PREFIX_LEN 6
+
+
+//--------------------------------------------------------------------
+// Function wrapping
+//--------------------------------------------------------------------
+
+// This is currently not working(?) --njn
+
+/* Wrapping machinery */
+//enum return_type {
+ // RT_RETURN,
+ // RT_LONGJMP,
+ // RT_EXIT,
+ //};
+//
+//typedef struct _FuncWrapper FuncWrapper;
+//struct _FuncWrapper {
+ // void *(*before)(va_list args);
+ // void (*after) (void *nonce, enum return_type, Word retval);
+ //};
+//
+//extern void VG_(wrap_function)(Addr eip, const FuncWrapper *wrapper);
+//extern const FuncWrapper *VG_(is_wrapped)(Addr eip);
+//extern Bool VG_(is_wrapper_return)(Addr eip);
+
+/* Primary interface for adding wrappers for client-side functions. */
+//extern CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+// const FuncWrapper *wrapper);
+//
+//extern Bool VG_(is_resolved)(const CodeRedirect *redir);
+
+#endif // __PUB_CORE_REDIR_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_replacemalloc.h.svn-base b/coregrind/.svn/text-base/pub_core_replacemalloc.h.svn-base
new file mode 100644
index 0000000..1561c20
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_replacemalloc.h.svn-base
@@ -0,0 +1,61 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Replacing malloc. pub_core_replacemalloc.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_REPLACEMALLOC_H
+#define __PUB_CORE_REPLACEMALLOC_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module provides a short-cut for tools that need to
+// replace malloc/free with their own versions.
+//--------------------------------------------------------------------
+
+#include "pub_tool_replacemalloc.h"
+
+// things vg_replace_malloc.o needs to know about
+struct vg_mallocfunc_info {
+ void* (*tl_malloc) (ThreadId tid, SizeT n);
+ void* (*tl___builtin_new) (ThreadId tid, SizeT n);
+ void* (*tl___builtin_vec_new) (ThreadId tid, SizeT n);
+ void* (*tl_memalign) (ThreadId tid, SizeT align, SizeT n);
+ void* (*tl_calloc) (ThreadId tid, SizeT nmemb, SizeT n);
+ void (*tl_free) (ThreadId tid, void* p);
+ void (*tl___builtin_delete) (ThreadId tid, void* p);
+ void (*tl___builtin_vec_delete)(ThreadId tid, void* p);
+ void* (*tl_realloc) (ThreadId tid, void* p, SizeT size);
+ SizeT (*tl_malloc_usable_size) (ThreadId tid, void* payload);
+ void (*mallinfo) (ThreadId tid, struct vg_mallinfo* mi);
+ Bool clo_trace_malloc;
+};
+
+#endif // __PUB_CORE_REPLACEMALLOC_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_scheduler.h.svn-base b/coregrind/.svn/text-base/pub_core_scheduler.h.svn-base
new file mode 100644
index 0000000..029472b
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_scheduler.h.svn-base
@@ -0,0 +1,101 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The scheduler. pub_core_scheduler.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SCHEDULER_H
+#define __PUB_CORE_SCHEDULER_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module is the scheduler, which is the main loop
+// controlling the running of all the program's threads.
+// It's at the centre of everything.
+//--------------------------------------------------------------------
+
+/* Allocate a new ThreadState */
+extern ThreadId VG_(alloc_ThreadState)(void);
+
+/* A thread exits. tid must currently be running. */
+extern void VG_(exit_thread)(ThreadId tid);
+
+/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
+ out of the syscall and onto doing the next thing, whatever that is.
+ If it isn't blocked in a syscall, has no effect on the thread. */
+extern void VG_(get_thread_out_of_syscall)(ThreadId tid);
+
+/* Nuke all threads except tid. */
+extern void VG_(nuke_all_threads_except) ( ThreadId me,
+ VgSchedReturnCode reason );
+
+/* Make a thread the running thread. The thread must previously been
+ sleeping, and not holding the CPU lock. This will set the
+ thread state to VgTs_Runnable, and the thread will attempt to take
+ the CPU lock. By the time it returns, tid will be the running
+ thread. */
+extern void VG_(acquire_BigLock) ( ThreadId tid, HChar* who );
+
+/* Set a thread into a sleeping state. Before the call, the thread
+ must be runnable, and holding the CPU lock. When this call
+ returns, the thread will be set to the specified sleeping state,
+ and will not be holding the CPU lock. Note that another
+ thread could be running by the time this call returns, so the
+ caller must be careful not to touch any shared state. It is also
+ the caller's responsibility to actually block until the thread is
+ ready to run again. */
+extern void VG_(release_BigLock) ( ThreadId tid, ThreadStatus state, HChar* who );
+
+/* Yield the CPU for a while */
+extern void VG_(vg_yield)(void);
+
+// The scheduler.
+extern VgSchedReturnCode VG_(scheduler) ( ThreadId tid );
+
+// Initialise, phase 1. Zero out VG_(threads), decide on the root
+// ThreadId and initialise the bigLock.
+extern ThreadId VG_(scheduler_init_phase1) ( void );
+
+// Initialise, phase 2. Is passed the extent of the root thread's
+// client stack and the root ThreadId decided on by phase 1.
+extern void VG_(scheduler_init_phase2) ( ThreadId main_tid,
+ Addr clstack_end,
+ SizeT clstack_size );
+
+/* Stats ... */
+extern void VG_(print_scheduler_stats) ( void );
+
+/* If False, a fault is Valgrind-internal (ie, a bug) */
+extern Bool VG_(in_generated_code);
+
+/* Sanity checks which may be done at any time. The scheduler decides when. */
+extern void VG_(sanity_check_general) ( Bool force_expensive );
+
+#endif // __PUB_CORE_SCHEDULER_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_seqmatch.h.svn-base b/coregrind/.svn/text-base/pub_core_seqmatch.h.svn-base
new file mode 100644
index 0000000..c2391f8
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_seqmatch.h.svn-base
@@ -0,0 +1,51 @@
+
+/*--------------------------------------------------------------------*/
+/*--- A simple sequence matching facility. ---*/
+/*--- pub_core_seqmatch.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 OpenWorks Ltd
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SEQMATCH_H
+#define __PUB_CORE_SEQMATCH_H
+
+//--------------------------------------------------------------------
+// PURPOSE: Provides a simple generic sequence matching facility, with
+// '*' and '?' style wildcards. Also provides a parameterisation
+// thereof suitable for matching strings. See pub_tool_seqmatch.h
+// for details.
+//--------------------------------------------------------------------
+
+// No core-only exports; everything in this module is visible to both
+// the core and tools.
+
+#include "pub_tool_seqmatch.h"
+
+#endif // __PUB_CORE_SEQMATCH_H
+
+/*--------------------------------------------------------------------*/
+/*--- end pub_core_seqmatch.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_sigframe.h.svn-base b/coregrind/.svn/text-base/pub_core_sigframe.h.svn-base
new file mode 100644
index 0000000..6646a82
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_sigframe.h.svn-base
@@ -0,0 +1,65 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Create/destroy signal delivery frames. ---*/
+/*--- pub_core_sigframe.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SIGFRAME_H
+#define __PUB_CORE_SIGFRAME_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module creates and destroys signal delivery frames
+// for client threads, saving/restoring the thread CPU state in the
+// frame appropriately.
+//--------------------------------------------------------------------
+
+/* There are no tool-visible exports from m_sigframe, hence no header
+ file for it. */
+/* #include "pub_tool_sigframe.h" */
+
+/* Create a signal frame for thread 'tid'. */
+extern
+void VG_(sigframe_create) ( ThreadId tid,
+ Addr sp_top_of_frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *uc,
+ void *handler,
+ UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer );
+
+/* Remove a signal frame from thread 'tid's stack, and
+ restore the CPU state from it. */
+extern
+void VG_(sigframe_destroy)( ThreadId tid, Bool isRT );
+
+#endif // __PUB_CORE_SIGFRAME_H
+
+/*--------------------------------------------------------------------*/
+/*--- end pub_core_sigframe.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_signals.h.svn-base b/coregrind/.svn/text-base/pub_core_signals.h.svn-base
new file mode 100644
index 0000000..9d174ae
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_signals.h.svn-base
@@ -0,0 +1,88 @@
+
+/*--------------------------------------------------------------------*/
+/*--- POSIX signals. pub_core_signals.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SIGNALS_H
+#define __PUB_CORE_SIGNALS_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module implements all the signal handling stuff.
+//--------------------------------------------------------------------
+
+#include "pub_tool_signals.h" // I want to get rid of this header...
+
+/* Highest signal the kernel will let us use */
+extern Int VG_(max_signal);
+
+/* Use high signals because native pthreads wants to use low */
+#define VG_SIGVGKILL (VG_(max_signal)-0)
+#define VG_SIGVGRTUSERMAX (VG_(max_signal)-1)
+
+extern void VG_(sigstartup_actions) ( void );
+
+/* Poll a thread's set of pending signals, and update the Thread's
+ context to deliver one (viz, create signal frames if needed) */
+extern void VG_(poll_signals) ( ThreadId );
+
+/* Fake system calls for signal handling. */
+extern SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss,
+ vki_stack_t* oss );
+extern SysRes VG_(do_sys_sigaction) ( Int signo,
+ const struct vki_sigaction *new_act,
+ struct vki_sigaction *old_act );
+extern SysRes VG_(do_sys_sigprocmask) ( ThreadId tid, Int how,
+ vki_sigset_t* set,
+ vki_sigset_t* oldset );
+
+extern void VG_(clear_out_queued_signals)
+ ( ThreadId tid, /* OUT */ vki_sigset_t* saved_mask );
+
+extern void VG_(kill_self)(Int sigNo);
+
+/* These function synthesize a fault, as if the running instruction
+ had had a fault. These functions do not return - they longjmp back
+ into the scheduler so the signal can be delivered. */
+extern void VG_(synth_fault) (ThreadId tid);
+extern void VG_(synth_fault_mapping)(ThreadId tid, Addr addr);
+extern void VG_(synth_fault_perms) (ThreadId tid, Addr addr);
+extern void VG_(synth_sigill) (ThreadId tid, Addr addr);
+extern void VG_(synth_sigtrap) (ThreadId tid);
+
+/* Extend the stack to cover addr, if possible */
+extern Bool VG_(extend_stack)(Addr addr, UInt maxsize);
+
+/* Forces the client's signal handler to SIG_DFL - generally just
+ before using that signal to kill the process. */
+extern void VG_(set_default_handler)(Int sig);
+
+#endif // __PUB_CORE_SIGNALS_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_sparsewa.h.svn-base b/coregrind/.svn/text-base/pub_core_sparsewa.h.svn-base
new file mode 100644
index 0000000..c382ef9
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_sparsewa.h.svn-base
@@ -0,0 +1,51 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An sparse array (of words) implementation. ---*/
+/*--- pub_core_sparsewa.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 OpenWorks Ltd
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SPARSEWA_H
+#define __PUB_CORE_SPARSEWA_H
+
+//--------------------------------------------------------------------
+// PURPOSE: Provides an implementation of a sparse array of host words
+// (UWord). The indices are themselves host words. The implementation
+// uses a 256-way radix tree, which is therefore 4 levels deep on a
+// 32-bit platform and 8 levels deep on a 64-bit platform.
+//--------------------------------------------------------------------
+
+// No core-only exports; everything in this module is visible to both
+// the core and tools.
+
+#include "pub_tool_sparsewa.h"
+
+#endif // __PUB_CORE_SPARSEWA_H
+
+/*--------------------------------------------------------------------*/
+/*--- end pub_core_sparsewa.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_stacks.h.svn-base b/coregrind/.svn/text-base/pub_core_stacks.h.svn-base
new file mode 100644
index 0000000..ffa2db7
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_stacks.h.svn-base
@@ -0,0 +1,52 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Stack management. m_stacks.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_STACKS_H
+#define __PUB_CORE_STACKS_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module deals with the registration of stacks for the
+// purposes of detecting stack switches.
+//--------------------------------------------------------------------
+
+extern UWord VG_(register_stack) ( Addr start, Addr end );
+extern void VG_(deregister_stack) ( UWord id );
+extern void VG_(change_stack) ( UWord id, Addr start, Addr end );
+extern void VG_(stack_limits) ( Addr SP, Addr *start, Addr *end );
+
+extern VG_REGPARM(3)
+ void VG_(unknown_SP_update) ( Addr old_SP, Addr new_SP, UInt otag );
+
+#endif // __PUB_CORE_STACKS_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/pub_core_stacktrace.h.svn-base b/coregrind/.svn/text-base/pub_core_stacktrace.h.svn-base
new file mode 100644
index 0000000..201bccf
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_stacktrace.h.svn-base
@@ -0,0 +1,63 @@
+/*--------------------------------------------------------------------*/
+/*--- Stack traces: getting, traversing, printing. ---*/
+/*--- pub_core_stacktrace.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_STACKTRACE_H
+#define __PUB_CORE_STACKTRACE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module deals with stack traces: getting them,
+// traversing them, and printing them.
+//--------------------------------------------------------------------
+
+#include "pub_tool_stacktrace.h"
+
+// Variant that gives a little more control over the stack-walking
+// (this is the "worker" function that actually does the walking).
+// If you know what the thread ID for this stack is, send that
+// as the first parameter, else send zero. This helps generate
+// better stack traces on ppc64-linux and has no effect on other
+// platforms.
+//
+// The acquired IP values are placed in
+// ips[0 .. min(n_ips,return_value)]. If sps and fps are non-NULL,
+// the corresponding frame-pointer and stack-pointer values for each
+// frame are stored there.
+
+UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+ /*OUT*/Addr* ips, UInt n_ips,
+ /*OUT*/Addr* sps, /*OUT*/Addr* fps,
+ Addr ip, Addr sp, Addr fp, Addr lr,
+ Addr fp_min, Addr fp_max_orig );
+
+#endif // __PUB_CORE_STACKTRACE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_syscall.h.svn-base b/coregrind/.svn/text-base/pub_core_syscall.h.svn-base
new file mode 100644
index 0000000..9954754
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_syscall.h.svn-base
@@ -0,0 +1,89 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Doing system calls. pub_core_syscall.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SYSCALL_H
+#define __PUB_CORE_SYSCALL_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains the code for actually executing syscalls.
+//--------------------------------------------------------------------
+
+/* Do a syscall on this platform, with 6 args, and return the result
+ in canonical format in a SysRes value. */
+
+// We use a full prototype for VG_(do_syscall) rather than "..." to ensure
+// that all arguments get converted to a UWord appropriately. Not doing so
+// can cause problems when passing 32-bit integers on 64-bit platforms,
+// because the top 32-bits might not be zeroed appropriately, eg. as would
+// happen with the 6th arg on AMD64 which is passed on the stack.
+
+extern SysRes VG_(do_syscall) ( UWord sysno,
+ UWord, UWord, UWord,
+ UWord, UWord, UWord,
+ UWord, UWord );
+
+/* Macros make life easier. */
+
+#define vgPlain_do_syscall0(s) VG_(do_syscall)((s),0,0,0,0,0,0,0,0)
+#define vgPlain_do_syscall1(s,a) VG_(do_syscall)((s),(a),\
+ 0,0,0,0,0,0,0)
+#define vgPlain_do_syscall2(s,a,b) VG_(do_syscall)((s),(a),(b),\
+ 0,0,0,0,0,0)
+#define vgPlain_do_syscall3(s,a,b,c) VG_(do_syscall)((s),(a),(b),(c),\
+ 0,0,0,0,0)
+#define vgPlain_do_syscall4(s,a,b,c,d) VG_(do_syscall)((s),(a),(b),\
+ (c),(d),0,0,0,0)
+#define vgPlain_do_syscall5(s,a,b,c,d,e) VG_(do_syscall)((s),(a),(b),\
+ (c),(d),(e),0,0,0)
+#define vgPlain_do_syscall6(s,a,b,c,d,e,f) VG_(do_syscall)((s),(a),(b),(c),\
+ (d),(e),(f),0,0)
+
+extern SysRes VG_(mk_SysRes_x86_linux) ( UInt val );
+extern SysRes VG_(mk_SysRes_amd64_linux) ( ULong val );
+extern SysRes VG_(mk_SysRes_ppc32_linux) ( UInt val, UInt cr0so );
+extern SysRes VG_(mk_SysRes_ppc64_linux) ( ULong val, ULong cr0so );
+extern SysRes VG_(mk_SysRes_ppc32_aix5) ( UInt val, UInt err );
+extern SysRes VG_(mk_SysRes_ppc64_aix5) ( ULong val, ULong err );
+extern SysRes VG_(mk_SysRes_Error) ( UWord val );
+extern SysRes VG_(mk_SysRes_Success) ( UWord val );
+
+
+/* Return a string which gives the name of an error value. Note,
+ unlike the standard C syserror fn, the returned string is not
+ malloc-allocated or writable -- treat it as a constant. */
+
+extern const HChar* VG_(strerror) ( UWord errnum );
+
+
+#endif // __PUB_CORE_SYSCALL_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_syswrap.h.svn-base b/coregrind/.svn/text-base/pub_core_syswrap.h.svn-base
new file mode 100644
index 0000000..57bfa07
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_syswrap.h.svn-base
@@ -0,0 +1,83 @@
+
+/*--------------------------------------------------------------------*/
+/*--- System call wrappers, etc. pub_core_syswrap.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_SYSWRAP_H
+#define __PUB_CORE_SYSWRAP_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module contains all the syscall junk: mostly PRE/POST
+// wrappers, but also the main syscall jacketing code.
+//--------------------------------------------------------------------
+
+// Allocates a stack for the first thread, then runs it,
+// as if the thread had been set up by clone()
+extern void VG_(main_thread_wrapper_NORETURN)(ThreadId tid);
+
+extern void VG_(client_syscall) ( ThreadId tid );
+
+extern void VG_(post_syscall) ( ThreadId tid );
+
+/* Clear this module's private state for thread 'tid' */
+extern void VG_(clear_syscallInfo) ( Int tid );
+
+// Fix up a thread's state when syscall is interrupted by a signal.
+extern void VG_(fixup_guest_state_after_syscall_interrupted)(
+ ThreadId tid,
+ Addr ip,
+ UWord sysnum,
+ SysRes sysret,
+ Bool restart
+ );
+
+// Wait until all other threads are dead
+extern void VG_(reap_threads)(ThreadId self);
+
+// Release resources held by this thread
+extern void VG_(cleanup_thread) ( ThreadArchState* );
+
+/* fd leakage calls. */
+extern void VG_(init_preopened_fds) ( void );
+extern void VG_(show_open_fds) ( void );
+
+// When the final thread is done, where shall I call to shutdown the
+// system cleanly? Is set once at startup (in m_main) and never
+// changes after that. Is basically a pointer to the exit
+// continuation. This is all just a nasty hack to avoid calling
+// directly from m_syswrap to m_main at exit, since that would cause
+// m_main to become part of a module cycle, which is silly.
+extern void (* VG_(address_of_m_main_shutdown_actions_NORETURN) )
+ (ThreadId,VgSchedReturnCode);
+
+#endif // __PUB_CORE_SYSWRAP_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
+
diff --git a/coregrind/.svn/text-base/pub_core_threadstate.h.svn-base b/coregrind/.svn/text-base/pub_core_threadstate.h.svn-base
new file mode 100644
index 0000000..bcba985
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_threadstate.h.svn-base
@@ -0,0 +1,272 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The thread state. pub_core_threadstate.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_THREADSTATE_H
+#define __PUB_CORE_THREADSTATE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module defines the ThreadState type and the
+// VG_(threads)[] data structure which holds all the important thread
+// state. It also defines some simple operations on the data structure
+// that don't require any external help. (m_scheduler does the complex
+// stuff).
+//--------------------------------------------------------------------
+
+#include "pub_tool_threadstate.h"
+
+/*------------------------------------------------------------*/
+/*--- Types ---*/
+/*------------------------------------------------------------*/
+
+/*
+ Thread state machine:
+
+ Empty -> Init -> Runnable <=> WaitSys/Yielding
+ ^ |
+ \---- Zombie -----/
+ */
+typedef
+ enum ThreadStatus {
+ VgTs_Empty, /* this slot is not in use */
+ VgTs_Init, /* just allocated */
+ VgTs_Runnable, /* ready to run */
+ VgTs_WaitSys, /* waiting for a syscall to complete */
+ VgTs_Yielding, /* temporarily yielding the CPU */
+ VgTs_Zombie, /* transient state just before exiting */
+ }
+ ThreadStatus;
+
+/* Return codes from the scheduler. */
+typedef
+ enum {
+ VgSrc_None, /* not exiting yet */
+ VgSrc_ExitThread, /* just this thread is exiting */
+ VgSrc_ExitProcess, /* entire process is exiting */
+ VgSrc_FatalSig /* Killed by the default action of a fatal
+ signal */
+ }
+ VgSchedReturnCode;
+
+
+#if defined(VGA_x86)
+ typedef VexGuestX86State VexGuestArchState;
+#elif defined(VGA_amd64)
+ typedef VexGuestAMD64State VexGuestArchState;
+#elif defined(VGA_ppc32)
+ typedef VexGuestPPC32State VexGuestArchState;
+#elif defined(VGA_ppc64)
+ typedef VexGuestPPC64State VexGuestArchState;
+#else
+# error Unknown architecture
+#endif
+
+
+/* Architecture-specific thread state */
+typedef
+ struct {
+ /* --- BEGIN vex-mandated guest state --- */
+
+ /* Note that for code generation reasons, we require that the
+ guest state area, its two shadows, and the spill area, are
+ 16-aligned and have 16-aligned sizes, and there are no holes
+ in between. This is checked by do_pre_run_checks() in
+ scheduler.c. */
+
+ /* Saved machine context. */
+ VexGuestArchState vex __attribute__((aligned(16)));
+
+ /* Saved shadow context (2 copies). */
+ VexGuestArchState vex_shadow1 __attribute__((aligned(16)));
+ VexGuestArchState vex_shadow2 __attribute__((aligned(16)));
+
+ /* Spill area. */
+ UChar vex_spill[LibVEX_N_SPILL_BYTES] __attribute__((aligned(16)));
+
+ /* --- END vex-mandated guest state --- */
+ }
+ ThreadArchState;
+
+
+/* OS-specific thread state */
+typedef
+ struct {
+ /* who we are */
+ Int lwpid; // PID of kernel task
+ Int threadgroup; // thread group id
+
+ ThreadId parent; // parent tid (if any)
+
+ /* runtime details */
+ Addr valgrind_stack_base; // Valgrind's stack (VgStack*)
+ Addr valgrind_stack_init_SP; // starting value for SP
+
+ /* exit details */
+ Word exitcode; // in the case of exitgroup, set by someone else
+ Int fatalsig; // fatal signal
+
+# if defined(VGO_aix5)
+ /* AIX specific fields to make thread cancellation sort-of work */
+ /* What is this thread's current cancellation state a la
+ POSIX (deferred vs async, enable vs disabled) ? */
+ Bool cancel_async; // current cancel mode (async vs deferred)
+ Bool cancel_disabled; // cancellation disabled?
+ /* What's happened so far? */
+ enum { Canc_NoRequest=0, // no cancellation requested
+ Canc_Requested=1, // requested but not actioned
+ Canc_Actioned=2 } // requested and actioned
+ cancel_progress;
+ /* Initial state is False, False, Canc_Normal. */
+# endif
+ }
+ ThreadOSstate;
+
+
+/* Overall thread state */
+typedef struct {
+ /* ThreadId == 0 (and hence vg_threads[0]) is NEVER USED.
+ The thread identity is simply the index in vg_threads[].
+ ThreadId == 1 is the root thread and has the special property
+ that we don't try and allocate or deallocate its stack. For
+ convenience of generating error message, we also put the
+ ThreadId in this tid field, but be aware that it should
+ ALWAYS == the index in vg_threads[]. */
+ ThreadId tid;
+
+ /* Current scheduling status. */
+ ThreadStatus status;
+
+ /* This is set if the thread is in the process of exiting for any
+ reason. The precise details of the exit are in the OS-specific
+ state. */
+ VgSchedReturnCode exitreason;
+
+ /* Architecture-specific thread state. */
+ ThreadArchState arch;
+
+ /* This thread's blocked-signals mask. Semantics is that for a
+ signal to be delivered to this thread, the signal must not be
+ blocked by this signal mask. If more than one thread accepts a
+ signal, then it will be delivered to one at random. If all
+ threads block the signal, it will remain pending until either a
+ thread unblocks it or someone uses sigwaitsig/sigtimedwait. */
+ vki_sigset_t sig_mask;
+
+ /* tmp_sig_mask is usually the same as sig_mask, and is kept in
+ sync whenever sig_mask is changed. The only time they have
+ different values is during the execution of a sigsuspend, where
+ tmp_sig_mask is the temporary mask which sigsuspend installs.
+ It is only consulted to compute the signal mask applied to a
+ signal handler. */
+ vki_sigset_t tmp_sig_mask;
+
+ /* A little signal queue for signals we can't get the kernel to
+ queue for us. This is only allocated as needed, since it should
+ be rare. */
+ struct SigQueue *sig_queue;
+
+ /* Client stacks. When a thread slot is freed, we don't deallocate its
+ stack; we just leave it lying around for the next use of the
+ slot. If the next use of the slot requires a larger stack,
+ only then is the old one deallocated and a new one
+ allocated.
+
+ For the main thread (threadid == 1), this mechanism doesn't
+ apply. We don't know the size of the stack since we didn't
+ allocate it, and furthermore we never reallocate it. */
+
+ /* The allocated size of this thread's stack */
+ SizeT client_stack_szB;
+
+ /* Address of the highest legitimate word in this stack. This is
+ used for error messages only -- not critical for execution
+ correctness. Is is set for all stacks, specifically including
+ ThreadId == 1 (the main thread). */
+ Addr client_stack_highest_word;
+
+ /* Alternate signal stack */
+ vki_stack_t altstack;
+
+ /* OS-specific thread state */
+ ThreadOSstate os_state;
+
+ /* Per-thread jmp_buf to resume scheduler after a signal */
+ Bool sched_jmpbuf_valid;
+ jmp_buf sched_jmpbuf;
+}
+ThreadState;
+
+
+/*------------------------------------------------------------*/
+/*--- The thread table. ---*/
+/*------------------------------------------------------------*/
+
+/* A statically allocated array of threads. NOTE: [0] is
+ never used, to simplify the simulation of initialisers for
+ LinuxThreads. */
+extern ThreadState VG_(threads)[VG_N_THREADS];
+
+// The running thread. m_scheduler should be the only other module
+// to write to this.
+extern ThreadId VG_(running_tid);
+
+/*------------------------------------------------------------*/
+/*--- Basic operations on the thread table. ---*/
+/*------------------------------------------------------------*/
+
+// Convert a ThreadStatus to a string.
+const HChar* VG_(name_of_ThreadStatus) ( ThreadStatus status );
+
+/* Get the ThreadState for a particular thread */
+extern ThreadState *VG_(get_ThreadState) ( ThreadId tid );
+
+/* Check that tid is in range and denotes a non-Empty thread. */
+extern Bool VG_(is_valid_tid) ( ThreadId tid );
+
+/* Returns true if a thread is currently running (ie, has the CPU lock) */
+extern Bool VG_(is_running_thread)(ThreadId tid);
+
+/* Returns true if the thread is in the process of exiting */
+extern Bool VG_(is_exiting)(ThreadId tid);
+
+/* Return the number of non-dead Threads */
+extern Int VG_(count_living_threads)(void);
+
+/* Return the number of threads in VgTs_Runnable state */
+extern Int VG_(count_runnable_threads)(void);
+
+/* Given an LWP id (ie, real kernel thread id), find the corresponding
+ ThreadId */
+extern ThreadId VG_(lwpid_to_vgtid)(Int lwpid);
+
+#endif // __PUB_CORE_THREADSTATE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_tooliface.h.svn-base b/coregrind/.svn/text-base/pub_core_tooliface.h.svn-base
new file mode 100644
index 0000000..1131127
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_tooliface.h.svn-base
@@ -0,0 +1,245 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The core/tool interface. pub_core_tooliface.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_TOOLIFACE_H
+#define __PUB_CORE_TOOLIFACE_H
+
+#include "pub_tool_tooliface.h"
+
+//--------------------------------------------------------------------
+// PURPOSE: This module encapsulates the key parts of the core/tool
+// interface: 'details', 'needs' and 'trackable events'.
+//--------------------------------------------------------------------
+
+// Note the use of C's comma operator here -- it means that we execute both
+// statements, and the rvalue of the whole thing is the rvalue of the last
+// statement. This lets us say "x = VG_TDICT_CALL(...)" in the required
+// places, while still checking the assertion.
+#define VG_TDICT_CALL(fn, args...) \
+ ( tl_assert2(VG_(tdict).fn, \
+ "you forgot to set VgToolInterface function '" #fn "'"), \
+ VG_(tdict).fn(args) )
+
+#define VG_TRACK(fn, args...) \
+ do { \
+ if (VG_(tdict).track_##fn) \
+ VG_(tdict).track_##fn(args); \
+ } while(0)
+
+/* These structs are not exposed to tools to mitigate possibility of
+ binary-incompatibilities when the core/tool interface changes. Instead,
+ set functions are provided (see include/pub_tool_tooliface.h). */
+
+/* ---------------------------------------------------------------------
+ 'Details'
+ ------------------------------------------------------------------ */
+
+typedef
+ struct {
+ Char* name;
+ Char* version;
+ Char* description;
+ Char* copyright_author;
+ Char* bug_reports_to;
+ UInt avg_translation_sizeB;
+ }
+ VgDetails;
+
+extern VgDetails VG_(details);
+
+/* ---------------------------------------------------------------------
+ 'Needs'
+ ------------------------------------------------------------------ */
+
+typedef
+ struct {
+ Bool libc_freeres;
+ Bool core_errors;
+ Bool tool_errors;
+ Bool superblock_discards;
+ Bool command_line_options;
+ Bool client_requests;
+ Bool syscall_wrapper;
+ Bool sanity_checks;
+ Bool var_info;
+ Bool malloc_replacement;
+ Bool xml_output;
+ Bool final_IR_tidy_pass;
+ }
+ VgNeeds;
+
+extern VgNeeds VG_(needs);
+
+/* ---------------------------------------------------------------------
+ The dictionary of callable tool functions
+ ------------------------------------------------------------------ */
+
+typedef struct {
+ // -- 'Needs'-related functions ----------------------------------
+ // Basic functions
+ void (*tool_pre_clo_init) (void);
+ void (*tool_post_clo_init)(void);
+ IRSB* (*tool_instrument) (VgCallbackClosure*,
+ IRSB*,
+ VexGuestLayout*, VexGuestExtents*,
+ IRType, IRType);
+ void (*tool_fini) (Int);
+
+ // VG_(needs).core_errors
+ // (none)
+
+ // VG_(needs).tool_errors
+ Bool (*tool_eq_Error) (VgRes, Error*, Error*);
+ void (*tool_pp_Error) (Error*);
+ Bool tool_show_ThreadIDs_for_errors;
+ UInt (*tool_update_extra) (Error*);
+ Bool (*tool_recognised_suppression) (Char*, Supp*);
+ Bool (*tool_read_extra_suppression_info) (Int, Char*, Int, Supp*);
+ Bool (*tool_error_matches_suppression) (Error*, Supp*);
+ Char* (*tool_get_error_name) (Error*);
+ void (*tool_print_extra_suppression_info)(Error*);
+
+ // VG_(needs).superblock_discards
+ void (*tool_discard_superblock_info)(Addr64, VexGuestExtents);
+
+ // VG_(needs).command_line_options
+ Bool (*tool_process_cmd_line_option)(Char*);
+ void (*tool_print_usage) (void);
+ void (*tool_print_debug_usage) (void);
+
+ // VG_(needs).client_requests
+ Bool (*tool_handle_client_request)(ThreadId, UWord*, UWord*);
+
+ // VG_(needs).syscall_wrapper
+ void (*tool_pre_syscall) (ThreadId, UInt);
+ void (*tool_post_syscall)(ThreadId, UInt, SysRes);
+
+ // VG_(needs).sanity_checks
+ Bool (*tool_cheap_sanity_check)(void);
+ Bool (*tool_expensive_sanity_check)(void);
+
+ // VG_(needs).malloc_replacement
+ void* (*tool_malloc) (ThreadId, SizeT);
+ void* (*tool___builtin_new) (ThreadId, SizeT);
+ void* (*tool___builtin_vec_new) (ThreadId, SizeT);
+ void* (*tool_memalign) (ThreadId, SizeT, SizeT);
+ void* (*tool_calloc) (ThreadId, SizeT, SizeT);
+ void (*tool_free) (ThreadId, void*);
+ void (*tool___builtin_delete) (ThreadId, void*);
+ void (*tool___builtin_vec_delete)(ThreadId, void*);
+ void* (*tool_realloc) (ThreadId, void*, SizeT);
+ SizeT (*tool_malloc_usable_size) (ThreadId, void*);
+ SizeT tool_client_redzone_szB;
+
+ // VG_(needs).final_IR_tidy_pass
+ IRSB* (*tool_final_IR_tidy_pass) (IRSB*);
+
+ // -- Event tracking functions ------------------------------------
+ void (*track_new_mem_startup) (Addr, SizeT, Bool, Bool, Bool, ULong);
+ void (*track_new_mem_stack_signal)(Addr, SizeT, ThreadId);
+ void (*track_new_mem_brk) (Addr, SizeT, ThreadId);
+ void (*track_new_mem_mmap) (Addr, SizeT, Bool, Bool, Bool, ULong);
+
+ void (*track_copy_mem_remap) (Addr src, Addr dst, SizeT);
+ void (*track_change_mem_mprotect) (Addr, SizeT, Bool, Bool, Bool);
+ void (*track_die_mem_stack_signal)(Addr, SizeT);
+ void (*track_die_mem_brk) (Addr, SizeT);
+ void (*track_die_mem_munmap) (Addr, SizeT);
+
+ void VG_REGPARM(2) (*track_new_mem_stack_4_w_ECU) (Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_8_w_ECU) (Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_12_w_ECU) (Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_16_w_ECU) (Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_32_w_ECU) (Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_112_w_ECU)(Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_128_w_ECU)(Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_144_w_ECU)(Addr,UInt);
+ void VG_REGPARM(2) (*track_new_mem_stack_160_w_ECU)(Addr,UInt);
+ void (*track_new_mem_stack_w_ECU)(Addr,SizeT,UInt);
+
+ void VG_REGPARM(1) (*track_new_mem_stack_4) (Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_8) (Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_12) (Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_16) (Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_32) (Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_112)(Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_128)(Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_144)(Addr);
+ void VG_REGPARM(1) (*track_new_mem_stack_160)(Addr);
+ void (*track_new_mem_stack)(Addr,SizeT);
+
+ void VG_REGPARM(1) (*track_die_mem_stack_4) (Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_8) (Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_12) (Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_16) (Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_32) (Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_112)(Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_128)(Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_144)(Addr);
+ void VG_REGPARM(1) (*track_die_mem_stack_160)(Addr);
+ void (*track_die_mem_stack)(Addr, SizeT);
+
+ void (*track_ban_mem_stack)(Addr, SizeT);
+
+ void (*track_pre_mem_read) (CorePart, ThreadId, Char*, Addr, SizeT);
+ void (*track_pre_mem_read_asciiz)(CorePart, ThreadId, Char*, Addr);
+ void (*track_pre_mem_write) (CorePart, ThreadId, Char*, Addr, SizeT);
+ void (*track_post_mem_write) (CorePart, ThreadId, Addr, SizeT);
+
+ void (*track_pre_reg_read) (CorePart, ThreadId, Char*, PtrdiffT, SizeT);
+ void (*track_post_reg_write)(CorePart, ThreadId, PtrdiffT, SizeT);
+ void (*track_post_reg_write_clientcall_return)(ThreadId, PtrdiffT, SizeT,
+ Addr);
+
+ void (*track_start_client_code)(ThreadId, ULong);
+ void (*track_stop_client_code) (ThreadId, ULong);
+
+ void (*track_pre_thread_ll_create)(ThreadId, ThreadId);
+ void (*track_pre_thread_first_insn)(ThreadId);
+ void (*track_pre_thread_ll_exit) (ThreadId);
+
+ void (*track_pre_deliver_signal) (ThreadId, Int sigNo, Bool);
+ void (*track_post_deliver_signal)(ThreadId, Int sigNo);
+
+} VgToolInterface;
+
+extern VgToolInterface VG_(tdict);
+
+/* ---------------------------------------------------------------------
+ Miscellaneous functions
+ ------------------------------------------------------------------ */
+
+Bool VG_(sanity_check_needs) ( Char** failmsg );
+
+#endif // __PUB_CORE_TOOLIFACE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_trampoline.h.svn-base b/coregrind/.svn/text-base/pub_core_trampoline.h.svn-base
new file mode 100644
index 0000000..630e52b
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_trampoline.h.svn-base
@@ -0,0 +1,119 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The trampoline code page. pub_core_trampoline.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_TRAMPOLINE_H
+#define __PUB_CORE_TRAMPOLINE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module defines a few replacement functions for Linux
+// vsyscalls, which we can't implement directly. It also contains
+// stubs for signal returns. Note, all the code within runs on the
+// simulated CPU. The vsyscall stubs are gotten to by use of the
+// redirect mechanism.
+//
+// Note: generally, putting replacement functions in here is a bad
+// idea, since any Dwarf frame-unwind info attached to them will not
+// be seen by the unwinder in gcc's runtime support. This means
+// unwinding during exception handling by gcc tends to fail if it
+// encounters one of these replacement functions. A better place to
+// put them is in one of the .so's preloaded into the client, since
+// the client's ld.so will know about it and so gcc's unwinder
+// (somehow) is able to get hold of it.
+//--------------------------------------------------------------------
+
+/* These two delimit our handwritten assembly code, so we can tell
+ tools which track memory that this area should be regarded as
+ readable, at least. Otherwise Memcheck complains we're jumping to
+ invalid addresses. */
+
+extern void VG_(trampoline_stuff_start);
+extern void VG_(trampoline_stuff_end);
+
+#if defined(VGP_x86_linux)
+extern void VG_(x86_linux_SUBST_FOR_sigreturn);
+extern void VG_(x86_linux_SUBST_FOR_rt_sigreturn);
+extern Char* VG_(x86_linux_REDIR_FOR_index) ( const Char*, Int );
+#endif
+
+#if defined(VGP_amd64_linux)
+extern void VG_(amd64_linux_SUBST_FOR_rt_sigreturn);
+extern void VG_(amd64_linux_REDIR_FOR_vgettimeofday);
+extern void VG_(amd64_linux_REDIR_FOR_vtime);
+#endif
+
+#if defined(VGP_ppc32_linux)
+extern void VG_(ppc32_linux_SUBST_FOR_sigreturn);
+extern void VG_(ppc32_linux_SUBST_FOR_rt_sigreturn);
+extern UInt VG_(ppc32_linux_REDIR_FOR_strlen)( void* );
+extern UInt VG_(ppc32_linux_REDIR_FOR_strcmp)( void*, void* );
+extern void* VG_(ppc32_linux_REDIR_FOR_strchr)( void*, Int );
+#endif
+
+#if defined(VGP_ppc64_linux)
+extern void VG_(ppc64_linux_SUBST_FOR_rt_sigreturn);
+extern UInt VG_(ppc64_linux_REDIR_FOR_strlen)( void* );
+extern void* VG_(ppc64_linux_REDIR_FOR_strchr)( void*, Int );
+/* A label (sans dot) marking the ultra-magical return stub via which
+ all redirected and wrapped functions are made to "return" on
+ ppc64-linux/ppc64-aix5/ppc32-aix5. The one insn at this label is
+ never really translated. Instead, m_translate generates IR to
+ restore the thread's LR and R2 registers from a small stack in the
+ ppc64 guest state structure, and then branch to LR. Convoluted?
+ Confusing? You betcha. Could I think of anything simpler? No. */
+extern void VG_(ppctoc_magic_redirect_return_stub);
+#endif
+
+#if defined(VGP_ppc32_aix5)
+/* A label (sans dot) marking the client start point for ppc32_aix5.
+ This function is entered with r3 holding a pointer to the
+ AIX5PreloadPage struct set up by m_initimg. It first tries to
+ __loadx the _core.so and _tool.so preloads mentioned in the struct;
+ then it cleans up the register state to be more what it really
+ should be at client startup, and finally it jumps to the client's
+ real entry point. */
+extern void VG_(ppc32_aix5_do_preloads_then_start_client);
+
+/* See comment for VG_(ppctoc_magic_redirect_return_stub) above. */
+extern void VG_(ppctoc_magic_redirect_return_stub);
+#endif
+
+#if defined(VGP_ppc64_aix5)
+/* See comment for VG_(ppctoc_magic_redirect_return_stub) above. */
+extern void VG_(ppctoc_magic_redirect_return_stub);
+
+/* See comment for ppc32_aix5 equivalent above. */
+extern void VG_(ppc64_aix5_do_preloads_then_start_client);
+#endif
+
+#endif // __PUB_CORE_TRAMPOLINE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_translate.h.svn-base b/coregrind/.svn/text-base/pub_core_translate.h.svn-base
new file mode 100644
index 0000000..8e0cea4
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_translate.h.svn-base
@@ -0,0 +1,53 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The JITter wrapper. pub_core_translate.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_TRANSLATE_H
+#define __PUB_CORE_TRANSLATE_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module is Valgrind's interface to the JITter. It's
+// basically a wrapper around Vex.
+//--------------------------------------------------------------------
+
+extern
+Bool VG_(translate) ( ThreadId tid,
+ Addr64 orig_addr,
+ Bool debugging_translation,
+ Int debugging_verbosity,
+ ULong bbs_done,
+ Bool allow_redirection );
+
+extern void VG_(print_translation_stats) ( void );
+
+#endif // __PUB_CORE_TRANSLATE_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_transtab.h.svn-base b/coregrind/.svn/text-base/pub_core_transtab.h.svn-base
new file mode 100644
index 0000000..79005f2
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_transtab.h.svn-base
@@ -0,0 +1,104 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The translation table and cache. ---*/
+/*--- pub_core_transtab.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_TRANSTAB_H
+#define __PUB_CORE_TRANSTAB_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module is responsible for caching translations, and
+// enabling fast look-ups of them.
+//--------------------------------------------------------------------
+
+#include "pub_core_transtab_asm.h"
+
+/* The fast-cache for tt-lookup, and for finding counters. Unused
+ entries are denoted by .guest == 1, which is assumed to be a bogus
+ address for all guest code. */
+typedef
+ struct {
+ Addr guest;
+ Addr host;
+ }
+ FastCacheEntry;
+
+extern __attribute__((aligned(16)))
+ FastCacheEntry VG_(tt_fast) [VG_TT_FAST_SIZE];
+
+#define TRANSTAB_BOGUS_GUEST_ADDR ((Addr)1)
+
+extern UInt* VG_(tt_fastN)[VG_TT_FAST_SIZE];
+
+extern void VG_(init_tt_tc) ( void );
+
+extern
+void VG_(add_to_transtab)( VexGuestExtents* vge,
+ Addr64 entry,
+ AddrH code,
+ UInt code_len,
+ Bool is_self_checking );
+
+extern Bool VG_(search_transtab) ( /*OUT*/AddrH* result,
+ Addr64 guest_addr,
+ Bool upd_cache );
+
+extern void VG_(discard_translations) ( Addr64 start, ULong range,
+ HChar* who );
+
+extern void VG_(print_tt_tc_stats) ( void );
+
+extern UInt VG_(get_bbs_translated) ( void );
+
+/* Add to / search the auxiliary, small, unredirected translation
+ table. */
+
+extern
+void VG_(add_to_unredir_transtab)( VexGuestExtents* vge,
+ Addr64 entry,
+ AddrH code,
+ UInt code_len );
+extern
+Bool VG_(search_unredir_transtab) ( /*OUT*/AddrH* result,
+ Addr64 guest_addr );
+
+// BB profiling stuff
+
+typedef struct _BBProfEntry {
+ Addr64 addr;
+ ULong score;
+} BBProfEntry;
+
+extern ULong VG_(get_BB_profile) ( BBProfEntry tops[], UInt n_tops );
+
+#endif // __PUB_CORE_TRANSTAB_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_transtab_asm.h.svn-base b/coregrind/.svn/text-base/pub_core_transtab_asm.h.svn-base
new file mode 100644
index 0000000..3c96867
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_transtab_asm.h.svn-base
@@ -0,0 +1,63 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Asm-only TransTab stuff. pub_core_transtab_asm.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_TRANSTAB_ASM_H
+#define __PUB_CORE_TRANSTAB_ASM_H
+
+/* Constants for the fast translation lookup cache. It is a direct
+ mapped cache, with 2^VG_TT_FAST_BITS entries.
+
+ On x86/amd64, the cache index is computed as
+ 'address[VG_TT_FAST_BITS-1 : 0]'.
+
+ On ppc32/ppc64, the bottom two bits of instruction addresses are
+ zero, which means that function causes only 1/4 of the entries to
+ ever be used. So instead the function is '(address >>u
+ 2)[VG_TT_FAST_BITS-1 : 0]' on those targets. */
+
+#define VG_TT_FAST_BITS 15
+#define VG_TT_FAST_SIZE (1 << VG_TT_FAST_BITS)
+#define VG_TT_FAST_MASK ((VG_TT_FAST_SIZE) - 1)
+
+/* This macro isn't usable in asm land; nevertheless this seems
+ like a good place to put it. */
+#if defined(VGA_x86) || defined(VGA_amd64)
+# define VG_TT_FAST_HASH(_addr) ((((UWord)(_addr)) ) & VG_TT_FAST_MASK)
+#elif defined(VGA_ppc32) || defined(VGA_ppc64)
+# define VG_TT_FAST_HASH(_addr) ((((UWord)(_addr)) >> 2) & VG_TT_FAST_MASK)
+#else
+# error "VG_TT_FAST_HASH: unknown platform"
+#endif
+
+#endif // __PUB_CORE_TRANSTAB_ASM_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_ume.h.svn-base b/coregrind/.svn/text-base/pub_core_ume.h.svn-base
new file mode 100644
index 0000000..2a16e2e
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_ume.h.svn-base
@@ -0,0 +1,85 @@
+
+/*--------------------------------------------------------------------*/
+/*--- User-mode execve. pub_core_ume.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_UME_H
+#define __PUB_CORE_UME_H
+
+//--------------------------------------------------------------------
+// PURPOSE: This module implements user-mode execve, ie. program loading
+// and exec'ing.
+//--------------------------------------------------------------------
+
+/*------------------------------------------------------------*/
+/*--- Loading files ---*/
+/*------------------------------------------------------------*/
+
+// Info needed to load and run a program. IN/INOUT/OUT refers to the
+// inputs/outputs of do_exec().
+typedef
+ struct {
+ HChar** argv; // IN: the original argv
+
+ Addr exe_base; // INOUT: lowest (allowed) address of exe
+ Addr exe_end; // INOUT: highest (allowed) address
+
+ Addr phdr; // OUT: address phdr was mapped at
+ Int phnum; // OUT: number of phdrs
+ Addr interp_base; // OUT: where interpreter (ld.so) was mapped
+ Addr entry; // OUT: entrypoint in main executable
+ Addr init_ip; // OUT: address of first instruction to execute
+ Addr brkbase; // OUT: base address of brk segment
+ Addr init_toc; // OUT: address of table-of-contents, on
+ // platforms for which that makes sense
+ // (ppc64-linux only)
+
+ // These are the extra args added by #! scripts
+ HChar* interp_name; // OUT: the interpreter name
+ HChar* interp_args; // OUT: the args for the interpreter
+ }
+ ExeInfo;
+
+// Do a number of appropriate checks to see if the file looks executable by
+// the kernel: ie. it's a file, it's readable and executable, and it's in
+// either binary or "#!" format. On success, 'out_fd' gets the fd of the file
+// if it's non-NULL. Otherwise the fd is closed.
+extern SysRes VG_(pre_exec_check)(const HChar* exe_name, Int* out_fd,
+ Bool allow_setuid);
+
+// Does everything short of actually running 'exe': finds the file,
+// checks execute permissions, sets up interpreter if program is a script,
+// reads headers, maps file into memory, and returns important info about
+// the program.
+extern Int VG_(do_exec)(const HChar* exe, ExeInfo* info);
+
+#endif /* __PUB_CORE_UME_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_vki.h.svn-base b/coregrind/.svn/text-base/pub_core_vki.h.svn-base
new file mode 100644
index 0000000..e01df8b
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_vki.h.svn-base
@@ -0,0 +1,50 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Top level for kernel interface declarations. ---*/
+/*--- pub_core_vki.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_VKI_H
+#define __PUB_CORE_VKI_H
+
+/* Most unfortunately, all the kernel decls are visible to tools. Not
+ really necessary, but to avoid this would require some tedious
+ refactoring of the sources. Anyway, we live with this kludge, and
+ that means the only thing to be done here is ... */
+
+#include "pub_tool_vki.h"
+
+#endif // __PUB_CORE_VKI_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_vkiscnums.h.svn-base b/coregrind/.svn/text-base/pub_core_vkiscnums.h.svn-base
new file mode 100644
index 0000000..0adbb94
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_vkiscnums.h.svn-base
@@ -0,0 +1,62 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Top level for kernel interface declarations. ---*/
+/*--- pub_core_vkiscnums.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+ Copyright (C) 2006-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_VKISCNUMS_H
+#define __PUB_CORE_VKISCNUMS_H
+
+/* Most unfortunately, all the kernel decls are visible to tools. Not
+ really necessary, but to avoid this would require some tedious
+ refactoring of the sources. Anyway, we live with this kludge, and
+ that means the only thing to be done here is ... */
+
+#include "pub_tool_vkiscnums.h"
+
+
+/* Make it possible to include this file in assembly sources. */
+#if !defined(VG_IN_ASSEMBLY_SOURCE)
+
+#if defined(VGO_aix5)
+/* Bind the given syscall name to the given number. Returns True if
+ successful, False if the name is unknown. */
+extern Bool VG_(aix5_register_syscall)( Int, UChar* );
+#endif
+
+#endif /* !defined(VG_IN_ASSEMBLY_SOURCE) */
+
+#endif // __PUB_CORE_VKISCNUMS_H
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_wordfm.h.svn-base b/coregrind/.svn/text-base/pub_core_wordfm.h.svn-base
new file mode 100644
index 0000000..4365055
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_wordfm.h.svn-base
@@ -0,0 +1,64 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An AVL tree based finite map for word keys and word values. ---*/
+/*--- Inspired by Haskell's "FiniteMap" library. ---*/
+/*--- pub_core_wordfm.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2007-2009 Julian Seward
+ jseward@acm.org
+
+ This code is based on previous work by Nicholas Nethercote
+ (coregrind/m_oset.c) which is
+
+ Copyright (C) 2005-2009 Nicholas Nethercote
+ njn@valgrind.org
+
+ which in turn was derived partially from:
+
+ AVL C library
+ Copyright (C) 2000,2002 Daniel Nagy
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
+ [...]
+
+ (taken from libavl-0.4/debian/copyright)
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_WORDFM_H
+#define __PUB_CORE_WORDFM_H
+
+/* No core-only exports; everything in this module is visible to both
+ the core and tools. */
+
+#include "pub_tool_wordfm.h"
+
+#endif /* ! __PUB_CORE_WORDFM_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end pub_core_wordfm.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/pub_core_xarray.h.svn-base b/coregrind/.svn/text-base/pub_core_xarray.h.svn-base
new file mode 100644
index 0000000..67231e9
--- /dev/null
+++ b/coregrind/.svn/text-base/pub_core_xarray.h.svn-base
@@ -0,0 +1,49 @@
+
+/*--------------------------------------------------------------------*/
+/*--- An expandable array implementation. pub_core_xarray.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2007-2009 OpenWorks LLP
+ info@open-works.co.uk
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PUB_CORE_XARRAY_H
+#define __PUB_CORE_XARRAY_H
+
+//--------------------------------------------------------------------
+// PURPOSE: Provides a simple but useful structure, which is an array
+// in which elements can be added at the end. See pub_tool_xarray.h
+// for further details.
+//--------------------------------------------------------------------
+
+// No core-only exports; everything in this module is visible to both
+// the core and tools.
+
+#include "pub_tool_xarray.h"
+
+#endif // __PUB_CORE_XARRAY_H
+
+/*--------------------------------------------------------------------*/
+/*--- end pub_core_xarray.h ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/.svn/text-base/vg_preloaded.c.svn-base b/coregrind/.svn/text-base/vg_preloaded.c.svn-base
new file mode 100644
index 0000000..3539832
--- /dev/null
+++ b/coregrind/.svn/text-base/vg_preloaded.c.svn-base
@@ -0,0 +1,72 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Client-space code for the core. vg_preloaded.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+
+/* ---------------------------------------------------------------------
+ ALL THE CODE IN THIS FILE RUNS ON THE SIMULATED CPU.
+
+ These functions are not called directly - they're the targets of code
+ redirection or load notifications (see pub_core_redir.h for info).
+ They're named weirdly so that the intercept code can find them when the
+ shared object is initially loaded.
+
+ Note that this filename has the "vg_" prefix because it can appear
+ in stack traces, and the "vg_" makes it a little clearer that it
+ originates from Valgrind.
+ ------------------------------------------------------------------ */
+
+#include "pub_core_basics.h"
+#include "pub_core_clreq.h"
+#include "pub_core_debuginfo.h" // Needed for pub_core_redir.h
+#include "pub_core_redir.h" // For VG_NOTIFY_ON_LOAD
+
+/* ---------------------------------------------------------------------
+ Hook for running __libc_freeres once the program exits.
+ ------------------------------------------------------------------ */
+
+void VG_NOTIFY_ON_LOAD(freeres)( void );
+void VG_NOTIFY_ON_LOAD(freeres)( void )
+{
+ int res;
+#if !defined(__UCLIBC__) && !defined(VGO_aix5)
+ extern void __libc_freeres(void);
+ __libc_freeres();
+#endif
+ VALGRIND_DO_CLIENT_REQUEST(res, 0 /* default */,
+ VG_USERREQ__LIBC_FREERES_DONE,
+ 0, 0, 0, 0, 0);
+ /*NOTREACHED*/
+ *(int *)0 = 'x';
+}
+
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/