author    Kurt Zenker <kz@openoffice.org>    2008-03-06 10:56:04 +0000
committer Kurt Zenker <kz@openoffice.org>    2008-03-06 10:56:04 +0000
commit    0ef878a5d321cf7d485388e2f35d6b756e82e5d2 (patch)
tree      8b4c83a4ef29d27d96303e8eb8526b16e858f279 /toolkit/src2xml
parent    6c3eb0f507e45e254a2fa66487341f0cfe4cde13 (diff)
INTEGRATION: CWS layout_DEV300 (1.1.2); FILE ADDED
2008/02/13 08:56:23 jcn 1.1.2.1: Import src2xml and doc/layout from GIT.
Diffstat (limited to 'toolkit/src2xml')
-rw-r--r--  toolkit/src2xml/source/macroexpander_test.py    71
-rw-r--r--  toolkit/src2xml/source/macroparser.py           130
-rw-r--r--  toolkit/src2xml/source/macroparser_test.py       20
-rw-r--r--  toolkit/src2xml/source/src2xml.py               219
-rw-r--r--  toolkit/src2xml/source/srclexer.py              488
-rw-r--r--  toolkit/src2xml/source/srcparser.py             416
6 files changed, 1344 insertions(+), 0 deletions(-)
diff --git a/toolkit/src2xml/source/macroexpander_test.py b/toolkit/src2xml/source/macroexpander_test.py
new file mode 100644
index 000000000000..823bcdb36bc2
--- /dev/null
+++ b/toolkit/src2xml/source/macroexpander_test.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+import srclexer, srcparser, globals
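+# Exercises srcparser.MacroExpander with hand-built token lists; the
+# Macro container is assumed to come from the sibling globals module.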
+
+class TestCase:
+
+ @staticmethod
+ def run (tokens, defines):
+ mcExpander = srcparser.MacroExpander(tokens, defines)
+ mcExpander.debug = True
+ mcExpander.expand()
+ tokens = mcExpander.getTokens()
+ print tokens
+
+ @staticmethod
+ def simpleNoArgs ():
+ tokens = ['FUNC_FOO', '(', 'left', ',', 'right', ')']
+ defines = {}
+ macro = globals.Macro('FUNC_FOO')
+ macro.tokens = ['Here', 'comes', 'X', 'and', 'Y']
+ defines['FUNC_FOO'] = macro
+ TestCase.run(tokens, defines)
+
+ @staticmethod
+ def simpleArgs ():
+ tokens = ['FUNC_FOO', '(', 'left', ',', 'right', ')']
+ defines = {}
+ macro = globals.Macro('FUNC_FOO')
+ macro.tokens = ['Here', 'comes', 'X', 'and', 'Y']
+ macro.vars['X'] = 0
+ macro.vars['Y'] = 1
+ defines['FUNC_FOO'] = macro
+ TestCase.run(tokens, defines)
+
+ @staticmethod
+ def multiTokenArgs ():
+ tokens = ['FUNC_FOO', '(', 'left1', 'left2', 'left3', ',', 'right', ')']
+ defines = {}
+ macro = globals.Macro('FUNC_FOO')
+ macro.tokens = ['Here', 'comes', 'X', 'and', 'Y']
+ macro.vars['X'] = 0
+ macro.vars['Y'] = 1
+ defines['FUNC_FOO'] = macro
+ TestCase.run(tokens, defines)
+
+ @staticmethod
+ def nestedTokenArgs ():
+ tokens = ['FUNC_BAA', '(', 'left', ',', 'right', ')']
+ defines = {}
+ macro = globals.Macro('FUNC_FOO')
+ macro.tokens = ['Here', 'comes', 'X', 'and', 'Y']
+ macro.vars['X'] = 0
+ macro.vars['Y'] = 1
+ defines['FUNC_FOO'] = macro
+ macro = globals.Macro('FUNC_BAA')
+ macro.tokens = ['FUNC_FOO']
+ defines['FUNC_BAA'] = macro
+ TestCase.run(tokens, defines)
+
+def main ():
+ print "simple expansion with no arguments"
+ TestCase.simpleNoArgs()
+ print "simple argument expansion"
+ TestCase.simpleArgs()
+ print "multi-token argument expansion"
+ TestCase.multiTokenArgs()
+ print "nested argument expansion"
+ TestCase.nestedTokenArgs()
+
+if __name__ == '__main__':
+ main()
diff --git a/toolkit/src2xml/source/macroparser.py b/toolkit/src2xml/source/macroparser.py
new file mode 100644
index 000000000000..1a221b404d42
--- /dev/null
+++ b/toolkit/src2xml/source/macroparser.py
@@ -0,0 +1,130 @@
+
+import sys
+from globals import *
+import srclexer
+
+class MacroParser(object):
+
+ def __init__ (self, buf):
+ self.buffer = buf
+ self.macro = None
+ self.debug = False
+
+ def parse (self):
+ """
+A macro with arguments must have its open paren immediately following
+its name without any whitespace.
+"""
+ if self.debug:
+ print "-"*68
+ print "parsing '%s'"%self.buffer
+
+ i = 0
+ bufSize = len(self.buffer)
+ name, buf = '', ''
+ while i < bufSize:
+ c = self.buffer[i]
+ if c in [' ', "\t"] and len(name) == 0:
+ # This is a simple macro with no arguments.
+ name = buf
+ vars = []
+ content = self.buffer[i:]
+ self.setMacro(name, vars, content)
+ return
+ elif c == '(' and len(name) == 0:
+ # This one has arguments.
+ name = buf
+ buf = self.buffer[i:]
+ vars, content = self.parseArgs(buf)
+ self.setMacro(name, vars, content)
+ return
+ else:
+ buf += c
+ i += 1
+
+ def parseArgs (self, buffer):
+ """Parse arguments.
+
+The buffer is expected to be formatted like '(a, b, c)' where the first
+character is the open paren.
+"""
+ scope = 0
+ buf = ''
+ vars = []
+ content = ''
+ bufSize = len(buffer)
+ i = 0
+ while i < bufSize:
+ c = buffer[i]
+ if c == '(':
+ scope += 1
+ elif c == ')':
+ scope -= 1
+ if len(buf) > 0:
+ vars.append(buf)
+ if scope == 0:
+ break
+ elif c == ',':
+ if len(buf) == 0:
+ raise ParseError ('')
+ vars.append(buf)
+ buf = ''
+ elif c in " \t" and scope > 0:
+ pass
+ else:
+ buf += c
+
+ i += 1
+
+ if scope > 0:
+ raise ParseError ('')
+
+ return vars, buffer[i+1:]
+
+
+ def setMacro (self, name, vars, content):
+ if self.debug:
+ print "-"*68
+ print "name: %s"%name
+ for var in vars:
+ print "var: %s"%var
+ if len(vars) == 0:
+ print "no vars"
+ print "content: '%s'"%content
+
+ if len(content) > 0:
+ self.macro = Macro(name)
+ for i in xrange(0, len(vars)):
+ self.macro.vars[vars[i]] = i
+
+ # Tokenize the macro body using the lexer.
+ mclexer = srclexer.SrcLexer(content)
+ mclexer.expandHeaders = False
+ mclexer.inMacroDefine = True
+ mclexer.tokenize()
+ self.macro.tokens = mclexer.getTokens()
+ if self.debug:
+ print self.macro.tokens
+
+ if not self.isValidMacro(self.macro):
+ self.macro = None
+
+ if self.debug:
+ if self.macro != None:
+ print "macro registered!"
+ else:
+ print "macro not registered"
+
+ def isValidMacro (self, macro):
+
+ n = len(macro.tokens)
+ if n == 0:
+ return False
+ elif len(macro.name) > 4 and macro.name[1:4] == 'ID_':
+ # We don't want to expand macros like HID_, SID_, WID_, etc.
+ return False
+ return True
+
+
+ def getMacro (self):
+ return self.macro
diff --git a/toolkit/src2xml/source/macroparser_test.py b/toolkit/src2xml/source/macroparser_test.py
new file mode 100644
index 000000000000..a7064832d1f8
--- /dev/null
+++ b/toolkit/src2xml/source/macroparser_test.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+import macroparser
+
+def runParser (buf):
+ mparser = macroparser.MacroParser(buf)
+ mparser.debug = True
+ mparser.parse()
+
+def main ():
+ buf = 'FOO (asdfsdaf)'
+ runParser(buf)
+ buf = 'FOO (x, y) (x) + (y)'
+ runParser(buf)
+ buf = 'FOO(x, y) (x) + (y)'
+ runParser(buf)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/toolkit/src2xml/source/src2xml.py b/toolkit/src2xml/source/src2xml.py
new file mode 100644
index 000000000000..8d082ee3e444
--- /dev/null
+++ b/toolkit/src2xml/source/src2xml.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+
+import getopt
+import os
+import re
+import sys
+#
+from srclexer import SrcLexer
+from srcparser import SrcParser
+from boxer import Boxer
+# FIXME
+from globals import *
+
+def option_parser ():
+ import optparse
+ p = optparse.OptionParser ()
+
+ p.usage = '''src2xml.py [OPTION]... SRC-FILE...'''
+
+ examples = '''
+Examples:
+ src2xml.py --output-dir=. --post-process --ignore-includes zoom.src
+ src2xml.py --dry-run -I svx/inc -I svx/source/dialog zoom.src
+'''
+
+ def format_examples (self):
+ return examples
+
+ p.description = '''OOo SRC To Layout XML Converter.
+
+Convert OO.o's existing dialog resource files into XML layout files.
+'''
+
+ if 'epilog' in p.__dict__:
+ p.formatter.format_epilog = format_examples
+ p.epilog = examples
+ else:
+ # Older optparse has no epilog support; show the examples via the
+ # description formatter instead.
+ p.formatter.format_description = format_examples
+
+ p.add_option ('-l', '--debug-lexer', action='store_true',
+ dest='debug_lexer', default=False,
+ help='debug lexer')
+
+ p.add_option ('-p', '--debug-parser', action='store_true',
+ dest='debug_parser', default=False,
+ help='debug parser')
+
+ p.add_option ('-m', '--debug-macro', action='store_true',
+ dest='debug_macro', default=False,
+ help='debug macro')
+
+ p.add_option ('-n', '--dry-run', action='store_true',
+ dest='dry_run', default=False,
+ help='dry run')
+
+ p.add_option ('-k', '--keep-going', action='store_true',
+ dest='keep_going', default=False,
+ help='continue after error')
+
+ p.add_option ('-i', '--ignore-includes', action='store_true',
+ dest='ignore_includes', default=False,
+ help='ignore #include directives')
+
+ p.add_option ('-I', '--include-dir', action='append',
+ dest='include_path',
+ default=[],
+ metavar='DIR',
+ help='append DIR to include path')
+
+ def from_file (option, opt_str, value, parser):
+ lst = getattr (parser.values, option.dest)
+ lst += file (value).read ().split ('\n')
+ setattr (parser.values, option.dest, lst)
+
+ def from_path (option, opt_str, value, parser):
+ lst = getattr (parser.values, option.dest)
+ lst += value.split (':')
+ setattr (parser.values, option.dest, lst)
+
+ # Junk me?
+ p.add_option ('--includes-from-file', action='callback', callback=from_file,
+ dest='include_path',
+ default=[],
+ type='string',
+ metavar='FILE',
+ help='append directory list from FILE to include path')
+
+ p.add_option ('--include-path', action='callback', callback=from_path,
+ dest='include_path',
+ type='string',
+ default=[],
+ metavar='PATH',
+ help='append PATH to include path')
+
+ p.add_option ('--only-expand-macros', action='store_true',
+ dest='only_expand_macros', default=False,
+ help='FIXME: better to say what NOT to expand?')
+
+ p.add_option ('-o', '--output-dir', action='store',
+ dest='output_dir', default=None,
+ metavar='DIR',
+ help='Output to DIR')
+
+ p.add_option ('-s', '--post-process', action='store_true',
+ dest='post_process', default=False,
+ help='post process output for use in Layout')
+
+ p.add_option ('--stop-on-header', action='store_true',
+ dest='stopOnHeader', default=False,
+ help='FIXME: remove this?')
+
+ return p
+
+
+def convert (file_name, options):
+ progress ("parsing %(file_name)s ..." % locals ())
+ fullpath = os.path.abspath(file_name)
+ if not os.path.isfile(fullpath):
+ error("no such file", exit=True)
+
+ ##options.include_path.append (os.path.dirname (fullpath))
+
+ input = file (fullpath, 'r').read()
+ lexer = SrcLexer(input, fullpath)
+ lexer.expandHeaders = not options.ignore_includes
+ lexer.includeDirs = options.include_path
+ lexer.stopOnHeader = options.stopOnHeader
+ lexer.debugMacro = options.debug_macro
+# lexer.debug = True
+ if options.debug_lexer:
+ lexer.debug = True
+ lexer.tokenize()
+ progress ("-"*68 + "\n")
+ progress ("** token dump\n")
+ lexer.dumpTokens()
+ progress ("** end of token dump\n")
+ return
+
+ # Tokenize it using lexer
+ lexer.tokenize()
+
+ parser = SrcParser(lexer.getTokens(), lexer.getDefines())
+ parser.onlyExpandMacros = options.only_expand_macros
+ if options.debug_parser:
+ parser.debug = True
+ root = parser.parse()
+ print root.dump()
+ return
+
+ # Parse the tokens.
+ root = parser.parse()
+
+ # Box it, and return the XML tree.
+ root = Boxer(root).layout()
+ output = root.dump()
+ if not options.dry_run:
+ progress ("\n")
+ return output
+
+def dry_one_file (file_name, options):
+ try:
+ convert(file_name, options)
+ progress (" SUCCESS\n")
+ except Exception, e:
+ if options.keep_going:
+ progress (" FAILED\n")
+ else:
+ import traceback
+ print traceback.format_exc (None)
+ raise e
+
+def post_process (s):
+ """Make output directly usable by layout module."""
+ s = re.sub ('(</?)([a-z]+)-([a-z]+)-([a-z]+)', r'\1\2\3\4', s)
+ s = re.sub ('(</?)([a-z]+)-([a-z]+)', r'\1\2\3', s)
+ s = re.sub ('(<(radiobutton|(fixed(info|text)))[^>]*) text=', r'\1 label=', s)
+ s = re.sub (' (height|width|x|y)="[0-9]*"', '', s)
+
+ s = s.replace ('<modaldialog', '<modaldialog sizeable="true"')
+ s = s.replace (' rid=', ' id=')
+ s = s.replace (' border="true"', ' has_border="true"')
+ s = s.replace (' def-button="true"', ' default="true"')
+ return s
+
+XML_HEADER = '<?xml version="1.0" encoding="UTF-8"?>\n'
+
+def do_one_file (file_name, options):
+ out = XML_HEADER
+ out += convert(file_name, options)
+ out += '\n'
+
+ if options.post_process:
+ out = post_process (out)
+ h = sys.stdout
+ if options.output_dir:
+ base = os.path.basename (file_name)
+ root, ext = os.path.splitext (base)
+ out_name = options.output_dir + '/' + root + '.xml'
+ progress ("writing %(out_name)s ..." % locals ())
+ h = file (out_name, 'w')
+ h.write (out)
+ h.flush ()
+ progress ("\n")
+
+def main ():
+ p = option_parser ()
+ (options, files) = p.parse_args ()
+ if not files:
+ p.error ("no input files")
+
+ for f in files:
+ if options.dry_run:
+ dry_one_file (f, options)
+ else:
+ do_one_file (f, options)
+
+if __name__ == '__main__':
+ main ()
diff --git a/toolkit/src2xml/source/srclexer.py b/toolkit/src2xml/source/srclexer.py
new file mode 100644
index 000000000000..5a5a3319b0d1
--- /dev/null
+++ b/toolkit/src2xml/source/srclexer.py
@@ -0,0 +1,488 @@
+import sys, os.path
+from globals import *
+import macroparser
+
+class EOF(Exception):
+ def __init__ (self):
+ pass
+
+ def __str__ (self):
+ return "end of file"
+
+class BOF(Exception):
+ def __init__ (self):
+ pass
+
+ def __str__ (self):
+ return "beginning of file"
+
+
+def removeHeaderQuotes (orig):
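+ # '"foo.hrc"' -> 'foo.hrc', '<tools/foo.hxx>' -> 'tools/foo.hxx';
+ # anything else is returned unchanged.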
+ if len(orig) <= 2:
+ return orig
+ elif orig[0] == orig[-1] == '"':
+ return orig[1:-1]
+ elif orig[0] == '<' and orig[-1] == '>':
+ return orig[1:-1]
+
+ return orig
+
+
+def dumpTokens (tokens, toError=False):
+
+ scope = 0
+ indent = " "
+ line = ''
+ chars = ''
+
+ for token in tokens:
+ if token in '{<':
+ if len(line) > 0:
+ chars += indent*scope + line + "\n"
+ line = ''
+ chars += indent*scope + token + "\n"
+ scope += 1
+
+ elif token in '}>':
+ if len(line) > 0:
+ chars += indent*scope + line + "\n"
+ line = ''
+ scope -= 1
+ chars += indent*scope + token
+
+ elif token == ';':
+ if len(line) > 0:
+ chars += indent*scope + line + ";\n"
+ line = ''
+ else:
+ chars += ";\n"
+ elif len(token) > 0:
+ line += token + ' '
+
+ if len(line) > 0:
+ chars += line
+ chars += "\n"
+ if toError:
+ sys.stderr.write(chars)
+ else:
+ sys.stdout.write(chars)
+
+
+class HeaderData(object):
+ def __init__ (self):
+ self.defines = {}
+ self.tokens = []
+
+
+class SrcLexer(object):
+ """Lexicographical analyzer for .src format.
+
+The role of a lexer is to parse the source file and break it into
+appropriate tokens. Such tokens are later passed to a parser to
+build the syntax tree.
+"""
+ headerCache = {}
+
+ VISIBLE = 0
+ INVISIBLE_PRE = 1
+ INVISIBLE_POST = 2
+
+ def __init__ (self, chars, filepath = None):
+ self.filepath = filepath
+ self.parentLexer = None
+ self.chars = chars
+ self.bufsize = len(self.chars)
+
+ # TODO: use parameters for this
+ # Properties that can be copied.
+ self.headerDict = dict ()
+ self.debug = False
+ self.debugMacro = False
+ self.includeDirs = list ()
+ self.expandHeaders = True
+ self.inMacroDefine = False
+ self.stopOnHeader = False
+
+ def copyProperties (self, other):
+ """Copy properties from another instance of SrcLexer."""
+
+ # TODO: use parameters for this
+ self.headerDict = other.headerDict
+ self.debug = other.debug
+ self.debugMacro = other.debugMacro
+ self.includeDirs = other.includeDirs[:]
+ self.expandHeaders = other.expandHeaders
+ self.inMacroDefine = other.inMacroDefine
+ self.stopOnHeader = other.stopOnHeader
+
+ def init (self):
+ self.firstNonBlank = ''
+ self.token = ''
+ self.tokens = []
+ self.defines = {}
+ self.visibilityStack = []
+
+ def getTokens (self):
+ return self.tokens
+
+ def getDefines (self):
+ return self.defines
+
+ def nextPos (self, i):
+ while True:
+ i += 1
+ try:
+ c = self.chars[i]
+ except IndexError:
+ raise EOF
+
+ if ord(c) in [0x0D]:
+ continue
+ break
+ return i
+
+ def prevPos (self, i):
+ while True:
+ i -= 1
+ try:
+ c = self.chars[i]
+ except IndexError:
+ raise BOF
+
+ if ord(c) in [0x0D]:
+ continue
+ break
+ return i
+
+ def isCodeVisible (self):
+ if len(self.visibilityStack) == 0:
+ return True
+ for item in self.visibilityStack:
+ if item != SrcLexer.VISIBLE:
+ return False
+ return True
+
+ def tokenize (self):
+ self.init()
+
+ i = 0
+ while True:
+ c = self.chars[i]
+
+ if self.firstNonBlank == '' and not c in [' ', "\n", "\t"]:
+ # Store the first non-blank in a line.
+ self.firstNonBlank = c
+ elif c == "\n":
+ self.firstNonBlank = ''
+
+ if c == '#':
+ i = self.pound(i)
+ elif c == '/':
+ i = self.slash(i)
+ elif c == "\n":
+ i = self.lineBreak(i)
+ elif c == '"':
+ i = self.doubleQuote(i)
+ elif c in [' ', "\t"]:
+ i = self.blank(i)
+ elif c in ";()[]{}<>,=+-*":
+ # Any outstanding single-character token.
+ i = self.anyToken(i, c)
+ elif self.isCodeVisible():
+ self.token += c
+
+ try:
+ i = self.nextPos(i)
+ except EOF:
+ break
+
+ if len(self.token):
+ self.tokens.append(self.token)
+
+ if not self.parentLexer and self.debug:
+ progress ("-"*68 + "\n")
+ progress ("All defines found in this translation unit:\n")
+ keys = self.defines.keys()
+ keys.sort()
+ for key in keys:
+ progress ("@ %s\n"%key)
+
+ def dumpTokens (self, toError=False):
+ dumpTokens(self.tokens, toError)
+
+
+ def maybeAddToken (self):
+ if len(self.token) > 0:
+ self.tokens.append(self.token)
+ self.token = ''
+
+
+ #--------------------------------------------------------------------
+ # character handlers
+
+ def blank (self, i):
+ if not self.isCodeVisible():
+ return i
+
+ self.maybeAddToken()
+ return i
+
+
+ def pound (self, i):
+
+ if self.inMacroDefine:
+ return i
+
+ if not self.firstNonBlank == '#':
+ return i
+
+ self.maybeAddToken()
+ # We are in preprocessing mode.
+
+ # Get the macro command name '#<command> .....'
+
+ command, define, buf = '', '', ''
+ firstNonBlank = False
+ while True:
+ try:
+ i = self.nextPos(i)
+ c = self.chars[i]
+ if c == '\\' and self.chars[self.nextPos(i)] == "\n":
+ i = self.nextPos(i)
+ continue
+ except EOF:
+ break
+
+ if c == "\n":
+ if len(buf) > 0 and len(command) == 0:
+ command = buf
+ i = self.prevPos(i)
+ break
+ elif c in [' ', "\t"]:
+ if not firstNonBlank:
+ # Ignore any leading blanks after the '#'.
+ continue
+
+ if len(command) == 0:
+ command = buf
+ buf = ''
+ else:
+ buf += ' '
+ elif c == '(':
+ if len(buf) > 0 and len(command) == 0:
+ command = buf
+ buf += c
+ else:
+ if not firstNonBlank:
+ firstNonBlank = True
+ buf += c
+
+ if command == 'define':
+ self.handleMacroDefine(buf)
+ elif command == 'include':
+ self.handleMacroInclude(buf)
+ elif command == 'ifdef':
+ defineName = buf.strip()
+ if self.defines.has_key(defineName):
+ self.visibilityStack.append(SrcLexer.VISIBLE)
+ else:
+ self.visibilityStack.append(SrcLexer.INVISIBLE_PRE)
+
+ elif command == 'ifndef':
+ defineName = buf.strip()
+ if self.defines.has_key(defineName):
+ self.visibilityStack.append(SrcLexer.INVISIBLE_PRE)
+ else:
+ self.visibilityStack.append(SrcLexer.VISIBLE)
+
+ elif command == 'if':
+ if self.evalCodeVisibility(buf):
+ self.visibilityStack.append(SrcLexer.VISIBLE)
+ else:
+ self.visibilityStack.append(SrcLexer.INVISIBLE_PRE)
+
+ elif command == 'elif':
+ if len(self.visibilityStack) == 0:
+ raise ParseError ('')
+
+ if self.visibilityStack[-1] == SrcLexer.VISIBLE:
+ self.visibilityStack[-1] = SrcLexer.INVISIBLE_POST
+ elif self.visibilityStack[-1] == SrcLexer.INVISIBLE_PRE:
+ # Evaluate only if the current visibility is false.
+ if self.evalCodeVisibility(buf):
+ self.visibilityStack[-1] = SrcLexer.VISIBLE
+
+ elif command == 'else':
+ if len(self.visibilityStack) == 0:
+ raise ParseError ('')
+
+ if self.visibilityStack[-1] == SrcLexer.VISIBLE:
+ self.visibilityStack[-1] = SrcLexer.INVISIBLE_POST
+ elif self.visibilityStack[-1] == SrcLexer.INVISIBLE_PRE:
+ self.visibilityStack[-1] = SrcLexer.VISIBLE
+
+ elif command == 'endif':
+ if len(self.visibilityStack) == 0:
+ raise ParseError ('')
+ self.visibilityStack.pop()
+
+ elif command == 'undef':
+ pass
+ elif command in ['error', 'pragma']:
+ pass
+ else:
+ # Unknown preprocessor directive; report it and bail out.
+ print "'%s' '%s'"%(command, buf)
+ print self.filepath
+ sys.exit(1)
+
+ return i
+
+
+ def evalCodeVisibility (self, buf):
+ try:
+ return eval(buf)
+ except:
+ return True
+
+ def handleMacroDefine (self, buf):
+
+ mparser = macroparser.MacroParser(buf)
+ mparser.debug = self.debugMacro
+ mparser.parse()
+ macro = mparser.getMacro()
+ if macro:
+ self.defines[macro.name] = macro
+
+ def handleMacroInclude (self, buf):
+
+ # Strip excess string if any.
+ pos = buf.find(' ')
+ if pos >= 0:
+ buf = buf[:pos]
+ headerSub = removeHeaderQuotes(buf)
+
+ if not self.expandHeaders:
+ # We don't want to expand headers. Bail out.
+ if self.debug:
+ progress ("%s ignored\n"%headerSub)
+ return
+
+ defines = {}
+ headerPath = None
+ for includeDir in self.includeDirs:
+ hpath = includeDir + '/' + headerSub
+ if os.path.isfile(hpath) and hpath != self.filepath:
+ headerPath = hpath
+ break
+
+ if not headerPath:
+ error("included header file " + headerSub + " not found\n", self.stopOnHeader)
+ return
+
+ if self.debug:
+ progress ("%s found\n"%headerPath)
+
+ if headerPath in self.headerDict:
+ if self.debug:
+ progress ("%s already included\n"%headerPath)
+ return
+
+ if SrcLexer.headerCache.has_key(headerPath):
+ if self.debug:
+ progress ("%s in cache\n"%headerPath)
+ for key in SrcLexer.headerCache[headerPath].defines.keys():
+ self.defines[key] = SrcLexer.headerCache[headerPath].defines[key]
+ return
+
+ chars = open(headerPath, 'r').read()
+ mclexer = SrcLexer(chars, headerPath)
+ mclexer.copyProperties(self)
+ mclexer.parentLexer = self
+ mclexer.tokenize()
+ hdrData = HeaderData()
+ hdrData.tokens = mclexer.getTokens()
+ headerDefines = mclexer.getDefines()
+ for key in headerDefines.keys():
+ defines[key] = headerDefines[key]
+ hdrData.defines[key] = headerDefines[key]
+
+ self.headerDict[headerPath] = True
+ SrcLexer.headerCache[headerPath] = hdrData
+
+ # Update the list of headers that have already been expanded.
+ for key in mclexer.headerDict.keys():
+ self.headerDict[key] = True
+
+ if self.debug:
+ progress ("defines found in header %s:\n"%headerSub)
+ for key in defines.keys():
+ progress (" '%s'\n"%key)
+
+ for key in defines.keys():
+ self.defines[key] = defines[key]
+
+
+ def slash (self, i):
+ if not self.isCodeVisible():
+ return i
+
+ if i < self.bufsize - 1 and self.chars[i+1] == '/':
+ # Parse line comment.
+ line = ''
+ i += 2
+ while i < self.bufsize:
+ c = self.chars[i]
+ if ord(c) in [0x0A, 0x0D]:
+ return i - 1
+ line += c
+ i += 1
+ self.token = ''
+ elif i < self.bufsize - 1 and self.chars[i+1] == '*':
+ comment = ''
+ i += 2
+ while i < self.bufsize:
+ c = self.chars[i]
+ if c == '/' and self.chars[i-1] == '*':
+ return i
+ comment += c
+ i += 1
+ else:
+ return self.anyToken(i, '/')
+
+ return i
+
+
+ def lineBreak (self, i):
+ if not self.isCodeVisible():
+ return i
+
+ self.maybeAddToken()
+
+ return i
+
+
+ def doubleQuote (self, i):
+ if not self.isCodeVisible():
+ return i
+
+ literal = ''
+ i += 1
+ while i < self.bufsize:
+ c = self.chars[i]
+ if c == '"':
+ self.tokens.append('"'+literal+'"')
+ break
+ literal += c
+ i += 1
+
+ return i
+
+
+ def anyToken (self, i, token):
+ if not self.isCodeVisible():
+ return i
+
+ self.maybeAddToken()
+ self.token = token
+ self.maybeAddToken()
+ return i
diff --git a/toolkit/src2xml/source/srcparser.py b/toolkit/src2xml/source/srcparser.py
new file mode 100644
index 000000000000..b25d39ecb66e
--- /dev/null
+++ b/toolkit/src2xml/source/srcparser.py
@@ -0,0 +1,416 @@
+import sys
+from globals import *
+import srclexer
+
+# simple name translation map
+postTransMap = {"ok-button": "okbutton",
+ "cancel-button": "cancelbutton",
+ "help-button": "helpbutton"}
+
+def transName (name):
+ """Translate a mixed-casing name to dash-separated name.
+
+Translate a mixed-casing name (e.g. MyLongName) to a dash-separated name
+(e.g. my-long-name).
+"""
+ def isUpper (c):
+ return c >= 'A' and c <= 'Z'
+
+ newname = ''
+ parts = []
+ buf = ''
+ for c in name:
+ if isUpper(c) and len(buf) > 1:
+ parts.append(buf)
+ buf = c
+ else:
+ buf += c
+
+ if len(buf) > 0:
+ parts.append(buf)
+
+ first = True
+ for part in parts:
+ if first:
+ first = False
+ else:
+ newname += '-'
+ newname += part.lower()
+
+ # special-case mapping ...
+ if 0: #postTransMap.has_key(newname):
+ newname = postTransMap[newname]
+
+ return newname
+
+
+def transValue (value):
+ """Translate certain values.
+
+Examples of translated values include TRUE -> true, FALSE -> false.
+"""
+ if value.lower() in ["true", "false"]:
+ value = value.lower()
+ return value
+
+
+def renameAttribute (name, elemName):
+
+ # TODO: all manner of evil special cases ...
+ if elemName == 'metric-field' and name == 'spin-size':
+ return 'step-size'
+
+ return name
+
+
+class Statement(object):
+ """Container to hold information for a single statement.
+
+Each statement consists of the left-hand-side token(s), and right-hand-side
+tokens, separated by a '=' token. This class stores the information on the
+left-hand-side tokens.
+"""
+ def __init__ (self):
+ self.leftTokens = []
+ self.leftScope = None
+
+
+class MacroExpander(object):
+ def __init__ (self, tokens, defines):
+ self.tokens = tokens
+ self.defines = defines
+
+ def expand (self):
+ self.pos = 0
+ while self.pos < len(self.tokens):
+ self.expandToken()
+
+ def expandToken (self):
+ token = self.tokens[self.pos]
+ if not self.defines.has_key(token):
+ self.pos += 1
+ return
+
+ macro = self.defines[token]
+ nvars = len(macro.vars.keys())
+ if nvars == 0:
+ # Simple expansion
+ self.tokens[self.pos:self.pos+1] = macro.tokens
+ return
+ else:
+ # Expansion with arguments.
+ values, lastPos = self.parseValues()
+ newtokens = []
+ for mtoken in macro.tokens:
+ if macro.vars.has_key(mtoken):
+ # variable
+ pos = macro.vars[mtoken]
+ valtokens = values[pos]
+ for valtoken in valtokens:
+ newtokens.append(valtoken)
+ else:
+ # not a variable
+ newtokens.append(mtoken)
+
+ self.tokens[self.pos:self.pos+lastPos+1] = newtokens
+
+
+ def parseValues (self):
+ """Parse tokens to get macro function variable values.
+
+Be aware that there are implicit quotes around the text between the open
+paren, the comma(s), and the close paren. For instance, if a macro is defined
+as FOO(a, b) and is used as FOO(one two three, and four), then 'a' must be
+replaced with 'one two three', and 'b' with 'and four'. In other words,
+whitespace does not end a token.
+
+"""
+ values = []
+ i = 1
+ scope = 0
+ value = []
+ while True:
+ try:
+ tk = self.tokens[self.pos+i]
+ except IndexError:
+ # Ran past the end of the token stream; the macro call is
+ # malformed or truncated. Dump what we have and bail out.
+ progress ("error parsing values (%d)\n"%i)
+ for j in xrange(0, i):
+ print self.tokens[self.pos+j],
+ print ''
+ srclexer.dumpTokens(self.tokens)
+ sys.exit(1)
+ if tk == '(':
+ value = []
+ scope += 1
+ elif tk == ',':
+ values.append(value)
+ value = []
+ elif tk == ')':
+ scope -= 1
+ values.append(value)
+ value = []
+ if scope == 0:
+ break
+ else:
+ raise ParseError ('')
+ else:
+ value.append(tk)
+ i += 1
+
+ return values, i
+
+ def getTokens (self):
+ return self.tokens
+
+
+class SrcParser(object):
+
+ def __init__ (self, tokens, defines = None):
+ self.tokens = tokens
+ self.defines = defines
+ self.debug = False
+ self.onlyExpandMacros = False
+
+ def init (self):
+ self.elementStack = [RootNode()]
+ self.stmtData = Statement()
+ self.tokenBuf = []
+ self.leftTokens = []
+
+ # Expand defined macros.
+ if self.debug:
+ progress ("-"*68+"\n")
+ for key in self.defines.keys():
+ progress ("define: %s\n"%key)
+
+ self.expandMacro()
+ self.tokenSize = len(self.tokens)
+
+ def expandMacro (self):
+ macroExp = MacroExpander(self.tokens, self.defines)
+ macroExp.expand()
+ self.tokens = macroExp.getTokens()
+ if self.onlyExpandMacros:
+ srclexer.dumpTokens(self.tokens)
+ sys.exit(0)
+
+ def parse (self):
+ """Parse it!
+
+This is the main loop for the parser. This is where it all begins and ends.
+"""
+ self.init()
+
+ i = 0
+ while i < self.tokenSize:
+ tk = self.tokens[i]
+ if tk == '{':
+ i = self.openBrace(i)
+ elif tk == '}':
+ i = self.closeBrace(i)
+ elif tk == ';':
+ i = self.semiColon(i)
+ elif tk == '=':
+ i = self.assignment(i)
+ else:
+ self.tokenBuf.append(tk)
+
+ i += 1
+
+ return self.elementStack[0]
+
+ #-------------------------------------------------------------------------
+ # Token Handlers
+
+ """
+Each token handler takes the current token position and returns the position
+of the last token processed. For the most part, the current token position
+and the last processed token are one and the same, in which case the handler
+can simply return the position value it receives without incrementing it.
+
+If you need to read ahead to process more tokens than just the current token,
+make sure that the new token position points to the last token that has been
+processed, not the next token that has not yet been processed. This is
+because the main loop increments the token position when it returns from the
+handler.
+"""
+
+ # assignment token '='
+ def assignment (self, i):
+ self.leftTokens = self.tokenBuf[:]
+ if self.stmtData.leftScope == None:
+ # Keep track of lhs data in case of compound statement.
+ self.stmtData.leftTokens = self.tokenBuf[:]
+ self.stmtData.leftScope = len(self.elementStack) - 1
+
+ self.tokenBuf = []
+ return i
+
+ # open brace token '{'
+ def openBrace (self, i):
+ bufSize = len(self.tokenBuf)
+ leftSize = len(self.leftTokens)
+ obj = None
+ if bufSize == 0 and leftSize > 0:
+ # Name = { ...
+ obj = Element(self.leftTokens[0])
+
+ elif bufSize > 0 and leftSize == 0:
+ # Type Name { ...
+ wgtType = self.tokenBuf[0]
+ wgtRID = None
+ if bufSize >= 2:
+ wgtRID = self.tokenBuf[1]
+ obj = Element(wgtType, wgtRID)
+
+ else:
+ # LeftName = Name { ...
+ obj = Element(self.leftTokens[0])
+ obj.setAttr("type", self.tokenBuf[0])
+
+ obj.name = transName(obj.name)
+
+ if obj.name == 'string-list':
+ i = self.parseStringList(i)
+ elif obj.name == 'filter-list':
+ i = self.parseFilterList(i, obj)
+ else:
+ self.elementStack[-1].appendChild(obj)
+ self.elementStack.append(obj)
+
+ self.tokenBuf = []
+ self.leftTokens = []
+
+ return i
+
+ # close brace token '}'
+ def closeBrace (self, i):
+ if len(self.tokenBuf) > 0:
+ if self.debug:
+ print self.tokenBuf
+ raise ParseError ('')
+ self.elementStack.pop()
+ return i
+
+ # semi colon token ';'
+ def semiColon (self, i):
+ stackSize = len(self.elementStack)
+ scope = stackSize - 1
+ if len(self.tokenBuf) == 0:
+ pass
+ elif scope == 0:
+ # We are not supposed to have any statement in global scope.
+ # Just ignore this statement.
+ pass
+ else:
+ # Statement within a scope. Import it as an attribute for the
+ # current element.
+ elem = self.elementStack[-1]
+
+ name = "none"
+ if len(self.leftTokens) > 0:
+ # Use the leftmost token as the name for now. If we need to
+ # do more complex parsing of lhs, add more code here.
+ name = self.leftTokens[0]
+ name = transName(name)
+
+ if name == 'pos':
+ i = self.parsePosAttr(i)
+ elif name == 'size':
+ i = self.parseSizeAttr(i)
+ elif len (self.tokenBuf) == 1:
+ # Simple value
+ value = transValue(self.tokenBuf[0])
+ name = renameAttribute(name, elem.name)
+ elem.setAttr(name, value)
+
+ if self.stmtData.leftScope != None and self.stmtData.leftScope < scope:
+ # This is a nested scope within a statement. Do nothing for now.
+ pass
+
+ if self.stmtData.leftScope == scope:
+ # end of (nested) statement.
+ self.stmtData.leftScope = None
+
+ self.tokenBuf = []
+ self.leftTokens = []
+
+ return i
+
+ def parseStringList (self, i):
+
+ i += 1
+ while i < self.tokenSize:
+ tk = self.tokens[i]
+ if tk == '}':
+ break
+ i += 1
+
+ return i
+
+ def parseFilterList (self, i, obj):
+ self.elementStack[-1].appendChild(obj)
+ self.elementStack.append(obj)
+
+ return i
+
+ def parsePosAttr (self, i):
+
+ # MAP_APPFONT ( 6 , 5 )
+ elem = self.elementStack[-1]
+ x, y = self.parseMapAppfont(self.tokenBuf)
+ elem.setAttr("x", x)
+ elem.setAttr("y", y)
+
+ return i
+
+ def parseSizeAttr (self, i):
+
+ # MAP_APPFONT ( 6 , 5 )
+ elem = self.elementStack[-1]
+ width, height = self.parseMapAppfont(self.tokenBuf)
+ elem.setAttr("width", width)
+ elem.setAttr("height", height)
+
+ return i
+
+ def parseMapAppfont (self, tokens):
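+ # e.g. tokenBuf ['MAP_APPFONT', '(', '6', ',', '5', ')'] -> (6, 5);
+ # eval() also copes with simple arithmetic inside the parens.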
+ values = []
+ scope = 0
+ val = ''
+ for tk in tokens:
+ if tk == '(':
+ scope += 1
+ if scope == 1:
+ val = ''
+ else:
+ val += tk
+ elif tk == ')':
+ scope -= 1
+ if scope == 0:
+ if len(val) == 0:
+ raise ParseError ('')
+ values.append(val)
+ break
+ else:
+ val += tk
+ elif tk == ',':
+ if len(val) == 0:
+ raise ParseError ('')
+ values.append(val)
+ val = ''
+ elif scope > 0:
+ val += tk
+
+ if len(values) != 2:
+ raise ParseError ('')
+
+ return eval(values[0]), eval(values[1])
+
+