From 0a393b07484319bcda806ed31047cfb3a95d461c Mon Sep 17 00:00:00 2001
From: Markus Rosenstihl
Date: Thu, 26 Jun 2014 11:10:51 +0000
Subject: [PATCH] migrate to standard svn repo layout

---
 MANIFEST.in                   |    7 +
 README                        |   17 +
 debian/changelog              |   68 +
 debian/compat                 |    1 +
 debian/control                |   16 +
 debian/damaris.desktop        |   10 +
 debian/pycompat               |    1 +
 debian/python-damaris.menu    |    1 +
 debian/rules                  |   65 +
 doc/Doxyfile                  |  242 +++
 doc/README.txt                |   24 +
 doc/dump_wiki.py              |  177 ++
 doc/index.html                |   84 +
 doxygen.conf                  | 1864 +++++++++++++++++++++
 scripts/DAMARIS               |   51 +
 setup.py                      |  121 ++
 src/__init__.py               |   18 +
 src/data/ADC_Result.py        |  523 ++++++
 src/data/Accumulation.py      |  824 ++++++++++
 src/data/Config_Result.py     |   54 +
 src/data/DaFFT.py             |  211 +++
 src/data/DamarisFFT.py        |  196 +++
 src/data/DataPool.py          |  177 ++
 src/data/Drawable.py          |  192 +++
 src/data/Error_Result.py      |   71 +
 src/data/Errorable.py         |   76 +
 src/data/MeasurementResult.py |  291 ++++
 src/data/Persistance.py       |   29 +
 src/data/Resultable.py        |   65 +
 src/data/Signalpath.py        |    9 +
 src/data/Temp_Result.py       |   35 +
 src/data/__init__.py          |    9 +
 src/data/autophase.py         |   63 +
 src/experiments/Experiment.py |  491 ++++++
 src/experiments/__init__.py   |    3 +
 src/experiments/dac.py        |   12 +
 src/gui/BackendDriver.py      |  294 ++++
 src/gui/DAMARIS.ico           |  Bin 0 -> 11454 bytes
 src/gui/DAMARIS.png           |  Bin 0 -> 664 bytes
 src/gui/DamarisGUI.py         | 2880 +++++++++++++++++++++++++++++
 src/gui/ExperimentHandling.py |   95 ++
 src/gui/ExperimentWriter.py   |   66 +
 src/gui/ResultHandling.py     |   78 +
 src/gui/ResultReader.py       |  576 +++++++
 src/gui/__init__.py           |    0
 src/gui/damaris.glade         | 2106 ++++++++++++++++++++++
 src/gui/damaris.gladep        |    8 +
 src/gui/gtkcodebuffer.py      |  681 ++++++++
 src/gui/python.xml            |  210 +++
 src/gui/script_interface.py   |  152 ++
 src/tools/__init__.py         |    0
 src/tools/eurotherm.py        |  119 ++
 src/tools/ranges.py           |  127 ++
 src/tools/signal.py           |   30 +
 tests/datatest.py             |   97 ++
 55 files changed, 13617 insertions(+)
 create mode 100644 MANIFEST.in
 create mode 100644 README
 create mode 100644 debian/changelog
 create mode 100644 debian/compat
 create mode 100644 debian/control
 create mode 100644 debian/damaris.desktop
 create mode 100644 debian/pycompat
 create mode 100644 debian/python-damaris.menu
 create mode 100755 debian/rules
 create mode 100644 doc/Doxyfile
 create mode 100644 doc/README.txt
 create mode 100644 doc/dump_wiki.py
 create mode 100644 doc/index.html
 create mode 100644 doxygen.conf
 create mode 100755 scripts/DAMARIS
 create mode 100644 setup.py
 create mode 100644 src/__init__.py
 create mode 100644 src/data/ADC_Result.py
 create mode 100644 src/data/Accumulation.py
 create mode 100644 src/data/Config_Result.py
 create mode 100644 src/data/DaFFT.py
 create mode 100644 src/data/DamarisFFT.py
 create mode 100644 src/data/DataPool.py
 create mode 100644 src/data/Drawable.py
 create mode 100644 src/data/Error_Result.py
 create mode 100644 src/data/Errorable.py
 create mode 100644 src/data/MeasurementResult.py
 create mode 100644 src/data/Persistance.py
 create mode 100644 src/data/Resultable.py
 create mode 100644 src/data/Signalpath.py
 create mode 100644 src/data/Temp_Result.py
 create mode 100644 src/data/__init__.py
 create mode 100644 src/data/autophase.py
 create mode 100644 src/experiments/Experiment.py
 create mode 100644 src/experiments/__init__.py
 create mode 100644 src/experiments/dac.py
 create mode 100644 src/gui/BackendDriver.py
 create mode 100644 src/gui/DAMARIS.ico
 create mode 100644 src/gui/DAMARIS.png
 create mode 100644 src/gui/DamarisGUI.py
 create mode 100644 src/gui/ExperimentHandling.py
 create mode 100644 src/gui/ExperimentWriter.py
 create mode 100644 src/gui/ResultHandling.py
 create mode 100644 src/gui/ResultReader.py
 create mode 100644 src/gui/__init__.py
 create mode 100644 src/gui/damaris.glade
 create mode 100644 src/gui/damaris.gladep
 create mode 100644 src/gui/gtkcodebuffer.py
 create mode 100644 src/gui/python.xml
 create mode 100644 src/gui/script_interface.py
 create mode 100644 src/tools/__init__.py
 create mode 100644 src/tools/eurotherm.py
 create mode 100644 src/tools/ranges.py
 create mode 100644 src/tools/signal.py
 create mode 100644 tests/datatest.py

diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..0871dc7
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,7 @@
+include src/gui/DAMARIS.png
+include src/gui/DAMARIS.ico
+include src/gui/damaris.glade
+include src/gui/damaris.gladep
+include doc/index.html
+recursive-include doc/reference-html *.html *.gif *.png *.css
+recursive-include doc/tutorial-html *.html *.gif *.png *.css *.tar.gz *.sh
diff --git a/README b/README
new file mode 100644
index 0000000..7ca3811
--- /dev/null
+++ b/README
@@ -0,0 +1,17 @@
+
+
+
+Installing into a private directory
+===================================
+
+For whatever reason, it might be useful to install the
+python frontend into a private directory.
+Here are some examples of how to do it:
+
+    python setup.py install --home=/opt/damaris-private
+or
+    python setup.py install --home=$HOME/devel/damaris-installed
+
+To start DAMARIS, use for example:
+
+    /opt/damaris-private/bin/DAMARIS
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..923ceef
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,68 @@
+python-damaris (0.14-svn) experimental; urgency=low
+
+  * MeasurementResults (MR) can be lineplots (Oleg Petrov)
+  * Default symbol for MR changed to "*"
+  * Several more improvements to GUI code
+  * Fixed some errors in FFT module (Oleg Petrov)
+  * Config now according to XDG standard
+
+ -- Markus Rosenstihl  Wed, 07 Nov 2012 12:36:40 +0000
+
+python-damaris (0.13-0) experimental; urgency=low
+
+  * using cElementTree if available, giving the ResultReader an almost 20x speed-up
+
+ -- Markus Rosenstihl  Wed, 03 Nov 2010 18:49:46 +0000
+
+python-damaris (0.12-0.0) experimental; urgency=low
+
+  * new development version
+
+ -- Achim Gaedke  Sat, 24 May 2008 17:49:25 +0200
+
+python-damaris (0.11-0.1) experimental; urgency=low
+
+  * updated debian python policy
+  * job descriptions: types are saved and recovered
+  * support for a system-wide defaults file, thanks to Christian
+  * new interleaved range by Markus Rosenstihl
+  * revised display source change event handling, found some severe errors
+  * cursor movement and selection problems solved
+  * log window information is saved in hdf files
+
+ -- Achim Gaedke  Thu, 17 Mar 2008 17:28:13 +0100
+
+python-damaris (0.11-0.0) experimental; urgency=low
+
+  * rename to python-damaris
+  * x axis log plot
+  * %()s functions for hdf file naming
+  * line number widgets for scripts
+  * Persistance class added (scope-like fade-away of signal changes)
+
+ -- Achim Gaedke  Thu, 07 Feb 2008 02:21:45 +0100
+
+greendamaris (0.10-0.2) experimental; urgency=medium
+
+  * increased compatibility with pytables versions 1 and 2
+  * sending signals to the backend when it fails to quit properly
+  * correcting quit event return value
+
+ -- Achim Gaedke  Fri, 12 Oct 2007 13:34:33 +0200
+
+greendamaris (0.10-0.1) experimental; urgency=low
+
+  * added docs and a browser button for docs
+  * limited number of points to display for GTKCairo backend
+  * numarray replaced by numpy
+  * adapted width of window to fit even gnome icon size
+
+ -- Achim Gaedke  Tue, 02 Oct 2007 19:11:33 +0200
+
+greendamaris (0.10-0.0) experimental; urgency=medium
+
+  * started separate source package of python-damaris
+  * pycentral used to support both python 2.4 and 2.5
+
+ -- Achim Gaedke  Sat, 08 Sep 2007 12:56:06 +0200
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..7ed6ff8
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+5
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..8f0b722
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,16 @@
+Source: python-damaris
+Section: science
+Priority: optional
+Maintainer: Achim Gaedke
+Build-Depends: debhelper, python-dev, python-central, dpkg-dev
+Standards-Version: 3.7.3
+XS-Python-Version: current
+
+Package: python-damaris
+Architecture: all
+XB-Python-Version: ${python:Versions}
+Provides: ${python:Provides}, damaris-frontend
+Depends: ${python:Depends}, python-numpy (>=1.0), python-scipy, python-gtk2 (>=2.8.0), python-glade2, python-matplotlib (>=0.90), python-tables (>=1.3.2), lzop
+Recommends: damaris-backends, python-numpy-ext
+Suggests: python-doc, python-tables-doc, python-numpy-doc
+Description: python frontend for DAMARIS project
diff --git a/debian/damaris.desktop b/debian/damaris.desktop
new file mode 100644
index 0000000..9c37198
--- /dev/null
+++ b/debian/damaris.desktop
@@ -0,0 +1,10 @@
+[Desktop Entry]
+Name=DAMARIS
+Comment=DArmstadt MAgnetic Resonance Instrument Software
+Exec=/usr/bin/DAMARIS %F
+X-MultipleArgs=true
+Terminal=false
+MimeType=text/x-python;text/plain;
+Type=Application
+Categories=Science;Education;Physics;Chemistry;
+Icon=DAMARIS
diff --git a/debian/pycompat b/debian/pycompat
new file mode 100644
index 0000000..0cfbf08
--- /dev/null
+++ b/debian/pycompat
@@ -0,0 +1 @@
+2
diff --git a/debian/python-damaris.menu b/debian/python-damaris.menu
new file mode 100644
index 0000000..a12878d
--- /dev/null
+++ b/debian/python-damaris.menu
@@ -0,0 +1 @@
+?package(python-damaris): needs="X11" section="Apps/Science" title="DAMARIS" longtitle="DArmstadt Magnetic Resonance Instrument Software" command="DAMARIS -dGTKAgg" icon="/usr/share/python-damaris/images/DAMARIS.png"
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..f0c58ed
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,65 @@
+#!/usr/bin/make -f
+
+# Achim Gaedke
+# May 2007
+
+# Uncomment this to turn on verbose mode.
+export DH_VERBOSE=1
+PYVERS=$(shell pyversions -vr)
+PACKAGE_NAME=python-damaris
+MODULE_NAME=damaris
+PYBASE=$(CURDIR)/debian/${PACKAGE_NAME}
+DH_ALWAYS_EXCLUDE=CVS:.svn
+
+clean:
+	dh_testdir
+	dh_testroot
+	dh_installdirs
+	# Add here commands to clean up after the build process.
+	rm -f *-stamp
+	rm -rf dist build $(PYVERS:%=build-ext-%)
+	find . -name *\.py[co] -exec rm {} \;
+	dh_clean
+
+build: build-stamp
+build-stamp: $(PYVERS:%=build-ext-%)
+	touch $@
+build-ext-%:
+	dh_testdir
+	python$* setup.py build
+	touch $@
+
+install: install-stamp
+# install menu related things
+	dh_install debian/damaris.desktop /usr/share/applications/
+
+install-stamp: build-stamp $(PYVERS:%=install-ext-%)
+install-ext-%:
+	python$* setup.py install --root $(PYBASE) --install-layout=deb
+	echo "DAMARIS script: removing path to local installation"
+	-sed 's/^sys.path.insert(0,.*).*/# damaris package is found on default path/' <$(PYBASE)/usr/bin/DAMARIS >$(PYBASE)/usr/bin/DAMARIS.new
+	-mv $(PYBASE)/usr/bin/DAMARIS.new $(PYBASE)/usr/bin/DAMARIS
+
+binary-indep: build install
+	dh_link usr/share/python-damaris/doc usr/share/doc/python-damaris/html
+	dh_link usr/share/python-damaris/images usr/share/doc/python-damaris/images
+	dh_link usr/share/python-damaris/images/DAMARIS.png usr/share/icons/DAMARIS.png
+	dh_testdir -i
+	dh_testroot -i
+	dh_installchangelogs -i
+	dh_installdocs -i
+	dh_pysupport
+	dh_installmenu -i
+	dh_desktop -i
+	dh_compress -i -X.py
+	dh_fixperms -i
+	dh_installdeb -i
+	dh_gencontrol -i
+	dh_md5sums -i
+	dh_builddeb -i
+
+binary-arch: build install
+
+binary: binary-indep binary-arch
+
+.PHONY: build clean binary-indep binary-arch binary install configure
diff --git a/doc/Doxyfile b/doc/Doxyfile
new file mode 100644
index 0000000..c8c23e8
--- /dev/null
+++ b/doc/Doxyfile
@@ -0,0 +1,242 @@
+# Doxyfile 1.5.3
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING = UTF-8
+PROJECT_NAME = "python DAMARIS"
+PROJECT_NUMBER = 0.13-svn
+OUTPUT_DIRECTORY = .
+CREATE_SUBDIRS = NO
+OUTPUT_LANGUAGE = English
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ABBREVIATE_BRIEF = "The $name class " \
+                   "The $name widget " \
+                   "The $name file " \
+                   is \
+                   provides \
+                   specifies \
+                   contains \
+                   represents \
+                   a \
+                   an \
+                   the
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = NO
+STRIP_FROM_PATH =
+STRIP_FROM_INC_PATH =
+SHORT_NAMES = NO
+JAVADOC_AUTOBRIEF = NO
+QT_AUTOBRIEF = NO
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 8
+ALIASES =
+OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_JAVA = NO
+BUILTIN_STL_SUPPORT = NO
+CPP_CLI_SUPPORT = NO
+DISTRIBUTE_GROUP_DOC = NO
+SUBGROUPING = YES
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL = YES
+EXTRACT_PRIVATE = YES
+EXTRACT_STATIC = YES
+EXTRACT_LOCAL_CLASSES = YES
+EXTRACT_LOCAL_METHODS = NO
+EXTRACT_ANON_NSPACES = NO
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = NO
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = YES
+HIDE_SCOPE_NAMES = NO
+SHOW_INCLUDE_FILES = YES
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+SORT_BRIEF_DOCS = NO
+SORT_BY_SCOPE_NAME = NO
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_USED_FILES = YES
+SHOW_DIRECTORIES = NO
+FILE_VERSION_FILTER =
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_FORMAT = "$file:$line: $text "
+WARN_LOGFILE =
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT = damaris
+INPUT_ENCODING = ISO-8859-15
+FILE_PATTERNS = *.py \
+                *.PY
+RECURSIVE = YES
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXCLUDE_SYMBOLS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS = *
+EXAMPLE_RECURSIVE = YES
+IMAGE_PATH =
+INPUT_FILTER =
+FILTER_PATTERNS =
+FILTER_SOURCE_FILES = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER = YES
+INLINE_SOURCES = NO
+STRIP_CODE_COMMENTS = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION = NO
+REFERENCES_LINK_SOURCE = YES
+USE_HTAGS = NO
+VERBATIM_HEADERS = NO
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX = NO
+COLS_IN_ALPHA_INDEX = 5
+IGNORE_PREFIX =
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML = YES
+HTML_OUTPUT = reference-html
+HTML_FILE_EXTENSION = .html
+HTML_HEADER =
+HTML_FOOTER =
+HTML_STYLESHEET =
+HTML_ALIGN_MEMBERS = YES
+GENERATE_HTMLHELP = NO
+HTML_DYNAMIC_SECTIONS = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+BINARY_TOC = NO
+TOC_EXPAND = NO
+DISABLE_INDEX = NO
+ENUM_VALUES_PER_LINE = 4
+GENERATE_TREEVIEW = NO
+TREEVIEW_WIDTH = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX = NO
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME = latex
+MAKEINDEX_CMD_NAME = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4wide
+EXTRA_PACKAGES =
+LATEX_HEADER =
+PDF_HYPERLINKS = NO
+USE_PDFLATEX = NO
+LATEX_BATCHMODE = NO
+LATEX_HIDE_INDICES = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN = NO
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_LINKS = NO
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML = NO
+XML_OUTPUT = xml
+XML_SCHEMA =
+XML_DTD =
+XML_PROGRAMLISTING = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD = NO
+PERLMOD_LATEX = NO
+PERLMOD_PRETTY = YES
+PERLMOD_MAKEVAR_PREFIX =
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING = NO
+MACRO_EXPANSION = NO
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = YES
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+PERL_PATH = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS = YES
+MSCGEN_PATH =
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = YES
+CLASS_GRAPH = YES
+COLLABORATION_GRAPH = YES
+GROUP_GRAPHS = YES
+UML_LOOK = NO
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = YES
+INCLUDED_BY_GRAPH = YES
+CALL_GRAPH = NO
+CALLER_GRAPH = NO
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+DOT_IMAGE_FORMAT = png
+DOT_PATH = /usr/bin/
+DOTFILE_DIRS =
+DOT_GRAPH_MAX_NODES = 50
+MAX_DOT_GRAPH_DEPTH = 1000
+DOT_TRANSPARENT = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+SEARCHENGINE = NO
diff --git a/doc/README.txt b/doc/README.txt
new file mode 100644
index 0000000..d951574
--- /dev/null
+++ b/doc/README.txt
@@ -0,0 +1,24 @@
+so far, the documentation creation is not automated...
+
+# html reference
+# requires dot and doxygen
+
+cd doc
+ln -s ../src damaris
+doxygen Doxyfile
+rm damaris
+
+# todo: copy damaris logo
+
+# html wiki export
+# requires moinmoin and damaris/data as wikidata
+
+cd doc
+# underlay must be writable, so we have to copy it...
+cp -r /usr/share/moin/underlay wikiunderlay
+python dump_wiki.py
+cp -r /usr/share/moin/htdocs/modern tutorial-html
+rm -r wikiunderlay wikiconfig.py
+
+# get useful numpy doc
+wget http://www.scipy.org/Numpy_Example_List_With_Doc?action=print
diff --git a/doc/dump_wiki.py b/doc/dump_wiki.py
new file mode 100644
index 0000000..680a33e
--- /dev/null
+++ b/doc/dump_wiki.py
@@ -0,0 +1,177 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Dump a MoinMoin wiki to static pages
+
+    based on the "moin.py export dump" command
+
+"""
+
+import sys, os, time, StringIO, codecs, shutil, re, errno
+
+from MoinMoin import config, wikiutil, Page
+from MoinMoin.request import RequestCLI
+from MoinMoin.action import AttachFile
+
+HTML_SUFFIX = ".html"
+
+page_template = u'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<meta http-equiv="content-type" content="text/html; charset=%(charset)s">
+<title>%(pagename)s</title>
+<link rel="stylesheet" type="text/css" media="all" href="%(theme)s/css/common.css">
+<link rel="stylesheet" type="text/css" media="screen" href="%(theme)s/css/screen.css">
+<link rel="stylesheet" type="text/css" media="print" href="%(theme)s/css/print.css">
+</head>
+<body>
+<div id="logo">%(logo_html)s</div>
+<div id="navibar">%(navibar_html)s</div>
+<div id="page">
+<h1 id="title">%(pagename)s</h1>
+%(pagehtml)s
+</div>
+<hr>
+%(timestamp)s
+</body>
+</html>
+'''
+
+def _attachment(request, pagename, filename, outputdir):
+    filename = filename.encode(config.charset)
+    source_dir = AttachFile.getAttachDir(request, pagename)
+    source_file = os.path.join(source_dir, filename)
+    dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
+    dest_file = os.path.join(dest_dir, filename)
+    dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), wikiutil.url_quote(filename))
+    if os.access(source_file, os.R_OK):
+        if not os.access(dest_dir, os.F_OK):
+            try:
+                os.makedirs(dest_dir)
+            except:
+                print ("Cannot create attachment directory '%s'" % dest_dir)
+                raise
+        elif not os.path.isdir(dest_dir):
+            print ("'%s' is not a directory" % dest_dir)
+
+        shutil.copyfile(source_file, dest_file)
+        print ('Writing "%s"...' % dest_url)
+        return dest_url
+    else:
+        return ""
+
+
+class PluginScript: #(MoinScript):
+    """ Dump script class """
+
+    def __init__(self):
+        pass
+
+    def mainloop(self):
+        """ moin-dump's main code. """
+
+        # Prepare output directory
+        outputdir = os.path.join(os.curdir, "tutorial-html")
+        try:
+            os.mkdir(outputdir)
+            print "Created output directory '%s'!" % outputdir
+        except OSError, err:
+            if err.errno != errno.EEXIST:
+                print "Cannot create output directory '%s'!" % outputdir
+                raise
+
+        sys.path.insert(0, os.path.abspath(os.curdir))
+
+        wikiconfig_template = """
+from MoinMoin.multiconfig import DefaultConfig
+class Config(DefaultConfig):
+    sitename = u'DAMARIS Homepage and Usergroup'
+    logo_string = u'DAMARIS Logo'
+    page_front_page = u"Welcome"
+    interwikiname = 'damaris'
+    data_dir = '%(pwd)s/wikidata/'
+    data_underlay_dir = '%(pwd)s/wikiunderlay'
+    url_prefix = '/damaris/wiki'
+    theme_default = 'modern'
+""" % {"pwd": os.curdir, "underlay": "/home/achim/underlay"}
+
+        config_file = open("wikiconfig.py", "w")
+        print >>config_file, wikiconfig_template
+        config_file.close()
+
+        # start with the wiki entry page
+        request = RequestCLI(pagename="Welcome")
+
+        # fix url_prefix so we get relative paths in the output html
+        url_prefix = "."
+        request.cfg.url_prefix = url_prefix
+
+        pages = request.rootpage.getPageList(user='') # get list of all pages in wiki
+        pages.sort()
+
+        # extract the list of pages to be exported;
+        # otherwise the dump gets too fat!!!
+        try:
+            pages_match = re.compile("^(Tutorial|auxiliary tools|overview|installation|code snippets)")
+            pages = [page for page in pages if pages_match.match(page)]
+        except:
+            print "did not find suitable pages"
+            raise
+
+        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)
+
+        AttachFile.getAttachUrl = lambda pagename, filename, request, addts=0, escaped=0: (_attachment(request, pagename, filename, outputdir))
+
+        errfile = os.path.join(outputdir, 'error.log')
+        errlog = open(errfile, 'w')
+        errcnt = 0
+
+        page_front_page = wikiutil.getSysPage(request, request.cfg.page_front_page).page_name
+        page_title_index = wikiutil.getSysPage(request, 'TitleIndex').page_name
+        page_word_index = wikiutil.getSysPage(request, 'WordIndex').page_name
+
+        navibar_html = ''
+        for p in [page_front_page, page_title_index, page_word_index]:
+            navibar_html += ' [<a href="%s">%s</a>]' % (wikiutil.quoteWikinameURL(p), wikiutil.escape(p))
+
+        urlbase = request.url # save wiki base url
+        for pagename in pages:
+            # we have the same name in URL and FS
+            file = wikiutil.quoteWikinameURL(pagename)
+            print ('Writing "%s"...' % file)
+            try:
+                pagehtml = ''
+                request.url = urlbase + pagename # add current pagename to url base
+                page = Page.Page(request, pagename)
+                request.page = page
+                try:
+                    request.reset()
+                    pagehtml = request.redirectedOutput(page.send_page, request, count_hit=0, content_only=1)
+                except:
+                    errcnt = errcnt + 1
+                    print >>sys.stderr, "*** Caught exception while writing page!"
+                    print >>errlog, "~" * 78
+                    print >>errlog, file # page filename
+                    import traceback
+                    traceback.print_exc(None, errlog)
+            finally:
+                timestamp = time.strftime("%Y-%m-%d %H:%M")
+                filepath = os.path.join(outputdir, file)
+                fileout = codecs.open(filepath, 'w', config.charset)
+                logo_html = ''
+                fileout.write(page_template % {
+                    'charset': config.charset,
+                    'pagename': pagename,
+                    'pagehtml': pagehtml,
+                    'logo_html': logo_html,
+                    'navibar_html': navibar_html,
+                    'timestamp': timestamp,
+                    'theme': request.cfg.theme_default,
+                })
+                fileout.close()
+
+        # ToDo: insert style sheets and logo
+
+        errlog.close()
+        if errcnt:
+            print >>sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
+
+if __name__=="__main__":
+    PluginScript().mainloop()
diff --git a/doc/index.html b/doc/index.html
new file mode 100644
index 0000000..f77cfe1
--- /dev/null
+++ b/doc/index.html
@@ -0,0 +1,84 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta http-equiv="content-type" content="text/html; charset=utf-8">
+<title>python-damaris documentation index</title>
+</head>
+<body>

+<h1><img src="DAMARIS.png" alt="DAMARIS">DAMARIS
+Documentation Index</h1>
+<hr>
+
+<h2>Local Resources:</h2>
+
+<p>
+Documentation from DAMARIS wiki:
+<a href="tutorial-html/Welcome.html">tutorial-html</a>
+</p>
+<p>
+<a href="reference-html/index.html">python-damaris source code reference (generated by doxygen)</a>
+</p>
+
+<h2>Internet Resources:</h2>
+ + + diff --git a/doxygen.conf b/doxygen.conf new file mode 100644 index 0000000..df16662 --- /dev/null +++ b/doxygen.conf @@ -0,0 +1,1864 @@ +# Doxyfile 1.8.3 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = "DAMARIS Python-Front End" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = ../../documents/python-frontend + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = YES + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. Note that you specify absolute paths here, but also +# relative paths, which will be relative from the directory where doxygen is +# started. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. 
+ +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = yes + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. 
Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, +# and language is one of the parsers supported by doxygen: IDL, Java, +# Javascript, CSharp, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, +# C++. For instance to make doxygen treat .inc files as Fortran files (default +# is PHP), and .f files as C (default is Fortran), use: inc=Fortran f=C. Note +# that for custom extensions you also need to set FILE_PATTERNS otherwise the +# files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. +# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented classes, +# or namespaces to their corresponding documentation. Such a link can be +# prevented in individual cases by by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES (the +# default) will make doxygen replace the get and set methods by a property in +# the documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). 
Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields will be shown inline in the documentation +# of the scope in which they are defined (i.e. file, namespace, or group +# documentation), provided this scope is documented. If set to NO (the default), +# structs, classes, and unions are shown on a separate page (for HTML and Man +# pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penalty. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will roughly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +SYMBOL_CACHE_SIZE = 0 + +# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be +# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given +# their name and scope. Since this can be an expensive process and often the +# same symbol appear multiple times in the code, doxygen keeps a cache of +# pre-resolved symbols. If the cache is too small doxygen will become slower. +# If the cache is too large, memory is wasted. The cache size is given by this +# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = no + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = yes + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. 
+ +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. 
+ +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if section-label ... \endif +# and \cond section-label ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command , where is the value of +# the FILE_VERSION_FILTER tag, and is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files +# containing the references data. This must be a list of .bib files. The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. Do not use +# file names with spaces, bibtex cannot handle them. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. 
+ +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. 
+ +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command , where +# is the value of the INPUT_FILTER tag, and is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# non of the patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. 
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page (index.html).
+# This can be useful if you have a project on, for instance, GitHub and want to
+# reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C, C++ and Fortran comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
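+# Example (hypothetical setting): with IGNORE_PREFIX = ADC_ Temp_ the classes
+# ADC_Result and Temp_Result would be indexed under R rather than A and T.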
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header. Note that when using a custom header you are responsible
+# for the proper inclusion of any scripts and style sheets that doxygen
+# needs, which is dependent on the configuration options used.
+# It is advised to generate a default header using "doxygen -w html
+# header.html footer.html stylesheet.css YourConfigFile" and then modify
+# that header. Note that the header is subject to change so you typically
+# have to redo this when upgrading to a newer version of doxygen or when
+# changing the value of configuration settings such as GENERATE_TREEVIEW!
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If left blank doxygen will
+# generate a default style sheet. Note that it is recommended to use
+# HTML_EXTRA_STYLESHEET instead of this one, as it is more robust and this
+# tag will in the future become obsolete.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional
+# user-defined cascading style sheet that is included after the standard
+# style sheets created by doxygen. Using this option one can overrule
+# certain style aspects. This is preferred over using HTML_STYLESHEET
+# since it does not replace the standard style sheet and is therefore more
+# robust against future updates. Doxygen will copy the style sheet file to
+# the output directory.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that
+# the files will be copied as-is; there are no commands or markers available.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the style sheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8. The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of
+# entries shown in the various tree structured indices initially; the user
+# can expand and collapse entries dynamically later on. Doxygen will expand
+# the tree to such a level that at most the specified number of entries are
+# visible (unless a fully collapsed tree already exceeds this amount).
+# So setting the number of entries to 1 will produce a fully collapsed tree by
+# default. 0 is a special value representing an infinite number of entries
+# and will result in a fully expanded tree by default.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When the GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When the GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
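+# Example (hypothetical value): DOCSET_PUBLISHER_NAME = "TU Darmstadt"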
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when
+# generating Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# "Qt Help Project / Custom Filters" in the Qt documentation.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. For more information please see
+# "Qt Help Project / Filter Attributes" in the Qt documentation.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
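+# Example (typical path when the Qt development tools are installed):
+# QHG_LOCATION = /usr/bin/qhelpgenerator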
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to YES if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider setting DISABLE_INDEX to YES when enabling this option.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes take effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps.
+# Use this if you do not have LaTeX installed or if you want the formulas to
+# look prettier in the HTML output. When enabled you may also need to install
+# MathJax separately and configure the path to it using the MATHJAX_RELPATH
+# option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. Supported types are HTML-CSS, NativeMML (i.e. MathML) and
+# SVG. The default value is HTML-CSS, which is slower, but has the best
+# compatibility.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.
+# However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better
+# solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript.
+# There are two flavours of web server based search depending on the
+# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
+# searching and an index file used by the script. When EXTERNAL_SEARCH is
+# enabled the indexing and searching needs to be provided by external tools.
+# See the manual for details.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain
+# the search results. Doxygen ships with an example indexer (doxyindexer) and
+# search engine (doxysearch.cgi) which are based on the open source search engine
+# library Xapian. See the manual for configuration details.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+# Doxygen ships with an example search engine (doxysearch) which is based on
+# the open source search engine library Xapian. See the manual for configuration
+# details.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool.
+# With the SEARCHDATA_FILE tag the name of this file can be specified.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through other
+# doxygen projects that are not otherwise connected via tags files, but are
+# all added to the same search index. Each project needs to have a tag file set
+# via GENERATE_TAGFILE. The search mapping then maps the name of the tag file
+# to a relative location where the documentation can be found, similar to the
+# TAGFILES option but without actually processing the tag file.
+# The format is: EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate LaTeX output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate the index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
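+# With USE_PDFLATEX = YES and PDF_HYPERLINKS = YES (as set here), running
+# "make" in the LATEX_OUTPUT directory should directly produce a hyperlinked
+# refman.pdf.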
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load style sheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES Doxygen will
+# generate man pages.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s).
+# These additional files only source the real man page, but without them
+# the man command would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
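+# Example (hypothetical prefix): PERLMOD_MAKEVAR_PREFIX = DAMARIS_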
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# pointed to by INCLUDE_PATH will be searched when a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding a location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path).
+# If a tag file is not located in the directory in which doxygen is run, you
+# must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS tag specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 12
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
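+# Example (hypothetical path): DOT_FONTPATH = /usr/share/fonts/truetype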
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used.
+# If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG = NO
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie farther from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/scripts/DAMARIS b/scripts/DAMARIS
new file mode 100755
index 0000000..b602cad
--- /dev/null
+++ b/scripts/DAMARIS
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+
+# the setup script will insert the local DAMARIS installation path after the import sys statement
+# this must happen before any damaris stuff is called!
import sys
+
+import os
+# for numpy-1.1 and later: check the environment for LANG and LC_NUMERIC
+# see: http://projects.scipy.org/scipy/numpy/ticket/902
+if os.environ.get("LANG","").startswith("de") or os.environ.get("LC_NUMERIC", "").startswith("de"):
+    os.environ["LC_NUMERIC"]="C"
+
+import damaris.gui.DamarisGUI
+import matplotlib
+
+import os.path
+# argv is already parsed by gtk initialisation
+myargs=sys.argv[1:]
+myname=os.path.basename(sys.argv[0])
+
+# find debug flag:
+if "--debug" in myargs:
+    damaris.gui.DamarisGUI.debug = True
+    print "debug flag set"
+    try:
+        import resource
+        resource.setrlimit(resource.RLIMIT_CORE, (-1,-1))
+    except ImportError:
+        pass
+    matplotlib.rcParams["verbose.level"]="debug"
+    myargs.remove("--debug")
+
+# remove matplotlib flags
+if "-d"+matplotlib.rcParams["backend"] in myargs:
+    myargs.remove("-d"+matplotlib.rcParams["backend"])
+
+# find scripts to load in parameter list
+exp_script = None
+res_script = None
+if len(myargs)<=2:
+    if len(myargs)>=1:
+        exp_script=myargs[0]
+    if len(myargs)==2:
+        res_script=myargs[1]
+else:
+    print """too many arguments.\n%s [--debug] [-dGTK(Agg|Cairo|)] (Experiment File|"") (Result File|"")"""%(myname)
+
+d=damaris.gui.DamarisGUI.DamarisGUI(exp_script, res_script)
+d.run()
+sys.stdout=sys.__stdout__
+sys.stderr=sys.__stderr__
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..9340840
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+
+import shutil
+import os
+import os.path
+import sys
+from distutils.core import setup
+from distutils.command.build_scripts import build_scripts as _build_scripts
+from distutils import log
+from distutils.util import convert_path
+from distutils.dep_util import newer
+
+#if sys.version_info < (2, 5, 3):
+#    log.error("**** ERROR: Install manually: python setup.py install ****")
+#    raise ValueError
+
+class build_damaris_scripts(_build_scripts):
+
+    #user_options=_build_scripts.user_options[:]
+    #user_options.append(('install-dir=', 'd', "directory to install scripts to"))
+
+    def initialize_options (self):
+        _build_scripts.initialize_options(self)
+        self.damaris_dir = None
+
+    def finalize_options (self):
+        _build_scripts.finalize_options(self)
+        self.set_undefined_options('install',
+                                   ('install_lib', 'damaris_dir'))
+
+    def run (self):
+        "insert the local DAMARIS installation path into the DAMARIS executable's sys.path"
+        _build_scripts.run(self)
+        script="scripts/DAMARIS"
+        script = convert_path(script)
+        outfile = os.path.join(self.build_dir, os.path.basename(script))
+        self.damaris_dir=os.path.normpath(self.damaris_dir)
+        if self.damaris_dir in sys.path:
+            log.debug("not changing %s (this path is on standard path)", script)
+            # nothing to do for us
+            return
+
+        # now change the search path in the DAMARIS script
+        # copy backup
+        log.info("adapting DAMARIS script to use local installation")
+        shutil.copyfile(outfile, outfile+".bak")
+        # the file should keep all its attributes (executable...)
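+        # What follows rewrites the script from the .bak copy line by line:
+        # everything up to and including the "import sys" line is copied
+        # verbatim, sys.path lines left over from a previous run (marked
+        # "# inserted by setup.py") are skipped, and a fresh
+        # sys.path.insert(0, ...) pointing at the installation directory is
+        # written before the remainder of the script.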
+        inf=file(outfile+".bak","r")
+        outf=file(outfile,"w")
+        l=inf.readline()
+        while not l.startswith("import sys") and l!="":
+            outf.write(l)
+            l=inf.readline()
+
+        if l!="":
+            outf.write(l)
+            l=inf.readline()
+            while l.endswith("# inserted by setup.py\n"):
+                l=inf.readline()
+            outf.write("sys.path.insert(0,\"%s\") # inserted by setup.py\n"%self.damaris_dir)
+            outf.write(l)
+
+        outf.writelines(inf.readlines())
+        inf.close()
+        outf.close()
+        os.remove(outfile+".bak")
+
+# create doc data file information
+distribution_doc_prefix=os.path.join("share","python-damaris","doc")
+distribution_data_files = [[ "share", []],
+                           [os.path.join("share", "python-damaris", "images"),
+                            ["src/gui/DAMARIS.png", "src/gui/DAMARIS.ico"]],
+                           [os.path.join("share", "python-damaris"), []],
+                           [distribution_doc_prefix, ['doc/index.html']]]
+
+if os.path.isdir(os.path.join("doc","reference-html")):
+    # no subdirs, work can be done in a simple way
+    distribution_data_files.append([os.path.join(distribution_doc_prefix, 'reference-html'),
+                                    [os.path.join('doc', 'reference-html', f)
+                                     for f in os.listdir(os.path.join('doc', 'reference-html'))]])
+
+if os.path.isdir(os.path.join("doc","tutorial-html")):
+    # walk the tutorial tree: style files and attachment subdirectories each
+    # get their own data_files entry
+    for d in os.walk(os.path.join("doc","tutorial-html")):
+        distribution_data_files.append([os.path.join(os.path.dirname(distribution_doc_prefix),d[0]),
+                                        [os.path.join(d[0], f) for f in d[2]]])
+
+LONG_DESCRIPTION="""
+DArmstadt MAgnetic Resonance Instrument Software
+"""
+
+GPL_LICENCE = "feed licence here"
+
+setup (
+    name = 'python-damaris',
+    version = "0.14-svn",
+    description = 'python frontend for DAMARIS (DArmstadt MAgnetic Resonance Instrument Software)',
+    long_description = LONG_DESCRIPTION,
+    author = 'Achim Gaedke',
+    author_email = 'Achim.Gaedke@physik.tu-darmstadt.de',
+    maintainer = 'Achim Gaedke',
+    maintainer_email = 'Achim.Gaedke@physik.tu-darmstadt.de',
+    url = 'http://www.fkp.physik.tu-darmstadt.de/damaris/',
+    license = GPL_LICENCE,
+    platforms = ('Any',),
+    keywords = ('NMR', 'data-processing'),
+    packages = [ 'damaris',
+                 'damaris.data',
+                 'damaris.experiments',
+                 'damaris.gui',
+                 'damaris.tools' ],
+    package_dir = { 'damaris': 'src',
+                    'damaris.data': 'src/data',
+                    'damaris.experiments': 'src/experiments',
+                    'damaris.gui': 'src/gui',
+                    'damaris.tools': 'src/tools' },
+    package_data = { 'damaris.gui': ['DAMARIS.png', 'DAMARIS.ico', 'damaris.glade', 'damaris.gladep', 'python.xml']},
+    scripts = ['scripts/DAMARIS'],
+    cmdclass={"build_scripts": build_damaris_scripts},
+    data_files = distribution_data_files
+    )
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..4baf7a6
--- /dev/null
+++ b/src/__init__.py
@@ -0,0 +1,18 @@
+##\mainpage DArmstadt MAgnetic Resonance Instrument Software
+#
+#Python Frontend based on
+# - Python/GTK
+# - Matplotlib
+# - Numpy
+# - PyTables
+#
+#Written by
+# - Achim Gaedke
+# - Christopher Schmitt
+# - Markus Rosenstihl
+# - Holger Stork
+# - Christian Tacke
+
+## module contents
+#
+__all__=["experiments", "data", "gui"]
diff --git a/src/data/ADC_Result.py b/src/data/ADC_Result.py
new file mode 100644
index 0000000..1a2d5a1
--- /dev/null
+++ b/src/data/ADC_Result.py
@@ -0,0 +1,523 @@
+# -*- coding: iso-8859-1 -*-
+
+from Resultable import Resultable
+from Drawable import Drawable
+from Signalpath import Signalpath
+from DamarisFFT import DamarisFFT
+import threading
+import numpy
+import sys
+import types
+import datetime
+import tables
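+
+# Usage sketch (hypothetical variable name): given an ADC_Result instance
+# "res", as delivered to a result script, its data can be exported with e.g.
+# res.write_to_csv("fid.dat") or res.write_to_simpson("fid.simpson").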
+#############################################################################
+#                                                                           #
+# Name: Class ADC_Result                                                    #
+#                                                                           #
+# Purpose: Specialised class of Resultable and Drawable                     #
+#          Contains recorded ADC Data                                       #
+#                                                                           #
+#############################################################################
+
+class ADC_Result(Resultable, Drawable, DamarisFFT, Signalpath):
+    def __init__(self, x = None, y = None, index = None, sampl_freq = None, desc = None, job_id = None, job_date = None):
+        Resultable.__init__(self)
+        Drawable.__init__(self)
+
+        # Title of this result: job ID and description (plotted in the GUI, see Drawable)
+        # Set in ResultReader.py (or in the copy constructor)
+        self.__title_pattern = "ADC-Result: job_id = %s, desc = %s"
+
+        # Axis labels (inherited from Drawable)
+        self.xlabel = "Time (s)"
+        self.ylabel = "Samples [Digits]"
+        self.lock=threading.RLock()
+        self.nChannels = 0
+
+        if (x is None) and (y is None) and (index is None) and (sampl_freq is None) and (desc is None) and (job_id is None) and (job_date is None):
+            self.cont_data = False
+            self.sampling_rate = 0
+            self.index = []
+            self.x = []
+            self.y = []
+
+        elif (x is not None) and (y is not None) and (index is not None) and (sampl_freq is not None) and (desc is not None) and (job_id is not None) and (job_date is not None):
+            self.x = x
+            self.y = y
+            self.index = index
+            self.sampling_rate = sampl_freq
+            self.cont_data = True
+            self.description = desc
+            self.job_id = job_id
+            self.job_date = job_date
+            title="ADC-Result: job-id=%d"%int(self.job_id)
+            if len(self.description)>0:
+                for k,v in self.description.iteritems():
+                    # string keys can be made invisible by adding two underscores in front of them
+                    if not (type(k) in types.StringTypes and k[0] == '_' and k[1] == '_'):
+                        title+=", %s=%s"%(k,v)
+            self.set_title(title)
+
+        else:
+            raise ValueError("Wrong usage of __init__!")
+
+
+    def create_data_space(self, channels, samples):
+        "Initialises the internal data structures"
+
+        if self.contains_data():
+            print "Warning ADC-Result: Tried to run \"create_data_space()\" more than once."
+            return
+
+        if channels <= 0: raise ValueError("You can't create an ADC-Result with less than 1 channel!")
+        if samples <= 0: raise ValueError("You can't create an ADC-Result with less than 1 sample!")
+
+        for i in range(channels):
+            self.y.append(numpy.zeros((samples,), dtype="Int16"))
+
+        self.x = numpy.zeros((samples,), dtype="Float64")
+
+        self.index.append((0, samples-1))
+        self.cont_data = True
+
+
+    def contains_data(self):
+        "Returns true if ADC_Result contains data. (-> create_data_space() was called)"
+        return self.cont_data
+
+
+    def add_sample_space(self, samples):
+        "Adds space for n samples, where n can also be negative (deletes space). New space is filled up with \"0\""
+
+        self.lock.acquire()
+
+        if not self.cont_data:
+            print "Warning ADC-Result: Tried to resize empty array!"
+ return + + length = len(self.y[0]) + + self.x = numpy.resize(self.x, (length+samples)) + + for i in range(self.get_number_of_channels()): + self.y[i] = numpy.resize(self.y[i], (length+samples)) + + self.index.append((length, len(self.y[0])-1)) + self.lock.release() + + + def get_result_by_index(self, index): + + self.lock.acquire() + try: + start = self.index[index][0] + end = self.index[index][1] + except: + self.lock.release() + raise + + tmp_x = self.x[start:end+1].copy() + tmp_y = [] + + for i in range(self.get_number_of_channels()): + tmp_y.append(self.y[i][start:end+1].copy()) + + r = ADC_Result(x = tmp_x, y = tmp_y, index = [(0,len(tmp_y[0])-1)], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date) + self.lock.release() + return r + + + def get_sampling_rate(self): + "Returns the samplingfrequency" + return self.sampling_rate + 0 + + + def set_sampling_rate(self, hz): + "Sets the samplingfrequency in hz" + self.sampling_rate = float(hz) + + + def get_nChannels(self): + "Gets the number of channels" + return self.nChannels + 0 + + def set_nChannels(self, channels): + "Sets the number of channels" + self.nChannels = int(channels) + + + def get_index_bounds(self, index): + "Returns a tuple with (start, end) of the wanted result" + return self.index[index] + + def uses_statistics(self): + return False + + def write_to_csv(self, destination=sys.stdout, delimiter=" "): + """ + writes the data to a file or to sys.stdout + destination can be a file or a filename + suitable for further processing + """ + # write sorted + the_destination=destination + if type(destination) in types.StringTypes: + the_destination=file(destination, "w") + + the_destination.write("# adc_result\n") + the_destination.write("# t y0 y1 ...\n") + self.lock.acquire() + try: + xdata=self.get_xdata() + ch_no=self.get_number_of_channels() + ydata=map(self.get_ydata, xrange(ch_no)) + #yerr=map(self.get_yerr, xrange(ch_no)) + for i in xrange(len(xdata)): + the_destination.write("%e"%xdata[i]) + for j in xrange(ch_no): + the_destination.write("%s%e"%(delimiter, ydata[j][i])) + the_destination.write("\n") + the_destination=None + xdata=ydata=None + finally: + self.lock.release() + + def write_to_simpson(self, destination=sys.stdout, delimiter=" "): + """ + writes the data to a text file or sys.stdout in Simpson format, + for further processing with the NMRnotebook software; + destination can be a file or a filename + """ + # write sorted + the_destination=destination + if type(destination) in types.StringTypes: + the_destination=file(destination, "w") + + self.lock.acquire() + try: + xdata=self.get_xdata() + the_destination.write("SIMP\n") + the_destination.write("%s%i%s"%("NP=", len(xdata), "\n")) + the_destination.write("%s%i%s"%("SW=", self.get_sampling_rate(), "\n")) + the_destination.write("TYPE=FID\n") + the_destination.write("DATA\n") + ch_no=self.get_number_of_channels() + ydata=map(self.get_ydata, xrange(ch_no)) + for i in xrange(len(xdata)): + for j in xrange(ch_no): + the_destination.write("%g%s"%(ydata[j][i], delimiter)) + the_destination.write("\n") + the_destination.write("END\n") + the_destination=None + xdata=ydata=None + finally: + self.lock.release() + + def write_to_hdf(self, hdffile, where, name, title, complib=None, complevel=None): + accu_group=hdffile.createGroup(where=where,name=name,title=title) + accu_group._v_attrs.damaris_type="ADC_Result" + if self.contains_data(): + self.lock.acquire() + try: + # save time stamps + if "job_date" in dir(self) 
and self.job_date is not None: + accu_group._v_attrs.time="%04d%02d%02d %02d:%02d:%02d.%03d"%(self.job_date.year, + self.job_date.month, + self.job_date.day, + self.job_date.hour, + self.job_date.minute, + self.job_date.second, + self.job_date.microsecond/1000) + + if self.description is not None: + for (key,value) in self.description.iteritems(): + accu_group._v_attrs.__setattr__("description_"+key,str(value)) + + # save interval information + filter=None + if complib is not None: + if complevel is None: + complevel=9 + filter=tables.Filters(complevel=complevel,complib=complib,shuffle=1) + + index_table=hdffile.createTable(where=accu_group, + name="indices", + description={"start": tables.UInt64Col(), + "length": tables.UInt64Col(), + "start_time": tables.Float64Col(), + "dwelltime": tables.Float64Col()}, + title="indices of adc data intervals", + filters=filter, + expectedrows=len(self.index)) + index_table.flavor="numpy" + # save channel data + new_row=index_table.row + for i in xrange(len(self.index)): + new_row["start"]=self.index[i][0] + new_row["dwelltime"]=1.0/self.sampling_rate + new_row["start_time"]=1.0/self.sampling_rate*self.index[i][0] + new_row["length"]=self.index[i][1]-self.index[i][0]+1 + new_row.append() + + index_table.flush() + new_row=None + index_table=None + + # prepare saving data + channel_no=len(self.y) + timedata=numpy.empty((len(self.y[0]),channel_no), + dtype = "Int32") + for ch in xrange(channel_no): + timedata[:,ch]=self.get_ydata(ch) + + # save data + time_slice_data=None + if filter is not None: + chunkshape = numpy.shape(timedata) + if len(chunkshape) <= 1: + chunkshape = (min(chunkshape[0],1024*8),) + else: + chunkshape = (min(chunkshape[0],1024*8), chunkshape[1]) + if tables.__version__[0]=="1": + time_slice_data=hdffile.createCArray(accu_group, + name="adc_data", + shape=timedata.shape, + atom=tables.Int32Atom(shape=chunkshape, + flavor="numpy"), + filters=filter, + title="adc data") + else: + time_slice_data=hdffile.createCArray(accu_group, + name="adc_data", + shape=timedata.shape, + chunkshape=chunkshape, + atom=tables.Int32Atom(), + filters=filter, + title="adc data") + time_slice_data[:]=timedata + else: + time_slice_data=hdffile.createArray(accu_group, + name="adc_data", + object=timedata, + title="adc data") + + finally: + timedata=None + time_slice_data=None + accu_group=None + self.lock.release() + + # �berladen von Operatoren und Built-Ins ------------------------------------------------------- + + def __len__(self): + "Redefining len(ADC_Result obj), returns the number of samples in one channel and 0 without data" + if len(self.y)>0: + return len(self.y[0]) + return 0 + + def __repr__(self): + """ + writes job meta data and data to string returned + """ + tmp_string = "Job ID: " + str(self.job_id) + "\n" + tmp_string += "Job Date: " + str(self.job_date) + "\n" + tmp_string += "Description: " + str(self.description) + "\n" + if len(self.y)>0: + tmp_string += "Indexes: " + str(self.index) + "\n" + tmp_string += "Samples per Channel: " + str(len(self.y[0])) + "\n" + tmp_string += "Samplingfrequency: " + str(self.sampling_rate) + "\n" + tmp_string += "X: " + repr(self.x) + "\n" + for i in range(self.get_number_of_channels()): + tmp_string += ("Y(%d): " % i) + repr(self.y[i]) + "\n" + + return tmp_string + + def __add__(self, other): + "Redefining self + other (scalar)" + if isinstance(other, types.IntType) or isinstance(other, types.FloatType): + self.lock.acquire() + tmp_y = [] + + for i in range(self.get_number_of_channels()): + 
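+                # promote each channel to Float64 first, so the scalar arithmetic
+                # does not truncate or overflow the integer ADC samples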
tmp_y.append(numpy.array(self.y[i], dtype="Float64") + other)
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+        else:
+            raise ValueError("ValueError: Cannot add \"%s\" to ADC-Result!" % str(other.__class__))
+
+
+    def __radd__(self, other):
+        "Redefining other (scalar) + self"
+        return self.__add__(other)
+
+
+    def __sub__(self, other):
+        "Redefining self - other (scalar)"
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            self.lock.acquire()
+            tmp_y = []
+
+            for i in range(self.get_number_of_channels()):
+                tmp_y.append(numpy.array(self.y[i], dtype="Float64") - other)
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+        else:
+            raise ValueError("ValueError: Cannot subtract \"%s\" from ADC-Result!" % str(other.__class__))
+
+
+    def __rsub__(self, other):
+        "Redefining other (scalar) - self"
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            self.lock.acquire()
+            tmp_y = []
+
+            for i in range(self.get_number_of_channels()):
+                tmp_y.append(other - numpy.array(self.y[i], dtype="Float64"))
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+
+        else:
+            raise ValueError("ValueError: Cannot subtract ADC-Result from \"%s\"!" % str(other.__class__))
+
+
+    def __mul__(self, other):
+        "Redefining self * other (scalar)"
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            self.lock.acquire()
+            tmp_y = []
+
+            for i in range(self.get_number_of_channels()):
+                tmp_y.append(numpy.array(self.y[i], dtype="Float64") * other)
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+        else:
+            raise ValueError("ValueError: Cannot multiply ADC-Result by \"%s\"!" % str(other.__class__))
+
+
+    def __rmul__(self, other):
+        "Redefining other (scalar) * self"
+        return self.__mul__(other)
+
+
+    def __pow__(self, other):
+        "Redefining self ** other (scalar)"
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            self.lock.acquire()
+            tmp_y = []
+
+            for i in range(self.get_number_of_channels()):
+                tmp_y.append(numpy.array(self.y[i], dtype="Float64") ** other)
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+        else:
+            raise ValueError("ValueError: Cannot raise ADC-Result to the power of \"%s\"!" % str(other.__class__))
+
+
+    def __div__(self, other):
+        "Redefining self / other (scalar)"
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            self.lock.acquire()
+            tmp_y = []
+
+            for i in range(self.get_number_of_channels()):
+                tmp_y.append(numpy.array(self.y[i], dtype="Float64") / other)
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+        else:
+            raise ValueError("ValueError: Cannot divide ADC-Result by \"%s\"!" % str(other.__class__))
+
+
+    def __rdiv__(self, other):
+        "Redefining other (scalar) / self"
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            self.lock.acquire()
+            tmp_y = []
+
+            for i in range(self.get_number_of_channels()):
+                tmp_y.append(other / numpy.array(self.y[i], dtype="Float64"))
+
+            r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+            self.lock.release()
+            return r
+        else:
+            raise ValueError("ValueError: Cannot divide \"%s\" by ADC-Result!" % str(other.__class__))
+
+
+    def __neg__(self):
+        "Redefining -self"
+        self.lock.acquire()
+        tmp_y = []
+
+        for i in range(self.get_number_of_channels()):
+            tmp_y.append(numpy.array(-self.y[i]))
+
+        r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
+        self.lock.release()
+        return r
+
+
+def read_from_hdf(hdf_node):
+    """
+    read ADC result data from an HDF node and return it.
+    """
+
+    # formal checks first
+    if not isinstance(hdf_node, tables.Group):
+        return None
+
+    if hdf_node._v_attrs.damaris_type!="ADC_Result":
+        return None
+
+    if not (hdf_node.__contains__("indices") and hdf_node.__contains__("adc_data")):
+        return None
+
+    # job id and x,y titles are missing
+    adc=ADC_Result()
+    # populate description dictionary
+    adc.description={}
+    for attrname in hdf_node._v_attrs._v_attrnamesuser:
+        if attrname.startswith("description_"):
+            adc.description[attrname[12:]]=hdf_node._v_attrs.__getattr__(attrname)
+
+    if "time" in dir(hdf_node._v_attrs):
+        timestring=hdf_node._v_attrs.__getattr__("time")
+        adc.job_date=datetime.datetime(int(timestring[:4]),        # year
+                                       int(timestring[4:6]),       # month
+                                       int(timestring[6:8]),       # day
+                                       int(timestring[9:11]),      # hour
+                                       int(timestring[12:14]),     # minute
+                                       int(timestring[15:17]),     # second
+                                       int(timestring[18:21])*1000 # microsecond
+                                       )
+
+    # start with indices
+    for r in hdf_node.indices.iterrows():
+        adc.index.append((r["start"],r["start"]+r["length"]-1))
+        adc.sampling_rate=1.0/r["dwelltime"]
+
+    # now we really believe there are no data
+    if len(adc.index)==0:
+        adc.cont_data=False
+        return adc
+
+    adc.cont_data=True
+    # now do the real data
+    adc_data=hdf_node.adc_data.read()
+
+    adc.x=numpy.arange(adc_data.shape[0], dtype="Float64")/adc.sampling_rate
+
+    for ch in xrange(adc_data.shape[1]):
+        adc.y.append(adc_data[:,ch])
+
+    return adc
diff --git a/src/data/Accumulation.py b/src/data/Accumulation.py
new file mode 100644
index 0000000..15fa1f0
--- /dev/null
+++ b/src/data/Accumulation.py
@@ -0,0 +1,824 @@
+# -*- coding: iso-8859-1 -*-
+
+#############################################################################
+#                                                                           #
+# Name: Class Accumulation                                                  #
+#                                                                           #
+# Purpose: Specialised class of Errorable and Drawable                      #
+#          Contains accumulated ADC-Data                                    #
+#                                                                           #
+#############################################################################
+
+from Errorable import Errorable
+from Drawable import Drawable
+from DamarisFFT import DamarisFFT
+from Signalpath import Signalpath
+import sys
+import threading
+import types
+import tables
+import numpy
+import datetime
+
+class Accumulation(Errorable, Drawable, DamarisFFT, Signalpath):
+    def __init__(self, x = None, y = None, y_2 = None, n = None, index = None, sampl_freq = None, error = False):
+        Errorable.__init__(self)
+        Drawable.__init__(self)
+
+        # Title of this accumulation (plotted in GUI -> look Drawable)
self.__title_pattern = "Accumulation: n = %d" + + # Axis-Labels (inherited from Drawable) + self.xlabel = "Time (s)" + self.ylabel = "Avg. Samples [Digits]" + self.lock=threading.RLock() + + self.common_descriptions=None + self.time_period=[] + + self.use_error = error + + if self.uses_statistics(): + if (y_2 is not None): + self.y_square = y_2 + elif (y_2 is None) : + self.y_square = [] + else: + raise ValueError("Wrong usage of __init__!") + + + if (x is None) and (y is None) and (index is None) and (sampl_freq is None) and (n is None): + self.sampling_rate = 0 + + self.n = 0 + self.set_title(self.__title_pattern % self.n) + + self.cont_data = False + self.index = [] + self.x = [] + self.y = [] + + elif (x is not None) and (y is not None) and (index is not None) and (sampl_freq is not None) and (n is not None): + self.x = x + self.y = y + self.sampling_rate = sampl_freq + + self.n = n + self.set_title(self.__title_pattern % self.n) + + self.index = index + self.cont_data = True + + else: + raise ValueError("Wrong usage of __init__!") + + + def get_accu_by_index(self, index): + self.lock.acquire() + try: + start = self.index[index][0] + end = self.index[index][1] + except: + self.lock.release() + raise + + tmp_x = self.x[start:end+1] + tmp_y = [] + + for i in range(self.get_number_of_channels()): + tmp_y.append(self.y[i][start:end+1]) + + r = Accumulation(x = tmp_x, y = tmp_y, n = self.n, index = [(0,len(tmp_y[0])-1)], sampl_freq = self.sampling_rate, error = self.use_error) + self.lock.release() + return r + + def get_ysquare(self, channel): + if self.uses_statistics(): + try: + return self.y_square[channel] + except: + raise + else: return None + + def contains_data(self): + return self.cont_data + + + def get_sampling_rate(self): + "Returns the samplingfrequency" + return self.sampling_rate + 0 + + + def get_index_bounds(self, index): + "Returns a tuple with (start, end) of the wanted result" + return self.index[index] + + + def uses_statistics(self): + return self.use_error + + # Schnittstellen nach Außen -------------------------------------------------------------------- + + def get_yerr(self, channel): + """ + return error (std.dev/sqrt(n)) of mean + """ + + if not self.uses_statistics(): return numpy.zeros((len(self.y[0]),),dtype="Float64") + if not self.contains_data(): return [] + + self.lock.acquire() + if self.n < 2: + retval=numpy.zeros((len(self.y[0]),),dtype="Float64") + self.lock.release() + return retval + try: + variance_over_n = (self.y_square[channel] - (self.y[channel]**2 / float(self.n)))/float((self.n-1)*self.n) + except IndexError: + print "Warning Accumulation.get_ydata(channel): Channel index does not exist." + variance_over_n = numpy.zeros((len(self.y[0]),), dtype="Float64") + self.lock.release() + # sample standard deviation / sqrt(n) + return numpy.nan_to_num(numpy.sqrt(variance_over_n)) + + def get_ydata(self, channel): + """ + return mean data + """ + + if not self.contains_data(): return [] + self.lock.acquire() + + try: + tmp_y = self.y[channel] / self.n + except IndexError: + print "Warning Accumulation.get_ydata(channel): Channel index does not exist." 
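+            # fall back to a zero trace of matching length so a bad channel
+            # index only warns instead of crashing the caller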
+ tmp_y = numpy.zeros((len(self.y[0]),), dtype="Float64") + + self.lock.release() + return tmp_y + + + def get_ymin(self): + + if not self.contains_data(): return 0 + + tmp_min = [] + self.lock.acquire() + for i in range(self.get_number_of_channels()): + tmp_min.append(self.get_ydata(i).min()) + + if self.uses_statistics() and self.ready_for_drawing_error(): + for i in range(self.get_number_of_channels()): + tmp_min.append((self.get_ydata(i) - self.get_yerr(i)).min()) + self.lock.release() + + return min(tmp_min) + + + def get_ymax(self): + + if not self.contains_data(): return 0 + + tmp_max = [] + self.lock.acquire() + for i in range(self.get_number_of_channels()): + tmp_max.append(self.get_ydata(i).max()) + + if self.uses_statistics() and self.ready_for_drawing_error(): + for i in range(self.get_number_of_channels()): + tmp_max.append((self.get_ydata(i) + self.get_yerr(i)).max()) + self.lock.release() + return max(tmp_max) + + def get_job_id(self): + return None + + def write_to_csv(self, destination=sys.stdout, delimiter=" "): + """ + writes the data to a file or to sys.stdout + destination can be a file or a filename + suitable for further processing + """ + + the_destination=destination + if type(destination) in types.StringTypes: + the_destination=file(destination, "w") + + the_destination.write("# accumulation %d\n"%self.n) + self.lock.acquire() + try: + if self.common_descriptions is not None: + for (key,value) in self.common_descriptions.iteritems(): + the_destination.write("# %s : %s\n"%(key, str(value))) + the_destination.write("# t") + ch_no=self.get_number_of_channels() + if self.use_error: + for i in xrange(ch_no): the_destination.write(" ch%d_mean ch%d_err"%(i,i)) + else: + for i in xrange(ch_no): the_destination.write(" ch%d_mean"%i) + the_destination.write("\n") + xdata=self.get_xdata() + ydata=map(self.get_ydata, xrange(ch_no)) + yerr=None + if self.use_error: + yerr=map(self.get_yerr, xrange(ch_no)) + for i in xrange(len(xdata)): + the_destination.write("%e"%xdata[i]) + for j in xrange(ch_no): + if self.use_error: + the_destination.write("%s%e%s%e"%(delimiter, ydata[j][i], delimiter, yerr[j][i])) + else: + the_destination.write("%s%e"%(delimiter,ydata[j][i])) + the_destination.write("\n") + the_destination=None + xdata=yerr=ydata=None + finally: + self.lock.release() + + def write_to_simpson(self, destination=sys.stdout, delimiter=" "): + """ + writes the data to a text file or sys.stdout in Simpson format, + for further processing with the NMRnotebook software; + destination can be a file or a filename + """ + # write sorted + the_destination=destination + if type(destination) in types.StringTypes: + the_destination=file(destination, "w") + + self.lock.acquire() + try: + xdata=self.get_xdata() + the_destination.write("SIMP\n") + the_destination.write("%s%i%s"%("NP=", len(xdata), "\n")) + the_destination.write("%s%i%s"%("SW=", self.get_sampling_rate(), "\n")) + the_destination.write("TYPE=FID\n") + the_destination.write("DATA\n") + ch_no=self.get_number_of_channels() + ydata=map(self.get_ydata, xrange(ch_no)) + for i in xrange(len(xdata)): + for j in xrange(ch_no): + the_destination.write("%g%s"%(ydata[j][i], delimiter)) + the_destination.write("\n") + the_destination.write("END\n") + the_destination=None + xdata=ydata=None + finally: + self.lock.release() + + def write_to_hdf(self, hdffile, where, name, title, complib=None, complevel=None): + accu_group=hdffile.createGroup(where=where,name=name,title=title) + accu_group._v_attrs.damaris_type="Accumulation" + if 
self.contains_data(): + self.lock.acquire() + try: + # save time stamps + if self.time_period is not None and len(self.time_period)>0: + accu_group._v_attrs.earliest_time="%04d%02d%02d %02d:%02d:%02d.%03d"%(self.time_period[0].year, + self.time_period[0].month, + self.time_period[0].day, + self.time_period[0].hour, + self.time_period[0].minute, + self.time_period[0].second, + self.time_period[0].microsecond/1000) + accu_group._v_attrs.oldest_time="%04d%02d%02d %02d:%02d:%02d.%03d"%(self.time_period[1].year, + self.time_period[1].month, + self.time_period[1].day, + self.time_period[1].hour, + self.time_period[1].minute, + self.time_period[1].second, + self.time_period[1].microsecond/1000) + if self.common_descriptions is not None: + for (key,value) in self.common_descriptions.iteritems(): + accu_group._v_attrs.__setattr__("description_"+key,str(value)) + + # save interval information + filter=None + if complib is not None: + if complevel is None: + complevel=9 + filter=tables.Filters(complevel=complevel,complib=complib,shuffle=1) + + # tried compression filter, but no effect... + index_table=hdffile.createTable(where=accu_group, + name="indices", + description={"start": tables.UInt64Col(), + "length": tables.UInt64Col(), + "start_time": tables.Float64Col(), + "dwelltime": tables.Float64Col(), + "number": tables.UInt64Col()}, + title="indices of adc data intervals", + filters=filter, + expectedrows=len(self.index)) + index_table.flavor="numpy" + # save interval data + new_row=index_table.row + for i in xrange(len(self.index)): + new_row["start"]=self.index[i][0] + new_row["dwelltime"]=1.0/self.sampling_rate + new_row["start_time"]=1.0/self.sampling_rate*self.index[i][0] + new_row["length"]=self.index[i][1]-self.index[i][0]+1 + new_row["number"]=self.n + new_row.append() + + index_table.flush() + new_row=None + index_table=None + + # prepare saving data + channel_no=len(self.y) + timedata=numpy.empty((len(self.y[0]),channel_no*2), dtype = "Float64") + for ch in xrange(channel_no): + timedata[:,ch*2]=self.get_ydata(ch) + if self.uses_statistics(): + timedata[:,ch*2+1]=self.get_yerr(ch) + else: + timedata[:,ch*2+1]=numpy.zeros((len(self.y[0]),),dtype = "Float64") + + # save data + time_slice_data=None + if filter is not None: + chunkshape=timedata.shape + if len(chunkshape) <= 1: + chunkshape = (min(chunkshape[0],1024*8),) + else: + chunkshape = (min(chunkshape[0],1024*8), chunkshape[1]) + if tables.__version__[0]=="1": + time_slice_data=hdffile.createCArray(accu_group, + name="accu_data", + shape=timedata.shape, + atom=tables.Float64Atom(shape=chunkshape, + flavor="numpy"), + filters=filter, + title="accu data") + else: + time_slice_data=hdffile.createCArray(accu_group, + name="accu_data", + shape=timedata.shape, + chunkshape=chunkshape, + atom=tables.Float64Atom(), + filters=filter, + title="accu data") + + time_slice_data[:]=timedata + else: + time_slice_data=hdffile.createArray(accu_group, + name="accu_data", + object=timedata, + title="accu data") + + + + finally: + time_slice_data=None + accu_group=None + self.lock.release() + + # / Schnittstellen nach Außen ------------------------------------------------------------------ + + # Überladen von Operatoren --------------------------------------------------------------------- + + def __len__(self): + """ + return number of samples per channel, 0 if empty + """ + if len(self.y)>0: + return len(self.y[0]) + return 0 + + def __repr__(self): + "Redefining repr(Accumulation)" + + if not self.contains_data(): return "Empty" + + tmp_string = "X: " 
+ repr(self.x) + "\n" + + for i in range(self.get_number_of_channels()): + tmp_string += ("Y(%d): " % i) + repr(self.y[i]) + "\n" + if self.uses_statistics(): tmp_string += "y_square(%d): " % i + str(self.y_square[i]) + "\n" + + tmp_string += "Indexes: " + str(self.index) + "\n" + + tmp_string += "Samples per Channel: " + str(len(self.y[0])) + "\n" + tmp_string += "Samplingfrequency: " + str(self.sampling_rate) + "\n" + + tmp_string += "n: " + str(self.n) + + return tmp_string + + + def __add__(self, other): + "Redefining self + other" + # Float or int + if isinstance(other, types.IntType) or isinstance(other, types.FloatType): + if not self.contains_data(): raise ValueError("Accumulation: You cant add integers/floats to an empty accumulation") + else: + + tmp_y = [] + tmp_ysquare = [] + + + self.lock.acquire() + for i in range(self.get_number_of_channels()): + # Dont change errors and mean value + if self.uses_statistics(): tmp_ysquare.append(self.y_square[i] + ( (2*self.y[i]*other) + ((other**2)*self.n) )) + tmp_y.append(self.y[i] + (other*self.n)) + + if self.uses_statistics(): + r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = self.use_error) + else: + r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = self.use_error) + + self.lock.release() + return r + + # ADC_Result + elif str(other.__class__) == "damaris.data.ADC_Result.ADC_Result": + + # Other empty (return) + # todo: this is seems to be bugy!!!! (Achim) + if not other.contains_data(): return + + # Self empty (copy) + if not self.contains_data(): + + tmp_y = [] + tmp_ysquare = [] + + self.lock.acquire() + + for i in range(other.get_number_of_channels()): + tmp_y.append(numpy.array(other.y[i], dtype="Float64")) + if self.uses_statistics(): tmp_ysquare.append(tmp_y[i] ** 2) + + + if self.uses_statistics(): + r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = 1, index = other.index, sampl_freq = other.sampling_rate, error = True) + else: + r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, index = other.index, sampl_freq = other.sampling_rate, n = 1, error = False) + r.time_period=[other.job_date,other.job_date] + r.common_descriptions=other.description.copy() + self.lock.release() + return r + + # Other and self not empty (self + other) + else: + self.lock.acquire() + + if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You cant add ADC-Results with diffrent sampling-rates") + if len(self.y[0]) != len(other): raise ValueError("Accumulation: You cant add ADC-Results with diffrent number of samples") + if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You cant add ADC-Results with diffrent number of channels") + for i in range(len(self.index)): + if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You cant add ADC-Results with diffrent indexing") + + tmp_y = [] + tmp_ysquare = [] + + for i in range(self.get_number_of_channels()): + tmp_y.append(self.y[i] + other.y[i]) + if self.uses_statistics(): tmp_ysquare.append(self.y_square[i] + (numpy.array(other.y[i], dtype="Float64") ** 2)) + + if self.uses_statistics(): + r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = self.n + 1, index = self.index, sampl_freq = self.sampling_rate, error = 
True)
+                else:
+                    r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = self.n + 1, index = self.index, sampl_freq = self.sampling_rate, error = False)
+                r.time_period=[min(self.time_period[0],other.job_date),
+                               max(self.time_period[1],other.job_date)]
+                if self.common_descriptions is not None:
+                    r.common_descriptions={}
+                    for key in self.common_descriptions.keys():
+                        if (key in other.description and self.common_descriptions[key]==other.description[key]):
+                            r.common_descriptions[key]=self.common_descriptions[key]
+
+                self.lock.release()
+                return r
+
+        # Accumulation
+        elif str(other.__class__) == "damaris.data.Accumulation.Accumulation":
+
+            # Other empty (return)
+            if not other.contains_data(): return
+
+            # Self empty (copy)
+            if not self.contains_data():
+
+                tmp_y = []
+                tmp_ysquare = []
+
+                self.lock.acquire()
+
+                if self.uses_statistics():
+                    r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = other.n, index = other.index, sampl_freq = other.sampling_rate, error = True)
+                else:
+                    r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, n = other.n, index = other.index, sampl_freq = other.sampling_rate, error = False)
+                for i in range(other.get_number_of_channels()):
+                    tmp_y.append(other.y[i])
+                    if self.uses_statistics(): tmp_ysquare.append(other.y_square[i])
+                r.time_period=other.time_period[:]
+                if other.common_descriptions is not None:
+                    r.common_descriptions=other.common_descriptions.copy()
+                else:
+                    r.common_descriptions=None
+
+                self.lock.release()
+                return r
+
+            # Other and self not empty (self + other)
+            else:
+                self.lock.acquire()
+
+                if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You can't add accumulations with different sampling-rates")
+                if len(self.y[0]) != len(other): raise ValueError("Accumulation: You can't add accumulations with different number of samples")
+                if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You can't add accumulations with different number of channels")
+                for i in range(len(self.index)):
+                    if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You can't add accumulations with different indexing")
+                if self.uses_statistics() and not other.uses_statistics(): raise ValueError("Accumulation: You can't add non-error accumulations to accumulations with error")
+
+                tmp_y = []
+                tmp_ysquare = []
+
+                for i in range(self.get_number_of_channels()):
+                    tmp_y.append(self.y[i] + other.y[i])
+                    if self.uses_statistics(): tmp_ysquare.append(self.y_square[i] + other.y_square[i])
+
+                if self.uses_statistics():
+                    r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = other.n + self.n, index = self.index, sampl_freq = self.sampling_rate, error = True)
+                else:
+                    r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = other.n + self.n, index = self.index, sampl_freq = self.sampling_rate, error = False)
+
+                r.time_period=[min(self.time_period[0],other.time_period[0]),
+                               max(self.time_period[1],other.time_period[1])]
+                r.common_descriptions={}
+                if self.common_descriptions is not None and other.common_descriptions is not None:
+                    for key in self.common_descriptions.keys():
+                        if (key in other.common_descriptions and
+                            self.common_descriptions[key]==other.common_descriptions[key]):
+                            r.common_descriptions[key]=self.common_descriptions[key]
+
+                self.lock.release()
+                return r
+
+
+    def __radd__(self, other):
+        "Redefining other + self"
+        return self.__add__(other)
+
+
+    def __sub__(self, other):
+        "Redefining self - other"
+        return self.__add__(-other)
+
+
+    def __rsub__(self, other):
+        "Redefining other - self"
+        # other - self == -(self - other); __neg__ takes no extra argument
+        return self.__add__(-other).__neg__()
+
+
+    def __iadd__(self, other):
+        "Redefining self += other"
+        # Float or int
+        if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
+            if not self.contains_data(): raise ValueError("Accumulation: You can't add integers/floats to an empty accumulation")
+            else:
+
+                self.lock.acquire()
+                for i in range(self.get_number_of_channels()):
+                    # Don't change errors and mean value
+                    if self.uses_statistics(): self.y_square[i] += (2*self.y[i]*other) + ((other**2)*self.n)
+                    self.y[i] += other*self.n
+                self.lock.release()
+
+                return self
+
+        # ADC_Result
+        elif str(other.__class__) == "damaris.data.ADC_Result.ADC_Result":
+
+            # Other empty (return)
+            if not other.contains_data(): return self
+
+            # Self empty (copy)
+            if not self.contains_data():
+                self.lock.acquire()
+                self.n += 1
+                self.index = other.index[0:]
+                self.sampling_rate = other.sampling_rate
+                self.x = numpy.array(other.x, dtype="Float64")
+                self.cont_data = True
+
+                for i in range(other.get_number_of_channels()):
+                    self.y.append(numpy.array(other.y[i], dtype="Float64"))
+                    if self.uses_statistics(): self.y_square.append(self.y[i] ** 2)
+
+                self.set_title(self.__title_pattern % self.n)
+                self.lock.release()
+
+                self.time_period=[other.job_date,other.job_date]
+                self.common_descriptions=other.description.copy()
+
+                return self
+
+
+            # Other and self not empty (self + other)
+            else:
+                self.lock.acquire()
+
+                if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You can't add ADC-Results with different sampling-rates")
+                if len(self.y[0]) != len(other): raise ValueError("Accumulation: You can't add ADC-Results with different number of samples")
+                if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You can't add ADC-Results with different number of channels")
+                for i in range(len(self.index)):
+                    if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You can't add ADC-Results with different indexing")
+
+                for i in range(self.get_number_of_channels()):
+                    self.y[i] += other.y[i]
+                    if self.uses_statistics(): self.y_square[i] += numpy.array(other.y[i], dtype="Float64") ** 2
+
+                self.n += 1
+                self.time_period=[min(self.time_period[0],other.job_date),
+                                  max(self.time_period[1],other.job_date)]
+                if self.common_descriptions is not None:
+                    for key in self.common_descriptions.keys():
+                        if not (key in other.description and self.common_descriptions[key]==other.description[key]):
+                            del self.common_descriptions[key]
+
+                self.set_title(self.__title_pattern % self.n)
+                self.lock.release()
+
+                return self
+
+        # Accumulation
+        elif str(other.__class__) == "damaris.data.Accumulation.Accumulation":
+
+            # Other empty (return)
+            if not other.contains_data(): return
+
+            # Self empty (copy)
+            if not self.contains_data():
+                if self.uses_statistics() and not other.uses_statistics(): raise ValueError("Accumulation: You can't add non-error accumulations to accumulations with error")
+
+                self.lock.acquire()
+                self.n += other.n
+                self.index = other.index[0:]
+                self.sampling_rate = other.sampling_rate
+                self.x = numpy.array(other.x, dtype="Float64")
+                self.cont_data = True
+
+                for i in range(other.get_number_of_channels()):
+                    self.y.append(numpy.array(other.y[i], dtype="Float64"))
+                    if self.uses_statistics(): self.y_square.append(numpy.array(other.y_square[i], dtype="Float64"))
+
+                self.set_title(self.__title_pattern % self.n)
+                if other.common_descriptions is not None:
+                    self.common_descriptions=other.common_descriptions.copy()
+                else:
+                    self.common_descriptions=None
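+                # slice copy so this accumulation's time span does not alias other's list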
self.time_period=other.time_period[:] + self.lock.release() + + return self + + # Other and self not empty (self + other) + else: + self.lock.acquire() + if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You cant add accumulations with diffrent sampling-rates") + if len(self.y[0]) != len(other): raise ValueError("Accumulation: You cant add accumulations with diffrent number of samples") + if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You cant add accumulations with diffrent number of channels") + for i in range(len(self.index)): + if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You cant add accumulations with diffrent indexing") + if self.uses_statistics() and not other.uses_statistics(): raise ValueError("Accumulation: You cant add non-error accumulations to accumulations with error") + + for i in range(self.get_number_of_channels()): + self.y[i] += other.y[i] + if self.uses_statistics(): self.y_square[i] += other.y_square[i] + + self.n += other.n + self.time_period=[min(self.time_period[0],other.time_period[0]), + max(self.time_period[1],other.time_period[1])] + if self.common_descriptions is not None and other.common_descriptions is not None: + for key in self.common_descriptions.keys(): + if not (key in other.description and + self.common_descriptions[key]==other.common_descriptions[key]): + del self.common_descriptions[key] + + self.set_title(self.__title_pattern % self.n) + self.lock.release() + + return self + + elif other is None: + # Convenience: ignore add of None + return self + else: + raise ValueError("can not add "+repr(type(other))+" to Accumulation") + + + def __isub__(self, other): + "Redefining self -= other" + return self.__iadd__(-other) + + + def __neg__(self): + "Redefining -self" + + if not self.contains_data(): return + + tmp_y = [] + + self.lock.acquire() + for i in range(self.get_number_of_channels()): + tmp_y.append(numpy.array(-self.y[i], dtype="Float64")) + + if self.uses_statistics(): + r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = numpy.array(self.y_square), n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = True) + else: + r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = False) + self.lock.release() + return r + + +def read_from_hdf(hdf_node): + """ + read accumulation data from HDF node and return it. 
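+    Expects the layout written by Accumulation.write_to_hdf: a group whose
+    damaris_type attribute is "Accumulation", containing an "indices" table
+    and an "accu_data" array with one (mean, error) column pair per channel.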
+ """ + + # formal checks first + if not isinstance(hdf_node, tables.Group): + return None + + if hdf_node._v_attrs.damaris_type!="Accumulation": + return None + + if not (hdf_node.__contains__("indices") and hdf_node.__contains__("accu_data")): + print "no accu data" + return None + + accu=Accumulation() + + # populate description dictionary + accu.common_descriptions={} + for attrname in hdf_node._v_attrs._v_attrnamesuser: + if attrname.startswith("description_"): + accu.common_descriptions[attrname[12:]]=hdf_node._v_attrs.__getattr__(attrname) + + eariliest_time=None + if "earliest_time" in dir(hdf_node._v_attrs): + timestring=hdf_node._v_attrs.__getattr__("earliest_time") + earliest_time=datetime.datetime(int(timestring[:4]), # year + int(timestring[4:6]), # month + int(timestring[6:8]), # day + int(timestring[9:11]), # hour + int(timestring[12:14]), # minute + int(timestring[15:17]), # second + int(timestring[18:21])*1000 # microsecond + ) + + oldest_time=None + if "oldest_time" in dir(hdf_node._v_attrs): + timestring=hdf_node._v_attrs.__getattr__("oldest_time") + oldest_time=datetime.datetime(int(timestring[:4]), # year + int(timestring[4:6]), # month + int(timestring[6:8]), # day + int(timestring[9:11]), # hour + int(timestring[12:14]), # minute + int(timestring[15:17]), # second + int(timestring[18:21])*1000 # microsecond + ) + + if oldest_time is None or earliest_time is None: + accu.time_period=None + if len(accu.common_descriptions)==0: + # no accus inside, so no common description expected + accu.common_descriptions=None + accu.cont_data=False + else: + accu.time_period=[oldest_time, earliest_time] + accu.cont_data=True + + # start with indices + for r in hdf_node.indices.iterrows(): + accu.index.append((r["start"],r["start"]+r["length"]-1)) + accu.n=r["number"] + accu.sampling_rate=1.0/r["dwelltime"] + + # now really belief there are no data + if len(accu.index)==0 or accu.n==0: + accu.cont_data=False + return accu + + # now do the real data + accu_data=hdf_node.accu_data.read() + + accu.x=numpy.arange(accu_data.shape[0], dtype="Float64")/accu.sampling_rate + # assume error information, todo: save this information explicitly + accu.y_square=[] + accu.use_error=False + + for ch in xrange(accu_data.shape[1]/2): + accu.y.append(accu_data[:,ch*2]*accu.n) + if accu.n<2 or numpy.all(accu_data[:,ch*2+1]==0.0): + accu.y_square.append(numpy.zeros((accu_data.shape[0]) ,dtype="Float64")) + else: + accu.use_error=True + accu.y_square.append((accu_data[:,ch*2+1]**2)*float((accu.n-1.0)*accu.n)+(accu_data[:,ch*2]**2)*accu.n) + + if not accu.use_error: + del accu.y_square + + return accu diff --git a/src/data/Config_Result.py b/src/data/Config_Result.py new file mode 100644 index 0000000..4290c02 --- /dev/null +++ b/src/data/Config_Result.py @@ -0,0 +1,54 @@ +# -*- coding: iso-8859-1 -*- + +from Resultable import Resultable + +############################################################################# +# # +# Name: Class Error_Result # +# # +# Purpose: Specialised class of Resultable # +# Contains occured error-messages from the core # +# # +############################################################################# + +class Config_Result(Resultable): + def __init__(self, config = None, desc = None, job_id = None, job_date = None): + Resultable.__init__(self) + + if config is None: self.config = { } + if desc is None: self.description = { } + self.job_id = job_id + self.job_date = job_date + + + + def get_config_dictionary(self): + return self.config + + + def set_config_dictionary(self, 
config): + self.config = config + + + def get_config(self, key): + if self.config.has_key(key): return self.config[key] + else: return None + + + def set_config(self, key, value): + if self.config.has_key(key): + print "Warning Config_Result: Key \"%s\" will be overwritten with \"%s\"" % (key, value) + + self.config[key] = value + + + # Überladen von Operatoren und Built-Ins ------------------------------------------------------- + + def __repr__(self): + return str(self.config) + + + def __str__(self): + return str(self.config) + + diff --git a/src/data/DaFFT.py b/src/data/DaFFT.py new file mode 100644 index 0000000..0aaf739 --- /dev/null +++ b/src/data/DaFFT.py @@ -0,0 +1,211 @@ +import warnings +# enable warnings in Python 2.7 +warnings.simplefilter('default') +warnings.warn("use of DaFFT is deprecated, please use the methods of Accumulation and ADC_data classes provided by DamarisFFT.", DeprecationWarning) +import numpy as N +import numpy.fft as F + +class FFT: + def __init__(self, one_result): + # create copy of one_result and work only on the copy + # also extract some informations + self.the_result = one_result + 0 + self.timepoints = N.array(one_result.x) + self.sampling_rate = one_result.get_sampling_rate() + self.data_points = one_result.get_ydata(0).size + self.aquisition_time = self.data_points / float(self.sampling_rate) + self.the_result.set_xlabel('Frequency [Hz]') + + def write_n(self, afile): + filename = open(afile,'w') + filename = open(afile,'a') + #print self.the_result.get_description_dictionary() + #filename.write('%s'%self.get_description_dictionary()) + for i in range(self.data_points): + filename.write('%e\t%e\t%e\n'%(self.the_result.x[i], self.the_result.y[0][i], self.the_result.y[1][i])) + filename.close() + return self + + def base_corr(self, cutoff=0.3, show=0): + """ + Subtracts the mean of the last cutoff % of the timsignal + to get rid of the DC part in the FFT and returns the + new data. + If cutoff is not given, the mean of the last 30% will be + subtracted. + If show=1 the result is return and not the instance. 
This allows to plot the baseline corrected signal + Example: + base_corr(cutoff=0.2, show=1) + """ + last_points = int(cutoff*self.data_points) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i] - self.the_result.y[i][-last_points:].mean() + if show == 1 : + return self.the_result + return self + + def abs_fft(self, points=None, zoom=None,write = 'off'): + """ + Fourier transforms the timesignal; + points is the number of points to transform, if more points given than data points + the rest is zero padded + + absfft(points=4096) + """ + realdata = N.array(self.the_result.y[0]) + imdata = N.array(self.the_result.y[1]) + data = realdata + 1j*imdata + fftdata = F.fftshift(F.fft(data, points)) + absfft = N.sqrt(fftdata.real**2 + fftdata.imag**2) + # create our x axis + n = fftdata.size + self.the_result.x = F.fftshift(F.fftfreq(n, 1.0/self.sampling_rate)) + self.the_result.y[0] = absfft + self.the_result.y[1] = N.zeros(n) + if write == 'on': + return self + else: + if zoom is None: + return self.the_result + else: + center, width = zoom + return self.zoom(self.the_result, center, width) + + + def fft(self, points=None, zoom=None, write='off'): + realdata = N.array(self.the_result.y[0]) + imdata = N.array(self.the_result.y[1]) + data = realdata + 1j*imdata + fftdata = F.fftshift(F.fft(data, points)) + # create our x axis + n = fftdata.size + self.the_result.x = F.fftshift(F.fftfreq(n, 1.0/self.sampling_rate)) + self.the_result.y[0] = fftdata.real + self.the_result.y[1] = fftdata.imag + if write == 'on': + return self + else: + if zoom is None: + return self.the_result + else: + center, width = zoom + return self.zoom(self.the_result, center, width) + + def zoom(self,some_result, center="auto", width=1000): + if center == "auto": + i_center = int(self.the_result.y[0].argmax()) + maximum = self.the_result.y[0][i_center] + print "Maximum at Frequency:", self.the_result.x[i_center] + else: + i_center = int(self.data_points/2.0+self.data_points*center/self.sampling_rate) + #print "TODO: set width automagically" + #if width == "auto": + # i_width = int(self.data_points*width) + i_width = int(self.data_points*width/self.sampling_rate) + some_result.x=some_result.x[i_center-i_width/2:i_center+i_width/2] + some_result.y[0]=some_result.y[0][i_center-i_width/2:i_center+i_width/2] + some_result.y[1]=some_result.y[1][i_center-i_width/2:i_center+i_width/2] + return some_result + + """ + Apodization functions: + * exp_window and gauss_window are S/N enhancing, + * dexp_window and traf_window are resolution enhancing + * standard windows [hamming, hanning, bartlett, blackman, kaiser-bessel] are also available + self.timepoints = time points + self.aquisition_time = aquisition time (no. 
samples / sampling_rate) + line_broadening = line broadening factor (standard = 10 Hz) + gaussian_multiplicator = Gaussian Multiplication Factor for + the double exponential apodization + function (standard = 0.3) + """ + def exp_window(self, line_broadening=10, show=0): + apod = N.exp(-self.timepoints*line_broadening) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1 : + return self.the_result + return self + + def gauss_window(self, line_broadening=10, show=0): + apod = N.exp(-(self.timepoints*line_broadening)**2) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1 : + return self.the_result + return self + + def dexp_window(self, line_broadening=10, gaussian_multiplicator=0.3, show=0): + apod = N.exp(-(self.timepoints*line_broadening - gaussian_multiplicator*self.aquisition_time)**2) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + + def traf_window(self, line_broadening=10, show=0): + apod = (N.exp(-self.timepoints*line_broadening))**2 / ( (N.exp(-self.timepoints*line_broadening))**3 + + (N.exp(-self.aquisition_time*line_broadening))**3 ) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + + def hanning_window(self, show=0): + apod = N.hanning(self.data_points) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + + def hamming_window(self, show=0): + apod = N.hamming(self.data_points) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + + def blackman_window(self, show=0): + apod = N.blackman(self.data_points) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + + def bartlett_window(self, show=0): + apod = N.bartlett(self.data_points) + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + + def kaiser_window(self, beta=4, show=0, use_scipy=None): + if use_scipy == None: + # modified Bessel function of zero kind order from somewhere + def I_0(x): + i0=0 + fac = lambda n:reduce(lambda a,b:a*(b+1),range(n),1) + for n in range(20): + i0 += ((x/2.0)**n/(fac(n)))**2 + return i0 + + t = N.arange(self.data_points, type=N.Float) - self.data_points/2.0 + T = self.data_points + # this is the window function array + apod = I_0(beta*N.sqrt(1-(2*t/T)**2))/I_0(beta) + else: + # alternative method using scipy + import scipy + apod=scipy.kaiser(self.data_points, beta) + + for i in range(2): + self.the_result.y[i] = self.the_result.y[i]*apod + if show == 1: + return self.the_result + return self + diff --git a/src/data/DamarisFFT.py b/src/data/DamarisFFT.py new file mode 100644 index 0000000..12c0643 --- /dev/null +++ b/src/data/DamarisFFT.py @@ -0,0 +1,196 @@ +import numpy +import sys +import autophase + +class DamarisFFT: + def clip(self, start=None, stop=None): + """ + Method for clipping data, only the timesignal between start and stop + is returned. + start and stop can be either time or frequency. The unit is automatically determined + """ + # check if start/stop order is properly + if start > stop: + # I could swap start/stop actually + # TODO swap values? 
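+            # refuse swapped boundaries instead of silently reordering them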
+            raise ValueError("clip: start must not be greater than stop")
+        # if one uses clip as a "placeholder"
+        if start is None and stop is None:
+            return self
+
+        if start is None:
+            start = 0
+        if stop is None:
+            stop = -1
+        # check if data is fft which changes the start/stop units
+        # TODO should get nicer(failsafe), i.e. flags in the object?
+        if self.xlabel == "Frequency / Hz":
+            isfft = True
+            # frequencies are converted to index positions right here
+            start = self.x.size*(0.5 + start/self.sampling_rate)
+            stop = self.x.size*(0.5 + stop/self.sampling_rate)
+        else:
+            isfft = False
+            # get the corresponding indices from the times
+            start *= self.sampling_rate
+            stop *= self.sampling_rate
+        # check if boundaries make sense, raise exception otherwise
+        if numpy.abs(int(start)-int(stop))<=0:
+            raise ValueError("start stop too close: There are no values in the given boundaries!")
+        for ch in xrange(len(self.y)):
+            # clip the data for each channel
+            # TODO multi records
+            self.y[ch] = self.y[ch][int(start):int(stop)]
+        # TODO what to do with x? Should it start from 0 or from start?
+        # self.x = self.x[:int(stop)-int(start)]
+        self.x = self.x[int(start):int(stop)]
+        return self
+
+    def baseline(self, last_part=0.1):
+        """
+        Correct the baseline of your data by subtracting the mean of the
+        last_part fraction of your data.
+
+        last_part defaults to 0.1, i.e. last 10% of your data
+        """
+        # TODO baseline correction for spectra after:
+        # Heuer, A; Haeberlen, U.: J. Mag. Res.(1989) 85, Is 1, 79-94
+        # Should I create an empty object?
+        # I decided NOT to make a copy, but
+        # rather modify the object in place
+        n = int(self.x.size*last_part)
+        for ch in xrange(len(self.y)):
+            self.y[ch] -= self.y[ch][-n:].mean()
+        # Skip the following due to design reasons
+        # new_object.was_copied = True
+        return self
+
+
+    """
+    Apodization functions:
+    * exp_window and gauss_window are S/N enhancing,
+    * dexp_window and traf_window are resolution enhancing
+    * standard windows [hamming, hanning, bartlett, blackman, kaiser-bessel]
+      are also available
+    self.x = time points
+    self.aquisition_time = aquisition time (no.
samples / sampling_rate) + line_broadening = line broadening factor (standard = 10 Hz) + gaussian_multiplicator = Gaussian Multiplication Factor for + the double exponential apodization + function (standard = 0.3) + """ + def exp_window(self, line_broadening=10): + """ + exponential window + """ + apod = numpy.exp(-self.x*numpy.pi*line_broadening) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def gauss_window(self, line_broadening=10): + apod = numpy.exp(-(self.x*line_broadening)**2) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def dexp_window(self, line_broadening=-10, gaussian_multiplicator=0.3): + apod = numpy.exp(-(self.x*line_broadening - gaussian_multiplicator*self.x.max())**2) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def traf_window(self, line_broadening=10): + apod = (numpy.exp(-self.x*line_broadening))**2 / ( (numpy.exp(-self.x*line_broadening))**3 + + (numpy.exp(-self.x.max()*line_broadening))**3 ) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def hanning_window(self): + apod = numpy.hanning(self.x.size) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def hamming_window(self): + apod = numpy.hamming(self.x.size) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def blackman_window(self): + apod = numpy.blackman(self.x.size) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def bartlett_window(self): + apod = numpy.bartlett(self.x.size) + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def kaiser_window(self, beta=4, use_scipy=None): + if use_scipy == None: + # modified Bessel function of zero kind order from somewhere + def I_0(x): + i0=0 + fac = lambda n:reduce(lambda a,b:a*(b+1),range(n),1) + for n in xrange(20): + i0 += ((x/2.0)**n/(fac(n)))**2 + return i0 + + t = numpy.arange(self.x.size, type=numpy.Float) - self.x.size/2.0 + T = self.x.size + # this is the window function array + apod = I_0(beta*numpy.sqrt(1-(2*t/T)**2))/I_0(beta) + else: + # alternative method using scipy + import scipy + apod=scipy.kaiser(self.x.size, beta) + + for i in range(2): + self.y[i] = self.y[i]*apod + return self + + def autophase(self): + """ + works nice with a SNR above 20 dB + 10 V signal height to 1V noise width + """ + autophase.get_phase(self) + return self + + def fft(self, samples=None): + """ + Fouriertransform the timesignal inplace. + For "zerofilling" set "samples" to a value higher than your data length. + Shorten "samples" to truncate your data. + samples takes only integer values + + """ + # Is this smart performance wise? Should I create an empty object? 
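+        # typical use from a data handling script, sketched with a hypothetical
+        # data pool key:
+        #   spec = data_pool["accu"] + 0           # work on a copy
+        #   spec.baseline().fft().magnitude()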
+ # Tests showed that this try except block performed 3.78ms + # timesignal.baseline().fft() + # with out this it needed 4.41 ms, thus this is justified :-) + #try: + # if self.was_copied: + # new_object = self + #except: + # new_object = self+0 + fft_of_signal = numpy.fft.fft(self.y[0] + 1j*self.y[1], n=samples) + fft_of_signal = numpy.fft.fftshift(fft_of_signal) + dwell = 1.0/self.sampling_rate + n = fft_of_signal.size + fft_frequencies = numpy.fft.fftfreq(n, dwell) + self.x = numpy.fft.fftshift(fft_frequencies) + self.y[0] = fft_of_signal.real + self.y[1] = fft_of_signal.imag + self.set_xlabel("Frequency / Hz") + return self + + def magnitude(self): + # this should calculate the absolute value, and set the imag channel to zero + self.y[0] = numpy.sqrt(self.y [0]**2+self.y [1]**2) + self.y[1] *= 0 #self.y[0].copy() + return self diff --git a/src/data/DataPool.py b/src/data/DataPool.py new file mode 100644 index 0000000..69beb24 --- /dev/null +++ b/src/data/DataPool.py @@ -0,0 +1,177 @@ +# data pool collects data from data handling script +# provides data to experiment script and display + +import sys +import types +import tables +import UserDict +import threading +import traceback +import StringIO +import ADC_Result +import Accumulation +import MeasurementResult + +class DataPool(UserDict.DictMixin): + """ + dictionary with sending change events + """ + + # supports tranlation from dictionary keys to pytables hdf node names + # taken from: Python Ref Manual Section 2.3: Identifiers and keywords + # things are always prefixed by "dir_" or "dict_" + translation_table="" + for i in xrange(256): + c=chr(i) + if (c>="a" and c<="z") or \ + (c>="A" and c<="Z") or \ + (c>="0" and c<="9"): + translation_table+=c + else: + translation_table+="_" + + class Event: + access=0 + updated_value=1 + new_key=2 + deleted_key=3 + destroy=4 + + def __init__(self, what, subject="", origin=None): + self.what=what + self.subject=subject + self.origin=origin + + def __repr__(self): + return ""%(self.origin, self.what,self.subject) + + def copy(self): + return DataPool.Event(self.what+0, self.subject+"", self.origin) + + def __init__(self): + self.__mydict={} + self.__dictlock=threading.Lock() + self.__registered_listeners=[] + + def __getitem__(self, name): + try: + self.__dictlock.acquire() + return self.__mydict[name] + finally: + self.__dictlock.release() + + def __setitem__(self, name, value): + try: + self.__dictlock.acquire() + if name in self.__mydict: + e=DataPool.Event(DataPool.Event.updated_value,name,self) + else: + e=DataPool.Event(DataPool.Event.new_key, name,self) + self.__mydict[name]=value + finally: + self.__dictlock.release() + self.__send_event(e) + + + def __delitem__(self, name): + try: + self.__dictlock.acquire() + del self.__mydict[name] + finally: + self.__dictlock.release() + self.__send_event(DataPool.Event(DataPool.Event.deleted_key,name,self)) + + def keys(self): + try: + self.__dictlock.acquire() + return self.__mydict.keys() + finally: + self.__dictlock.release() + + def __send_event(self, _event): + for l in self.__registered_listeners: + l(_event.copy()) + + def __del__(self): + self.__send_event(DataPool.Event(DataPool.Event.destroy)) + self.__registered_listeners=None + + def write_hdf5(self,hdffile,where="/",name="data_pool", complib=None, complevel=None): + if type(hdffile) is types.StringType: + dump_file=tables.openFile(hdffile, mode="a") + elif isinstance(hdffile,tables.File): + dump_file=hdffile + else: + raise Exception("expecting hdffile or string") + + 
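+        # from here on dump_file is an open tables.File, either the caller's
+        # instance or the one opened above from the given file name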
dump_group=dump_file.createGroup(where, name, "DAMARIS data pool") + self.__dictlock.acquire() + dict_keys=self.__mydict.keys() + self.__dictlock.release() + try: + for key in dict_keys: + if key[:2]=="__": continue + dump_dir=dump_group + # walk along the given path and create groups if necessary + namelist = key.split("/") + for part in namelist[:-1]: + dir_part="dir_"+str(part).translate(DataPool.translation_table) + if not dir_part in dump_dir: + dump_dir=dump_file.createGroup(dump_dir,name=dir_part,title=part) + else: + if dump_dir._v_children[dir_part]._v_title==part: + dump_dir=dump_dir._v_children[dir_part] + else: + extension_count=0 + while dir_part+"_%03d"%extension_count in dump_dir: + extension_count+=1 + dump_dir=dump_file.createGroup(dump_dir, + name=dir_part+"_%03d"%extension_count, + title=part) + + # convert last part of key to a valid name + group_keyname="dict_"+str(namelist[-1]).translate(DataPool.translation_table) + # avoid double names by adding number extension + if group_keyname in dump_dir: + extension_count=0 + while group_keyname+"_%03d"%extension_count in dump_dir: + extension_count+=1 + group_keyname+="_%03d"%extension_count + self.__dictlock.acquire() + if key not in self.__mydict: + # outdated ... + self.__dictlock.release() + continue + value=self.__mydict[key] + self.__dictlock.release() + # now write data, assuming, the object is constant during write operation + if "write_to_hdf" in dir(value): + try: + value.write_to_hdf(hdffile=dump_file, + where=dump_dir, + name=group_keyname, + title=key, + complib=complib, + complevel=complevel) + except Exception,e: + print "failed to write data_pool[\"%s\"]: %s"%(key,str(e)) + traceback_file=StringIO.StringIO() + traceback.print_tb(sys.exc_info()[2], None, traceback_file) + print "detailed traceback: %s\n"%str(e)+traceback_file.getvalue() + traceback_file=None + else: + print "don't know how to store data_pool[\"%s\"]"%key + value=None + + finally: + dump_group=None + if type(hdffile) is types.StringType: + dump_file.close() + dump_file=None + + def register_listener(self, listening_function): + self.__registered_listeners.append(listening_function) + + def unregister_listener(self, listening_function): + if listening_function in self.__registered_listeners: + self.__registered_listeners.remove(listening_function) diff --git a/src/data/Drawable.py b/src/data/Drawable.py new file mode 100644 index 0000000..d642df6 --- /dev/null +++ b/src/data/Drawable.py @@ -0,0 +1,192 @@ +# -*- coding: iso-8859-1 -*- + +import threading + +############################################################################# +# # +# Name: Class Drawable # +# # +# Purpose: Base class of everything plottable # +# # +############################################################################# + +class Drawable: + def __init__(self): + + # Will be set correctly in one of the subclasses + self.x = [] + self.y = [] + + self.styles = { } + + self.xlabel = None + self.ylabel = None + + self.title = None + + self.legend = { } + + self.text = {} + + self.xmin = 0 + self.xmax = 0 + self.ymin = 0 + self.ymax = 0 + + + def get_xdata(self): + "Returns a reference to the x-Plotdata (array)" + return self.x + + + def set_xdata(self, pos, value): + "Sets a point in x" + try: + self.x[pos] = value + except: + raise + + + def get_ydata(self, channel): + "Returns the y-Plotdata of channel n (array)" + try: + return self.y[channel] + except: + raise + + + def set_ydata(self, channel, pos, value): + "Sets a point in y" + try: + self.y[channel][pos] = value + 
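+            # a bad channel or position raises IndexError, which is passed on to the caller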
+            except:
+                raise
+
+
+    def get_number_of_channels(self):
+        "Returns the number of channels in y"
+        return len(self.y)
+
+
+    def get_style(self):
+        "Returns a reference to plot-styles (dictionary)"
+        return self.styles
+
+
+    def set_style(self, channel, value):
+        "Sets a channel to a certain plot-style"
+        if self.styles.has_key(channel):
+            print "Drawable Warning: Style key \"%s\" will be overwritten with \"%s\"" % (str(channel), str(value))
+
+        self.styles[channel] = str(value)
+
+
+    def get_xlabel(self):
+        "Returns the label for the x-axis"
+        return self.xlabel
+
+
+    def set_xlabel(self, label):
+        "Sets the label for the x-axis"
+        self.xlabel = str(label)
+
+
+    def get_ylabel(self):
+        "Gets the label for the y-axis"
+        return self.ylabel
+
+
+    def set_ylabel(self, label):
+        "Sets the label for the y-axis"
+        self.ylabel = str(label)
+
+
+    def get_text(self, index):
+        "Returns labels to be plotted (List)"
+        if self.text.has_key(index):
+            return self.text[index]
+        else: return None
+
+
+    def set_text(self, index, text):
+        "Sets labels to be plotted"
+        self.text[index] = str(text)
+
+
+    def get_title(self):
+        "Returns the title of the plot"
+        return self.title
+
+
+    def set_title(self, title):
+        "Sets the title of the plot"
+        self.title = str(title)
+
+
+    def get_legend(self):
+        "Returns the legend of the plot (Dictionary)"
+        return self.legend
+
+
+    def set_legend(self, channel, value):
+        "Sets the legend of the plot"
+        if self.legend.has_key(channel):
+            print "Drawable Warning: Legend key \"%s\" will be overwritten with \"%s\"" % (str(channel), str(value))
+
+        self.legend[channel] = str(value)
+
+
+    def get_xmin(self):
+        "Returns minimum of x"
+        return self.x.min()
+
+    def set_xmin(self, xmin):
+        "Sets minimum of x"
+        self.xmin = xmin
+
+
+    def get_xmax(self):
+        "Returns maximum of x"
+        return self.x.max()
+
+
+    def set_xmax(self, xmax):
+        "Sets maximum of x"
+        self.xmax = xmax
+
+
+    def get_ymin(self):
+        "Returns minimum of y"
+        if type(self.y)==type([]):
+            return min(map(lambda l:l.min(),self.y))
+        else:
+            return self.y.min()
+
+
+    def set_ymin(self, ymin):
+        "Sets minimum of y"
+        self.ymin = ymin
+
+
+    def get_ymax(self):
+        "Returns maximum of y"
+        if type(self.y)==type([]):
+            return max(map(lambda l:l.max(),self.y))
+        else:
+            return self.y.max()
+
+
+    def set_ymax(self, ymax):
+        "Sets maximum of y"
+        self.ymax = ymax
+
+
+
+
+
+
+
+
+
+
diff --git a/src/data/Error_Result.py b/src/data/Error_Result.py
new file mode 100644
index 0000000..53f559d
--- /dev/null
+++ b/src/data/Error_Result.py
@@ -0,0 +1,71 @@
+# -*- coding: iso-8859-1 -*-
+
+from Resultable import Resultable
+from Drawable import Drawable
+
+#############################################################################
+#                                                                           #
+# Name: Class Error_Result                                                  #
+#                                                                           #
+# Purpose: Specialised class of Resultable                                  #
+#          Contains error messages that occurred in the core                #
+#                                                                           #
+#############################################################################
+
+class Error_Result(Resultable, Drawable):
+    def __init__(self, error_msg = None, desc = {}, job_id = None, job_date = None):
+        Resultable.__init__(self)
+        Drawable.__init__(self)
+
+        if error_msg is not None:
+            self.error_message = error_msg
+            self.set_title("Error-Result: %s" % error_msg)
+        else:
+            self.error_message = error_msg
+        self.description = desc
+        self.job_id = job_id
+        self.job_date = job_date
+
+
+
+    def get_error_message(self):
+        return self.error_message
+
+
+    def set_error_message(self, error_msg):
+        self.set_title("Error-Result: %s" % error_msg)
+        self.error_message =
error_msg + + + # No statistics + def uses_statistics(self): + return False + + # Nothing to plot + def get_ydata(self): + return [0.0] + + # Nothing to plot + def get_xdata(self): + return [0.0] + + # Überladen von Operatoren und Built-Ins ------------------------------------------------------- + + def __repr__(self): + tmp_string = "Core error-message: %s" % self.error_message + + return tmp_string + + def __len__(self): + return len(self.error_message) + + + def __str__(self): + return self.error_message + + + # Preventing an error when adding something to an error-result (needed for plotting error-results) + def __add__(self, other): + return self + + diff --git a/src/data/Errorable.py b/src/data/Errorable.py new file mode 100644 index 0000000..3e578a3 --- /dev/null +++ b/src/data/Errorable.py @@ -0,0 +1,76 @@ +# -*- coding: iso-8859-1 -*- + +############################################################################# +# # +# Name: Class Errorable # +# # +# Purpose: Base class of everything what could contain a statistic error # +# # +############################################################################# + +class Errorable: + def __init__(self): + + # Will be determined in one of the subclasses + self.xerr = [] + self.yerr = [] + + self.error_color = "" + self.bars_above = False + + self.n = 0 + + + def get_xerr(self): + "Returns a reference to x-Error (array)" + return self.xerr + + + def set_xerr(self, pos, value): + "Sets a point in x-Error" + try: + self.xerr[pos] = value + except: + raise + + + def get_yerr(self, channel): + "Returns a list of y-Errors (list of arrays, corresponding channels)" + try: + return self.yerr[channel] + except: + raise + + + def set_yerr(self, channel, pos, value): + "Sets a point in y-Error" + try: + self.yerr[channel][pos] = value + except: + raise + + + def get_error_color(self): + "Returns the error-bar color" + return self.error_color + + + def set_error_color(self, color): + "Sets the error-bar color" + self.error_color = color + + + def get_bars_above(self): + "Gets bars-above property of errorplot" + return self.bars_above + + + def set_bars_above(self, bars_above): + "Sets bars-above property of errorplot" + self.bars_above = bool(bars_above) + + + def ready_for_drawing_error(self): + "Returns true if more than one result have been accumulated" + if self.n >= 2: return True + else: return False diff --git a/src/data/MeasurementResult.py b/src/data/MeasurementResult.py new file mode 100644 index 0000000..f285d29 --- /dev/null +++ b/src/data/MeasurementResult.py @@ -0,0 +1,291 @@ +import threading +import math +import types +import sys +import tables +import numpy +import exceptions +import UserDict +import Drawable + +## provide gaussian statistics for a series of measured data points +# +# AccumulatedValue provides mean and error of mean after being fed with measured data +# internaly it keeps the sum, the sum of squares and the number of data points +class AccumulatedValue: + + def __init__(self, mean=None, mean_err=None, n=None): + """ + one value with std. 
deviation + can be initialized by: + No argument: no entries + one argument: first entry + two arguments: mean and its error, n is set 2 + three arguments: already existing statistics defined by mean, mean's error, n + """ + if mean is None: + self.y=0.0 + self.y2=0.0 + self.n=0 + elif mean_err is None and n is None: + self.y=float(mean) + self.y2=self.y**2 + self.n=1 + elif mean_err is None: + self.n=max(1, int(n)) + self.y=float(mean)*self.n + self.y2=(float(mean)**2)*self.n + elif n is None: + self.n=2 + self.y=float(mean)*2 + self.y2=(float(mean_err)**2+float(mean)**2)*2 + else: + self.n=int(n) + self.y=float(mean)*self.n + self.y2=float(mean_err)**2*n*(n-1.0)+float(mean)**2*n + + def __add__(self,y): + new_one=AccumulatedValue() + if (type(y) is types.InstanceType and isinstance(y, AccumulatedValue)): + new_one.y=self.y+y.y + new_one.y2=self.y2+y.y2 + new_one.n=self.n+y.n + else: + new_one.y=self.y+float(y) + new_one.y2=self.y2+float(y)**2 + new_one.n=self.n+1 + return new_one + + def __iadd__(self,y): + if (type(y) is types.InstanceType and isinstance(y, AccumulatedValue)): + self.y+=y.y + self.y2+=y.y2 + self.n+=y.n + else: + self.y+=float(y) + self.y2+=float(y)**2 + self.n+=1 + return self + + def copy(self): + a=AccumulatedValue() + a.y=self.y + a.y2=self.y2 + a.n=self.n + return a + + def mean(self): + """ + returns the mean of all added/accumulated values + """ + if self.n is None or self.n==0: + return None + else: + return self.y/self.n + + def sigma(self): + """ + returns the standard deviation added/accumulated values + """ + if self.n>1: + variance=(self.y2-(self.y**2)/float(self.n))/(self.n-1.0) + if variance<0: + if variance<-1e-20: + print "variance=%g<0! assuming 0"%variance + return 0.0 + return math.sqrt(variance) + elif self.n==1: + return 0.0 + else: + return None + + def mean_error(self): + """ + returns the mean's error (=std.dev/sqrt(n)) of all added/accumulated values + """ + if self.n>1: + variance=(self.y2-(self.y**2)/float(self.n))/(self.n-1.0) + if variance<0: + if variance<-1e-20: + print "variance=%g<0! 
assuming 0"%variance + return 0.0 + return math.sqrt(variance/self.n) + elif self.n==1: + return 0.0 + else: + return None + + def __str__(self): + if self.n==0: + return "no value" + elif self.n==1: + return str(self.y) + else: + return "%g +/- %g (%d accumulations)"%(self.mean(),self.mean_error(),self.n) + + def __repr__(self): + return str(self) + +class MeasurementResult(Drawable.Drawable, UserDict.UserDict): + + def __init__(self, quantity_name): + """ + convenient accumulation and interface to plot functions + + dictionary must not contain anything but AccumulatedValue instances + """ + Drawable.Drawable.__init__(self) + UserDict.UserDict.__init__(self) + self.quantity_name=quantity_name + self.lock=threading.RLock() + + # get the selected item, if it does not exist, create an empty one + def __getitem__(self, key): + if key not in self: + a=AccumulatedValue() + self.data[float(key)]=a + return a + else: + return self.data[float(key)] + + def __setitem__(self,key,value): + if not (type(value) is types.InstanceType and isinstance(value, AccumulatedValue)): + value=AccumulatedValue(float(value)) + return UserDict.UserDict.__setitem__(self, + float(key), + value) + + def __add__(self, right_value): + if right_value==0: + return self.copy() + else: + raise Exception("not implemented") + + def get_title(self): + return self.quantity_name + + def get_xdata(self): + """ + sorted array of all dictionary entries without Accumulated Value objects with n==0 + """ + keys=numpy.array(filter(lambda k: not (isinstance(self.data[k], AccumulatedValue) and self.data[k].n==0), self.data.keys()), + dtype="Float64") + keys.sort() + return keys + + def get_ydata(self): + return self.get_xydata()[1] + + def get_xydata(self): + k=self.get_xdata() + v=numpy.array(map(lambda key: self.data[key].mean(), k), dtype="Float64") + return [k,v] + + def get_errorplotdata(self): + k=self.get_xdata() + v=numpy.array(map(lambda key: self.data[key].mean(), k), dtype="Float64") + e=numpy.array(map(lambda key: self.data[key].mean_error(), k), dtype="Float64") + return [k,v,e] + + def get_lineplotdata(self): + k=self.get_xdata() + v=numpy.array(self.y, dtype="Float64") + return [k, v] + + def uses_statistics(self): + """ + drawable interface method, returns True + """ + return True + + def write_to_csv(self,destination=sys.stdout, delimiter=" "): + """ + writes the data to a file or to sys.stdout + destination can be a file or a filename + suitable for further processing + """ + # write sorted + the_destination=destination + if type(destination) in types.StringTypes: + the_destination=file(destination, "w") + + the_destination.write("# quantity:"+str(self.quantity_name)+"\n") + the_destination.write("# x y ysigma n\n") + for x in self.get_xdata(): + y=self.data[x] + if type(y) in [types.FloatType, types.IntType, types.LongType]: + the_destination.write("%e%s%e%s0%s1\n"%(x, delimiter, y, delimiter, delimiter)) + else: + the_destination.write("%e%s%e%s%e%s%d\n"%(x, + delimiter, + y.mean(), + delimiter, + y.mean_error(), + delimiter, + y.n)) + the_destination=None + + + def write_to_hdf(self, hdffile, where, name, title, complib=None, complevel=None): + + h5_table_format= { + "x" : tables.Float64Col(), + "y" : tables.Float64Col(), + "y_err" : tables.Float64Col(), + "n" : tables.Int64Col() + } + filter=None + if complib is not None: + if complevel is None: + complevel=9 + filter=tables.Filters(complevel=complevel,complib=complib,shuffle=1) + + mr_table=hdffile.createTable(where=where,name=name, + description=h5_table_format, + 
title=title, + filters=filter, + expectedrows=len(self)) + mr_table.flavor="numpy" + mr_table.attrs.damaris_type="MeasurementResult" + self.lock.acquire() + try: + mr_table.attrs.quantity_name=self.quantity_name + + row=mr_table.row + xdata=self.get_xdata() + if xdata.shape[0]!=0: + for x in self.get_xdata(): + y=self.data[x] + row["x"]=x + if type(y) in [types.FloatType, types.IntType, types.LongType]: + row["y"]=y + row["y_err"]=0.0 + row["n"]=1 + else: + row["y"]=y.mean() + row["y_err"]=y.mean_error() + row["n"]=y.n + row.append() + + finally: + mr_table.flush() + self.lock.release() + +def read_from_hdf(hdf_node): + """ + reads a MeasurementResult object from the hdf_node + or None if the node is not suitable + """ + + if not isinstance(hdf_node, tables.Table): + return None + + if hdf_node._v_attrs.damaris_type!="MeasurementResult": + return None + + mr=MeasurementResult(hdf_node._v_attrs.quantity_name) + + for r in hdf_node.iterrows(): + mr[r["x"]]=AccumulatedValue(r["y"],r["y_err"],r["n"]) + + return mr diff --git a/src/data/Persistance.py b/src/data/Persistance.py new file mode 100644 index 0000000..5fea754 --- /dev/null +++ b/src/data/Persistance.py @@ -0,0 +1,29 @@ +class Persistance : + def __init__(self, shots): + self.shots = shots + self.accu = 0 + self.counter = 0 + self.result_list = [] + + def fade(self, res): + self.counter += 1 + if self.accu == 0: + self.accu=res+0 + self.result_list.append(res) + if self.counter < 1: + for i,ch in enumerate(self.accu.y): + ch += res.y[i] + + elif len(self.result_list) == self.shots: + self.counter = len(self.result_list) + old_result = self.result_list.pop(0) + for i,ch in enumerate(self.accu.y): + ch *= self.shots + ch -= old_result.y[i] + ch += res.y[i] + else: + for i,ch in enumerate(self.accu.y): + ch *= self.counter-1 + ch += res.y[i] + self.accu /= self.counter + return self.accu diff --git a/src/data/Resultable.py b/src/data/Resultable.py new file mode 100644 index 0000000..d2bc623 --- /dev/null +++ b/src/data/Resultable.py @@ -0,0 +1,65 @@ +# -*- coding: iso-8859-1 -*- + +############################################################################# +# # +# Name: Class Resultable # +# # +# Purpose: Base class of everything what could be a core-result # +# # +############################################################################# + +class Resultable: + def __init__(self): + + self.job_id = None + self.job_date = None + + self.description = { } + + + def get_job_id(self): + "Returns the job-id of this result" + return self.job_id + + + def set_job_id(self, _id): + "Sets the job-id of this result" + self.job_id = _id + + + def get_job_date(self): + "Gets the date of this result" + return self.job_date + + + def set_job_date(self, date): + "Sets the date of this result" + self.job_date = date + + + def get_description_dictionary(self): + "Returns a reference to the description (Dictionary)" + return self.description + + + def set_description_dictionary(self, dictionary): + "Sets the entire description" + self.description = dictionary + + + def get_description(self, key): + "Returns the description value for a given key" + if self.description.has_key(key): + return self.description[key] + + else: + print "Warning Resultable: No value for key \"%s\". Returned None" % str(key) + return None + + + def set_description(self, key, value): + "Adds a attribute to the description" + if self.description.has_key(key): + print "Warning: Result key \"%s\" will be overwritten with \"%s\"." 
% (str(key), str(value)) + + self.description[key] = value diff --git a/src/data/Signalpath.py b/src/data/Signalpath.py new file mode 100644 index 0000000..aeab33b --- /dev/null +++ b/src/data/Signalpath.py @@ -0,0 +1,9 @@ +import numpy as N +class Signalpath: + def phase(self, degrees): + tmp = self.y[0] + 1j*self.y[1] + tmp *= N.exp(1j*degrees*N.pi/180) + self.y[0] = tmp.real + self.y[1] = tmp.imag + del tmp + return self diff --git a/src/data/Temp_Result.py b/src/data/Temp_Result.py new file mode 100644 index 0000000..6288a04 --- /dev/null +++ b/src/data/Temp_Result.py @@ -0,0 +1,35 @@ +# -*- coding: iso-8859-1 -*- + +from Resultable import Resultable +from Drawable import Drawable + +from types import * + +############################################################################# +# # +# Name: Class Temp_Result # +# # +# Purpose: Specialised class of Resultable and Drawable # +# Contains recorded temperature data # +# # +############################################################################# + +class Temp_Result(Resultable, Drawable): + def __init__(self, x = None, y = None, desc = None, job_id = None, job_date = None): + Resultable.__init__(self) + Drawable.__init__(self) + + + if (x is None) and (y is None) and (desc is None) and (job_id is None) and (job_date is None): + pass + + elif (x is not None) and (y is not None) and (desc is not None) and (job_id is not None) and (job_date is not None): + pass + + else: + raise ValueError("Wrong usage of __init__!") + + + # Überladen von Operatoren und Built-Ins ------------------------------------------------------- + + # / Überladen von Operatoren und Built-Ins ----------------------------------------------------- diff --git a/src/data/__init__.py b/src/data/__init__.py new file mode 100644 index 0000000..f0c222b --- /dev/null +++ b/src/data/__init__.py @@ -0,0 +1,9 @@ +from damaris.data.ADC_Result import ADC_Result +from damaris.data.Accumulation import Accumulation +from damaris.data.MeasurementResult import MeasurementResult, AccumulatedValue +from damaris.data.DataPool import DataPool +from damaris.data.Error_Result import Error_Result +from damaris.data.Config_Result import Config_Result + +__all__=["ADC_Result", "Accumulation", "MeasurementResult", "AccumulatedValue", "DataPool", "FFT", "Error_Result", "Config_Result" ] + diff --git a/src/data/autophase.py b/src/data/autophase.py new file mode 100644 index 0000000..351d58f --- /dev/null +++ b/src/data/autophase.py @@ -0,0 +1,63 @@ +from scipy.optimize import fmin_powell, bisect, ridder, brentq +import numpy as N + +def calculate_entropy(phi, real, imag, gamma, dwell): + """ + Calculates the entropy of the spectrum (real part). 
+ p = phase + gamma should be adjusted such that the penalty and entropy are in the same magnitude + """ + # This is first order phasecorrection + # corr_phase = phi[0]+phi[1]*arange(0,len(signal),1.0)/len(signal) # For 0th and 1st correction + + # Zero order phase correction + real_part = real*N.cos(phi)-imag*N.sin(phi) + + # Either this for calculating derivatives: + # Zwei-Punkt-Formel + # real_diff = (Re[1:]-Re[:-1])/dwell + # Better this: + # Drei-Punkte-Mittelpunkt-Formel (Ränder werden nicht beachtet) + # real_diff = abs((Re[2:]-Re[:-2])/(dwell*2)) + # Even better: + # Fünf-Punkte-Mittelpunkt-Formel (ohne Ränder) + real_diff = N.abs((real_part[:-4]-8*real_part[1:-3] + +8*real_part[3:-1]-2*real_part[4:])/(12*dwell)) + + # TODO Ränder, sind wahrscheinlich nicht kritisch + + # Calculate the entropy + h = real_diff/real_diff.sum() + # Set all h with 0 to 1 (log would complain) + h[h==0]=1 + entropy = N.sum(-h*N.log(h)) + + # My version, according the paper + #penalty = gamma*sum([val**2 for val in Re if val < 0]) + # calculate penalty value: a real spectrum should have positive values + if real_part.sum() < 0: + tmp = real_part[real_part<0] + penalty = N.dot(tmp,tmp) + if gamma == 0: + gamma = entropy/penalty + penalty = N.dot(tmp,tmp)*gamma + else: + penalty = 0 + #print "Entropy:",entrop,"Penalty:",penalty # Debugging + shannon = entropy+penalty + return shannon + +def get_phase(result_object): + global gamma + gamma=0 + real = result_object.y[0].copy() + imag = result_object.y[1].copy() + dwell = 1.0/result_object.sampling_rate + # fmin also possible + xopt = fmin_powell( func=calculate_entropy, + x0=N.array([0.0]), + args=(real, imag, gamma, dwell), + disp=0) + result_object.y[0] = real*N.cos(xopt) - imag*N.sin(xopt) + result_object.y[1] = real*N.sin(xopt) + imag*N.cos(xopt) + return result_object diff --git a/src/experiments/Experiment.py b/src/experiments/Experiment.py new file mode 100644 index 0000000..106b8eb --- /dev/null +++ b/src/experiments/Experiment.py @@ -0,0 +1,491 @@ +# -*- coding: iso-8859-1 -*- +import types +import numpy + +class StateBase(object): + def __init__(self): + pass + def to_xml(self, indent = ""): + return indent + "" + +class StateSimple(StateBase): + def __init__(self, time, content=None): + super(StateSimple, self).__init__() + if time < 0: + raise AssertionError("time for state is negative!") + self.time = time + self.content = content + + def to_xml(self, indent = ""): + s = indent + '\n' + s += '>\n' + s += indent + ' ' + str(self.content) + '\n' + s += indent + '\n' + return s + def __repr__(self): + return 'StateSimple(%s, %s)' % (self.time, repr(self.content)) + +class StateList(StateBase): + def __init__(self): + super(StateList, self).__init__() + self.list = [] + def to_xml(self, indent = " "): + s = "" + for k in self.list: + if hasattr(k, "to_xml"): + s += k.to_xml(indent) + else: + s += indent + str(k) + return s + def append(self, val): + self.list.append(val) + + +class StateLoop(StateList): + """Represents a loop in the state tree""" + def __init__(self, repeat): + super(StateLoop, self).__init__() + self.repeat = repeat + def to_xml(self, indent = ""): + s = indent + ('\n' % self.repeat) + s += super(StateLoop, self).to_xml(indent + " ") + s += indent + '\n' + return s + def __repr__(self): + return 'StateLoop(repeat=%d, %s)' \ + % (self.repeat, repr(self.list)) + + +############################################################# +# # +# Class: Experiment # +# # +# Purpose: Represents one full experiment (one program on # +# the 
pulse-card; one file) # +# # +############################################################# +import dac + +class Experiment: + ## Experiment class holding the state tree + + job_id = 0 + + def __init__(self): + self.job_id = Experiment.job_id + Experiment.job_id += 1 + + self.state_list = StateList() + self.list_stack = [] + self.description = { } + + + # Commands ------------------------------------------------------------------------------------- + ## Deprecated + def rf_pulse(self, value, length = None): + """ + deprecated: use ttl_pulse + """ + s_content = '' % value + + if length is None: + self.state_list.append(s_content) + else: + self.state_list.append(StateSimple(length, s_content)) + + ## Creates a state with ttl signals of duration *length*. + # + # **Example:** + # ttl_pulse(length=1e-6,value=3) + # will create a ttl pulse on channels 0 and 1 (2**0 + 2**1) of duration 1us + # @param length time length if this state + # @param channel select a single channel (1...24) + # @param value select the channels via decimal representation (2**0 + 2**1 ...) + def ttl_pulse(self, length, channel = None, value = None): + """ + Creates a state with length *length* and switches + some bits of the pulse programmer to HIGH: + * channel: this selects a single channel (No. 1 - 24) + * value: this is the integer representation of the 24bit word, + as an example value=3 selects channels 1 and 2 (2**1 + 2**2) + """ + the_value=0 + if value is not None: + the_value=int(value) + elif channel is not None: + the_value=1<' % the_value)) + + ## Same as ttl_pulse, but no *channel* keyword + def ttls(self, length = None, value = None): + """ + same as ttl_pulse, but no *channel* keyword + """ + the_value=int(value) + s_content = '' % the_value + if length is not None: + self.state_list.append(StateSimple(length, s_content)) + else: + self.state_list.append(s_content) + ## Beginning of a new state + def state_start(self, time): + """ + starts a state in the pulse programs with duration *time*. 
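+        A state may contain further commands. A minimal sketch (assuming an
+        Experiment instance *e* and an arbitrary 5 us state with TTL value 3):
+
+            e.state_start(5e-6)
+            e.ttls(value=3)
+            e.state_end()
+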
+ This must be closed with state_end + """ + self.state_list.append('\n' % repr(time)) + + ## End of *state_start* + def state_end(self): + """ + closes a state after start_state + """ + self.state_list.append('\n') + + ## An empty state doing nothing + # @param time Duration of this state + # @param ttls Additinional ttl channels + def wait(self, time, ttls=None): + if ttls is not None: + s_content = '' % ttls + self.state_list.append(StateSimple(time,s_content)) + else: + self.state_list.append(StateSimple(time)) + + ## Records data with given number of samples, sampling-frequency frequency and sensitivity + # @param samples Number of samples to record + # @param frequency Sampling frequency + # @param timelength Length of this state, per default calculated automatically + # @param sensitivity Sensitivity in Umax/V + # @param ttls Additional ttl channels + def record(self, samples, frequency, timelength=None, sensitivity = None, ttls=None, channels = 3, offset = None, impedance = None): + attributes='s="%d" f="%d"'%(samples,frequency)#%g + if channels != 1 and channels != 3 and channels != 5 and channels != 15: + raise ValueError, "Channel definition is illegal" + attributes += ' channels="%i"'%(channels) + + nchannels = 0 + if channels == 1: + nchannels = 1 + elif channels == 3 or channels == 5: + nchannels = 2 + elif channels == 15: + nchannels = 4 + if sensitivity is not None: + # float values are allowed and applied to all channels + if isinstance(sensitivity, float) or isinstance(sensitivity, int): + for i in range(nchannels): + attributes +=' sensitivity%i="%f"'%(i, float(sensitivity)) + else: + for i in range(nchannels): + attributes +=' sensitivity%i="%f"'%(i, sensitivity[i]) + if offset is not None: + # int values are allowed and applied to all channels + if isinstance(offset, int): + for i in range(nchannels): + attributes +=' offset%i="%f"'%(i, offset) + else: + for i in range(nchannels): + attributes +=' offset%i="%f"'%(i, offset[i]) + if impedance is not None: + # float values are allowed and applied to all channels + if isinstance(impedance, float): + for i in range(nchannels): + attributes += ' impedance%i="%i"'%(i, impedance) + else: + for i in range(nchannels): + attributes += ' impedance%i="%i"'%(i, impedance[i]) + + s_content = '' % attributes + if ttls is not None: + s_content+='' % ttls + if timelength is None: + timelength = samples / float(frequency)#*1.01 + self.state_list.append(StateSimple(timelength, s_content)) + + ## Create a loop on the pulse programmer. Loop contents can not change inside the loop. + # @params iterations Number of loop iterations + def loop_start(self, iterations): + """creates a loop of given number of iterations and has to be closed by loop_end(). + Commands inside the loop can not change, i.e. the parameters are the same for each loop run. + This loop is created on the pulse programmer, thus saving commands. + One must close the loop with loop_end (see below)""" + l = StateLoop(iterations) + self.state_list.append(l) + # (These two lines could probably be guarded by a mutex) + self.list_stack.append(self.state_list) + self.state_list = l + + ## End loop state + def loop_end(self): + # (This line could probably be guarded by a mutex) + self.state_list = self.list_stack.pop(-1) + + ## Set the frequency and phase of the frequency source. + ## This state needs 2us. 
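+    # **Example** (a sketch; the values are the ones used in self_test below):
+    #     e.set_frequency(85e6, 90, ttls=16)   # 85 MHz, 90 degrees, TTL value 16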
+    # @param frequency New frequency in Hz
+    # @param phase New phase in degrees
+    # @param ttls Additional ttl channels
+    def set_frequency(self, frequency, phase, ttls=0):
+        """
+        Sets the frequency source to a desired frequency (Hz) and phase
+        (degrees), and optionally switches further pulse programmer lines
+        with *ttls*. The time needed to set the frequency is 2 us.
+        """
+        s_content = '' % (frequency, phase)
+        if ttls != 0:
+            s_content += '' % ttls
+        self.state_list.append(StateSimple(2e-6, s_content))
+
+    ## Creates a (possibly shaped) pulsed gradient.
+    # @param dac_value DAC value to set
+    # @param length Duration of the state, minimum length is 42*90ns=3.78us (default)
+    # @param shape Tuple of (shape, resolution/seconds), shape can be one of: rec (default), sin2, sin
+    # @param is_seq If set to *True*, do NOT set DAC to zero after this state
+    # @param trigger Additional ttl channels
+    def set_pfg(self, dac_value=None, length=None, shape=('rec',0), trigger=4, is_seq=False):
+        """
+        This sets the value for the PFG and also sets it back automatically.
+        If you don't wish to do so (i.e. for line shapes) set is_seq=True.
+        If you want to set a trigger, set trigger (default=4, i.e. channel 2).
+        If you want shaped gradients: shape=(ashape, resolution), ashape can be rec, sin2, sin.
+        """
+        try:
+            form, resolution = shape
+        except:
+            raise SyntaxError, "shape argument needs to be a tuple, i.e. ('shape',resolution), shape can be sin, sin2, rec"
+
+        if length is None:
+            # minimum length
+            length=42*9e-8
+
+        # the resolution limits apply to shaped gradients only
+        if form != 'rec':
+            if resolution >= length:
+                raise ValueError, "Resolution %.3e of shaped gradients can not be longer than total length %.3e"%(resolution, length)
+            if resolution < 42*9e-8:
+                raise ValueError, "Resolution %.3e can not be smaller than %.3e"%(resolution, 42*9e-8)
+            t_steps = numpy.arange(0,length,resolution)
+
+        if form == 'rec': # rectangular gradients
+            s_content = '' % (trigger, dac_value)
+            self.state_list.append(StateSimple(length, s_content))
+
+            if not is_seq:
+                # set the DAC back to zero
+                s_content = ''
+                self.state_list.append(StateSimple(42*9e-8, s_content))
+
+        elif form == 'sin2':
+            # sin**2 shape
+            for t in t_steps:
+                dac = int(dac_value*numpy.sin(numpy.pi/length*t)**2)
+                s_content = '' % (trigger, dac)
+                self.state_list.append(StateSimple(resolution, s_content))
+            # set it back to zero
+            s_content = '' % (trigger)
+            self.state_list.append(StateSimple(resolution, s_content))
+
+        elif form == 'sin':
+            # sin shape
+            for t in t_steps:
+                dac = int(dac_value*numpy.sin(numpy.pi/length*t))
+                s_content = '' % (trigger, dac)
+                self.state_list.append(StateSimple(resolution, s_content))
+            # set it back to zero
+            s_content = '' % (trigger)
+            self.state_list.append(StateSimple(resolution, s_content))
+
+        else: # don't know what to do
+            raise SyntaxError, "form is unknown: %s"%form
+
+    ## Deprecated, use set_pfg instead
+    def set_pfg_wt(self, I_out=None, dac_value=None, length=None, is_seq=0, trigger=4):
+        """
+        This sets the value for the PFG (plus trigger, default=2**2), it also sets it back automatically.
+        If you don't wish to do so (i.e.
line shapes) set is_seq=1 + """ +# raise DeprecationWarning, "to be removed in future, use set_pfg instead" + if I_out == None and dac_value == None: + dac_value=0 + if I_out != None and dac_value == None: + dac_value=dac.conv(I_out) + if I_out == None and dac_value != None: + dac_value=dac_value + if I_out !=None and dac_value != None: + dac_value = 0 + print "WARNING: You can't set both, I_out and dac_value! dac_value set to 0" + if length==None: + length=42*9e-8 + s_content = '' \ + % (dac_value, trigger) + self.state_list.append(StateSimple(length, s_content)) + if is_seq == 0: + s_content = '' \ + % trigger + self.state_list.append(StateSimple(42*9e-8, s_content)) + + ## sets the value of a DAC + # @param dac_value DAC value to set + # @param dac_id ID of the dac in case of multiple DAC(default=1) + # @param length Duration of the state + # @param is_seq If set to *True*, do NOT set DAC to zero after this state + # @param ttls Additional ttl channels + def set_dac(self, dac_value, dac_id=1, length=None, is_seq=False, ttls=0): + """ + This sets the value for the DAC and possibly some TTLs. + It also sets it back automatically. + If you don't whish to do so (i.e. line shapes) set is_seq=True + """ + if length==None: + length=42*9e-8 + s_content = '' \ + % (dac_id, dac_value, ttls) + self.state_list.append(StateSimple(length, s_content)) + if not is_seq: + s_content = '' \ + % (dac_id, ttls) + self.state_list.append(StateSimple(42*9e-8, s_content)) + + ## sets the phase of the frequency source. + ## This state needs 0.5us, though the phase switching time is dependent on the frequency source + # @param phase New phase to set + # @param ttls Additional ttl channels + def set_phase(self, phase, ttls=0): + s_content = '' % (phase) + if ttls!=0: + s_content += '' % ttls + self.state_list.append(StateSimple(0.5e-6, s_content)) + + ## sets a description which is carried via the back end result + ## file to the result script in the front end. In the result script + ## you can extract the description with get_description(key) + # @param key Name of description + # @param value Value of description + def set_description(self, key, value): + """Sets a description which is carried via the back end result + file to the result script in the front end. 
In the result script + you can extract the description with get_description""" + if key in self.description.keys(): + print 'Warning: Overwriting existing description "%s" = "%s" with "%s"' % (key, self.description[key], value) + + self.description[key] = value + + ## set the PTS310/PTS500 frequency source to local mode + def set_pts_local(self): + """ + this will set the PTS310/PTS500 frequency source to local mode + """ + self.state_list.append(StateSimple(1e-6, '')) + self.state_list.append(StateSimple(1e-6, '')) + + # / Commands ----------------------------------------------------------------------------------- + + + # Public Methods ------------------------------------------------------------------------------- + + def get_job_id(self): + "Returns the current job-id the experiment got" + return self.job_id + + + def write_xml_string(self): + "Returns the current program as a string" + + # Standart XML-Kopf + xml_string = '\n' + + # Experiment-Start-Tag einfügen + xml_string += '\n' % self.job_id + + # Descriptions einfügen + if len(self.description)==0: + xml_string += ' \n' + else: + xml_string += ' \n' + for key,value in self.description.iteritems(): + type_string="repr" + if value is None: + type_string="None" + value="" + elif type(value) is types.FloatType or isinstance(value, numpy.floating): + type_string="Float" + value=repr(value) + elif type(value) is types.IntType or isinstance(value, numpy.integer): + type_string="Int" + value=repr(value) + elif type(value) is types.LongType: + type_string="Long" + value=repr(value) + elif type(value) is types.ComplexType or isinstance(value, numpy.complexfloating): + type_string="Complex" + value=repr(value) + elif type(value) is types.BooleanType or isinstance(value, numpy.bool_): + type_string="Boolean" + value=repr(value) + elif type(value) in types.StringTypes: + type_string="String" + else: + value=repr(value) + xml_string += ' %s\n'%(key, type_string ,value) + xml_string += " \n" + + # Experiment-Inhalt einfügen + xml_string += self.state_list.to_xml(indent = " ") + + # Experiment-End-Tag + xml_string += '\n' + + return xml_string + + def write_quit_job(self): + "Returns a xml quit-job" + return '\n' + + +class Quit(Experiment): + def write_xml_string(self): + return '\n'%self.job_id + + +# /Public Methods ------------------------------------------------------------------------------ + + + +def self_test(): + e = Experiment() + e.set_description("key", "value") + e.set_frequency(85e6, 90, ttls=16) + e.wait(1e-6) + e.rf_pulse(1, 1e-6/3) # val = 1 + e.ttl_pulse(1e-6/3, 1) # val = 2 + e.ttl_pulse(1e-6/3, None, 7) # val = 7 + if True: + e.loop_start(30) + e.set_pfg(dac_value=1024, is_seq = True) + e.set_pfg_wt(dac_value=2048) + e.loop_start(400) + e.set_phase(270, ttls = 32) + e.loop_end() + e.ttl_pulse(5e-6, channel = 6) + e.loop_end() + else: + l = StateLoop(3) + l.append(StateSimple(5e-6, '')) + e.state_list.append(l) + e.set_dac(12345, dac_id=2, is_seq = True, ttls=16) + e.record(1024, 20e6) + try: + e.wait(-1) + except AssertionError: + pass + else: + raise AssertionError("An exception should happen") + e.set_pts_local() + print e.write_xml_string() + +if __name__ == '__main__': + self_test() diff --git a/src/experiments/__init__.py b/src/experiments/__init__.py new file mode 100644 index 0000000..d75ca18 --- /dev/null +++ b/src/experiments/__init__.py @@ -0,0 +1,3 @@ +from Experiment import Experiment +from damaris.tools.ranges import * +#__all__=["Experiment"] diff --git a/src/experiments/dac.py b/src/experiments/dac.py new file 
mode 100644 index 0000000..8fab0a3 --- /dev/null +++ b/src/experiments/dac.py @@ -0,0 +1,12 @@ +#import math +""" +This module holds everything connected with the DAC and PFG +""" +def conv(I_out=0): + """ + converts the demanded Output current in Integer + """ + V_dac=I_out/50.0 + dac_value=-(V_dac-0.00983)/1.81413e-5 + return int(dac_value) + diff --git a/src/gui/BackendDriver.py b/src/gui/BackendDriver.py new file mode 100644 index 0000000..b039304 --- /dev/null +++ b/src/gui/BackendDriver.py @@ -0,0 +1,294 @@ +import os +import os.path +import subprocess +import sys +import time +import re +import glob +import ExperimentWriter +import ResultReader +import threading +import types +import signal + +if sys.platform=="win32": + import _winreg + +__doc__ = """ +This class handles the backend driver +""" + +class BackendDriver(threading.Thread): + + def __init__(self, executable, spool, clear_jobs=False, clear_results=False): + threading.Thread.__init__(self, name="Backend Driver") + self.core_pid = None + self.core_input = None + self.core_output = None + self.statefilename = None + + self.executable=str(executable) + self.spool_dir=spool + self.experiment_pattern="job.%09d" + self.result_pattern=self.experiment_pattern+".result" + + if not os.path.isfile(self.executable): + raise AssertionError("could not find backend %s "%self.executable) + if not os.access(self.executable,os.X_OK): + raise AssertionError("insufficient rights for backend %s execution"%self.executable) + if not os.path.isdir(self.spool_dir): + try: + os.makedirs(os.path.abspath(self.spool_dir)) + except OSError,e: + print e + raise AssertionError("could not create backend's spool directory %s "%self.spool_dir) + + # remove stale state filenames + if sys.platform.startswith("linux") or sys.platform.startswith("darwin"): + old_state_files=glob.glob(os.path.join(self.spool_dir,"*.state")) + statelinepattern=re.compile("") + for statefilename in old_state_files: + statefile=file(statefilename,"r") + statelines=statefile.readlines() + statefile.close + del statefile + core_pid=None + for l in statelines: + matched=statelinepattern.match(l) + if matched: + core_pid=int(matched.group(2)) + break + if core_pid is not None: + if os.path.isdir("/proc/%d"%core_pid): + raise AssertionError("found backend with pid %d (state file %s) in same spool dir"%(core_pid,statefilename)) + else: + print "removing stale backend state file", statefilename + os.remove(statefilename) + else: + print "todo: take care of existing backend state files" + + self.result_reader = ResultReader.BlockingResultReader(self.spool_dir, + no=0, + result_pattern=self.result_pattern, + clear_jobs=clear_jobs, + clear_results=clear_results) + self.experiment_writer = ExperimentWriter.ExperimentWriterWithCleanup(self.spool_dir, + no=0, + job_pattern=self.experiment_pattern, + inform_last_job=self.result_reader) + + self.quit_flag=threading.Event() + self.raised_exception=None + + def run(self): + # take care of older logfiles + self.core_output_filename=os.path.join(self.spool_dir,"logdata") + if os.path.isfile(self.core_output_filename): + i=0 + max_logs=100 + while os.path.isfile(self.core_output_filename+".%02d"%i): + i+=1 + while (i>=max_logs): + i-=1 + os.remove(self.core_output_filename+".%02d"%i) + for j in xrange(i): + os.rename(self.core_output_filename+".%02d"%(i-j-1),self.core_output_filename+".%02d"%(i-j)) + os.rename(self.core_output_filename, self.core_output_filename+".%02d"%0) + # create logfile + self.core_output=file(self.core_output_filename,"w") + + 
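+        # the rotation above keeps at most max_logs old logfiles: the current
+        # "logdata" becomes "logdata.00", older copies shift up by one, and
+        # copies beyond the limit are removed
+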
+        # again look out for existing state files
+        state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
+        if state_files:
+            self.raised_exception="found other state file(s) in spool directory: "+",".join(state_files)
+            self.quit_flag.set()
+            return
+
+        # start backend
+        if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
+            self.core_input=subprocess.Popen([self.executable, "--spool", self.spool_dir],
+                                             stdout=self.core_output,
+                                             stderr=self.core_output)
+
+        if sys.platform=="win32":
+            cygwin_root_key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Cygnus Solutions\\Cygwin\\mounts v2\\/")
+            cygwin_path=_winreg.QueryValueEx(cygwin_root_key,"native")[0]
+            os.environ["PATH"]+=";"+os.path.join(cygwin_path,"bin")+";"+os.path.join(cygwin_path,"lib")
+            self.core_input=subprocess.Popen("\"" + self.executable + "\"" + " --spool "+self.spool_dir,
+                                             stdout=self.core_output,
+                                             stderr=self.core_output)
+
+        # wait till the state file shows up
+        timeout=10
+        # TODO: determine the core's state file name reliably
+        self.statefilename=None
+        state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
+        while len(state_files)==0:
+            if timeout<0 or self.core_input is None or self.core_input.poll() is not None or self.quit_flag.isSet():
+                # look into the core log file and include its contents
+                log_message=''
+                self.core_input=None
+                if os.path.isfile(self.core_output_filename):
+                    # include the first lines of the log
+                    log_message='\n'+''.join(file(self.core_output_filename,"r").readlines()[:10])
+                if not log_message:
+                    log_message=" no error message from core"
+                self.core_output.close()
+                self.raised_exception="no state file appeared or the backend died:"+log_message
+                print self.raised_exception
+                self.quit_flag.set()
+                return
+            time.sleep(0.05)
+            timeout-=0.05
+            state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
+
+        # keep the first state file found
+        if len(state_files)>1:
+            print "found more than one state file, taking the first one!"
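+        # taking the first match is arbitrary; with several state files the
+        # driver could end up watching the wrong backend instance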
+        self.statefilename=state_files[0]
+
+        # read state file
+        statefile=file(self.statefilename,"r")
+        statelines=statefile.readlines()
+        statefile=None
+        statelinepattern=re.compile("")
+        self.core_pid=-1
+        for l in statelines:
+            matched=statelinepattern.match(l)
+            if matched:
+                self.core_pid=int(matched.group(2))
+                break
+
+        # wait on the quit flag and look after the backend
+        while not self.quit_flag.isSet() and self.is_busy():
+            self.quit_flag.wait(0.1)
+
+        if self.quit_flag.isSet():
+            self.stop_queue()
+            while self.is_busy():
+                time.sleep(0.1)
+
+        if not self.is_busy():
+            if self.core_input is not None:
+                backend_result=self.core_input.poll()
+                wait_loop_counter=0
+                while backend_result is None:
+                    # waiting in tenths of a second
+                    time.sleep(0.1)
+                    wait_loop_counter+=1
+                    backend_result=self.core_input.poll()
+                    if backend_result is not None: break
+                    if wait_loop_counter==10:
+                        print "sending termination signal to backend process"
+                        self.send_signal("SIGTERM")
+                    elif wait_loop_counter==20:
+                        print "sending kill signal to backend process"
+                        self.send_signal("SIGKILL")
+                    elif wait_loop_counter>30:
+                        print "no longer waiting for backend shutdown"
+                        break
+
+                if backend_result is None:
+                    print "backend did not end properly, please stop it manually"
+                elif backend_result>0:
+                    print "backend returned ", backend_result
+                elif backend_result<0:
+                    sig_name=filter(lambda x: x.startswith("SIG") and \
+                                    x[3]!="_" and \
+                                    (type(signal.__dict__[x]) is types.IntType) and \
+                                    signal.__dict__[x]==-backend_result,
+                                    dir(signal))
+                    if sig_name:
+                        print "backend was terminated by signal ",sig_name[0]
+                    else:
+                        print "backend was terminated by signal no",-backend_result
+            self.core_input = None
+            self.core_pid = None
+
+        # the experiment handler should stop
+        if self.experiment_writer is not None:
+            # self.experiment_writer.
+            self.experiment_writer=None
+
+        # tell the result reader the game is over...
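+        # poll_time=-1 signals the BlockingResultReader to stop waiting for
+        # further result files once the pending ones are processed
+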
+ #self.result_reader.stop_no=self.experiment_writer.no + if self.result_reader is not None: + self.result_reader.poll_time=-1 + self.result_reader=None + + def clear_job(self,no): + jobfilename=os.path.join(self.spool_dir,"job.%09d") + resultfilename=os.path.join(self.spool_dir,"job.%09d.result") + if os.path.isfile(jobfilename): + os.remove(jobfilename) + if os.path.isfile(resultfilename): + os.remove(resultfilename) + + def get_messages(self): + # return pending messages + if self.core_output.tell()==os.path.getsize(self.core_output_filename): + return None + return self.core_output.read() + + def restart_queue(self): + self.send_signal("SIGUSR1") + + def stop_queue(self): + self.send_signal("SIGQUIT") + # assumes success + #self.core_pid=None + #self.core_input=None + + def abort(self): + # abort execution + self.send_signal("SIGTERM") + # assumes success + #self.core_pid=None + #self.core_input=None + + def send_signal(self, sig): + if self.core_pid is None: + print "BackendDriver.send_signal is called with core_pid=None" + return + try: + if sys.platform[:5]=="linux": + os.kill(self.core_pid,signal.__dict__[sig]) + if sys.platform[:7]=="win32": + # reg_handle=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE) + cygwin_root_key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Cygnus Solutions\\Cygwin\\mounts v2\\/") + cygwin_path=_winreg.QueryValueEx(cygwin_root_key,"native")[0] + kill_command=os.path.join(cygwin_path,"bin","kill.exe") + os.popen("%s -%s %d"%(kill_command,sig,self.core_pid)) + except OSError, e: + print "could not send signal %s to core: %s"%(sig, str(e)) + + + def is_busy(self): + "Checks for state file" + return self.statefilename is not None and os.path.isfile(self.statefilename) and \ + self.core_input is not None and self.core_input.poll() is None + + #file_list = glob.glob(os.path.join(self.spool_dir, self.core_state_file)) + #if len(file_list) != 0: + # return True + #else: + # return False + + + def get_exp_writer(self): + return self.experiment_writer + + def get_res_reader(self): + return self.result_reader + + def __del__(self): + # stop core and wait for it + if self.core_pid is not None: + try: + self.abort() + except OSError: + pass + self.core_input=None + if self.core_output: + self.core_output.close() + self.core_output=None diff --git a/src/gui/DAMARIS.ico b/src/gui/DAMARIS.ico new file mode 100644 index 0000000000000000000000000000000000000000..53368b633a8dfac8c8c271e1f621316805a30599 GIT binary patch literal 11454 zcmeHN-%nF#7=8-0r4$BeDaFzO+EvGnAc~z%g@OrDnPx5_jwME>&IcZoiN^=8e+k@f1zztal$Lg( ztgIhZ;W+&My(lmL0E)62!RR(bV>?k*br!*~B7u%XHyYzZsHvGi zF!&wnW0%?U_iXzSv5v#2t^FB0cJ4xR&rvAbkE7-7li1V$K9(!LU|a7M>_2c2ZM(z>T*6ae z{ED<#YZZ3+t6Hs9$ie3-%r(ft=PJxKtk}Y1(*_aEq1+?L@Xa->VqcvTi7n|aQXkfY zbB~Z(1``c(@avdlRYS3^U*+I)#i|B<73ATo8uV3=hi|SyE`Hry6~tVSi*Kxg(#@l* zQ^Z?u^~OTss~;07Fn(oZ zCG$sL0nwv&98!-BFnoOlWa9e+Q3Jl11NQj?0Rz6(6*<38^D+hEETKOO<%gMhRDj^? z&qA4d=J(3M&+gmv^@n$m%o)aKfap*4yM$c)SI9N25cocF4R-L2y}G#lDe+Y4>RT7e z{=0(Ffovn5nvf1Z9QI{9AjkI>H-3qyqM-hG*mv5(`On!K@vEhkm$NtGOM+)B51f54 zRHy4tIru7fBYUm;?XF*O?q=+(eeU!OlCyqd-8(YyjoxeS05b56>}BGcu@}8>pMN&? 
zfc^C=ecy+}TJP`%lD+=GCmEet^6@GALhz^TW#Ko))Ts6OGT|lpA7lT0@dy~Ms&@TX znYS?PXdqyOx6NN;ZpNL9vAcfNmiEP*JIA-X{=-)`thmd;x8^Pf-=wG?P9FXP!yXWnN#~00qCi;dyx%Crcq6zCHxtrt}H|bT1{Xuj0=sXVC z0U9Q7gyde5`$%3)a#-d21SUF3-e8eel5sD|>7~sz_V?7WF=hRSjqH|9COI9`K(UrT zB;(J>I47y?laVJEFg{84Sx7!j_NTeku~z%g@g%_n$>})F$v*#*dx${ z+&=jEiUEELtO)u8Hm!wKjp#nD0tU3!qhy?yKns$)NbV;&?ZYQzob+%h>0vL)KPCC7 a$mx7x(_%Jy?#t^-Z`|+sT{Gt;dj1bNt(ENn literal 0 HcmV?d00001 diff --git a/src/gui/DAMARIS.png b/src/gui/DAMARIS.png new file mode 100644 index 0000000000000000000000000000000000000000..c407ebf1e89779a5ca248fad5c537ce41b51d1e4 GIT binary patch literal 664 zcmV;J0%!e+P)b7m?Bqt!^Unf?vW$3vQrhwV(z zVzKyroWK9MetdhUMb7zyL)+En&$Y|vSE=uDU~+oC9=+t}GxsV2hsl+TIvZ^FOv=^Da%vX}McTHLcT@b{TqEK*Oh6F(n1Qi~E6l@WYAX)v# zXhEk(hqRu!w4S(1->24jP)i1fN^gBSQDS9-!?5PDORWSgmrEkb862kORVs0OpYcvr zcg@+-DyS*qd#d%sO(q=KqgNT}5fzlw{9rI>tU2pKC_zon#bS8VBU)aaXwrFdWb~d| z35uI@UW3-%s8y_7o!&{yDlMZ|F-|eH<#a+&wwz?j5f;>j!C}c=y40b9@d;8T#%MvA yGaaR4C{zCh&msT-00000000000001>WBdRu908&|&Ih9a0000 run|quit + # run -> pause|stop + # pause -> run|stop + # stop -> edit + self.state=DamarisGUI.Edit_State + # script execution engines and backend driver + self.si = None + # produced and displayed data + self.data = None + + self.glade_layout_init() + # my notebook + self.main_notebook = self.xml_gui.get_widget("main_notebook") + + self.log=LogWindow(self.xml_gui) + + self.sw=ScriptWidgets(self.xml_gui) + + self.toolbar_init() + + self.documentation_init() + + self.monitor=MonitorWidgets(self.xml_gui) + + self.config=ConfigTab(self.xml_gui) + + exp_script=u"" + if exp_script_filename is not None and exp_script_filename!="": + self.sw.exp_script_filename=exp_script_filename[:] + if os.path.isfile(exp_script_filename) and os.access(exp_script_filename, os.R_OK): + exp_script = self.sw.load_file_as_unicode(exp_script_filename) + + res_script=u"" + if res_script_filename is not None and res_script_filename!="": + self.sw.res_script_filename=res_script_filename[:] + if os.path.isfile(res_script_filename) and os.access(res_script_filename, os.R_OK): + res_script = self.sw.load_file_as_unicode(res_script_filename) + self.sw.set_scripts(exp_script, res_script) + + self.statusbar_init() + + self.main_window.show_all() + self.main_window.present() + + def glade_layout_init(self): + glade_file=os.path.join(os.path.dirname(__file__),"damaris.glade") + self.xml_gui = gtk.glade.XML(glade_file) + self.main_window = self.xml_gui.get_widget("main_window") + self.main_window.connect("delete-event", self.quit_event) + self.main_window.set_icon_from_file(os.path.join(os.path.dirname(__file__),"DAMARIS.png")) + self.main_window.set_title(u"DAMARIS-%s"%__version__) + + def statusbar_init(self): + """ + experiment and result thread status, backend state + """ + self.experiment_script_statusbar_label = self.xml_gui.get_widget("statusbar_experiment_script_label") + self.data_handling_statusbar_label = self.xml_gui.get_widget("statusbar_data_handling_label") + self.backend_statusbar_label = self.xml_gui.get_widget("statusbar_core_label") + + def toolbar_init(self): + """ + buttons like save and run... 
+ """ + self.toolbar_stop_button = self.xml_gui.get_widget("toolbar_stop_button") + self.toolbar_run_button = self.xml_gui.get_widget("toolbar_run_button") + self.toolbar_pause_button = self.xml_gui.get_widget("toolbar_pause_button") + + # print button + self.toolbar_print_button=self.xml_gui.get_widget("toolbar_print_button") + if not hasattr(gtk, "PrintOperation"): + self.toolbar_print_button.set_sensitive(False) + print "Printing is not supported by GTK+ version in use" + else: + self.toolbar_print_button.set_sensitive(True) + self.xml_gui.signal_connect("on_toolbar_print_button_clicked", self.print_button_switch) + + # prepare for edit state + self.toolbar_run_button.set_sensitive(True) + self.toolbar_stop_button.set_sensitive(False) + self.toolbar_pause_button.set_sensitive(False) + + # and their events + self.xml_gui.signal_connect("on_toolbar_run_button_clicked", self.start_experiment) + self.xml_gui.signal_connect("on_toolbar_pause_button_toggled", self.pause_experiment) + self.xml_gui.signal_connect("on_toolbar_stop_button_clicked", self.stop_experiment) + self.xml_gui.signal_connect("on_doc_menu_activate",self.show_doc_menu) + self.xml_gui.signal_connect("on_toolbar_manual_button_clicked",self.show_manual) + + def run(self): + # prolong lifetime of clipboard till the very end (avoid error message) + self.main_clipboard = self.sw.main_clipboard + gtk.gdk.threads_enter() + gtk.main() + gtk.gdk.threads_leave() + + self.si=None + self.sw=None + self.config=None + self.xml_gui=None + + # event handling: the real acitons in gui programming + + # first global events + + def quit_event(self, widget, data=None): + """ + expecting quit event for main application + """ + if self.state in [DamarisGUI.Edit_State, DamarisGUI.Quit_State]: + self.state=DamarisGUI.Quit_State + # do a cleanup... + print "ToDo: Cleanup, Save Dialogs ..." + self.config=None + self.sw=None + self.monitor=None + self.log=None + # and quit + gtk.main_quit() + return True + else: + print "Stop Experiment please! (ToDo: Dialog)" + return True + + # toolbar related events: + + def start_experiment(self, widget, data = None): + + # something running? + if self.si is not None: + print "Last Experiment is not clearly stopped!" + self.si = None + + # get config values: + actual_config = self.config.get() + + # get scripts and start script interface + self.sw.disable_editing() + exp_script, res_script=self.sw.get_scripts() + if not actual_config["start_result_script"]: + res_script="" + if not actual_config["start_experiment_script"]: + exp_script="" + backend=actual_config["backend_executable"] + if not actual_config["start_backend"]: + backend="" + + if (backend=="" and exp_script=="" and res_script==""): + print "nothing to do...so doing nothing!" 
+ self.sw.enable_editing() + return + + # check whether scripts are syntacticaly valid + # should be merged with check script function + exp_code=None + if exp_script!="": + try: + exp_code=compile(exp_script, "Experiment Script", "exec") + except SyntaxError, e: + ln=e.lineno + lo=e.offset + if type(ln) is not types.IntType: + ln=0 + if type(lo) is not types.IntType: + lo=0 + print "Experiment Script: %s at line %d, col %d:"%(e.__class__.__name__, ln, lo) + if e.text!="": + print "\"%s\""%e.text + # print " "*(e.offset+1)+"^" # nice idea, but needs monospaced fonts + pass + print e + + res_code=None + if res_script!="": + try: + res_code=compile(res_script, "Result Script", "exec") + except SyntaxError, e: + ln=e.lineno + lo=e.offset + if type(ln) is not types.IntType: + ln=0 + if type(lo) is not types.IntType: + lo=0 + print "Result script: %s at line %d, col %d:"%(e.__class__.__name__,ln, lo) + if e.text!="": + print "\"%s\""%e.text + # print " "*(e.offset+1)+"^" # nice idea, but needs monospaced fonts + pass + print e + + # detect error + if (exp_script!="" and exp_code is None) or \ + (res_script!="" and res_code is None): + self.main_notebook.set_current_page(DamarisGUI.Log_Display) + self.sw.enable_editing() + return + + # prepare to run + self.state=DamarisGUI.Run_State + self.toolbar_run_button.set_sensitive(False) + self.toolbar_stop_button.set_sensitive(True) + self.toolbar_pause_button.set_sensitive(True) + self.toolbar_pause_button.set_active(False) + + # delete old data + self.data = None + self.monitor.observe_data_pool(self.data) + + # set the text mark for hdf logging + if self.log.textbuffer.get_mark("lastdumped") is None: + self.log.textbuffer.create_mark("lastdumped", self.log.textbuffer.get_end_iter(), left_gravity=True) + + # start experiment + try: + self.spool_dir=os.path.abspath(actual_config["spool_dir"]) + # setup script engines + self.si=ScriptInterface(exp_code, + res_code, + backend, + self.spool_dir, + clear_jobs=actual_config["del_jobs_after_execution"], + clear_results=actual_config["del_results_after_processing"]) + + self.data=self.si.data + # run frontend and script engines + self.monitor.observe_data_pool(self.data) + self.si.runScripts() + except Exception, e: + #print "ToDo evaluate exception",str(e), "at",traceback.extract_tb(sys.exc_info()[2])[-1][1:3] + #print "Full traceback:" + traceback_file=cStringIO.StringIO() + traceback.print_tb(sys.exc_info()[2], None, traceback_file) + self.main_notebook.set_current_page(DamarisGUI.Log_Display) + print "Error while executing scripts: %s\n"%str(e)+traceback_file.getvalue() + traceback_file=None + + self.data=None + if self.si is not None: + still_running=filter(None,[self.si.exp_handling,self.si.res_handling,self.si.back_driver]) + for r in still_running: + r.quit_flag.set() + print "waiting for threads stoping...", + still_running=filter(lambda x:x is not None and x.isAlive(), + [self.si.exp_handling, self.si.res_handling, self.si.back_driver]) + for t in still_running: + t.join() + print "done" + + # cleanup + self.si=None + self.state=DamarisGUI.Edit_State + self.sw.enable_editing() + self.toolbar_run_button.set_sensitive(True) + self.toolbar_stop_button.set_sensitive(False) + self.toolbar_pause_button.set_sensitive(False) + self.toolbar_pause_button.set_active(False) + return + + # switch to grapics + self.main_notebook.set_current_page(DamarisGUI.Monitor_Display) + + # set running + if self.si.exp_handling is not None: + self.experiment_script_statusbar_label.set_text("Experiment Script Running (0)") + 
else: + self.experiment_script_statusbar_label.set_text("Experiment Script Idle") + if self.si.res_handling is not None: + self.data_handling_statusbar_label.set_text("Result Script Running (0)") + else: + self.data_handling_statusbar_label.set_text("Result Script Idle") + + if self.si.back_driver is not None: + self.backend_statusbar_label.set_text("Backend Running") + else: + self.backend_statusbar_label.set_text("Backend Idle") + + # start data dump + self.dump_thread=None + self.save_thread=None + self.dump_filename="" + if actual_config["data_pool_name"]!="": + self.dump_states(init=True) + gobject.timeout_add(200, self.observe_running_experiment) + + def observe_running_experiment(self): + """ + periodically look at running threads + """ + # look at components and update them + # test whether backend and scripts are done + r=self.si.data.get("__recentresult",-1)+1 + b=self.si.data.get("__resultsinadvance",-1)+1 + e=self.si.data.get("__recentexperiment",-1)+1 + e_text=None + r_text=None + b_text=None + if self.si.exp_handling is not None: + if not self.si.exp_handling.isAlive(): + self.si.exp_handling.join() + if self.si.exp_handling.raised_exception: + print "experiment script failed at line %d (function %s): %s"%(self.si.exp_handling.location[0], + self.si.exp_handling.location[1], + self.si.exp_handling.raised_exception) + print "Full traceback", self.si.exp_handling.traceback + e_text="Experiment Script Failed (%d)"%e + else: + e_text="Experiment Script Finished (%d)"%e + print "experiment script finished" + self.si.exp_handling = None + else: + e_text="Experiment Script Running (%d)"%e + + if self.si.res_handling is not None: + if not self.si.res_handling.isAlive(): + self.si.res_handling.join() + if self.si.res_handling.raised_exception: + print "result script failed at line %d (function %s): %s"%(self.si.res_handling.location[0], + self.si.res_handling.location[1], + self.si.res_handling.raised_exception) + print "Full traceback", self.si.res_handling.traceback + r_text="Result Script Failed (%d)"%r + else: + r_text="Result Script Finished (%d)"%r + self.si.res_handling = None + else: + r_text="Result Script Running (%d)"%r + + if self.si.back_driver is not None: + if not self.si.back_driver.isAlive(): + if self.si.back_driver.raised_exception: + b_text="Backend Failed" + else: + b_text="Backend Finished" + self.si.back_driver.join() + self.si.back_driver = None + else: + b_text="Backend Running" + if b!=0: + b_text+=" (%d)"%b + + if self.dump_thread is not None: + if self.dump_thread.isAlive(): + sys.stdout.write(".") + self.dump_dots+=1 + if self.dump_dots>80: + print + self.dump_dots=0 + else: + self.dump_thread.join() + self.dump_thread=None + dump_size=os.stat(self.dump_filename).st_size/1e6 + print "done (%.1f s, %.1f MB)"%(time.time()-self.dump_start_time, dump_size) + + gtk.gdk.threads_enter() + if e_text: + self.experiment_script_statusbar_label.set_text(e_text) + if r_text: + self.data_handling_statusbar_label.set_text(r_text) + if b_text: + self.backend_statusbar_label.set_text(b_text) + gtk.gdk.threads_leave() + + still_running=filter(None,[self.si.exp_handling, self.si.res_handling, self.si.back_driver, self.dump_thread]) + if len(still_running)==0: + if self.save_thread is None and self.dump_filename!="": + print "all subprocesses ended, saving data pool" + # thread to save data... 
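+                # the final dump runs on a worker thread so the GTK main loop
+                # stays responsive; progress dots are printed while it is alive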
+ self.save_thread=threading.Thread(target=self.dump_states, name="dump states")
+ self.save_thread.start()
+ self.dump_start_time=time.time()
+ self.dump_dots=0
+ self.state = DamarisGUI.Stop_State
+
+ if self.state == DamarisGUI.Stop_State:
+ gtk.gdk.threads_enter()
+ self.toolbar_pause_button.set_sensitive(False)
+ self.toolbar_stop_button.set_sensitive(False)
+ gtk.gdk.threads_leave()
+ if len(still_running)!=0:
+ print "subprocess(es) still running: "+', '.join(map(lambda s:s.getName(),still_running))
+ return True
+ else:
+ if self.save_thread is not None:
+ if self.save_thread.isAlive():
+ sys.stdout.write(".")
+ self.dump_dots+=1
+ if self.dump_dots>80:
+ print
+ self.dump_dots=0
+ return True
+ self.save_thread.join()
+ self.save_thread=None
+ dump_size=os.stat(self.dump_filename).st_size/1e6
+ print "done (%.1f s, %.1f MB)"%(time.time()-self.dump_start_time, dump_size)
+
+ # now everything is stopped
+ self.state=DamarisGUI.Edit_State
+ gtk.gdk.threads_enter()
+ self.sw.enable_editing()
+ self.toolbar_run_button.set_sensitive(True)
+ self.toolbar_stop_button.set_sensitive(False)
+ self.toolbar_pause_button.set_sensitive(False)
+ gtk.gdk.threads_leave()
+
+ # keep data to display but throw away everything else
+ self.si=None
+
+ return False
+
+ # dump states?
+ if self.dump_thread is None and self.dump_filename!="" and \
+ self.dump_timeinterval!=0 and self.last_dumped+self.dump_timeinterval<time.time():
+ # time for a periodic dump: run it in a thread, like the final save above
+ self.dump_thread=threading.Thread(target=self.dump_states, name="dump states")
+ self.dump_thread.start()
+ self.dump_start_time=time.time()
+ self.dump_dots=0
+
+ return True
+
+ def dump_states(self, init=False):
+ """
+ dump the data pool to the hdf5 file
+ (the head of this method was lost in conversion; the backup rotation
+ below is reconstructed around the surviving fragment)
+ """
+ if init:
+ self.last_dumped=0
+ # rotate existing dump files to numbered backups
+ if os.path.isfile(self.dump_filename):
+ dump_filename_pattern=self.dump_filename+".%d"
+ last_backup=0
+ cummulated_size=os.stat(self.dump_filename).st_size
+ while os.path.isfile(dump_filename_pattern%last_backup):
+ cummulated_size+=os.stat(dump_filename_pattern%last_backup).st_size
+ last_backup+=1
+ while last_backup>0:
+ os.rename(dump_filename_pattern%(last_backup-1),dump_filename_pattern%last_backup)
+ last_backup-=1
+ os.rename(self.dump_filename,dump_filename_pattern%0)
+ if cummulated_size>(1<<30):
+ print "Warning: the cumulated backup size of '%s' is %d MByte"%(self.dump_filename,
+ cummulated_size/(1<<20))
+ # init is finished now
+
+ # now it's time to create the hdf file
+ dump_file=None
+ if not os.path.isfile(self.dump_filename):
+ if not init:
+ print "dump file \"%s\" vanished unexpectedly, creating new one"%self.dump_filename
+ # have a look at the path and create necessary directories
+ dir_stack=[]
+ dir_trunk=os.path.dirname(os.path.abspath(self.dump_filename))
+ while dir_trunk!="" and not os.path.isdir(dir_trunk):
+ dir_stack.append(os.path.basename(dir_trunk))
+ dir_trunk=os.path.dirname(dir_trunk)
+ try:
+ while len(dir_stack):
+ dir_trunk=os.path.join(dir_trunk, dir_stack.pop())
+ if os.path.isdir(dir_trunk): continue
+ os.mkdir(dir_trunk)
+ except OSError, e:
+ print e
+ print "could not create dump directory '%s', so hdf5 dumps disabled"%dir_trunk
+ self.dump_filename=""
+ self.dump_timeinterval=0
+ return True
+
+ # create new dump file
+ dump_file=tables.openFile(self.dump_filename,mode="w",title="DAMARIS experiment data")
+ # write scripts and other useful information
+ scriptgroup=dump_file.createGroup("/","scripts","Used Scripts")
+ exp_text, res_text=self.sw.get_scripts()
+ if self.si.exp_script:
+ dump_file.createArray(scriptgroup,"experiment_script", exp_text)
+ if self.si.res_script:
+ dump_file.createArray(scriptgroup,"result_script", res_text)
+ if self.si.backend_executable:
+ dump_file.createArray(scriptgroup,"backend_executable", self.si.backend_executable)
+ if self.spool_dir:
+ dump_file.createArray(scriptgroup,"spool_directory", self.spool_dir)
+ timeline_tablecols=numpy.recarray(0,dtype=([("time","S17"),
+ ("experiments","int64"),
+ ("results","int64")]))
+ timeline_table=dump_file.createTable("/","timeline", timeline_tablecols, title="Timeline of Experiment")
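+ # note on the version switch below: with PyTables 1.x the log lines are
+ # stored in a variable-length array built from StringAtom(length=...),
+ # while PyTables 2.x uses an extendable EArray with StringAtom(itemsize=...);
+ # both variants keep zlib-compressed 120-character log records under /log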
name="log", + atom=tables.StringAtom(length=120), + title="log messages", + filters=tables.Filters(complevel=9, complib='zlib')) + else: + logarray=dump_file.createEArray(where=dump_file.root, + name="log", + atom=tables.StringAtom(itemsize=120), + shape=(0,), + title="log messages", + filters=tables.Filters(complevel=9, complib='zlib')) + + if dump_file is None and os.path.isfile(self.dump_filename) and tables.isPyTablesFile(self.dump_filename): + # take some data from dump file and repack + os.rename(self.dump_filename, self.dump_filename+".bak") + old_dump_file=tables.openFile(self.dump_filename+".bak", mode="r+") + if "data_pool" in old_dump_file.root: + old_dump_file.removeNode(where="/", name="data_pool", recursive=True) + old_dump_file.copyFile(self.dump_filename) + old_dump_file.close() + del old_dump_file + os.remove(self.dump_filename+".bak") + # prepare for update + dump_file=tables.openFile(self.dump_filename, mode="r+") + + if dump_file is None: + # exit! + print "coud not create dump directory '%s', so hdf5 dumps disabled"%dir_trunk + self.dump_filename="" + self.dump_timeinterval=0 + return True + + # no undo please! + if dump_file.isUndoEnabled(): + dump_file.disableUndo() + + # save the data! + self.data.write_hdf5(dump_file, where="/", name="data_pool", + complib=self.dump_complib, complevel=self.dump_complevel) + + # now save additional information + e=self.si.data.get("__recentexperiment",-1)+1 + r=self.si.data.get("__recentresult",-1)+1 + timeline_table=dump_file.root.timeline + timeline_row=timeline_table.row + timeline_row["time"]=time.strftime("%Y%m%d %H:%M:%S") + timeline_row["experiments"]=e + timeline_row["results"]=r + timeline_row.append() + timeline_table.flush() + + # save log window information: + # also save backend's logfile information? 
+ logtextbuffer=self.log.textbuffer
+ last_end=logtextbuffer.get_mark("lastdumped")
+ if last_end is None:
+ last_end=logtextbuffer.create_mark("lastdumped", logtextbuffer.get_start_iter(), left_gravity=True)
+ logtext_start=logtextbuffer.get_iter_at_mark(last_end)
+ logtext_end=logtextbuffer.get_end_iter()
+ logtextbuffer.move_mark(last_end, logtext_end)
+ # recode from unicode
+ logtext=codecs.getencoder("iso-8859-15")(logtextbuffer.get_text(logtext_start, logtext_end),"replace")[0]
+ # avoid circular references (seems to be necessary with gtk-2.12)
+ del logtextbuffer, logtext_start, logtext_end, last_end
+ for l in logtext.splitlines():
+ dump_file.root.log.append(numpy.array([l], dtype="S120"))
+
+ dump_file.flush()
+ dump_file.close()
+ self.last_dumped=time.time()
+ del dump_file
+
+ return True
+
+ def pause_experiment(self, widget, data = None):
+ """
+ pause experiment execution (i.e. delay the backend and let the others run)
+ """
+ if self.si is None: return False
+ pause_state=self.toolbar_pause_button.get_active()
+ if pause_state:
+ if self.state!=DamarisGUI.Run_State: return False
+ if self.spool_dir is None: return False
+ no=self.si.data.get("__recentresult",-1)+1
+ result_pattern=os.path.join(self.spool_dir, "job.%09d.result")
+ job_pattern=os.path.join(self.spool_dir, "job.%09d")
+ while os.path.isfile(result_pattern%no):
+ no+=1
+ i=0
+ self.pause_files=[]
+ while i<3 and os.path.isfile(job_pattern%(no+i)):
+ pause_file=(job_pattern%(no+i))+".pause"
+ os.rename(job_pattern%(no+i), pause_file)
+ self.pause_files.append(pause_file)
+ i+=1
+ self.state=DamarisGUI.Pause_State
+ self.backend_statusbar_label.set_text("Backend Paused")
+
+ else:
+ if self.state!=DamarisGUI.Pause_State: return False
+ self.state=DamarisGUI.Run_State
+ for f in self.pause_files:
+ os.rename(f, f[:-6])
+ self.pause_files=None
+ self.backend_statusbar_label.set_text("Backend Running")
+
+ def stop_experiment(self, widget, data = None):
+ if self.state in [DamarisGUI.Run_State, DamarisGUI.Pause_State]:
+ if self.si is None: return
+ still_running=filter(None,[self.si.exp_handling,self.si.res_handling,self.si.back_driver])
+ for r in still_running:
+ r.quit_flag.set()
+ self.state=DamarisGUI.Stop_State
+
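+ # note: the frontend cannot signal the backend directly, so
+ # pause_experiment() above renames up to three of the not yet processed
+ # spool files "job.NNNNNNNNN" to "job.NNNNNNNNN.pause"; the backend simply
+ # runs out of jobs and idles. Resuming renames them back (f[:-6] strips the
+ # ".pause" suffix). stop_experiment() instead sets the quit_flag of all
+ # worker threads and leaves the cleanup to observe_running_experiment().
+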
+ def print_button_switch(self, widget):
+ """
+ decides what to print and prints; the layout is done by the responsible class
+ """
+ if not hasattr(gtk, "PrintOperation"):
+ return
+
+ # copied and modified from pygtk-2.10.1/examples/pygtk-demo/demos/print_editor.py
+
+ print_ = gtk.PrintOperation()
+
+ # will come from config
+ settings=None
+ if settings is not None:
+ print_.set_print_settings(settings)
+
+ page_setup=None
+ if page_setup is not None:
+ print_.set_default_page_setup(page_setup)
+
+
+ #print_.set_property("allow_async",True)
+ current_page=self.main_notebook.get_current_page()
+ print_data = {}
+ if current_page in [0,1]:
+ print_.connect("begin_print", self.sw.begin_print, print_data)
+ print_.connect("draw_page", self.sw.draw_page, print_data)
+ elif current_page == 2:
+ print_.connect("begin_print", self.monitor.begin_print, print_data)
+ print_.connect("draw_page", self.monitor.draw_page, print_data)
+ else:
+ return
+
+ try:
+ res = print_.run(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG, self.main_window)
+ except gobject.GError, ex:
+ error_dialog = gtk.MessageDialog(self.main_window,
+ gtk.DIALOG_DESTROY_WITH_PARENT,
+ gtk.MESSAGE_ERROR,
+ gtk.BUTTONS_CLOSE,
+ ("Error printing file:\n%s" % str(ex)))
+ error_dialog.connect("response", gtk.Widget.destroy)
+ error_dialog.show()
+ else:
+ if res == gtk.PRINT_OPERATION_RESULT_APPLY:
+ settings = print_.get_print_settings()
+
+ def documentation_init(self):
+
+ self.doc_urls={
+ "Python DAMARIS": None,
+ "DAMARIS Homepage": "http://damaris.berlios.de/",
+ "Python": "http://www.python.org/doc/%d.%d/"%(sys.version_info[:2]),
+ "numpy/scipy": "http://docs.scipy.org/",
+ "pytables": "http://www.pytables.org/docs/manual/",
+ "DAMARIS backends": None,
+ "DAMARIS Repository": "http://svn.berlios.de/viewvc/damaris"
+ }
+
+ if os.path.isdir("/usr/share/doc/python%d.%d-doc/html"%(sys.version_info[:2])):
+ self.doc_urls["Python"]="file:///usr/share/doc/python%d.%d-doc/html/index.html"%(sys.version_info[:2])
+
+ if os.path.isdir("/usr/share/doc/python-tables-doc/html"):
+ self.doc_urls["pytables"]="file:///usr/share/doc/python-tables-doc/html/index.html"
+
+ doc_index_url=None
+ # local installation
+ installation_base=__file__
+ for i in xrange(5):
+ installation_base=os.path.dirname(installation_base)
+ if os.path.isfile(os.path.join(installation_base, "share", "python-damaris", "doc", "index.html")):
+ self.doc_urls["Python DAMARIS"]=os.path.join(installation_base, "share", "python-damaris", "doc", "index.html")
+ elif os.path.isfile("/usr/share/doc/python-damaris/html/index.html"):
+ # check generic debian location
+ self.doc_urls["Python DAMARIS"]="file:///usr/share/doc/python-damaris/html/index.html"
+ else:
+ self.doc_urls["Python DAMARIS"]="http://damaris.berlios.de/wiki/index.php/Tutorial"
+
+ self.doc_browser=None
+
+ def show_doc_menu(self, widget, data=None):
+ """
+ offer a wide variety of docs, prefer local installations
+ """
+ if type(widget) is gtk.MenuItem:
+ requested_doc=widget.get_child().get_text()
+ else:
+ requested_doc="Python DAMARIS"
+
+ if requested_doc in self.doc_urls and self.doc_urls[requested_doc] is not None:
+ if self.doc_browser is not None:
+ if not self.doc_browser.isAlive():
+ self.doc_browser.join()
+ if self.doc_browser.my_webbrowser is not None:
+ print "new browser tab"
+ self.doc_browser.my_webbrowser.open_new_tab(self.doc_urls[requested_doc])
+ else:
+ del self.doc_browser
+ self.doc_browser=start_browser(self.doc_urls[requested_doc])
+ self.doc_browser.start()
+ else:
+ self.doc_browser=start_browser(self.doc_urls[requested_doc])
+ self.doc_browser.start()
+ else:
+ print "missing docs for '%s'"%(requested_doc)
+
+ show_manual=show_doc_menu
+
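+ # the helper class below keeps the GTK main loop responsive: the browser is
+ # launched from a separate thread, preferring the webbrowser module on
+ # Python >= 2.5 (with Debian's x-www-browser on linux2) and falling back to
+ # spawning a separate python process that calls webbrowser.open()
+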
+ print "missing docs for '%s'"%(requested_doc) + + show_manual=show_doc_menu + +class start_browser(threading.Thread): + + def __init__(self, url): + threading.Thread.__init__(self, name="manual browser") + self.my_webbrowser=None + self.my_webbrowser_process=None + self.start_url=url + + def run(self): + """ + start a webbrowser + """ + if sys.hexversion>=0x02050000: + if sys.platform=="linux2" and self.my_webbrowser is None: + # try for debian linux + self.my_webbrowser=webbrowser.get("x-www-browser") + if self.my_webbrowser is None: + # this is what it should be everywhere! + self.my_webbrowser=webbrowser.get() + if self.my_webbrowser is not None: + print "starting web browser (module webbrowser)" + self.my_webbrowser.open(self.start_url) + return True + # last resort + print "starting web browser (webbrowser.py)" + self.my_webbrowser_process=os.spawnl(os.P_NOWAIT, + sys.executable, + os.path.basename(sys.executable), + "-c", + "import webbrowser\nwebbrowser.open('%s')"%self.start_url) + return True + +class LogWindow: + """ + writes messages to the log window + """ + + def __init__(self, xml_gui): + + self.xml_gui=xml_gui + self.textview=self.xml_gui.get_widget("messages_textview") + self.textbuffer=self.textview.get_buffer() + self.logstream=log + self.logstream.gui_log=self + self.last_timetag=None + self("Started in directory %s\n" % os.getcwd()) + + def __call__(self, message): + timetag=time.time() + gobject.idle_add(self.add_message_callback,timetag,message,priority=gobject.PRIORITY_LOW) + + def add_message_callback(self, timetag, message): + date_tag=u"" + if self.last_timetag is None or (message!="\n" and self.last_timetag+600 and ln<=tb.get_line_count(): + new_place=tb.get_iter_at_line_offset(ln-1,0) + if lo>0 and lo<=new_place.get_chars_in_line(): + new_place.set_line_offset(lo) + tb.place_cursor(new_place) + tv.scroll_to_iter(new_place, 0.2, False, 0,0) + except Exception, e: + print "Compilation Error:\n"+str(e)+"\n(ToDo: Dialog)" + + def notebook_page_switched(self, notebook, page, pagenumber): + self.set_toolbuttons_status() + + def column_line_widgets_changed_event(self, data=None): + widget_name=data.name + text_name=None + if widget_name.startswith("data_handling"): + text_name="data_handling" + elif widget_name.startswith("experiment_script"): + text_name="experiment_script" + else: + print "unknown line/column selector" + return + textview=self.__dict__[text_name+"_textview"] + textbuffer=textview.get_buffer() + newline=self.__dict__[text_name+"_line_indicator"].get_value_as_int()-1 + newcol=self.__dict__[text_name+"_column_indicator"].get_value_as_int()-1 + #if newline>textbuffer.get_end_iter().get_line(): + # return + + new_place=textbuffer.get_iter_at_line(newline) + if not newcol>new_place.get_chars_in_line(): + new_place.set_line_offset(newcol) + else: + self.__dict__[text_name+"_column_indicator"].set_value(1) + if len(textbuffer.get_selection_bounds())!=0: + textbuffer.move_mark_by_name("insert", new_place) + else: + textbuffer.place_cursor(new_place) + + textview.scroll_mark_onscreen(textbuffer.get_insert()) + textview.grab_focus() + return True + + def textviews_modified(self, data = None): + # mix into toolbar affairs + self.set_toolbuttons_status() + + def textviews_clicked(self, widget, event): + return self.textviews_moved(widget) + + def textviews_moved(self, widget, text=None, count=None, ext_selection=None, data = None): + textbuffer=widget.get_buffer() + cursor_mark=textbuffer.get_insert() + cursor_iter=textbuffer.get_iter_at_mark(cursor_mark) + if 
+ if textbuffer is self.experiment_script_textbuffer:
+ line_indicator=self.experiment_script_line_indicator
+ column_indicator=self.experiment_script_column_indicator
+ if textbuffer is self.data_handling_textbuffer:
+ line_indicator=self.data_handling_line_indicator
+ column_indicator=self.data_handling_column_indicator
+
+ # do only necessary updates!
+ li_range_new=textbuffer.get_end_iter().get_line()+1
+ if line_indicator.get_range()[1]!=li_range_new:
+ line_indicator.set_range(1, li_range_new)
+ ci_range_new=cursor_iter.get_chars_in_line()+1
+ if column_indicator.get_range()[1]!=ci_range_new:
+ column_indicator.set_range(1, ci_range_new)
+ cursor_line=cursor_iter.get_line()+1
+ cursor_lineoffset=cursor_iter.get_line_offset()+1
+ if line_indicator.get_value()!=cursor_line:
+ line_indicator.set_value(cursor_line)
+ if column_indicator.get_value()!=cursor_lineoffset:
+ column_indicator.set_value(cursor_lineoffset)
+ return False
+
+ def textviews_keypress(self, widget, event, data = None):
+ """
+ helpful tab and return key functions
+ """
+ #print "keypress", event.state, event.keyval
+ if (event.state & gtk.gdk.CONTROL_MASK)!=0:
+ if event.keyval==gtk.gdk.keyval_from_name("c"):
+ if self.main_notebook.get_current_page() == 0:
+ self.experiment_script_textbuffer.copy_clipboard(self.main_clipboard)
+ elif self.main_notebook.get_current_page() == 1:
+ self.data_handling_textbuffer.copy_clipboard(self.main_clipboard)
+ return True
+ elif event.keyval==gtk.gdk.keyval_from_name("x"):
+ # cut_clipboard(clipboard, textview editable?)
+ if self.main_notebook.get_current_page() == 0:
+ self.experiment_script_textbuffer.cut_clipboard(self.main_clipboard, True)
+ elif self.main_notebook.get_current_page() == 1:
+ self.data_handling_textbuffer.cut_clipboard(self.main_clipboard, True)
+ return True
+ elif event.keyval==gtk.gdk.keyval_from_name("v"):
+ # paste_clipboard(clipboard, textpos (None = Cursor), textview editable?)
+ if self.main_notebook.get_current_page() == 0:
+ self.experiment_script_textbuffer.paste_clipboard(self.main_clipboard, None, True)
+ elif self.main_notebook.get_current_page() == 1:
+ self.data_handling_textbuffer.paste_clipboard(self.main_clipboard, None, True)
+ return True
+ elif event.keyval==gtk.gdk.keyval_from_name("s"):
+ # save buffer
+ page=self.main_notebook.get_current_page()
+ if (self.exp_script_filename,self.res_script_filename)[page] is None:
+ self.save_file_as()
+ else:
+ self.save_file()
+ return True
+ elif event.keyval==gtk.gdk.keyval_from_name("S"):
+ # save both buffers
+ print "ToDo: save both buffers"
+ self.save_all_files(None, None)
+ return True
+ return 0
+
+ # indent helpers
+ # tab keyval 0xff09
+ # backspace keyval 0xff08
+ # ToDo: check whether the modified event is fired after all of this
+ if(event.keyval==0xFF09 or event.keyval==0xFF08):
+ textbuffer=widget.get_buffer()
+ # do not do things during selection
+ if (textbuffer.get_selection_bounds()): return 0
+ cursor_mark=textbuffer.get_insert()
+ cursor_iter=textbuffer.get_iter_at_mark(cursor_mark)
+ if (cursor_iter.starts_line()):
+ # backspace with normal function at line start
+ if (event.keyval==0xFF08):
+ self.textviews_moved(widget)
+ return 0
+ # now get iterator at line start
+ linestart_iter=cursor_iter.copy()
+ linestart_iter.set_line_offset(0)
+ linebegin=textbuffer.get_text(linestart_iter,cursor_iter).expandtabs()
+ if (len(linebegin)!=0 and not linebegin.isspace()):
+ # just make the spaces go away
+ textbuffer.delete(linestart_iter,cursor_iter)
+ textbuffer.insert(linestart_iter,linebegin)
+ self.textviews_moved(widget)
+ return 0
+ # find all space at the begin
+ while(not cursor_iter.ends_line()
+ and not cursor_iter.is_end()
+ and cursor_iter.get_char().isspace()):
+ cursor_iter.forward_char()
+ linebegin=textbuffer.get_text(linestart_iter,cursor_iter)
+ if (event.keyval==0xFF08):
+ # backspace shortens space
+ linebegin=u' '*((len(linebegin)-1)/4)*4
+ elif (event.keyval==0xFF09):
+ # tab widens space
+ linebegin=u' '*((len(linebegin)+4)/4)*4
+
+ textbuffer.delete(linestart_iter,cursor_iter)
+ textbuffer.insert(linestart_iter,linebegin)
+ self.textviews_moved(widget)
+ return 1
+ # implement convenience function for enter key
+ elif (event.keyval==0xFF0D):
+ textbuffer=widget.get_buffer()
+ # do not do things during selection
+ if (textbuffer.get_selection_bounds()): return 0
+ cursor_mark=textbuffer.get_insert()
+ cursor_iter=textbuffer.get_iter_at_mark(cursor_mark)
+ # determine this line's indent count
+ linestart_iter=cursor_iter.copy()
+ linestart_iter.set_line_offset(0)
+ spaceend_iter=linestart_iter.copy()
+ while(not spaceend_iter.ends_line()
+ and not spaceend_iter.is_end()
+ and spaceend_iter.get_char().isspace()):
+ spaceend_iter.forward_char()
+ linebegin=textbuffer.get_text(linestart_iter,spaceend_iter).expandtabs()
+ indent_length=len(linebegin)
+ textbuffer.delete(linestart_iter,spaceend_iter)
+ textbuffer.insert(linestart_iter,u' '*indent_length)
+ # start with the real work
+ cursor_iter=textbuffer.get_iter_at_mark(cursor_mark)
+ if (not cursor_iter.starts_line()):
+ # find last char before cursor
+ lastchar_iter=cursor_iter.copy()
+ lastchar_iter.backward_char()
+ if (lastchar_iter.get_char()==u":"): indent_length+=4
+ # now find indent of next line...
+ textbuffer.insert(cursor_iter,u'\n'+(u' '*indent_length))
+ widget.scroll_to_mark(cursor_mark,0.0,0)
+ self.textviews_moved(widget)
+ return 1
+
+ #self.textviews_moved(widget)
+ return 0
+
+ def load_file_as_unicode(self, script_filename):
+ script_file = file(script_filename, "rU")
+ script_string = u""
+ for line in script_file:
+ script_string += unicode(line,encoding="iso-8859-15", errors="replace")
+ script_file.close()
+ return script_string
+
+ def open_file(self, widget, Data = None):
+ """
+ do the open file dialog, if necessary ask for save
+ """
+ # ignore
+ if not self.editing_state: return 0
+
+ # Determining the tab which is currently open
+ current_page=self.main_notebook.get_current_page()
+ if current_page == 0:
+ open_dialog_title="Open Experiment Script..."
+ modified=self.experiment_script_textbuffer.get_modified()
+ elif current_page == 1:
+ open_dialog_title="Open Result Script..."
+ modified=self.data_handling_textbuffer.get_modified()
+ else:
+ return 0
+
+ if modified:
+ print "ToDo: Save First Dialog"
+
+ def response(self, response_id, script_widget):
+ if response_id == gtk.RESPONSE_OK:
+ file_name = dialog.get_filename()
+ if file_name is None:
+ return
+
+ script_filename=os.path.abspath(file_name)
+ if not os.access(script_filename, os.R_OK):
+ outer_space.show_error_dialog("File I/O Error","Cannot read from file %s" % script_filename)
+ return True
+
+ script_string = script_widget.load_file_as_unicode(script_filename)
+
+ if script_widget.main_notebook.get_current_page() == 0:
+ script_widget.exp_script_filename=script_filename
+ script_widget.set_scripts(script_string,None)
+ elif script_widget.main_notebook.get_current_page() == 1:
+ script_widget.res_script_filename=script_filename
+ script_widget.set_scripts(None, script_string)
+
+ return True
+
+
+ parent_window=self.xml_gui.get_widget("main_window")
+ dialog = gtk.FileChooserDialog(title=open_dialog_title,
+ parent=parent_window,
+ action=gtk.FILE_CHOOSER_ACTION_OPEN,
+ buttons = (gtk.STOCK_OPEN, gtk.RESPONSE_OK, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
+ )
+ dialog.set_default_response(gtk.RESPONSE_OK)
+ dialog.set_select_multiple(False)
+ # event handler for the response signal (when one of the buttons is pressed)
+ dialog.connect("response", response, self)
+
+ dialog.run()
+ dialog.destroy()
+
+ # update title and so on...
+
+ return True
+
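+ # note on encodings: load_file_as_unicode() above decodes each line as
+ # iso-8859-15 (replacing undecodable bytes) and save_file() below encodes
+ # the unicode buffer back to iso-8859-15, so script files round-trip
+ # losslessly within that charset
+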
+ def save_file(self, widget = None, Data = None):
+ """
+ save file to associated filename
+ """
+ # ignore
+ if not self.editing_state: return 0
+
+ # Determining the tab which is currently open
+ current_page=self.main_notebook.get_current_page()
+ if current_page == 0:
+ filename=self.exp_script_filename
+ elif current_page == 1:
+ filename=self.res_script_filename
+ else:
+ return 0
+
+ if filename is None: return 0
+
+ # save file
+ if current_page==0:
+ script=self.get_scripts()[0]
+ elif current_page==1:
+ script=self.get_scripts()[1]
+ else:
+ return 0
+
+ # encode from unicode to iso-8859-15
+ filecontents=codecs.getencoder("iso-8859-15")(script,"replace")[0]
+ file(filename,"w").write(filecontents)
+
+ if current_page == 0:
+ self.experiment_script_textbuffer.set_modified(False)
+ elif current_page == 1:
+ self.data_handling_textbuffer.set_modified(False)
+ self.set_toolbuttons_status()
+
+
+ def save_file_as(self, widget = None, Data = None):
+
+ def response(self, response_id, script_widget):
+ if response_id == gtk.RESPONSE_OK:
+ file_name = dialog.get_filename()
+ if file_name is None:
+ return True
+
+ absfilename=os.path.abspath(file_name)
+ if os.access(file_name, os.F_OK):
+ print "ToDo: Overwrite file question"
+
+ current_page=script_widget.main_notebook.get_current_page()
+ if current_page==0:
+ script_widget.exp_script_filename=absfilename
+ elif current_page==1:
+ script_widget.res_script_filename=absfilename
+ script_widget.save_file()
+
+ return True
+
+ # Determining the tab which is currently open
+
+ current_page=self.main_notebook.get_current_page()
+ if current_page == 0:
+ dialog_title="Save Experiment Script As..."
+ elif current_page == 1:
+ dialog_title="Save Result Script As..."
+ else:
+ return
+
+ parent_window=self.xml_gui.get_widget("main_window")
+ dialog = gtk.FileChooserDialog(title = dialog_title,
+ parent = parent_window,
+ action = gtk.FILE_CHOOSER_ACTION_SAVE,
+ buttons = (gtk.STOCK_SAVE, gtk.RESPONSE_OK, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
+
+ dialog.set_default_response(gtk.RESPONSE_OK)
+ dialog.set_select_multiple(False)
+
+ # event handler for the response signal (when one of the buttons is pressed)
+ dialog.connect("response", response, self)
+
+ dialog.run()
+ dialog.destroy()
+
+ return True
+
+ def save_all_files(self, widget, Data = None):
+
+ current_page=self.main_notebook.get_current_page()
+
+ # change page and call save dialog
+ self.main_notebook.set_current_page(0)
+ if self.exp_script_filename is None:
+ self.save_file_as()
+ else:
+ self.save_file()
+
+ self.main_notebook.set_current_page(1)
+ if self.res_script_filename is None:
+ self.save_file_as()
+ else:
+ self.save_file()
+
+ self.main_notebook.set_current_page(current_page)
+
+ def new_file(self, widget, Data = None):
+
+ if not self.editing_state: return 0
+ current_page=self.main_notebook.get_current_page()
+ if current_page==0:
+ if self.experiment_script_textbuffer.get_modified():
+ print "ToDo: Save before Clear Dialog"
+ self.set_scripts("", None)
+ self.exp_script_filename=None
+ elif current_page==1:
+ if self.data_handling_textbuffer.get_modified():
+ print "ToDo: Save before Clear Dialog"
+ self.set_scripts(None, "")
+ self.res_script_filename=None
+ self.set_toolbuttons_status()
+
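+ # printing overview: begin_print() below lays the current script out in one
+ # Pango layout and accumulates logical line heights; whenever a line would
+ # overflow the page height a page break is recorded, and draw_page() later
+ # renders exactly the line range belonging to the requested page
+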
+ def begin_print(self, operation, context, print_data):
+ """
+ layout of all pages
+ """
+ # copied and modified from pygtk-2.10.1/examples/pygtk-demo/demos/print_editor.py
+
+ # Determining the tab which is currently open
+ current_page=self.main_notebook.get_current_page()
+ script=""
+ # get script text
+ if current_page==0:
+ script=self.get_scripts()[0]
+ elif current_page==1:
+ script=self.get_scripts()[1]
+
+ width = context.get_width()
+ height = context.get_height()
+ layout = context.create_pango_layout()
+ layout.set_font_description(pango.FontDescription("Monospace 12"))
+ layout.set_width(int(width*pango.SCALE))
+ layout.set_text(script)
+ num_lines = layout.get_line_count()
+
+ page_breaks = []
+ page_height = 0
+
+ for line in xrange(num_lines):
+ layout_line = layout.get_line(line)
+ ink_rect, logical_rect = layout_line.get_extents()
+ lx, ly, lwidth, lheight = logical_rect
+ line_height = lheight / 1024.0
+ if page_height + line_height > height:
+ page_breaks.append(line)
+ page_height = 0
+ page_height += line_height
+
+ operation.set_n_pages(len(page_breaks) + 1)
+ print_data["page_breaks"] = page_breaks
+ print_data["layout"]=layout
+
+ def draw_page(self, operation, context, page_nr, print_data):
+ """
+ render a single page
+ """
+ # copied and modified from pygtk-2.10.1/examples/pygtk-demo/demos/print_editor.py
+ assert isinstance(print_data["page_breaks"], list)
+ if page_nr == 0:
+ start = 0
+ else:
+ start = print_data["page_breaks"][page_nr - 1]
+
+ try:
+ end = print_data["page_breaks"][page_nr]
+ except IndexError:
+ end = print_data["layout"].get_line_count()
+
+ cr = context.get_cairo_context()
+
+ cr.set_source_rgb(0, 0, 0)
+
+ i = 0
+ start_pos = 0
+ iter = print_data["layout"].get_iter()
+ while 1:
+ if i >= start:
+ line = iter.get_line()
+ _, logical_rect = iter.get_line_extents()
+ lx, ly, lwidth, lheight = logical_rect
+ baseline = iter.get_baseline()
+ if i == start:
+ start_pos = ly / 1024.0
+ cr.move_to(lx / 1024.0, baseline / 1024.0 - start_pos)
+ cr.show_layout_line(line)
+ i += 1
+ if not (i < end and iter.next_line()):
+ break
+
+class ConfigTab:
+ """
+ for now, all values are stored in the GUI widgets
+ """
+
+ def __init__(self, xml_gui):
+ self.xml_gui=xml_gui
+
+ self.configname = "damaris/python-damaris.xml"
+ self.system_default_filename = None
+ self.system_backend_folder = "/usr/lib/damaris/backends/"
+ if sys.platform[:5] == "linux" or sys.platform == "darwin":
+ xdg_dirs = xdg.BaseDirectory.xdg_config_dirs
+ xdg_dirs.remove(xdg.BaseDirectory.xdg_config_home)
+ self.user_default_filename = os.path.join(xdg.BaseDirectory.xdg_config_home, self.configname)
+ self.system_default_filenames = [os.path.join(syscfg,self.configname) for syscfg in xdg_dirs]
+
+ self.config_start_backend_checkbutton=self.xml_gui.get_widget("start_backend_checkbutton")
+ self.config_backend_executable_entry=self.xml_gui.get_widget("backend_executable_entry")
+ self.config_spool_dir_entry=self.xml_gui.get_widget("spool_dir_entry")
+ self.config_start_experiment_script_checkbutton=self.xml_gui.get_widget("start_experiment_script_checkbutton")
+ self.config_start_result_script_checkbutton=self.xml_gui.get_widget("start_result_script_checkbutton")
+ self.config_del_results_after_processing_checkbutton=self.xml_gui.get_widget("del_results_after_processing_checkbutton")
+ self.config_del_jobs_after_execution_checkbutton=self.xml_gui.get_widget("del_jobs_after_execution_checkbutton")
+ self.config_data_pool_name_entry=self.xml_gui.get_widget("data_pool_name_entry")
+ self.config_data_pool_write_interval_entry=self.xml_gui.get_widget("data_pool_write_interval_entry")
+ self.config_data_pool_complib=self.xml_gui.get_widget("CompLibs")
+ self.config_data_pool_comprate=self.xml_gui.get_widget("CompRatio")
+ self.config_info_textview=self.xml_gui.get_widget("info_textview")
+ self.config_script_font_button=self.xml_gui.get_widget("script_fontbutton")
+ self.config_printer_setup_button=self.xml_gui.get_widget("printer_setup_button")
+ if not hasattr(gtk, "print_run_page_setup_dialog"):
+ self.config_printer_setup_button.set_sensitive(False)
+
+ # insert version information
+ components_text=u"""
+operating system %(os)s
+gtk version %(gtk)s
+glib version %(glib)s
+python version %(python)s
+matplotlib version %(matplotlib)s, %(matplotlib_backend)s
+numpy version %(numpy)s
+pytables version %(pytables)s, using %(pytables_libs)s
+pygtk version %(pygtk)s
+pygobject version %(pygobject)s
+"""
+ if hasattr(gobject, "glib_version"):
+ glib_version="%d.%d.%d"%gobject.glib_version
+ else:
+ glib_version="? (no pygobject module)"
+ if hasattr(gobject, "pygobject_version"):
+ pygobject_version="%d.%d.%d"%gobject.pygobject_version
+ else:
+ pygobject_version="? (no gobject version number)"
+
+ numpy_version="none"
+ try:
+ import numpy
+ except ImportError:
+ pass
+ else:
+ numpy_version=numpy.__version__
+
+ components_versions = {
+ "os": platform.platform() ,
+ "gtk": "%d.%d.%d"%gtk.gtk_version,
+ "glib": glib_version,
+ "python": sys.version ,
+ "matplotlib": matplotlib.__version__,
+ "matplotlib_backend": FigureCanvas.__name__[12:],
+ "numpy": numpy_version,
+ "pytables": tables.getPyTablesVersion(),
+ "pytables_libs": "",
+ "pygtk": "%d.%d.%d"%gtk.pygtk_version,
+ "pygobject": pygobject_version
+ }
+
+ # pytables modules:
+ # find compression extensions for combo box and write version numbers
+ # list is taken from ValueError output of tables.whichLibVersion("")
+ model=self.config_data_pool_complib.get_model()
+ for libname in ('hdf5', 'zlib', 'lzo', 'ucl', 'bzip2'):
+ version_info=None
+ try:
+ version_info=tables.whichLibVersion(libname)
+ except ValueError:
+ continue
+ if version_info:
+ components_versions["pytables_libs"]+="\n %s: %s"%(libname, str(version_info))
+ if libname!="hdf5":
+ # a compression library, add it to the combo box
+ if isinstance(model,gtk.ListStore):
+ model.append([libname])
+ elif isinstance(model,gtk.TreeStore):
+ model.append(None,[libname])
+ else:
+ print "cannot append compression lib name to %s"%model.__class__.__name__
+
+
+ # debug message
+ if debug:
+ print "DAMARIS", __version__
+ print components_text%components_versions
+
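+ # only compression libraries whose presence tables.whichLibVersion()
+ # confirmed above are offered in the combo box; a config file that names
+ # an unavailable library is therefore rejected later in set()
+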
+ # set no compression as default...
+ self.config_data_pool_complib.set_active(0)
+
+ info_textbuffer=self.config_info_textview.get_buffer()
+ info_text=info_textbuffer.get_text(info_textbuffer.get_start_iter(),info_textbuffer.get_end_iter())
+ info_text%={"moduleversions": components_text%components_versions, "damarisversion": __version__ }
+ info_textbuffer.set_text(info_text)
+ del info_textbuffer, info_text, components_text, components_versions
+
+ self.xml_gui.signal_connect("on_config_save_button_clicked", self.save_config_handler)
+ self.xml_gui.signal_connect("on_config_load_button_clicked", self.load_config_handler)
+ self.xml_gui.signal_connect("on_backend_executable_browse_button_clicked",
+ self.browse_backend_executable_dialog)
+ self.xml_gui.signal_connect("on_fontbutton_font_set",self.set_script_font_handler)
+ self.xml_gui.signal_connect("on_printer_setup_button_clicked", self.printer_setup_handler)
+ for self.system_default_filename in self.system_default_filenames:
+ if self.system_default_filename:
+ if os.access(self.system_default_filename, os.R_OK):
+ self.load_config(self.system_default_filename)
+ else:
+ print "cannot read system defaults from %s; ask the person responsible for this instrument if required"%self.system_default_filename
+
+ self.config_from_system = self.get()
+ self.load_config()
+
+ def get(self):
+ """
+ returns a dictionary of the current config values
+ """
+ complib_iter=self.config_data_pool_complib.get_active_iter()
+ complib=self.config_data_pool_complib.get_model().get_value(complib_iter,0)
+ return {
+ "start_backend": self.config_start_backend_checkbutton.get_active(),
+ "start_result_script": self.config_start_result_script_checkbutton.get_active(),
+ "start_experiment_script": self.config_start_experiment_script_checkbutton.get_active(),
+ "spool_dir": self.config_spool_dir_entry.get_text(),
+ "backend_executable" : self.config_backend_executable_entry.get_text(),
+ "data_pool_name" : self.config_data_pool_name_entry.get_text(),
+ "del_results_after_processing" : self.config_del_results_after_processing_checkbutton.get_active(),
+ "del_jobs_after_execution" : self.config_del_jobs_after_execution_checkbutton.get_active(),
+ "data_pool_write_interval" : self.config_data_pool_write_interval_entry.get_text(),
+ "data_pool_complib": complib,
+ "data_pool_comprate": self.config_data_pool_comprate.get_value_as_int(),
+ "script_font": self.config_script_font_button.get_font_name()
+ }
+
+ def set(self, config):
+ if "start_backend" in config:
+ self.config_start_backend_checkbutton.set_active(config["start_backend"])
+ if "start_experiment_script" in config:
+ self.config_start_experiment_script_checkbutton.set_active(config["start_experiment_script"])
+ if "start_result_script" in config:
+ self.config_start_result_script_checkbutton.set_active(config["start_result_script"])
+ if "spool_dir" in config:
+ self.config_spool_dir_entry.set_text(config["spool_dir"])
+ if "backend_executable" in config:
+ self.config_backend_executable_entry.set_text(config["backend_executable"])
+ if "del_results_after_processing" in config:
+ self.config_del_results_after_processing_checkbutton.set_active(config["del_results_after_processing"])
+ if "del_jobs_after_execution" in config:
+ self.config_del_jobs_after_execution_checkbutton.set_active(config["del_jobs_after_execution"])
+ if "data_pool_write_interval" in config:
+ self.config_data_pool_write_interval_entry.set_text(config["data_pool_write_interval"])
+ if "data_pool_name" in config:
+ self.config_data_pool_name_entry.set_text(config["data_pool_name"])
if "data_pool_comprate" in config: + self.config_data_pool_comprate.set_value(float(config["data_pool_comprate"])) + if "script_font" in config: + self.config_script_font_button.set_font_name(config["script_font"]) + self.set_script_font_handler(None) + if "data_pool_complib" in config: + # find combo-box entry and make it active... + model=self.config_data_pool_complib.get_model() + iter=model.get_iter_first() + while iter is not None: + if model.get(iter,0)[0]==config["data_pool_complib"]: + self.config_data_pool_complib.set_active_iter(iter) + break + iter=model.iter_next(iter) + # if this compression method is not supported, warn and do nothing + if iter is None: + print "compression method %s is not supported"%config["data_pool_complib"] + + def load_config_handler(self, widget): + if self.system_default_filename: + self.load_config(self.system_default_filename) + self.load_config() + + def save_config_handler(self, widget): + self.save_config() + + def set_script_font_handler(self, widget): + """ + handles changes in font name + also sets the fonts to the text views (fast implementation: breaking encapsulation) + """ + font=self.config_script_font_button.get_font_name() + experiment_script_textview = self.xml_gui.get_widget("experiment_script_textview") + if experiment_script_textview: + experiment_script_textview.modify_font(pango.FontDescription(font)) + data_handling_textview = self.xml_gui.get_widget("data_handling_textview") + if data_handling_textview: + data_handling_textview.modify_font(pango.FontDescription(font)) + + def printer_setup_handler(self, widget): + """ + changes to printer setup + """ + if not (hasattr(gtk, "PrintSettings") and hasattr(gtk, "print_run_page_setup_dialog")): + return + if not hasattr(self, "printer_setup"): + self.printer_setup = gtk.PrintSettings() + + if not hasattr(self, "page_setup"): + self.page_setup = None + + self.page_setup = gtk.print_run_page_setup_dialog(self.xml_gui.get_widget("main_window"), + self.page_setup, self.printer_setup) + + + def browse_backend_executable_dialog(self, widget): + """ + do the open file dialog + """ + backend_filename_dialog_title="find backend" + + def response(self, response_id, script_widget): + if response_id == gtk.RESPONSE_OK: + file_name = self.get_filename() + if file_name is None: + return + script_widget.config_backend_executable_entry.set_text(file_name) + return True + + + parent_window=self.xml_gui.get_widget("main_window") + dialog = gtk.FileChooserDialog(title=backend_filename_dialog_title, + parent=parent_window, + action=gtk.FILE_CHOOSER_ACTION_OPEN, + buttons = (gtk.STOCK_OPEN, + gtk.RESPONSE_OK, + gtk.STOCK_CANCEL, + gtk.RESPONSE_CANCEL)) + dialog.set_default_response(gtk.RESPONSE_OK) + dialog.set_select_multiple(False) + dialog.set_filename(os.path.abspath(self.config_backend_executable_entry.get_text())) + if os.path.isdir(self.system_backend_folder) \ + and os.access(self.system_backend_folder, os.R_OK): + dialog.add_shortcut_folder(self.system_backend_folder) + # Event-Handler for responce-signal (when one of the button is pressed) + dialog.connect("response", response, self) + f=gtk.FileFilter() + f.add_custom(gtk.FILE_FILTER_FILENAME, lambda x:os.access(x[0],os.X_OK)) + dialog.set_filter(f) + + dialog.run() + dialog.destroy() + + return True + + def load_config(self, filename=None): + """ + set config from an xml file + """ + if filename is None: + filename=self.user_default_filename + + try: + readfile = file(filename, "r") + except Exception, e: + if debug: + print "Could not open 
%s: %s" % (filename, str(e)) + return + + # parser functions + def start_element(name, attrs, config): + if name == "config" and "key" in attrs: + config["__this_key__"]=attrs["key"] + if "type" in attrs: + config["__this_type__"]=attrs["type"] + config[attrs["key"]]="" + + def end_element(name, config): + if "__this_type__" in config and "__this_key__" in config: + if config["__this_type__"] == "Boolean": + if config[config["__this_key__"]] == "True": + config[config["__this_key__"]]=True + else: + config[config["__this_key__"]]=False + elif config["__this_type__"] == "Integer": + config[config["__this_key__"]]=int(config[config["__this_key__"]]) + + if "__this_type__" in config: + del config["__this_type__"] + if "__this_key__" in config: + del config["__this_key__"] + + + def char_data(data, config): + if "__this_key__" in config: + config[config["__this_key__"]]+=data + + # parse file contents to dictionary + config={} + p = xml.parsers.expat.ParserCreate() + p.StartElementHandler = lambda n,a: start_element(n,a, config) + p.EndElementHandler = lambda n: end_element(n, config) + p.CharacterDataHandler = lambda d: char_data(d, config) + p.ParseFile(readfile) + + self.set(config) + + def save_config(self, filename=None): + """ + write config as an xml file + """ + config=self.get() + if filename is None: + filename=self.user_default_filename + dirs = os.path.dirname(filename) + if not os.path.isdir(dirs): + os.makedirs(dirs) + + configfile=file(filename, "w") + configfile.write("\n") + configfile.write("\n") + for k,v in config.iteritems(): + if k in self.config_from_system \ + and self.config_from_system[k] == v: + if debug: + print "Ignoring for write, because system value for %r is %r equal to %r" % \ + (k, self.config_from_system[k],v) + continue + val="" + typename="" + if type(v) is types.BooleanType: + typename="Boolean" + if v: + val="True" + else: + val="False" + elif type(v) is types.StringType: + typename="String" + val=v + elif type(v) is types.IntType: + typename="Integer" + val=str(v) + configfile.write(" %s\n"%(k, typename, val)) + configfile.write("\n") + +class MonitorWidgets: + + def __init__(self, xml_gui): + """ + initialize matplotlib widgets and stuff around + """ + + self.xml_gui=xml_gui + self.main_window = self.xml_gui.get_widget("main_window") + self.display_settings_frame = self.xml_gui.get_widget("display_settings_frame") + + # Display footer: + self.display_x_scaling_combobox = self.xml_gui.get_widget("display_x_scaling_combobox") + self.display_x_scaling_combobox.remove_text(1) # remove base-e log + self.display_x_scaling_combobox.set_sensitive(False) + self.display_y_scaling_combobox = self.xml_gui.get_widget("display_y_scaling_combobox") + self.display_y_scaling_combobox.remove_text(1) # remove base-e log + self.display_y_scaling_combobox.set_sensitive(False) + self.display_autoscaling_checkbutton = self.xml_gui.get_widget("display_autoscaling_checkbutton") + self.display_statistics_checkbutton = self.xml_gui.get_widget("display_statistics_checkbutton") + + # insert monitor + # Matplot (Display_Table, 1st Row) -------------------------------------------------------- + + # Neue Abbildung erstellen + self.matplot_figure = matplotlib.figure.Figure() + + # the plot area surrounded by axes + self.matplot_axes = self.matplot_figure.add_axes([0.1,0.15,0.8,0.7]) + + # Achsen beschriften & Gitternetzlinien sichtbar machen + self.matplot_axes.grid(True) + + # Ersten Plot erstellen und Referenz des ersten Eintrags im zurueckgegebenen Tupel speichern + # Voerst: 
+ # For now: graphen[0,1] = real and imaginary channel; [2,3] = real error, [4,5] = imaginary error
+ #self.graphen = []
+ #self.measurementresultgraph=None
+
+ self.matplot_axes.set_ylim([0.0,1.0])
+ self.matplot_axes.set_xlim([0.0,1.0])
+ self.matplot_axes.set_autoscale_on(self.display_autoscaling_checkbutton.get_active())
+
+ # linear y-/x-scaling
+ self.matplot_axes.set_yscale("linear")
+ self.matplot_axes.set_xscale("linear")
+
+ # put the matplotlib canvas into a GTK frame
+ self.matplot_canvas = FigureCanvas(self.matplot_figure)
+
+ self.display_table = self.xml_gui.get_widget("display_table")
+ self.display_table.attach(self.matplot_canvas, 0, 6, 0, 1, gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 0, 0)
+ self.matplot_canvas.show()
+
+ # add the matplotlib toolbar (Display_Table, 2nd row)
+ self.matplot_toolbar = matplotlib.backends.backend_gtk.NavigationToolbar2GTK(self.matplot_canvas, self.main_window)
+
+ self.display_table.attach(self.matplot_toolbar, 0, 1, 1, 2, gtk.FILL | gtk.EXPAND, 0, 0, 0)
+ self.matplot_toolbar.show()
+
+ # /Matplot --------------------------------------------------------------------------------
+
+ # display source
+ self.display_source_combobox = self.xml_gui.get_widget("display_source_combobox")
+ self.display_source_treestore = gtk.TreeStore(gobject.TYPE_STRING)
+ self.display_source_combobox.set_model(self.display_source_treestore)
+ display_source_cell = gtk.CellRendererText()
+ self.display_source_combobox.pack_start(display_source_cell, True)
+ self.display_source_combobox.add_attribute(display_source_cell, 'text', 0)
+ self.source_list_reset()
+ self.display_source_path_label = self.xml_gui.get_widget("display_source_path_label")
+
+ # display scaling: ToDo enable scaling
+ self.display_x_scaling_combobox.set_active(0)
+ self.display_y_scaling_combobox.set_active(0)
+ self.display_x_scaling_combobox.set_sensitive(True)
+ self.display_y_scaling_combobox.set_sensitive(True)
+
+ # and events...
+ self.display_source_combobox.connect("changed", self.display_source_changed_event)
+ self.xml_gui.signal_connect("on_display_autoscaling_checkbutton_toggled", self.display_autoscaling_toggled)
+ self.xml_gui.signal_connect("on_display_statistics_checkbutton_toggled", self.display_statistics_toggled)
+ self.xml_gui.signal_connect("on_display_x_scaling_combobox_changed", self.display_scaling_changed)
+ self.xml_gui.signal_connect("on_display_y_scaling_combobox_changed", self.display_scaling_changed)
+ self.xml_gui.signal_connect("on_display_save_data_as_text_button_clicked", self.save_display_data_as_text)
+ self.xml_gui.signal_connect("on_display_copy_data_to_clipboard_button_clicked", self.copy_display_data_to_clipboard)
+
+ # data to observe
+ self.data_pool=None
+ # name of displayed data and reference to data
+ self.displayed_data=[None,None]
+ self.__rescale=True
+ self.update_counter=0
+ self.update_counter_lock=threading.Lock()
+
+ def source_list_reset(self):
+ self.display_source_treestore.clear()
+ self.source_list_add(u'None')
+ none_iter=self.source_list_find([u'None'])
+ if none_iter is not None: self.display_source_combobox.set_active_iter(none_iter)
+
+ def source_list_find_one(self, model, iter, what):
+ """find node in subcategory"""
+ while iter is not None:
+ if model.get(iter,0)[0] == what:
+ return iter
+ iter = model.iter_next(iter)
+ return iter
+
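+ # data pool keys may contain "/" separators; the helpers below map such a
+ # key ("a/b/c") onto a path in the TreeStore, finding, creating and pruning
+ # intermediate nodes so the source combo box presents the pool as a tree
+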
["a", "b", "c" ] for "a/b/c" + """ + model = self.display_source_treestore + retval = None + iter = model.get_iter_root() + while iter is not None and len(namelist) > 0: + name = namelist[0] + iter = self.source_list_find_one(model, iter, name) + if iter is not None: + retval = iter + namelist.pop(0) + iter = model.iter_children(retval) + return retval + + def source_list_add(self, source_name, parent=None): + namelist = source_name.split("/") + found = self.source_list_find(namelist) + if parent is None: + parent = found + for rest_name in namelist: + # append() returns iter for the new row + parent = self.display_source_treestore.append(parent, [rest_name]) + + def source_list_remove(self, source_name): + namelist = source_name.split("/") + pwd = namelist[:] + iter = self.source_list_find(namelist) + if iter is None or len(namelist) > 0: + print "source_list_remove: WARNING: Not found" + return + model = self.display_source_treestore + if model.iter_has_child(iter): + print "source_list_remove: WARNING: Request to delete a tree" + return + while True: + parent = model.iter_parent(iter) + model.remove(iter) + pwd.pop() + # We now test, if we want to remove parent too + if parent is None: + break + if model.iter_has_child(parent): + # The parent has other children + break + if "/".join(pwd) in self.data_pool: + # The parent has data connected to it + break + iter = parent + + def source_list_current(self): + ai = self.display_source_combobox.get_active_iter() + namelist = [] + while ai is not None: + namelist.insert(0, str(self.display_source_treestore.get(ai,0)[0])) + ai = self.display_source_treestore.iter_parent(ai) + cur_source_name = "/".join(namelist) + return cur_source_name + + def observe_data_pool(self, data_pool): + """ + register a listener and save reference to data + assume to be in gtk/gdk lock + """ + if not self.data_pool is None: + # maybe some extra cleanup needed + print "ToDo: cleanup widgets" + if self.displayed_data[1] is not None and hasattr(self.displayed_data[1], "unregister_listener"): + self.displayed_data[1].unregister_listener(self.datastructures_listener) + self.displayed_data[1]=None + self.displayed_data=[None,None] + self.data_pool.unregister_listener(self.datapool_listener) + self.data_pool=None + + self.source_list_reset() + self.update_counter_lock.acquire() + self.update_counter=0 + self.update_counter_lock.release() + + # display states + self.__rescale=True + self.displayed_data=[None,None] + self.display_source_path_label.set_label(u"") + self.clear_display() + + if data_pool is not None: + # keep track of data + self.data_pool=data_pool + self.data_pool.register_listener(self.datapool_listener) + + #################### observing data structures and produce idle events + + def datapool_listener(self, event): + """ + sort data as fast as possible and get rid of non-interesting data + """ + if event.subject.startswith("__"): return + if debug and self.update_counter<0: + print "negative event count!", self.update_counter + if self.update_counter>5: + if debug: + print "sleeping to find time for grapics updates" + threading.Event().wait(0.05) + while self.update_counter>15: + threading.Event().wait(0.05) + if event.what==DataPool.Event.updated_value: + if self.displayed_data[0] is None or event.subject!=self.displayed_data[0]: + # do nothing, forget it + return + + displayed_object=self.displayed_data[1] + object_to_display=self.data_pool.get(event.subject) + if displayed_object is None or object_to_display is None: + self.update_counter_lock.acquire() 
+ self.update_counter+=1
+ self.update_counter_lock.release()
+ gobject.idle_add(self.datapool_idle_listener,event,priority=gobject.PRIORITY_DEFAULT_IDLE)
+ else:
+ if event.what==DataPool.Event.updated_value and \
+ (displayed_object is object_to_display or displayed_object.__class__ is object_to_display.__class__):
+ # same object, or at least the same kind: a display update is enough
+ self.update_counter_lock.acquire()
+ self.update_counter+=1
+ self.update_counter_lock.release()
+ gobject.idle_add(self.update_display_idle_event,self.displayed_data[0][:],
+ priority=gobject.PRIORITY_DEFAULT_IDLE)
+ else:
+ self.update_counter_lock.acquire()
+ self.update_counter+=1
+ self.update_counter_lock.release()
+ gobject.idle_add(self.datapool_idle_listener,event,priority=gobject.PRIORITY_DEFAULT_IDLE)
+
+ if event.what in [DataPool.Event.updated_value, DataPool.Event.new_key]:
+ #print "Update hdf5 file ..."
+ # TODO: incremental hdf5 update
+ pass
+
+ del displayed_object
+ del object_to_display
+
+ def datastructures_listener(self, event):
+ """
+ do fast work selecting important events
+ """
+ if debug and self.update_counter<0:
+ print "negative event count!", self.update_counter
+ if self.update_counter>5:
+ if debug:
+ print "sleeping to find time for graphics updates"
+ threading.Event().wait(0.05)
+ while self.update_counter>15:
+ threading.Event().wait(0.05)
+ if event.origin is not self.displayed_data[1]: return
+ self.update_counter_lock.acquire()
+ self.update_counter+=1
+ self.update_counter_lock.release()
+ gobject.idle_add(self.update_display_idle_event,self.displayed_data[0][:],priority=gobject.PRIORITY_DEFAULT_IDLE)
+
+ ################### consume idle events
+
+ def datapool_idle_listener(self,event):
+ """
+ here dictionary changes are done
+ """
+ self.update_counter_lock.acquire()
+ self.update_counter-=1
+ self.update_counter_lock.release()
+
+ if event.what==DataPool.Event.updated_value:
+ if (self.displayed_data[0] is not None and
+ self.displayed_data[0]==event.subject):
+ new_data_struct=self.data_pool[self.displayed_data[0]]
+ if self.displayed_data[1] is new_data_struct:
+ # update display only
+ if self.update_counter>10:
+ print "update queue too long (%d>10): skipping one update"%self.update_counter
+ else:
+ gtk.gdk.threads_enter()
+ try:
+ self.update_display()
+ finally:
+ gtk.gdk.threads_leave()
+ else:
+ # unregister old one
+ if self.displayed_data[1] is not None and hasattr(self.displayed_data[1], "unregister_listener"):
+ self.displayed_data[1].unregister_listener(self.datastructures_listener)
+ self.displayed_data[1]=None
+ # register new one
+ if hasattr(new_data_struct, "register_listener"):
+ new_data_struct.register_listener(self.datastructures_listener)
+ self.displayed_data[1]=new_data_struct
+ if self.update_counter>10:
+ print "update queue too long (%d>10): skipping one update"%self.update_counter
+ else:
+ gtk.gdk.threads_enter()
+ try:
+ self.renew_display()
+ finally:
+ gtk.gdk.threads_leave()
+ new_data_struct=None
+ elif event.what==DataPool.Event.new_key:
+ # update combo-box by inserting and rely on consistent information
+ gtk.gdk.threads_enter()
+ self.source_list_add(event.subject)
+ gtk.gdk.threads_leave()
+ elif event.what==DataPool.Event.deleted_key:
+ # update combo-box by removing and rely on consistent information
+ gtk.gdk.threads_enter()
+ if (not self.displayed_data[0] is None and
+ self.displayed_data[0]==event.subject):
+ self.displayed_data=[None,None]
+ none_iter=self.source_list_find([u'None'])
+ if none_iter is not None: self.display_source_combobox.set_active_iter(none_iter)
+ # not necessary, because event will be submitted
+
#self.clear_display()
+ self.source_list_remove(event.subject)
+ gtk.gdk.threads_leave()
+ elif event.what==DataPool.Event.destroy:
+ gtk.gdk.threads_enter()
+ self.source_list_reset()
+ self.displayed_data=[None,None]
+ self.clear_display()
+ gtk.gdk.threads_leave()
+ return
+
+ def update_display_idle_event(self, subject=None):
+
+ self.update_counter_lock.acquire()
+ self.update_counter-=1
+ self.update_counter_lock.release()
+ # print "update display", self.update_counter
+ if self.update_counter>10:
+ print "update queue too long (%d>10): skipping one update"%self.update_counter
+ return
+ if self.displayed_data[0] is None or subject!=self.displayed_data[0]:
+ return
+ gtk.gdk.threads_enter()
+ try:
+ self.update_display()
+ finally:
+ gtk.gdk.threads_leave()
+
+
+ ######################## events from buttons
+
+ def display_source_changed_event(self, widget, data=None):
+
+ new_data_name = self.source_list_current()
+ if (self.displayed_data[0] is None and new_data_name==u"None"): return
+ if (self.displayed_data[0]==new_data_name): return
+ if self.displayed_data[1] is not None and hasattr(self.displayed_data[1], "unregister_listener"):
+ self.displayed_data[1].unregister_listener(self.datastructures_listener)
+ self.displayed_data[1]=None
+ # register new one
+ if new_data_name==u"None":
+ self.display_source_path_label.set_label(u"")
+ self.displayed_data=[None,None]
+ self.clear_display()
+ elif self.data_pool is None or new_data_name not in self.data_pool:
+ none_iter=self.source_list_find([u'None'])
+ if none_iter is not None: self.display_source_combobox.set_active_iter(none_iter)
+ self.display_source_path_label.set_label(u"")
+ else:
+ new_data_struct=self.data_pool[new_data_name]
+ if hasattr(new_data_struct, "register_listener"):
+ new_data_struct.register_listener(self.datastructures_listener)
+ self.displayed_data=[new_data_name, new_data_struct]
+ dirpart=new_data_name.rfind("/")
+ if dirpart>=0:
+ self.display_source_path_label.set_label(u"in "+new_data_name[:dirpart])
+ else:
+ self.display_source_path_label.set_label(u"")
+ self.clear_display()
+ # renew display via idle event
+ self.update_counter_lock.acquire()
+ self.update_counter+=1
+ self.update_counter_lock.release()
+ event=DataPool.Event(DataPool.Event.updated_value, new_data_name)
+ gobject.idle_add(self.datapool_idle_listener,event,priority=gobject.PRIORITY_DEFAULT_IDLE)
+
+ def display_autoscaling_toggled(self, widget, data=None):
+ self.matplot_axes.set_autoscale_on(self.display_autoscaling_checkbutton.get_active())
+ if self.displayed_data[0] is not None:
+ self.update_display(self.displayed_data[0][:])
+
+ def display_scaling_changed(self, widget, data=None):
+ self.__rescale=True
+ if self.displayed_data[0] is not None:
+ self.update_display(self.displayed_data[0][:])
+
+ def display_statistics_toggled(self, widget, data=None):
+ if self.displayed_data[0] is not None:
+ self.update_display(self.displayed_data[0][:])
+
+ def save_display_data_as_text(self, widget, data=None):
+ """
+ copy data to tmp file and show save dialog
+ """
+ data_to_save=self.displayed_data[:]
+ if self.displayed_data[1] is None:
+ # nothing to save
+ return
+ if not hasattr(data_to_save[1], "write_to_csv"):
+ log("do not know how to save %s of class/type %s"%(data_to_save[0],type(data_to_save[1])))
+ return
+
+ # save them to a temporary file (in memory)
+ tmpdata=os.tmpfile()
+ tmpdata.write("# saved from monitor as %s\n"%data_to_save[0])
+ data_to_save[1].write_to_csv(tmpdata)
+
+ # show save dialog
+ def response(self, response_id, tmpfile):
+ if response_id == gtk.RESPONSE_OK:
+ file_name = dialog.get_filename()
+ if file_name is None:
+ return True
+
+ absfilename=os.path.abspath(file_name)
+ if os.access(file_name, os.F_OK):
+ log("ToDo: Overwrite file question")
+
+ textfile=file(absfilename,"w")
+ tmpfile.seek(0)
+ for l in tmpfile:
+ textfile.write(l)
+ textfile.close()
+ textfile=None
+ tmpfile=None
+ return True
+
+ # Determining the tab which is currently open
+ dialog_title="Save %s in file"%data_to_save[0]
+
+ dialog = gtk.FileChooserDialog(title = dialog_title,
+ parent = self.main_window,
+ action = gtk.FILE_CHOOSER_ACTION_SAVE,
+ buttons = (gtk.STOCK_SAVE, gtk.RESPONSE_OK, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
+
+ dialog.set_default_response(gtk.RESPONSE_OK)
+ dialog.set_current_name(data_to_save[0])
+ dialog.set_select_multiple(False)
+
+ # event handler for the response signal (when one of the buttons is pressed)
+ dialog.connect("response", response, tmpdata)
+ del tmpdata, data_to_save
+ dialog.run()
+ dialog.destroy()
+
+ return True
+
+ def copy_display_data_to_clipboard(self, widget, data=None):
+ data_to_save=self.displayed_data[:]
+ if self.displayed_data[1] is None:
+ # nothing to save
+ return
+ if not hasattr(data_to_save[1], "write_to_csv"):
+ log("do not know how to save %s of class/type %s"%(data_to_save[0],type(data_to_save[1])))
+ return
+
+ # tested with qtiplot, labplot, openoffice
+ # tab delimiters necessary
+ # comments are not welcome :-(
+ tmpdata=cStringIO.StringIO()
+ data_to_save[1].write_to_csv(tmpdata, delimiter=u"\t")
+ # cut away comments
+ tmpstring=u""
+ tmpdata.seek(0)
+ for line in tmpdata:
+ if line[0]=="#": continue
+ tmpstring+=line
+ del tmpdata
+ clipboard=gtk.clipboard_get()
+ clipboard.set_text(tmpstring)
+ del tmpstring, clipboard
+
+ ##################### functions to feed display
+
+ def clear_display(self):
+ """
+ unconditionally throw away everything
+ we are inside the gtk/gdk lock
+ """
+ self.display_x_scaling_combobox.set_sensitive(False)
+ self.display_y_scaling_combobox.set_sensitive(False)
+ if not hasattr(self, "__rescale"):
+ self.__rescale = True
+
+ if not hasattr(self,"measurementresultline"):
+ self.measurementresultline=None
+ elif self.measurementresultline is not None:
+ # clear line plot
+ self.matplot_axes.lines.remove(self.measurementresultline[0])
+ self.measurementresultline=None
+
+ if not hasattr(self,"measurementresultgraph"):
+ self.measurementresultgraph=None
+ elif self.measurementresultgraph is not None:
+ # clear errorbars
+ self.matplot_axes.lines.remove(self.measurementresultgraph[0])
+ for l in self.measurementresultgraph[1]:
+ self.matplot_axes.lines.remove(l)
+ for l in self.measurementresultgraph[2]:
+ self.matplot_axes.collections.remove(l)
+ self.measurementresultgraph=None
+ self.matplot_axes.clear()
+ self.matplot_axes.grid(True)
+ if not hasattr(self,"graphen"):
+ self.graphen=[]
+ elif self.graphen:
+ for l in self.graphen:
+ self.matplot_axes.lines.remove(l)
+ self.graphen=[]
+ self.matplot_axes.clear()
+ self.matplot_axes.grid(True)
+ self.matplot_canvas.draw_idle()
+
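+ # update_display() below recycles the existing Line2D objects via set_data()
+ # instead of clearing and re-plotting, touches the axis limits only when
+ # rescaling or autoscaling demands it, and decimates long traces with a
+ # moving average, keeping the GUI responsive while results accumulate
+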
+        assume, object is not changed
+        we are inside gtk/gdk lock
+
+        """
+        in_result=self.data_pool.get(self.displayed_data[0])
+        if in_result is None:
+            self.clear_display()
+            return
+        if isinstance(in_result, Accumulation) or isinstance(in_result, ADC_Result):
+            # directly taken from bluedamaris
+
+            xmin = in_result.get_xmin()
+            xmax = in_result.get_xmax()
+            ymin = in_result.get_ymin()
+            ymax = in_result.get_ymax()
+
+            # Check for log-scale
+            if xmin <= 0 or xmax <= 0:
+                self.display_x_scaling_combobox.set_sensitive(False)
+                self.display_x_scaling_combobox.set_active(0)
+            else:
+                self.display_x_scaling_combobox.set_sensitive(True)
+
+            if ymin <= 0 or ymax <= 0:
+                self.display_y_scaling_combobox.set_sensitive(False)
+                self.display_y_scaling_combobox.set_active(0)
+            else:
+                # mirror the x-axis logic: positive data allows log scaling
+                self.display_y_scaling_combobox.set_sensitive(True)
+
+            # Initial rescaling needed?
+            if self.__rescale:
+                x_scale=self.display_x_scaling_combobox.get_active_text()
+                y_scale=self.display_y_scaling_combobox.get_active_text()
+                if xmin<=0 or x_scale=="lin":
+                    self.matplot_axes.set_xscale("linear")
+                if ymin<=0 or y_scale=="lin":
+                    self.matplot_axes.set_yscale("linear")
+                self.matplot_axes.set_xlim(xmin, xmax)
+                self.matplot_axes.set_ylim(ymin, ymax)
+                self.__rescale = False
+
+            # Autoscaling activated?
+            elif self.display_autoscaling_checkbutton.get_active():
+
+                xlim_min, xlim_max=self.matplot_axes.get_xlim()
+                if xlim_min!=xmin or xlim_max!=xmax:
+                    self.matplot_axes.set_xlim(xmin, xmax)
+
+                # Rescale if the new ymax is much larger than the old one; similar rules apply to ymin
+                ylim_min, ylim_max = self.matplot_axes.get_ylim()
+                ydiff=ymax-ymin
+                if (ylim_max < ymax or ylim_min > ymin or
+                    ylim_max > ymax+0.2*ydiff or ylim_min < ymin-0.2*ydiff):
+                    self.matplot_axes.set_ylim(ymin, ymax)
+
+            xdata=in_result.get_xdata()
+            chans = in_result.get_number_of_channels()
+            data = []
+            colors = [(0,0,0.8,1), (0.7,0,0,1), (0,0.7,0,1), (0.7,0.5,0,1), (0,0,0,1)] # rgba tuples: blue, red, green, orange, black
+
+            for i in xrange(chans):
+                data.append(in_result.get_ydata(i))
+
+            moving_average=data_slice=None
+            if max_points_to_display>0 and len(xdata)>max_points_to_display:
+                print "decimating data to %d points by moving average (prevent crash of matplotlib)"%max_points_to_display
+                n=numpy.ceil(len(xdata)/max_points_to_display)
+                moving_average=numpy.ones(n, dtype="float")/n
+                data_slice=numpy.array(numpy.floor(numpy.arange(max_points_to_display, dtype="float") \
+                                                   /max_points_to_display*len(xdata)),
+                                       dtype="int")
+                xdata=xdata.take(data_slice) # no average !?
+                for i in xrange(chans):
+                    data[i] = numpy.convolve(data[i], moving_average, "same").take(data_slice)
+
+            if len(self.graphen)==0:
+                for i in xrange(chans):
+                    self.graphen.extend(self.matplot_axes.plot(xdata, data[i], linestyle="-", color=colors[i], linewidth = 2))
+                for i in xrange(chans):
+                    # initialize error bars: one upper and one lower line per channel
+                    self.graphen.extend(self.matplot_axes.plot([0.0], [0.0], linestyle="-", color=colors[i], linewidth = 0.5))
+                    self.graphen.extend(self.matplot_axes.plot([0.0], [0.0], linestyle="-", color=colors[i], linewidth = 0.5))
+            else:
+                for i in xrange(chans):
+                    self.graphen[i].set_data(xdata, data[i])
+
+            # Statistics activated?
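+            # layout of self.graphen, on which the statistics branch below
+            # relies: indices 0..chans-1 hold the channel curves, and for
+            # channel i the pair chans+2*i / chans+2*i+1 holds the upper and
+            # lower error-band line; e.g. for chans=2:
+            #   graphen[0], graphen[1] -> data curves of channels 0 and 1
+            #   graphen[2], graphen[3] -> y+err / y-err of channel 0
+            #   graphen[4], graphen[5] -> y+err / y-err of channel 1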
+            if (self.display_statistics_checkbutton.get_active() and
+                in_result.uses_statistics() and in_result.ready_for_drawing_error()):
+
+                for i in xrange(chans):
+                    err = in_result.get_yerr(i)
+                    if moving_average is not None:
+                        err = numpy.convolve(err, moving_average, "same").take(data_slice)
+                    self.graphen[chans+2*i].set_data(xdata, data[i] + err)
+                    self.graphen[chans+2*i+1].set_data(xdata, data[i] - err)
+            else:
+                for i in xrange(chans):
+                    self.graphen[chans+2*i].set_data([0.0],[0.0])
+                    self.graphen[chans+2*i+1].set_data([0.0],[0.0])
+            moving_average=data_slice=None
+            data=None
+
+            # Any title to be set?
+            in_result_title=in_result.get_title()
+            if in_result_title is not None:
+                # break long titles roughly every 101 characters
+                col=101
+                while len(in_result_title)-col>10:
+                    in_result_title=in_result_title[:col]+"\n"+in_result_title[col:]
+                    col+=101
+                self.matplot_axes.set_title(in_result_title)
+            else:
+                self.matplot_axes.set_title("")
+
+            # Any labels to be set?
+            if in_result.get_xlabel() is not None:
+                self.matplot_axes.set_xlabel(in_result.get_xlabel())
+            else:
+                self.matplot_axes.set_xlabel("")
+
+            if in_result.get_ylabel() is not None:
+                self.matplot_axes.set_ylabel(in_result.get_ylabel())
+            else:
+                self.matplot_axes.set_ylabel("")
+
+            # Any variables to be set? (kept for reference, currently disabled)
+#            if False:
+#                if isinstance(in_result, Accumulation):
+#                    descriptions = in_result.common_descriptions
+#                elif isinstance(in_result, ADC_Result):
+#                    descriptions = in_result.description
+#                else: pass
+#                actual_config = self.config.get()
+#                if (descriptions is not None) : #--markusro
+#                    print actual_config['pretty_descriptions']
+#                    pass
+#                description_string = ""
+#                for key in descriptions.keys():
+#                    description_string += "%s = %s\n" % (key,descriptions[key])
+#                self.matplot_axes.text(0.7,0.95, description_string[:-1],
+#                                       size=8,
+#                                       transform=self.matplot_axes.transAxes,
+#                                       va='top',
+#                                       backgroundcolor='white')
+#
+
+            # Draw it!
+            self.matplot_canvas.draw_idle()
+            del in_result
+
+        elif isinstance(in_result, MeasurementResult):
+
+            # remove lines and error bars
+            if self.measurementresultgraph is not None:
+                self.matplot_axes.lines.remove(self.measurementresultgraph[0])
+                # remove caps
+                for l in self.measurementresultgraph[1]:
+                    self.matplot_axes.lines.remove(l)
+                # and columns
+                for l in self.measurementresultgraph[2]:
+                    self.matplot_axes.collections.remove(l)
+                self.measurementresultgraph=None
+
+            if self.measurementresultline is not None:
+                self.matplot_axes.lines.remove(self.measurementresultline[0])
+                self.measurementresultline=None
+
+            [k,v,e]=in_result.get_errorplotdata()
+            if k.shape[0]!=0:
+                xmin=k.min()
+                ymin=(v-e).min()
+
+                if xmin>0:
+                    self.display_x_scaling_combobox.set_sensitive(True)
+                else:
+                    # force switch to lin scale
+                    self.display_x_scaling_combobox.set_sensitive(False)
+                    if self.display_x_scaling_combobox.get_active_text()!="lin":
+                        self.__rescale=True
+                        # and reset to linear
+                        self.display_x_scaling_combobox.set_active(0)
+                if ymin>0:
+                    self.display_y_scaling_combobox.set_sensitive(True)
+                else:
+                    # force switch to lin scale
+                    self.display_y_scaling_combobox.set_sensitive(False)
+                    if self.display_y_scaling_combobox.get_active_text()!="lin":
+                        self.__rescale=True
+                        # and reset to linear
+                        self.display_y_scaling_combobox.set_active(0)
+
+                x_scale=self.display_x_scaling_combobox.get_active_text()
+                y_scale=self.display_y_scaling_combobox.get_active_text()
+
+                # Initial rescaling needed?
+                if self.__rescale: # or self.display_autoscaling_checkbutton.get_active():
+                    xmax=k.max()
+                    ymax=(v+e).max()
+                    # is there a range problem?
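+                    # a degenerate range (all x or all y values equal) would
+                    # collapse set_xlim/set_ylim, so such a range is widened
+                    # by +-10% around the value, e.g. xmin == xmax == 5.0
+                    # becomes (4.5, 5.5); a range around exactly 0 falls back
+                    # to the fixed interval (-1, 1)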
+                    if xmin==xmax or ymin==ymax:
+                        # fix range and scaling problems
+                        if xmin==xmax:
+                            if xmin!=0:
+                                (xmin,xmax)=(xmin-abs(xmin/10.0), xmin+abs(xmin/10.0))
+                            else:
+                                (xmin,xmax)=(-1,1)
+                        if ymin==ymax:
+                            if ymin!=0:
+                                (ymin,ymax)=(ymin-abs(ymin/10.0), ymin+abs(ymin/10.0))
+                            else:
+                                (ymin,ymax)=(-1,1)
+
+                    if xmin<=0 or x_scale=="lin":
+                        self.matplot_axes.set_xscale("linear")
+                    if ymin<=0 or y_scale=="lin":
+                        self.matplot_axes.set_yscale("linear")
+
+                    self.matplot_axes.set_xlim(xmin, xmax)
+                    self.matplot_axes.set_ylim(ymin, ymax)
+
+                # finally decide about x log plot
+                if x_scale=="log10" and xmin>0:
+                    self.matplot_axes.set_xscale("log", basex=10.0)
+                    self.matplot_axes.fmt_xdata = lambda x: "%g" % x
+                #elif x_scale=="log" and xmin>0:
+                #    e scaling implementation not really useful
+                #    self.matplot_axes.set_xscale("log", basex=numpy.e)
+                if y_scale=="log10" and ymin>0:
+                    self.matplot_axes.set_yscale("log", basey=10.0)
+                    self.matplot_axes.fmt_ydata = lambda x: "%g" % x
+
+                self.__rescale=False
+
+                self.measurementresultgraph=self.matplot_axes.errorbar(x=k, y=v, yerr=e, fmt="bx")
+
+            [k,v]=in_result.get_lineplotdata()
+            if k.shape[0]!=0 and v.shape==k.shape:
+                self.measurementresultline = self.matplot_axes.plot(k, v, 'r-')
+
+            # Any title to be set?
+            title=in_result.get_title()
+            if title is not None:
+                self.matplot_axes.set_title(title)
+            else:
+                self.matplot_axes.set_title("")
+
+            self.matplot_canvas.draw_idle()
+            del k,v,e
+            del in_result
+
+    def renew_display(self):
+        """
+        set all properties of display
+        we are inside gtk/gdk lock
+        """
+        self.clear_display()
+        to_draw=self.data_pool[self.displayed_data[0]]
+
+        if to_draw is None: return
+        self.update_display()
+
+    def begin_print(self, operation, context, print_data):
+        """
+        layout of one page with matplotlib graph
+        """
+        operation.set_n_pages( 1 )
+
+    def draw_page(self, operation, context, page_nr, print_data):
+        """
+        render a single page
+        """
+        # copied and modified from pygtk-2.10.1/examples/pygtk-demo/demos/print_editor.py
+
+        if page_nr != 0:
+            return
+
+        # check page dimensions
+        # all lengths in inch: name *_in
+        page_setup=context.get_page_setup()
+        dpi=context.get_dpi_x()
+        if dpi!=context.get_dpi_y():
+            print "draw_page: dpi_x!=dpi_y, I am not prepared for that"
+        freewidth_in = float(context.get_width())/dpi
+        freeheight_in = float(context.get_height())/dpi
+        fc = self.matplot_canvas.switch_backends(matplotlib.backends.backend_cairo.FigureCanvasCairo)
+        fc.figure.dpi.set(dpi)
+        orig_w_in, orig_h_in = fc.figure.get_size_inches()
+        orig_f_color=fc.figure.get_facecolor()
+        orig_e_color=fc.figure.get_edgecolor()
+
+        # white background for printing
+        fc.figure.set_facecolor("w")
+        fc.figure.set_edgecolor("w")
+
+        # maximum scale with constant aspect
+        scale=min(freewidth_in/orig_w_in, freeheight_in/orig_h_in)
+        fc.figure.set_size_inches(orig_w_in*scale, orig_h_in*scale)
+        width_in_points, height_in_points = orig_w_in * dpi * scale, orig_h_in * dpi * scale
+        renderer = matplotlib.backends.backend_cairo.RendererCairo(fc.figure.dpi)
+        renderer.width = width_in_points
+        renderer.height = height_in_points
+        # centered picture
+        renderer.matrix_flipy = cairo.Matrix(yy=-1, xx=1,
+            y0=page_setup.get_top_margin(gtk.UNIT_POINTS)+(height_in_points+freeheight_in*dpi)/2.0,
+            x0=page_setup.get_left_margin(gtk.UNIT_POINTS)+(freewidth_in*dpi-width_in_points)/2.0)
+
+        renderer.set_ctx_from_surface(context.get_cairo_context().get_target())
+        # unfortunately, text needs extra treatment
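+        # the flip matrix above and the translation below center the figure
+        # on the printable area; in points (inches * dpi) this amounts to
+        #   x0 = left_margin + (free_width  - figure_width )/2
+        #   y0 = top_margin  + (free_height - figure_height)/2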
+        renderer.ctx.translate(page_setup.get_left_margin(gtk.UNIT_POINTS)+(freewidth_in*dpi-width_in_points)/2.0,
+                               page_setup.get_top_margin(gtk.UNIT_POINTS)-height_in_points/2.0+freeheight_in*dpi/2.0)
+        renderer.ctx.save() # important! the previous statement has no effect without this save
+        fc.figure.draw(renderer)
+
+        # restore the figure's settings
+        fc.figure.set_size_inches(orig_w_in, orig_h_in)
+        fc.figure.set_facecolor(orig_f_color)
+        fc.figure.set_edgecolor(orig_e_color)
+
+class ScriptInterface:
+    """
+    texts or code objects are executed as experiment and result script;
+    the backend is started with sufficient arguments
+    """
+
+    def __init__(self, exp_script=None, res_script=None, backend_executable=None, spool_dir="spool", clear_jobs=True, clear_results=True):
+        """
+        run experiment scripts and result scripts
+        """
+
+        self.exp_script=exp_script
+        self.res_script=res_script
+        # keep None as None: str(None) would yield the string "None" and
+        # wrongly enable the backend branch below
+        if backend_executable is not None:
+            self.backend_executable=str(backend_executable)
+        else:
+            self.backend_executable=None
+        self.spool_dir=os.path.abspath(spool_dir)
+        self.clear_jobs=clear_jobs
+        self.clear_results=clear_results
+        self.exp_handling=self.res_handling=None
+
+        self.exp_writer=self.res_reader=self.back_driver=None
+        if self.backend_executable is not None and self.backend_executable!="":
+            self.back_driver=BackendDriver.BackendDriver(self.backend_executable, spool_dir, clear_jobs, clear_results)
+            if self.exp_script: self.exp_writer=self.back_driver.get_exp_writer()
+            if self.res_script: self.res_reader=self.back_driver.get_res_reader()
+        elif self.exp_script and self.res_script:
+            self.back_driver=None
+            self.res_reader=ResultReader.BlockingResultReader(spool_dir, clear_jobs=self.clear_jobs, clear_results=self.clear_results)
+            self.exp_writer=ExperimentWriter.ExperimentWriter(spool_dir, inform_last_job=self.res_reader)
+        else:
+            self.back_driver=None
+            if self.exp_script: self.exp_writer=ExperimentWriter.ExperimentWriter(spool_dir)
+            if self.res_script: self.res_reader=ResultReader.ResultReader(spool_dir, clear_jobs=self.clear_jobs, clear_results=self.clear_results)
+        self.data=DataPool()
+
+    def runScripts(self):
+
+        try:
+            # get script engines
+            self.exp_handling=self.res_handling=None
+            if self.exp_script and self.exp_writer:
+                self.exp_handling=ExperimentHandling.ExperimentHandling(self.exp_script, self.exp_writer, self.data)
+            if self.res_script and self.res_reader:
+                self.res_handling=ResultHandling.ResultHandling(self.res_script, self.res_reader, self.data)
+
+            # start them
+            if self.back_driver is not None:
+                self.back_driver.start()
+                # wait until the backend reports a valid core process id (or quits)
+                while (not self.back_driver.quit_flag.isSet() and
+                       (self.back_driver.core_pid is None or self.back_driver.core_pid<=0)):
+                    self.back_driver.quit_flag.wait(0.1)
+            if self.exp_handling: self.exp_handling.start()
+            if self.res_handling: self.res_handling.start()
+        finally:
+            self.exp_writer=self.res_reader=None
+
+    def __del__(self):
+        self.exp_writer=None
+        self.res_reader=None
+        self.back_driver=None
+        self.data=None
+        self.exp_handling=None
+        self.res_handling=None
+
diff --git a/src/gui/ExperimentHandling.py b/src/gui/ExperimentHandling.py
new file mode 100644
index 0000000..19a86b9
--- /dev/null
+++ b/src/gui/ExperimentHandling.py
@@ -0,0 +1,95 @@
+import threading
+import StringIO
+import traceback
+import sys
+import time
+from damaris.experiments.Experiment import Quit
+from damaris.experiments import Experiment
+
+class ExperimentHandling(threading.Thread):
+    """
+    runs the experiment script in a sandbox
+    """
+
+    def __init__(self, script, exp_writer, data):
+        threading.Thread.__init__(self, name="experiment handler")
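+        # bookkeeping used by synchronize(): the experiment side publishes
+        # the id of the last job sent as data["__recentexperiment"], while
+        # the result side keeps data["__recentresult"] up to date; comparing
+        # the two lets the experiment script wait for the backend to catch up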
self.script=script + self.writer=exp_writer + self.data=data + self.quit_flag = threading.Event() + if self.data is not None: + self.data["__recentexperiment"]=-1 + + def synchronize(self, before=0, waitsteps=0.1): + while (self.data["__recentexperiment"]>self.data["__recentresult"]+before) and not self.quit_flag.isSet(): + self.quit_flag.wait(waitsteps) + if self.quit_flag.isSet(): + raise StopIteration + + def run(self): + dataspace={} + exp_classes = __import__('damaris.experiments', dataspace, dataspace, ['Experiment']) + for name in dir(exp_classes): + if name[:2]=="__" and name[-2:]=="__": continue + dataspace[name]=exp_classes.__dict__[name] + del exp_classes + + dataspace["data"]=self.data + dataspace["synchronize"]=self.synchronize + self.raised_exception = None + self.location = None + exp_iterator=None + try: + exec self.script in dataspace + except Exception, e: + self.raised_exception=e + self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3] + traceback_file=StringIO.StringIO() + traceback.print_tb(sys.exc_info()[2], None, traceback_file) + self.traceback=traceback_file.getvalue() + traceback_file=None + return + if "experiment" in dataspace: + try: + exp_iterator=dataspace["experiment"]() + except Exception, e: + self.raised_exception=e + self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3] + traceback_file=StringIO.StringIO() + traceback.print_tb(sys.exc_info()[2], None, traceback_file) + self.traceback=traceback_file.getvalue() + traceback_file=None + return + while exp_iterator is not None and not self.quit_flag.isSet(): + # get next experiment from script + try: + job=exp_iterator.next() + except StopIteration: + break + except Exception, e: + self.raised_exception=e + self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3] + traceback_file=StringIO.StringIO() + traceback.print_tb(sys.exc_info()[2], None, traceback_file) + self.traceback=traceback_file.getvalue() + traceback_file=None + break + # send it + self.writer.send_next(job) + # write a note + if isinstance(job, Experiment): + if self.data is not None: + self.data["__recentexperiment"]=job.job_id+0 + # relax for a short time + if "__resultsinadvance" in self.data and self.data["__resultsinadvance"]+10010: + raise e + print e, "retry", retries + time.sleep(0.05) + retries+=1 + + # get date of last modification + self.result_job_date = datetime.fromtimestamp(os.stat(in_filename)[8]) + if ELEMENT_TREE: + self.__parseFile = self.__parseFile_cETree + else: + self.__parseFile = self.__parseFile_expat + + self.__parseFile (result_file) + + result_file.close() + result_file = None + + r=self.result + self.result = None + + return r + + def __parseFile_cETree(self, in_file): + self.result = None + self.in_description_section=False + self.result_description = { } + self.result_job_number = None + # Job Date is set in __read_file() + self.__filetype = None + for elem in xml.etree.cElementTree.ElementTree(file=in_file).getiterator(): + if elem.tag == 'result': + self.result_job_number = int(elem.get("job")) + pass + + elif elem.tag == 'description': + if elem.text!=None: + self.result_description = {} + self.in_description_section=True + self.in_description_data=() + for an_item in elem.getchildren(): + self.in_description_data = (an_item.get("key"), an_item.get("type"), an_item.text) + # make item contents to dictionary item: + k,t,v=self.in_description_data + self.in_description_data=() + if t == "None": + self.result_description[k]=None + if t == "Float": + self.result_description[k]=float(v) + 
elif t == "Int": + self.result_description[k]=int(v) + elif t == "Long": + self.result_description[k]=long(v) + elif t == "Complex": + self.result_description[k]=complex(v) + elif t == "Boolean": + self.result_description[k]=bool(v) + elif t == "String": + self.result_description[k]=v + else: + # Anything else will be handled as a string + # Probably "repr". + self.result_description[k]=v + + elif elem.tag == 'adcdata': + self.__filetype = ResultReader.ADCDATA_TYPE + self.adc_result_trailing_chars = "" + + if self.result is None: + self.result = ADC_Result() + # None: new guess for adc data encoding + # "a": ascii + # "b": base64 + self.adc_data_encoding = None + + self.result.set_sampling_rate(float(elem.get("rate"))) + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + self.result.set_nChannels(int(elem.get("channels"))) + + self.result.set_description_dictionary(self.result_description.copy()) + title = "ADC-Result: job-id=%d"%int(self.result_job_number) + if len(self.result_description) > 0: + for k,v in self.result_description.iteritems(): + title += ", %s=%s"%(k,v) + self.result.set_title(title) + self.result_description = None + self.adc_result_sample_counter = 0 + self.adc_result_parts = [] # will contain arrays of sampled intervals, assumes same sample rate + else: + if float(elem.get("rate")) != self.result.get_sampling_rate(): + print "sample rate different in ADC_Result, found %f, former value %f"%\ + (float(in_attribute["rate"]),self.result.get_sampling_rate()) + new_samples = int(elem.get("samples")) + self.adc_result_sample_counter += new_samples + + self.adc_result_trailing_chars = "".join(elem.text.splitlines()) + tmp_string = base64.standard_b64decode(self.adc_result_trailing_chars) + self.adc_result_trailing_chars = None + tmp = numpy.fromstring(tmp_string,dtype='Int16') + tmp_string = None + self.adc_result_parts.append(tmp) + tmp = None + # we do not need this adcdata anymore, delete it + elem.clear() + + + + elif elem.tag == 'error': + self.__filetype = ResultReader.ERROR_TYPE + + self.result = Error_Result() + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + self.result.set_description_dictionary(self.result_description.copy()) + self.result.set_error_message(elem.text) + + elif elem.tag == 'temp': + self.__filetype = ResultReader.TEMP_TYPE + + self.result = Error_Result() + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + elif elem.tag == 'conf': + self.__filetype = ResultReader.CONF_TYPE + + self.result = Error_Result() + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + # xml file was traversed now prepare the data in one go + # prepare result data + if self.result is not None and \ + self.__filetype == ResultReader.ADCDATA_TYPE and \ + self.adc_result_sample_counter>0: + # fill the ADC_Result with collected data + # x data + self.result.x=numpy.arange(self.adc_result_sample_counter, dtype="Float64")/\ + self.result.get_sampling_rate() + self.result.y = [] + nChannels = self.result.get_nChannels() + # initialise the y arrays + for i in xrange(nChannels): + self.result.y.append(numpy.empty(self.adc_result_sample_counter, dtype='Int16')) + # remove from result stack + tmp_index = 0 + while self.adc_result_parts: + tmp_part=self.adc_result_parts.pop(0) + tmp_size = tmp_part.size/nChannels + + for i in xrange(nChannels): + # split interleaved data + 
+                    self.result.y[i][tmp_index:tmp_index+tmp_size] = tmp_part[i::nChannels]
+
+                if self.result.index != []:
+                    self.result.index.append((tmp_index, tmp_index+tmp_size-1))
+                else:
+                    self.result.index = [(0,tmp_size-1)]
+                tmp_index += tmp_size
+            self.result.cont_data=True
+            tmp_part = None
+
+    def __parseFile_expat(self, in_file):
+        "Parses the given file, adding it to the result-queue"
+
+        self.result = None
+        self.in_description_section=False
+        self.result_description = { }
+        self.result_job_number = None
+        # Job Date is set in __read_file()
+        self.__filetype = None
+
+        # Expat XML-Parser & Binding handlers
+        self.xml_parser = xml.parsers.expat.ParserCreate()
+        self.xml_parser.StartElementHandler = self.__xmlStartTagFound
+        self.xml_parser.CharacterDataHandler = self.__xmlCharacterDataFound
+        self.xml_parser.EndElementHandler = self.__xmlEndTagFound
+        self.element_stack=[]
+
+        try:
+            # short version, but pyexpat buffers are awfully small
+            # self.xml_parser.ParseFile(in_file)
+            # read all, at least try
+            databuffer=in_file.read(-1)
+            # test whether really everything was read...
+            databuffer2=in_file.read(self.xml_parser.buffer_size)
+            if databuffer2=="":
+                # parse everything at once
+                self.xml_parser.Parse(databuffer,True)
+            else:
+                # do the first part ...
+                self.xml_parser.Parse(databuffer,False)
+                databuffer=databuffer2
+                # ... and again and again
+                while databuffer!="":
+                    self.xml_parser.Parse(databuffer,False)
+                    databuffer=in_file.read(-1)
+                self.xml_parser.Parse("",True)
+        except xml.parsers.expat.ExpatError, e:
+            print "result file %d: xml parser '%s' error at line %d, offset %d"%(self.no,
+                                                                                 xml.parsers.expat.ErrorString(e.code),
+                                                                                 e.lineno,
+                                                                                 e.offset)
+            self.result = None
+
+        del databuffer
+        self.xml_parser.StartElementHandler=None
+        self.xml_parser.EndElementHandler=None
+        self.xml_parser.CharacterDataHandler=None
+        del self.xml_parser
+
+        # prepare result data
+        if self.result is not None and \
+           self.__filetype == ResultReader.ADCDATA_TYPE and \
+           self.adc_result_sample_counter>0:
+            # fill the ADC_Result with collected data
+            self.result.x=numpy.arange(self.adc_result_sample_counter, dtype="Float64")/\
+                          self.result.get_sampling_rate()
+            self.result.y=[]
+            self.result.index=[]
+            for i in xrange(2):
+                self.result.y.append(numpy.empty((self.adc_result_sample_counter,), dtype="Int16"))
+            tmp_sample_counter=0
+            while self.adc_result_parts:
+                tmp_part=self.adc_result_parts.pop(0)
+                tmp_size=tmp_part.size/2
+                self.result.y[0][tmp_sample_counter:tmp_sample_counter+tmp_size]=tmp_part[::2]
+                self.result.y[1][tmp_sample_counter:tmp_sample_counter+tmp_size]=tmp_part[1::2]
+                self.result.index.append((tmp_sample_counter,tmp_sample_counter+tmp_size-1))
+                tmp_sample_counter+=tmp_size
+            self.result.cont_data=True
+
+    # Callback when an xml start tag is found
+    def __xmlStartTagFound(self, in_name, in_attribute):
+
+        # General Result-Tag
+        if in_name == "result":
+            self.result_job_number = int(in_attribute["job"])
+            # Job-Date is set in __read_file()
+
+        # Description
+        elif in_name == "description":
+            # old style description:
+            if len(in_attribute)!=0:
+                self.result_description = in_attribute.copy()
+            self.in_description_section=True
+            self.in_description_data=()
+
+        elif self.in_description_section and in_name == "item":
+            self.in_description_data=[in_attribute["key"], in_attribute["type"], ""]
+
+        # ADC_Results
+        elif in_name == "adcdata":
+            self.__filetype = ResultReader.ADCDATA_TYPE
+
+            self.adc_result_trailing_chars = ""
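+            # the character data collected for this tag is decoded later in
+            # __xmlEndTagFound: the first line of the payload decides between
+            # whitespace separated integers (encoding "a") and base64
+            # (encoding "b"), see the adc_data_encoding guess there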
+ if self.result is None: + self.result = ADC_Result() + # None: new guess for adc data encoding + # "a": ascii + # "b": base64 + self.adc_data_encoding = None + + self.result.set_sampling_rate(float(in_attribute["rate"])) + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + self.result.set_description_dictionary(self.result_description.copy()) + title="ADC-Result: job-id=%d"%int(self.result_job_number) + if len(self.result_description)>0: + for k,v in self.result_description.iteritems(): + title+=", %s=%s"%(k,v) + self.result.set_title(title) + self.result_description=None + self.adc_result_sample_counter = 0 + self.adc_result_parts=[] # will contain arrays of sampled intervals, assumes same sample rate + else: + if float(in_attribute["rate"])!=self.result.get_sampling_rate(): + print "sample rate different in ADC_Result, found %f, former value %f"%\ + (float(in_attribute["rate"]),self.result.get_sampling_rate()) + new_samples=int(in_attribute["samples"]) + self.adc_result_sample_counter += new_samples + + # version depends on the inclusion of http://bugs.python.org/issue1137 + if sys.hexversion>=0x020501f0: + # extend buffer to expected base64 size (2 channels, 2 byte) + required_buffer=int(new_samples*4/45+1)*62 + if self.xml_parser.buffer_size < required_buffer: + try: + self.xml_parser.buffer_size=required_buffer + except AttributeError: + pass + + # pass all chardata as one block + self.xml_parser.buffer_text = True + # do not change the contents + self.xml_parser.returns_unicode=False + + # Error_Results + elif in_name == "error": + self.__filetype = ResultReader.ERROR_TYPE + + self.result = Error_Result() + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + self.result.set_description_dictionary(self.result_description.copy()) + + # Temp_Results + elif in_name == "temp": + self.__filetype = ResultReader.TEMP_TYPE + + self.result = Temp_Result() + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + # Config_Results + elif in_name == "conf": + self.__filetype = ResultReader.CONFIG_TYPE + + self.result = Config_Result() + self.result.set_job_id(self.result_job_number) + self.result.set_job_date(self.result_job_date) + + # maintain the stack + self.element_stack.append(in_name) + + def __xmlCharacterDataFound(self, in_cdata): + + if self.in_description_section and len(self.in_description_data): + self.in_description_data[2]+=in_cdata + + # ADC_Result + elif self.__filetype == ResultReader.ADCDATA_TYPE and self.element_stack[-1]=="adcdata": + self.adc_result_trailing_chars+=in_cdata + + # Error_Result + elif self.__filetype == ResultReader.ERROR_TYPE: + tmp_string = self.result.get_error_message() + if tmp_string is None: tmp_string = "" + + tmp_string += in_cdata + self.result.set_error_message(tmp_string) + + # Temp_Results + elif self.__filetype == ResultReader.TEMP_TYPE: + pass + + # Config_Results + elif self.__filetype == ResultReader.CONFIG_TYPE: + pass + + def __xmlEndTagFound(self, in_name): + + # maintain the stack + self.element_stack.pop() + + if in_name == "adcdata": + + # ADC_Result + if self.__filetype == ResultReader.ADCDATA_TYPE: + # detect type of data encoding from first line + if self.adc_data_encoding is None: + self.adc_result_trailing_chars=self.adc_result_trailing_chars.strip() + first_line_end=self.adc_result_trailing_chars.find("\n") + first_line="" + if first_line_end!=-1: + 
first_line=self.adc_result_trailing_chars[:first_line_end] + else: + first_line=self.adc_result_trailing_chars + if len(first_line.lstrip("-0123456789 \t\n\r"))==0: + try: + map(int,filter(len,first_line.split())) + except ValueError,e: + pass + else: + self.adc_data_encoding="a" + if self.adc_data_encoding is None and len(first_line)%4==0: + try: + base64.standard_b64decode(first_line) + except TypeError: + pass + else: + self.adc_data_encoding="b" + if self.adc_data_encoding is None: + print "unknown ADC data format \"%s\""%first_line + + tmp=None + if self.adc_data_encoding=="a": + values=map(int,self.adc_result_trailing_chars.split()) + tmp=numpy.array(values, dtype="Int16") + elif self.adc_data_encoding=="b": + tmp_string=base64.standard_b64decode(self.adc_result_trailing_chars) + tmp=numpy.fromstring(tmp_string, dtype="Int16") + del tmp_string + else: + print "unknown ADC data format" + + self.adc_result_trailing_chars="" + self.adc_result_parts.append(tmp) + del tmp + return + + elif in_name == "description": + self.in_description_section=False + + elif self.in_description_section and in_name == "item": + # make item contents to dictionary item: + k,t,v=self.in_description_data + self.in_description_data=() + if t == "None": + self.result_description[k]=None + if t == "Float": + self.result_description[k]=float(v) + elif t == "Int": + self.result_description[k]=int(v) + elif t == "Long": + self.result_description[k]=long(v) + elif t == "Complex": + self.result_description[k]=complex(v) + elif t == "Boolean": + self.result_description[k]=bool(v) + elif t == "String": + self.result_description[k]=v + else: + # Anything else will be handled as a string + # Probably "repr". + self.result_description[k]=v + + elif in_name == "result": + pass + + # Error_Result + elif self.__filetype == ResultReader.ERROR_TYPE: + pass + + # Temp_Result + elif self.__filetype == ResultReader.TEMP_TYPE: + pass + + # Config_Result + elif self.__filetype == ResultReader.CONFIG_TYPE: + pass + +class BlockingResultReader(ResultReader): + """ + to follow an active result stream + """ + + def __init__(self, spool_dir=".", no=0, result_pattern="job.%09d.result", clear_jobs=False, clear_results=False): + ResultReader.__init__(self, spool_dir, no, result_pattern, clear_jobs=clear_jobs, clear_results=clear_results) + self.stop_no=None # end of job queue + self.poll_time=0.1 # sleep interval for polling results, <0 means no polling and stop + self.in_advance=0 + + def __iter__(self): + """ + get next job with iterator + block until result is available + """ + expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no)) + while (not self.quit_flag.isSet()) and (self.stop_no is None or self.stop_no>self.no): + if not os.access(expected_filename,os.R_OK): + # stop polling, if required + if self.poll_time<0: break + self.quit_flag.wait(self.poll_time) + continue + + # find pending results + self.in_advance=max(self.no,self.in_advance) + in_advance_filename=os.path.join(self.spool_dir,self.result_pattern%(self.in_advance+1)) + while os.access(in_advance_filename, os.R_OK) and (self.stop_no is None or self.stop_no>self.in_advance+1): + # do not more than 100 results in advance at one glance + if self.in_advance>self.no+100: break + self.in_advance+=1 + in_advance_filename=os.path.join(self.spool_dir,self.result_pattern%(self.in_advance+1)) + + if self.quit_flag.isSet(): break + r=self.get_result_object(expected_filename) + if self.quit_flag.isSet(): break + + if self.quit_flag.isSet(): break + yield r + + if 
self.clear_results: + if os.path.isfile(expected_filename): os.remove(expected_filename) + if self.clear_jobs: + if os.path.isfile(expected_filename[:-7]): os.remove(expected_filename[:-7]) + + self.no+=1 + expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no)) + + return + + def quit(self): + self.quit_flag.set() diff --git a/src/gui/__init__.py b/src/gui/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/gui/damaris.glade b/src/gui/damaris.glade new file mode 100644 index 0000000..34ca5d1 --- /dev/null +++ b/src/gui/damaris.glade @@ -0,0 +1,2106 @@ + + + + + + True + DAMARIS + GTK_WIN_POS_CENTER + 790 + + + + + True + 3 + + + True + + + True + Creates an empty script + New + True + gtk-new + + + + True + + + + + True + Opens a script + Open + True + gtk-open + + + + True + + + + + True + Saves a script + Save + True + gtk-save + + + + True + + + + + True + Saves a script in a new file + Save AS + True + gtk-save-as + + + + True + + + + + True + Saves all scripts + Save All + True + gtk-copy + + + + True + + + + + True + Checks script's syntax + Check Syntax + True + gtk-apply + + + + True + + + + + True + + + + + True + gtk-print + + + + True + + + + + True + + + + + True + Executes the scripts + True + Execute + True + gtk-execute + + + + True + + + + + True + + + True + False + True + False + 0 + + + + True + + + True + gtk-media-pause + 3 + + + + + True + Pause + + + False + False + 1 + + + + + + + + + + + True + False + Stops running experiment + True + Stop + True + gtk-stop + + + + True + + + + + True + + + + + True + This progam's documentation + Help + True + gtk-info + documentation_menu + + + + + + + + + + + True + True + + + + True + 2 + 5 + + + True + 0 + Column: + + + 3 + 4 + 1 + 2 + + + + + + + True + 0 + Line: + + + 1 + 2 + 1 + 2 + + + + + + + True + 0 + + + 1 + 2 + + + + + + True + True + GTK_SHADOW_IN + + + True + True + + + + + 5 + + + + + True + True + GDK_POINTER_MOTION_MASK | GDK_POINTER_MOTION_HINT_MASK | GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK + 1 1 1 1 10 0 + 1 + True + + + + 2 + 3 + 1 + 2 + + + + + + + True + True + GDK_POINTER_MOTION_MASK | GDK_POINTER_MOTION_HINT_MASK | GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK + 1 1 1 1 10 0 + 1 + True + True + + + + 4 + 5 + 1 + 2 + + + + + + + + + True + Experiment Script + + + tab + False + + + + + True + 2 + 5 + + + True + 0 + Column: + + + 3 + 4 + 1 + 2 + + + + + + + True + 0 + Line: + + + 1 + 2 + 1 + 2 + + + + + + + True + True + GTK_SHADOW_IN + + + True + True + + + + + + 5 + + + + + True + 0 + + + 1 + 2 + + + + + + True + True + GDK_POINTER_MOTION_MASK | GDK_POINTER_MOTION_HINT_MASK | GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK + 1 1 1 1 10 0 + 1 + True + + + + 2 + 3 + 1 + 2 + + + + + + + True + True + GDK_POINTER_MOTION_MASK | GDK_POINTER_MOTION_HINT_MASK | GDK_BUTTON_PRESS_MASK | GDK_BUTTON_RELEASE_MASK + 1 1 1 1 10 0 + 1 + True + + + + 4 + 5 + 1 + 2 + + + + + + + 1 + + + + + True + Result Script + + + tab + 1 + False + + + + + True + 3 + 6 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + True + 1 + 0 + + + True + 2 + 4 + 12 + 4 + + + True + 2 + 4 + + + True + + + True + Source + + + False + False + + + + + True + True + + + False + False + 1 + + + + + + + + + + True + False + lin +log +log10 + + + + 1 + 2 + 1 + 2 + + + + + + + True + 0 + y-Scaling + + + 2 + 3 + 1 + 2 + + + 3 + + + + + True + False + lin +log +log10 + + + + 1 + 2 + + + + + + + True + 0 + x-Scaling + + + 2 + 3 + + + 3 + + + + + True + 0 + in ... 
+ + + 1 + 2 + + + + + + True + 0 + + + 1 + 2 + + + + + + + True + True + Rescale graph if new ymax is larger than the old one or shrinked more than 20% + Auto-Scaling + True + 0 + True + + + + 3 + 4 + 1 + 2 + + + + + + + True + True + Plot error-bars if result to plot uses statistic + Show Statistics + True + 0 + True + + + + 3 + 4 + + + + + + + + + + + True + Settings: + True + + + label_item + + + + + 6 + 2 + 3 + + 2 + 2 + + + + + True + True + Save display data as text to file + 0 + + + + True + 0 + 0 + + + True + 2 + + + True + gtk-floppy + + + False + False + + + + + True + 10 + Save as text + True + + + False + 1 + + + + + + + + + 5 + 6 + 1 + 2 + + GTK_FILL + + + + + True + True + Copy display data as text to clipboard + 0 + + + + True + 0 + 0 + + + True + 2 + + + True + gtk-copy + + + False + False + + + + + True + 10 + Copy to Cipboard + True + + + False + 1 + + + + + + + + + 4 + 5 + 1 + 2 + + GTK_FILL + + + + + 2 + + + + + True + Display + + + tab + 2 + False + + + + + True + 2 + + + True + 3 + + + True + 0 + + + 2 + 3 + + 4 + + + + + True + 0 + Lines in Buffer: 0 + GTK_JUSTIFY_RIGHT + + + + + 3 + 4 + + + + + True + 0 + File: Unnamed + + + 1 + 2 + + + 16 + 4 + + + + + 1 + 2 + + + + + + True + True + GTK_SHADOW_IN + + + True + True + False + GTK_WRAP_WORD + False + False + + + + + + + 3 + + + + + True + Log + True + + + tab + 3 + False + + + + + True + + + True + + + True + + + True + Backend's Settings + + + False + False + + + + + True + True + start backend + True + 0 + True + True + + + False + False + 1 + + + + + True + + + True + executable + + + False + False + + + + + True + True + backend + + + 1 + + + + + True + True + browse + True + 0 + + + + False + False + 2 + + + + + 2 + + + + + True + + + True + spool directory + + + False + False + + + + + True + True + spool + + + 1 + + + + + 3 + + + + + True + True + delete job data after execution + True + 0 + True + True + + + False + False + 4 + + + + + True + + + True + log file name + + + False + False + + + + + True + True + logdata + + + 1 + + + + + 5 + + + + + True + + + True + maximum number of log files to keep + + + False + False + + + + + True + True + False + 4 + 3 + 10 + + + False + 1 + + + + + 6 + + + + + + + True + + + False + False + 1 + + + + + True + + + True + Scripts' Settings + + + False + False + + + + + True + True + start experiment script + True + 0 + True + True + + + False + False + 1 + + + + + True + True + start result script + True + 0 + True + True + + + False + False + 2 + + + + + True + True + delete result data after processing + True + 0 + True + True + + + False + False + 3 + + + + + 2 + + + + + False + False + + + + + True + + + False + False + 1 + + + + + True + + + True + HDF5 Data Pool's Settings + + + False + False + + + + + True + + + True + data pool file name + + + False + False + + + + + True + True + DAMARIS_data_pool.h5 + + + 1 + + + + + 1 + + + + + True + + + True + data pool write interval + + + False + False + + + + + True + True + 10 + 10 + 60 + + + False + 1 + + + + + True + min, compression + + + False + False + 2 + + + + + True + None + + + + False + False + 3 + + + + + True + , ratio + + + False + False + 4 + + + + + True + True + 0 0 9 1 1 0 + 1 + True + + + False + False + 5 + + + + + + + + 2 + + + + + False + 2 + + + + + True + + + False + 3 + + + + + True + + + False + False + 4 + + + + + True + + + True + Frontend's Settings + + + False + False + + + + + True + + + True + Script Windows Font + + + False + False + + + + + True + True + 0 + + + + False + False + 1 + + + + 
+ False + False + 1 + + + + + True + + + True + True + Printer Setup + True + 0 + + + + False + False + + + + + + + + 2 + + + + + True + ToDo + + + False + False + 3 + + + + + False + 5 + + + + + True + + + False + False + 6 + + + + + True + + + True + info, authors and disclaimer + + + False + False + + + + + True + True + GTK_POLICY_NEVER + GTK_SHADOW_IN + + + True + False + GTK_WRAP_WORD_CHAR + False + DAMARIS python frontend version %(damarisversion)s, 2012 +%(moduleversions)s + +DArmstadt Magnetic Resonance Instrument Software, 2004-2012 +Homepage http://www.fkp.physik.tu-darmstadt.de/damaris/ + +Python frontend written by Christopher Schmitt, Achim Gädke, Markus Rosenstihl and Christian Tacke +Backends written by Achim Gädke, Markus Rosenstihl, Christian Tacke, tested by Holger Stork +The drivers included in the backend may use propietary interfaces. +The source code distribution may include commercial third party's code, +which is specially marked and not covered by this license. + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. 
We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. 
+However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. 
Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. + + + + + + 1 + + + + + 7 + + + + + True + + + True + True + True + 0 + + + + True + 0 + 0 + + + True + 2 + + + True + gtk-floppy + + + False + False + + + + + True + save configuration + True + + + False + False + 1 + + + + + + + + + False + False + + + + + True + True + True + 0 + + + + True + 0 + 0 + + + True + 2 + + + True + gtk-refresh + + + False + False + + + + + True + reload configuration + True + + + False + False + 1 + + + + + + + + + False + False + 1 + + + + + False + False + 8 + + + + + 4 + + + + + True + Configuration + + + tab + 4 + False + + + + + 1 + 2 + + + + + True + 8 + + + True + + + 6 + 7 + + 5 + + + + + True + + + + 3 + + + + + True + + + 4 + 5 + + 5 + + + + + True + + + 2 + 3 + + 5 + + + + + True + Backend: Idle. + + + 3 + 4 + + + + + + + 20 + True + Experiment Script: Idle. + + + 1 + 2 + + + + + + + True + Result Script: Idle. 
diff --git a/src/gui/damaris.gladep b/src/gui/damaris.gladep
new file mode 100644
index 0000000..19cf0e7
--- /dev/null
+++ b/src/gui/damaris.gladep
@@ -0,0 +1,8 @@
+[damaris.gladep: 8-line Glade project file -- XML markup lost in extraction;
+ surviving values: "damaris-gui", "damaris", FALSE.]
diff --git a/src/gui/gtkcodebuffer.py b/src/gui/gtkcodebuffer.py
new file mode 100644
index 0000000..2e0c59b
--- /dev/null
+++ b/src/gui/gtkcodebuffer.py
@@ -0,0 +1,681 @@
+""" This module contains the PyGTKCodeBuffer-class. This class is a
+    specialisation of the gtk.TextBuffer and enables syntax-highlighting for
+    PyGTK's TextView-widget.
+
+    To use the syntax-highlighting feature you have to load a syntax-definition
+    or specify your own. To load one, please read the docs for the
+    SyntaxLoader() class. """
+
+
+# This library is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import gtk
+import pango
+import re
+import sys
+import os.path
+import xml.sax
+import imp
+from xml.sax.handler import ContentHandler
+from xml.sax.saxutils import unescape
+
+__version__ = "1.0RC2"
+__author__ = "Hannes Matuschek "
+
+
+# define the default styles
+DEFAULT_STYLES = {
+    'DEFAULT':      {'font': 'monospace'},
+    'comment':      {'foreground': '#0000FF'},
+    'preprocessor': {'foreground': '#A020F0',
+                     'weight': pango.WEIGHT_BOLD},
+    'keyword':      {'foreground': '#A52A2A',
+                     'weight': pango.WEIGHT_BOLD},
+    'special':      {'foreground': '#006600'},
+    'mark1':        {'foreground': '#008B8B'},
+    'mark2':        {'foreground': '#6A5ACD'},
+    'string':       {'foreground': '#CC00CC'},
+    'number':       {'foreground': '#CC00CC'},
+    'datatype':     {'foreground': '#2E8B57',
+                     'weight': pango.WEIGHT_BOLD},
+    'function':     {'foreground': '#008A8C'},
+
+    'link':         {'foreground': '#0000FF',
+                     'underline': pango.UNDERLINE_SINGLE}}
+
+
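+# A minimal usage sketch (added for illustration; it assumes a "python.xml"
+# syntax-definition can be found on SYNTAX_PATH, see below):
+#
+#     import gtk
+#     lang = SyntaxLoader("python")    # load syntax/python.xml
+#     buff = CodeBuffer(lang=lang)     # drop-in replacement for gtk.TextBuffer
+#     win = gtk.Window()
+#     win.add(gtk.TextView(buff))
+#     win.show_all()
+#     gtk.main()
+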
""" + return (hasattr(sys, "frozen") or # new py2exe + hasattr(sys, "importers") # old py2exe + or imp.is_frozen("__main__")) # tools/freeze + + +if _main_is_frozen(): + this_module_path = os.path.dirname(sys.executable) +else: + this_module_path = os.path.abspath(os.path.dirname(__file__)) + + +# defines default-search paths for syntax-files +SYNTAX_PATH = [ os.path.join('.', 'syntax'), + this_module_path, + os.path.join(os.path.expanduser('~'),".pygtkcodebuffer"), + os.path.join(sys.prefix,"share","pygtkcodebuffer","syntax")] + + +# enable/disable debug-messages +DEBUG_FLAG = False + + +# +# Some log functions... +# (internal used) +def _log_debug(msg): + if not DEBUG_FLAG: + return + sys.stderr.write("DEBUG: ") + sys.stderr.write(msg) + sys.stderr.write("\n") + +def _log_warn(msg): + sys.stderr.write("WARN: ") + sys.stderr.write(msg) + sys.stderr.write("\n") + +def _log_error(msg): + sys.stderr.write("ERROR: ") + sys.stderr.write(msg) + sys.stderr.write("\n") + + + + +def add_syntax_path(path_or_list): + """ This function adds one (string) or many (list of strings) paths to the + global search-paths for syntax-files. """ + global SYNTAX_PATH + # handle list of strings + if isinstance(path_or_list, (list, tuple)): + for i in range(len(path_or_list)): + SYNTAX_PATH.insert(0, path_or_list[-i]) + # handle single string + elif isinstance(path_or_list, basestring): + SYNTAX_PATH.insert(0, path_or_list) + # handle attr-error + else: + raise TypeError, "Argument must be path-string or list of strings" + + + +class Pattern: + """ More or less internal used class representing a pattern. You may use + this class to "hard-code" your syntax-definition. """ + + def __init__(self, regexp, style="DEFAULT", group=0, flags=""): + """ The constructor takes at least on argument: the regular-expression. + + The optional kwarg style defines the style applied to the string + matched by the regexp. + + The kwarg group may be used to define which group of the regular + expression will be used for highlighting (Note: This means that only + the selected group will be highlighted but the complete pattern must + match!) + + The optional kwarg flags specifies flags for the regular expression. + Look at the Python lib-ref for a list of flags and there meaning.""" + # assemble re-flag + flags += "ML"; flag = 0 + + _log_debug("init rule %s -> %s (%s)"%(regexp, style, flags)) + + for char in flags: + if char == 'M': flag |= re.M + if char == 'L': flag |= re.L + if char == 'S': flag |= re.S + if char == 'I': flag |= re.I + if char == 'U': flag |= re.U + if char == 'X': flag |= re.X + + # compile re + try: self._regexp = re.compile(regexp, flag) + except re.error, e: + raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e))) + + self._group = group + self.tag_name = style + + + def __call__(self, txt, start, end): + m = self._regexp.search(txt) + if not m: return None + + mstart, mend = m.start(self._group), m.end(self._group) + s = start.copy(); s.forward_chars(mstart) + e = start.copy(); e.forward_chars(mend) + + return (s,e) + + + +class KeywordList(Pattern): + """ This class may be used for hard-code a syntax-definition. It specifies + a pattern for a keyword-list. This simplifies the definition of + keyword-lists. """ + + def __init__(self, keywords, style="keyword", flags=""): + """ The constructor takes at least on argument: A list of strings + specifying the keywords to highlight. + + The optional kwarg style specifies the style used to highlight these + keywords. 
+
+
+class KeywordList(Pattern):
+    """ This class may be used to hard-code a syntax-definition. It specifies
+        a pattern for a keyword-list. This simplifies the definition of
+        keyword-lists. """
+
+    def __init__(self, keywords, style="keyword", flags=""):
+        """ The constructor takes at least one argument: a list of strings
+            specifying the keywords to highlight.
+
+            The optional kwarg style specifies the style used to highlight
+            these keywords.
+
+            The optional kwarg flags specifies the flags for the
+            (internally generated) regular expression. """
+        regexp = "(?:\W|^)(%s)\W"%("|".join(keywords),)
+        Pattern.__init__(self, regexp, style, group=1, flags=flags)
+
+
+
+class String:
+    """ This class may be used to hard-code a syntax-definition. It simplifies
+        the definition of a "string". A "string" is something that consists of
+        a start-pattern and an end-pattern. The end-pattern may be content of
+        the string if it is escaped. """
+
+    def __init__(self, starts, ends, escape=None, style="string"):
+        """ The constructor needs at least two arguments: the start- and
+            end-pattern.
+
+            The optional kwarg escape specifies an escape-sequence escaping
+            the end-pattern.
+
+            The optional kwarg style specifies the style used to highlight the
+            string. """
+        try:
+            self._starts = re.compile(starts)
+        except re.error, e:
+            # report the offending pattern itself (not an undefined name)
+            raise Exception("Invalid regexp \"%s\": %s"%(starts,str(e)))
+
+        if escape:
+            end_exp = "[^%(esc)s](?:%(esc)s%(esc)s)*%(end)s"
+            end_exp = end_exp%{'esc':escape*2,'end':ends}
+        else:
+            end_exp = ends
+
+        try:
+            self._ends = re.compile(end_exp)
+        except re.error, e:
+            raise Exception("Invalid regexp \"%s\": %s"%(end_exp,str(e)))
+
+        self.tag_name = style
+
+
+    def __call__(self, txt, start, end):
+        start_match = self._starts.search(txt)
+        if not start_match: return
+
+        start_it = start.copy()
+        start_it.forward_chars(start_match.start(0))
+        end_it = end.copy()
+
+        end_match = self._ends.search(txt, start_match.end(0)-1)
+        if end_match:
+            end_it.set_offset(start.get_offset()+end_match.end(0))
+
+        return start_it, end_it
+
+
+
+class LanguageDefinition:
+    """ This class is a container class for all rules (Pattern, KeywordList,
+        ...) specifying the language. Use this class if you want to hard-code
+        your syntax-definition. """
+
+    def __init__(self, rules):
+        """ The constructor takes only one argument: a list of rules (i.e.
+            Pattern, KeywordList and String). """
+        self._grammar = rules
+        self._styles = dict()
+
+
+    def __call__(self, buf, start, end=None):
+        # if no end given -> end of buffer
+        if not end: end = buf.get_end_iter()
+
+        mstart = mend = end
+        mtag = None
+        txt = buf.get_slice(start, end)
+
+        # search min match
+        for rule in self._grammar:
+            # search pattern
+            m = rule(txt, start, end)
+            if not m: continue
+
+            # prefer match with smallest start-iter
+            if m[0].compare(mstart) < 0:
+                mstart, mend = m
+                mtag = rule.tag_name
+                continue
+
+            if m[0].compare(mstart)==0 and m[1].compare(mend)>0:
+                mstart, mend = m
+                mtag = rule.tag_name
+                continue
+
+        return (mstart, mend, mtag)
+
+
+    def get_styles(self):
+        return self._styles
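+
+# Sketch of a hard-coded syntax-definition assembled from the rule classes
+# above (the rules are tried in turn, the match closest to the start wins):
+#
+#     rules = [KeywordList(["def", "class", "return"]),
+#              String(r'"', r'"', escape="\\"),
+#              Pattern(r"#.*$", style="comment")]
+#     lang = LanguageDefinition(rules)
+#     buff = CodeBuffer(lang=lang)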
""" + + # some translation-tables for the style-defs: + style_weight_table = {'ultralight': pango.WEIGHT_ULTRALIGHT, + 'light': pango.WEIGHT_LIGHT, + 'normal': pango.WEIGHT_NORMAL, + 'bold': pango.WEIGHT_BOLD, + 'ultrabold': pango.WEIGHT_ULTRABOLD, + 'heavy': pango.WEIGHT_HEAVY} + style_variant_table = {'normal': pango.VARIANT_NORMAL, + 'smallcaps': pango.VARIANT_SMALL_CAPS} + style_underline_table = {'none': pango.UNDERLINE_NONE, + 'single': pango.UNDERLINE_SINGLE, + 'double': pango.UNDERLINE_DOUBLE} + style_style_table = {'normal': pango.STYLE_NORMAL, + 'oblique': pango.STYLE_OBLIQUE, + 'italic': pango.STYLE_ITALIC} + style_scale_table = { + 'xx_small': pango.SCALE_XX_SMALL, + 'x_small': pango.SCALE_X_SMALL, + 'small': pango.SCALE_SMALL, + 'medium': pango.SCALE_MEDIUM, + 'large': pango.SCALE_LARGE, + 'x_large': pango.SCALE_X_LARGE, + 'xx_large': pango.SCALE_XX_LARGE, + } + + + def __init__(self, lang_name): + """ The constructor takes only one argument: the language name. + The constructor tries to load the syntax-definition from a + syntax-file in one directory of the global path-list. + + An instance of this class IS a LanguageDefinition. You can pass it + to the constructor of the CodeBuffer class. """ + LanguageDefinition.__init__(self, []) + ContentHandler.__init__(self) + + # search for syntax-files: + fname = None + for syntax_dir in SYNTAX_PATH: + fname = os.path.join(syntax_dir, "%s.xml"%lang_name) + if os.path.isfile(fname): break + + _log_debug("Loading syntaxfile %s"%fname) + + if not os.path.isfile(fname): + raise Exception("No snytax-file for %s found!"%lang_name) + + xml.sax.parse(fname, self) + + + # Dispatch start/end - document/element and chars + def startDocument(self): + self.__stack = [] + + def endDocument(self): + del self.__stack + + def startElement(self, name, attr): + self.__stack.append( (name, attr) ) + if hasattr(self, "start_%s"%name): + handler = getattr(self, "start_%s"%name) + handler(attr) + + def endElement(self, name): + if hasattr(self, "end_%s"%name): + handler = getattr(self, "end_%s"%name) + handler() + del self.__stack[-1] + + def characters(self, txt): + if not self.__stack: return + name, attr = self.__stack[-1] + + if hasattr(self, "chars_%s"%name): + handler = getattr(self, "chars_%s"%name) + handler(txt) + + + # Handle regexp-patterns + def start_pattern(self, attr): + self.__pattern = "" + self.__group = 0 + self.__flags = '' + self.__style = attr['style'] + if 'group' in attr.keys(): self.__group = int(attr['group']) + if 'flags' in attr.keys(): self.__flags = attr['flags'] + + def end_pattern(self): + rule = Pattern(self.__pattern, self.__style, self.__group, self.__flags) + self._grammar.append(rule) + del self.__pattern + del self.__group + del self.__flags + del self.__style + + def chars_pattern(self, txt): + self.__pattern += unescape(txt) + + + # handle keyword-lists + def start_keywordlist(self, attr): + self.__style = "keyword" + self.__flags = "" + if 'style' in attr.keys(): + self.__style = attr['style'] + if 'flags' in attr.keys(): + self.__flags = attr['flags'] + self.__keywords = [] + + def end_keywordlist(self): + kwlist = KeywordList(self.__keywords, self.__style, self.__flags) + self._grammar.append(kwlist) + del self.__keywords + del self.__style + del self.__flags + + def start_keyword(self, attr): + self.__keywords.append("") + + def end_keyword(self): + if not self.__keywords[-1]: + del self.__keywords[-1] + + def chars_keyword(self, txt): + parent,pattr = self.__stack[-2] + if not parent == "keywordlist": return + 
+
+
+    # Handle regexp-patterns
+    def start_pattern(self, attr):
+        self.__pattern = ""
+        self.__group = 0
+        self.__flags = ''
+        self.__style = attr['style']
+        if 'group' in attr.keys(): self.__group = int(attr['group'])
+        if 'flags' in attr.keys(): self.__flags = attr['flags']
+
+    def end_pattern(self):
+        rule = Pattern(self.__pattern, self.__style, self.__group, self.__flags)
+        self._grammar.append(rule)
+        del self.__pattern
+        del self.__group
+        del self.__flags
+        del self.__style
+
+    def chars_pattern(self, txt):
+        self.__pattern += unescape(txt)
+
+
+    # handle keyword-lists
+    def start_keywordlist(self, attr):
+        self.__style = "keyword"
+        self.__flags = ""
+        if 'style' in attr.keys():
+            self.__style = attr['style']
+        if 'flags' in attr.keys():
+            self.__flags = attr['flags']
+        self.__keywords = []
+
+    def end_keywordlist(self):
+        kwlist = KeywordList(self.__keywords, self.__style, self.__flags)
+        self._grammar.append(kwlist)
+        del self.__keywords
+        del self.__style
+        del self.__flags
+
+    def start_keyword(self, attr):
+        self.__keywords.append("")
+
+    def end_keyword(self):
+        if not self.__keywords[-1]:
+            del self.__keywords[-1]
+
+    def chars_keyword(self, txt):
+        parent, pattr = self.__stack[-2]
+        if not parent == "keywordlist": return
+        self.__keywords[-1] += unescape(txt)
+
+
+    # handle String-definitions
+    def start_string(self, attr):
+        self.__style = "string"
+        self.__escape = None
+        if 'escape' in attr.keys():
+            self.__escape = attr['escape']
+        if 'style' in attr.keys():
+            self.__style = attr['style']
+        self.__start_pattern = ""
+        self.__end_pattern = ""
+
+    def end_string(self):
+        strdef = String(self.__start_pattern, self.__end_pattern,
+                        self.__escape, self.__style)
+        self._grammar.append(strdef)
+        del self.__style
+        del self.__escape
+        del self.__start_pattern
+        del self.__end_pattern
+
+    def chars_starts(self, txt):
+        self.__start_pattern += unescape(txt)
+
+    def chars_ends(self, txt):
+        self.__end_pattern += unescape(txt)
+
+
+    # handle style
+    def start_style(self, attr):
+        self.__style_props = dict()
+        self.__style_name = attr['name']
+
+    def end_style(self):
+        self._styles[self.__style_name] = self.__style_props
+        del self.__style_props
+        del self.__style_name
+
+    def start_property(self, attr):
+        self.__style_prop_name = attr['name']
+
+    def chars_property(self, value):
+        value = value.strip()
+
+        # convert value
+        if self.__style_prop_name in ['font','foreground','background',]:
+            pass
+
+        elif self.__style_prop_name == 'variant':
+            if not value in self.style_variant_table.keys():
+                raise Exception("Unknown style-variant: %s"%value)
+            value = self.style_variant_table[value]
+
+        elif self.__style_prop_name == 'underline':
+            if not value in self.style_underline_table.keys():
+                raise Exception("Unknown underline-style: %s"%value)
+            value = self.style_underline_table[value]
+
+        elif self.__style_prop_name == 'scale':
+            if not value in self.style_scale_table.keys():
+                raise Exception("Unknown scale-style: %s"%value)
+            value = self.style_scale_table[value]
+
+        elif self.__style_prop_name == 'weight':
+            if not value in self.style_weight_table.keys():
+                raise Exception("Unknown style-weight: %s"%value)
+            value = self.style_weight_table[value]
+
+        elif self.__style_prop_name == 'style':
+            if not value in self.style_style_table.keys():
+                raise Exception("Unknown text-style: %s"%value)
+            value = self.style_style_table[value]
+
+        else:
+            raise Exception("Unknown style-property %s"%self.__style_prop_name)
+
+        # store value
+        self.__style_props[self.__style_prop_name] = value
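+
+# For reference, a syntax-file is plain XML processed by the handlers above.
+# A minimal file could look like this (sketch; element and attribute names
+# follow the handler methods, the root element name is an assumption):
+#
+#     <syntax>
+#       <string style="string" escape="\"><starts>"</starts><ends>"</ends></string>
+#       <keywordlist style="keyword">
+#         <keyword>def</keyword>
+#         <keyword>class</keyword>
+#       </keywordlist>
+#       <pattern style="comment">#.*$</pattern>
+#       <style name="comment"><property name="foreground">#888888</property></style>
+#     </syntax>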
""" + gtk.TextBuffer.__init__(self, table) + + # default styles + self.styles = DEFAULT_STYLES + + # update styles with lang-spec: + if lang: + self.styles.update(lang.get_styles()) + # update styles with user-defined + self.styles.update(styles) + + # create tags + for name, props in self.styles.items(): + style = dict(self.styles['DEFAULT']) # take default + style.update(props) # and update with props + self.create_tag(name, **style) + + # store lang-definition + self._lang_def = lang + + self.connect_after("insert-text", self._on_insert_text) + self.connect_after("delete-range", self._on_delete_range) + self.connect('apply-tag', self._on_apply_tag) + + self._apply_tags = False + + + def _on_apply_tag(self, buf, tag, start, end): + # FIXME This is a hack! It allows apply-tag only while + # _on_insert_text() and _on_delete_range() + if not self._apply_tags: + self.emit_stop_by_name('apply-tag') + return True + + _log_debug("tag \"%s\" as %s"%(self.get_slice(start,end), tag.get_property("name"))) + + + def _on_insert_text(self, buf, it, text, length): + # if no syntax defined -> nop + if not self._lang_def: return False + + it = it.copy() + it.backward_chars(length) + + if not it.begins_tag(): + it.backward_to_tag_toggle(None) + _log_debug("Not tag-start -> moved iter to %i (%s)"%(it.get_offset(), it.get_char())) + + if it.begins_tag(self.get_tag_table().lookup("DEFAULT")): + it.backward_to_tag_toggle(None) + _log_debug("Iter at DEFAULT-start -> moved to %i (%s)"%(it.get_offset(), it.get_char())) + + self._apply_tags = True + self.update_syntax(it) + self._apply_tags = False + + + def _on_delete_range(self, buf, start, end): + # if no syntax defined -> nop + if not self._lang_def: return False + + start = start.copy() + if not start.begins_tag(): + start.backward_to_tag_toggle(None) + + self._apply_tags = True + self.update_syntax(start) + self._apply_tags = False + + + def update_syntax(self, start, end=None): + """ More or less internal used method to update the + syntax-highlighting. """ + # if no lang set + if not self._lang_def: return + _log_debug("Update syntax from %i"%start.get_offset()) + + # if not end defined + if not end: end = self.get_end_iter() + + # We do not use recursion -> long files exceed rec-limit! + finished = False + while not finished: + # search first rule matching txt[start..end] + mstart, mend, tagname = self._lang_def(self, start, end) + + # optimisation: if mstart-mend is allready tagged with tagname + # -> finished + if tagname: #if something found + tag = self.get_tag_table().lookup(tagname) + if mstart.begins_tag(tag) and mend.ends_tag(tag) and not mstart.equal(start): + self.remove_all_tags(start,mstart) + self.apply_tag_by_name("DEFAULT", start, mstart) + _log_debug("Optimized: Found old tag at %i (%s)"%(mstart.get_offset(), mstart.get_char())) + # finish + finished = True + continue + + # remove all tags from start..mend (mend == buffer-end if no match) + self.remove_all_tags(start, mend) + # make start..mstart = DEFAUL (mstart == buffer-end if no match) + if not start.equal(mstart): + _log_debug("Apply DEFAULT") + self.apply_tag_by_name("DEFAULT", start, mstart) + + # nothing found -> finished + if not tagname: + finished = True + continue + + # apply tag + _log_debug("Apply %s"%tagname) + self.apply_tag_by_name(tagname, mstart, mend) + + start = mend + + if start == end: + finished = True + continue + + + def reset_language(self, lang_def): + """ Reset the currently used language-definition. 
""" + # remove all tags from complete text + start = self.get_start_iter() + self.remove_all_tags(start, self.get_end_iter()) + # store lexer + self._lang_def = lang_def + # update styles from lang_def: + if self._lang_def: + self.update_styles(self._lang_def.get_styles()) + # and ... + self._apply_tags = True + self.update_syntax(start) + self._apply_tags = False + + + def update_styles(self, styles): + """ Update styles. This method may be used to reset any styles at + runtime. """ + self.styles.update(styles) + + table = self.get_tag_table() + for name, props in styles.items(): + style = self.styles['DEFAULT'] + style.update(props) + # if tagname is unknown: + if not table.lookup(name): + _log_debug("Create tag: %s (%s)"%(name, style)) + self.create_tag(name, **style) + else: # update tag + tag = table.lookup(name) + _log_debug("Update tag %s with (%s)"%(name, style)) + map(lambda i: tag.set_property(i[0],i[1]), style.items()) + + diff --git a/src/gui/python.xml b/src/gui/python.xml new file mode 100644 index 0000000..031a9b7 --- /dev/null +++ b/src/gui/python.xml @@ -0,0 +1,210 @@ + + + + ([uUrR]|[uU][rR]|[rR][uU])?"""""" + ([uUrR]|[uU][rR]|[rR][uU])?'''''' + ([uUrR]|[uU][rR]|[rR][uU])?"" + ([uUrR]|[uU][rR]|[rR][uU])?'' + + + import + from + as + False + None + True + __name__ + __debug__ + + + + def + class + return + + + + and + assert + break + continue + del + elif + else + except + exec + finally + for + global + if + in + is + lambda + not + or + pass + print + raise + try + while + yield + + + + ArithmeticError + AssertionError + AttributeError + EnvironmentError + EOFError + Exception + FloatingPointError + ImportError + IndentationError + IndexError + IOError + KeyboardInterrupt + KeyError + LookupError + MemoryError + NameError + NotImplementedError + OSError + OverflowError + ReferenceError + RuntimeError + StandardError + StopIteration + SyntaxError + SystemError + SystemExit + TabError + TypeError + UnboundLocalError + UnicodeDecodeError + UnicodeEncodeError + UnicodeError + UnicodeTranslateError + ValueError + WindowsError + ZeroDivisionError + + Warning + UserWarning + DeprecationWarning + PendingDeprecationWarning + SyntaxWarning + OverflowWarning + RuntimeWarning + FutureWarning + + __import__ + abs + apply + basestring + bool + buffer + callable + chr + classmethod + cmp + coerce + compile + complex + delattr + dict + dir + divmod + enumerate + eval + execfile + file + filter + float + getattr + globals + hasattr + hash + hex + id + input + int + intern + isinstance + issubclass + iter + len + list + locals + long + map + max + min + object + oct + open + ord + pow + property + range + raw_input + reduce + reload + repr + round + setattr + slice + staticmethod + str + sum + super + tuple + type + unichr + unicode + vars + xrange + zip + + + + + set_pfg + set_pfg_wt + set_description + get_description + set_phase + set_frequency + ttl_pulse + rf_pulse + state_start + state_end + loop_start + loop_end + set_pts_local + wait + record + + + + Accumulation + Experiment + ADC_Result + MeasurementResult + AccumulatedValue + + + #.*$ + \bself\b + \b([1-9][0-9]*|0)([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b + \b([0-9]+[Ee][-]?[0-9]+|([0-9]*\.[0-9]+|[0-9]+\.)([Ee][-]?[0-9]+)?)[fFlL]? + \b0[0-7]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b + \b0[xX][0-9a-fA-F]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b + diff --git a/src/gui/script_interface.py b/src/gui/script_interface.py new file mode 100644 index 0000000..1e3a1e7 --- /dev/null +++ b/src/gui/script_interface.py @@ -0,0 +1,152 @@ +#! 
+#!/usr/bin/env python
+
+import time
+import sys
+import os
+import os.path
+import tables
+import damaris.data.DataPool as DataPool
+import damaris.gui.ResultReader as ResultReader
+import damaris.gui.ExperimentWriter as ExperimentWriter
+import damaris.gui.BackendDriver as BackendDriver
+import damaris.gui.ResultHandling as ResultHandling
+import damaris.gui.ExperimentHandling as ExperimentHandling
+
+def some_listener(event):
+    if event.subject=="__recentexperiment" or event.subject=="__recentresult":
+        r=event.origin.get("__recentresult",-1)+1
+        e=event.origin.get("__recentexperiment",-1)+1
+        if e!=0:
+            ratio=100.0*r/e
+        else:
+            ratio=100.0
+        print "\r%d/%d (%.0f%%)"%(r,e,ratio),
+
+
+class ScriptInterface:
+
+    def __init__(self, exp_script=None, res_script=None, backend_executable=None, spool_dir="spool"):
+        self.exp_script=exp_script
+        self.res_script=res_script
+        self.backend_executable=backend_executable
+        self.spool_dir=os.path.abspath(spool_dir)
+        self.exp_handling=self.res_handling=None
+
+        self.exp_writer=self.res_reader=self.back_driver=None
+        if self.backend_executable is not None:
+            self.back_driver=BackendDriver.BackendDriver(self.backend_executable, spool_dir)
+            if self.exp_script: self.exp_writer=self.back_driver.get_exp_writer()
+            if self.res_script: self.res_reader=self.back_driver.get_res_reader()
+        else:
+            self.back_driver=None
+            if self.exp_script: self.exp_writer=ExperimentWriter.ExperimentWriter(spool_dir)
+            if self.res_script: self.res_reader=ResultReader.ResultReader(spool_dir)
+
+        # DataPool is imported as a module; the class of the same name lives
+        # inside it
+        self.data=DataPool.DataPool()
+
+
+    def runScripts(self):
+        # get script engines
+        if self.exp_script and self.exp_writer:
+            self.exp_handling=ExperimentHandling.ExperimentHandling(self.exp_script, self.exp_writer, self.data)
+        if self.res_script and self.res_reader:
+            self.res_handling=ResultHandling.ResultHandling(self.res_script, self.res_reader, self.data)
+
+        # start them
+        if self.exp_handling: self.exp_handling.start()
+        if self.back_driver is not None: self.back_driver.start()
+        if self.res_handling: self.res_handling.start()
+
+    def waitForScriptsEnding(self):
+        # interval between periodic data dumps (in seconds)
+        dump_interval=600
+        next_dump_time=time.time()+dump_interval
+        # keyboard interrupts are handled in an extra cleanup loop
+        try:
+            while filter(None,[self.exp_handling,self.res_handling,self.back_driver]):
+                time.sleep(0.1)
+                if time.time()>next_dump_time:
+                    self.dump_data("pool/data_pool.h5")
+                    next_dump_time+=dump_interval
+
+                if self.exp_handling is not None:
+                    if not self.exp_handling.isAlive():
+                        self.exp_handling.join()
+                        if self.exp_handling.raised_exception:
+                            print ": experiment script failed at line %d (function %s): %s"%(self.exp_handling.location[0],
+                                                                                             self.exp_handling.location[1],
+                                                                                             self.exp_handling.raised_exception)
+                        else:
+                            print ": experiment script finished"
+                        self.exp_handling = None
+
+                if self.res_handling is not None:
+                    if not self.res_handling.isAlive():
+                        self.res_handling.join()
+                        if self.res_handling.raised_exception:
+                            print ": result script failed at line %d (function %s): %s"%(self.res_handling.location[0],
+                                                                                         self.res_handling.location[1],
+                                                                                         self.res_handling.raised_exception)
+                        else:
+                            print ": result script finished"
+                        self.res_handling = None
+
+                if self.back_driver is not None:
+                    if not self.back_driver.isAlive():
+                        print ": backend finished"
+                        self.back_driver=None
+
+        except KeyboardInterrupt:
+            still_running=filter(None,[self.exp_handling,self.res_handling,self.back_driver])
+            for r in still_running:
+                r.quit_flag.set()
+
+            for r in still_running:
+                r.join()
+
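+    # dump_data() below snapshots the DataPool and the scripts in use into a
+    # single HDF5 file via pytables; waitForScriptsEnding() calls it every
+    # dump_interval seconds, and the __main__ block once more at the end.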
+    def dump_data(self, filename):
+        try:
+            # write data from pool
+            dump_file=tables.openFile(filename,mode="w",title="DAMARIS experiment data")
+            self.data.write_hdf5(dump_file, complib='zlib', complevel=6)
+            # write scripts
+            scriptgroup=dump_file.createGroup("/","scripts","Used Scripts")
+            dump_file.createArray(scriptgroup,"experiment_script", self.exp_script)
+            dump_file.createArray(scriptgroup,"result_script", self.res_script)
+            dump_file.createArray(scriptgroup,"backend_executable", self.backend_executable)
+            dump_file.createArray(scriptgroup,"spool_directory", self.spool_dir)
+            dump_file.flush()
+            dump_file.close()
+            dump_file=None
+            # todo
+        except Exception,e:
+            print "dump failed", e
+
+
+
+if __name__=="__main__":
+
+    if len(sys.argv)==1:
+        print "%s: experiment_script result_script [spool directory]"%sys.argv[0]
+        sys.exit(1)
+    if len(sys.argv)<4:
+        spool_dir=os.getcwd()
+    else:
+        spool_dir=sys.argv[3]
+
+    expscriptfile=open(sys.argv[1])
+    expscript=expscriptfile.read()
+    resscriptfile=open(sys.argv[2])
+    resscript=resscriptfile.read()
+
+    si=ScriptInterface(expscript, resscript,"/usr/lib/damaris/backends/Mobilecore", spool_dir)
+
+    si.data.register_listener(some_listener)
+
+    si.runScripts()
+
+    si.waitForScriptsEnding()
+
+    si.dump_data("data_pool.h5")
+
+    si=None
diff --git a/src/tools/__init__.py b/src/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/tools/eurotherm.py b/src/tools/eurotherm.py
new file mode 100644
index 0000000..df0e65a
--- /dev/null
+++ b/src/tools/eurotherm.py
@@ -0,0 +1,119 @@
+import serial
+import re
+import operator
+
+
+DEBUG=False
+
+reply_pattern = re.compile(r"\x02..(.*)\x03.", re.DOTALL)
+# example answer '\x02PV279.8\x03/'
+# [EOT] = \x04
+# [STX] = \x02
+# [ENQ] = \x05
+# [ETX] = \x03
+# [ACK] = \x06
+# BCC   = checksum
+
+standard_device='0011'
+EOT = '\x04'
+STX = '\x02'
+ENQ = '\x05'
+ETX = '\x03'
+ACK = '\x06'
+NAK = '\x15'
+
+
+"""
+Parameter read example:
+
+Master:     [EOT]0011PV[ENQ]
+Instrument: [STX]PV16.4[ETX]{BCC}
+
+Writing data:
+
+Master: [EOT]{GID}{GID}{UID}{UID}[STX]{CHAN}(c1)(c2)[ETX](BCC)
+
+"""
+
+def checksum(message):
+    # BCC: XOR over all message bytes, e.g. for "PV16.4"+ETX the characters
+    # are xor-ed into a single check byte
+    bcc = (reduce(operator.xor, map(ord,message)))
+    return chr(bcc)
+
+class Eurotherm(object):
+    def __init__(self, serial_device, baudrate = 19200):
+        self.device = standard_device
+        # timeout: 110 ms to get all answers.
+        self.s = serial.Serial(serial_device,
+                               baudrate = baudrate,
+                               bytesize=7,
+                               parity='E',
+                               stopbits=1,
+                               timeout=0.11)
+        self._expect_len = 50
+
+    def send_read_param(self, param):
+        self.s.write(EOT + self.device + param + ENQ)
+
+    def read_param(self, param):
+        self.s.flushInput()
+        self.send_read_param(param)
+        answer = self.s.read(self._expect_len)
+        m = reply_pattern.search(answer)
+        if m is None:
+            # Reading _expect_len bytes was not enough...
+            answer += self.s.read(200)
+            m = reply_pattern.search(answer)
+        if m is not None:
+            self._expect_len = len(answer)
+            return m.group(1)
+        else:
+            print "received:", repr(answer)
+            return None
+
+    def write_param(self, mnemonic, data):
+        if len(mnemonic) > 2:
+            raise ValueError
+        bcc = checksum(mnemonic + data + ETX)
+        mes = EOT+self.device+STX+mnemonic+data+ETX+bcc
+        if DEBUG:
+            for i in mes:
+                print i,hex(ord(i))
+        self.s.flushInput()
+        self.s.write(mes)
+        answer = self.s.read(1)
+        # print "received:", repr(answer)
+        if answer == "":
+            # raise IOError("No answer from device")
+            return None
+        return answer[-1] == ACK
+
+    def get_current_temperature(self):
+        temp = self.read_param('PV')
+        if temp is None:
+            temp = "0"
+        return temp
+
+    def set_temperature(self, temperature):
+        return self.write_param('SL', str(temperature))
+
+    def get_setpoint_temperature(self):
+        return self.read_param('SL')
+
+if __name__ == '__main__':
+    import time
+    delta=5
+    date = time.strftime('%Y-%m-%d')
+    f = open('templog_%s'%date,'w')
+    f.write('# Start time: %s\n# delta t   : %.1f s\n'%(time.asctime(), delta))
+    et = Eurotherm("/dev/ttyUSB0")
+    while True:
+        for i in xrange(120):
+            time.sleep(delta)
+            #t = time.strftime()
+            T = et.get_current_temperature()
+
+            l = '%f %s\n'%(time.time(),T)
+            print time.asctime(), T
+            f.write(l)
+            f.flush()
+        f.write('# MARK -- %s --\n'%(time.asctime()))
diff --git a/src/tools/ranges.py b/src/tools/ranges.py
new file mode 100644
index 0000000..f8bef5b
--- /dev/null
+++ b/src/tools/ranges.py
@@ -0,0 +1,127 @@
+import numpy as N
+import sys
+if sys.version_info > (2,6,0):
+    import numbers
+
+if sys.version_info > (2,6,0):
+    def lin_range(start,stop,step):
+        # an integral "step" is taken as a number of points (linspace),
+        # any other value as a step width (arange)
+        if isinstance(step, numbers.Integral):
+            return N.linspace(start,stop,step)
+        else:
+            return N.arange(start,stop,step)
+else:
+    def lin_range(start,stop,step):
+        return N.arange(start,stop,step)
+
+
+
+def log_range(start, stop, stepno):
+    if (start<=0 or stop<=0 or stepno<1):
+        raise ValueError("start, stop must be positive and stepno must be >=1")
+    return N.logspace(N.log10(start),N.log10(stop), num=stepno)
+
+
+def staggered_range(some_range, size=3):
+    m=0
+    if isinstance(some_range, N.ndarray):
+        is_numpy = True
+        some_range = list(some_range)
+    else:
+        is_numpy = False
+    new_list=[]
+    for k in xrange(len(some_range)):
+        for i in xrange(size):
+            try:
+                index = (m*size)
+                new_list.append(some_range.pop(index))
+            except IndexError:
+                break
+        m+=1
+    if is_numpy:
+        new_list = N.asarray(new_list+some_range)
+    else:
+        new_list+=some_range
+    return new_list
+
+
+def combine_ranges(*ranges):
+    new_list = []
+    for r in ranges:
+        new_list.extend(r)
+    return new_list
+
+combined_ranges=combine_ranges
+
+def interleaved_range(some_list, left_out):
+    """
+    in the first run, do every n-th, then every (n-1)-th of the remaining
+    values and so on...
+    """
+    m=0
+    new_list = []
+    for j in xrange(left_out):
+        for i in xrange(len(some_list)):
+            if (i*left_out+m) < len(some_list):
+                new_list.append(some_list[i*left_out+m])
+            else:
+                m+=1
+                break
+    if isinstance(some_list, N.ndarray):
+        new_list = N.array(new_list)
+    return new_list
+
+
+# These are the generators
+def lin_range_iter(start,stop, step):
+    this_one=float(start)+0.0
+    if step>0:
+        while (this_one<=float(stop)):
+            yield this_one
+            this_one+=float(step)
+    else:
+        while (this_one>=float(stop)):
+            yield this_one
+            this_one+=float(step)
+
+
+def log_range_iter(start, stop, stepno):
+    if (start<=0 or stop<=0 or stepno<1):
+        raise ValueError("start, stop must be positive and stepno must be >=1")
+    if int(stepno)==1:
+        factor=1.0
+    else:
+        factor=(stop/start)**(1.0/int(stepno-1))
+    for i in xrange(int(stepno)):
+        yield start*(factor**i)
+
+def staggered_range_iter(some_range, size = 1):
+    """
+    size=1: do one, drop one, ...
+    size=n: do 1 ... n, drop n+1 ... 2*n
+    in a second run the dropped values are done
+    """
+    left_out=[]
+    try:
+        while True:
+            for i in xrange(size):
+                yield some_range.next()
+            for i in xrange(size):
+                left_out.append(some_range.next())
+    except StopIteration:
+        pass
+
+    # now do the dropped ones
+    for i in left_out:
+        yield i
+
+def combined_ranges_iter(*ranges):
+    """
+    iterate over one range after the other
+    """
+    for r in ranges:
+        for i in r:
+            yield i
+
+combine_ranges_iter=combined_ranges_iter
+
diff --git a/src/tools/signal.py b/src/tools/signal.py
new file mode 100644
index 0000000..bafe8e6
--- /dev/null
+++ b/src/tools/signal.py
@@ -0,0 +1,30 @@
+import math
+
+__all__ = ['rotate_signal']
+
+
+def rotate_signal(timesignal, angle):
+    "Rotate the two-channel time signal by the given angle in degrees"
+    # implicit change to float arrays!
+    if timesignal.get_number_of_channels()!=2:
+        raise Exception("rotation defined only for 2 channels")
+    # simple cases: 0, 90, 180, 270 degree
+    reduced_angle=divmod(angle, 90)
+    if abs(reduced_angle[1])<1e-6:
+        reduced_angle=reduced_angle[0]%4
+        if reduced_angle==0:
+            return
+        elif reduced_angle==1:
+            timesignal.y[1]*=-1
+            timesignal.y=[timesignal.y[1],timesignal.y[0]]
+        elif reduced_angle==2:
+            timesignal.y[0]*=-1
+            timesignal.y[1]*=-1
+        elif reduced_angle==3:
+            timesignal.y[0]*=-1
+            timesignal.y=[timesignal.y[1],timesignal.y[0]]
+    else:
+        sin_angle=math.sin(angle/180.0*math.pi)
+        cos_angle=math.cos(angle/180.0*math.pi)
+        timesignal.y=[cos_angle*timesignal.y[0]-sin_angle*timesignal.y[1],
+                      sin_angle*timesignal.y[0]+cos_angle*timesignal.y[1]]
diff --git a/tests/datatest.py b/tests/datatest.py
new file mode 100644
index 0000000..fda68ec
--- /dev/null
+++ b/tests/datatest.py
@@ -0,0 +1,97 @@
+import sys
+import numpy
+import math
+import os.path
+import unittest
+
+print "running tests on modules in source directory"
+# assume the script is in the tests directory and we are testing modules in src
+sys.path[0:0]=[os.path.join(os.path.dirname(sys.argv[0]), "..", "src", "data")]
+from MeasurementResult import *
+
+class TestAccumulatedValueClass(unittest.TestCase):
+
+    def setUp(self):
+        # is called before each test
+        pass
+
+    def testInitialization_Empty(self):
+        a=AccumulatedValue()
+        self.assert_(a.mean() is None)
+        self.assert_(a.mean_error() is None)
+        self.assert_(a.sigma() is None)
+        self.assert_(a.n==0)
+
+    def testInitialization_1Value(self):
+        a=AccumulatedValue(0)
+        self.assert_(a.mean()==0)
+        self.assertAlmostEqual(a.mean_error(),0)
+        self.assertAlmostEqual(a.sigma(),0)
+        self.assert_(a.n==1)
+
+    def testInitialization_2Values(self):
+        a=AccumulatedValue(1, 0.1)
+        self.assertAlmostEqual(a.mean(), 1.0)
+        self.assertAlmostEqual(a.mean_error(), 0.1)
+        self.assertAlmostEqual(a.sigma(), 0.1*math.sqrt(2.0))
+        self.assert_(a.n==2)
+
+    def testInitialization_3Values(self):
+        a=AccumulatedValue(1, 0.1, 10)
+        self.assertAlmostEqual(a.mean(),1)
+        self.assertAlmostEqual(a.mean_error(), 0.1)
+        self.assertAlmostEqual(a.sigma(), 0.1*math.sqrt(10.0))
+        self.assert_(a.n==10)
+
+    def testStatistics(self):
+        test_dataset=numpy.arange(10.0)
+        a=AccumulatedValue()
+        for i in test_dataset:
+            a+=i
+        self.assert_(a.n==len(test_dataset))
+        # mean = sum x_i/n
+        self.assertAlmostEqual(a.mean(), test_dataset.mean())
+        # sigma = std_dev_{n-1} x_i = sqrt(sum (x-xmean)**2/(n-1))
+        self.assertAlmostEqual(a.sigma(), math.sqrt(((test_dataset-a.mean())**2).sum()/(len(test_dataset)-1.)))
+        # mean error = std_dev_{n-1} x_i / sqrt(n)
+        self.assertAlmostEqual(a.mean_error(), a.sigma()/math.sqrt(len(test_dataset)))
+
+    def tearDown(self):
+        # is called after each test
+        pass
+
+class TestMeasurementResult(unittest.TestCase):
+
+    def testImplicitCast(self):
+        # check whether other data types will be converted to AccumulatedValue
+        m=MeasurementResult("TestData")
+        m[1.0]
+        m[2.0]=2
+        self.assert_(isinstance(m[1.0], AccumulatedValue))
+        self.assert_(isinstance(m[2.0], AccumulatedValue))
+
+    def testUninitializedEntries(self):
+        # assure that entries with no accumulated data are not listed as xdata
+        m=MeasurementResult("TestData")
+        a=m[2.0]
+        self.assert_(isinstance(a, AccumulatedValue))
+        self.assert_(2.0 in m)
+        self.assert_(2.0 not in m.get_xdata())
+        m[2.0]+=1
+        self.assert_(2.0 in m.get_xdata())
+
+    def testZeroError(self):
+        # AccumulatedValues with only one accumulation should have 0 error
+        m=MeasurementResult("TestData")
+        m[0.0]
+        m[1.0]=AccumulatedValue()
+        m[2.0]=0
+        m[3.0]=AccumulatedValue(0,1.0)
+        k,v,e=m.get_errorplotdata()
+        self.assert_(2.0 in k and 3.0 in k)
+        self.assert_(1.0 not in k and 0.0 not in k)
+        self.assertAlmostEqual(e[k==2.0][0], 0)
+        self.assertAlmostEqual(e[k==3.0][0], 1.0)
+
+if __name__=="__main__":
+    unittest.main()
\ No newline at end of file