migrate to standard svn repo layout

Markus Rosenstihl 2014-06-26 11:10:51 +00:00
commit 0a393b0748
55 changed files with 13617 additions and 0 deletions

7
MANIFEST.in Normal file

@@ -0,0 +1,7 @@
include src/gui/DAMARIS.png
include src/gui/DAMARIS.ico
include src/gui/damaris.glade
include src/gui/damaris.gladep
include doc/index.html
recursive-include doc/reference-html *.html *.gif *.png *.css
recursive-include doc/tutorial-html *.html *.gif *.png *.css *.tar.gz *.sh
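MANIFEST.in only affects source distributions; a quick way to verify these files actually end up in the tarball (standard distutils behaviour, archive name derived from setup.py):
python setup.py sdist
tar -tzf dist/python-damaris-0.14-svn.tar.gz | grep DAMARIS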

17
README Normal file

@@ -0,0 +1,17 @@
Installing into private directory
=================================
For whatever reason, it might be useful to install the
Python frontend into a private directory.
Here are some examples of how to do it:
python setup.py install --home=/opt/damaris-private
or
python setup.py install --home=$HOME/devel/damaris-installed
To start DAMARIS, use for example:
/opt/damaris-private/bin/DAMARIS
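If the start script does not find the privately installed modules, the module search path can also be set by hand; a minimal sketch, assuming the --home layout from above (pure-Python modules land in lib/python):
export PYTHONPATH=/opt/damaris-private/lib/python:$PYTHONPATH
/opt/damaris-private/bin/DAMARIS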

68
debian/changelog vendored Normal file

@@ -0,0 +1,68 @@
python-damaris (0.14-svn) experimental; urgency=low
* MeasurementResults (MR) can be lineplots (Oleg Petrov)
* Default symbol for MR changed to "*"
* Several more improvements to GUI code
* Fixed some errors in FFT module (Oleg Petrov)
* Config now according to XDG standard
-- Markus Rosenstihl <Markus.Rosenstihl@physik.tu-darmstadt.de> Wed, 07 Nov 2012 12:36:40 +0000
python-damaris (0.13-0) experimental; urgency=low
* using cElementTree if available, giving the ResultReader almost 20x speed up
-- Markus Rosenstihl <markus.rosenstihl@physik.tu-darmstadt.de> Wed, 03 Nov 2010 18:49:46 +0000
python-damaris (0.12-0.0) experimental; urgency=low
* new development version
-- Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de> Sat, 24 May 2008 17:49:25 +0200
python-damaris (0.11-0.1) experimental; urgency=low
* updated debian python policy
* job descriptions: types are saved and recovered
* support for system wide defaults file thanks to Christian
* new interleaved range by Markus Rosenstihl
* revised display source change event handling, found some severe errors
* cursor movement and selection problems solved
* log window information is saved in HDF files
-- Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de> Thu, 17 Mar 2008 17:28:13 +0100
python-damaris (0.11-0.0) experimental; urgency=low
* rename to python-damaris
* x axis log plot
* %()s functions for hdf file naming
* line number widgets for scripts
* Persistance class added (scope-like fading of signal changes)
-- Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de> Thu, 07 Feb 2008 02:21:45 +0100
greendamaris (0.10-0.2) experimental; urgency=medium
* increased compatibility with pytables version 1 and 2
* sending signals to backend, when it fails to quit properly
* correcting quit event return value
-- Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de> Fri, 12 Oct 2007 13:34:33 +0200
greendamaris (0.10-0.1) experimental; urgency=low
* added docs and browser button for docs
* limited number of points to display for GTKCairo backend
* numarray replaced by numpy
* adapted width of window to fit even gnome icon size
-- Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de> Tue, 02 Oct 2007 19:11:33 +0200
greendamaris (0.10-0.0) experimental; urgency=medium
* started separate source package of python-damaris
* pycentral used to support both python 2.4 and 2.5
-- Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de> Sat, 08 Sep 2007 12:56:06 +0200

1
debian/compat vendored Normal file

@@ -0,0 +1 @@
5

16
debian/control vendored Normal file

@@ -0,0 +1,16 @@
Source: python-damaris
Section: science
Priority: optional
Maintainer: Achim Gaedke <Achim.Gaedke@physik.tu-darmstadt.de>
Build-Depends: debhelper, python-dev, python-central, dpkg-dev
Standards-Version: 3.7.3
XS-Python-Version: current
Package: python-damaris
Architecture: all
XB-Python-Version: ${python:Versions}
Provides: ${python:Provides}, damaris-frontend
Depends: ${python:Depends}, python-numpy (>=1.0), python-scipy, python-gtk2 (>=2.8.0), python-glade2, python-matplotlib (>=0.90), python-tables (>=1.3.2), lzop
Recommends: damaris-backends, python-numpy-ext
Suggests: python-doc, python-tables-doc, python-numpy-doc
Description: python frontend for DAMARIS project

10
debian/damaris.desktop vendored Normal file

@@ -0,0 +1,10 @@
[Desktop Entry]
Name=DAMARIS
Comment=DArmstadt MAgnetic Resonance Instrument Software
Exec=/usr/bin/DAMARIS %F
X-MultipleArgs=true
Terminal=false
MimeType=text/x-python;text/plain;
Type=Application
Categories=Science;Education;Physics;Chemistry;
Icon=DAMARIS

1
debian/pycompat vendored Normal file

@@ -0,0 +1 @@
2

1
debian/python-damaris.menu vendored Normal file

@@ -0,0 +1 @@
?package(python-damaris): needs="X11" section="Apps/Science" title="DAMARIS" longtitle="DArmstadt Magnetic Resonance Instrument Software" command="DAMARIS -dGTKAgg" icon="/usr/share/python-damaris/images/DAMARIS.png"

65
debian/rules vendored Executable file

@@ -0,0 +1,65 @@
#!/usr/bin/make -f
# Achim Gaedke
# May 2007
# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
PYVERS=$(shell pyversions -vr)
PACKAGE_NAME=python-damaris
MODULE_NAME=damaris
PYBASE=$(CURDIR)/debian/${PACKAGE_NAME}
DH_ALWAYS_EXCLUDE=CVS:.svn
clean:
dh_testdir
dh_testroot
dh_installdirs
# Add here commands to clean up after the build process.
rm -f *-stamp
rm -rf dist build $(PYVERS:%=build-ext-%)
find . -name *\.py[co] -exec rm {} \;
dh_clean
build: build-stamp
build-stamp: $(PYVERS:%=build-ext-%)
touch $@
build-ext-%:
dh_testdir
python$* setup.py build
touch $@
install: install-stamp
# install menu related things
dh_install debian/damaris.desktop /usr/share/applications/
install-stamp: build-stamp $(PYVERS:%=install-ext-%)
install-ext-%:
python$* setup.py install --root $(PYBASE) --install-layout=deb
echo "DAMARIS script: removing path to local installation"
-sed 's/^sys.path.insert(0,.*).*/# damaris package is found on default path/' <$(PYBASE)/usr/bin/DAMARIS >$(PYBASE)/usr/bin/DAMARIS.new
-mv $(PYBASE)/usr/bin/DAMARIS.new $(PYBASE)/usr/bin/DAMARIS
binary-indep: build install
dh_link usr/share/python-damaris/doc usr/share/doc/python-damaris/html
dh_link usr/share/python-damaris/images usr/share/doc/python-damaris/images
dh_link usr/share/python-damaris/images/DAMARIS.png usr/share/icons/DAMARIS.png
dh_testdir -i
dh_testroot -i
dh_installchangelogs -i
dh_installdocs -i
dh_pysupport
dh_installmenu -i
dh_desktop -i
dh_compress -i -X.py
dh_fixperms -i
dh_installdeb -i
dh_gencontrol -i
dh_md5sums -i
dh_builddeb -i
binary-arch: build install
binary: binary-indep binary-arch
.PHONY: build clean binary-indep binary-arch binary install configure
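For reference, this rules file is driven like any debhelper-style makefile; a minimal sketch of a package build from the source tree (assumes the usual Debian toolchain is installed):
fakeroot debian/rules binary
# or the full source+binary build:
dpkg-buildpackage -rfakeroot -us -uc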

242
doc/Doxyfile Normal file

@@ -0,0 +1,242 @@
# Doxyfile 1.5.3
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = "python DAMARIS"
PROJECT_NUMBER = 0.13-svn
OUTPUT_DIRECTORY = .
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF = "The $name class " \
"The $name widget " \
"The $name file " \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = NO
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
DETAILS_AT_TOP = NO
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 8
ALIASES =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
DISTRIBUTE_GROUP_DOC = NO
SUBGROUPING = YES
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = YES
EXTRACT_PRIVATE = YES
EXTRACT_STATIC = YES
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = YES
HIDE_SCOPE_NAMES = NO
SHOW_INCLUDE_FILES = YES
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_BY_SCOPE_NAME = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
SHOW_DIRECTORIES = NO
FILE_VERSION_FILTER =
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_FORMAT = "$file:$line: $text "
WARN_LOGFILE =
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = damaris
INPUT_ENCODING = ISO-8859-15
FILE_PATTERNS = *.py \
*.PY
RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS =
EXCLUDE_SYMBOLS =
EXAMPLE_PATH =
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = YES
IMAGE_PATH =
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------
SOURCE_BROWSER = YES
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
REFERENCES_LINK_SOURCE = YES
USE_HTAGS = NO
VERBATIM_HEADERS = NO
#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
ALPHABETICAL_INDEX = NO
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------
GENERATE_HTML = YES
HTML_OUTPUT = reference-html
HTML_FILE_EXTENSION = .html
HTML_HEADER =
HTML_FOOTER =
HTML_STYLESHEET =
HTML_ALIGN_MEMBERS = YES
GENERATE_HTMLHELP = NO
HTML_DYNAMIC_SECTIONS = NO
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
BINARY_TOC = NO
TOC_EXPAND = NO
DISABLE_INDEX = NO
ENUM_VALUES_PER_LINE = 4
GENERATE_TREEVIEW = NO
TREEVIEW_WIDTH = 250
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4wide
EXTRA_PACKAGES =
LATEX_HEADER =
PDF_HYPERLINKS = NO
USE_PDFLATEX = NO
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_LINKS = NO
#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------
GENERATE_XML = NO
XML_OUTPUT = xml
XML_SCHEMA =
XML_DTD =
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
ENABLE_PREPROCESSING = NO
MACRO_EXPANSION = NO
EXPAND_ONLY_PREDEF = NO
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED =
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration::additions related to external references
#---------------------------------------------------------------------------
TAGFILES =
GENERATE_TAGFILE =
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
CLASS_DIAGRAMS = YES
MSCGEN_PATH =
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = YES
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
TEMPLATE_RELATIONS = NO
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
DOT_PATH = /usr/bin/
DOTFILE_DIRS =
DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 1000
DOT_TRANSPARENT = NO
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES
#---------------------------------------------------------------------------
# Configuration::additions related to the search engine
#---------------------------------------------------------------------------
SEARCHENGINE = NO

24
doc/README.txt Normal file

@@ -0,0 +1,24 @@
For now, the documentation build is not automated...
# html reference
# requires dot and doxygen
cd doc
ln -s ../src damaris
doxygen Doxyfile
rm damaris
# todo: copy damaris logo
# html wiki export
# requires moinmoin and damaris/data as wikidata
cd doc
# underlay must be writable, so we have to copy it...
cp -r /usr/share/moin/underlay wikiunderlay
python dump_wiki.py
cp -r /usr/share/moin/htdocs/modern tutorial-html
rm -r wikiunderlay wikiconfig.py
# get useful numpy doc
wget http://www.scipy.org/Numpy_Example_List_With_Doc?action=print

177
doc/dump_wiki.py Normal file

@@ -0,0 +1,177 @@
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - Dump a MoinMoin wiki to static pages
based on "moin.py export dump" command
"""
import sys, os, time, StringIO, codecs, shutil, re, errno
from MoinMoin import config, wikiutil, Page
from MoinMoin.request import RequestCLI
from MoinMoin.action import AttachFile
HTML_SUFFIX = ".html"
page_template = u'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=%(charset)s">
<title>%(pagename)s</title>
<link rel="stylesheet" type="text/css" media="all" charset="utf-8" href="%(theme)s/css/common.css">
<link rel="stylesheet" type="text/css" media="screen" charset="utf-8" href="%(theme)s/css/screen.css">
<link rel="stylesheet" type="text/css" media="print" charset="utf-8" href="%(theme)s/css/print.css">
</head>
<body>
<div id="page">
<h1 id="title">%(pagename)s</h1>
%(pagehtml)s
</div>
<hr>
%(timestamp)s
</body>
</html>
'''
def _attachment(request, pagename, filename, outputdir):
filename = filename.encode(config.charset)
source_dir = AttachFile.getAttachDir(request, pagename)
source_file = os.path.join(source_dir, filename)
dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
dest_file = os.path.join(dest_dir, filename)
dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), wikiutil.url_quote(filename))
if os.access(source_file, os.R_OK):
if not os.access(dest_dir, os.F_OK):
try:
os.makedirs(dest_dir)
except:
print ("Cannot create attachment directory '%s'" % dest_dir)
raise
elif not os.path.isdir(dest_dir):
print ("'%s' is not a directory" % dest_dir)
shutil.copyfile(source_file, dest_file)
print ('Writing "%s"...' % dest_url)
return dest_url
else:
return ""
class PluginScript: #(MoinScript):
""" Dump script class """
def __init__(self):
pass
def mainloop(self):
""" moin-dump's main code. """
# Prepare output directory
outputdir=os.path.join(os.curdir,"tutorial-html")
try:
os.mkdir(outputdir)
print "Created output directory '%s'!" % outputdir
except OSError, err:
if err.errno != errno.EEXIST:
print "Cannot create output directory '%s'!" % outputdir
raise
sys.path.insert(0, os.path.abspath(os.curdir))
wikiconfig_template="""
from MoinMoin.multiconfig import DefaultConfig
class Config(DefaultConfig):
sitename = u'DAMARIS Homepage and Usergroup'
logo_string = u'<img src="/damaris/wiki/damaris/DAMARIS.png" alt="DAMARIS Logo">'
page_front_page = u"Welcome"
interwikiname = 'damaris'
data_dir = '%(pwd)s/wikidata/'
data_underlay_dir = '%(pwd)s/wikiunderlay'
url_prefix = '/damaris/wiki'
theme_default = 'modern'
"""%{"pwd": os.curdir, "underlay": "/home/achim/underlay" }
config_file = open("wikiconfig.py","w")
print >>config_file, wikiconfig_template
config_file.close()
# start with wiki entry page
request = RequestCLI(pagename="Welcome")
# fix url_prefix so we get relative paths in output html
url_prefix = "."
request.cfg.url_prefix = url_prefix
pages = request.rootpage.getPageList(user='') # get list of all pages in wiki
pages.sort()
# select the subset of pages to be exported
# a full dump would be too fat!!!
try:
pages_match = re.compile("^(Tutorial|auxiliary tools|overview|installation|code snippets)")
pages = [page for page in pages if pages_match.match(page)]
except:
print "did not find suitable pages"
raise
wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)
AttachFile.getAttachUrl = lambda pagename, filename, request, addts=0, escaped=0: (_attachment(request, pagename, filename, outputdir))
errfile = os.path.join(outputdir, 'error.log')
errlog = open(errfile, 'w')
errcnt = 0
page_front_page = wikiutil.getSysPage(request, request.cfg.page_front_page).page_name
page_title_index = wikiutil.getSysPage(request, 'TitleIndex').page_name
page_word_index = wikiutil.getSysPage(request, 'WordIndex').page_name
navibar_html = ''
for p in [page_front_page, page_title_index, page_word_index]:
navibar_html += '&nbsp;[<a href="%s">%s</a>]' % (wikiutil.quoteWikinameURL(p), wikiutil.escape(p))
urlbase = request.url # save wiki base url
for pagename in pages:
# we have the same name in URL and FS
file = wikiutil.quoteWikinameURL(pagename)
print ('Writing "%s"...' % file)
try:
pagehtml = ''
request.url = urlbase + pagename # add current pagename to url base
page = Page.Page(request, pagename)
request.page = page
try:
request.reset()
pagehtml = request.redirectedOutput(page.send_page, request, count_hit=0, content_only=1)
except:
errcnt = errcnt + 1
print >>sys.stderr, "*** Caught exception while writing page!"
print >>errlog, "~" * 78
print >>errlog, file # page filename
import traceback
traceback.print_exc(None, errlog)
finally:
timestamp = time.strftime("%Y-%m-%d %H:%M")
filepath = os.path.join(outputdir, file)
fileout = codecs.open(filepath, 'w', config.charset)
logo_html = '<img src="logo.png">'
fileout.write(page_template % {
'charset': config.charset,
'pagename': pagename,
'pagehtml': pagehtml,
'logo_html': logo_html,
'navibar_html': navibar_html,
'timestamp': timestamp,
'theme': request.cfg.theme_default,
})
fileout.close()
# ToDo: insert style sheets and logo
errlog.close()
if errcnt:
print >>sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
if __name__=="__main__":
PluginScript().mainloop()

84
doc/index.html Normal file

@@ -0,0 +1,84 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
<title>python-damaris documentation index</title>
</head>
<body>
<h1><img alt="DAMARIS" src="../images/DAMARIS.png" align="middle">DAMARIS
Documentation Index<br>
</h1>
<h2>Local Resources:</h2>
<br>
Documentation from DAMARIS wiki:<br>
<ul>
<li><a href="tutorial-html/Tutorial.html">Tutorial</a></li>
<li><a href="tutorial-html/Tutorial%282f%29List_of_Commands.html">Command
Reference</a></li>
</ul>
python-damaris source code reference (generated by <a
href="http://www.doxygen.org/">doxygen</a>)<br>
<ul>
<li><a href="reference-html/index.html">index</a></li>
<li><a href="reference-html/namespacedamaris_1_1data.html">result
reference</a><br>
</li>
<li><a
href="reference-html/namespacedamaris_1_1experiments_1_1Experiment.html">experiment
reference</a></li>
</ul>
<h2>Internet Resources:</h2>
<ul>
<li><a href="http://damaris.berlios.de/">DAMARIS
Homepage</a></li>
<li>Software Related</li>
<ul>
<li><a href="http://www.python.org/">Python Homepage</a></li>
<li><a href="http://www.numpy.org/">Numeric Python (numpy)</a></li>
<li><a href="http://www.pytables.org/">HDF Support (pytables)</a><br>
</li>
<li><a href="http://www.scipy.org/">Scientific Python (scipy)</a></li>
<li><a href="http://www.pygtk.org/">PyGTK Homepage (Graphics)<br>
</a></li>
<li><a href="matplotlib.sourceforge.net">2D Graph drawing
(Matplotlib)</a></li>
<li><a href="http://ipython.scipy.org/">Interactive Python
Interpreter (iPython)</a><br>
</li>
</ul>
<li>More Documentation:</li>
<ul>
<li>Official Python <a href="http://www.python.org/doc/">Documentation</a>
/ <a href="http://docs.python.org/tut/tut.html">Tutorial</a></li>
<li>More Python <a
href="http://www.awaretek.com/tutorials.html#begin">Tutorials</a><br>
</li>
</ul>
<ul>
<li><a href="http://www.scipy.org/Numpy_Example_List_With_Doc">Numpy
Example List</a></li>
<li><a href="http://www.tramy.us/numpybook.pdf">Numpy Book (pdf)</a>
by Travis E. Oliphant</li>
<li>A <a href="http://www.scipy.org/Cookbook">Cookbook</a> for
many advanced data processing tasks<br>
</li>
</ul>
<li>NMR Links</li>
<ul>
<li><a href="http://www.nmrwiki.org/">NMR wiki</a><br>
</li>
<li><a href="http://www.spincore.com/nmrinfo/">NMR Info</a><br>
</li>
<li><a href="http://www.diffusion-fundamentals.org/">Diffusion
Fundamentals</a><br>
</li>
</ul>
<ul>
<li><a href="http://www.fkp.tu-darmstadt.de/groups/ag_fujara/fup/index.de.jsp">AG Fujara</a></li>
<li><a href="http://www.fkp.tu-darmstadt.de/groups/ag_vogel/index.de.jsp">AG Vogel</a><br>
</li>
</ul>
</ul>
</body>
</html>

1864
doxygen.conf Normal file

File diff suppressed because it is too large

51
scripts/DAMARIS Executable file

@@ -0,0 +1,51 @@
#!/usr/bin/python
# setup script will insert local DAMARIS installation path behind import sys statement
# this must happen before any damaris stuff is called!
import sys
import os
# for numpy-1.1 and later: check the environment for LANG and LC_NUMERIC
# see: http://projects.scipy.org/scipy/numpy/ticket/902
if os.environ.get("LANG","").startswith("de") or os.environ.get("LC_NUMERIC", "").startswith("de"):
os.environ["LC_NUMERIC"]="C"
import damaris.gui.DamarisGUI
import matplotlib
import os.path
# argv is already parsed by gtk initialisation
myargs=sys.argv[1:]
myname=os.path.basename(sys.argv[0])
# find debug flag:
if "--debug" in myargs:
damaris.gui.DamarisGUI.debug = True
print "debug flag set"
try:
import resource
resource.setrlimit(resource.RLIMIT_CORE, (-1,-1))
except ImportError:
pass
matplotlib.rcParams["verbose.level"]="debug"
myargs.remove("--debug")
# remove matplotlib flags
if "-d"+matplotlib.rcParams["backend"] in myargs:
myargs.remove("-d"+matplotlib.rcParams["backend"])
# find scripts to load in parameter list
exp_script = None
res_script = None
if len(myargs)<=2:
if len(myargs)>=1:
exp_script=myargs[0]
if len(myargs)==2:
res_script=myargs[1]
else:
print """too many arguments.\n%s [--debug] [-dGTK(Agg|Cairo|)] (Experiment File|"") (Result File|"")"""%(myname)
d=damaris.gui.DamarisGUI.DamarisGUI(exp_script, res_script)
d.run()
sys.stdout=sys.__stdout__
sys.stderr=sys.__stderr__
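A few example invocations matching the usage string above (script names are placeholders):
DAMARIS experiment.py result.py
DAMARIS --debug -dGTKAgg experiment.py result.py
DAMARIS "" result.py
An empty string skips the experiment slot, so only the result script is loaded.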

121
setup.py Normal file

@@ -0,0 +1,121 @@
#!/usr/bin/env python
import shutil
import os
import os.path
import sys
from distutils.core import setup
from distutils.command.build_scripts import build_scripts as _build_scripts
from distutils import log
from distutils.util import convert_path
from distutils.dep_util import newer
#if sys.version_info < (2, 5, 3):
# log.error("**** ERROR: Install manually: python setup.py install ****")
# raise ValueError
class build_damaris_scripts(_build_scripts):
#user_options=_build_scripts.user_options[:]
#user_options.append(('install-dir=', 'd', "directory to install scripts to"))
def initialize_options (self):
_build_scripts.initialize_options(self)
self.damaris_dir = None
def finalize_options (self):
_build_scripts.finalize_options(self)
self.set_undefined_options('install',
('install_lib', 'damaris_dir'))
def run (self):
"change PYTHON_PATH for DAMARIS executable"
_build_scripts.run(self)
script="scripts/DAMARIS"
script = convert_path(script)
outfile = os.path.join(self.build_dir, os.path.basename(script))
self.damaris_dir=os.path.normpath(self.damaris_dir)
if self.damaris_dir in sys.path:
log.debug("not changing %s (this path is on standard path)", script)
# nothing to do for us
return
# now change PATH in DAMARIS script
# copy backup
log.info("adapting DAMARIS script to use local installation")
shutil.copyfile(outfile, outfile+".bak")
# the file should keep all its attributes (executable...)
inf=file(outfile+".bak","r")
outf=file(outfile,"w")
l=inf.readline()
while not l.startswith("import sys") and l!="":
outf.write(l)
l=inf.readline()
if l!="":
outf.write(l)
l=inf.readline()
while l.endswith("# inserted by setup.py\n"):
l=inf.readline()
outf.write("sys.path.insert(0,\"%s\") # inserted by setup.py\n"%self.damaris_dir)
outf.write(l)
outf.writelines(inf.readlines())
inf.close()
outf.close()
os.remove(outfile+".bak")
# create doc data file information
distribution_doc_prefix=os.path.join("share","python-damaris","doc")
distribution_data_files = [[ "share", []],
[os.path.join("share", "python-damaris", "images"),
["src/gui/DAMARIS.png", "src/gui/DAMARIS.ico"]],
[os.path.join("share", "python-damaris"), []],
[distribution_doc_prefix, ['doc/index.html']]]
if os.path.isdir(os.path.join("doc","reference-html")):
# no subdirs, work can be done in simple way
distribution_data_files.append([os.path.join(distribution_doc_prefix, 'reference-html'),
[os.path.join('doc', 'reference-html', f)
for f in os.listdir(os.path.join('doc', 'reference-html'))]])
if os.path.isdir(os.path.join("doc","tutorial-html")):
# here, modern style file and attachment directories should be handled
for d in os.walk(os.path.join("doc","tutorial-html")):
distribution_data_files.append([os.path.join(os.path.dirname(distribution_doc_prefix),d[0]),
[os.path.join(d[0], f) for f in d[2]]])
LONG_DESCRIPTION="""
DArmstadt MAgnetic Resonance Instrument Software
"""
GPL_LICENCE = "feed licence here"
setup (
name = 'python-damaris',
version = "0.14-svn",
description = 'python frontend for DAMARIS (DArmstadt MAgnetic Resonance Instrument Software)',
long_description = LONG_DESCRIPTION,
author = 'Achim Gaedke',
author_email = 'Achim.Gaedke@physik.tu-darmstadt.de',
maintainer = 'Achim Gaedke',
maintainer_email = 'Achim.Gaedke@physik.tu-darmstadt.de',
url = 'http://www.fkp.physik.tu-darmstadt.de/damaris/',
license = GPL_LICENCE,
platforms = ('Any',),
keywords = ('NMR', 'data-processing'),
packages = [ 'damaris',
'damaris.data',
'damaris.experiments',
'damaris.gui',
'damaris.tools' ],
package_dir = { 'damaris': 'src',
'damaris.data': 'src/data',
'damaris.experiments': 'src/experiments',
'damaris.gui': 'src/gui',
'damaris.tools': 'src/tools' },
package_data = { 'damaris.gui': ['DAMARIS.png', 'DAMARIS.ico', 'damaris.glade', 'damaris.gladep', 'python.xml']},
scripts = ['scripts/DAMARIS'],
cmdclass={"build_scripts": build_damaris_scripts},
data_files = distribution_data_files
)
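A minimal sketch of how this setup script is typically driven; the private-directory variant is described in the README:
python setup.py build
python setup.py install --home=$HOME/devel/damaris-installed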

18
src/__init__.py Normal file

@@ -0,0 +1,18 @@
##\mainpage DArmstadt MAgnetic Resonance Instrument Software
#
#Python Frontend based on
# - Python/GTK
# - Matplotlib
# - Numpy
# - PyTables
#
#Written by
# - Achim Gaedke
# - Christopher Schmitt
# - Markus Rosenstihl
# - Holger Stork
# - Christian Tacke
## module contents
#
__all__=["experiments", "data", "gui"]

523
src/data/ADC_Result.py Normal file

@@ -0,0 +1,523 @@
# -*- coding: iso-8859-1 -*-
from Resultable import Resultable
from Drawable import Drawable
from Signalpath import Signalpath
from DamarisFFT import DamarisFFT
import threading
import numpy
import sys
import types
import datetime
import tables
#############################################################################
# #
# Name: Class ADC_Result #
# #
# Purpose: Specialised class of Resultable and Drawable #
# Contains recorded ADC Data #
# #
#############################################################################
class ADC_Result(Resultable, Drawable, DamarisFFT, Signalpath):
def __init__(self, x = None, y = None, index = None, sampl_freq = None, desc = None, job_id = None, job_date = None):
Resultable.__init__(self)
Drawable.__init__(self)
# Title of this result: job ID and description (plotted in GUI -> see Drawable)
# Set in ResultReader.py (or in the copy constructor)
self.__title_pattern = "ADC-Result: job_id = %s, desc = %s"
# Axis-Labels (inherited from Drawable)
self.xlabel = "Time (s)"
self.ylabel = "Samples [Digits]"
self.lock=threading.RLock()
self.nChannels = 0
if (x is None) and (y is None) and (index is None) and (sampl_freq is None) and (desc is None) and (job_id is None) and (job_date is None):
self.cont_data = False
self.sampling_rate = 0
self.index = []
self.x = []
self.y = []
elif (x is not None) and (y is not None) and (index is not None) and (sampl_freq is not None) and (desc is not None) and (job_id is not None) and (job_date is not None):
self.x = x
self.y = y
self.index = index
self.sampling_rate = sampl_freq
self.cont_data = True
self.description = desc
self.job_id = job_id
self.job_date = job_date
title="ADC-Result: job-id=%d"%int(self.job_id)
if len(self.description)>0:
for k,v in self.description.iteritems():
# string keys can be made invisible by adding two underscores in front of them
if not (type(k) in types.StringTypes and k[0] == '_' and k[1] == '_'):
title+=", %s=%s"%(k,v)
self.set_title(title)
else:
raise ValueError("Wrong usage of __init__!")
def create_data_space(self, channels, samples):
"Initialises the internal data-structures"
if self.contains_data():
print "Warning ADC-Result: Tried to run \"create_data_space()\" more than once."
return
if channels <= 0: raise ValueError("You can't create an ADC-Result with less than 1 channel!")
if samples <= 0: raise ValueError("You can't create an ADC-Result with less than 1 sample!")
for i in range(channels):
self.y.append(numpy.zeros((samples,), dtype="Int16"))
self.x = numpy.zeros((samples,), dtype="Float64")
self.index.append((0, samples-1))
self.cont_data = True
def contains_data(self):
"Returns true if ADC_Result contains data. (-> create_data_space() was called)"
return self.cont_data
def add_sample_space(self, samples):
"Adds space for n samples, where n can also be negative (deletes space). New space is filled up with \"0\""
self.lock.acquire()
if not self.cont_data:
print "Warning ADC-Result: Tried to resize empty array!"
return
length = len(self.y[0])
self.x = numpy.resize(self.x, (length+samples))
for i in range(self.get_number_of_channels()):
self.y[i] = numpy.resize(self.y[i], (length+samples))
self.index.append((length, len(self.y[0])-1))
self.lock.release()
def get_result_by_index(self, index):
self.lock.acquire()
try:
start = self.index[index][0]
end = self.index[index][1]
except:
self.lock.release()
raise
tmp_x = self.x[start:end+1].copy()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(self.y[i][start:end+1].copy())
r = ADC_Result(x = tmp_x, y = tmp_y, index = [(0,len(tmp_y[0])-1)], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
def get_sampling_rate(self):
"Returns the samplingfrequency"
return self.sampling_rate + 0
def set_sampling_rate(self, hz):
"Sets the samplingfrequency in hz"
self.sampling_rate = float(hz)
def get_nChannels(self):
"Gets the number of channels"
return self.nChannels + 0
def set_nChannels(self, channels):
"Sets the number of channels"
self.nChannels = int(channels)
def get_index_bounds(self, index):
"Returns a tuple with (start, end) of the wanted result"
return self.index[index]
def uses_statistics(self):
return False
def write_to_csv(self, destination=sys.stdout, delimiter=" "):
"""
writes the data to a file or to sys.stdout
destination can be a file or a filename
suitable for further processing
"""
# write sorted
the_destination=destination
if type(destination) in types.StringTypes:
the_destination=file(destination, "w")
the_destination.write("# adc_result\n")
the_destination.write("# t y0 y1 ...\n")
self.lock.acquire()
try:
xdata=self.get_xdata()
ch_no=self.get_number_of_channels()
ydata=map(self.get_ydata, xrange(ch_no))
#yerr=map(self.get_yerr, xrange(ch_no))
for i in xrange(len(xdata)):
the_destination.write("%e"%xdata[i])
for j in xrange(ch_no):
the_destination.write("%s%e"%(delimiter, ydata[j][i]))
the_destination.write("\n")
the_destination=None
xdata=ydata=None
finally:
self.lock.release()
def write_to_simpson(self, destination=sys.stdout, delimiter=" "):
"""
writes the data to a text file or sys.stdout in Simpson format,
for further processing with the NMRnotebook software;
destination can be a file or a filename
"""
# write sorted
the_destination=destination
if type(destination) in types.StringTypes:
the_destination=file(destination, "w")
self.lock.acquire()
try:
xdata=self.get_xdata()
the_destination.write("SIMP\n")
the_destination.write("%s%i%s"%("NP=", len(xdata), "\n"))
the_destination.write("%s%i%s"%("SW=", self.get_sampling_rate(), "\n"))
the_destination.write("TYPE=FID\n")
the_destination.write("DATA\n")
ch_no=self.get_number_of_channels()
ydata=map(self.get_ydata, xrange(ch_no))
for i in xrange(len(xdata)):
for j in xrange(ch_no):
the_destination.write("%g%s"%(ydata[j][i], delimiter))
the_destination.write("\n")
the_destination.write("END\n")
the_destination=None
xdata=ydata=None
finally:
self.lock.release()
def write_to_hdf(self, hdffile, where, name, title, complib=None, complevel=None):
accu_group=hdffile.createGroup(where=where,name=name,title=title)
accu_group._v_attrs.damaris_type="ADC_Result"
if self.contains_data():
self.lock.acquire()
try:
# save time stamps
if "job_date" in dir(self) and self.job_date is not None:
accu_group._v_attrs.time="%04d%02d%02d %02d:%02d:%02d.%03d"%(self.job_date.year,
self.job_date.month,
self.job_date.day,
self.job_date.hour,
self.job_date.minute,
self.job_date.second,
self.job_date.microsecond/1000)
if self.description is not None:
for (key,value) in self.description.iteritems():
accu_group._v_attrs.__setattr__("description_"+key,str(value))
# save interval information
filter=None
if complib is not None:
if complevel is None:
complevel=9
filter=tables.Filters(complevel=complevel,complib=complib,shuffle=1)
index_table=hdffile.createTable(where=accu_group,
name="indices",
description={"start": tables.UInt64Col(),
"length": tables.UInt64Col(),
"start_time": tables.Float64Col(),
"dwelltime": tables.Float64Col()},
title="indices of adc data intervals",
filters=filter,
expectedrows=len(self.index))
index_table.flavor="numpy"
# save channel data
new_row=index_table.row
for i in xrange(len(self.index)):
new_row["start"]=self.index[i][0]
new_row["dwelltime"]=1.0/self.sampling_rate
new_row["start_time"]=1.0/self.sampling_rate*self.index[i][0]
new_row["length"]=self.index[i][1]-self.index[i][0]+1
new_row.append()
index_table.flush()
new_row=None
index_table=None
# prepare saving data
channel_no=len(self.y)
timedata=numpy.empty((len(self.y[0]),channel_no),
dtype = "Int32")
for ch in xrange(channel_no):
timedata[:,ch]=self.get_ydata(ch)
# save data
time_slice_data=None
if filter is not None:
chunkshape = numpy.shape(timedata)
if len(chunkshape) <= 1:
chunkshape = (min(chunkshape[0],1024*8),)
else:
chunkshape = (min(chunkshape[0],1024*8), chunkshape[1])
if tables.__version__[0]=="1":
time_slice_data=hdffile.createCArray(accu_group,
name="adc_data",
shape=timedata.shape,
atom=tables.Int32Atom(shape=chunkshape,
flavor="numpy"),
filters=filter,
title="adc data")
else:
time_slice_data=hdffile.createCArray(accu_group,
name="adc_data",
shape=timedata.shape,
chunkshape=chunkshape,
atom=tables.Int32Atom(),
filters=filter,
title="adc data")
time_slice_data[:]=timedata
else:
time_slice_data=hdffile.createArray(accu_group,
name="adc_data",
object=timedata,
title="adc data")
finally:
timedata=None
time_slice_data=None
accu_group=None
self.lock.release()
# Operator overloading and built-ins -------------------------------------------------------
def __len__(self):
"Redefining len(ADC_Result obj), returns the number of samples in one channel and 0 without data"
if len(self.y)>0:
return len(self.y[0])
return 0
def __repr__(self):
"""
writes job meta data and data to string returned
"""
tmp_string = "Job ID: " + str(self.job_id) + "\n"
tmp_string += "Job Date: " + str(self.job_date) + "\n"
tmp_string += "Description: " + str(self.description) + "\n"
if len(self.y)>0:
tmp_string += "Indexes: " + str(self.index) + "\n"
tmp_string += "Samples per Channel: " + str(len(self.y[0])) + "\n"
tmp_string += "Samplingfrequency: " + str(self.sampling_rate) + "\n"
tmp_string += "X: " + repr(self.x) + "\n"
for i in range(self.get_number_of_channels()):
tmp_string += ("Y(%d): " % i) + repr(self.y[i]) + "\n"
return tmp_string
def __add__(self, other):
"Redefining self + other (scalar)"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(self.y[i], dtype="Float64") + other)
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("ValueError: Cannot add \"%s\" to ADC-Result!" % str(other.__class__))
def __radd__(self, other):
"Redefining other (scalar) + self"
return self.__add__(other)
def __sub__(self, other):
"Redefining self - other (scalar)"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(self.y[i], dtype="Float64") - other)
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("ValueError: Cannot subtract \"%s\" to ADC-Result!") % str(other.__class__)
def __rsub__(self, other):
"Redefining other (scalar) - self"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(other - numpy.array(self.y[i], dtype="Float64"))
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("ValueError: Cannot subtract \"%s\" to ADC-Result!") % str(other.__class__)
def __mul__(self, other):
"Redefining self * other (scalar)"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(self.y[i], dtype="Float64") * other)
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("Cannot multiply ADC-Result with \"%s\"!" % str(other.__class__))
def __rmul__(self, other):
"Redefining other (scalar) * self"
return self.__mul__(other)
def __pow__(self, other):
"Redefining self ** other (scalar)"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(self.y[i], dtype="Float64") ** other)
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("ValueError: Cannot multiply \"%s\" to ADC-Result!") % str(other.__class__)
def __div__(self, other):
"Redefining self / other (scalar)"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(self.y[i], dtype="Float64") / other)
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("ValueError: Cannot multiply \"%s\" to ADC-Result!") % str(other.__class__)
def __rdiv__(self, other):
"Redefining other (scalar) / self"
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(other / numpy.array(self.y[i], dtype="Float64"))
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
else:
raise ValueError("Cannot divide \"%s\" by ADC-Result!" % str(other.__class__))
def __neg__(self):
"Redefining -self"
self.lock.acquire()
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(-self.y[i]))
r = ADC_Result(x = self.x[:], y = tmp_y, index = self.index[:], sampl_freq = self.sampling_rate, desc = self.description, job_id = self.job_id, job_date = self.job_date)
self.lock.release()
return r
def read_from_hdf(hdf_node):
"""
read accumulation data from HDF node and return it.
"""
# formal checks first
if not isinstance(hdf_node, tables.Group):
return None
if hdf_node._v_attrs.damaris_type!="ADC_Result":
return None
if not (hdf_node.__contains__("indices") and hdf_node.__contains__("adc_data")):
return None
# job id and x,y titles are missing
adc=ADC_Result()
# populate description dictionary
adc.description={}
for attrname in hdf_node._v_attrs._v_attrnamesuser:
if attrname.startswith("description_"):
adc.description[attrname[12:]]=hdf_node._v_attrs.__getattr__(attrname)
if "time" in dir(hdf_node._v_attrs):
timestring=hdf_node._v_attrs.__getattr__("time")
adc.job_date=datetime.datetime(int(timestring[:4]), # year
int(timestring[4:6]), # month
int(timestring[6:8]), # day
int(timestring[9:11]), # hour
int(timestring[12:14]), # minute
int(timestring[15:17]), # second
int(timestring[18:21])*1000 # microsecond
)
# start with indices
for r in hdf_node.indices.iterrows():
adc.index.append((r["start"],r["start"]+r["length"]-1))
adc.sampling_rate=1.0/r["dwelltime"]
# now we can safely assume there is no data
if len(adc.index)==0:
adc.cont_data=False
return adc
adc.cont_data=True
# now do the real data
adc_data=hdf_node.adc_data.read()
adc.x=numpy.arange(adc_data.shape[0], dtype="Float64")/adc.sampling_rate
for ch in xrange(adc_data.shape[1]):
adc.y.append(adc_data[:,ch])
return adc
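A minimal sketch of the ADC_Result API above (values are invented; in practice ResultReader constructs and fills these objects):
from damaris.data.ADC_Result import ADC_Result
res = ADC_Result() # empty result, cont_data is False
res.create_data_space(channels=2, samples=1024) # allocates x, y and index
res.set_sampling_rate(1e6) # dwell time of 1 us
res.write_to_csv("adc_dump.dat", delimiter=" ") # columns: t y0 y1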

824
src/data/Accumulation.py Normal file

@@ -0,0 +1,824 @@
# -*- coding: iso-8859-1 -*-
#############################################################################
# #
# Name: Class Accumulation #
# #
# Purpose: Specialised class of Errorable and Drawable #
# Contains accumulated ADC-Data #
# #
#############################################################################
from Errorable import Errorable
from Drawable import Drawable
from DamarisFFT import DamarisFFT
from Signalpath import Signalpath
import sys
import threading
import types
import tables
import numpy
import datetime
class Accumulation(Errorable, Drawable, DamarisFFT, Signalpath):
def __init__(self, x = None, y = None, y_2 = None, n = None, index = None, sampl_freq = None, error = False):
Errorable.__init__(self)
Drawable.__init__(self)
# Title of this accumulation (plotted in GUI -> see Drawable)
self.__title_pattern = "Accumulation: n = %d"
# Axis-Labels (inherited from Drawable)
self.xlabel = "Time (s)"
self.ylabel = "Avg. Samples [Digits]"
self.lock=threading.RLock()
self.common_descriptions=None
self.time_period=[]
self.use_error = error
if self.uses_statistics():
if (y_2 is not None):
self.y_square = y_2
elif (y_2 is None) :
self.y_square = []
else:
raise ValueError("Wrong usage of __init__!")
if (x is None) and (y is None) and (index is None) and (sampl_freq is None) and (n is None):
self.sampling_rate = 0
self.n = 0
self.set_title(self.__title_pattern % self.n)
self.cont_data = False
self.index = []
self.x = []
self.y = []
elif (x is not None) and (y is not None) and (index is not None) and (sampl_freq is not None) and (n is not None):
self.x = x
self.y = y
self.sampling_rate = sampl_freq
self.n = n
self.set_title(self.__title_pattern % self.n)
self.index = index
self.cont_data = True
else:
raise ValueError("Wrong usage of __init__!")
def get_accu_by_index(self, index):
self.lock.acquire()
try:
start = self.index[index][0]
end = self.index[index][1]
except:
self.lock.release()
raise
tmp_x = self.x[start:end+1]
tmp_y = []
for i in range(self.get_number_of_channels()):
tmp_y.append(self.y[i][start:end+1])
r = Accumulation(x = tmp_x, y = tmp_y, n = self.n, index = [(0,len(tmp_y[0])-1)], sampl_freq = self.sampling_rate, error = self.use_error)
self.lock.release()
return r
def get_ysquare(self, channel):
if self.uses_statistics():
try:
return self.y_square[channel]
except:
raise
else: return None
def contains_data(self):
return self.cont_data
def get_sampling_rate(self):
"Returns the samplingfrequency"
return self.sampling_rate + 0
def get_index_bounds(self, index):
"Returns a tuple with (start, end) of the wanted result"
return self.index[index]
def uses_statistics(self):
return self.use_error
# Public interface --------------------------------------------------------------------
def get_yerr(self, channel):
"""
return error (std.dev/sqrt(n)) of mean
"""
if not self.uses_statistics(): return numpy.zeros((len(self.y[0]),),dtype="Float64")
if not self.contains_data(): return []
self.lock.acquire()
if self.n < 2:
retval=numpy.zeros((len(self.y[0]),),dtype="Float64")
self.lock.release()
return retval
try:
variance_over_n = (self.y_square[channel] - (self.y[channel]**2 / float(self.n)))/float((self.n-1)*self.n)
except IndexError:
print "Warning Accumulation.get_ydata(channel): Channel index does not exist."
variance_over_n = numpy.zeros((len(self.y[0]),), dtype="Float64")
self.lock.release()
# sample standard deviation / sqrt(n)
return numpy.nan_to_num(numpy.sqrt(variance_over_n))
def get_ydata(self, channel):
"""
return mean data
"""
if not self.contains_data(): return []
self.lock.acquire()
try:
tmp_y = self.y[channel] / self.n
except IndexError:
print "Warning Accumulation.get_ydata(channel): Channel index does not exist."
tmp_y = numpy.zeros((len(self.y[0]),), dtype="Float64")
self.lock.release()
return tmp_y
def get_ymin(self):
if not self.contains_data(): return 0
tmp_min = []
self.lock.acquire()
for i in range(self.get_number_of_channels()):
tmp_min.append(self.get_ydata(i).min())
if self.uses_statistics() and self.ready_for_drawing_error():
for i in range(self.get_number_of_channels()):
tmp_min.append((self.get_ydata(i) - self.get_yerr(i)).min())
self.lock.release()
return min(tmp_min)
def get_ymax(self):
if not self.contains_data(): return 0
tmp_max = []
self.lock.acquire()
for i in range(self.get_number_of_channels()):
tmp_max.append(self.get_ydata(i).max())
if self.uses_statistics() and self.ready_for_drawing_error():
for i in range(self.get_number_of_channels()):
tmp_max.append((self.get_ydata(i) + self.get_yerr(i)).max())
self.lock.release()
return max(tmp_max)
def get_job_id(self):
return None
def write_to_csv(self, destination=sys.stdout, delimiter=" "):
"""
writes the data to a file or to sys.stdout
destination can be a file or a filename
suitable for further processing
"""
the_destination=destination
if type(destination) in types.StringTypes:
the_destination=file(destination, "w")
the_destination.write("# accumulation %d\n"%self.n)
self.lock.acquire()
try:
if self.common_descriptions is not None:
for (key,value) in self.common_descriptions.iteritems():
the_destination.write("# %s : %s\n"%(key, str(value)))
the_destination.write("# t")
ch_no=self.get_number_of_channels()
if self.use_error:
for i in xrange(ch_no): the_destination.write(" ch%d_mean ch%d_err"%(i,i))
else:
for i in xrange(ch_no): the_destination.write(" ch%d_mean"%i)
the_destination.write("\n")
xdata=self.get_xdata()
ydata=map(self.get_ydata, xrange(ch_no))
yerr=None
if self.use_error:
yerr=map(self.get_yerr, xrange(ch_no))
for i in xrange(len(xdata)):
the_destination.write("%e"%xdata[i])
for j in xrange(ch_no):
if self.use_error:
the_destination.write("%s%e%s%e"%(delimiter, ydata[j][i], delimiter, yerr[j][i]))
else:
the_destination.write("%s%e"%(delimiter,ydata[j][i]))
the_destination.write("\n")
the_destination=None
xdata=yerr=ydata=None
finally:
self.lock.release()
def write_to_simpson(self, destination=sys.stdout, delimiter=" "):
"""
writes the data to a text file or sys.stdout in Simpson format,
for further processing with the NMRnotebook software;
destination can be a file or a filename
"""
# write sorted
the_destination=destination
if type(destination) in types.StringTypes:
the_destination=file(destination, "w")
self.lock.acquire()
try:
xdata=self.get_xdata()
the_destination.write("SIMP\n")
the_destination.write("%s%i%s"%("NP=", len(xdata), "\n"))
the_destination.write("%s%i%s"%("SW=", self.get_sampling_rate(), "\n"))
the_destination.write("TYPE=FID\n")
the_destination.write("DATA\n")
ch_no=self.get_number_of_channels()
ydata=map(self.get_ydata, xrange(ch_no))
for i in xrange(len(xdata)):
for j in xrange(ch_no):
the_destination.write("%g%s"%(ydata[j][i], delimiter))
the_destination.write("\n")
the_destination.write("END\n")
the_destination=None
xdata=ydata=None
finally:
self.lock.release()
def write_to_hdf(self, hdffile, where, name, title, complib=None, complevel=None):
accu_group=hdffile.createGroup(where=where,name=name,title=title)
accu_group._v_attrs.damaris_type="Accumulation"
if self.contains_data():
self.lock.acquire()
try:
# save time stamps
if self.time_period is not None and len(self.time_period)>0:
accu_group._v_attrs.earliest_time="%04d%02d%02d %02d:%02d:%02d.%03d"%(self.time_period[0].year,
self.time_period[0].month,
self.time_period[0].day,
self.time_period[0].hour,
self.time_period[0].minute,
self.time_period[0].second,
self.time_period[0].microsecond/1000)
accu_group._v_attrs.oldest_time="%04d%02d%02d %02d:%02d:%02d.%03d"%(self.time_period[1].year,
self.time_period[1].month,
self.time_period[1].day,
self.time_period[1].hour,
self.time_period[1].minute,
self.time_period[1].second,
self.time_period[1].microsecond/1000)
if self.common_descriptions is not None:
for (key,value) in self.common_descriptions.iteritems():
accu_group._v_attrs.__setattr__("description_"+key,str(value))
# save interval information
filter=None
if complib is not None:
if complevel is None:
complevel=9
filter=tables.Filters(complevel=complevel,complib=complib,shuffle=1)
# tried compression filter, but no effect...
index_table=hdffile.createTable(where=accu_group,
name="indices",
description={"start": tables.UInt64Col(),
"length": tables.UInt64Col(),
"start_time": tables.Float64Col(),
"dwelltime": tables.Float64Col(),
"number": tables.UInt64Col()},
title="indices of adc data intervals",
filters=filter,
expectedrows=len(self.index))
index_table.flavor="numpy"
# save interval data
new_row=index_table.row
for i in xrange(len(self.index)):
new_row["start"]=self.index[i][0]
new_row["dwelltime"]=1.0/self.sampling_rate
new_row["start_time"]=1.0/self.sampling_rate*self.index[i][0]
new_row["length"]=self.index[i][1]-self.index[i][0]+1
new_row["number"]=self.n
new_row.append()
index_table.flush()
new_row=None
index_table=None
# prepare saving data
channel_no=len(self.y)
timedata=numpy.empty((len(self.y[0]),channel_no*2), dtype = "Float64")
for ch in xrange(channel_no):
timedata[:,ch*2]=self.get_ydata(ch)
if self.uses_statistics():
timedata[:,ch*2+1]=self.get_yerr(ch)
else:
timedata[:,ch*2+1]=numpy.zeros((len(self.y[0]),),dtype = "Float64")
# save data
time_slice_data=None
if filter is not None:
chunkshape=timedata.shape
if len(chunkshape) <= 1:
chunkshape = (min(chunkshape[0],1024*8),)
else:
chunkshape = (min(chunkshape[0],1024*8), chunkshape[1])
if tables.__version__[0]=="1":
time_slice_data=hdffile.createCArray(accu_group,
name="accu_data",
shape=timedata.shape,
atom=tables.Float64Atom(shape=chunkshape,
flavor="numpy"),
filters=filter,
title="accu data")
else:
time_slice_data=hdffile.createCArray(accu_group,
name="accu_data",
shape=timedata.shape,
chunkshape=chunkshape,
atom=tables.Float64Atom(),
filters=filter,
title="accu data")
time_slice_data[:]=timedata
else:
time_slice_data=hdffile.createArray(accu_group,
name="accu_data",
object=timedata,
title="accu data")
finally:
time_slice_data=None
accu_group=None
self.lock.release()
# / Public interface ------------------------------------------------------------------
# Operator overloading ---------------------------------------------------------------------
def __len__(self):
"""
return number of samples per channel, 0 if empty
"""
if len(self.y)>0:
return len(self.y[0])
return 0
def __repr__(self):
"Redefining repr(Accumulation)"
if not self.contains_data(): return "Empty"
tmp_string = "X: " + repr(self.x) + "\n"
for i in range(self.get_number_of_channels()):
tmp_string += ("Y(%d): " % i) + repr(self.y[i]) + "\n"
if self.uses_statistics(): tmp_string += "y_square(%d): " % i + str(self.y_square[i]) + "\n"
tmp_string += "Indexes: " + str(self.index) + "\n"
tmp_string += "Samples per Channel: " + str(len(self.y[0])) + "\n"
tmp_string += "Samplingfrequency: " + str(self.sampling_rate) + "\n"
tmp_string += "n: " + str(self.n)
return tmp_string
def __add__(self, other):
"Redefining self + other"
# Float or int
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
if not self.contains_data(): raise ValueError("Accumulation: You cant add integers/floats to an empty accumulation")
else:
tmp_y = []
tmp_ysquare = []
self.lock.acquire()
for i in range(self.get_number_of_channels()):
# Don't change errors and mean value
if self.uses_statistics(): tmp_ysquare.append(self.y_square[i] + ( (2*self.y[i]*other) + ((other**2)*self.n) ))
tmp_y.append(self.y[i] + (other*self.n))
if self.uses_statistics():
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = self.use_error)
else:
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = self.use_error)
self.lock.release()
return r
# ADC_Result
elif str(other.__class__) == "damaris.data.ADC_Result.ADC_Result":
# Other empty (return)
# todo: this seems to be buggy!!!! (Achim)
if not other.contains_data(): return
# Self empty (copy)
if not self.contains_data():
tmp_y = []
tmp_ysquare = []
self.lock.acquire()
for i in range(other.get_number_of_channels()):
tmp_y.append(numpy.array(other.y[i], dtype="Float64"))
if self.uses_statistics(): tmp_ysquare.append(tmp_y[i] ** 2)
if self.uses_statistics():
r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = 1, index = other.index, sampl_freq = other.sampling_rate, error = True)
else:
r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, index = other.index, sampl_freq = other.sampling_rate, n = 1, error = False)
r.time_period=[other.job_date,other.job_date]
r.common_descriptions=other.description.copy()
self.lock.release()
return r
# Other and self not empty (self + other)
else:
self.lock.acquire()
if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You can't add ADC-Results with different sampling rates")
if len(self.y[0]) != len(other): raise ValueError("Accumulation: You can't add ADC-Results with a different number of samples")
if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You can't add ADC-Results with a different number of channels")
for i in range(len(self.index)):
if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You can't add ADC-Results with different indexing")
tmp_y = []
tmp_ysquare = []
for i in range(self.get_number_of_channels()):
tmp_y.append(self.y[i] + other.y[i])
if self.uses_statistics(): tmp_ysquare.append(self.y_square[i] + (numpy.array(other.y[i], dtype="Float64") ** 2))
if self.uses_statistics():
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = self.n + 1, index = self.index, sampl_freq = self.sampling_rate, error = True)
else:
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = self.n + 1, index = self.index, sampl_freq = self.sampling_rate, error = False)
r.time_period=[min(self.time_period[0],other.job_date),
max(self.time_period[1],other.job_date)]
if self.common_descriptions is not None:
r.common_descriptions={}
for key in self.common_descriptions.keys():
if (key in other.description and self.common_descriptions[key]==other.description[key]):
r.common_descriptions[key]=self.common_descriptions[key]
self.lock.release()
return r
# Accumulation
elif str(other.__class__) == "damaris.data.Accumulation.Accumulation":
# Other empty (return)
if not other.contains_data(): return self
# Self empty (copy)
if not self.contains_data():
tmp_y = []
tmp_ysquare = []
self.lock.acquire()
if self.uses_statistics():
r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = other.n, index = other.index, sampl_freq = other.sampling_rate, error = True)
else:
r = Accumulation(x = numpy.array(other.x, dtype="Float64"), y = tmp_y, n = other.n, index = other.index, sampl_freq = other.sampling_rate, error = False)
for i in range(other.get_number_of_channels()):
tmp_y.append(other.y[i])
tmp_ysquare.append(other.y_square[i])
r.time_period=other.time_period[:]
if other.common_descriptions is not None:
r.common_descriptions=other.common_descriptions.copy()
else:
r.common_descriptions=None
self.lock.release()
return r
# Other and self not empty (self + other)
else:
self.lock.acquire()
if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You can't add accumulations with different sampling rates")
if len(self.y[0]) != len(other): raise ValueError("Accumulation: You can't add accumulations with a different number of samples")
if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You can't add accumulations with a different number of channels")
for i in range(len(self.index)):
if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You can't add accumulations with different indexing")
if self.uses_statistics() and not other.uses_statistics(): raise ValueError("Accumulation: You can't add non-error accumulations to accumulations with error")
tmp_y = []
tmp_ysquare = []
for i in range(self.get_number_of_channels()):
tmp_y.append(self.y[i] + other.y[i])
tmp_ysquare.append(self.y_square[i] + other.y_square[i])
if self.uses_statistics():
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = tmp_ysquare, n = other.n + self.n, index = self.index, sampl_freq = self.sampling_rate, error = True)
else:
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = other.n + self.n, index = self.index, sampl_freq = self.sampling_rate, error = False)
r.time_period=[min(self.time_period[0],other.time_period[0]),
max(self.time_period[1],other.time_period[1])]
r.common_descriptions={}
if self.common_descriptions is not None and other.common_descriptions is not None:
for key in self.common_descriptions.keys():
if (key in other.common_descriptions and
self.common_descriptions[key]==other.common_descriptions[key]):
r.common_descriptions[key]=self.common_descriptions[key]
self.lock.release()
return r
def __radd__(self, other):
"Redefining other + self"
return self.__add__(other)
def __sub__(self, other):
"Redefining self - other"
return self.__add__(-other)
def __rsub__(self, other):
"Redefining other - self"
return (self.__add__(-other)).__neg__()
def __iadd__(self, other):
"Redefining self += other"
# Float or int
if isinstance(other, types.IntType) or isinstance(other, types.FloatType):
if not self.contains_data(): raise ValueError("Accumulation: You can't add integers/floats to an empty accumulation")
else:
self.lock.acquire()
for i in range(self.get_number_of_channels()):
# Don't change errors and mean value
if self.uses_statistics(): self.y_square[i] += (2*self.y[i]*other) + ((other**2)*self.n)
self.y[i] += other*self.n
self.lock.release()
return self
# ADC_Result
elif str(other.__class__) == "damaris.data.ADC_Result.ADC_Result":
# Other empty (return)
if not other.contains_data(): return self
# Self empty (copy)
if not self.contains_data():
self.lock.acquire()
self.n += 1
self.index = other.index[0:]
self.sampling_rate = other.sampling_rate
self.x = numpy.array(other.x, dtype="Float64")
self.cont_data = True
for i in range(other.get_number_of_channels()):
self.y.append(numpy.array(other.y[i], dtype="Float64"))
if self.uses_statistics(): self.y_square.append(self.y[i] ** 2)
self.set_title(self.__title_pattern % self.n)
self.lock.release()
self.time_period=[other.job_date,other.job_date]
self.common_descriptions=other.description.copy()
return self
# Other and self not empty (self + other)
else:
self.lock.acquire()
if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You can't add ADC-Results with different sampling rates")
if len(self.y[0]) != len(other): raise ValueError("Accumulation: You can't add ADC-Results with a different number of samples")
if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You can't add ADC-Results with a different number of channels")
for i in range(len(self.index)):
if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You can't add ADC-Results with different indexing")
for i in range(self.get_number_of_channels()):
self.y[i] += other.y[i]
if self.uses_statistics(): self.y_square[i] += numpy.array(other.y[i], dtype="Float64") ** 2
self.n += 1
self.time_period=[min(self.time_period[0],other.job_date),
max(self.time_period[1],other.job_date)]
if self.common_descriptions is not None:
for key in self.common_descriptions.keys():
if not (key in other.description and self.common_descriptions[key]==other.description[key]):
del self.common_descriptions[key]
self.set_title(self.__title_pattern % self.n)
self.lock.release()
return self
# Accumulation
elif str(other.__class__) == "damaris.data.Accumulation.Accumulation":
# Other empty (return)
if not other.contains_data(): return self
# Self empty (copy)
if not self.contains_data():
if self.uses_statistics() and not other.uses_statistics(): raise ValueError("Accumulation: You can't add non-error accumulations to accumulations with error")
self.lock.acquire()
self.n += other.n
self.index = other.index[0:]
self.sampling_rate = other.sampling_rate
self.x = numpy.array(other.x, dtype="Float64")
self.cont_data = True
for i in range(other.get_number_of_channels()):
self.y.append(numpy.array(other.y[i], dtype="Float64"))
if self.uses_statistics(): self.y_square.append(self.y[i] ** 2)
self.set_title(self.__title_pattern % self.n)
self.common_descriptions=other.common_descriptions.copy()
self.time_period=other.time_period[:]
self.lock.release()
return self
# Other and self not empty (self + other)
else:
self.lock.acquire()
if self.sampling_rate != other.get_sampling_rate(): raise ValueError("Accumulation: You can't add accumulations with different sampling rates")
if len(self.y[0]) != len(other): raise ValueError("Accumulation: You can't add accumulations with a different number of samples")
if len(self.y) != other.get_number_of_channels(): raise ValueError("Accumulation: You can't add accumulations with a different number of channels")
for i in range(len(self.index)):
if self.index[i] != other.get_index_bounds(i): raise ValueError("Accumulation: You can't add accumulations with different indexing")
if self.uses_statistics() and not other.uses_statistics(): raise ValueError("Accumulation: You can't add non-error accumulations to accumulations with error")
for i in range(self.get_number_of_channels()):
self.y[i] += other.y[i]
if self.uses_statistics(): self.y_square[i] += other.y_square[i]
self.n += other.n
self.time_period=[min(self.time_period[0],other.time_period[0]),
max(self.time_period[1],other.time_period[1])]
if self.common_descriptions is not None and other.common_descriptions is not None:
for key in self.common_descriptions.keys():
if not (key in other.common_descriptions and
self.common_descriptions[key]==other.common_descriptions[key]):
del self.common_descriptions[key]
self.set_title(self.__title_pattern % self.n)
self.lock.release()
return self
elif other is None:
# Convenience: ignore add of None
return self
else:
raise ValueError("can not add "+repr(type(other))+" to Accumulation")
def __isub__(self, other):
"Redefining self -= other"
return self.__iadd__(-other)
def __neg__(self):
"Redefining -self"
if not self.contains_data(): return
tmp_y = []
self.lock.acquire()
for i in range(self.get_number_of_channels()):
tmp_y.append(numpy.array(-self.y[i], dtype="Float64"))
if self.uses_statistics():
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, y_2 = numpy.array(self.y_square), n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = True)
else:
r = Accumulation(x = numpy.array(self.x, dtype="Float64"), y = tmp_y, n = self.n, index = self.index, sampl_freq = self.sampling_rate, error = False)
self.lock.release()
return r
def read_from_hdf(hdf_node):
"""
read accumulation data from HDF node and return it.
"""
# formal checks first
if not isinstance(hdf_node, tables.Group):
return None
if hdf_node._v_attrs.damaris_type!="Accumulation":
return None
if not (hdf_node.__contains__("indices") and hdf_node.__contains__("accu_data")):
print "no accu data"
return None
accu=Accumulation()
# populate description dictionary
accu.common_descriptions={}
for attrname in hdf_node._v_attrs._v_attrnamesuser:
if attrname.startswith("description_"):
accu.common_descriptions[attrname[12:]]=hdf_node._v_attrs.__getattr__(attrname)
earliest_time=None
if "earliest_time" in dir(hdf_node._v_attrs):
timestring=hdf_node._v_attrs.__getattr__("earliest_time")
earliest_time=datetime.datetime(int(timestring[:4]), # year
int(timestring[4:6]), # month
int(timestring[6:8]), # day
int(timestring[9:11]), # hour
int(timestring[12:14]), # minute
int(timestring[15:17]), # second
int(timestring[18:21])*1000 # microsecond
)
oldest_time=None
if "oldest_time" in dir(hdf_node._v_attrs):
timestring=hdf_node._v_attrs.__getattr__("oldest_time")
oldest_time=datetime.datetime(int(timestring[:4]), # year
int(timestring[4:6]), # month
int(timestring[6:8]), # day
int(timestring[9:11]), # hour
int(timestring[12:14]), # minute
int(timestring[15:17]), # second
int(timestring[18:21])*1000 # microsecond
)
if oldest_time is None or earliest_time is None:
accu.time_period=None
if len(accu.common_descriptions)==0:
# no accus inside, so no common description expected
accu.common_descriptions=None
accu.cont_data=False
else:
accu.time_period=[oldest_time, earliest_time]
accu.cont_data=True
# start with indices
for r in hdf_node.indices.iterrows():
accu.index.append((r["start"],r["start"]+r["length"]-1))
accu.n=r["number"]
accu.sampling_rate=1.0/r["dwelltime"]
# now we can really believe there are no data
if len(accu.index)==0 or accu.n==0:
accu.cont_data=False
return accu
# now do the real data
accu_data=hdf_node.accu_data.read()
accu.x=numpy.arange(accu_data.shape[0], dtype="Float64")/accu.sampling_rate
# assume error information, todo: save this information explicitly
accu.y_square=[]
accu.use_error=False
for ch in xrange(accu_data.shape[1]/2):
accu.y.append(accu_data[:,ch*2]*accu.n)
if accu.n<2 or numpy.all(accu_data[:,ch*2+1]==0.0):
accu.y_square.append(numpy.zeros((accu_data.shape[0]) ,dtype="Float64"))
else:
accu.use_error=True
accu.y_square.append((accu_data[:,ch*2+1]**2)*float((accu.n-1.0)*accu.n)+(accu_data[:,ch*2]**2)*accu.n)
if not accu.use_error:
del accu.y_square
return accu
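# Usage sketch (not runnable on its own): the operator overloads above are the
# main interface in result scripts. "adc_results" is a hypothetical iterable of
# ADC_Result objects delivered by the backend.
#
#   accu = Accumulation(error=True)   # keep sums of squares for error estimates
#   for adc_res in adc_results:
#       accu += adc_res               # checks sampling rate, length, channels, indexing
#   print "scans accumulated:", accu.n
#   print "samples per channel:", len(accu)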

54
src/data/Config_Result.py Normal file
View File

@ -0,0 +1,54 @@
# -*- coding: iso-8859-1 -*-
from Resultable import Resultable
#############################################################################
# #
# Name: Class Config_Result #
# #
# Purpose: Specialised class of Resultable #
# Contains configuration information from the core #
# #
#############################################################################
class Config_Result(Resultable):
def __init__(self, config = None, desc = None, job_id = None, job_date = None):
Resultable.__init__(self)
if config is None: config = { }
if desc is None: desc = { }
self.config = config
self.description = desc
self.job_id = job_id
self.job_date = job_date
def get_config_dictionary(self):
return self.config
def set_config_dictionary(self, config):
self.config = config
def get_config(self, key):
if self.config.has_key(key): return self.config[key]
else: return None
def set_config(self, key, value):
if self.config.has_key(key):
print "Warning Config_Result: Key \"%s\" will be overwritten with \"%s\"" % (key, value)
self.config[key] = value
# Overloading operators and built-ins -----------------------------------------------------------
def __repr__(self):
return str(self.config)
def __str__(self):
return str(self.config)

211
src/data/DaFFT.py Normal file
View File

@ -0,0 +1,211 @@
import warnings
# enable warnings in Python 2.7
warnings.simplefilter('default')
warnings.warn("use of DaFFT is deprecated, please use the methods of Accumulation and ADC_data classes provided by DamarisFFT.", DeprecationWarning)
import numpy as N
import numpy.fft as F
class FFT:
def __init__(self, one_result):
# create copy of one_result and work only on the copy
# also extract some informations
self.the_result = one_result + 0
self.timepoints = N.array(one_result.x)
self.sampling_rate = one_result.get_sampling_rate()
self.data_points = one_result.get_ydata(0).size
self.aquisition_time = self.data_points / float(self.sampling_rate)
self.the_result.set_xlabel('Frequency [Hz]')
def write_n(self, afile):
filename = open(afile,'w')
#print self.the_result.get_description_dictionary()
#filename.write('%s'%self.get_description_dictionary())
for i in range(self.data_points):
filename.write('%e\t%e\t%e\n'%(self.the_result.x[i], self.the_result.y[0][i], self.the_result.y[1][i]))
filename.close()
return self
def base_corr(self, cutoff=0.3, show=0):
"""
Subtracts the mean of the last cutoff fraction of the timesignal
to get rid of the DC part in the FFT and returns the
new data.
If cutoff is not given, the mean of the last 30% will be
subtracted.
If show=1 the result is returned instead of the instance. This allows plotting the baseline-corrected signal.
Example:
base_corr(cutoff=0.2, show=1)
"""
last_points = int(cutoff*self.data_points)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i] - self.the_result.y[i][-last_points:].mean()
if show == 1 :
return self.the_result
return self
def abs_fft(self, points=None, zoom=None,write = 'off'):
"""
Fourier transforms the timesignal;
points is the number of points to transform; if more points are given
than data points, the rest is zero-padded
abs_fft(points=4096)
"""
realdata = N.array(self.the_result.y[0])
imdata = N.array(self.the_result.y[1])
data = realdata + 1j*imdata
fftdata = F.fftshift(F.fft(data, points))
absfft = N.sqrt(fftdata.real**2 + fftdata.imag**2)
# create our x axis
n = fftdata.size
self.the_result.x = F.fftshift(F.fftfreq(n, 1.0/self.sampling_rate))
self.the_result.y[0] = absfft
self.the_result.y[1] = N.zeros(n)
if write == 'on':
return self
else:
if zoom is None:
return self.the_result
else:
center, width = zoom
return self.zoom(self.the_result, center, width)
def fft(self, points=None, zoom=None, write='off'):
realdata = N.array(self.the_result.y[0])
imdata = N.array(self.the_result.y[1])
data = realdata + 1j*imdata
fftdata = F.fftshift(F.fft(data, points))
# create our x axis
n = fftdata.size
self.the_result.x = F.fftshift(F.fftfreq(n, 1.0/self.sampling_rate))
self.the_result.y[0] = fftdata.real
self.the_result.y[1] = fftdata.imag
if write == 'on':
return self
else:
if zoom is None:
return self.the_result
else:
center, width = zoom
return self.zoom(self.the_result, center, width)
def zoom(self,some_result, center="auto", width=1000):
if center == "auto":
i_center = int(self.the_result.y[0].argmax())
maximum = self.the_result.y[0][i_center]
print "Maximum at Frequency:", self.the_result.x[i_center]
else:
i_center = int(self.data_points/2.0+self.data_points*center/self.sampling_rate)
#print "TODO: set width automagically"
#if width == "auto":
# i_width = int(self.data_points*width)
i_width = int(self.data_points*width/self.sampling_rate)
some_result.x=some_result.x[i_center-i_width/2:i_center+i_width/2]
some_result.y[0]=some_result.y[0][i_center-i_width/2:i_center+i_width/2]
some_result.y[1]=some_result.y[1][i_center-i_width/2:i_center+i_width/2]
return some_result
"""
Apodization functions:
* exp_window and gauss_window are S/N enhancing,
* dexp_window and traf_window are resolution enhancing
* standard windows [hamming, hanning, bartlett, blackman, kaiser-bessel] are also available
self.timepoints = time points
self.aquisition_time = acquisition time (no. samples / sampling_rate)
line_broadening = line broadening factor (standard = 10 Hz)
gaussian_multiplicator = Gaussian Multiplication Factor for
the double exponential apodization
function (standard = 0.3)
"""
def exp_window(self, line_broadening=10, show=0):
apod = N.exp(-self.timepoints*line_broadening)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1 :
return self.the_result
return self
def gauss_window(self, line_broadening=10, show=0):
apod = N.exp(-(self.timepoints*line_broadening)**2)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1 :
return self.the_result
return self
def dexp_window(self, line_broadening=10, gaussian_multiplicator=0.3, show=0):
apod = N.exp(-(self.timepoints*line_broadening - gaussian_multiplicator*self.aquisition_time)**2)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self
def traf_window(self, line_broadening=10, show=0):
apod = (N.exp(-self.timepoints*line_broadening))**2 / ( (N.exp(-self.timepoints*line_broadening))**3
+ (N.exp(-self.aquisition_time*line_broadening))**3 )
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self
def hanning_window(self, show=0):
apod = N.hanning(self.data_points)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self
def hamming_window(self, show=0):
apod = N.hamming(self.data_points)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self
def blackman_window(self, show=0):
apod = N.blackman(self.data_points)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self
def bartlett_window(self, show=0):
apod = N.bartlett(self.data_points)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self
def kaiser_window(self, beta=4, show=0, use_scipy=None):
if use_scipy == None:
# modified Bessel function of the first kind, order zero
def I_0(x):
i0=0
fac = lambda n:reduce(lambda a,b:a*(b+1),range(n),1)
for n in range(20):
i0 += ((x/2.0)**n/(fac(n)))**2
return i0
t = N.arange(self.data_points, dtype="Float64") - self.data_points/2.0
T = self.data_points
# this is the window function array
apod = I_0(beta*N.sqrt(1-(2*t/T)**2))/I_0(beta)
else:
# alternative method using scipy
import scipy
apod=scipy.kaiser(self.data_points, beta)
for i in range(2):
self.the_result.y[i] = self.the_result.y[i]*apod
if show == 1:
return self.the_result
return self

196
src/data/DamarisFFT.py Normal file
View File

@ -0,0 +1,196 @@
import numpy
import sys
import autophase
class DamarisFFT:
def clip(self, start=None, stop=None):
"""
Method for clipping data, only the timesignal between start and stop
is returned.
start and stop can be either time or frequency. The unit is automatically determined
"""
# check that start and stop are properly ordered
if start > stop:
# I could swap start/stop actually
# TODO swap values?
raise ValueError("clip: start must not be larger than stop")
# if one uses clip as a "placeholder"
if start==None and stop==None:
return self
if start==None:
start = 0
if stop==None:
stop = -1
# check if data is fft which changes the start/stop units
# TODO should get nicer(failsafe), i.e. flags in the object?
if self.xlabel == "Frequency / Hz":
isfft = True
start = self.x.size*(0.5 + start/self.sampling_rate)
stop = self.x.size*(0.5 + stop/self.sampling_rate)
else:
isfft = False
# get the corresponding indices
start *= self.sampling_rate
stop *= self.sampling_rate
# check if boundaries make sense, raise exception otherwise
if numpy.abs(int(start)-int(stop))<=0:
raise ValueError("start stop too close: There are no values in the given boundaries!")
for ch in xrange(len(self.y)):
# clip the data for each channel
# TODO multi records
self.y[ch] = self.y[ch][int(start):int(stop)]
# TODO what to do with x? Should it start from 0 or from start?
# self.x = self.x[:int(stop)-int(start)]
self.x = self.x[int(start):int(stop)]
return self
def baseline(self, last_part=0.1):
"""
Correct the baseline of your data by subtracting the mean of the
last_part fraction of your data.
last_part defaults to 0.1, i.e. last 10% of your data
"""
# TODO baselinecorrection for spectra after:
# Heuer, A; Haeberlen, U.: J. Mag. Res.(1989) 85, Is 1, 79-94
# Should I create an empty object?
# I decided NOT to make a copy, but
# rather modify the object in place
n = int(self.x.size*last_part)
for ch in xrange(len(self.y)):
self.y[ch] -= self.y[ch][-n:].mean()
# Skip the following due to design reasons
# new_object.was_copied = True
return self
"""
Apodization functions:
* exp_window and gauss_window are S/N enhancing,
* dexp_window and traf_window are resolution enhancing
* standard windows [hamming, hanning, bartlett, blackman, kaiser-bessel]
are also available
self.x = time points
self.aquisition_time = acquisition time (no. samples / sampling_rate)
line_broadening = line broadening factor (standard = 10 Hz)
gaussian_multiplicator = Gaussian Multiplication Factor for
the double exponential apodization
function (standard = 0.3)
"""
def exp_window(self, line_broadening=10):
"""
exponential window
"""
apod = numpy.exp(-self.x*numpy.pi*line_broadening)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def gauss_window(self, line_broadening=10):
apod = numpy.exp(-(self.x*line_broadening)**2)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def dexp_window(self, line_broadening=10, gaussian_multiplicator=0.3):
apod = numpy.exp(-(self.x*line_broadening - gaussian_multiplicator*self.x.max())**2)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def traf_window(self, line_broadening=10):
apod = (numpy.exp(-self.x*line_broadening))**2 / ( (numpy.exp(-self.x*line_broadening))**3
+ (numpy.exp(-self.x.max()*line_broadening))**3 )
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def hanning_window(self):
apod = numpy.hanning(self.x.size)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def hamming_window(self):
apod = numpy.hamming(self.x.size)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def blackman_window(self):
apod = numpy.blackman(self.x.size)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def bartlett_window(self):
apod = numpy.bartlett(self.x.size)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def kaiser_window(self, beta=4, use_scipy=None):
if use_scipy == None:
# modified Bessel function of the first kind, order zero
def I_0(x):
i0=0
fac = lambda n:reduce(lambda a,b:a*(b+1),range(n),1)
for n in xrange(20):
i0 += ((x/2.0)**n/(fac(n)))**2
return i0
t = numpy.arange(self.x.size, dtype="Float64") - self.x.size/2.0
T = self.x.size
# this is the window function array
apod = I_0(beta*numpy.sqrt(1-(2*t/T)**2))/I_0(beta)
else:
# alternative method using scipy
import scipy
apod=scipy.kaiser(self.x.size, beta)
for i in range(2):
self.y[i] = self.y[i]*apod
return self
def autophase(self):
"""
works nicely with an SNR above 20 dB
(10 V signal height to 1 V noise width)
"""
autophase.get_phase(self)
return self
def fft(self, samples=None):
"""
Fourier transform the timesignal in place.
For "zerofilling" set "samples" to a value higher than your data length.
Shorten "samples" to truncate your data.
samples takes only integer values
"""
# Is this smart performance wise? Should I create an empty object?
# Tests showed that this try except block performed 3.78ms
# timesignal.baseline().fft()
# without this it needed 4.41 ms, thus this is justified :-)
#try:
# if self.was_copied:
# new_object = self
#except:
# new_object = self+0
fft_of_signal = numpy.fft.fft(self.y[0] + 1j*self.y[1], n=samples)
fft_of_signal = numpy.fft.fftshift(fft_of_signal)
dwell = 1.0/self.sampling_rate
n = fft_of_signal.size
fft_frequencies = numpy.fft.fftfreq(n, dwell)
self.x = numpy.fft.fftshift(fft_frequencies)
self.y[0] = fft_of_signal.real
self.y[1] = fft_of_signal.imag
self.set_xlabel("Frequency / Hz")
return self
def magnitude(self):
# this should calculate the absolute value, and set the imag channel to zero
self.y[0] = numpy.sqrt(self.y[0]**2 + self.y[1]**2)
self.y[1] *= 0 #self.y[0].copy()
return self
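# A sketch of the intended call chain: every method above mutates the object in
# place and returns self, so processing steps compose. "TimeSignal" is a
# hypothetical stand-in for the data classes (ADC_Result, Accumulation) that
# mix in DamarisFFT; the signal and all numbers are made up.
class TimeSignal(DamarisFFT):
    def __init__(self):
        self.sampling_rate = 1e6
        self.x = numpy.arange(1024)/self.sampling_rate
        self.y = [numpy.exp(-self.x*2e3), numpy.zeros(1024)]
        self.xlabel = "time"
    def set_xlabel(self, label):
        self.xlabel = label

spectrum = TimeSignal().baseline().exp_window(line_broadening=20).fft(samples=2048).magnitude()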

177
src/data/DataPool.py Normal file
View File

@ -0,0 +1,177 @@
# data pool collects data from data handling script
# provides data to experiment script and display
import sys
import types
import tables
import UserDict
import threading
import traceback
import StringIO
import ADC_Result
import Accumulation
import MeasurementResult
class DataPool(UserDict.DictMixin):
"""
dictionary with sending change events
"""
# supports translation from dictionary keys to pytables hdf node names
# taken from: Python Ref Manual Section 2.3: Identifiers and keywords
# things are always prefixed by "dir_" or "dict_"
translation_table=""
for i in xrange(256):
c=chr(i)
if (c>="a" and c<="z") or \
(c>="A" and c<="Z") or \
(c>="0" and c<="9"):
translation_table+=c
else:
translation_table+="_"
class Event:
access=0
updated_value=1
new_key=2
deleted_key=3
destroy=4
def __init__(self, what, subject="", origin=None):
self.what=what
self.subject=subject
self.origin=origin
def __repr__(self):
return "<DataPool.Event origin=%s what=%d subject='%s'>"%(self.origin, self.what,self.subject)
def copy(self):
return DataPool.Event(self.what+0, self.subject+"", self.origin)
def __init__(self):
self.__mydict={}
self.__dictlock=threading.Lock()
self.__registered_listeners=[]
def __getitem__(self, name):
try:
self.__dictlock.acquire()
return self.__mydict[name]
finally:
self.__dictlock.release()
def __setitem__(self, name, value):
try:
self.__dictlock.acquire()
if name in self.__mydict:
e=DataPool.Event(DataPool.Event.updated_value,name,self)
else:
e=DataPool.Event(DataPool.Event.new_key, name,self)
self.__mydict[name]=value
finally:
self.__dictlock.release()
self.__send_event(e)
def __delitem__(self, name):
try:
self.__dictlock.acquire()
del self.__mydict[name]
finally:
self.__dictlock.release()
self.__send_event(DataPool.Event(DataPool.Event.deleted_key,name,self))
def keys(self):
try:
self.__dictlock.acquire()
return self.__mydict.keys()
finally:
self.__dictlock.release()
def __send_event(self, _event):
for l in self.__registered_listeners:
l(_event.copy())
def __del__(self):
self.__send_event(DataPool.Event(DataPool.Event.destroy))
self.__registered_listeners=None
def write_hdf5(self,hdffile,where="/",name="data_pool", complib=None, complevel=None):
if type(hdffile) is types.StringType:
dump_file=tables.openFile(hdffile, mode="a")
elif isinstance(hdffile,tables.File):
dump_file=hdffile
else:
raise Exception("expecting hdffile or string")
dump_group=dump_file.createGroup(where, name, "DAMARIS data pool")
self.__dictlock.acquire()
dict_keys=self.__mydict.keys()
self.__dictlock.release()
try:
for key in dict_keys:
if key[:2]=="__": continue
dump_dir=dump_group
# walk along the given path and create groups if necessary
namelist = key.split("/")
for part in namelist[:-1]:
dir_part="dir_"+str(part).translate(DataPool.translation_table)
if not dir_part in dump_dir:
dump_dir=dump_file.createGroup(dump_dir,name=dir_part,title=part)
else:
if dump_dir._v_children[dir_part]._v_title==part:
dump_dir=dump_dir._v_children[dir_part]
else:
extension_count=0
while dir_part+"_%03d"%extension_count in dump_dir:
extension_count+=1
dump_dir=dump_file.createGroup(dump_dir,
name=dir_part+"_%03d"%extension_count,
title=part)
# convert last part of key to a valid name
group_keyname="dict_"+str(namelist[-1]).translate(DataPool.translation_table)
# avoid double names by adding number extension
if group_keyname in dump_dir:
extension_count=0
while group_keyname+"_%03d"%extension_count in dump_dir:
extension_count+=1
group_keyname+="_%03d"%extension_count
self.__dictlock.acquire()
if key not in self.__mydict:
# outdated ...
self.__dictlock.release()
continue
value=self.__mydict[key]
self.__dictlock.release()
# now write data, assuming, the object is constant during write operation
if "write_to_hdf" in dir(value):
try:
value.write_to_hdf(hdffile=dump_file,
where=dump_dir,
name=group_keyname,
title=key,
complib=complib,
complevel=complevel)
except Exception,e:
print "failed to write data_pool[\"%s\"]: %s"%(key,str(e))
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
print "detailed traceback: %s\n"%str(e)+traceback_file.getvalue()
traceback_file=None
else:
print "don't know how to store data_pool[\"%s\"]"%key
value=None
finally:
dump_group=None
if type(hdffile) is types.StringType:
dump_file.close()
dump_file=None
def register_listener(self, listening_function):
self.__registered_listeners.append(listening_function)
def unregister_listener(self, listening_function):
if listening_function in self.__registered_listeners:
self.__registered_listeners.remove(listening_function)
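# Usage sketch: the pool behaves like a dictionary that notifies listeners on
# every change; "/" in keys becomes nested HDF5 groups in write_hdf5(). The key
# name and file name below are made up.
pool = DataPool()

def on_change(event):
    print "pool event:", event

pool.register_listener(on_change)
pool["spectra/accu"] = Accumulation.Accumulation()   # fires a new_key event
pool.write_hdf5("session.h5", complib="zlib", complevel=5)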

192
src/data/Drawable.py Normal file
View File

@ -0,0 +1,192 @@
# -*- coding: iso-8859-1 -*-
import threading
#############################################################################
# #
# Name: Class Drawable #
# #
# Purpose: Base class of everything plottable #
# #
#############################################################################
class Drawable:
def __init__(self):
# Will be set correctly in one of the subclasses
self.x = []
self.y = []
self.styles = { }
self.xlabel = None
self.ylabel = None
self.title = None
self.legend = { }
self.text = {}
self.xmin = 0
self.xmax = 0
self.ymin = 0
self.ymax = 0
def get_xdata(self):
"Returns a reference to the x-Plotdata (array)"
return self.x
def set_xdata(self, pos, value):
"Sets a point in x"
try:
self.x[pos] = value
except:
raise
def get_ydata(self, channel):
"Returns the y-Plotdata of channel n (array)"
try:
return self.y[channel]
except:
raise
def set_ydata(self, channel, pos, value):
"Sets a point in y"
try:
self.y[channel][pos] = value
except:
raise
def get_number_of_channels(self):
"Returns the number of channels in y"
return len(self.y)
def get_style(self):
"Returns a reference to plot-styles (dictionary)"
return self.styles
def set_style(self, channel, value):
"Sets a channel to a certain plot-style"
if self.styles.has_key(channel):
print "Drawable Warning: Style key \"%s\" will be overwritten with \"%s\"" % (str(channel), str(value))
self.styles[channel] = str(value)
def get_xlabel(self):
"Returns the label for the x-axis"
return self.xlabel
def set_xlabel(self, label):
"Sets the label for the x-axis"
self.xlabel = str(label)
def get_ylabel(self):
"Gets the label for the y-axis"
return self.ylabel
def set_ylabel(self, label):
"Sets the label for the y-axis"
self.ylabel = str(label)
def get_text(self, index):
"Returns labels to be plotted (List)"
if self.text.has_key(index):
return self.text[index]
else: return None
def set_text(self, index, text):
"Sets labels to be plotted "
self.text[index] = str(text)
def get_title(self):
"Returns the title of the plot"
return self.title
def set_title(self, title):
"Sets the title of the plot"
self.title = str(title)
def get_legend(self):
"Returns the legend of the plot (Dictionary)"
return self.legend
def set_legend(self, channel, value):
"Sets the legend of the plot"
if self.legend.has_key(channel):
print "Drawable Warning: Legend key \"%s\" will be overwritten with \"%s\"" % (str(channel), str(value))
self.legend[channel] = str(value)
def get_xmin(self):
"Returns minimun of x"
return self.x.min()
def set_xmin(self, xmin):
"Sets minimum of x"
self.xmin = xmin
def get_xmax(self):
"Returns maximum of x"
return self.x.max()
def set_xmax(self, xmax):
"Sets maximum of x"
self.xmax = xmax
def get_ymin(self):
"Returns minimum of y"
if type(self.y)==type([]):
return min(map(lambda l:l.min(),self.y))
else:
return self.y.min()
def set_ymin(self, ymin):
"Sets minimum of y"
self.ymin = ymin
def get_ymax(self):
"Returns maximimum of y"
if type(self.y)==type([]):
return max(map(lambda l:l.max(),self.y))
else:
return self.y.max()
def set_ymax(self, ymax):
"Sets maximum of y"
self.ymax = ymax
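# A sketch of how a plotting front end is expected to use this interface; the
# subclass name and the numeric data are made up, real subclasses (ADC_Result,
# Accumulation, ...) fill x and y themselves.
class Demo(Drawable):
    pass

d = Demo()
d.x = [0.0, 1.0, 2.0]
d.y = [[0.0, 1.0, 4.0]]
d.set_title("demo data")
d.set_xlabel("time")
d.set_style(0, "b-")             # channel 0: solid blue line
d.set_legend(0, "channel 0")
print d.get_number_of_channels(), d.get_title()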

71
src/data/Error_Result.py Normal file
View File

@ -0,0 +1,71 @@
# -*- coding: iso-8859-1 -*-
from Resultable import Resultable
from Drawable import Drawable
#############################################################################
# #
# Name: Class Error_Result #
# #
# Purpose: Specialised class of Resultable and Drawable #
# Contains occurred error messages from the core #
# #
#############################################################################
class Error_Result(Resultable, Drawable):
def __init__(self, error_msg = None, desc = None, job_id = None, job_date = None):
Resultable.__init__(self)
Drawable.__init__(self)
if error_msg is not None:
self.error_message = error_msg
self.set_title("Error-Result: %s" % error_msg)
else:
self.error_message = error_msg
if desc is None: desc = { }
self.description = desc
self.job_id = job_id
self.job_date = job_date
def get_error_message(self):
return self.error_message
def set_error_message(self, error_msg):
self.set_title("Error-Result: %s" % error_msg)
self.error_message = error_msg
# No statistics
def uses_statistics(self):
return False
# Nothing to plot
def get_ydata(self):
return [0.0]
# Nothing to plot
def get_xdata(self):
return [0.0]
# Overloading operators and built-ins -----------------------------------------------------------
def __repr__(self):
tmp_string = "Core error-message: %s" % self.error_message
return tmp_string
def __len__(self):
return len(self.error_message)
def __str__(self):
return self.error_message
# Preventing an error when adding something to an error-result (needed for plotting error-results)
def __add__(self, other):
return self

76
src/data/Errorable.py Normal file
View File

@ -0,0 +1,76 @@
# -*- coding: iso-8859-1 -*-
#############################################################################
# #
# Name: Class Errorable #
# #
# Purpose: Base class for everything that can carry a statistical error #
# #
#############################################################################
class Errorable:
def __init__(self):
# Will be determined in one of the subclasses
self.xerr = []
self.yerr = []
self.error_color = ""
self.bars_above = False
self.n = 0
def get_xerr(self):
"Returns a reference to x-Error (array)"
return self.xerr
def set_xerr(self, pos, value):
"Sets a point in x-Error"
try:
self.xerr[pos] = value
except:
raise
def get_yerr(self, channel):
"Returns a list of y-Errors (list of arrays, corresponding channels)"
try:
return self.yerr[channel]
except:
raise
def set_yerr(self, channel, pos, value):
"Sets a point in y-Error"
try:
self.yerr[channel][pos] = value
except:
raise
def get_error_color(self):
"Returns the error-bar color"
return self.error_color
def set_error_color(self, color):
"Sets the error-bar color"
self.error_color = color
def get_bars_above(self):
"Gets bars-above property of errorplot"
return self.bars_above
def set_bars_above(self, bars_above):
"Sets bars-above property of errorplot"
self.bars_above = bool(bars_above)
def ready_for_drawing_error(self):
"Returns true if more than one result have been accumulated"
if self.n >= 2: return True
else: return False

291
src/data/MeasurementResult.py Normal file
View File

@ -0,0 +1,291 @@
import threading
import math
import types
import sys
import tables
import numpy
import exceptions
import UserDict
import Drawable
## provide gaussian statistics for a series of measured data points
#
# AccumulatedValue provides mean and error of mean after being fed with measured data
# internally it keeps the sum, the sum of squares and the number of data points
class AccumulatedValue:
def __init__(self, mean=None, mean_err=None, n=None):
"""
one value with std. deviation
can be initialized by:
No argument: no entries
one argument: first entry
two arguments: mean and its error, n is set to 2
three arguments: already existing statistics defined by mean, mean's error, n
"""
if mean is None:
self.y=0.0
self.y2=0.0
self.n=0
elif mean_err is None and n is None:
self.y=float(mean)
self.y2=self.y**2
self.n=1
elif mean_err is None:
self.n=max(1, int(n))
self.y=float(mean)*self.n
self.y2=(float(mean)**2)*self.n
elif n is None:
self.n=2
self.y=float(mean)*2
self.y2=(float(mean_err)**2+float(mean)**2)*2
else:
self.n=int(n)
self.y=float(mean)*self.n
self.y2=float(mean_err)**2*n*(n-1.0)+float(mean)**2*n
def __add__(self,y):
new_one=AccumulatedValue()
if (type(y) is types.InstanceType and isinstance(y, AccumulatedValue)):
new_one.y=self.y+y.y
new_one.y2=self.y2+y.y2
new_one.n=self.n+y.n
else:
new_one.y=self.y+float(y)
new_one.y2=self.y2+float(y)**2
new_one.n=self.n+1
return new_one
def __iadd__(self,y):
if (type(y) is types.InstanceType and isinstance(y, AccumulatedValue)):
self.y+=y.y
self.y2+=y.y2
self.n+=y.n
else:
self.y+=float(y)
self.y2+=float(y)**2
self.n+=1
return self
def copy(self):
a=AccumulatedValue()
a.y=self.y
a.y2=self.y2
a.n=self.n
return a
def mean(self):
"""
returns the mean of all added/accumulated values
"""
if self.n is None or self.n==0:
return None
else:
return self.y/self.n
def sigma(self):
"""
returns the standard deviation of all added/accumulated values
"""
if self.n>1:
variance=(self.y2-(self.y**2)/float(self.n))/(self.n-1.0)
if variance<0:
if variance<-1e-20:
print "variance=%g<0! assuming 0"%variance
return 0.0
return math.sqrt(variance)
elif self.n==1:
return 0.0
else:
return None
def mean_error(self):
"""
returns the mean's error (=std.dev/sqrt(n)) of all added/accumulated values
"""
if self.n>1:
variance=(self.y2-(self.y**2)/float(self.n))/(self.n-1.0)
if variance<0:
if variance<-1e-20:
print "variance=%g<0! assuming 0"%variance
return 0.0
return math.sqrt(variance/self.n)
elif self.n==1:
return 0.0
else:
return None
def __str__(self):
if self.n==0:
return "no value"
elif self.n==1:
return str(self.y)
else:
return "%g +/- %g (%d accumulations)"%(self.mean(),self.mean_error(),self.n)
def __repr__(self):
return str(self)
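# A quick worked example of the statistics kept above (sums y, y2 and count n):
_demo = AccumulatedValue()
for _v in [1.0, 2.0, 3.0]:
    _demo += _v                 # updates sum, sum of squares and count
print _demo.mean()              # 2.0
print _demo.sigma()             # 1.0 (sample standard deviation)
print _demo.mean_error()        # 1/sqrt(3), approximately 0.577
print _demo                     # 2 +/- 0.57735 (3 accumulations)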
class MeasurementResult(Drawable.Drawable, UserDict.UserDict):
def __init__(self, quantity_name):
"""
convenient accumulation and interface to plot functions
dictionary must not contain anything but AccumulatedValue instances
"""
Drawable.Drawable.__init__(self)
UserDict.UserDict.__init__(self)
self.quantity_name=quantity_name
self.lock=threading.RLock()
# get the selected item, if it does not exist, create an empty one
def __getitem__(self, key):
if key not in self:
a=AccumulatedValue()
self.data[float(key)]=a
return a
else:
return self.data[float(key)]
def __setitem__(self,key,value):
if not (type(value) is types.InstanceType and isinstance(value, AccumulatedValue)):
value=AccumulatedValue(float(value))
return UserDict.UserDict.__setitem__(self,
float(key),
value)
def __add__(self, right_value):
if right_value==0:
return self.copy()
else:
raise Exception("not implemented")
def get_title(self):
return self.quantity_name
def get_xdata(self):
"""
sorted array of all dictionary keys, skipping AccumulatedValue entries with n==0
"""
keys=numpy.array(filter(lambda k: not (isinstance(self.data[k], AccumulatedValue) and self.data[k].n==0), self.data.keys()),
dtype="Float64")
keys.sort()
return keys
def get_ydata(self):
return self.get_xydata()[1]
def get_xydata(self):
k=self.get_xdata()
v=numpy.array(map(lambda key: self.data[key].mean(), k), dtype="Float64")
return [k,v]
def get_errorplotdata(self):
k=self.get_xdata()
v=numpy.array(map(lambda key: self.data[key].mean(), k), dtype="Float64")
e=numpy.array(map(lambda key: self.data[key].mean_error(), k), dtype="Float64")
return [k,v,e]
def get_lineplotdata(self):
k=self.get_xdata()
v=numpy.array(self.y, dtype="Float64")
return [k, v]
def uses_statistics(self):
"""
drawable interface method, returns True
"""
return True
def write_to_csv(self,destination=sys.stdout, delimiter=" "):
"""
writes the data to a file or to sys.stdout
destination can be a file or a filename
suitable for further processing
"""
# write sorted
the_destination=destination
if type(destination) in types.StringTypes:
the_destination=file(destination, "w")
the_destination.write("# quantity:"+str(self.quantity_name)+"\n")
the_destination.write("# x y ysigma n\n")
for x in self.get_xdata():
y=self.data[x]
if type(y) in [types.FloatType, types.IntType, types.LongType]:
the_destination.write("%e%s%e%s0%s1\n"%(x, delimiter, y, delimiter, delimiter))
else:
the_destination.write("%e%s%e%s%e%s%d\n"%(x,
delimiter,
y.mean(),
delimiter,
y.mean_error(),
delimiter,
y.n))
the_destination=None
def write_to_hdf(self, hdffile, where, name, title, complib=None, complevel=None):
h5_table_format= {
"x" : tables.Float64Col(),
"y" : tables.Float64Col(),
"y_err" : tables.Float64Col(),
"n" : tables.Int64Col()
}
filter=None
if complib is not None:
if complevel is None:
complevel=9
filter=tables.Filters(complevel=complevel,complib=complib,shuffle=1)
mr_table=hdffile.createTable(where=where,name=name,
description=h5_table_format,
title=title,
filters=filter,
expectedrows=len(self))
mr_table.flavor="numpy"
mr_table.attrs.damaris_type="MeasurementResult"
self.lock.acquire()
try:
mr_table.attrs.quantity_name=self.quantity_name
row=mr_table.row
xdata=self.get_xdata()
if xdata.shape[0]!=0:
for x in self.get_xdata():
y=self.data[x]
row["x"]=x
if type(y) in [types.FloatType, types.IntType, types.LongType]:
row["y"]=y
row["y_err"]=0.0
row["n"]=1
else:
row["y"]=y.mean()
row["y_err"]=y.mean_error()
row["n"]=y.n
row.append()
finally:
mr_table.flush()
self.lock.release()
def read_from_hdf(hdf_node):
"""
reads a MeasurementResult object from the hdf_node
or None if the node is not suitable
"""
if not isinstance(hdf_node, tables.Table):
return None
if hdf_node._v_attrs.damaris_type!="MeasurementResult":
return None
mr=MeasurementResult(hdf_node._v_attrs.quantity_name)
for r in hdf_node.iterrows():
mr[r["x"]]=AccumulatedValue(r["y"],r["y_err"],r["n"])
return mr
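# Usage sketch: keys are x values; __getitem__ creates an empty
# AccumulatedValue on demand, so results accumulate point by point. The
# quantity name and numbers are made up.
t1 = MeasurementResult("T1 [s]")
t1[0.05] += 1.23                # first scan at x=0.05
t1[0.05] += 1.27                # second scan: mean/error statistics build up
t1[0.10] = 1.50                 # plain numbers are wrapped in AccumulatedValue
xdata, ydata, yerr = t1.get_errorplotdata()
t1.write_to_csv(sys.stdout)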

29
src/data/Persistance.py Normal file
View File

@ -0,0 +1,29 @@
class Persistance:
def __init__(self, shots):
self.shots = shots
self.accu = 0
self.counter = 0
self.result_list = []
def fade(self, res):
self.counter += 1
if self.accu == 0:
self.accu=res+0
self.result_list.append(res)
if self.counter < 1:
for i,ch in enumerate(self.accu.y):
ch += res.y[i]
elif len(self.result_list) == self.shots:
self.counter = len(self.result_list)
old_result = self.result_list.pop(0)
for i,ch in enumerate(self.accu.y):
ch *= self.shots
ch -= old_result.y[i]
ch += res.y[i]
else:
for i,ch in enumerate(self.accu.y):
ch *= self.counter-1
ch += res.y[i]
self.accu /= self.counter
return self.accu
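# Usage sketch for a result script: fade() maintains a scope-like fading
# average over (at most) the last "shots" results. "FakeRes" is a hypothetical
# minimal stand-in for ADC_Result providing what fade() relies on (.y and the
# "+", "/" operators).
import numpy

class FakeRes:
    def __init__(self, value):
        self.y = [numpy.zeros(4) + value]
    def __add__(self, other):         # only needed for the "res + 0" copy in fade()
        return FakeRes(self.y[0].copy())
    def __div__(self, number):        # needed for "accu /= counter"
        return FakeRes(self.y[0]/number)

p = Persistance(shots=3)
for v in [1.0, 2.0, 3.0, 4.0]:
    faded = p.fade(FakeRes(v))        # faded.y[0] approaches the running average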

65
src/data/Resultable.py Normal file
View File

@ -0,0 +1,65 @@
# -*- coding: iso-8859-1 -*-
#############################################################################
# #
# Name: Class Resultable #
# #
# Purpose: Base class for everything that can be a core result #
# #
#############################################################################
class Resultable:
def __init__(self):
self.job_id = None
self.job_date = None
self.description = { }
def get_job_id(self):
"Returns the job-id of this result"
return self.job_id
def set_job_id(self, _id):
"Sets the job-id of this result"
self.job_id = _id
def get_job_date(self):
"Gets the date of this result"
return self.job_date
def set_job_date(self, date):
"Sets the date of this result"
self.job_date = date
def get_description_dictionary(self):
"Returns a reference to the description (Dictionary)"
return self.description
def set_description_dictionary(self, dictionary):
"Sets the entire description"
self.description = dictionary
def get_description(self, key):
"Returns the description value for a given key"
if self.description.has_key(key):
return self.description[key]
else:
print "Warning Resultable: No value for key \"%s\". Returned None" % str(key)
return None
def set_description(self, key, value):
"Adds a attribute to the description"
if self.description.has_key(key):
print "Warning: Result key \"%s\" will be overwritten with \"%s\"." % (str(key), str(value))
self.description[key] = value

9
src/data/Signalpath.py Normal file
View File

@ -0,0 +1,9 @@
import numpy as N
class Signalpath:
def phase(self, degrees):
tmp = self.y[0] + 1j*self.y[1]
tmp *= N.exp(1j*degrees*N.pi/180)
self.y[0] = tmp.real
self.y[1] = tmp.imag
del tmp
return self
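# Quick check of the rotation above: with y[0] the real and y[1] the imaginary
# channel, phase(90) maps (re, im) to (-im, re). "Sig" is a hypothetical
# stand-in mixing in Signalpath.
class Sig(Signalpath):
    def __init__(self):
        self.y = [N.array([1.0, 0.0]), N.array([0.0, 1.0])]

s = Sig().phase(90)
print s.y[0]   # approximately [ 0. -1.]
print s.y[1]   # approximately [ 1.  0.]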

35
src/data/Temp_Result.py Normal file
View File

@ -0,0 +1,35 @@
# -*- coding: iso-8859-1 -*-
from Resultable import Resultable
from Drawable import Drawable
from types import *
#############################################################################
# #
# Name: Class Temp_Result #
# #
# Purpose: Specialised class of Resultable and Drawable #
# Contains recorded temperature data #
# #
#############################################################################
class Temp_Result(Resultable, Drawable):
def __init__(self, x = None, y = None, desc = None, job_id = None, job_date = None):
Resultable.__init__(self)
Drawable.__init__(self)
if (x is None) and (y is None) and (desc is None) and (job_id is None) and (job_date is None):
pass
elif (x is not None) and (y is not None) and (desc is not None) and (job_id is not None) and (job_date is not None):
pass
else:
raise ValueError("Wrong usage of __init__!")
# Overloading operators and built-ins -----------------------------------------------------------
# / Overloading operators and built-ins ---------------------------------------------------------

9
src/data/__init__.py Normal file
View File

@ -0,0 +1,9 @@
from damaris.data.ADC_Result import ADC_Result
from damaris.data.Accumulation import Accumulation
from damaris.data.MeasurementResult import MeasurementResult, AccumulatedValue
from damaris.data.DataPool import DataPool
from damaris.data.Error_Result import Error_Result
from damaris.data.Config_Result import Config_Result
__all__=["ADC_Result", "Accumulation", "MeasurementResult", "AccumulatedValue", "DataPool", "FFT", "Error_Result", "Config_Result" ]

63
src/data/autophase.py Normal file
View File

@ -0,0 +1,63 @@
from scipy.optimize import fmin_powell, bisect, ridder, brentq
import numpy as N
def calculate_entropy(phi, real, imag, gamma, dwell):
"""
Calculates the entropy of the spectrum (real part).
phi = phase (zero order)
gamma should be adjusted such that penalty and entropy are of the same magnitude
"""
# This is first order phasecorrection
# corr_phase = phi[0]+phi[1]*arange(0,len(signal),1.0)/len(signal) # For 0th and 1st correction
# Zero order phase correction
real_part = real*N.cos(phi)-imag*N.sin(phi)
# Either this for calculating derivatives:
# two-point formula
# real_diff = (Re[1:]-Re[:-1])/dwell
# Better this:
# three-point midpoint formula (boundaries are ignored)
# real_diff = abs((Re[2:]-Re[:-2])/(dwell*2))
# Even better:
# five-point midpoint formula (without boundaries)
real_diff = N.abs((real_part[:-4]-8*real_part[1:-3]
+8*real_part[3:-1]-real_part[4:])/(12*dwell))
# TODO boundaries, probably not critical
# Calculate the entropy
h = real_diff/real_diff.sum()
# Set all h with 0 to 1 (log would complain)
h[h==0]=1
entropy = N.sum(-h*N.log(h))
# My version, according the paper
#penalty = gamma*sum([val**2 for val in Re if val < 0])
# calculate penalty value: a real spectrum should have positive values
if real_part.sum() < 0:
tmp = real_part[real_part<0]
penalty = N.dot(tmp,tmp)
if gamma == 0:
gamma = entropy/penalty
penalty = N.dot(tmp,tmp)*gamma
else:
penalty = 0
#print "Entropy:",entrop,"Penalty:",penalty # Debugging
shannon = entropy+penalty
return shannon
def get_phase(result_object):
global gamma
gamma=0
real = result_object.y[0].copy()
imag = result_object.y[1].copy()
dwell = 1.0/result_object.sampling_rate
# fmin also possible
xopt = fmin_powell( func=calculate_entropy,
x0=N.array([0.0]),
args=(real, imag, gamma, dwell),
disp=0)
result_object.y[0] = real*N.cos(xopt) - imag*N.sin(xopt)
result_object.y[1] = real*N.sin(xopt) + imag*N.cos(xopt)
return result_object
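# Usage sketch: get_phase() searches the zero-order phase that minimizes the
# spectral entropy of the real part (plus the positivity penalty). "Spec" is a
# hypothetical stand-in for a frequency-domain result object; the test line and
# all numbers are made up.
class Spec:
    def __init__(self, real, imag, sampling_rate):
        self.y = [real, imag]
        self.sampling_rate = sampling_rate

line = 1.0/(1.0+((N.arange(256)-128.0)/4.0)**2)           # Lorentzian test line
spec = Spec(line*N.cos(1.0), line*N.sin(1.0), 1e6)        # mis-phased by 1 rad
spec = get_phase(spec)                                    # y[0] is now mostly absorptive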

491
src/experiments/Experiment.py Normal file
View File

@ -0,0 +1,491 @@
# -*- coding: iso-8859-1 -*-
import types
import numpy
class StateBase(object):
def __init__(self):
pass
def to_xml(self, indent = ""):
return indent + "<!-- " + repr(self) + " -->"
class StateSimple(StateBase):
def __init__(self, time, content=None):
super(StateSimple, self).__init__()
if time < 0:
raise AssertionError("time for state is negative!")
self.time = time
self.content = content
def to_xml(self, indent = ""):
s = indent + '<state time="%s"' % repr(self.time)
if self.content is None:
return s + '/>\n'
s += '>\n'
s += indent + ' ' + str(self.content) + '\n'
s += indent + '</state>\n'
return s
def __repr__(self):
return 'StateSimple(%s, %s)' % (self.time, repr(self.content))
class StateList(StateBase):
def __init__(self):
super(StateList, self).__init__()
self.list = []
def to_xml(self, indent = " "):
s = ""
for k in self.list:
if hasattr(k, "to_xml"):
s += k.to_xml(indent)
else:
s += indent + str(k)
return s
def append(self, val):
self.list.append(val)
class StateLoop(StateList):
"""Represents a loop in the state tree"""
def __init__(self, repeat):
super(StateLoop, self).__init__()
self.repeat = repeat
def to_xml(self, indent = ""):
s = indent + ('<sequent repeat="%d">\n' % self.repeat)
s += super(StateLoop, self).to_xml(indent + " ")
s += indent + '</sequent>\n'
return s
def __repr__(self):
return 'StateLoop(repeat=%d, %s)' \
% (self.repeat, repr(self.list))
#############################################################
# #
# Class: Experiment #
# #
# Purpose: Represents one full experiment (one program on #
# the pulse-card; one file) #
# #
#############################################################
import dac
class Experiment:
## Experiment class holding the state tree
job_id = 0
def __init__(self):
self.job_id = Experiment.job_id
Experiment.job_id += 1
self.state_list = StateList()
self.list_stack = []
self.description = { }
# Commands -------------------------------------------------------------------------------------
## Deprecated
def rf_pulse(self, value, length = None):
"""
deprecated: use ttl_pulse
"""
s_content = '<ttlout value="0x%06x"/>' % value
if length is None:
self.state_list.append(s_content)
else:
self.state_list.append(StateSimple(length, s_content))
## Creates a state with ttl signals of duration *length*.
#
# **Example:**
# ttl_pulse(length=1e-6,value=3)
# will create a ttl pulse on channels 0 and 1 (2**0 + 2**1) of duration 1us
# @param length time length if this state
# @param channel select a single channel (1...24)
# @param value select the channels via decimal representation (2**0 + 2**1 ...)
def ttl_pulse(self, length, channel = None, value = None):
"""
Creates a state with length *length* and switches
some bits of the pulse programmer to HIGH:
* channel: this selects a single channel (No. 1 - 24)
* value: this is the integer representation of the 24bit word,
as an example value=3 selects channels 1 and 2 (2**1 + 2**2)
"""
the_value=0
if value is not None:
the_value=int(value)
elif channel is not None:
the_value=1<<channel
self.state_list.append(StateSimple(length, \
'<ttlout value="0x%06x"/>' % the_value))
## Same as ttl_pulse, but no *channel* keyword
def ttls(self, length = None, value = None):
"""
same as ttl_pulse, but no *channel* keyword
"""
the_value=int(value)
s_content = '<ttlout value="0x%06x"/>' % the_value
if length is not None:
self.state_list.append(StateSimple(length, s_content))
else:
self.state_list.append(s_content)
## Beginning of a new state
def state_start(self, time):
"""
starts a state in the pulse programs with duration *time*.
This must be closed with state_end
"""
self.state_list.append('<state time="%s">\n' % repr(time))
## End of *state_start*
def state_end(self):
"""
closes a state after start_state
"""
self.state_list.append('</state>\n')
## An empty state doing nothing
# @param time Duration of this state
# @param ttls Additional ttl channels
def wait(self, time, ttls=None):
if ttls is not None:
s_content = '<ttlout value="0x%06x"/>' % ttls
self.state_list.append(StateSimple(time,s_content))
else:
self.state_list.append(StateSimple(time))
## Records data with given number of samples, sampling-frequency frequency and sensitivity
# @param samples Number of samples to record
# @param frequency Sampling frequency
# @param timelength Length of this state, per default calculated automatically
# @param sensitivity Sensitivity in Umax/V
# @param ttls Additional ttl channels
def record(self, samples, frequency, timelength=None, sensitivity = None, ttls=None, channels = 3, offset = None, impedance = None):
attributes='s="%d" f="%d"'%(samples,frequency)#%g
if channels != 1 and channels != 3 and channels != 5 and channels != 15:
raise ValueError, "Channel definition is illegal"
attributes += ' channels="%i"'%(channels)
nchannels = 0
if channels == 1:
nchannels = 1
elif channels == 3 or channels == 5:
nchannels = 2
elif channels == 15:
nchannels = 4
if sensitivity is not None:
# float values are allowed and applied to all channels
if isinstance(sensitivity, float) or isinstance(sensitivity, int):
for i in range(nchannels):
attributes +=' sensitivity%i="%f"'%(i, float(sensitivity))
else:
for i in range(nchannels):
attributes +=' sensitivity%i="%f"'%(i, sensitivity[i])
if offset is not None:
# int values are allowed and applied to all channels
if isinstance(offset, int):
for i in range(nchannels):
attributes +=' offset%i="%f"'%(i, offset)
else:
for i in range(nchannels):
attributes +=' offset%i="%f"'%(i, offset[i])
if impedance is not None:
# float values are allowed and applied to all channels
if isinstance(impedance, float):
for i in range(nchannels):
attributes += ' impedance%i="%i"'%(i, impedance)
else:
for i in range(nchannels):
attributes += ' impedance%i="%i"'%(i, impedance[i])
s_content = '<analogin %s/>' % attributes
if ttls is not None:
s_content+='<ttlout value="0x%06x"/>' % ttls
if timelength is None:
timelength = samples / float(frequency)#*1.01
self.state_list.append(StateSimple(timelength, s_content))
## Create a loop on the pulse programmer. Loop contents can not change inside the loop.
# @param iterations Number of loop iterations
def loop_start(self, iterations):
"""creates a loop of given number of iterations and has to be closed by loop_end().
Commands inside the loop can not change, i.e. the parameters are the same for each loop run.
This loop is created on the pulse programmer, thus saving commands.
One must close the loop with loop_end (see below)"""
l = StateLoop(iterations)
self.state_list.append(l)
# (These two lines could probably be guarded by a mutex)
self.list_stack.append(self.state_list)
self.state_list = l
## End loop state
def loop_end(self):
# (This line could probably be guarded by a mutex)
self.state_list = self.list_stack.pop(-1)
## Set the frequency and phase of the frequency source.
## This state needs 2us.
# @param frequency New frequency in Hz
# @param phase New phase in degrees
# @param ttls Additional ttl channels
def set_frequency(self, frequency, phase, ttls=0):
"""
Sets the frequency and phase of the frequency source and optionally further channels.
The time needed to set the frequency is 2 us.
Switch pulse programmer line with *ttls* .
"""
"Sets the frequency generator to a desired frequency (Hz)"
s_content = '<analogout id="0" f="%f" phase="%f"/>' % (frequency, phase)
if ttls != 0:
s_content += '<ttlout value="0x%06x"/>' % ttls
self.state_list.append(StateSimple(2e-6, s_content))
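# Usage sketch (values are hypothetical), assuming an Experiment instance e:
#   e.set_frequency(300e6, phase=0, ttls=0x10)   # occupies 2 us in the program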
## Creates a, possibly shaped, pulsed gradient.
# @param dac_value DAC value to set
# @param length Duration of the state, minimum length is 42*90ns=3.78us (default)
# @param shape Tuple of (shape, resolution/seconds), shape can be one of: rec (default), sin2, sin
# @param is_seq If set to *True*, do NOT set DAC to zero after this state
# @param trigger Additional ttl channels
def set_pfg(self, dac_value=None, length=None, shape=('rec',0), trigger=4, is_seq=False):
"""
This sets the value for the PFG, it also sets it back automatically.
If you don't whish to do so (i.e. line shapes) set is_seq=1
If you wnat to set a trigger, set trigger (default=4, i.e. channel 2)
If you want shaped gradients: shape=(ashape, resolution), ashape can be rec, sin2, sin
"""
try:
form, resolution = shape
except:
raise SyntaxError, "shape argument needs to be a tuple, i.e. ('shape',resolution), shape can be sin, sin2, rec"
if length == None:
# mimimum length
length=42*9e-8
if resolution >= length:
raise ValueError, "Resolution %.3e of shaped gradients can not be longer than total length %.3e"%(resolution, length)
if resolution < 42*9e-8:
raise ValueError, "Resulution %.3e can not be smaller than %.3e"%(resolution, 42*9e-8)
t_steps = numpy.arange(0,length,resolution)
if form == 'rec': # shape==None --> rectangular gradients
s_content = '<ttlout value="%s"/><analogout id="1" dac_value="%i"/>' % (trigger, dac_value)
self.state_list.append(StateSimple(length, s_content))
if not is_seq and shape == None:
s_content = '<analogout id="1" dac_value="0"/>'
self.state_list.append(StateSimple(42*9e-8, s_content))
elif form == 'sin2':
# sin**2 shape
for t in t_steps:
dac = int(dac_value*numpy.sin(numpy.pi/length*t)**2)
s_content = '<ttlout value="%s"/><analogout id="1" dac_value="%i"/>' % (trigger, dac)
self.state_list.append(StateSimple(resolution, s_content))
# set it back to zero
s_content = '<ttlout value="%s"/><analogout id="1" dac_value="0"/>' % (trigger)
self.state_list.append(StateSimple(resolution, s_content))
elif form == 'sin':
# sin shape
for t in t_steps:
dac = int(dac_value*numpy.sin(numpy.pi/length*t))
s_content = '<ttlout value="%s"/><analogout id="1" dac_value="%i"/>' % (trigger, dac)
self.state_list.append(StateSimple(resolution, s_content))
# set it back to zero
s_content = '<ttlout value="%s"/><analogout id="1" dac_value="0"/>' % (trigger)
self.state_list.append(StateSimple(resolution, s_content))
else: # unknown form
raise SyntaxError, "form is unknown: %s" % form
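# Usage sketch (values are hypothetical): a 1 ms sin^2-shaped gradient
# sampled every 10 us, assuming an Experiment instance e:
#   e.set_pfg(dac_value=2000, length=1e-3, shape=('sin2', 1e-5))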
## Deprecated, use set_pfg instead
def set_pfg_wt(self, I_out=None, dac_value=None, length=None, is_seq=0, trigger=4):
"""
This sets the value for the PFG (plus trigger, default=2**2) and sets it back automatically.
If you don't wish to do so (e.g. for line shapes) set is_seq=1.
"""
# raise DeprecationWarning, "to be removed in future, use set_pfg instead"
if I_out is not None and dac_value is not None:
dac_value = 0
print "WARNING: You can't set both I_out and dac_value! dac_value set to 0"
elif I_out is not None:
dac_value = dac.conv(I_out)
elif dac_value is None:
dac_value = 0
if length is None:
length = 42*9e-8
s_content = '<analogout id="1" dac_value="%i"/><ttlout value="%s"/>' \
% (dac_value, trigger)
self.state_list.append(StateSimple(length, s_content))
if is_seq == 0:
s_content = '<analogout id="1" dac_value="0"/><ttlout value="%s"/>' \
% trigger
self.state_list.append(StateSimple(42*9e-8, s_content))
## sets the value of a DAC
# @param dac_value DAC value to set
# @param dac_id ID of the DAC in case of multiple DACs (default=1)
# @param length Duration of the state
# @param is_seq If set to *True*, do NOT set DAC to zero after this state
# @param ttls Additional ttl channels
def set_dac(self, dac_value, dac_id=1, length=None, is_seq=False, ttls=0):
"""
This sets the value for the DAC and possibly some TTLs.
It also sets it back automatically.
If you don't wish to do so (e.g. for line shapes) set is_seq=True
"""
if length is None:
length = 42*9e-8
s_content = '<analogout id="%d" dac_value="%i"/><ttlout value="0x%06x"/>' \
% (dac_id, dac_value, ttls)
self.state_list.append(StateSimple(length, s_content))
if not is_seq:
s_content = '<analogout id="%d" dac_value="0"/><ttlout value="0x%06x"/>' \
% (dac_id, ttls)
self.state_list.append(StateSimple(42*9e-8, s_content))
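# Usage sketch (values are hypothetical), assuming an Experiment instance e:
#   e.set_dac(2048, dac_id=2, ttls=0x08)   # DAC 2 is set back to zero after 42*90ns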
## sets the phase of the frequency source.
## This state needs 0.5us, though the phase switching time is dependent on the frequency source
# @param phase New phase to set
# @param ttls Additional ttl channels
def set_phase(self, phase, ttls=0):
s_content = '<analogout phase="%f" />' % (phase)
if ttls!=0:
s_content += '<ttlout value="%d"/>' % ttls
self.state_list.append(StateSimple(0.5e-6, s_content))
## sets a description which is carried via the back end result
## file to the result script in the front end. In the result script
## you can extract the description with get_description(key)
# @param key Name of description
# @param value Value of description
def set_description(self, key, value):
"""Sets a description which is carried via the back end result
file to the result script in the front end. In the result script
you can extract the description with get_description"""
if key in self.description:
print 'Warning: Overwriting existing description "%s" = "%s" with "%s"' % (key, self.description[key], value)
self.description[key] = value
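# Usage sketch (key and value are hypothetical), assuming an Experiment instance e:
#   e.set_description("tau", 1.5e-3)
# the result script can then call get_description("tau") on the result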
## set the PTS310/PTS500 frequency source to local mode
def set_pts_local(self):
"""
this will set the PTS310/PTS500 frequency source to local mode
"""
self.state_list.append(StateSimple(1e-6, '<ttlout value="0xf000"/>'))
self.state_list.append(StateSimple(1e-6, '<ttlout value="0x8000"/>'))
# / Commands -----------------------------------------------------------------------------------
# Public Methods -------------------------------------------------------------------------------
def get_job_id(self):
"Returns the current job-id the experiment got"
return self.job_id
def write_xml_string(self):
"Returns the current program as a string"
# standard XML header
xml_string = '<?xml version="1.0" encoding="ISO-8859-1"?>\n'
# insert experiment start tag
xml_string += '<experiment no="%d">\n' % self.job_id
# insert descriptions
if len(self.description)==0:
xml_string += ' <description/>\n'
else:
xml_string += ' <description>\n'
for key,value in self.description.iteritems():
type_string="repr"
if value is None:
type_string="None"
value=""
elif type(value) is types.FloatType or isinstance(value, numpy.floating):
type_string="Float"
value=repr(value)
elif type(value) is types.IntType or isinstance(value, numpy.integer):
type_string="Int"
value=repr(value)
elif type(value) is types.LongType:
type_string="Long"
value=repr(value)
elif type(value) is types.ComplexType or isinstance(value, numpy.complexfloating):
type_string="Complex"
value=repr(value)
elif type(value) is types.BooleanType or isinstance(value, numpy.bool_):
type_string="Boolean"
value=repr(value)
elif type(value) in types.StringTypes:
type_string="String"
else:
value=repr(value)
xml_string += ' <item key="%s" type="%s">%s</item>\n'%(key, type_string ,value)
xml_string += " </description>\n"
# insert experiment contents
xml_string += self.state_list.to_xml(indent = " ")
# experiment end tag
xml_string += '</experiment>\n'
return xml_string
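# Sketch of the generated document (the state markup depends on the states added):
#   <?xml version="1.0" encoding="ISO-8859-1"?>
#   <experiment no="0">
#    <description>
#     <item key="tau" type="Float">0.0015</item>
#    </description>
#    ...states...
#   </experiment>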
def write_quit_job(self):
"Returns a xml quit-job"
return '<?xml version="1.0" encoding="ISO-8859-1"?>\n<quit/>'
class Quit(Experiment):
def write_xml_string(self):
return '<?xml version="1.0" encoding="ISO-8859-1"?>\n<quit no="%d"/>'%self.job_id
# /Public Methods ------------------------------------------------------------------------------
def self_test():
e = Experiment()
e.set_description("key", "value")
e.set_frequency(85e6, 90, ttls=16)
e.wait(1e-6)
e.rf_pulse(1, 1e-6/3) # val = 1
e.ttl_pulse(1e-6/3, 1) # val = 2
e.ttl_pulse(1e-6/3, None, 7) # val = 7
if True:
e.loop_start(30)
e.set_pfg(dac_value=1024, is_seq = True)
e.set_pfg_wt(dac_value=2048)
e.loop_start(400)
e.set_phase(270, ttls = 32)
e.loop_end()
e.ttl_pulse(5e-6, channel = 6)
e.loop_end()
else:
l = StateLoop(3)
l.append(StateSimple(5e-6, '<ttlout value="1"/>'))
e.state_list.append(l)
e.set_dac(12345, dac_id=2, is_seq = True, ttls=16)
e.record(1024, 20e6)
try:
e.wait(-1)
except AssertionError:
pass
else:
raise AssertionError("An exception should happen")
e.set_pts_local()
print e.write_xml_string()
if __name__ == '__main__':
self_test()

3
src/experiments/__init__.py Normal file
View File

@ -0,0 +1,3 @@
from Experiment import Experiment
from damaris.tools.ranges import *
#__all__=["Experiment"]

12
src/experiments/dac.py Normal file
View File

@ -0,0 +1,12 @@
#import math
"""
This module holds everything connected with the DAC and PFG
"""
def conv(I_out=0):
"""
converts the demanded output current into a DAC integer value
"""
V_dac=I_out/50.0
dac_value=-(V_dac-0.00983)/1.81413e-5
return int(dac_value)
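# Worked example (sketch): for I_out = 0.5 the control voltage is
# V_dac = 0.5/50.0 = 0.01, so conv(0.5) = int(-(0.01-0.00983)/1.81413e-5) = -9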

294
src/gui/BackendDriver.py Normal file
View File

@ -0,0 +1,294 @@
import os
import os.path
import subprocess
import sys
import time
import re
import glob
import ExperimentWriter
import ResultReader
import threading
import types
import signal
if sys.platform=="win32":
import _winreg
__doc__ = """
This class handles the backend driver
"""
class BackendDriver(threading.Thread):
def __init__(self, executable, spool, clear_jobs=False, clear_results=False):
threading.Thread.__init__(self, name="Backend Driver")
self.core_pid = None
self.core_input = None
self.core_output = None
self.statefilename = None
self.executable=str(executable)
self.spool_dir=spool
self.experiment_pattern="job.%09d"
self.result_pattern=self.experiment_pattern+".result"
if not os.path.isfile(self.executable):
raise AssertionError("could not find backend %s "%self.executable)
if not os.access(self.executable,os.X_OK):
raise AssertionError("insufficient rights for backend %s execution"%self.executable)
if not os.path.isdir(self.spool_dir):
try:
os.makedirs(os.path.abspath(self.spool_dir))
except OSError,e:
print e
raise AssertionError("could not create backend's spool directory %s "%self.spool_dir)
# remove stale state filenames
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
old_state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
statelinepattern=re.compile("<state name=\"([^\"]+)\" pid=\"([^\"]+)\" starttime=\"([^\"]+)\">")
for statefilename in old_state_files:
statefile=file(statefilename,"r")
statelines=statefile.readlines()
statefile.close()
del statefile
core_pid=None
for l in statelines:
matched=statelinepattern.match(l)
if matched:
core_pid=int(matched.group(2))
break
if core_pid is not None:
if os.path.isdir("/proc/%d"%core_pid):
raise AssertionError("found backend with pid %d (state file %s) in same spool dir"%(core_pid,statefilename))
else:
print "removing stale backend state file", statefilename
os.remove(statefilename)
else:
print "todo: take care of existing backend state files"
self.result_reader = ResultReader.BlockingResultReader(self.spool_dir,
no=0,
result_pattern=self.result_pattern,
clear_jobs=clear_jobs,
clear_results=clear_results)
self.experiment_writer = ExperimentWriter.ExperimentWriterWithCleanup(self.spool_dir,
no=0,
job_pattern=self.experiment_pattern,
inform_last_job=self.result_reader)
self.quit_flag=threading.Event()
self.raised_exception=None
def run(self):
# take care of older logfiles
self.core_output_filename=os.path.join(self.spool_dir,"logdata")
if os.path.isfile(self.core_output_filename):
i=0
max_logs=100
while os.path.isfile(self.core_output_filename+".%02d"%i):
i+=1
while (i>=max_logs):
i-=1
os.remove(self.core_output_filename+".%02d"%i)
for j in xrange(i):
os.rename(self.core_output_filename+".%02d"%(i-j-1),self.core_output_filename+".%02d"%(i-j))
os.rename(self.core_output_filename, self.core_output_filename+".%02d"%0)
# create logfile
self.core_output=file(self.core_output_filename,"w")
# again look out for existing state files
state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
if state_files:
self.raised_exception="found other state file(s) in spool directory: "+",".join(state_files)
self.quit_flag.set()
return
# start backend
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
self.core_input=subprocess.Popen([self.executable, "--spool", self.spool_dir],
stdout=self.core_output,
stderr=self.core_output)
if sys.platform=="win32":
cygwin_root_key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Cygnus Solutions\\Cygwin\\mounts v2\\/")
cygwin_path=_winreg.QueryValueEx(cygwin_root_key,"native")[0]
os.environ["PATH"]+=";"+os.path.join(cygwin_path,"bin")+";"+os.path.join(cygwin_path,"lib")
self.core_input=subprocess.Popen("\"" + self.executable + "\"" + " --spool "+self.spool_dir,
stdout=self.core_output,
stderr=self.core_output)
# wait till state file shows up
timeout=10
# to do: the name of the core's state file is not known in advance
self.statefilename=None
state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
while len(state_files)==0:
if timeout<0 or self.core_input is None or self.core_input.poll() is not None or self.quit_flag.isSet():
# look into core log file and include contents
log_message=''
self.core_input=None
if os.path.isfile(self.core_output_filename):
# to do include log data
log_message='\n'+''.join(file(self.core_output_filename,"r").readlines()[:10])
if not log_message:
log_message=" no error message from core"
self.core_output.close()
self.raised_exception="no state file appeared or backend died away:"+log_message
print self.raised_exception
self.quit_flag.set()
return
time.sleep(0.05)
timeout-=0.05
state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
# keep the first one
if len(state_files)>1:
print "found more than one state file, taking the first one!"
self.statefilename=state_files[0]
# read state file
statefile=file(self.statefilename,"r")
statelines=statefile.readlines()
statefile.close()
statelinepattern=re.compile("<state name=\"([^\"]+)\" pid=\"([^\"]+)\" starttime=\"([^\"]+)\">")
self.core_pid=-1
for l in statelines:
matched=statelinepattern.match(l)
if matched:
self.core_pid=int(matched.group(2))
break
# wait on flag and look after backend
while not self.quit_flag.isSet() and self.is_busy():
self.quit_flag.wait(0.1)
if self.quit_flag.isSet():
self.stop_queue()
while self.is_busy():
time.sleep(0.1)
if not self.is_busy():
if self.core_input is not None:
backend_result=self.core_input.poll()
wait_loop_counter=0
while backend_result is None:
# waiting in tenth of a second
time.sleep(0.1)
wait_loop_counter+=1
backend_result=self.core_input.poll()
if backend_result is not None: break
if wait_loop_counter==10:
print "sending termination signal to backend process"
self.send_signal("SIGTERM")
elif wait_loop_counter==20:
print "sending kill signal to backend process"
self.send_signal("SIGKILL")
elif wait_loop_counter>30:
print "no longer waiting for backend shutdown"
break
if backend_result is None:
print "backend dit not end properly, please stop it manually"
elif backend_result>0:
print "backend returned ", backend_result
elif backend_result<0:
sig_name=filter(lambda x: x.startswith("SIG") and \
x[3]!="_" and \
(type(signal.__dict__[x])is types.IntType) and \
signal.__dict__[x]==-backend_result,
dir(signal))
if sig_name:
print "backend was terminated by signal ",sig_name[0]
else:
print "backend was terminated by signal no",-backend_result
self.core_input = None
self.core_pid = None
# the experiment handler should stop
if self.experiment_writer is not None:
# self.experiment_writer.
self.experiment_writer=None
# tell result reader, game is over...
#self.result_reader.stop_no=self.experiment_writer.no
if self.result_reader is not None:
self.result_reader.poll_time=-1
self.result_reader=None
def clear_job(self,no):
jobfilename=os.path.join(self.spool_dir,self.experiment_pattern%no)
resultfilename=os.path.join(self.spool_dir,self.result_pattern%no)
if os.path.isfile(jobfilename):
os.remove(jobfilename)
if os.path.isfile(resultfilename):
os.remove(resultfilename)
def get_messages(self):
# return pending messages
if self.core_output.tell()==os.path.getsize(self.core_output_filename):
return None
return self.core_output.read()
def restart_queue(self):
self.send_signal("SIGUSR1")
def stop_queue(self):
self.send_signal("SIGQUIT")
# assumes success
#self.core_pid=None
#self.core_input=None
def abort(self):
# abort execution
self.send_signal("SIGTERM")
# assumes success
#self.core_pid=None
#self.core_input=None
def send_signal(self, sig):
if self.core_pid is None:
print "BackendDriver.send_signal is called with core_pid=None"
return
try:
if sys.platform[:5]=="linux":
os.kill(self.core_pid,signal.__dict__[sig])
if sys.platform=="win32":
# reg_handle=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
cygwin_root_key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Cygnus Solutions\\Cygwin\\mounts v2\\/")
cygwin_path=_winreg.QueryValueEx(cygwin_root_key,"native")[0]
kill_command=os.path.join(cygwin_path,"bin","kill.exe")
os.popen("%s -%s %d"%(kill_command,sig,self.core_pid))
except OSError, e:
print "could not send signal %s to core: %s"%(sig, str(e))
def is_busy(self):
"Checks for state file"
return self.statefilename is not None and os.path.isfile(self.statefilename) and \
self.core_input is not None and self.core_input.poll() is None
#file_list = glob.glob(os.path.join(self.spool_dir, self.core_state_file))
#if len(file_list) != 0:
# return True
#else:
# return False
def get_exp_writer(self):
return self.experiment_writer
def get_res_reader(self):
return self.result_reader
def __del__(self):
# stop core and wait for it
if self.core_pid is not None:
try:
self.abort()
except OSError:
pass
self.core_input=None
if self.core_output:
self.core_output.close()
self.core_output=None
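# Usage sketch (paths are hypothetical): run a backend and fetch its queues:
#   driver = BackendDriver("/usr/lib/damaris/backends/dummycore", "/var/spool/damaris")
#   driver.start()   # the thread's run() launches the backend executable
#   experiment_writer = driver.get_exp_writer()
#   result_reader = driver.get_res_reader()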

BIN
src/gui/DAMARIS.ico Normal file

Binary file not shown.

Size: 11 KiB

BIN
src/gui/DAMARIS.png Normal file

Binary file not shown.

Size: 664 B

2880
src/gui/DamarisGUI.py Normal file

File diff suppressed because it is too large

95
src/gui/ExperimentHandling.py Normal file
View File

@ -0,0 +1,95 @@
import threading
import StringIO
import traceback
import sys
import time
from damaris.experiments.Experiment import Quit
from damaris.experiments import Experiment
class ExperimentHandling(threading.Thread):
"""
runs the experiment script in a sandbox
"""
def __init__(self, script, exp_writer, data):
threading.Thread.__init__(self, name="experiment handler")
self.script=script
self.writer=exp_writer
self.data=data
self.quit_flag = threading.Event()
if self.data is not None:
self.data["__recentexperiment"]=-1
def synchronize(self, before=0, waitsteps=0.1):
while (self.data["__recentexperiment"]>self.data["__recentresult"]+before) and not self.quit_flag.isSet():
self.quit_flag.wait(waitsteps)
if self.quit_flag.isSet():
raise StopIteration
def run(self):
dataspace={}
exp_classes = __import__('damaris.experiments', dataspace, dataspace, ['Experiment'])
for name in dir(exp_classes):
if name[:2]=="__" and name[-2:]=="__": continue
dataspace[name]=exp_classes.__dict__[name]
del exp_classes
dataspace["data"]=self.data
dataspace["synchronize"]=self.synchronize
self.raised_exception = None
self.location = None
exp_iterator=None
try:
exec self.script in dataspace
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
return
if "experiment" in dataspace:
try:
exp_iterator=dataspace["experiment"]()
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
return
while exp_iterator is not None and not self.quit_flag.isSet():
# get next experiment from script
try:
job=exp_iterator.next()
except StopIteration:
break
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
break
# send it
self.writer.send_next(job)
# write a note
if isinstance(job, Experiment):
if self.data is not None:
self.data["__recentexperiment"]=job.job_id+0
# relax for a short time
if "__resultsinadvance" in self.data and self.data["__resultsinadvance"]+100<job.job_id:
self.quit_flag.wait(0.05)
if self.quit_flag.isSet():
dataspace=None
exp_iterator=None
break
self.writer.send_next(Quit(), quit=True)
# do not count quit job (is this a good idea?)
dataspace=None
self.exp_iterator=None
self.writer=None
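# Sketch of a minimal experiment script as executed above: it must define a
# generator named "experiment" that yields Experiment objects (all values
# are hypothetical):
#   def experiment():
#       for t in [1e-6, 2e-6, 4e-6]:
#           e = Experiment()
#           e.set_description("tau", t)
#           e.wait(t)
#           e.record(1024, 1e6)
#           yield e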

66
src/gui/ExperimentWriter.py Normal file
View File

@ -0,0 +1,66 @@
import os
import os.path
import shutil
from damaris.experiments import Experiment
class ExperimentWriter:
"""
writes experiments properly to the spool directory
"""
def __init__(self, spool, no=0, job_pattern="job.%09d", inform_last_job=None):
self.spool=spool
self.job_pattern=job_pattern
self.no=no
self.inform_last_job=inform_last_job
# test if spool exists
if not os.path.isdir(spool):
os.mkdir(spool)
def send_next(self, job, quit=False):
"""
"""
if quit and self.inform_last_job is not None:
self.inform_last_job.stop_no=self.no
self.inform_last_job=None
job.job_id=self.no
job_filename=os.path.join(self.spool,self.job_pattern%self.no)
f=file(job_filename+".tmp","w")
f.write(job.write_xml_string())
f.flush()
f.close() # explicit close under windows necessary (don't know why)
del f
# this implementation tries to satisfy msvc filehandle caching
os.rename(job_filename+".tmp", job_filename)
#shutil.copyfile(job_filename+".tmp", job_filename)
#try:
# os.unlink(job_filename+".tmp")
#except OSError:
# print "could not delete temporary file %s.tmp"%job_filename
self.no+=1
def __del__(self):
if self.inform_last_job is not None:
self.inform_last_job.stop_no=self.no-1
self.inform_last_job=None
class ExperimentWriterWithCleanup(ExperimentWriter):
"""
writes experiments and cleans up in front of queue
"""
def __init__(self, spool, no=0, job_pattern="job.%09d", inform_last_job=None):
ExperimentWriter.__init__(self, spool, no, job_pattern, inform_last_job=inform_last_job)
self.delete_no_files(self.no)
def send_next(self, job, quit=False):
self.delete_no_files(self.no+1)
ExperimentWriter.send_next(self,job,quit)
def delete_no_files(self,no):
"""
delete everything with this job number
"""
filename=os.path.join(self.spool,(self.job_pattern%no))
if os.path.isfile(filename): os.unlink(filename)
if os.path.isfile(filename+".tmp"): os.unlink(filename+".tmp")
if os.path.isfile(filename+".result"): os.unlink(filename+".result")

78
src/gui/ResultHandling.py Normal file
View File

@ -0,0 +1,78 @@
import threading
import StringIO
import sys
import os
import os.path
import traceback
from damaris.data import Resultable
class ResultHandling(threading.Thread):
"""
runs the result script in a sandbox
"""
def __init__(self, script_data, result_iterator, data_pool):
threading.Thread.__init__(self,name="result handler")
self.script=script_data
self.results=result_iterator
self.data_space=data_pool
self.quit_flag=self.results.quit_flag
if self.data_space is not None:
self.data_space["__recentresult"]=-1
def run(self):
# execute it
dataspace={}
data_classes = __import__('damaris.data', dataspace, dataspace, ['*'])
for name in dir(data_classes):
if name[:2]=="__" and name[-2:]=="__": continue
dataspace[name]=data_classes.__dict__[name]
del data_classes
dataspace["results"]=self
dataspace["data"]=self.data_space
self.raised_exception=None
self.location = None
try:
exec self.script in dataspace
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
return
if not "result" in dataspace:
dataspace=None
return
try:
dataspace["result"]()
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
dataspace=None
def __iter__(self):
if self.quit_flag.isSet():
self.results=None
return
for i in self.results:
if hasattr(self.results, "in_advance"):
self.data_space["__resultsinadvance"]=self.results.in_advance
if self.quit_flag.isSet():
self.results=None
return
if isinstance(i, Resultable.Resultable):
if self.data_space is not None:
self.data_space["__recentresult"]=i.job_id+0
yield i
if self.quit_flag.isSet():
self.results=None
return
def stop(self):
self.quit_flag.set()
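# Sketch of a minimal result script as executed above: it must define a
# function named "result" that iterates over the "results" object provided
# in the dataspace (the use of the data pool is just an example):
#   def result():
#       for res in results:
#           data["last result"] = res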

576
src/gui/ResultReader.py Normal file
View File

@ -0,0 +1,576 @@
# -*- coding: iso-8859-1 -*-
#############################################################################
# #
# Name: Class ResultReader #
# #
#############################################################################
import os
import os.path
import glob
import time
import sys
import base64
import numpy
try:
import xml.etree.cElementTree
ELEMENT_TREE = True
except ImportError:
import xml.parsers.expat
ELEMENT_TREE = False
import threading
from datetime import datetime
from damaris.data import ADC_Result
from damaris.data import Error_Result
from damaris.data import Temp_Result
from damaris.data import Config_Result
class ResultReader:
"""
starts at some point and returns result objects until none are there
"""
CONFIG_TYPE = 3
TEMP_TYPE = 2
ADCDATA_TYPE = 1
ERROR_TYPE = 0
def __init__(self, spool_dir=".", no=0, result_pattern="job.%09d.result", clear_jobs=False, clear_results=False):
self.spool_dir = spool_dir
self.start_no = no
self.no = self.start_no
self.result_pattern = result_pattern
self.clear_jobs=clear_jobs
self.clear_results=clear_results
self.quit_flag=threading.Event() # asynchronous quit flag
def __iter__(self):
"""
get next job with iterator
"""
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
while os.access(expected_filename,os.R_OK):
yield self.get_result_object(expected_filename)
# purge result file
if self.clear_results:
if os.path.isfile(expected_filename): os.remove(expected_filename)
if self.clear_jobs:
if os.path.isfile(expected_filename[:-7]): os.remove(expected_filename[:-7])
self.no+=1
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
return
def get_result_object(self, in_filename):
"""
get result object
"""
# the result object currently being processed
retries=0
result_file=None
while result_file is None:
try:
result_file = file(in_filename, "r")
except IOError, e:
if retries>10:
raise e
print e, "retry", retries
time.sleep(0.05)
retries+=1
# get date of last modification
self.result_job_date = datetime.fromtimestamp(os.stat(in_filename)[8])
if ELEMENT_TREE:
self.__parseFile = self.__parseFile_cETree
else:
self.__parseFile = self.__parseFile_expat
self.__parseFile (result_file)
result_file.close()
result_file = None
r=self.result
self.result = None
return r
def __parseFile_cETree(self, in_file):
self.result = None
self.in_description_section=False
self.result_description = { }
self.result_job_number = None
# the job date is set in get_result_object()
self.__filetype = None
for elem in xml.etree.cElementTree.ElementTree(file=in_file).getiterator():
if elem.tag == 'result':
self.result_job_number = int(elem.get("job"))
elif elem.tag == 'description':
if elem.text!=None:
self.result_description = {}
self.in_description_section=True
self.in_description_data=()
for an_item in elem.getchildren():
self.in_description_data = (an_item.get("key"), an_item.get("type"), an_item.text)
# make item contents to dictionary item:
k,t,v=self.in_description_data
self.in_description_data=()
if t == "None":
self.result_description[k]=None
if t == "Float":
self.result_description[k]=float(v)
elif t == "Int":
self.result_description[k]=int(v)
elif t == "Long":
self.result_description[k]=long(v)
elif t == "Complex":
self.result_description[k]=complex(v)
elif t == "Boolean":
self.result_description[k]=bool(v)
elif t == "String":
self.result_description[k]=v
else:
# Anything else will be handled as a string
# Probably "repr".
self.result_description[k]=v
elif elem.tag == 'adcdata':
self.__filetype = ResultReader.ADCDATA_TYPE
self.adc_result_trailing_chars = ""
if self.result is None:
self.result = ADC_Result()
# None: new guess for adc data encoding
# "a": ascii
# "b": base64
self.adc_data_encoding = None
self.result.set_sampling_rate(float(elem.get("rate")))
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_nChannels(int(elem.get("channels")))
self.result.set_description_dictionary(self.result_description.copy())
title = "ADC-Result: job-id=%d"%int(self.result_job_number)
if len(self.result_description) > 0:
for k,v in self.result_description.iteritems():
title += ", %s=%s"%(k,v)
self.result.set_title(title)
self.result_description = None
self.adc_result_sample_counter = 0
self.adc_result_parts = [] # will contain arrays of sampled intervals, assumes same sample rate
else:
if float(elem.get("rate")) != self.result.get_sampling_rate():
print "sample rate different in ADC_Result, found %f, former value %f"%\
(float(in_attribute["rate"]),self.result.get_sampling_rate())
new_samples = int(elem.get("samples"))
self.adc_result_sample_counter += new_samples
self.adc_result_trailing_chars = "".join(elem.text.splitlines())
tmp_string = base64.standard_b64decode(self.adc_result_trailing_chars)
self.adc_result_trailing_chars = None
tmp = numpy.fromstring(tmp_string,dtype='Int16')
tmp_string = None
self.adc_result_parts.append(tmp)
tmp = None
# we do not need this adcdata anymore, delete it
elem.clear()
elif elem.tag == 'error':
self.__filetype = ResultReader.ERROR_TYPE
self.result = Error_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_description_dictionary(self.result_description.copy())
self.result.set_error_message(elem.text)
elif elem.tag == 'temp':
self.__filetype = ResultReader.TEMP_TYPE
self.result = Temp_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
elif elem.tag == 'conf':
self.__filetype = ResultReader.CONFIG_TYPE
self.result = Config_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
# xml file was traversed now prepare the data in one go
# prepare result data
if self.result is not None and \
self.__filetype == ResultReader.ADCDATA_TYPE and \
self.adc_result_sample_counter>0:
# fill the ADC_Result with collected data
# x data
self.result.x=numpy.arange(self.adc_result_sample_counter, dtype="Float64")/\
self.result.get_sampling_rate()
self.result.y = []
nChannels = self.result.get_nChannels()
# initialise the y arrays
for i in xrange(nChannels):
self.result.y.append(numpy.empty(self.adc_result_sample_counter, dtype='Int16'))
# remove from result stack
tmp_index = 0
while self.adc_result_parts:
tmp_part=self.adc_result_parts.pop(0)
tmp_size = tmp_part.size/nChannels
for i in xrange(nChannels):
# split interleaved data
self.result.y[i][tmp_index:tmp_index+tmp_size] = tmp_part[i::nChannels]
if self.result.index != []:
self.result.index.append((tmp_index, tmp_index+tmp_size-1))
else:
self.result.index = [(0,tmp_size-1)]
tmp_index += tmp_size
self.result.cont_data=True
tmp_part = None
def __parseFile_expat(self, in_file):
"Parses the given file, adding it to the result-queue"
self.result = None
self.in_description_section=False
self.result_description = { }
self.result_job_number = None
# the job date is set in get_result_object()
self.__filetype = None
# Expat XML-Parser & Binding handlers
self.xml_parser = xml.parsers.expat.ParserCreate()
self.xml_parser.StartElementHandler = self.__xmlStartTagFound
self.xml_parser.CharacterDataHandler = self.__xmlCharacterDataFound
self.xml_parser.EndElementHandler = self.__xmlEndTagFound
self.element_stack=[]
try:
# short version, but pyexpat buffers are awfully small
# self.xml_parser.ParseFile(in_file)
# read all, at least try
databuffer=in_file.read(-1)
# test whether really everything was read...
databuffer2=in_file.read(self.xml_parser.buffer_size)
if databuffer2=="":
# parse everything at once
self.xml_parser.Parse(databuffer,True)
else:
# do the first part ...
self.xml_parser.Parse(databuffer,False)
databuffer=databuffer2
# ... and again and again
while databuffer!="":
self.xml_parser.Parse(databuffer,False)
databuffer=in_file.read(-1)
self.xml_parser.Parse("",True)
except xml.parsers.expat.ExpatError, e:
print "result file %d: xml parser '%s' error at line %d, offset %d"%(self.no,
xml.parsers.expat.ErrorString(e.code),
e.lineno,
e.offset)
self.result = None
del databuffer
self.xml_parser.StartElementHandler=None
self.xml_parser.EndElementHandler=None
self.xml_parser.CharacterDataHandler=None
del self.xml_parser
# prepare result data
if self.result is not None and \
self.__filetype == ResultReader.ADCDATA_TYPE and \
self.adc_result_sample_counter>0:
# fill the ADC_Result with collected data
self.result.x=numpy.arange(self.adc_result_sample_counter, dtype="Float64")/\
self.result.get_sampling_rate()
self.result.y=[]
self.result.index=[]
for i in xrange(2):
self.result.y.append(numpy.empty((self.adc_result_sample_counter,), dtype="Int16"))
tmp_sample_counter=0
while self.adc_result_parts:
tmp_part=self.adc_result_parts.pop(0)
tmp_size=tmp_part.size/2
self.result.y[0][tmp_sample_counter:tmp_sample_counter+tmp_size]=tmp_part[::2]
self.result.y[1][tmp_sample_counter:tmp_sample_counter+tmp_size]=tmp_part[1::2]
self.result.index.append((tmp_sample_counter,tmp_sample_counter+tmp_size-1))
tmp_sample_counter+=tmp_size
self.result.cont_data=True
# Callback when a xml start tag is found
def __xmlStartTagFound(self, in_name, in_attribute):
# General Result-Tag
if in_name == "result":
self.result_job_number = int(in_attribute["job"])
# the job date is set in get_result_object()
# Description
elif in_name == "description":
# old style description:
if len(in_attribute)!=0:
self.result_description = in_attribute.copy()
self.in_description_section=True
self.in_description_data=()
elif self.in_description_section and in_name == "item":
self.in_description_data=[in_attribute["key"], in_attribute["type"], ""]
# ADC_Results
elif in_name == "adcdata":
self.__filetype = ResultReader.ADCDATA_TYPE
self.adc_result_trailing_chars = ""
if self.result is None:
self.result = ADC_Result()
# None: new guess for adc data encoding
# "a": ascii
# "b": base64
self.adc_data_encoding = None
self.result.set_sampling_rate(float(in_attribute["rate"]))
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_description_dictionary(self.result_description.copy())
title="ADC-Result: job-id=%d"%int(self.result_job_number)
if len(self.result_description)>0:
for k,v in self.result_description.iteritems():
title+=", %s=%s"%(k,v)
self.result.set_title(title)
self.result_description=None
self.adc_result_sample_counter = 0
self.adc_result_parts=[] # will contain arrays of sampled intervals, assumes same sample rate
else:
if float(in_attribute["rate"])!=self.result.get_sampling_rate():
print "sample rate different in ADC_Result, found %f, former value %f"%\
(float(in_attribute["rate"]),self.result.get_sampling_rate())
new_samples=int(in_attribute["samples"])
self.adc_result_sample_counter += new_samples
# version depends on the inclusion of http://bugs.python.org/issue1137
if sys.hexversion>=0x020501f0:
# extend buffer to expected base64 size (2 channels, 2 byte)
required_buffer=int(new_samples*4/45+1)*62
if self.xml_parser.buffer_size < required_buffer:
try:
self.xml_parser.buffer_size=required_buffer
except AttributeError:
pass
# pass all chardata as one block
self.xml_parser.buffer_text = True
# do not change the contents
self.xml_parser.returns_unicode=False
# Error_Results
elif in_name == "error":
self.__filetype = ResultReader.ERROR_TYPE
self.result = Error_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_description_dictionary(self.result_description.copy())
# Temp_Results
elif in_name == "temp":
self.__filetype = ResultReader.TEMP_TYPE
self.result = Temp_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
# Config_Results
elif in_name == "conf":
self.__filetype = ResultReader.CONFIG_TYPE
self.result = Config_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
# maintain the stack
self.element_stack.append(in_name)
def __xmlCharacterDataFound(self, in_cdata):
if self.in_description_section and len(self.in_description_data):
self.in_description_data[2]+=in_cdata
# ADC_Result
elif self.__filetype == ResultReader.ADCDATA_TYPE and self.element_stack[-1]=="adcdata":
self.adc_result_trailing_chars+=in_cdata
# Error_Result
elif self.__filetype == ResultReader.ERROR_TYPE:
tmp_string = self.result.get_error_message()
if tmp_string is None: tmp_string = ""
tmp_string += in_cdata
self.result.set_error_message(tmp_string)
# Temp_Results
elif self.__filetype == ResultReader.TEMP_TYPE:
pass
# Config_Results
elif self.__filetype == ResultReader.CONFIG_TYPE:
pass
def __xmlEndTagFound(self, in_name):
# maintain the stack
self.element_stack.pop()
if in_name == "adcdata":
# ADC_Result
if self.__filetype == ResultReader.ADCDATA_TYPE:
# detect type of data encoding from first line
if self.adc_data_encoding is None:
self.adc_result_trailing_chars=self.adc_result_trailing_chars.strip()
first_line_end=self.adc_result_trailing_chars.find("\n")
first_line=""
if first_line_end!=-1:
first_line=self.adc_result_trailing_chars[:first_line_end]
else:
first_line=self.adc_result_trailing_chars
if len(first_line.lstrip("-0123456789 \t\n\r"))==0:
try:
map(int,filter(len,first_line.split()))
except ValueError,e:
pass
else:
self.adc_data_encoding="a"
if self.adc_data_encoding is None and len(first_line)%4==0:
try:
base64.standard_b64decode(first_line)
except TypeError:
pass
else:
self.adc_data_encoding="b"
if self.adc_data_encoding is None:
print "unknown ADC data format \"%s\""%first_line
tmp=None
if self.adc_data_encoding=="a":
values=map(int,self.adc_result_trailing_chars.split())
tmp=numpy.array(values, dtype="Int16")
elif self.adc_data_encoding=="b":
tmp_string=base64.standard_b64decode(self.adc_result_trailing_chars)
tmp=numpy.fromstring(tmp_string, dtype="Int16")
del tmp_string
else:
print "unknown ADC data format"
self.adc_result_trailing_chars=""
self.adc_result_parts.append(tmp)
del tmp
return
elif in_name == "description":
self.in_description_section=False
elif self.in_description_section and in_name == "item":
# make item contents to dictionary item:
k,t,v=self.in_description_data
self.in_description_data=()
if t == "None":
self.result_description[k]=None
if t == "Float":
self.result_description[k]=float(v)
elif t == "Int":
self.result_description[k]=int(v)
elif t == "Long":
self.result_description[k]=long(v)
elif t == "Complex":
self.result_description[k]=complex(v)
elif t == "Boolean":
self.result_description[k]=bool(v)
elif t == "String":
self.result_description[k]=v
else:
# Anything else will be handled as a string
# Probably "repr".
self.result_description[k]=v
elif in_name == "result":
pass
# Error_Result
elif self.__filetype == ResultReader.ERROR_TYPE:
pass
# Temp_Result
elif self.__filetype == ResultReader.TEMP_TYPE:
pass
# Config_Result
elif self.__filetype == ResultReader.CONFIG_TYPE:
pass
class BlockingResultReader(ResultReader):
"""
to follow an active result stream
"""
def __init__(self, spool_dir=".", no=0, result_pattern="job.%09d.result", clear_jobs=False, clear_results=False):
ResultReader.__init__(self, spool_dir, no, result_pattern, clear_jobs=clear_jobs, clear_results=clear_results)
self.stop_no=None # end of job queue
self.poll_time=0.1 # sleep interval for polling results, <0 means no polling and stop
self.in_advance=0
def __iter__(self):
"""
get next job with iterator
block until result is available
"""
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
while (not self.quit_flag.isSet()) and (self.stop_no is None or self.stop_no>self.no):
if not os.access(expected_filename,os.R_OK):
# stop polling, if required
if self.poll_time<0: break
self.quit_flag.wait(self.poll_time)
continue
# find pending results
self.in_advance=max(self.no,self.in_advance)
in_advance_filename=os.path.join(self.spool_dir,self.result_pattern%(self.in_advance+1))
while os.access(in_advance_filename, os.R_OK) and (self.stop_no is None or self.stop_no>self.in_advance+1):
# do not fetch more than 100 results in advance at one glance
if self.in_advance>self.no+100: break
self.in_advance+=1
in_advance_filename=os.path.join(self.spool_dir,self.result_pattern%(self.in_advance+1))
if self.quit_flag.isSet(): break
r=self.get_result_object(expected_filename)
if self.quit_flag.isSet(): break
yield r
if self.clear_results:
if os.path.isfile(expected_filename): os.remove(expected_filename)
if self.clear_jobs:
if os.path.isfile(expected_filename[:-7]): os.remove(expected_filename[:-7])
self.no+=1
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
return
def quit(self):
self.quit_flag.set()
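# Usage sketch (spool path hypothetical): follow a running backend's results:
#   reader = BlockingResultReader("/var/spool/damaris", no=0, clear_results=True)
#   for res in reader:
#       handle(res)   # "handle" stands for arbitrary user code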

0
src/gui/__init__.py Normal file
View File

2106
src/gui/damaris.glade Normal file

File diff suppressed because it is too large

8
src/gui/damaris.gladep Normal file
View File

@ -0,0 +1,8 @@
<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
<!DOCTYPE glade-project SYSTEM "http://glade.gnome.org/glade-project-2.0.dtd">
<glade-project>
<name>damaris-gui</name>
<program_name>damaris</program_name>
<gnome_support>FALSE</gnome_support>
</glade-project>

681
src/gui/gtkcodebuffer.py Normal file
View File

@ -0,0 +1,681 @@
""" This module contains the PyGTKCodeBuffer-class. This class is a
specialisation of the gtk.TextBuffer and enables syntax-highlighting for
PyGTK's TextView-widget.
To use the syntax-highlighting feature you have to load a syntax-definition or
specify your own. To load one please read the docs for the SyntaxLoader()
class. """
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import pango
import re
import sys
import os.path
import xml.sax
import imp
from xml.sax.handler import ContentHandler
from xml.sax.saxutils import unescape
__version__ = "1.0RC2"
__author__ = "Hannes Matuschek <hmatuschek@gmail.com>"
# defined the default styles
DEFAULT_STYLES = {
'DEFAULT': {'font': 'monospace'},
'comment': {'foreground': '#0000FF'},
'preprocessor': {'foreground': '#A020F0',
'weight': pango.WEIGHT_BOLD},
'keyword': {'foreground': '#A52A2A',
'weight': pango.WEIGHT_BOLD},
'special': {'foreground': '#006600'},
'mark1': {'foreground': '#008B8B'},
'mark2': {'foreground': '#6A5ACD'},
'string': {'foreground': '#CC00CC'},
'number': {'foreground': '#CC00CC'},
'datatype': {'foreground': '#2E8B57',
'weight': pango.WEIGHT_BOLD},
'function': {'foreground': '#008A8C'},
'link': {'foreground': '#0000FF',
'underline': pango.UNDERLINE_SINGLE}}
def _main_is_frozen():
""" Internal used function. """
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
if _main_is_frozen():
this_module_path = os.path.dirname(sys.executable)
else:
this_module_path = os.path.abspath(os.path.dirname(__file__))
# defines default-search paths for syntax-files
SYNTAX_PATH = [ os.path.join('.', 'syntax'),
this_module_path,
os.path.join(os.path.expanduser('~'),".pygtkcodebuffer"),
os.path.join(sys.prefix,"share","pygtkcodebuffer","syntax")]
# enable/disable debug-messages
DEBUG_FLAG = False
#
# Some log functions...
# (internal used)
def _log_debug(msg):
if not DEBUG_FLAG:
return
sys.stderr.write("DEBUG: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def _log_warn(msg):
sys.stderr.write("WARN: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def _log_error(msg):
sys.stderr.write("ERROR: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def add_syntax_path(path_or_list):
""" This function adds one (string) or many (list of strings) paths to the
global search-paths for syntax-files. """
global SYNTAX_PATH
# handle list of strings
if isinstance(path_or_list, (list, tuple)):
for i in range(len(path_or_list)):
SYNTAX_PATH.insert(0, path_or_list[-i])
# handle single string
elif isinstance(path_or_list, basestring):
SYNTAX_PATH.insert(0, path_or_list)
# handle attr-error
else:
raise TypeError, "Argument must be path-string or list of strings"
class Pattern:
""" More or less internal used class representing a pattern. You may use
this class to "hard-code" your syntax-definition. """
def __init__(self, regexp, style="DEFAULT", group=0, flags=""):
""" The constructor takes at least on argument: the regular-expression.
The optional kwarg style defines the style applied to the string
matched by the regexp.
The kwarg group may be used to define which group of the regular
expression will be used for highlighting (Note: This means that only
the selected group will be highlighted but the complete pattern must
match!)
The optional kwarg flags specifies flags for the regular expression.
Look at the Python lib-ref for a list of flags and their meaning."""
# assemble re-flag
flags += "ML"; flag = 0
_log_debug("init rule %s -> %s (%s)"%(regexp, style, flags))
for char in flags:
if char == 'M': flag |= re.M
if char == 'L': flag |= re.L
if char == 'S': flag |= re.S
if char == 'I': flag |= re.I
if char == 'U': flag |= re.U
if char == 'X': flag |= re.X
# compile re
try: self._regexp = re.compile(regexp, flag)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
self._group = group
self.tag_name = style
def __call__(self, txt, start, end):
m = self._regexp.search(txt)
if not m: return None
mstart, mend = m.start(self._group), m.end(self._group)
s = start.copy(); s.forward_chars(mstart)
e = start.copy(); e.forward_chars(mend)
return (s,e)
class KeywordList(Pattern):
""" This class may be used for hard-code a syntax-definition. It specifies
a pattern for a keyword-list. This simplifies the definition of
keyword-lists. """
def __init__(self, keywords, style="keyword", flags=""):
""" The constructor takes at least on argument: A list of strings
specifying the keywords to highlight.
The optional kwarg style specifies the style used to highlight these
keywords.
The optional kwarg flags specifies the flags for the
(internal generated) regular-expression. """
regexp = "(?:\W|^)(%s)\W"%("|".join(keywords),)
Pattern.__init__(self, regexp, style, group=1, flags=flags)
class String:
""" This class may be used to hard-code a syntax-definition. It simplifies
the definition of a "string". A "string" is something that consists of
a start-pattern and an end-pattern. The end-pattern may be content of
the string if it is escaped. """
def __init__(self, starts, ends, escape=None, style="string"):
""" The constructor needs at least two arguments: The start- and
end-pattern.
The optional kwarg escape specifies an escape-sequence escaping the
end-pattern.
The optional kwarg style specifies the style used to highlight the
string. """
try:
self._starts = re.compile(starts)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
if escape:
end_exp = "[^%(esc)s](?:%(esc)s%(esc)s)*%(end)s"
end_exp = end_exp%{'esc':escape*2,'end':ends}
else:
end_exp = ends
try:
self._ends = re.compile(end_exp)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
self.tag_name = style
def __call__(self, txt, start, end):
start_match = self._starts.search(txt)
if not start_match: return
start_it = start.copy()
start_it.forward_chars(start_match.start(0))
end_it = end.copy()
end_match = self._ends.search(txt, start_match.end(0)-1)
if end_match:
end_it.set_offset(start.get_offset()+end_match.end(0))
return start_it, end_it
class LanguageDefinition:
""" This class is a container class for all rules (Pattern, KeywordList,
...) specifying the language. You have to used this class if you like
to hard-code your syntax-definition. """
def __init__(self, rules):
""" The constructor takes only one argument: A list of rules (i.e
Pattern, KeywordList and String). """
self._grammar = rules
self._styles = dict()
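# Sketch of a hard-coded definition built from these classes (the patterns
# are illustrative only):
#   rules = [KeywordList(['def', 'class', 'import']),
#            String(r'"', r'"', escape='\\'),
#            Pattern(r'#.*$', style='comment')]
#   lang = LanguageDefinition(rules)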
def __call__(self, buf, start, end=None):
# if no end given -> end of buffer
if not end: end = buf.get_end_iter()
mstart = mend = end
mtag = None
txt = buf.get_slice(start, end)
# search min match
for rule in self._grammar:
# search pattern
m = rule(txt, start, end)
if not m: continue
# prefer match with smallest start-iter
if m[0].compare(mstart) < 0:
mstart, mend = m
mtag = rule.tag_name
continue
if m[0].compare(mstart)==0 and m[1].compare(mend)>0:
mstart, mend = m
mtag = rule.tag_name
continue
return (mstart, mend, mtag)
def get_styles(self):
return self._styles
class SyntaxLoader(ContentHandler, LanguageDefinition):
""" This class loads a syntax definition. There have to be a file
named LANGUAGENAME.xml in one of the directories specified in the
global path-list. You may add a directory using the add_syntax_path()
function. """
# some translation-tables for the style-defs:
style_weight_table = {'ultralight': pango.WEIGHT_ULTRALIGHT,
'light': pango.WEIGHT_LIGHT,
'normal': pango.WEIGHT_NORMAL,
'bold': pango.WEIGHT_BOLD,
'ultrabold': pango.WEIGHT_ULTRABOLD,
'heavy': pango.WEIGHT_HEAVY}
style_variant_table = {'normal': pango.VARIANT_NORMAL,
'smallcaps': pango.VARIANT_SMALL_CAPS}
style_underline_table = {'none': pango.UNDERLINE_NONE,
'single': pango.UNDERLINE_SINGLE,
'double': pango.UNDERLINE_DOUBLE}
style_style_table = {'normal': pango.STYLE_NORMAL,
'oblique': pango.STYLE_OBLIQUE,
'italic': pango.STYLE_ITALIC}
style_scale_table = {
'xx_small': pango.SCALE_XX_SMALL,
'x_small': pango.SCALE_X_SMALL,
'small': pango.SCALE_SMALL,
'medium': pango.SCALE_MEDIUM,
'large': pango.SCALE_LARGE,
'x_large': pango.SCALE_X_LARGE,
'xx_large': pango.SCALE_XX_LARGE,
}
def __init__(self, lang_name):
""" The constructor takes only one argument: the language name.
The constructor tries to load the syntax-definition from a
syntax-file in one directory of the global path-list.
An instance of this class IS a LanguageDefinition. You can pass it
to the constructor of the CodeBuffer class. """
LanguageDefinition.__init__(self, [])
ContentHandler.__init__(self)
# search for syntax-files:
fname = None
for syntax_dir in SYNTAX_PATH:
fname = os.path.join(syntax_dir, "%s.xml"%lang_name)
if os.path.isfile(fname): break
_log_debug("Loading syntaxfile %s"%fname)
if not os.path.isfile(fname):
raise Exception("No snytax-file for %s found!"%lang_name)
xml.sax.parse(fname, self)
# Dispatch start/end - document/element and chars
def startDocument(self):
self.__stack = []
def endDocument(self):
del self.__stack
def startElement(self, name, attr):
self.__stack.append( (name, attr) )
if hasattr(self, "start_%s"%name):
handler = getattr(self, "start_%s"%name)
handler(attr)
def endElement(self, name):
if hasattr(self, "end_%s"%name):
handler = getattr(self, "end_%s"%name)
handler()
del self.__stack[-1]
def characters(self, txt):
if not self.__stack: return
name, attr = self.__stack[-1]
if hasattr(self, "chars_%s"%name):
handler = getattr(self, "chars_%s"%name)
handler(txt)
# Handle regexp-patterns
def start_pattern(self, attr):
self.__pattern = ""
self.__group = 0
self.__flags = ''
self.__style = attr['style']
if 'group' in attr.keys(): self.__group = int(attr['group'])
if 'flags' in attr.keys(): self.__flags = attr['flags']
def end_pattern(self):
rule = Pattern(self.__pattern, self.__style, self.__group, self.__flags)
self._grammar.append(rule)
del self.__pattern
del self.__group
del self.__flags
del self.__style
def chars_pattern(self, txt):
self.__pattern += unescape(txt)
# handle keyword-lists
def start_keywordlist(self, attr):
self.__style = "keyword"
self.__flags = ""
if 'style' in attr.keys():
self.__style = attr['style']
if 'flags' in attr.keys():
self.__flags = attr['flags']
self.__keywords = []
def end_keywordlist(self):
kwlist = KeywordList(self.__keywords, self.__style, self.__flags)
self._grammar.append(kwlist)
del self.__keywords
del self.__style
del self.__flags
def start_keyword(self, attr):
self.__keywords.append("")
def end_keyword(self):
if not self.__keywords[-1]:
del self.__keywords[-1]
def chars_keyword(self, txt):
parent,pattr = self.__stack[-2]
if not parent == "keywordlist": return
self.__keywords[-1] += unescape(txt)
#handle String-definitions
def start_string(self, attr):
self.__style = "string"
self.__escape = None
if 'escape' in attr.keys():
self.__escape = attr['escape']
if 'style' in attr.keys():
self.__style = attr['style']
self.__start_pattern = ""
self.__end_pattern = ""
def end_string(self):
strdef = String(self.__start_pattern, self.__end_pattern,
self.__escape, self.__style)
self._grammar.append(strdef)
del self.__style
del self.__escape
del self.__start_pattern
del self.__end_pattern
def chars_starts(self, txt):
self.__start_pattern += unescape(txt)
def chars_ends(self, txt):
self.__end_pattern += unescape(txt)
# handle style
def start_style(self, attr):
self.__style_props = dict()
self.__style_name = attr['name']
def end_style(self):
self._styles[self.__style_name] = self.__style_props
del self.__style_props
del self.__style_name
def start_property(self, attr):
self.__style_prop_name = attr['name']
def chars_property(self, value):
value = value.strip()
# convert value
if self.__style_prop_name in ['font','foreground','background',]:
pass
elif self.__style_prop_name == 'variant':
if not value in self.style_variant_table.keys():
raise Exception("Unknown style-variant: %s"%value)
value = self.style_variant_table[value]
elif self.__style_prop_name == 'underline':
if not value in self.style_underline_table.keys():
raise Exception("Unknown underline-style: %s"%value)
value = self.style_underline_table[value]
elif self.__style_prop_name == 'scale':
if not value in self.style_scale_table.keys():
raise Exception("Unknown scale-style: %s"%value)
value = self.style_scale_table[value]
elif self.__style_prop_name == 'weight':
if not value in self.style_weight_table.keys():
raise Exception("Unknown style-weight: %s"%value)
value = self.style_weight_table[value]
elif self.__style_prop_name == 'style':
if not value in self.style_style_table.keys():
raise Exception("Unknown text-style: %s"%value)
value = self.style_style_table[value]
else:
raise Exception("Unknown style-property %s"%self.__style_prop_name)
# store value
self.__style_props[self.__style_prop_name] = value
class CodeBuffer(gtk.TextBuffer):
""" This class extends the gtk.TextBuffer to support syntax-highlighting.
You can use this class like a normal TextBuffer. """
def __init__(self, table=None, lang=None, styles={}):
""" The constructor takes 3 optional arguments.
table specifies a tag-table associated with the TextBuffer-instance.
This argument will be passed directly to the constructor of the
TextBuffer-class.
lang specifies the language-definition. You have to load one using
the SyntaxLoader-class or you may hard-code your syntax-definition
using the LanguageDefinition-class.
styles is a dictionary used to extend or overwrite the default styles
provided by this module (DEFAULT_STYLE) and any language specific
styles defined by the LanguageDefinition. """
gtk.TextBuffer.__init__(self, table)
# default styles (copied so instance changes don't alter the module default)
self.styles = dict(DEFAULT_STYLES)
# update styles with lang-spec:
if lang:
self.styles.update(lang.get_styles())
# update styles with user-defined
self.styles.update(styles)
# create tags
for name, props in self.styles.items():
style = dict(self.styles['DEFAULT']) # take default
style.update(props) # and update with props
self.create_tag(name, **style)
# store lang-definition
self._lang_def = lang
self.connect_after("insert-text", self._on_insert_text)
self.connect_after("delete-range", self._on_delete_range)
self.connect('apply-tag', self._on_apply_tag)
self._apply_tags = False
def _on_apply_tag(self, buf, tag, start, end):
# FIXME This is a hack! It allows apply-tag only while
# _on_insert_text() and _on_delete_range()
if not self._apply_tags:
self.emit_stop_by_name('apply-tag')
return True
_log_debug("tag \"%s\" as %s"%(self.get_slice(start,end), tag.get_property("name")))
def _on_insert_text(self, buf, it, text, length):
# if no syntax defined -> nop
if not self._lang_def: return False
it = it.copy()
it.backward_chars(length)
if not it.begins_tag():
it.backward_to_tag_toggle(None)
_log_debug("Not tag-start -> moved iter to %i (%s)"%(it.get_offset(), it.get_char()))
if it.begins_tag(self.get_tag_table().lookup("DEFAULT")):
it.backward_to_tag_toggle(None)
_log_debug("Iter at DEFAULT-start -> moved to %i (%s)"%(it.get_offset(), it.get_char()))
self._apply_tags = True
self.update_syntax(it)
self._apply_tags = False
def _on_delete_range(self, buf, start, end):
# if no syntax defined -> nop
if not self._lang_def: return False
start = start.copy()
if not start.begins_tag():
start.backward_to_tag_toggle(None)
self._apply_tags = True
self.update_syntax(start)
self._apply_tags = False
def update_syntax(self, start, end=None):
""" More or less internal used method to update the
syntax-highlighting. """
# if no lang set
if not self._lang_def: return
_log_debug("Update syntax from %i"%start.get_offset())
# if not end defined
if not end: end = self.get_end_iter()
# We do not use recursion -> long files exceed rec-limit!
finished = False
while not finished:
# search first rule matching txt[start..end]
mstart, mend, tagname = self._lang_def(self, start, end)
# optimisation: if mstart..mend is already tagged with tagname
# -> finished
if tagname: #if something found
tag = self.get_tag_table().lookup(tagname)
if mstart.begins_tag(tag) and mend.ends_tag(tag) and not mstart.equal(start):
self.remove_all_tags(start,mstart)
self.apply_tag_by_name("DEFAULT", start, mstart)
_log_debug("Optimized: Found old tag at %i (%s)"%(mstart.get_offset(), mstart.get_char()))
# finish
finished = True
continue
# remove all tags from start..mend (mend == buffer-end if no match)
self.remove_all_tags(start, mend)
# make start..mstart = DEFAULT (mstart == buffer-end if no match)
if not start.equal(mstart):
_log_debug("Apply DEFAULT")
self.apply_tag_by_name("DEFAULT", start, mstart)
# nothing found -> finished
if not tagname:
finished = True
continue
# apply tag
_log_debug("Apply %s"%tagname)
self.apply_tag_by_name(tagname, mstart, mend)
start = mend
if start == end:
finished = True
continue
def reset_language(self, lang_def):
""" Reset the currently used language-definition. """
# remove all tags from complete text
start = self.get_start_iter()
self.remove_all_tags(start, self.get_end_iter())
# store lexer
self._lang_def = lang_def
# update styles from lang_def:
if self._lang_def:
self.update_styles(self._lang_def.get_styles())
# and ...
self._apply_tags = True
self.update_syntax(start)
self._apply_tags = False
def update_styles(self, styles):
""" Update styles. This method may be used to reset any styles at
runtime. """
self.styles.update(styles)
table = self.get_tag_table()
for name, props in styles.items():
style = dict(self.styles['DEFAULT']) # take a copy of the defaults
style.update(props)
# if tagname is unknown:
if not table.lookup(name):
_log_debug("Create tag: %s (%s)"%(name, style))
self.create_tag(name, **style)
else: # update tag
tag = table.lookup(name)
_log_debug("Update tag %s with (%s)"%(name, style))
for prop, val in style.items():
tag.set_property(prop, val)
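For illustration, a minimal usage sketch in the context of this module: it assumes the SyntaxLoader class mentioned in the constructor docstring accepts a language name (its exact signature and file lookup are assumptions here) and attaches the buffer to a plain gtk.TextView:

import gtk

lang = SyntaxLoader("python")   # assumed signature: load a definition by name
buf = CodeBuffer(lang=lang)     # behaves like a normal gtk.TextBuffer
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
win.add(gtk.TextView(buf))
win.connect("destroy", lambda w: gtk.main_quit())
win.show_all()
gtk.main()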

210
src/gui/python.xml Normal file
View File

@ -0,0 +1,210 @@
<?xml version="1.0"?>
<!--
This syntax-file was generated by sourceview2codebuffer.xsl from
GtkSourceView's Python-syntax-file!
This transformation is not perfect, so it may need some hand-work to fix
minor issues in this file.
You can get sourceview2codebuffer.xsl from http://pygtkcodebuffer.googlecode.com/.
-->
<syntax>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?"""</starts><ends>"""</ends></string>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?'''</starts><ends>'''</ends></string>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?"</starts><ends>"</ends></string>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?'</starts><ends>'</ends></string>
<keywordlist style="preprocessor">
<keyword>import</keyword>
<keyword>from</keyword>
<keyword>as</keyword>
<keyword>False</keyword>
<keyword>None</keyword>
<keyword>True</keyword>
<keyword>__name__</keyword>
<keyword>__debug__</keyword>
</keywordlist>
<keywordlist style="keyword">
<keyword>def</keyword>
<keyword>class</keyword>
<keyword>return</keyword>
</keywordlist>
<keywordlist style="keyword">
<keyword>and</keyword>
<keyword>assert</keyword>
<keyword>break</keyword>
<keyword>continue</keyword>
<keyword>del</keyword>
<keyword>elif</keyword>
<keyword>else</keyword>
<keyword>except</keyword>
<keyword>exec</keyword>
<keyword>finally</keyword>
<keyword>for</keyword>
<keyword>global</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>is</keyword>
<keyword>lambda</keyword>
<keyword>not</keyword>
<keyword>or</keyword>
<keyword>pass</keyword>
<keyword>print</keyword>
<keyword>raise</keyword>
<keyword>try</keyword>
<keyword>while</keyword>
<keyword>yield</keyword>
</keywordlist>
<keywordlist style="special">
<keyword>ArithmeticError</keyword>
<keyword>AssertionError</keyword>
<keyword>AttributeError</keyword>
<keyword>EnvironmentError</keyword>
<keyword>EOFError</keyword>
<keyword>Exception</keyword>
<keyword>FloatingPointError</keyword>
<keyword>ImportError</keyword>
<keyword>IndentationError</keyword>
<keyword>IndexError</keyword>
<keyword>IOError</keyword>
<keyword>KeyboardInterrupt</keyword>
<keyword>KeyError</keyword>
<keyword>LookupError</keyword>
<keyword>MemoryError</keyword>
<keyword>NameError</keyword>
<keyword>NotImplementedError</keyword>
<keyword>OSError</keyword>
<keyword>OverflowError</keyword>
<keyword>ReferenceError</keyword>
<keyword>RuntimeError</keyword>
<keyword>StandardError</keyword>
<keyword>StopIteration</keyword>
<keyword>SyntaxError</keyword>
<keyword>SystemError</keyword>
<keyword>SystemExit</keyword>
<keyword>TabError</keyword>
<keyword>TypeError</keyword>
<keyword>UnboundLocalError</keyword>
<keyword>UnicodeDecodeError</keyword>
<keyword>UnicodeEncodeError</keyword>
<keyword>UnicodeError</keyword>
<keyword>UnicodeTranslateError</keyword>
<keyword>ValueError</keyword>
<keyword>WindowsError</keyword>
<keyword>ZeroDivisionError</keyword>
<keyword>Warning</keyword>
<keyword>UserWarning</keyword>
<keyword>DeprecationWarning</keyword>
<keyword>PendingDeprecationWarning</keyword>
<keyword>SyntaxWarning</keyword>
<keyword>OverflowWarning</keyword>
<keyword>RuntimeWarning</keyword>
<keyword>FutureWarning</keyword>
<keyword>__import__</keyword>
<keyword>abs</keyword>
<keyword>apply</keyword>
<keyword>basestring</keyword>
<keyword>bool</keyword>
<keyword>buffer</keyword>
<keyword>callable</keyword>
<keyword>chr</keyword>
<keyword>classmethod</keyword>
<keyword>cmp</keyword>
<keyword>coerce</keyword>
<keyword>compile</keyword>
<keyword>complex</keyword>
<keyword>delattr</keyword>
<keyword>dict</keyword>
<keyword>dir</keyword>
<keyword>divmod</keyword>
<keyword>enumerate</keyword>
<keyword>eval</keyword>
<keyword>execfile</keyword>
<keyword>file</keyword>
<keyword>filter</keyword>
<keyword>float</keyword>
<keyword>getattr</keyword>
<keyword>globals</keyword>
<keyword>hasattr</keyword>
<keyword>hash</keyword>
<keyword>hex</keyword>
<keyword>id</keyword>
<keyword>input</keyword>
<keyword>int</keyword>
<keyword>intern</keyword>
<keyword>isinstance</keyword>
<keyword>issubclass</keyword>
<keyword>iter</keyword>
<keyword>len</keyword>
<keyword>list</keyword>
<keyword>locals</keyword>
<keyword>long</keyword>
<keyword>map</keyword>
<keyword>max</keyword>
<keyword>min</keyword>
<keyword>object</keyword>
<keyword>oct</keyword>
<keyword>open</keyword>
<keyword>ord</keyword>
<keyword>pow</keyword>
<keyword>property</keyword>
<keyword>range</keyword>
<keyword>raw_input</keyword>
<keyword>reduce</keyword>
<keyword>reload</keyword>
<keyword>repr</keyword>
<keyword>round</keyword>
<keyword>setattr</keyword>
<keyword>slice</keyword>
<keyword>staticmethod</keyword>
<keyword>str</keyword>
<keyword>sum</keyword>
<keyword>super</keyword>
<keyword>tuple</keyword>
<keyword>type</keyword>
<keyword>unichr</keyword>
<keyword>unicode</keyword>
<keyword>vars</keyword>
<keyword>xrange</keyword>
<keyword>zip</keyword>
</keywordlist>
<!-- Some Experiment keywords -->
<keywordlist style="special">
<keyword>set_pfg</keyword>
<keyword>set_pfg_wt</keyword>
<keyword>set_description</keyword>
<keyword>get_description</keyword>
<keyword>set_phase</keyword>
<keyword>set_frequency</keyword>
<keyword>ttl_pulse</keyword>
<keyword>rf_pulse</keyword>
<keyword>state_start</keyword>
<keyword>state_end</keyword>
<keyword>loop_start</keyword>
<keyword>loop_end</keyword>
<keyword>set_pts_local</keyword>
<keyword>wait</keyword>
<keyword>record</keyword>
</keywordlist>
<keywordlist style="datatype">
<keyword>Accumulation</keyword>
<keyword>Experiment</keyword>
<keyword>ADC_Result</keyword>
<keyword>MeasurementResult</keyword>
<keyword>AccumulatedValue</keyword>
</keywordlist>
<pattern style="comment">#.*$</pattern>
<pattern style="datatype">\bself\b</pattern>
<pattern style="number">\b([1-9][0-9]*|0)([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b</pattern>
<pattern style="number">\b([0-9]+[Ee][-]?[0-9]+|([0-9]*\.[0-9]+|[0-9]+\.)([Ee][-]?[0-9]+)?)[fFlL]?</pattern>
<pattern style="number">\b0[0-7]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b</pattern>
<pattern style="number">\b0[xX][0-9a-fA-F]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b</pattern>
</syntax>
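Note that the number patterns still carry C-style integer suffixes ([UuLl] combinations), an artifact of the XSL transformation mentioned in the header comment. A quick sanity check of the hexadecimal pattern, assuming the buffer applies these with Python's re semantics:

import re
hex_pat = re.compile(r"\b0[xX][0-9a-fA-F]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b")
print hex_pat.search("mask = 0xFF00").group(0)   # -> 0xFF00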

152
src/gui/script_interface.py Normal file
View File

@ -0,0 +1,152 @@
#! /usr/bin/env python
import time
import sys
import os
import os.path
import tables
import damaris.data.DataPool as DataPool
import damaris.gui.ResultReader as ResultReader
import damaris.gui.ExperimentWriter as ExperimentWriter
import damaris.gui.BackendDriver as BackendDriver
import damaris.gui.ResultHandling as ResultHandling
import damaris.gui.ExperimentHandling as ExperimentHandling
def some_listener(event):
if event.subject=="__recentexperiment" or event.subject=="__recentresult":
r=event.origin.get("__recentresult",-1)+1
e=event.origin.get("__recentexperiment",-1)+1
if e!=0:
ratio=100.0*r/e
else:
ratio=100.0
print "\r%d/%d (%.0f%%)"%(r,e,ratio),
class ScriptInterface:
def __init__(self, exp_script=None, res_script=None, backend_executable=None, spool_dir="spool"):
self.exp_script=exp_script
self.res_script=res_script
self.backend_executable=backend_executable
self.spool_dir=os.path.abspath(spool_dir)
self.exp_handling=self.res_handling=None
self.exp_writer=self.res_reader=self.back_driver=None
if self.backend_executable is not None:
self.back_driver=BackendDriver.BackendDriver(self.backend_executable, spool_dir)
if self.exp_script: self.exp_writer=self.back_driver.get_exp_writer()
if self.res_script: self.res_reader=self.back_driver.get_res_reader()
else:
self.back_driver=None
if self.exp_script: self.exp_writer=ExperimentWriter.ExperimentWriter(spool_dir)
if self.res_script: self.res_reader=ResultReader.ResultReader(spool_dir)
self.data=DataPool.DataPool()
def runScripts(self):
# get script engines
if self.exp_script and self.exp_writer:
self.exp_handling=ExperimentHandling.ExperimentHandling(self.exp_script, self.exp_writer, self.data)
if self.res_script and self.res_reader:
self.res_handling=ResultHandling.ResultHandling(self.res_script, self.res_reader, self.data)
# start them
if self.exp_handling: self.exp_handling.start()
if self.back_driver is not None: self.back_driver.start()
if self.res_handling: self.res_handling.start()
def waitForScriptsEnding(self):
# time of last dump
dump_interval=600
next_dump_time=time.time()+dump_interval
# keyboard interrupts are handled in extra cleanup loop
try:
while filter(None,[self.exp_handling,self.res_handling,self.back_driver]):
time.sleep(0.1)
if time.time()>next_dump_time:
self.dump_data("pool/data_pool.h5")
next_dump_time+=dump_interval
if self.exp_handling is not None:
if not self.exp_handling.isAlive():
self.exp_handling.join()
if self.exp_handling.raised_exception:
print ": experiment script failed at line %d (function %s): %s"%(self.exp_handling.location[0],
self.exp_handling.location[1],
self.exp_handling.raised_exception)
else:
print ": experiment script finished"
self.exp_handling = None
if self.res_handling is not None:
if not self.res_handling.isAlive():
self.res_handling.join()
if self.res_handling.raised_exception:
print ": result script failed at line %d (function %s): %s"%(self.res_handling.location[0],
self.res_handling.location[1],
self.res_handling.raised_exception)
else:
print ": result script finished"
self.res_handling = None
if self.back_driver is not None:
if not self.back_driver.isAlive():
print ": backend finished"
self.back_driver=None
except KeyboardInterrupt:
still_running=filter(None,[self.exp_handling,self.res_handling,self.back_driver])
for r in still_running:
r.quit_flag.set()
for r in still_running:
r.join()
def dump_data(self, filename):
try:
# write data from pool
dump_file=tables.openFile(filename,mode="w",title="DAMARIS experiment data")
self.data.write_hdf5(dump_file, complib='zlib', complevel=6)
# write scripts
scriptgroup=dump_file.createGroup("/","scripts","Used Scripts")
dump_file.createArray(scriptgroup,"experiment_script", self.exp_script)
dump_file.createArray(scriptgroup,"result_script", self.res_script)
dump_file.createArray(scriptgroup,"backend_executable", self.backend_executable)
dump_file.createArray(scriptgroup,"spool_directory", self.spool_dir)
dump_file.flush()
dump_file.close()
dump_file=None
# todo
except Exception,e:
print "dump failed", e
if __name__=="__main__":
if len(sys.argv)<3:
print "usage: %s experiment_script result_script [spool directory]"%sys.argv[0]
sys.exit(1)
if len(sys.argv)==3:
spool_dir=os.getcwd()
else:
spool_dir=sys.argv[3]
expscriptfile=open(sys.argv[1])
expscript=expscriptfile.read()
resscriptfile=open(sys.argv[2])
resscript=resscriptfile.read()
si=ScriptInterface(expscript, resscript,"/usr/lib/damaris/backends/Mobilecore", spool_dir)
si.data.register_listener(some_listener)
si.runScripts()
si.waitForScriptsEnding()
si.dump_data("data_pool.h5")
si=None
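The class can also be driven programmatically and without a backend; a minimal sketch (the file names are placeholders, and passing None as backend_executable makes the writer and reader work on the spool directory directly):

exp_script = open("experiment_script.py").read()   # placeholder names
res_script = open("result_script.py").read()
si = ScriptInterface(exp_script, res_script, None, spool_dir="spool")
si.runScripts()
si.waitForScriptsEnding()
si.dump_data("data_pool.h5")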

0
src/tools/__init__.py Normal file
View File

119
src/tools/eurotherm.py Normal file
View File

@ -0,0 +1,119 @@
import serial
import re
import operator
DEBUG=False
reply_pattern = re.compile(r"\x02..(.*)\x03.", re.DOTALL)
# example answer '\x02PV279.8\x03/'
# [EOT] = \x04
# [STX] = \x02
# [ENQ] = \x05
# [ETX] = \x03
# [ACK] = \x06
# BCC = checksum
standard_device='0011'
EOT = '\x04'
STX = '\x02'
ENQ = '\x05'
ETX = '\x03'
ACK = '\x06'
NAK = '\x15'
"""
Parameter read example:
Master: [EOT]0011PV[ENQ]
Instrument: [STX]PV16.4[ETX]{BCC}
Writing data:
Master: [EOT] {GID}{GID}{UID}{UID}[STX]{CHAN}(c1)(c2)<DATA>[ETX](BCC)
"""
def checksum(message):
bcc = (reduce(operator.xor, map(ord,message)))
return chr(bcc)
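A worked example, matching the frame layout sketched in the docstring above: writing '25.0' to mnemonic 'SL' checksums the bytes of 'SL' + '25.0' + [ETX], which XOR to 0x05 (computed by hand):

print repr(checksum('SL' + '25.0' + ETX))   # -> '\x05'
# complete frame as assembled by write_param():
# EOT + '0011' + STX + 'SL' + '25.0' + ETX + '\x05'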
class Eurotherm(object):
def __init__(self, serial_device, baudrate = 19200):
self.device = standard_device
# timeout: 110 ms to get all answers.
self.s = serial.Serial(serial_device,
baudrate = baudrate,
bytesize=7,
parity='E',
stopbits=1,
timeout=0.11)
self._expect_len = 50
def send_read_param(self, param):
self.s.write(EOT + self.device + param + ENQ)
def read_param(self, param):
self.s.flushInput()
self.send_read_param(param)
answer = self.s.read(self._expect_len)
m = reply_pattern.search(answer)
if m is None:
# Reading _expect_len bytes was not enough...
answer += self.s.read(200)
m = reply_pattern.search(answer)
if m is not None:
self._expect_len = len(answer)
return m.group(1)
else:
print "received:", repr(answer)
return None
def write_param(self, mnemonic, data):
if len(mnemonic) > 2:
raise ValueError
bcc = checksum(mnemonic + data + ETX)
mes = EOT+self.device+STX+mnemonic+data+ETX+bcc
if DEBUG:
for i in mes:
print i,hex(ord(i))
self.s.flushInput()
self.s.write(mes)
answer = self.s.read(1)
# print "received:", repr(answer)
if answer == "":
# raise IOError("No answer from device")
return None
return answer[-1] == ACK
def get_current_temperature(self):
temp = self.read_param('PV')
if temp is None:
temp = "0"
return temp
def set_temperature(self, temperature):
return self.write_param('SL', str(temperature))
def get_setpoint_temperature(self):
return self.read_param('SL')
if __name__ == '__main__':
import time
delta=5
date = time.strftime('%Y-%m-%d')
f = open('templog_%s'%date,'w')
f.write('# Start time: %s\n#delta t : %.1f s\n'%(time.asctime(), delta))
et = Eurotherm("/dev/ttyUSB0")
while True:
for i in xrange(120):
time.sleep(delta)
#t = time.strftime()
T = et.get_current_temperature()
l = '%f %s\n'%(time.time(),T)
print time.asctime(), T
f.write(l)
f.flush()
f.write('# MARK -- %s --\n'%(time.asctime()))

127
src/tools/ranges.py Normal file
View File

@ -0,0 +1,127 @@
import numpy as N
import sys
if sys.version_info > (2,6,0):
import numbers
def lin_range(start,stop,step):
if isinstance(step, numbers.Integral):
return N.linspace(start,stop,step)
else:
return N.arange(start,stop,step)
else:
def lin_range(start,stop,step):
return N.arange(start,stop,step)
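Note the asymmetry on Python >= 2.6: an integral step is taken as a number of points (numpy.linspace), a float step as a true increment (numpy.arange), so the stop value is included in one case and excluded in the other:

print lin_range(0.0, 1.0, 5)      # 5 points  -> [ 0.    0.25  0.5   0.75  1.  ]
print lin_range(0.0, 1.0, 0.25)   # step 0.25 -> [ 0.    0.25  0.5   0.75]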
def log_range(start, stop, stepno):
if (start<=0 or stop<=0 or stepno<1):
raise ValueError("start, stop must be positive and stepno must be >=1")
return N.logspace(N.log10(start),N.log10(stop), num=stepno)
def staggered_range(some_range, size=3):
m=0
if isinstance(some_range, N.ndarray):
is_numpy = True
some_range = list(some_range)
else:
is_numpy = False
new_list=[]
for k in xrange(len(some_range)):
for i in xrange(size):
try:
index = (m*size)
new_list.append(some_range.pop(index))
except IndexError:
break
m+=1
if is_numpy:
new_list = N.asarray(new_list+some_range)
else:
new_list+=some_range
return new_list
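For example, with size=3 the first pass keeps a block of three, skips the next block of three, and the skipped values are appended afterwards:

print staggered_range(range(10), size=3)
# -> [0, 1, 2, 6, 7, 8, 3, 4, 5, 9]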
def combine_ranges(*ranges):
new_list = []
for r in ranges:
new_list.extend(r)
return new_list
combined_ranges=combine_ranges
def interleaved_range(some_list, left_out):
"""
in first run, do every n-th, then do n-1-th of the remaining values and so on...
"""
m=0
new_list = []
for j in xrange(left_out):
for i in xrange(len(some_list)):
if (i*left_out+m) < len(some_list):
new_list.append(some_list[i*left_out+m])
else:
m+=1
break
if isinstance(some_list, N.ndarray):
new_list = N.array(new_list)
return new_list
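For example, with left_out=3 the list is visited with stride 3, starting one position later on each pass:

print interleaved_range(range(10), 3)
# -> [0, 3, 6, 9, 1, 4, 7, 2, 5, 8]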
# These are the generators
def lin_range_iter(start,stop, step):
this_one=float(start)+0.0
if step>0:
while (this_one<=float(stop)):
yield this_one
this_one+=float(step)
else:
while (this_one>=float(stop)):
yield this_one
this_one+=float(step)
def log_range_iter(start, stop, stepno):
if (start<=0 or stop<=0 or stepno<1):
raise ValueError("start, stop must be positive and stepno must be >=1")
if int(stepno)==1:
factor=1.0
else:
factor=(stop/start)**(1.0/int(stepno-1))
for i in xrange(int(stepno)):
yield start*(factor**i)
def staggered_range_iter(some_range, size = 1):
"""
size=1: do one, drop one, ....
size=n: do 1 ... n, drop n+1 ... 2*n
in a second run the dropped values were done
"""
left_out=[]
try:
while True:
for i in xrange(size):
yield some_range.next()
for i in xrange(size):
left_out.append(some_range.next())
except StopIteration:
pass
# now yield the dropped ones
for i in left_out:
yield i
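Note that the argument must be an iterator (it is consumed via next()), not a plain list:

print list(staggered_range_iter(iter(range(6)), size=1))
# -> [0, 2, 4, 1, 3, 5]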
def combined_ranges_iter(*ranges):
"""
iterate over one range after the other
"""
for r in ranges:
for i in r:
yield i
combine_ranges_iter=combined_ranges_iter

30
src/tools/signal.py Normal file
View File

@ -0,0 +1,30 @@
import math
__all__ = ['rotate_signal']
def rotate_signal(timesignal, angle):
"Rotate <timesignal> by <angle> degrees"
# implicit change to float arrays!
if timesignal.get_number_of_channels()!=2:
raise Exception("rotation defined only for 2 channels")
# simple case 0, 90, 180, 270 degree
reduced_angle=divmod(angle, 90)
if abs(reduced_angle[1])<1e-6:
reduced_angle=reduced_angle[0]%4
if reduced_angle==0:
return
elif reduced_angle==1:
timesignal.y[1]*=-1
timesignal.y=[timesignal.y[1],timesignal.y[0]]
elif reduced_angle==2:
timesignal.y[0]*=-1
timesignal.y[1]*=-1
elif reduced_angle==3:
timesignal.y[0]*=-1
timesignal.y=[timesignal.y[1],timesignal.y[0]]
else:
sin_angle=math.sin(angle/180.0*math.pi)
cos_angle=math.cos(angle/180.0*math.pi)
timesignal.y=[cos_angle*timesignal.y[0]-sin_angle*timesignal.y[1],
sin_angle*timesignal.y[0]+cos_angle*timesignal.y[1]]
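For the quadrant cases no trigonometry is needed; e.g. a 90 degree rotation reduces to negate-and-swap. A small sketch with a hypothetical two-channel stub (real signals are ADC_Result-like objects from the DAMARIS data modules):

import numpy

class _Stub:   # hypothetical stand-in for a two-channel time signal
    def __init__(self, y0, y1):
        self.y = [numpy.array(y0, dtype=float), numpy.array(y1, dtype=float)]
    def get_number_of_channels(self):
        return 2

s = _Stub([1.0, 0.0], [0.0, 1.0])
rotate_signal(s, 90)
print s.y   # -> [array([-0., -1.]), array([ 1.,  0.])]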

97
tests/datatest.py Normal file
View File

@ -0,0 +1,97 @@
import sys
import numpy
import math
import os.path
import unittest
print "running tests on modules in source directory"
# assume the script is in the tests directory and we are testing modules in src
sys.path[0:0]=[os.path.join(os.path.dirname(sys.argv[0]), "..", "src", "data")]
from MeasurementResult import *
class TestAccumulatedValueClass(unittest.TestCase):
def setUp(self):
# is called before each test
pass
def testInitialization_Empty(self):
a=AccumulatedValue()
self.assert_(a.mean() is None)
self.assert_(a.mean_error() is None)
self.assert_(a.sigma() is None)
self.assert_(a.n==0)
def testInitialization_1Value(self):
a=AccumulatedValue(0)
self.assert_(a.mean()==0)
self.assertAlmostEqual(a.mean_error(),0)
self.assertAlmostEqual(a.sigma(),0)
self.assert_(a.n==1)
def testInitialization_2Values(self):
a=AccumulatedValue(1, 0.1)
self.assertAlmostEqual(a.mean(), 1.0)
self.assertAlmostEqual(a.mean_error(), 0.1)
self.assertAlmostEqual(a.sigma(), 0.1*math.sqrt(2.0))
self.assert_(a.n==2)
def testInitialization_3Values(self):
a=AccumulatedValue(1, 0.1, 10)
self.assertAlmostEqual(a.mean(),1)
self.assertAlmostEqual(a.mean_error(), 0.1)
self.assertAlmostEqual(a.sigma(), 0.1*math.sqrt(10.0))
self.assert_(a.n==10)
def testStatistics(self):
test_dataset=numpy.arange(10.0)
a=AccumulatedValue()
for i in test_dataset:
a+=i
self.assert_(a.n==len(test_dataset))
# sum x_i/n
self.assertAlmostEqual(a.mean(), test_dataset.mean())
# std_dev_n-1 x_i= sqrt(sum (x-xmean)**2/(n-1))
self.assertAlmostEqual(a.sigma(), math.sqrt(((test_dataset-a.mean())**2).sum()/(len(test_dataset)-1.)))
# std_dev_n-1 x_i/sqrt(n)
self.assertAlmostEqual(a.mean_error(), a.sigma()/math.sqrt(len(test_dataset)))
def tearDown(self):
# is called after each test
pass
class TestMeasurementResult(unittest.TestCase):
def testImplicitCast(self):
# check whether other data types will be converted to AccumulatedValue
m=MeasurementResult("TestData")
m[1.0]
m[2.0]=2
self.assert_(isinstance(m[1.0], AccumulatedValue))
self.assert_(isinstance(m[2.0], AccumulatedValue))
def testUninitalizedEntries(self):
# assure that entries without accumulated data are not listed in xdata
m=MeasurementResult("TestData")
a=m[2.0]
self.assert_(isinstance(a, AccumulatedValue))
self.assert_(2.0 in m)
self.assert_(2.0 not in m.get_xdata())
m[2.0]+=1
self.assert_(2.0 in m.get_xdata())
def testZeroError(self):
# AccumulatedValues with only one Accumulation should have 0 error
m=MeasurementResult("TestData")
m[0.0]
m[1.0]=AccumulatedValue()
m[2.0]=0
m[3.0]=AccumulatedValue(0,1.0)
k,v,e=m.get_errorplotdata()
self.assert_(2.0 in k and 3.0 in k)
self.assert_(1.0 not in k and 0.0 not in k)
self.assertAlmostEqual(e[k==2.0][0], 0)
self.assertAlmostEqual(e[k==3.0][0], 1.0)
if __name__=="__main__":
unittest.main()