migrate to standard svn repo layout

Markus Rosenstihl
2014-06-26 11:10:51 +00:00
commit 0a393b0748
55 changed files with 13617 additions and 0 deletions

294
src/gui/BackendDriver.py Normal file

@@ -0,0 +1,294 @@
import os
import os.path
import subprocess
import sys
import time
import re
import glob
import ExperimentWriter
import ResultReader
import threading
import types
import signal
if sys.platform=="win32":
import _winreg
__doc__ = """
This class handles the backend driver
"""
class BackendDriver(threading.Thread):
def __init__(self, executable, spool, clear_jobs=False, clear_results=False):
threading.Thread.__init__(self, name="Backend Driver")
self.core_pid = None
self.core_input = None
self.core_output = None
self.statefilename = None
self.executable=str(executable)
self.spool_dir=spool
self.experiment_pattern="job.%09d"
self.result_pattern=self.experiment_pattern+".result"
if not os.path.isfile(self.executable):
raise AssertionError("could not find backend %s "%self.executable)
if not os.access(self.executable,os.X_OK):
raise AssertionError("insufficient rights for backend %s execution"%self.executable)
if not os.path.isdir(self.spool_dir):
try:
os.makedirs(os.path.abspath(self.spool_dir))
except OSError,e:
print e
raise AssertionError("could not create backend's spool directory %s "%self.spool_dir)
# remove stale state filenames
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
old_state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
statelinepattern=re.compile("<state name=\"([^\"]+)\" pid=\"([^\"]+)\" starttime=\"([^\"]+)\">")
for statefilename in old_state_files:
statefile=file(statefilename,"r")
statelines=statefile.readlines()
                statefile.close()
del statefile
core_pid=None
for l in statelines:
matched=statelinepattern.match(l)
if matched:
core_pid=int(matched.group(2))
break
if core_pid is not None:
if os.path.isdir("/proc/%d"%core_pid):
raise AssertionError("found backend with pid %d (state file %s) in same spool dir"%(core_pid,statefilename))
else:
print "removing stale backend state file", statefilename
os.remove(statefilename)
else:
print "todo: take care of existing backend state files"
self.result_reader = ResultReader.BlockingResultReader(self.spool_dir,
no=0,
result_pattern=self.result_pattern,
clear_jobs=clear_jobs,
clear_results=clear_results)
self.experiment_writer = ExperimentWriter.ExperimentWriterWithCleanup(self.spool_dir,
no=0,
job_pattern=self.experiment_pattern,
inform_last_job=self.result_reader)
self.quit_flag=threading.Event()
self.raised_exception=None
def run(self):
# take care of older logfiles
self.core_output_filename=os.path.join(self.spool_dir,"logdata")
if os.path.isfile(self.core_output_filename):
i=0
max_logs=100
while os.path.isfile(self.core_output_filename+".%02d"%i):
i+=1
while (i>=max_logs):
i-=1
os.remove(self.core_output_filename+".%02d"%i)
for j in xrange(i):
os.rename(self.core_output_filename+".%02d"%(i-j-1),self.core_output_filename+".%02d"%(i-j))
os.rename(self.core_output_filename, self.core_output_filename+".%02d"%0)
# create logfile
self.core_output=file(self.core_output_filename,"w")
# again look out for existing state files
state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
if state_files:
self.raised_exception="found other state file(s) in spool directory: "+",".join(state_files)
self.quit_flag.set()
return
# start backend
if sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
self.core_input=subprocess.Popen([self.executable, "--spool", self.spool_dir],
stdout=self.core_output,
stderr=self.core_output)
if sys.platform=="win32":
cygwin_root_key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Cygnus Solutions\\Cygwin\\mounts v2\\/")
cygwin_path=_winreg.QueryValueEx(cygwin_root_key,"native")[0]
os.environ["PATH"]+=";"+os.path.join(cygwin_path,"bin")+";"+os.path.join(cygwin_path,"lib")
self.core_input=subprocess.Popen("\"" + self.executable + "\"" + " --spool "+self.spool_dir,
stdout=self.core_output,
stderr=self.core_output)
# wait till state file shows up
timeout=10
# to do: how should I know core's state name????!!!!!
self.statefilename=None
state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
while len(state_files)==0:
if timeout<0 or self.core_input is None or self.core_input.poll() is not None or self.quit_flag.isSet():
# look into core log file and include contents
log_message=''
self.core_input=None
if os.path.isfile(self.core_output_filename):
# to do include log data
log_message='\n'+''.join(file(self.core_output_filename,"r").readlines()[:10])
if not log_message:
log_message=" no error message from core"
self.core_output.close()
self.raised_exception="no state file appeared or backend died away:"+log_message
print self.raised_exception
self.quit_flag.set()
return
time.sleep(0.05)
timeout-=0.05
state_files=glob.glob(os.path.join(self.spool_dir,"*.state"))
        # keep the first one
        if len(state_files)>1:
            print "found more than one state file, taking the first one!"
self.statefilename=state_files[0]
# read state file
statefile=file(self.statefilename,"r")
statelines=statefile.readlines()
        statefile.close()
statelinepattern=re.compile("<state name=\"([^\"]+)\" pid=\"([^\"]+)\" starttime=\"([^\"]+)\">")
self.core_pid=-1
for l in statelines:
matched=statelinepattern.match(l)
if matched:
self.core_pid=int(matched.group(2))
break
# wait on flag and look after backend
while not self.quit_flag.isSet() and self.is_busy():
self.quit_flag.wait(0.1)
if self.quit_flag.isSet():
self.stop_queue()
while self.is_busy():
time.sleep(0.1)
if not self.is_busy():
if self.core_input is not None:
backend_result=self.core_input.poll()
wait_loop_counter=0
while backend_result is None:
                    # wait in tenths of a second
time.sleep(0.1)
wait_loop_counter+=1
backend_result=self.core_input.poll()
if backend_result is not None: break
if wait_loop_counter==10:
print "sending termination signal to backend process"
self.send_signal("SIGTERM")
elif wait_loop_counter==20:
print "sending kill signal to backend process"
self.send_signal("SIGKILL")
elif wait_loop_counter>30:
print "no longer waiting for backend shutdown"
break
if backend_result is None:
print "backend dit not end properly, please stop it manually"
elif backend_result>0:
print "backend returned ", backend_result
elif backend_result<0:
sig_name=filter(lambda x: x.startswith("SIG") and \
x[3]!="_" and \
(type(signal.__dict__[x])is types.IntType) and \
signal.__dict__[x]==-backend_result,
dir(signal))
if sig_name:
print "backend was terminated by signal ",sig_name[0]
else:
print "backend was terminated by signal no",-backend_result
self.core_input = None
self.core_pid = None
# the experiment handler should stop
if self.experiment_writer is not None:
# self.experiment_writer.
self.experiment_writer=None
# tell result reader, game is over...
#self.result_reader.stop_no=self.experiment_writer.no
if self.result_reader is not None:
self.result_reader.poll_time=-1
self.result_reader=None
def clear_job(self,no):
        jobfilename=os.path.join(self.spool_dir,self.experiment_pattern%no)
        resultfilename=os.path.join(self.spool_dir,self.result_pattern%no)
if os.path.isfile(jobfilename):
os.remove(jobfilename)
if os.path.isfile(resultfilename):
os.remove(resultfilename)
def get_messages(self):
# return pending messages
if self.core_output.tell()==os.path.getsize(self.core_output_filename):
return None
return self.core_output.read()
def restart_queue(self):
self.send_signal("SIGUSR1")
def stop_queue(self):
self.send_signal("SIGQUIT")
# assumes success
#self.core_pid=None
#self.core_input=None
def abort(self):
# abort execution
self.send_signal("SIGTERM")
# assumes success
#self.core_pid=None
#self.core_input=None
def send_signal(self, sig):
if self.core_pid is None:
print "BackendDriver.send_signal is called with core_pid=None"
return
try:
if sys.platform[:5]=="linux":
os.kill(self.core_pid,signal.__dict__[sig])
if sys.platform[:7]=="win32":
# reg_handle=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
cygwin_root_key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Cygnus Solutions\\Cygwin\\mounts v2\\/")
cygwin_path=_winreg.QueryValueEx(cygwin_root_key,"native")[0]
kill_command=os.path.join(cygwin_path,"bin","kill.exe")
os.popen("%s -%s %d"%(kill_command,sig,self.core_pid))
except OSError, e:
print "could not send signal %s to core: %s"%(sig, str(e))
def is_busy(self):
"Checks for state file"
return self.statefilename is not None and os.path.isfile(self.statefilename) and \
self.core_input is not None and self.core_input.poll() is None
#file_list = glob.glob(os.path.join(self.spool_dir, self.core_state_file))
#if len(file_list) != 0:
# return True
#else:
# return False
def get_exp_writer(self):
return self.experiment_writer
def get_res_reader(self):
return self.result_reader
def __del__(self):
# stop core and wait for it
if self.core_pid is not None:
try:
self.abort()
except OSError:
pass
self.core_input=None
if self.core_output:
self.core_output.close()
self.core_output=None
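
For orientation, a minimal sketch of how BackendDriver is meant to be driven (the backend path is the one used in script_interface.py below; the spool directory name is a placeholder):

driver = BackendDriver("/usr/lib/damaris/backends/Mobilecore", "spool")
writer = driver.get_exp_writer()   # queue Experiment jobs through this
reader = driver.get_res_reader()   # iterate finished results from this
driver.start()                     # runs the backend in its own thread
# ... submit experiments and consume results ...
driver.quit_flag.set()             # ask the driver to shut the backend down
driver.join()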

BIN
src/gui/DAMARIS.ico Normal file (Size: 11 KiB)

Binary file not shown.

BIN
src/gui/DAMARIS.png Normal file (Size: 664 B)

Binary file not shown.

2880
src/gui/DamarisGUI.py Normal file

File diff suppressed because it is too large.

95
src/gui/ExperimentHandling.py Normal file

@@ -0,0 +1,95 @@
import threading
import StringIO
import traceback
import sys
import time
from damaris.experiments.Experiment import Quit
from damaris.experiments import Experiment
class ExperimentHandling(threading.Thread):
"""
runs the experiment script in sandbox
"""
def __init__(self, script, exp_writer, data):
threading.Thread.__init__(self, name="experiment handler")
self.script=script
self.writer=exp_writer
self.data=data
self.quit_flag = threading.Event()
if self.data is not None:
self.data["__recentexperiment"]=-1
def synchronize(self, before=0, waitsteps=0.1):
while (self.data["__recentexperiment"]>self.data["__recentresult"]+before) and not self.quit_flag.isSet():
self.quit_flag.wait(waitsteps)
if self.quit_flag.isSet():
raise StopIteration
def run(self):
dataspace={}
exp_classes = __import__('damaris.experiments', dataspace, dataspace, ['Experiment'])
for name in dir(exp_classes):
if name[:2]=="__" and name[-2:]=="__": continue
dataspace[name]=exp_classes.__dict__[name]
del exp_classes
dataspace["data"]=self.data
dataspace["synchronize"]=self.synchronize
self.raised_exception = None
self.location = None
exp_iterator=None
try:
exec self.script in dataspace
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
return
if "experiment" in dataspace:
try:
exp_iterator=dataspace["experiment"]()
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
return
while exp_iterator is not None and not self.quit_flag.isSet():
# get next experiment from script
try:
job=exp_iterator.next()
except StopIteration:
break
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
break
# send it
self.writer.send_next(job)
# write a note
if isinstance(job, Experiment):
if self.data is not None:
self.data["__recentexperiment"]=job.job_id+0
# relax for a short time
if "__resultsinadvance" in self.data and self.data["__resultsinadvance"]+100<job.job_id:
self.quit_flag.wait(0.05)
if self.quit_flag.isSet():
                dataspace=None
exp_iterator=None
break
self.writer.send_next(Quit(), quit=True)
# do not count quit job (is this a good idea?)
dataspace=None
        exp_iterator=None
self.writer=None
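
The script handed to this class is expected to define an experiment() generator in the injected dataspace; a minimal sketch of such a script (the loop count and description key are invented for illustration; set_description follows the Experiment keyword list in python.xml below):

def experiment():
    for run in xrange(10):
        e = Experiment()
        e.set_description("run", run)   # assumed Experiment API
        yield e
        synchronize(2)                  # stay at most two experiments ahead of the results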

66
src/gui/ExperimentWriter.py Normal file

@@ -0,0 +1,66 @@
import os
import os.path
import shutil
from damaris.experiments import Experiment
class ExperimentWriter:
"""
    writes experiments properly to the spool directory
"""
def __init__(self, spool, no=0, job_pattern="job.%09d", inform_last_job=None):
self.spool=spool
self.job_pattern=job_pattern
self.no=no
self.inform_last_job=inform_last_job
# test if spool exists
if not os.path.isdir(spool):
os.mkdir(spool)
def send_next(self, job, quit=False):
"""
"""
if quit and self.inform_last_job is not None:
self.inform_last_job.stop_no=self.no
self.inform_last_job=None
job.job_id=self.no
job_filename=os.path.join(self.spool,self.job_pattern%self.no)
f=file(job_filename+".tmp","w")
f.write(job.write_xml_string())
f.flush()
f.close() # explicit close under windows necessary (don't know why)
del f
        # this implementation tries to satisfy msvc filehandle caching
os.rename(job_filename+".tmp", job_filename)
#shutil.copyfile(job_filename+".tmp", job_filename)
#try:
# os.unlink(job_filename+".tmp")
#except OSError:
# print "could not delete temporary file %s.tmp"%job_filename
self.no+=1
def __del__(self):
if self.inform_last_job is not None:
self.inform_last_job.stop_no=self.no-1
self.inform_last_job=None
class ExperimentWriterWithCleanup(ExperimentWriter):
"""
writes experiments and cleans up in front of queue
"""
def __init__(self, spool, no=0, job_pattern="job.%09d", inform_last_job=None):
ExperimentWriter.__init__(self, spool, no, job_pattern, inform_last_job=inform_last_job)
self.delete_no_files(self.no)
def send_next(self, job, quit=False):
self.delete_no_files(self.no+1)
ExperimentWriter.send_next(self,job,quit)
def delete_no_files(self,no):
"""
delete everything with this job number
"""
filename=os.path.join(self.spool,(self.job_pattern%no))
if os.path.isfile(filename): os.unlink(filename)
if os.path.isfile(filename+".tmp"): os.unlink(filename+".tmp")
if os.path.isfile(filename+".result"): os.unlink(filename+".result")

78
src/gui/ResultHandling.py Normal file

@@ -0,0 +1,78 @@
import threading
import StringIO
import sys
import os
import os.path
import traceback
from damaris.data import Resultable
class ResultHandling(threading.Thread):
"""
runs the result script in sandbox
"""
def __init__(self, script_data, result_iterator, data_pool):
threading.Thread.__init__(self,name="result handler")
self.script=script_data
self.results=result_iterator
self.data_space=data_pool
self.quit_flag=self.results.quit_flag
if self.data_space is not None:
self.data_space["__recentresult"]=-1
def run(self):
# execute it
dataspace={}
data_classes = __import__('damaris.data', dataspace, dataspace, ['*'])
for name in dir(data_classes):
if name[:2]=="__" and name[-2:]=="__": continue
dataspace[name]=data_classes.__dict__[name]
del data_classes
dataspace["results"]=self
dataspace["data"]=self.data_space
self.raised_exception=None
self.location = None
try:
exec self.script in dataspace
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
return
if not "result" in dataspace:
dataspace=None
return
try:
dataspace["result"]()
except Exception, e:
self.raised_exception=e
self.location=traceback.extract_tb(sys.exc_info()[2])[-1][1:3]
traceback_file=StringIO.StringIO()
traceback.print_tb(sys.exc_info()[2], None, traceback_file)
self.traceback=traceback_file.getvalue()
traceback_file=None
dataspace=None
def __iter__(self):
if self.quit_flag.isSet():
self.results=None
return
for i in self.results:
if hasattr(self.results, "in_advance"):
self.data_space["__resultsinadvance"]=self.results.in_advance
if self.quit_flag.isSet():
self.results=None
return
if isinstance(i, Resultable.Resultable):
if self.data_space is not None:
self.data_space["__recentresult"]=i.job_id+0
yield i
if self.quit_flag.isSet():
self.results=None
return
def stop(self):
self.quit_flag.set()
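
The script run by this handler defines a result() function and loops over the injected results object; a minimal sketch (the data pool key is invented; Accumulation and ADC_Result come from the damaris.data names imported into the dataspace by run() above, and the += accumulation is an assumed API, cf. the datatype keywords in python.xml):

def result():
    acc = Accumulation()   # assumed damaris.data accumulation class
    for res in results:
        if isinstance(res, ADC_Result):
            acc += res
            data["accumulated signal"] = acc   # publish to the shared data pool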

576
src/gui/ResultReader.py Normal file

@@ -0,0 +1,576 @@
# -*- coding: iso-8859-1 -*-
#############################################################################
# #
# Name: Class ResultReader #
# #
#############################################################################
import os
import os.path
import glob
import time
import sys
import base64
import numpy
try:
import xml.etree.cElementTree
ELEMENT_TREE = True
except ImportError:
import xml.parsers.expat
ELEMENT_TREE = False
import threading
from datetime import datetime
from damaris.data import ADC_Result
from damaris.data import Error_Result
from damaris.data import Temp_Result
from damaris.data import Config_Result
class ResultReader:
"""
    reads result files starting at a given job number and yields result objects until none are left
"""
CONFIG_TYPE = 3
TEMP_TYPE = 2
ADCDATA_TYPE = 1
ERROR_TYPE = 0
def __init__(self, spool_dir=".", no=0, result_pattern="job.%09d.result", clear_jobs=False, clear_results=False):
self.spool_dir = spool_dir
self.start_no = no
self.no = self.start_no
self.result_pattern = result_pattern
self.clear_jobs=clear_jobs
self.clear_results=clear_results
        self.quit_flag=threading.Event() # asynchronous quit flag
def __iter__(self):
"""
get next job with iterator
"""
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
while os.access(expected_filename,os.R_OK):
yield self.get_result_object(expected_filename)
# purge result file
if self.clear_results:
if os.path.isfile(expected_filename): os.remove(expected_filename)
if self.clear_jobs:
if os.path.isfile(expected_filename[:-7]): os.remove(expected_filename[:-7])
self.no+=1
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
return
def get_result_object(self, in_filename):
"""
get result object
"""
        # class-internal result object currently being processed
retries=0
result_file=None
while result_file is None:
try:
result_file = file(in_filename, "r")
except IOError, e:
if retries>10:
raise e
print e, "retry", retries
time.sleep(0.05)
retries+=1
# get date of last modification
self.result_job_date = datetime.fromtimestamp(os.stat(in_filename)[8])
if ELEMENT_TREE:
self.__parseFile = self.__parseFile_cETree
else:
self.__parseFile = self.__parseFile_expat
self.__parseFile (result_file)
result_file.close()
result_file = None
r=self.result
self.result = None
return r
def __parseFile_cETree(self, in_file):
self.result = None
self.in_description_section=False
self.result_description = { }
self.result_job_number = None
        # job date is set in get_result_object()
self.__filetype = None
for elem in xml.etree.cElementTree.ElementTree(file=in_file).getiterator():
if elem.tag == 'result':
self.result_job_number = int(elem.get("job"))
pass
elif elem.tag == 'description':
if elem.text!=None:
self.result_description = {}
self.in_description_section=True
self.in_description_data=()
for an_item in elem.getchildren():
self.in_description_data = (an_item.get("key"), an_item.get("type"), an_item.text)
                        # turn the item contents into a dictionary entry:
                        k,t,v=self.in_description_data
                        self.in_description_data=()
                        if t == "None":
                            self.result_description[k]=None
                        elif t == "Float":
self.result_description[k]=float(v)
elif t == "Int":
self.result_description[k]=int(v)
elif t == "Long":
self.result_description[k]=long(v)
elif t == "Complex":
self.result_description[k]=complex(v)
elif t == "Boolean":
self.result_description[k]=bool(v)
elif t == "String":
self.result_description[k]=v
else:
# Anything else will be handled as a string
# Probably "repr".
self.result_description[k]=v
elif elem.tag == 'adcdata':
self.__filetype = ResultReader.ADCDATA_TYPE
self.adc_result_trailing_chars = ""
if self.result is None:
self.result = ADC_Result()
# None: new guess for adc data encoding
# "a": ascii
# "b": base64
self.adc_data_encoding = None
self.result.set_sampling_rate(float(elem.get("rate")))
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_nChannels(int(elem.get("channels")))
self.result.set_description_dictionary(self.result_description.copy())
title = "ADC-Result: job-id=%d"%int(self.result_job_number)
if len(self.result_description) > 0:
for k,v in self.result_description.iteritems():
title += ", %s=%s"%(k,v)
self.result.set_title(title)
self.result_description = None
self.adc_result_sample_counter = 0
self.adc_result_parts = [] # will contain arrays of sampled intervals, assumes same sample rate
else:
if float(elem.get("rate")) != self.result.get_sampling_rate():
print "sample rate different in ADC_Result, found %f, former value %f"%\
(float(in_attribute["rate"]),self.result.get_sampling_rate())
new_samples = int(elem.get("samples"))
self.adc_result_sample_counter += new_samples
self.adc_result_trailing_chars = "".join(elem.text.splitlines())
tmp_string = base64.standard_b64decode(self.adc_result_trailing_chars)
self.adc_result_trailing_chars = None
tmp = numpy.fromstring(tmp_string,dtype='Int16')
tmp_string = None
self.adc_result_parts.append(tmp)
tmp = None
# we do not need this adcdata anymore, delete it
elem.clear()
elif elem.tag == 'error':
self.__filetype = ResultReader.ERROR_TYPE
self.result = Error_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_description_dictionary(self.result_description.copy())
self.result.set_error_message(elem.text)
            elif elem.tag == 'temp':
                self.__filetype = ResultReader.TEMP_TYPE
                self.result = Temp_Result()
                self.result.set_job_id(self.result_job_number)
                self.result.set_job_date(self.result_job_date)
            elif elem.tag == 'conf':
                self.__filetype = ResultReader.CONFIG_TYPE
                self.result = Config_Result()
                self.result.set_job_id(self.result_job_number)
                self.result.set_job_date(self.result_job_date)
        # xml file was traversed, now prepare the data in one go
# prepare result data
if self.result is not None and \
self.__filetype == ResultReader.ADCDATA_TYPE and \
self.adc_result_sample_counter>0:
# fill the ADC_Result with collected data
# x data
self.result.x=numpy.arange(self.adc_result_sample_counter, dtype="Float64")/\
self.result.get_sampling_rate()
self.result.y = []
nChannels = self.result.get_nChannels()
# initialise the y arrays
for i in xrange(nChannels):
self.result.y.append(numpy.empty(self.adc_result_sample_counter, dtype='Int16'))
# remove from result stack
tmp_index = 0
while self.adc_result_parts:
tmp_part=self.adc_result_parts.pop(0)
tmp_size = tmp_part.size/nChannels
for i in xrange(nChannels):
# split interleaved data
                    self.result.y[i][tmp_index:tmp_index+tmp_size] = tmp_part[i::nChannels]
if self.result.index != []:
self.result.index.append((tmp_index, tmp_index+tmp_size-1))
else:
self.result.index = [(0,tmp_size-1)]
tmp_index += tmp_size
self.result.cont_data=True
tmp_part = None
def __parseFile_expat(self, in_file):
"Parses the given file, adding it to the result-queue"
self.result = None
self.in_description_section=False
self.result_description = { }
self.result_job_number = None
        # job date is set in get_result_object()
self.__filetype = None
# Expat XML-Parser & Binding handlers
self.xml_parser = xml.parsers.expat.ParserCreate()
self.xml_parser.StartElementHandler = self.__xmlStartTagFound
self.xml_parser.CharacterDataHandler = self.__xmlCharacterDataFound
self.xml_parser.EndElementHandler = self.__xmlEndTagFound
self.element_stack=[]
try:
# short version, but pyexpat buffers are awfully small
# self.xml_parser.ParseFile(in_file)
# read all, at least try
databuffer=in_file.read(-1)
            # test whether really everything was read...
databuffer2=in_file.read(self.xml_parser.buffer_size)
if databuffer2=="":
# parse everything at once
self.xml_parser.Parse(databuffer,True)
else:
# do the first part ...
self.xml_parser.Parse(databuffer,False)
databuffer=databuffer2
# ... and again and again
while databuffer!="":
self.xml_parser.Parse(databuffer,False)
databuffer=in_file.read(-1)
self.xml_parser.Parse("",True)
except xml.parsers.expat.ExpatError, e:
print "result file %d: xml parser '%s' error at line %d, offset %d"%(self.no,
xml.parsers.expat.ErrorString(e.code),
e.lineno,
e.offset)
self.result = None
del databuffer
self.xml_parser.StartElementHandler=None
self.xml_parser.EndElementHandler=None
self.xml_parser.CharacterDataHandler=None
del self.xml_parser
# prepare result data
if self.result is not None and \
self.__filetype == ResultReader.ADCDATA_TYPE and \
self.adc_result_sample_counter>0:
# fill the ADC_Result with collected data
self.result.x=numpy.arange(self.adc_result_sample_counter, dtype="Float64")/\
self.result.get_sampling_rate()
self.result.y=[]
self.result.index=[]
for i in xrange(2):
self.result.y.append(numpy.empty((self.adc_result_sample_counter,), dtype="Int16"))
tmp_sample_counter=0
while self.adc_result_parts:
tmp_part=self.adc_result_parts.pop(0)
tmp_size=tmp_part.size/2
self.result.y[0][tmp_sample_counter:tmp_sample_counter+tmp_size]=tmp_part[::2]
self.result.y[1][tmp_sample_counter:tmp_sample_counter+tmp_size]=tmp_part[1::2]
self.result.index.append((tmp_sample_counter,tmp_sample_counter+tmp_size-1))
tmp_sample_counter+=tmp_size
self.result.cont_data=True
# Callback when a xml start tag is found
def __xmlStartTagFound(self, in_name, in_attribute):
# General Result-Tag
if in_name == "result":
self.result_job_number = int(in_attribute["job"])
            # job date is set in get_result_object()
# Description
elif in_name == "description":
# old style description:
if len(in_attribute)!=0:
self.result_description = in_attribute.copy()
self.in_description_section=True
self.in_description_data=()
elif self.in_description_section and in_name == "item":
self.in_description_data=[in_attribute["key"], in_attribute["type"], ""]
# ADC_Results
elif in_name == "adcdata":
self.__filetype = ResultReader.ADCDATA_TYPE
self.adc_result_trailing_chars = ""
if self.result is None:
self.result = ADC_Result()
# None: new guess for adc data encoding
# "a": ascii
# "b": base64
self.adc_data_encoding = None
self.result.set_sampling_rate(float(in_attribute["rate"]))
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_description_dictionary(self.result_description.copy())
title="ADC-Result: job-id=%d"%int(self.result_job_number)
if len(self.result_description)>0:
for k,v in self.result_description.iteritems():
title+=", %s=%s"%(k,v)
self.result.set_title(title)
self.result_description=None
self.adc_result_sample_counter = 0
self.adc_result_parts=[] # will contain arrays of sampled intervals, assumes same sample rate
else:
if float(in_attribute["rate"])!=self.result.get_sampling_rate():
print "sample rate different in ADC_Result, found %f, former value %f"%\
(float(in_attribute["rate"]),self.result.get_sampling_rate())
new_samples=int(in_attribute["samples"])
self.adc_result_sample_counter += new_samples
# version depends on the inclusion of http://bugs.python.org/issue1137
if sys.hexversion>=0x020501f0:
# extend buffer to expected base64 size (2 channels, 2 byte)
required_buffer=int(new_samples*4/45+1)*62
if self.xml_parser.buffer_size < required_buffer:
try:
self.xml_parser.buffer_size=required_buffer
except AttributeError:
pass
# pass all chardata as one block
self.xml_parser.buffer_text = True
# do not change the contents
self.xml_parser.returns_unicode=False
# Error_Results
elif in_name == "error":
self.__filetype = ResultReader.ERROR_TYPE
self.result = Error_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
self.result.set_description_dictionary(self.result_description.copy())
# Temp_Results
elif in_name == "temp":
self.__filetype = ResultReader.TEMP_TYPE
self.result = Temp_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
# Config_Results
elif in_name == "conf":
self.__filetype = ResultReader.CONFIG_TYPE
self.result = Config_Result()
self.result.set_job_id(self.result_job_number)
self.result.set_job_date(self.result_job_date)
# maintain the stack
self.element_stack.append(in_name)
def __xmlCharacterDataFound(self, in_cdata):
if self.in_description_section and len(self.in_description_data):
self.in_description_data[2]+=in_cdata
# ADC_Result
elif self.__filetype == ResultReader.ADCDATA_TYPE and self.element_stack[-1]=="adcdata":
self.adc_result_trailing_chars+=in_cdata
# Error_Result
elif self.__filetype == ResultReader.ERROR_TYPE:
tmp_string = self.result.get_error_message()
if tmp_string is None: tmp_string = ""
tmp_string += in_cdata
self.result.set_error_message(tmp_string)
# Temp_Results
elif self.__filetype == ResultReader.TEMP_TYPE:
pass
# Config_Results
elif self.__filetype == ResultReader.CONFIG_TYPE:
pass
def __xmlEndTagFound(self, in_name):
# maintain the stack
self.element_stack.pop()
if in_name == "adcdata":
# ADC_Result
if self.__filetype == ResultReader.ADCDATA_TYPE:
# detect type of data encoding from first line
if self.adc_data_encoding is None:
self.adc_result_trailing_chars=self.adc_result_trailing_chars.strip()
first_line_end=self.adc_result_trailing_chars.find("\n")
first_line=""
if first_line_end!=-1:
first_line=self.adc_result_trailing_chars[:first_line_end]
else:
first_line=self.adc_result_trailing_chars
if len(first_line.lstrip("-0123456789 \t\n\r"))==0:
try:
map(int,filter(len,first_line.split()))
except ValueError,e:
pass
else:
self.adc_data_encoding="a"
if self.adc_data_encoding is None and len(first_line)%4==0:
try:
base64.standard_b64decode(first_line)
except TypeError:
pass
else:
self.adc_data_encoding="b"
if self.adc_data_encoding is None:
print "unknown ADC data format \"%s\""%first_line
tmp=None
if self.adc_data_encoding=="a":
values=map(int,self.adc_result_trailing_chars.split())
tmp=numpy.array(values, dtype="Int16")
elif self.adc_data_encoding=="b":
tmp_string=base64.standard_b64decode(self.adc_result_trailing_chars)
tmp=numpy.fromstring(tmp_string, dtype="Int16")
del tmp_string
else:
print "unknown ADC data format"
self.adc_result_trailing_chars=""
self.adc_result_parts.append(tmp)
del tmp
return
elif in_name == "description":
self.in_description_section=False
elif self.in_description_section and in_name == "item":
            # turn the item contents into a dictionary entry:
            k,t,v=self.in_description_data
            self.in_description_data=()
            if t == "None":
                self.result_description[k]=None
            elif t == "Float":
self.result_description[k]=float(v)
elif t == "Int":
self.result_description[k]=int(v)
elif t == "Long":
self.result_description[k]=long(v)
elif t == "Complex":
self.result_description[k]=complex(v)
elif t == "Boolean":
self.result_description[k]=bool(v)
elif t == "String":
self.result_description[k]=v
else:
# Anything else will be handled as a string
# Probably "repr".
self.result_description[k]=v
elif in_name == "result":
pass
# Error_Result
elif self.__filetype == ResultReader.ERROR_TYPE:
pass
# Temp_Result
elif self.__filetype == ResultReader.TEMP_TYPE:
pass
# Config_Result
elif self.__filetype == ResultReader.CONFIG_TYPE:
pass
class BlockingResultReader(ResultReader):
"""
to follow an active result stream
"""
def __init__(self, spool_dir=".", no=0, result_pattern="job.%09d.result", clear_jobs=False, clear_results=False):
ResultReader.__init__(self, spool_dir, no, result_pattern, clear_jobs=clear_jobs, clear_results=clear_results)
self.stop_no=None # end of job queue
self.poll_time=0.1 # sleep interval for polling results, <0 means no polling and stop
self.in_advance=0
def __iter__(self):
"""
get next job with iterator
block until result is available
"""
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
while (not self.quit_flag.isSet()) and (self.stop_no is None or self.stop_no>self.no):
if not os.access(expected_filename,os.R_OK):
# stop polling, if required
if self.poll_time<0: break
self.quit_flag.wait(self.poll_time)
continue
# find pending results
self.in_advance=max(self.no,self.in_advance)
in_advance_filename=os.path.join(self.spool_dir,self.result_pattern%(self.in_advance+1))
while os.access(in_advance_filename, os.R_OK) and (self.stop_no is None or self.stop_no>self.in_advance+1):
                # do not read more than 100 results in advance at one go
if self.in_advance>self.no+100: break
self.in_advance+=1
in_advance_filename=os.path.join(self.spool_dir,self.result_pattern%(self.in_advance+1))
if self.quit_flag.isSet(): break
            r=self.get_result_object(expected_filename)
            if self.quit_flag.isSet(): break
yield r
if self.clear_results:
if os.path.isfile(expected_filename): os.remove(expected_filename)
if self.clear_jobs:
if os.path.isfile(expected_filename[:-7]): os.remove(expected_filename[:-7])
self.no+=1
expected_filename=os.path.join(self.spool_dir,self.result_pattern%(self.no))
return
def quit(self):
self.quit_flag.set()
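
Reconstructed from the two parsers above, a result file has roughly this shape (an approximate sketch, not a verbatim backend file; the description item and the elided sample payload are placeholders):

SAMPLE_RESULT = """<result job="0">
<description><item key="run" type="Int">0</item></description>
<adcdata rate="1000000.0" channels="2" samples="256">
...base64-encoded interleaved Int16 samples...
</adcdata>
</result>"""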

0
src/gui/__init__.py Normal file

2106
src/gui/damaris.glade Normal file

File diff suppressed because it is too large.

8
src/gui/damaris.gladep Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" standalone="no"?> <!--*- mode: xml -*-->
<!DOCTYPE glade-project SYSTEM "http://glade.gnome.org/glade-project-2.0.dtd">
<glade-project>
<name>damaris-gui</name>
<program_name>damaris</program_name>
<gnome_support>FALSE</gnome_support>
</glade-project>

681
src/gui/gtkcodebuffer.py Normal file

@@ -0,0 +1,681 @@
""" This module contains the PyGTKCodeBuffer-class. This class is a
specialisation of the gtk.TextBuffer and enables syntax-highlighting for
PyGTK's TextView-widget.
    To use the syntax-highlighting feature you have to load a syntax-definition or
specify your own. To load one please read the docs for the SyntaxLoader()
class. """
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import pango
import re
import sys
import os.path
import xml.sax
import imp
from xml.sax.handler import ContentHandler
from xml.sax.saxutils import unescape
__version__ = "1.0RC2"
__author__ = "Hannes Matuschek <hmatuschek@gmail.com>"
# defined the default styles
DEFAULT_STYLES = {
'DEFAULT': {'font': 'monospace'},
'comment': {'foreground': '#0000FF'},
'preprocessor': {'foreground': '#A020F0',
'weight': pango.WEIGHT_BOLD},
'keyword': {'foreground': '#A52A2A',
'weight': pango.WEIGHT_BOLD},
'special': {'foreground': '#006600'},
'mark1': {'foreground': '#008B8B'},
'mark2': {'foreground': '#6A5ACD'},
'string': {'foreground': '#CC00CC'},
'number': {'foreground': '#CC00CC'},
'datatype': {'foreground': '#2E8B57',
'weight': pango.WEIGHT_BOLD},
'function': {'foreground': '#008A8C'},
'link': {'foreground': '#0000FF',
'underline': pango.UNDERLINE_SINGLE}}
def _main_is_frozen():
""" Internal used function. """
return (hasattr(sys, "frozen") or # new py2exe
hasattr(sys, "importers") # old py2exe
or imp.is_frozen("__main__")) # tools/freeze
if _main_is_frozen():
this_module_path = os.path.dirname(sys.executable)
else:
this_module_path = os.path.abspath(os.path.dirname(__file__))
# defines default-search paths for syntax-files
SYNTAX_PATH = [ os.path.join('.', 'syntax'),
this_module_path,
os.path.join(os.path.expanduser('~'),".pygtkcodebuffer"),
os.path.join(sys.prefix,"share","pygtkcodebuffer","syntax")]
# enable/disable debug-messages
DEBUG_FLAG = False
#
# Some log functions...
# (internal used)
def _log_debug(msg):
if not DEBUG_FLAG:
return
sys.stderr.write("DEBUG: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def _log_warn(msg):
sys.stderr.write("WARN: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def _log_error(msg):
sys.stderr.write("ERROR: ")
sys.stderr.write(msg)
sys.stderr.write("\n")
def add_syntax_path(path_or_list):
""" This function adds one (string) or many (list of strings) paths to the
global search-paths for syntax-files. """
global SYNTAX_PATH
# handle list of strings
if isinstance(path_or_list, (list, tuple)):
        for i in range(len(path_or_list)):
            SYNTAX_PATH.insert(0, path_or_list[-i-1])
# handle single string
elif isinstance(path_or_list, basestring):
SYNTAX_PATH.insert(0, path_or_list)
# handle attr-error
else:
raise TypeError, "Argument must be path-string or list of strings"
class Pattern:
""" More or less internal used class representing a pattern. You may use
this class to "hard-code" your syntax-definition. """
def __init__(self, regexp, style="DEFAULT", group=0, flags=""):
""" The constructor takes at least on argument: the regular-expression.
The optional kwarg style defines the style applied to the string
matched by the regexp.
The kwarg group may be used to define which group of the regular
expression will be used for highlighting (Note: This means that only
the selected group will be highlighted but the complete pattern must
match!)
The optional kwarg flags specifies flags for the regular expression.
Look at the Python lib-ref for a list of flags and there meaning."""
# assemble re-flag
flags += "ML"; flag = 0
_log_debug("init rule %s -> %s (%s)"%(regexp, style, flags))
for char in flags:
if char == 'M': flag |= re.M
if char == 'L': flag |= re.L
if char == 'S': flag |= re.S
if char == 'I': flag |= re.I
if char == 'U': flag |= re.U
if char == 'X': flag |= re.X
# compile re
try: self._regexp = re.compile(regexp, flag)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
self._group = group
self.tag_name = style
def __call__(self, txt, start, end):
m = self._regexp.search(txt)
if not m: return None
mstart, mend = m.start(self._group), m.end(self._group)
s = start.copy(); s.forward_chars(mstart)
e = start.copy(); e.forward_chars(mend)
return (s,e)
class KeywordList(Pattern):
""" This class may be used for hard-code a syntax-definition. It specifies
a pattern for a keyword-list. This simplifies the definition of
keyword-lists. """
def __init__(self, keywords, style="keyword", flags=""):
""" The constructor takes at least on argument: A list of strings
specifying the keywords to highlight.
The optional kwarg style specifies the style used to highlight these
keywords.
The optional kwarg flags specifies the flags for the
(internal generated) regular-expression. """
regexp = "(?:\W|^)(%s)\W"%("|".join(keywords),)
Pattern.__init__(self, regexp, style, group=1, flags=flags)
class String:
""" This class may be used to hard-code a syntax-definition. It simplifies
the definition of a "string". A "string" is something that consists of
a start-pattern and an end-pattern. The end-pattern may be content of
the string if it is escaped. """
def __init__(self, starts, ends, escape=None, style="string"):
""" The constructor needs at least two arguments: The start- and
end-pattern.
            The optional kwarg escape specifies an escape-sequence escaping the
            end-pattern.
The optional kwarg style specifies the style used to highlight the
string. """
try:
self._starts = re.compile(starts)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
if escape:
end_exp = "[^%(esc)s](?:%(esc)s%(esc)s)*%(end)s"
end_exp = end_exp%{'esc':escape*2,'end':ends}
else:
end_exp = ends
try:
self._ends = re.compile(end_exp)
except re.error, e:
raise Exception("Invalid regexp \"%s\": %s"%(regexp,str(e)))
self.tag_name = style
def __call__(self, txt, start, end):
start_match = self._starts.search(txt)
if not start_match: return
start_it = start.copy()
start_it.forward_chars(start_match.start(0))
end_it = end.copy()
end_match = self._ends.search(txt, start_match.end(0)-1)
if end_match:
end_it.set_offset(start.get_offset()+end_match.end(0))
return start_it, end_it
class LanguageDefinition:
""" This class is a container class for all rules (Pattern, KeywordList,
...) specifying the language. You have to used this class if you like
to hard-code your syntax-definition. """
def __init__(self, rules):
""" The constructor takes only one argument: A list of rules (i.e
Pattern, KeywordList and String). """
self._grammar = rules
self._styles = dict()
def __call__(self, buf, start, end=None):
# if no end given -> end of buffer
if not end: end = buf.get_end_iter()
mstart = mend = end
mtag = None
txt = buf.get_slice(start, end)
# search min match
for rule in self._grammar:
# search pattern
m = rule(txt, start, end)
if not m: continue
# prefer match with smallest start-iter
if m[0].compare(mstart) < 0:
mstart, mend = m
mtag = rule.tag_name
continue
if m[0].compare(mstart)==0 and m[1].compare(mend)>0:
mstart, mend = m
mtag = rule.tag_name
continue
return (mstart, mend, mtag)
def get_styles(self):
return self._styles
class SyntaxLoader(ContentHandler, LanguageDefinition):
""" This class loads a syntax definition. There have to be a file
named LANGUAGENAME.xml in one of the directories specified in the
global path-list. You may add a directory using the add_syntax_path()
function. """
# some translation-tables for the style-defs:
style_weight_table = {'ultralight': pango.WEIGHT_ULTRALIGHT,
'light': pango.WEIGHT_LIGHT,
'normal': pango.WEIGHT_NORMAL,
'bold': pango.WEIGHT_BOLD,
'ultrabold': pango.WEIGHT_ULTRABOLD,
'heavy': pango.WEIGHT_HEAVY}
style_variant_table = {'normal': pango.VARIANT_NORMAL,
'smallcaps': pango.VARIANT_SMALL_CAPS}
style_underline_table = {'none': pango.UNDERLINE_NONE,
'single': pango.UNDERLINE_SINGLE,
'double': pango.UNDERLINE_DOUBLE}
style_style_table = {'normal': pango.STYLE_NORMAL,
'oblique': pango.STYLE_OBLIQUE,
'italic': pango.STYLE_ITALIC}
style_scale_table = {
'xx_small': pango.SCALE_XX_SMALL,
'x_small': pango.SCALE_X_SMALL,
'small': pango.SCALE_SMALL,
'medium': pango.SCALE_MEDIUM,
'large': pango.SCALE_LARGE,
'x_large': pango.SCALE_X_LARGE,
'xx_large': pango.SCALE_XX_LARGE,
}
def __init__(self, lang_name):
""" The constructor takes only one argument: the language name.
The constructor tries to load the syntax-definition from a
syntax-file in one directory of the global path-list.
An instance of this class IS a LanguageDefinition. You can pass it
to the constructor of the CodeBuffer class. """
LanguageDefinition.__init__(self, [])
ContentHandler.__init__(self)
# search for syntax-files:
fname = None
for syntax_dir in SYNTAX_PATH:
fname = os.path.join(syntax_dir, "%s.xml"%lang_name)
if os.path.isfile(fname): break
_log_debug("Loading syntaxfile %s"%fname)
if not os.path.isfile(fname):
raise Exception("No snytax-file for %s found!"%lang_name)
xml.sax.parse(fname, self)
# Dispatch start/end - document/element and chars
def startDocument(self):
self.__stack = []
def endDocument(self):
del self.__stack
def startElement(self, name, attr):
self.__stack.append( (name, attr) )
if hasattr(self, "start_%s"%name):
handler = getattr(self, "start_%s"%name)
handler(attr)
def endElement(self, name):
if hasattr(self, "end_%s"%name):
handler = getattr(self, "end_%s"%name)
handler()
del self.__stack[-1]
def characters(self, txt):
if not self.__stack: return
name, attr = self.__stack[-1]
if hasattr(self, "chars_%s"%name):
handler = getattr(self, "chars_%s"%name)
handler(txt)
# Handle regexp-patterns
def start_pattern(self, attr):
self.__pattern = ""
self.__group = 0
self.__flags = ''
self.__style = attr['style']
if 'group' in attr.keys(): self.__group = int(attr['group'])
if 'flags' in attr.keys(): self.__flags = attr['flags']
def end_pattern(self):
rule = Pattern(self.__pattern, self.__style, self.__group, self.__flags)
self._grammar.append(rule)
del self.__pattern
del self.__group
del self.__flags
del self.__style
def chars_pattern(self, txt):
self.__pattern += unescape(txt)
# handle keyword-lists
def start_keywordlist(self, attr):
self.__style = "keyword"
self.__flags = ""
if 'style' in attr.keys():
self.__style = attr['style']
if 'flags' in attr.keys():
self.__flags = attr['flags']
self.__keywords = []
def end_keywordlist(self):
kwlist = KeywordList(self.__keywords, self.__style, self.__flags)
self._grammar.append(kwlist)
del self.__keywords
del self.__style
del self.__flags
def start_keyword(self, attr):
self.__keywords.append("")
def end_keyword(self):
if not self.__keywords[-1]:
del self.__keywords[-1]
def chars_keyword(self, txt):
parent,pattr = self.__stack[-2]
if not parent == "keywordlist": return
self.__keywords[-1] += unescape(txt)
#handle String-definitions
def start_string(self, attr):
self.__style = "string"
self.__escape = None
if 'escape' in attr.keys():
self.__escape = attr['escape']
if 'style' in attr.keys():
self.__style = attr['style']
self.__start_pattern = ""
self.__end_pattern = ""
def end_string(self):
strdef = String(self.__start_pattern, self.__end_pattern,
self.__escape, self.__style)
self._grammar.append(strdef)
del self.__style
del self.__escape
del self.__start_pattern
del self.__end_pattern
def chars_starts(self, txt):
self.__start_pattern += unescape(txt)
def chars_ends(self, txt):
self.__end_pattern += unescape(txt)
# handle style
def start_style(self, attr):
self.__style_props = dict()
self.__style_name = attr['name']
def end_style(self):
self._styles[self.__style_name] = self.__style_props
del self.__style_props
del self.__style_name
def start_property(self, attr):
self.__style_prop_name = attr['name']
def chars_property(self, value):
        value = value.strip()
# convert value
if self.__style_prop_name in ['font','foreground','background',]:
pass
        elif self.__style_prop_name == 'variant':
            if not value in self.style_variant_table.keys():
                raise Exception("Unknown style-variant: %s"%value)
            value = self.style_variant_table[value]
        elif self.__style_prop_name == 'underline':
            if not value in self.style_underline_table.keys():
                raise Exception("Unknown underline-style: %s"%value)
            value = self.style_underline_table[value]
        elif self.__style_prop_name == 'scale':
            if not value in self.style_scale_table.keys():
                raise Exception("Unknown scale-style: %s"%value)
            value = self.style_scale_table[value]
        elif self.__style_prop_name == 'weight':
            if not value in self.style_weight_table.keys():
                raise Exception("Unknown style-weight: %s"%value)
            value = self.style_weight_table[value]
        elif self.__style_prop_name == 'style':
            if not value in self.style_style_table.keys():
                raise Exception("Unknown text-style: %s"%value)
            value = self.style_style_table[value]
else:
raise Exception("Unknown style-property %s"%self.__style_prop_name)
# store value
self.__style_props[self.__style_prop_name] = value
class CodeBuffer(gtk.TextBuffer):
""" This class extends the gtk.TextBuffer to support syntax-highlighting.
You can use this class like a normal TextBuffer. """
def __init__(self, table=None, lang=None, styles={}):
""" The constructor takes 3 optional arguments.
table specifies a tag-table associated with the TextBuffer-instance.
This argument will be passed directly to the constructor of the
TextBuffer-class.
lang specifies the language-definition. You have to load one using
the SyntaxLoader-class or you may hard-code your syntax-definition
using the LanguageDefinition-class.
styles is a dictionary used to extend or overwrite the default styles
provided by this module (DEFAULT_STYLE) and any language specific
styles defined by the LanguageDefinition. """
gtk.TextBuffer.__init__(self, table)
        # default styles (copied so per-instance updates do not alter the module-wide defaults)
        self.styles = dict(DEFAULT_STYLES)
# update styles with lang-spec:
if lang:
self.styles.update(lang.get_styles())
# update styles with user-defined
self.styles.update(styles)
# create tags
for name, props in self.styles.items():
style = dict(self.styles['DEFAULT']) # take default
style.update(props) # and update with props
self.create_tag(name, **style)
# store lang-definition
self._lang_def = lang
self.connect_after("insert-text", self._on_insert_text)
self.connect_after("delete-range", self._on_delete_range)
self.connect('apply-tag', self._on_apply_tag)
self._apply_tags = False
def _on_apply_tag(self, buf, tag, start, end):
# FIXME This is a hack! It allows apply-tag only while
# _on_insert_text() and _on_delete_range()
if not self._apply_tags:
self.emit_stop_by_name('apply-tag')
return True
_log_debug("tag \"%s\" as %s"%(self.get_slice(start,end), tag.get_property("name")))
def _on_insert_text(self, buf, it, text, length):
# if no syntax defined -> nop
if not self._lang_def: return False
it = it.copy()
it.backward_chars(length)
if not it.begins_tag():
it.backward_to_tag_toggle(None)
_log_debug("Not tag-start -> moved iter to %i (%s)"%(it.get_offset(), it.get_char()))
if it.begins_tag(self.get_tag_table().lookup("DEFAULT")):
it.backward_to_tag_toggle(None)
_log_debug("Iter at DEFAULT-start -> moved to %i (%s)"%(it.get_offset(), it.get_char()))
self._apply_tags = True
self.update_syntax(it)
self._apply_tags = False
def _on_delete_range(self, buf, start, end):
# if no syntax defined -> nop
if not self._lang_def: return False
start = start.copy()
if not start.begins_tag():
start.backward_to_tag_toggle(None)
self._apply_tags = True
self.update_syntax(start)
self._apply_tags = False
def update_syntax(self, start, end=None):
""" More or less internal used method to update the
syntax-highlighting. """
# if no lang set
if not self._lang_def: return
_log_debug("Update syntax from %i"%start.get_offset())
# if not end defined
if not end: end = self.get_end_iter()
# We do not use recursion -> long files exceed rec-limit!
finished = False
while not finished:
# search first rule matching txt[start..end]
mstart, mend, tagname = self._lang_def(self, start, end)
            # optimisation: if mstart-mend is already tagged with tagname
            # -> finished
if tagname: #if something found
tag = self.get_tag_table().lookup(tagname)
if mstart.begins_tag(tag) and mend.ends_tag(tag) and not mstart.equal(start):
self.remove_all_tags(start,mstart)
self.apply_tag_by_name("DEFAULT", start, mstart)
_log_debug("Optimized: Found old tag at %i (%s)"%(mstart.get_offset(), mstart.get_char()))
# finish
finished = True
continue
# remove all tags from start..mend (mend == buffer-end if no match)
self.remove_all_tags(start, mend)
            # make start..mstart = DEFAULT (mstart == buffer-end if no match)
if not start.equal(mstart):
_log_debug("Apply DEFAULT")
self.apply_tag_by_name("DEFAULT", start, mstart)
# nothing found -> finished
if not tagname:
finished = True
continue
# apply tag
_log_debug("Apply %s"%tagname)
self.apply_tag_by_name(tagname, mstart, mend)
start = mend
if start == end:
finished = True
continue
def reset_language(self, lang_def):
""" Reset the currently used language-definition. """
# remove all tags from complete text
start = self.get_start_iter()
self.remove_all_tags(start, self.get_end_iter())
# store lexer
self._lang_def = lang_def
# update styles from lang_def:
if self._lang_def:
self.update_styles(self._lang_def.get_styles())
# and ...
self._apply_tags = True
self.update_syntax(start)
self._apply_tags = False
def update_styles(self, styles):
""" Update styles. This method may be used to reset any styles at
runtime. """
self.styles.update(styles)
table = self.get_tag_table()
for name, props in styles.items():
            style = dict(self.styles['DEFAULT'])   # take a copy of the default
style.update(props)
# if tagname is unknown:
if not table.lookup(name):
_log_debug("Create tag: %s (%s)"%(name, style))
self.create_tag(name, **style)
else: # update tag
tag = table.lookup(name)
_log_debug("Update tag %s with (%s)"%(name, style))
map(lambda i: tag.set_property(i[0],i[1]), style.items())
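
Following the module docstring, a minimal usage sketch for CodeBuffer (assumes a python.xml syntax file is found on SYNTAX_PATH; one ships in this directory):

import gtk
lang = SyntaxLoader("python")
buf = CodeBuffer(lang=lang)
win = gtk.Window()
view = gtk.TextView(buf)
win.add(view)
win.connect("destroy", lambda w: gtk.main_quit())
win.show_all()
gtk.main()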

210
src/gui/python.xml Normal file

@@ -0,0 +1,210 @@
<?xml version="1.0"?>
<!--
This syntax-file was generated by sourceview2codebuffer.xsl from
GtkSourceView's Python-syntax-file!
    This transformation is not perfect, so it may need some hand-work to fix
    minor issues in this file.
You can get sourceview2codebuffer.xsl from http://pygtkcodebuffer.googlecode.com/.
-->
<syntax>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?"""</starts><ends>"""</ends></string>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?'''</starts><ends>'''</ends></string>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?"</starts><ends>"</ends></string>
<string style="string" escape="\"><starts>([uUrR]|[uU][rR]|[rR][uU])?'</starts><ends>'</ends></string>
<keywordlist style="preprocessor">
<keyword>import</keyword>
<keyword>from</keyword>
<keyword>as</keyword>
<keyword>False</keyword>
<keyword>None</keyword>
<keyword>True</keyword>
<keyword>__name__</keyword>
<keyword>__debug__</keyword>
</keywordlist>
<keywordlist style="keyword">
<keyword>def</keyword>
<keyword>class</keyword>
<keyword>return</keyword>
</keywordlist>
<keywordlist style="keyword">
<keyword>and</keyword>
<keyword>assert</keyword>
<keyword>break</keyword>
<keyword>continue</keyword>
<keyword>del</keyword>
<keyword>elif</keyword>
<keyword>else</keyword>
<keyword>except</keyword>
<keyword>exec</keyword>
<keyword>finally</keyword>
<keyword>for</keyword>
<keyword>global</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>is</keyword>
<keyword>lambda</keyword>
<keyword>not</keyword>
<keyword>or</keyword>
<keyword>pass</keyword>
<keyword>print</keyword>
<keyword>raise</keyword>
<keyword>try</keyword>
<keyword>while</keyword>
<keyword>yield</keyword>
</keywordlist>
<keywordlist style="special">
<keyword>ArithmeticError</keyword>
<keyword>AssertionError</keyword>
<keyword>AttributeError</keyword>
<keyword>EnvironmentError</keyword>
<keyword>EOFError</keyword>
<keyword>Exception</keyword>
<keyword>FloatingPointError</keyword>
<keyword>ImportError</keyword>
<keyword>IndentationError</keyword>
<keyword>IndexError</keyword>
<keyword>IOError</keyword>
<keyword>KeyboardInterrupt</keyword>
<keyword>KeyError</keyword>
<keyword>LookupError</keyword>
<keyword>MemoryError</keyword>
<keyword>NameError</keyword>
<keyword>NotImplementedError</keyword>
<keyword>OSError</keyword>
<keyword>OverflowError</keyword>
<keyword>ReferenceError</keyword>
<keyword>RuntimeError</keyword>
<keyword>StandardError</keyword>
<keyword>StopIteration</keyword>
<keyword>SyntaxError</keyword>
<keyword>SystemError</keyword>
<keyword>SystemExit</keyword>
<keyword>TabError</keyword>
<keyword>TypeError</keyword>
<keyword>UnboundLocalError</keyword>
<keyword>UnicodeDecodeError</keyword>
<keyword>UnicodeEncodeError</keyword>
<keyword>UnicodeError</keyword>
<keyword>UnicodeTranslateError</keyword>
<keyword>ValueError</keyword>
<keyword>WindowsError</keyword>
<keyword>ZeroDivisionError</keyword>
<keyword>Warning</keyword>
<keyword>UserWarning</keyword>
<keyword>DeprecationWarning</keyword>
<keyword>PendingDeprecationWarning</keyword>
<keyword>SyntaxWarning</keyword>
<keyword>OverflowWarning</keyword>
<keyword>RuntimeWarning</keyword>
<keyword>FutureWarning</keyword>
<keyword>__import__</keyword>
<keyword>abs</keyword>
<keyword>apply</keyword>
<keyword>basestring</keyword>
<keyword>bool</keyword>
<keyword>buffer</keyword>
<keyword>callable</keyword>
<keyword>chr</keyword>
<keyword>classmethod</keyword>
<keyword>cmp</keyword>
<keyword>coerce</keyword>
<keyword>compile</keyword>
<keyword>complex</keyword>
<keyword>delattr</keyword>
<keyword>dict</keyword>
<keyword>dir</keyword>
<keyword>divmod</keyword>
<keyword>enumerate</keyword>
<keyword>eval</keyword>
<keyword>execfile</keyword>
<keyword>file</keyword>
<keyword>filter</keyword>
<keyword>float</keyword>
<keyword>getattr</keyword>
<keyword>globals</keyword>
<keyword>hasattr</keyword>
<keyword>hash</keyword>
<keyword>hex</keyword>
<keyword>id</keyword>
<keyword>input</keyword>
<keyword>int</keyword>
<keyword>intern</keyword>
<keyword>isinstance</keyword>
<keyword>issubclass</keyword>
<keyword>iter</keyword>
<keyword>len</keyword>
<keyword>list</keyword>
<keyword>locals</keyword>
<keyword>long</keyword>
<keyword>map</keyword>
<keyword>max</keyword>
<keyword>min</keyword>
<keyword>object</keyword>
<keyword>oct</keyword>
<keyword>open</keyword>
<keyword>ord</keyword>
<keyword>pow</keyword>
<keyword>property</keyword>
<keyword>range</keyword>
<keyword>raw_input</keyword>
<keyword>reduce</keyword>
<keyword>reload</keyword>
<keyword>repr</keyword>
<keyword>round</keyword>
<keyword>setattr</keyword>
<keyword>slice</keyword>
<keyword>staticmethod</keyword>
<keyword>str</keyword>
<keyword>sum</keyword>
<keyword>super</keyword>
<keyword>tuple</keyword>
<keyword>type</keyword>
<keyword>unichr</keyword>
<keyword>unicode</keyword>
<keyword>vars</keyword>
<keyword>xrange</keyword>
<keyword>zip</keyword>
</keywordlist>
<!-- Some Experiment keywords -->
<keywordlist style="special">
<keyword>set_pfg</keyword>
<keyword>set_pfg_wt</keyword>
<keyword>set_description</keyword>
<keyword>get_description</keyword>
<keyword>set_phase</keyword>
<keyword>set_frequency</keyword>
<keyword>ttl_pulse</keyword>
<keyword>rf_pulse</keyword>
<keyword>state_start</keyword>
<keyword>state_end</keyword>
<keyword>loop_start</keyword>
<keyword>loop_end</keyword>
<keyword>set_pts_local</keyword>
<keyword>wait</keyword>
<keyword>record</keyword>
</keywordlist>
<keywordlist style="datatype">
<keyword>Accumulation</keyword>
<keyword>Experiment</keyword>
<keyword>ADC_Result</keyword>
<keyword>MeasurementResult</keyword>
<keyword>AccumulatedValue</keyword>
</keywordlist>
<pattern style="comment">#.*$</pattern>
<pattern style="datatype">\bself\b</pattern>
<pattern style="number">\b([1-9][0-9]*|0)([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b</pattern>
<pattern style="number">\b([0-9]+[Ee][-]?[0-9]+|([0-9]*\.[0-9]+|[0-9]+\.)([Ee][-]?[0-9]+)?)[fFlL]?</pattern>
<pattern style="number">\b0[0-7]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b</pattern>
<pattern style="number">\b0[xX][0-9a-fA-F]+([Uu]([Ll]|LL|ll)?|([Ll]|LL|ll)[Uu]?)?\b</pattern>
</syntax>

152
src/gui/script_interface.py Normal file

@@ -0,0 +1,152 @@
#! /usr/bin/env python
import time
import sys
import os
import os.path
import tables
import damaris.data.DataPool as DataPool
import damaris.gui.ResultReader as ResultReader
import damaris.gui.ExperimentWriter as ExperimentWriter
import damaris.gui.BackendDriver as BackendDriver
import damaris.gui.ResultHandling as ResultHandling
import damaris.gui.ExperimentHandling as ExperimentHandling
def some_listener(event):
if event.subject=="__recentexperiment" or event.subject=="__recentresult":
r=event.origin.get("__recentresult",-1)+1
e=event.origin.get("__recentexperiment",-1)+1
if e!=0:
ratio=100.0*r/e
else:
ratio=100.0
print "\r%d/%d (%.0f%%)"%(r,e,ratio),
class ScriptInterface:
def __init__(self, exp_script=None, res_script=None, backend_executable=None, spool_dir="spool"):
self.exp_script=exp_script
self.res_script=res_script
self.backend_executable=backend_executable
self.spool_dir=os.path.abspath(spool_dir)
self.exp_handling=self.res_handling=None
self.exp_writer=self.res_reader=self.back_driver=None
if self.backend_executable is not None:
self.back_driver=BackendDriver.BackendDriver(self.backend_executable, spool_dir)
if self.exp_script: self.exp_writer=self.back_driver.get_exp_writer()
if self.res_script: self.res_reader=self.back_driver.get_res_reader()
else:
self.back_driver=None
if self.exp_script: self.exp_writer=ExperimentWriter.ExperimentWriter(spool_dir)
if self.res_script: self.res_reader=ResultReader.ResultReader(spool_dir)
        self.data=DataPool.DataPool()
def runScripts(self):
# get script engines
if self.exp_script and self.exp_writer:
self.exp_handling=ExperimentHandling.ExperimentHandling(self.exp_script, self.exp_writer, self.data)
if self.res_script and self.res_reader:
self.res_handling=ResultHandling.ResultHandling(self.res_script, self.res_reader, self.data)
# start them
if self.exp_handling: self.exp_handling.start()
if self.back_driver is not None: self.back_driver.start()
if self.res_handling: self.res_handling.start()
def waitForScriptsEnding(self):
        # schedule periodic dumps of the data pool
        dump_interval=600
        next_dump_time=time.time()+dump_interval
# keyboard interrupts are handled in extra cleanup loop
try:
while filter(None,[self.exp_handling,self.res_handling,self.back_driver]):
time.sleep(0.1)
if time.time()>next_dump_time:
self.dump_data("pool/data_pool.h5")
next_dump_time+=dump_interval
if self.exp_handling is not None:
if not self.exp_handling.isAlive():
self.exp_handling.join()
if self.exp_handling.raised_exception:
print ": experiment script failed at line %d (function %s): %s"%(self.exp_handling.location[0],
self.exp_handling.location[1],
self.exp_handling.raised_exception)
else:
print ": experiment script finished"
self.exp_handling = None
if self.res_handling is not None:
if not self.res_handling.isAlive():
self.res_handling.join()
if self.res_handling.raised_exception:
print ": result script failed at line %d (function %s): %s"%(self.res_handling.location[0],
self.res_handling.location[1],
self.res_handling.raised_exception)
else:
print ": result script finished"
self.res_handling = None
if self.back_driver is not None:
if not self.back_driver.isAlive():
print ": backend finished"
self.back_driver=None
except KeyboardInterrupt:
still_running=filter(None,[self.exp_handling,self.res_handling,self.back_driver])
for r in still_running:
r.quit_flag.set()
for r in still_running:
r.join()
def dump_data(self, filename):
try:
# write data from pool
dump_file=tables.openFile(filename,mode="w",title="DAMARIS experiment data")
self.data.write_hdf5(dump_file, complib='zlib', complevel=6)
# write scripts
scriptgroup=dump_file.createGroup("/","scripts","Used Scripts")
dump_file.createArray(scriptgroup,"experiment_script", self.exp_script)
dump_file.createArray(scriptgroup,"result_script", self.res_script)
dump_file.createArray(scriptgroup,"backend_executable", self.backend_executable)
dump_file.createArray(scriptgroup,"spool_directory", self.spool_dir)
dump_file.flush()
dump_file.close()
dump_file=None
# todo
except Exception,e:
print "dump failed", e
if __name__=="__main__":
    if len(sys.argv)<3:
        print "usage: %s experiment_script result_script [spool directory]"%sys.argv[0]
        sys.exit(1)
    if len(sys.argv)>3:
        spool_dir=sys.argv[3]
    else:
        spool_dir=os.getcwd()
expscriptfile=open(sys.argv[1])
expscript=expscriptfile.read()
resscriptfile=open(sys.argv[2])
resscript=resscriptfile.read()
si=ScriptInterface(expscript, resscript,"/usr/lib/damaris/backends/Mobilecore", spool_dir)
si.data.register_listener(some_listener)
si.runScripts()
si.waitForScriptsEnding()
si.dump_data("data_pool.h5")
si=None