pyMez.Code.DataHandlers.NISTModels module
NISTModels is a module to handle data types found at NIST in Boulder, CO
#-----------------------------------------------------------------------------
# Name:        NISTModels
# Purpose:     To handle data generated at NIST Boulder
# Author:      Aric Sanders
# Created:     2/22/2016
# License:     MIT License
#-----------------------------------------------------------------------------
""" NISTModels is a module to handle data types found at NIST in Boulder, CO """

#-----------------------------------------------------------------------------
# Standard Imports
import os
import fnmatch

#-----------------------------------------------------------------------------
# Third Party Imports
# Optional dependency: method aliasing is disabled if the package is missing.
try:
    from pyMez.Code.Utils.Alias import *
    METHOD_ALIASES = 1
except:
    print("The module pyMez.Code.Utils.Alias was not found")
    METHOD_ALIASES = 0
    pass
# Required dependencies: re-raise as ImportError with a console hint.
try:
    from pyMez.Code.DataHandlers.GeneralModels import *
except:
    print("The module pyMez.Code.DataHandlers.GeneralModels was not found,"
          "please put it on the python path")
    raise ImportError
try:
    from pyMez.Code.DataHandlers.TouchstoneModels import *
except:
    print("The module pyMez.Code.DataHandlers.TouchstoneModels was not found,"
          "please put it on the python path")
    raise ImportError
try:
    import numpy as np
except:
    print("The module numpy was not found,"
          "please put it on the python path")
    raise ImportError
# Plotting is optional: import failure is reported but not fatal.
try:
    import matplotlib.pyplot as plt
except:
    print("The module matplotlib was not found,"
          "please put it on the python path")

#-----------------------------------------------------------------------------
# Module Constants
# Column layout of a one-port calrep table: frequency, magnitude and its four
# uncertainty terms, then phase (arg) and its four uncertainty terms.
ONE_PORT_COLUMN_NAMES = ["Frequency", "mag", "uMb", "uMa", "uMd", "uMg",
                         "arg", "uAb", "uAa", "uAd", "uAg"]
#Note there are 2 power models!!!
# one with 4 error terms and one with 3
POWER_COLUMN_NAMES = ['Frequency', 'Efficiency', 'uEb', 'uEa', 'uEd', 'uEg',
                      'Calibration_Factor', 'uCb', 'uCa', 'uCd', 'uCg']
POWER_3TERM_COLUMN_NAMES = ['Frequency', 'Efficiency', 'uEs', 'uEc', 'uEe',
                            'Calibration_Factor', 'uCs', 'uCc', 'uCe']
# When True, S21 magnitudes read from calrep output (stored in dB) are
# converted to linear magnitude by the calrep models below.
CONVERT_S21 = True

#-----------------------------------------------------------------------------
# Module Functions

def calrep_to_benchmark(file_path):
    """Creates a benchmark list given a path to a calrep file, assumes column
    names are 2 lines after the occurrence of the last /.

    Returns [header, column_names, data]:
      header       - raw lines up to and including the last line starting with '/'
      column_names - the space-split line two lines below that header line
      data         - every raw line after the column-names line

    Raises ValueError if no line beginning with '/' is found.
    """
    # fix: use a context manager so the file handle is always closed
    with open(file_path, 'r') as in_file:
        lines = in_file.readlines()
    block_end = re.compile('/')
    # fix: previously an input without any '/' line raised an opaque NameError
    last_block_comment_line = None
    for index, line in enumerate(lines):
        if re.match(block_end, line):
            last_block_comment_line = index
    if last_block_comment_line is None:
        raise ValueError("No header line beginning with '/' was found in {0}".format(file_path))
    header = lines[0:last_block_comment_line + 1]
    columns_line = last_block_comment_line + 2
    column_names = lines[columns_line].split(' ')
    data = lines[columns_line + 1:None]
    return [header, column_names, data]
"Total uncertainty in phase"}, "header": None, "column_names": ONE_PORT_COLUMN_NAMES, "column_names_end_token": "\n", "data": None, "row_formatter_string": None, "data_table_element_separator": None,"row_begin_token":None, "row_end_token":None,"escape_character":None, "data_begin_token":None,"data_end_token":None} self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: self.path=file_path self.__read_and_fix__() #build the row_formatting string, the original files have 4 decimals of precision for freq/gamma and 2 for Phase row_formatter="" for i in range(11): if i<6: row_formatter=row_formatter+"{"+str(i)+":.4f}{delimiter}" elif i==10: row_formatter=row_formatter+"{"+str(i)+":.2f}" else: row_formatter=row_formatter+"{"+str(i)+":.2f}{delimiter}" self.options["row_formatter_string"]=row_formatter AsciiDataTable.__init__(self,None,**self.options) if file_path is not None: self.path=file_path def __read_and_fix__(self): """Reads in a 1 port ascii file and fixes any issues with inconsistent delimiters, etc""" lines=[] table_type=self.path.split(".")[-1] in_file=open(self.path,'r') for line in in_file: if not re.match('[\s]+(?!\w+)',line): #print line lines.append(line) # Handle the cases in which it is the comma delimited table if re.match('txt',table_type,re.IGNORECASE): lines=strip_tokens(lines,*[self.options['data_begin_token'], self.options['data_end_token']]) self.options["data"]=strip_all_line_tokens(lines,begin_token=self.options["row_begin_token"], end_token=self.options["row_end_token"]) self.options["data"]=split_all_rows(self.options["data"],delimiter=self.options["data_delimiter"], escape_character=self.options["escape_character"]) self.options["data"]=convert_all_rows(self.options["data"],self.options["column_types"]) #print 
self.options["data"] root_name_pattern=re.compile('(?P<root_name>\w+)[abc].txt',re.IGNORECASE) root_name_match=re.search(root_name_pattern,self.path) root_name=root_name_match.groupdict()["root_name"] self.options["header"]=["Device_Id = {0}".format(root_name)] elif re.match("asc",table_type,re.IGNORECASE): self.lines=lines data_begin_line=self.find_line(" TABLE")+2 data=np.loadtxt(self.path,skiprows=data_begin_line) self.options["data"]=data.tolist() self.options["header"]=lines[:self.find_line(" TABLE")] #print("The {0} variable is {1}".format('data.tolist()',data.tolist())) def show(self): fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True) ax0.errorbar(self.get_column('Frequency'),self.get_column('mag'), yerr=self.get_column('uMg'),fmt='k--') ax0.set_title('Magnitude S11') ax1.errorbar(self.get_column('Frequency'),self.get_column('arg'), yerr=self.get_column('uAg'),fmt='ro') ax1.set_title('Phase S11') plt.show() class PowerCalrepModel(AsciiDataTable): def __init__(self,file_path,**options): "Intializes the PowerModel Class, it is assumed that the file is of table type" # This is a general pattern for adding a lot of options defaults= {"data_delimiter": ",", "column_names_delimiter": ",", "specific_descriptor": 'One_Port', "general_descriptor": 'Power', "extension": 'txt', "comment_begin": "#", "comment_end": "\n", "column_types": ['float' for i in range(len(POWER_COLUMN_NAMES))], "column_descriptions": {"Frequency": "Frequency in GHz", "Efficiency":"Effective Efficiency", "uEs": "Uncertainty in efficiency due to standards", "uEc": "Uncertainty in efficiency for repeated connects", "uEe": "Total uncertainty in Efficiency", "Calibration_Factor": "Effective efficiency modified by reflection coefficient", "uCs": "Uncertainty in calibration factor due to standards", "uCc": "Uncertainty in calibration factor for repeated connects", "uCe": "Total uncertainty in calibration factor"}, "header": None, "column_names":POWER_COLUMN_NAMES, "column_names_end_token": "\n", 
"data": None, "row_formatter_string": None, "data_table_element_separator": None,"row_begin_token":None, "row_end_token":None,"escape_character":None, "data_begin_token":None,"data_end_token":None} self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: self.path=file_path self.__read_and_fix__() #build the row_formatting string, the original files have 4 decimals of precision for freq/gamma and 2 for Phase row_formatter="" for i in range(11): if i<6: row_formatter=row_formatter+"{"+str(i)+":.4f}{delimiter}" elif i==10: row_formatter=row_formatter+"{"+str(i)+":.2f}" else: row_formatter=row_formatter+"{"+str(i)+":.2f}{delimiter}" self.options["row_formatter_string"]=row_formatter AsciiDataTable.__init__(self,None,**self.options) if file_path is not None: self.path=file_path def __read_and_fix__(self): """Reads in a 1 port ascii file and fixes any issues with inconsistent delimiters, etc""" lines=[] table_type=self.path.split(".")[-1] in_file=open(self.path,'r') for line in in_file: if not re.match('[\s]+(?!\w+)',line): #print line lines.append(line) # Handle the cases in which it is the comma delimited table if re.match('txt',table_type,re.IGNORECASE): lines=strip_tokens(lines,*[self.options['data_begin_token'], self.options['data_end_token']]) self.options["data"]=strip_all_line_tokens(lines,begin_token=self.options["row_begin_token"], end_token=self.options["row_end_token"]) self.options["data"]=split_all_rows(self.options["data"],delimiter=self.options["data_delimiter"], escape_character=self.options["escape_character"]) self.options["data"]=convert_all_rows(self.options["data"],self.options["column_types"]) #print self.options["data"] root_name_pattern=re.compile('(?P<root_name>\w+)[abc].txt',re.IGNORECASE) 
root_name_match=re.search(root_name_pattern,self.path) root_name=root_name_match.groupdict()["root_name"] self.options["header"]=["Device_Id = {0}".format(root_name)] class OnePortRawModel(AsciiDataTable): """ Class that deals with the OnePort Raw Files after conversion to Ascii using Ron Ginley's converter. These files typically have header information seperated from data by !! Header format is: Line 1: Spid$ - identification of type of system used Line 2: Systemletter$ - letter name indicating which system was used Line 3: Conncal$ - connector type from the system calibration Line 4: Connectors$ - connector type used for the measurement Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power) Line 6: Datea$ - date of measurement Line 7: Timea$ - time of measurement Line 8: Programm$ - name of program used Line 9: Rev$ - program revision Line 10: Opr$ - operator Line 11: Cfile$ - calibration name Line 12: Cdate$ - calibration date Line 13: Sport - identification of which port or direction was used for measurement Line 14: Numconnects ? number of disconnect/reconnect cycles Line 15: Numrepeats ? number of repeat measurements for each connect (usually 1) Line 16: Nbs ? not sure Line 17: Nfreq ? number of frequencies Line 18: Startfreq ? data row pointer for bdat files Line 19: Devicedescript$ - description of device being measured or of test being done Line 20: Devicenum$ - Identifying number for device ? 
class OnePortRawModel(AsciiDataTable):
    """ Class that deals with the OnePort Raw Files after conversion to Ascii
    using Ron Ginley's converter. These files typically have header information
    seperated from data by !!
    Header format is:
    Line 1: Spid$ - identification of type of system used
    Line 2: Systemletter$ - letter name indicating which system was used
    Line 3: Conncal$ - connector type from the system calibration
    Line 4: Connectors$ - connector type used for the measurement
    Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power)
    Line 6: Datea$ - date of measurement
    Line 7: Timea$ - time of measurement
    Line 8: Programm$ - name of program used
    Line 9: Rev$ - program revision
    Line 10: Opr$ - operator
    Line 11: Cfile$ - calibration name
    Line 12: Cdate$ - calibration date
    Line 13: Sport - identification of which port or direction was used for measurement
    Line 14: Numconnects ? number of disconnect/reconnect cycles
    Line 15: Numrepeats ? number of repeat measurements for each connect (usually 1)
    Line 16: Nbs ? not sure
    Line 17: Nfreq ? number of frequencies
    Line 18: Startfreq ? data row pointer for bdat files
    Line 19: Devicedescript$ - description of device being measured or of test being done
    Line 20: Devicenum$ - Identifying number for device ? used for file names
    """

    def __init__(self, file_path=None, **options):
        """Initializes the OnePortRaw class, if a file_path is specified opens
        an existing file, else creates an empty container"""
        defaults = {"data_delimiter": ",",
                    "column_names_delimiter": ",",
                    "specific_descriptor": 'One_Port_Raw',
                    "general_descriptor": 'Sparameter',
                    "extension": 'txt',
                    "comment_begin": "#",
                    "comment_end": "\n",
                    "column_types": ['float', 'int', 'int', 'float', 'float', 'float', 'float'],
                    "column_descriptions": {"Frequency": "Frequency in GHz",
                                            "Direction": "Direction of connects, may be unused",
                                            "Connect": "Connect number",
                                            "magS11": "Linear magnitude for port 1",
                                            "argS11": "Phase in degrees for port 1",
                                            "magS22": "Linear magnitude for port 2",
                                            "argS22": "Phase in degrees for port 2"},
                    "header": None,
                    "column_names": ["Frequency", "Direction", "Connect",
                                     "magS11", "argS11", "magS22", "argS22"],
                    "column_names_end_token": "\n",
                    "data": None,
                    'row_formatter_string': "{0:.5f}{delimiter}{1}{delimiter}{2}{delimiter}"
                                            "{3:.4f}{delimiter}{4:.2f}{delimiter}{5:.4f}{delimiter}{6:.2f}",
                    "data_table_element_separator": None}
        self.options = {}
        # fix: dict.iteritems() is Python-2 only; items() works on 2 and 3
        for key, value in defaults.items():
            self.options[key] = value
        for key, value in options.items():
            self.options[key] = value
        # Define Method Aliases if they are available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.__read_and_fix__(file_path)
        AsciiDataTable.__init__(self, None, **self.options)
        self.path = file_path
        self.structure_metadata()

    def __read_and_fix__(self, file_path=None):
        """Inputs in the raw OnePortRaw file and fixes any problems with
        delimiters,etc. Header and data are separated by a line matching !!"""
        lines = []
        # fix: context manager guarantees the file handle is closed
        with open(file_path, 'r') as in_file:
            for index, line in enumerate(in_file):
                lines.append(line)
                if re.match("!!", line):
                    data_begin_line = index + 1
        # NOTE(review): a file without a '!!' line leaves data_begin_line
        # unbound and raises NameError here — confirm the format guarantees it
        self.lines = lines
        data = split_all_rows(lines[data_begin_line:], delimiter=", ")
        self.options["data"] = data
        self.options["header"] = lines[:data_begin_line - 1]

    def structure_metadata(self):
        """Builds self.metadata, a dictionary of key,value pairs extracted
        from the 20-line header (see class docstring for line meanings)."""
        keys = ["System_Id", "System_Letter", "Connector_Type_Calibration", "Connector_Type_Measurement",
                "Measurement_Type", "Measurement_Date", "Measurement_Time", "Program_Used", "Program_Revision",
                "Operator", "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
                "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
                "Device_Description", "Device_Id"]
        self.metadata = {}
        # fix: tolerate an empty container (no header has been read yet)
        if not self.header:
            return
        for index, key in enumerate(keys):
            self.metadata[key] = self.header[index]

    def show(self):
        """Plots S11 magnitude and phase versus frequency."""
        fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
        ax0.plot(self.get_column('Frequency'), self.get_column('magS11'), 'k--')
        ax0.set_title('Magnitude S11')
        ax1.plot(self.get_column('Frequency'), self.get_column('argS11'), 'ro')
        ax1.set_title('Phase S11')
        plt.show()
class TwoPortRawModel(AsciiDataTable):
    """ Class that deals with the TwoPort Raw Files after conversion to Ascii
    using Ron Ginley's converter. These files typically have header information
    seperated from data by !!
    Header format is:
    Line 1: Spid$ - identification of type of system used
    Line 2: Systemletter$ - letter name indicating which system was used
    Line 3: Conncal$ - connector type from the system calibration
    Line 4: Connectors$ - connector type used for the measurement
    Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power)
    Line 6: Datea$ - date of measurement
    Line 7: Timea$ - time of measurement
    Line 8: Programm$ - name of program used
    Line 9: Rev$ - program revision
    Line 10: Opr$ - operator
    Line 11: Cfile$ - calibration name
    Line 12: Cdate$ - calibration date
    Line 13: Sport - identification of which port or direction was used for measurement
    Line 14: Numconnects ? number of disconnect/reconnect cycles
    Line 15: Numrepeats ? number of repeat measurements for each connect (usually 1)
    Line 16: Nbs ? not sure
    Line 17: Nfreq ? number of frequencies
    Line 18: Startfreq ? data row pointer for bdat files
    Line 19: Devicedescript$ - description of device being measured or of test being done
    Line 20: Devicenum$ - Identifying number for device ? used for file names
    """

    def __init__(self, file_path=None, **options):
        """Initializes the TwoPortRaw class, if a file_path is specified opens
        an existing file, else creates an empty container"""
        defaults = {"data_delimiter": ",",
                    "column_names_delimiter": ",",
                    "specific_descriptor": 'Two_Port_Raw',
                    "general_descriptor": 'Sparameter',
                    "extension": 'txt',
                    "comment_begin": "#",
                    "comment_end": "\n",
                    "column_types": ['float', 'int', 'int', 'float', 'float',
                                     'float', 'float', 'float', 'float'],
                    "column_descriptions": {"Frequency": "Frequency in GHz",
                                            "Direction": "Direction of connects, may be unused",
                                            "Connect": "Connect number",
                                            "magS11": "Linear magnitude for S11",
                                            "argS11": "Phase in degrees for S11",
                                            "magS21": "Linear magnitude for S21",
                                            "argS21": "Phase in degrees for S21",
                                            "magS22": "Linear magnitude for S22",
                                            "argS22": "Phase in degrees for S22"},
                    "header": None,
                    "column_names": ["Frequency", "Direction", "Connect",
                                     "magS11", "argS11", "magS21", "argS21",
                                     "magS22", "argS22"],
                    "column_names_end_token": "\n",
                    "data": None,
                    'row_formatter_string': "{0:.5f}{delimiter}{1}{delimiter}{2}"
                                            "{delimiter}{3:.4f}{delimiter}{4:.2f}{delimiter}"
                                            "{5:.4f}{delimiter}{6:.2f}{delimiter}"
                                            "{7:.4f}{delimiter}{8:.2f}",
                    "data_table_element_separator": None}
        self.options = {}
        # fix: dict.iteritems() is Python-2 only; items() works on 2 and 3
        for key, value in defaults.items():
            self.options[key] = value
        for key, value in options.items():
            self.options[key] = value
        # Define Method Aliases if they are available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.__read_and_fix__(file_path)
        AsciiDataTable.__init__(self, None, **self.options)
        self.path = file_path
        self.structure_metadata()

    def __read_and_fix__(self, file_path=None):
        """Inputs in the raw OnePortRaw file and fixes any problems with
        delimiters,etc. Header and data are separated by a line containing !!"""
        lines = []
        # fix: context manager guarantees the file handle is closed
        with open(file_path, 'r') as in_file:
            for index, line in enumerate(in_file):
                lines.append(line)
                if re.search("!!", line):
                    data_begin_line = index + 1
        self.lines = lines
        parse_options = {"delimiter": ", ", "row_end_token": '\n'}
        data = parse_lines(lines[data_begin_line:], **parse_options)
        self.options["data"] = data
        self.options["header"] = lines[:data_begin_line - 1]

    def structure_metadata(self):
        """Builds self.metadata, a dictionary of key,value pairs extracted
        from the 20-line header (see class docstring for line meanings)."""
        keys = ["System_Id", "System_Letter", "Connector_Type_Calibration", "Connector_Type_Measurement",
                "Measurement_Type", "Measurement_Date", "Measurement_Time", "Program_Used", "Program_Revision",
                "Operator", "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
                "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
                "Device_Description", "Device_Id"]
        self.metadata = {}
        # fix: tolerate an empty container (no header has been read yet)
        if not self.header:
            return
        for index, key in enumerate(keys):
            self.metadata[key] = self.header[index]

    def show(self):
        """Plots magnitude and phase for S11, S21 and S22 in a 3x2 grid."""
        fig, axes = plt.subplots(nrows=3, ncols=2)
        ax0, ax1, ax2, ax3, ax4, ax5 = axes.flat
        ax0.plot(self.get_column('Frequency'), self.get_column('magS11'), 'k-o')
        ax0.set_title('Magnitude S11')
        ax1.plot(self.get_column('Frequency'), self.get_column('argS11'), 'ro')
        ax1.set_title('Phase S11')
        ax2.plot(self.get_column('Frequency'), self.get_column('magS21'), 'k-o')
        ax2.set_title('Magnitude S21 in dB')
        ax3.plot(self.get_column('Frequency'), self.get_column('argS21'), 'ro')
        ax3.set_title('Phase S21')
        ax4.plot(self.get_column('Frequency'), self.get_column('magS22'), 'k-o')
        ax4.set_title('Magnitude S22')
        ax5.plot(self.get_column('Frequency'), self.get_column('argS22'), 'ro')
        ax5.set_title('Phase S22')
        plt.tight_layout()
        plt.show()
class PowerRawModel(AsciiDataTable):
    """ Class that deals with the PowerRaw Files after conversion to Ascii
    using Ron Ginley's converter. These files typically have header information
    seperated from data by !!
    Header format is:
    Line 1: Spid$ - identification of type of system used
    Line 2: Systemletter$ - letter name indicating which system was used
    Line 3: Conncal$ - connector type from the system calibration
    Line 4: Connectors$ - connector type used for the measurement
    Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power)
    Line 6: Datea$ - date of measurement
    Line 7: Timea$ - time of measurement
    Line 8: Programm$ - name of program used
    Line 9: Rev$ - program revision
    Line 10: Opr$ - operator
    Line 11: Cfile$ - calibration name
    Line 12: Cdate$ - calibration date
    Line 13: Sport - identification of which port or direction was used for measurement
    Line 14: Numconnects ? number of disconnect/reconnect cycles
    Line 15: Numrepeats ? number of repeat measurements for each connect (usually 1)
    Line 16: Nbs ? not sure
    Line 17: Nfreq ? number of frequencies
    Line 18: Startfreq ? data row pointer for bdat files
    Line 19: Devicedescript$ - description of device being measured or of test being done
    Line 20: Devicenum$ - Identifying number for device ? used for file names
    """

    def __init__(self, file_path=None, **options):
        """Initializes the PowerRaw class, if a file_path is specified opens
        an existing file, else creates an empty container"""
        defaults = {"data_delimiter": ",",
                    "column_names_delimiter": ",",
                    "specific_descriptor": 'Raw',
                    "general_descriptor": 'Power',
                    "extension": 'txt',
                    "comment_begin": "#",
                    "comment_end": "\n",
                    "column_types": ['float', 'int', 'int', 'float', 'float', 'float', 'float'],
                    "column_descriptions": {"Frequency": "Frequency in GHz",
                                            "Direction": "Direction of connects, may be unused",
                                            "Connect": "Connect number",
                                            "magS11": "Linear magnitude for S11",
                                            "argS11": "Phase in degrees for S11",
                                            "Efficiency": "Effective Efficiency",
                                            "Calibration_Factor": "Effective efficiency "
                                                                  "modified by reflection coefficient"},
                    "header": None,
                    "column_names": ["Frequency", "Direction", "Connect",
                                     "magS11", "argS11", "Efficiency", "Calibration_Factor"],
                    "column_names_end_token": "\n",
                    "data": None,
                    'row_formatter_string': "{0:.5g}{delimiter}{1}{delimiter}{2}"
                                            "{delimiter}{3:.5g}{delimiter}{4:.3f}{delimiter}"
                                            "{5:.5g}{delimiter}{6:.5g}",
                    "data_table_element_separator": None}
        self.options = {}
        # fix: dict.iteritems() is Python-2 only; items() works on 2 and 3
        for key, value in defaults.items():
            self.options[key] = value
        for key, value in options.items():
            self.options[key] = value
        # Define Method Aliases if they are available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.__read_and_fix__(file_path)
        AsciiDataTable.__init__(self, None, **self.options)
        self.path = file_path
        self.structure_metadata()

    def __read_and_fix__(self, file_path=None):
        """Inputs in the PowerRaw file and fixes any problems with
        delimiters,etc. Header and data are separated by a line containing !!"""
        lines = []
        # fix: context manager guarantees the file handle is closed
        with open(file_path, 'r') as in_file:
            for index, line in enumerate(in_file):
                lines.append(line)
                if re.search("!!", line):
                    data_begin_line = index + 1
        self.lines = lines
        parse_options = {"delimiter": ", ", "row_end_token": '\n'}
        data = parse_lines(lines[data_begin_line:], **parse_options)
        self.options["data"] = data
        self.options["header"] = lines[:data_begin_line - 1]

    def structure_metadata(self):
        """Builds self.metadata, a dictionary of key,value pairs extracted
        from the 20-line header (see class docstring for line meanings)."""
        keys = ["System_Id", "System_Letter", "Connector_Type_Calibration", "Connector_Type_Measurement",
                "Measurement_Type", "Measurement_Date", "Measurement_Time", "Program_Used", "Program_Revision",
                "Operator", "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
                "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
                "Device_Description", "Device_Id"]
        self.metadata = {}
        # fix: tolerate an empty container (no header has been read yet)
        if not self.header:
            return
        for index, key in enumerate(keys):
            self.metadata[key] = self.header[index]
class TwoPortCalrepModel():
    """TwoPortCalrepModel is a model that holds data output by analyzing
    several datafiles using the HPBasic program Calrep. The data is stored in
    3 tables: a S11 table, a S21 table and a S22 table. The data is in linear
    magnitude and angle in degrees. There are 2 types of files, one is a
    single file with .asc extension and 3 files with .txt extension"""

    def __init__(self, file_path=None, **options):
        """Intializes the TwoPortCalrepModel class, if a file path is
        specified it opens and reads the file. file_path may also be a list of
        the three a/b/c .txt file names."""
        defaults = {"specific_descriptor": 'Two_Port_Calrep'}
        self.options = {}
        # fix: dict.iteritems() is Python-2 only; items() works on 2 and 3
        for key, value in defaults.items():
            self.options[key] = value
        for key, value in options.items():
            self.options[key] = value
        if file_path is None:
            pass
        elif re.match('asc', file_path.split(".")[-1], re.IGNORECASE):
            self.table_names = ['header', 'S11', 'S22', 'S21']
            self.row_pattern = make_row_match_string(ONE_PORT_COLUMN_NAMES)
            self.path = file_path
            self.__read_and_fix__()
        # fix: type(x) is ListType (Python-2 types module) -> isinstance
        elif re.match('txt', file_path.split(".")[-1], re.IGNORECASE) or isinstance(file_path, list):
            self.table_names = ['S11', 'S22', 'S21']
            if isinstance(file_path, list):
                self.file_names = file_path
                self.tables = []
                for index, table in enumerate(self.table_names):
                    if index == 2:
                        # fixes a problem with the c tables, extra comma at the end
                        options = {"row_end_token": ',\n'}
                        self.tables.append(OnePortCalrepModel(self.file_names[index], **options))
                        self.tables[2].options["row_end_token"] = None
                    else:
                        self.tables.append(OnePortCalrepModel(self.file_names[index]))
            else:
                try:
                    # derive the a/b/c sibling file names from the given one
                    root_name_pattern = re.compile(r'(?P<root_name>\w+)[abc].txt', re.IGNORECASE)
                    root_name_match = re.search(root_name_pattern, file_path)
                    root_name = root_name_match.groupdict()["root_name"]
                    directory = os.path.dirname(os.path.realpath(file_path))
                    self.file_names = [os.path.join(directory, root_name + end)
                                       for end in ['a.txt', 'b.txt', 'c.txt']]
                    self.tables = []
                    for index, table in enumerate(self.table_names):
                        if index == 2:
                            # fixes a problem with the c tables, extra comma at the end
                            options = {"row_end_token": ',\n'}
                            self.tables.append(OnePortCalrepModel(self.file_names[index], **options))
                            self.tables[2].options["row_end_token"] = None
                        else:
                            self.tables.append(OnePortCalrepModel(self.file_names[index]))
                except:
                    print("Could not import {0} please check that the a,b,c "
                          "tables are all in the same directory".format(file_path))
                    raise
            # Suffix each non-frequency column with its table name (S11/S22/S21)
            for index, table in enumerate(self.tables):
                column_names = []
                for column_number, column in enumerate(table.column_names):
                    # fix: 'is not' compared string identity, not equality
                    if column != "Frequency":
                        column_names.append(column + self.table_names[index])
                    else:
                        column_names.append(column)
                table.column_names = column_names
            if CONVERT_S21:
                # Convert the S21 magnitude column (stored in dB) to linear
                # magnitude and scale its uncertainty columns accordingly.
                for row_number, row in enumerate(self.tables[2].data):
                    new_S21 = self.tables[2].data[row_number][1]
                    new_S21 = 10. ** (-1 * new_S21 / 20.)
                    new_value = [self.tables[2].data[row_number][i] for i in range(2, 6)]
                    # fix: map() is a one-shot iterator on Python 3 and cannot
                    # be indexed below; use a list comprehension instead
                    new_value = [abs((1 / np.log10(np.e)) * new_S21 * x / 20.) for x in new_value]
                    self.tables[2].data[row_number][1] = new_S21
                    for i in range(2, 6):
                        self.tables[2].data[row_number][i] = new_value[i - 2]
            for key, value in self.options.items():
                self.tables[0].options[key] = value
            self.joined_table = ascii_data_table_join("Frequency", self.tables[0], self.tables[2])
            self.joined_table = ascii_data_table_join("Frequency", self.joined_table, self.tables[1])

    def __read_and_fix__(self):
        """Reads a single .asc file, locates the three 'Table n' sections,
        parses each into a OnePortCalrepModel and builds self.joined_table."""
        self.lines = []
        table_locators = ["Table 1", "Table 2", "Table 3"]
        begin_lines = []
        # fix: context manager guarantees the file handle is closed
        with open(self.path, 'r') as in_file:
            for index, line in enumerate(in_file):
                self.lines.append(line)
                for table in table_locators:
                    if re.search(table, line, re.IGNORECASE):
                        begin_lines.append(index)
        # Turn the located section markers into [begin, end] line-number pairs
        self.table_line_numbers = []
        for index, begin_line in enumerate(begin_lines):
            if index == 0:
                header_begin_line = 0
                header_end_line = begin_line - 2
                table_1_begin_line = begin_line + 3
                table_1_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([header_begin_line, header_end_line])
                self.table_line_numbers.append([table_1_begin_line, table_1_end_line])
            elif index > 0 and index < (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([table_begin_line, table_end_line])
            elif index == (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = None
                self.table_line_numbers.append([table_begin_line, table_end_line])
        self.tables = []
        for index, name in enumerate(self.table_names):
            self.table_lines = self.lines[self.table_line_numbers[index][0]:self.table_line_numbers[index][1]]
            self.tables.append(self.table_lines)
        for index, table in enumerate(self.table_names):
            if index == 0:
                # by using parse_lines we get a list_list of strings instead of
                # list_string, we can just remove end lines
                self.tables[index] = strip_all_line_tokens(self.tables[index], begin_token=None, end_token='\n')
            else:
                column_types = ['float' for i in range(len(ONE_PORT_COLUMN_NAMES))]
                options = {"row_pattern": self.row_pattern,
                           "column_names": ONE_PORT_COLUMN_NAMES,
                           "output": "list_list"}
                options["column_types"] = column_types
                self.tables[index] = parse_lines(self.tables[index], **options)
        # need to put S21 mag into linear magnitude
        if CONVERT_S21:
            for row_number, row in enumerate(self.tables[3]):
                new_S21 = self.tables[3][row_number][1]
                new_S21 = 10. ** (-1 * new_S21 / 20.)
                new_value = [self.tables[3][row_number][i] for i in range(2, 6)]
                # fix: map() is a one-shot iterator on Python 3; build a list
                new_value = [abs((1 / np.log10(np.e)) * new_S21 * x / 20) for x in new_value]
                self.tables[3][row_number][1] = new_S21
                for i in range(2, 6):
                    self.tables[3][row_number][i] = new_value[i - 2]
        for index, table in enumerate(self.tables):
            if index == 0:
                pass
            else:
                table_options = {"data": self.tables[index]}
                self.tables[index] = OnePortCalrepModel(None, **table_options)
                column_names = []
                for column_number, column in enumerate(self.tables[index].column_names):
                    # fix: 'is not' compared string identity, not equality;
                    # also dropped the no-op self.table_names[:] copy
                    if column != "Frequency":
                        column_names.append(column + self.table_names[index])
                    else:
                        column_names.append(column)
                self.tables[index].column_names = column_names
        self.tables[1].header = self.tables[0]
        for key, value in self.options.items():
            self.tables[1].options[key] = value
        self.joined_table = ascii_data_table_join("Frequency", self.tables[1], self.tables[3])
        self.joined_table = ascii_data_table_join("Frequency", self.joined_table, self.tables[2])

    def __str__(self):
        """Returns the string form of the joined S11/S21/S22 table."""
        return self.joined_table.build_string()

    def show(self):
        """Plots magnitude and phase for S11, S21 and S22 with total
        uncertainty error bars in a 3x2 grid."""
        fig, axes = plt.subplots(nrows=3, ncols=2)
        ax0, ax1, ax2, ax3, ax4, ax5 = axes.flat
        ax0.errorbar(self.joined_table.get_column('Frequency'), self.joined_table.get_column('magS11'),
                     yerr=self.joined_table.get_column('uMgS11'), fmt='k-o')
        ax0.set_title('Magnitude S11')
        ax1.errorbar(self.joined_table.get_column('Frequency'), self.joined_table.get_column('argS11'),
                     yerr=self.joined_table.get_column('uAgS11'), fmt='ro')
        ax1.set_title('Phase S11')
        ax2.errorbar(self.joined_table.get_column('Frequency'), self.joined_table.get_column('magS21'),
                     yerr=self.joined_table.get_column('uMgS21'), fmt='k-o')
        ax2.set_title('Magnitude S21')
        ax3.errorbar(self.joined_table.get_column('Frequency'), self.joined_table.get_column('argS21'),
                     yerr=self.joined_table.get_column('uAgS21'), fmt='ro')
        ax3.set_title('Phase S21')
        ax4.errorbar(self.joined_table.get_column('Frequency'), self.joined_table.get_column('magS22'),
                     yerr=self.joined_table.get_column('uMgS22'), fmt='k-o')
        ax4.set_title('Magnitude S22')
        ax5.errorbar(self.joined_table.get_column('Frequency'), self.joined_table.get_column('argS22'),
                     yerr=self.joined_table.get_column('uAgS22'), fmt='ro')
        ax5.set_title('Phase S22')
        plt.tight_layout()
        plt.show()
class PowerCalrep():
    """PowerCalrep is a model that holds data output by analyzing several
    datafiles using the HPBasic program Calrep. The data is stored in 2
    tables: a S11 table, and a power table. The data is in linear magnitude
    and angle in degrees. There are 2 types of files, one is a single file
    with .asc extension and 2 files with .txt extension"""

    def __init__(self, file_path=None, **options):
        """Intializes the PowerCalrep class, if a file path is specified it
        opens and reads the file. file_path may also be a list of the two
        a/b .txt file names."""
        if file_path is None:
            pass
        elif re.match('asc', file_path.split(".")[-1], re.IGNORECASE):
            self.table_names = ['header', 'S11', 'Efficiency']
            self.row_pattern = make_row_match_string(ONE_PORT_COLUMN_NAMES)
            self.power_row_pattern = make_row_match_string(POWER_COLUMN_NAMES)
            self.path = file_path
            self.__read_and_fix__()
        # fix: type(x) is ListType (Python-2 types module) -> isinstance
        elif re.match('txt', file_path.split(".")[-1], re.IGNORECASE) or isinstance(file_path, list):
            self.table_names = ['S11', 'Efficiency']
            if isinstance(file_path, list):
                self.file_names = file_path
                self.tables = []
                # NOTE(review): PowerModel is not defined in this module
                # (possibly PowerCalrepModel was intended), and the table order
                # here differs from the branch below — confirm against callers
                for index, table in enumerate(self.table_names):
                    if index == 0:
                        self.tables.append(PowerModel(self.file_names[index]))
                    elif index == 1:
                        self.tables.append(OnePortCalrepModel(self.file_names[index]))
            else:
                try:
                    # derive the a/b sibling file names from the given one
                    root_name_pattern = re.compile(r'(?P<root_name>\w+)[abc].txt', re.IGNORECASE)
                    root_name_match = re.search(root_name_pattern, file_path)
                    root_name = root_name_match.groupdict()["root_name"]
                    directory = os.path.dirname(os.path.realpath(file_path))
                    self.file_names = [os.path.join(directory, root_name + end)
                                       for end in ['a.txt', 'b.txt']]
                    self.tables = []
                    for index, table in enumerate(self.table_names):
                        if index == 0:
                            self.tables.append(OnePortCalrepModel(self.file_names[index]))
                        elif index == 1:
                            self.tables.append(PowerModel(self.file_names[index]))
                except:
                    print("Could not import {0} please check that the a,b "
                          "tables are all in the same directory".format(file_path))
                    raise
            # Prefix each non-frequency column with its table name
            for index, table in enumerate(self.tables):
                for column_number, column in enumerate(table.column_names):
                    # fix: 'is not' compared string identity, not equality
                    if column != "Frequency":
                        table.column_names[column_number] = self.table_names[index] + "_" + column
            self.joined_table = ascii_data_table_join("Frequency", self.tables[0], self.tables[1])

    def __read_and_fix__(self):
        """Reads a single .asc file, locates the 'Table n' sections, parses
        the S11 and power tables and builds self.joined_table."""
        self.lines = []
        table_locators = ["Table 1", "Table 2"]
        begin_lines = []
        # fix: context manager guarantees the file handle is closed
        with open(self.path, 'r') as in_file:
            for index, line in enumerate(in_file):
                self.lines.append(line)
                for table in table_locators:
                    if re.search(table, line, re.IGNORECASE):
                        begin_lines.append(index)
        # Turn the located section markers into [begin, end] line-number pairs
        self.table_line_numbers = []
        for index, begin_line in enumerate(begin_lines):
            if index == 0:
                header_begin_line = 0
                header_end_line = begin_line - 2
                table_1_begin_line = begin_line + 3
                table_1_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([header_begin_line, header_end_line])
                self.table_line_numbers.append([table_1_begin_line, table_1_end_line])
            elif index > 0 and index < (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([table_begin_line, table_end_line])
            elif index == (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = None
                self.table_line_numbers.append([table_begin_line, table_end_line])
        self.tables = []
        for index, name in enumerate(self.table_names):
            self.table_lines = self.lines[self.table_line_numbers[index][0]:self.table_line_numbers[index][1]]
            self.tables.append(self.table_lines)
        for index, table in enumerate(self.table_names):
            if index == 0:
                # by using parse_lines we get a list_list of strings instead of
                # list_string, we can just remove end lines
                self.tables[index] = strip_all_line_tokens(self.tables[index], begin_token=None, end_token='\n')
            elif index == 1:
                column_types = ['float' for i in range(len(ONE_PORT_COLUMN_NAMES))]
                options = {"row_pattern": self.row_pattern,
                           "column_names": ONE_PORT_COLUMN_NAMES,
                           "output": "list_list"}
                options["column_types"] = column_types
                self.tables[index] = parse_lines(self.tables[index], **options)
                table_options = {"data": self.tables[index]}
                self.tables[index] = OnePortCalrepModel(None, **table_options)
            elif index == 2:
                column_types = ['float' for i in range(len(POWER_COLUMN_NAMES))]
                options = {"row_pattern": self.power_row_pattern,
                           "column_names": POWER_COLUMN_NAMES,
                           "output": "list_list"}
                options["column_types"] = column_types
                self.tables[index] = parse_lines(self.tables[index], **options)
                table_options = {"data": self.tables[index]}
                # NOTE(review): PowerModel is not defined in this module —
                # possibly PowerCalrepModel was intended; confirm
                self.tables[index] = PowerModel(None, **table_options)
        self.tables[1].header = self.tables[0]
        self.joined_table = ascii_data_table_join("Frequency", self.tables[1], self.tables[2])
table_options={"data":self.tables[index]} self.tables[index]=PowerModel(None,**table_options) self.tables[1].header=self.tables[0] self.joined_table=ascii_data_table_join("Frequency",self.tables[1],self.tables[2]) class JBSparameter(AsciiDataTable): """JBSparameter is a class that holds data taken and stored using Jim Booth's two port format. """ def __init__(self,file_path=None,**options): """Initializes the JBSparameter class. JB Sparameter data is very close to s2p, but has # as a comment begin token, and space as a data delimiter. The first line has structured metadata that usually includes date and IFBW""" defaults={"header_begin_line":0,"data_end_line":None,"column_names_delimiter":' ', "column_names_begin_token":'#',"column_names_end_token":'\n',"data_table_element_separator":None, "data_delimiter":' ',"comment_begin":"#", "comment_end":"\n","row_end_token":'\n',"column_types":['float' for i in range(9)], "column_descriptions":["Frequency in Hz", "Real part of S11", "Imaginary part of S11", "Real part of S21", "Imaginary part of S21", "Real part of S12", "Imaginary part of S12", "Real part of S22", "Imaginary part of S22"]} rfs="" for i in range(9): if i==8: rfs=rfs+"{%s:.6g}"%(str(i)) else: rfs=rfs+"{%s:.6g}{delimiter}"%(str(i)) options["row_formatter_string"]=rfs self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: column_name_line=0 in_file=open(file_path) for line in in_file: if line[0] is '#': column_name_line+=1 self.options["header_end_line"]=column_name_line-1 self.options["column_names_begin_line"]=column_name_line-1 self.options["column_names_end_line"]=column_name_line self.options["data_begin_line"]=column_name_line self.path=file_path AsciiDataTable.__init__(self,file_path,**self.options) else: 
AsciiDataTable.__init__(self,file_path,**self.options) def get_frequency_units(self): """Returns the frequency units by looking at the 0 index element of column names""" pattern='freq\((?P<Frequency_Units>\w+)\)' match=re.match(pattern,self.column_names[0]) return match.groupdict()['Frequency_Units'] class SwitchTermsFR(): pass class SwitchTermsPort(): pass class NoiseCalRaw(): pass class ReverbChamber(): pass class RobotData(): pass #----------------------------------------------------------------------------- # Module Scripts def test_OnePortCalrepModel(file_path_1='700437.txt',file_path_2="700437.asc"): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path_1)) new_table_1=OnePortCalrepModel(file_path=file_path_1) print new_table_1 print("-"*80) print("\n") print(" Import of {0} results in:".format(file_path_2)) new_table_2=OnePortCalrepModel(file_path=file_path_2) print new_table_2 print("{0} results in {1}:".format('new_table_1.get_column("Frequency")',new_table_1.get_column("Frequency"))) print new_table_1.get_options() def test_OnePortCalrepModel_Ctable(file_path_1='700437.txt'): """Tests the OnePortCalrepModel on ctables from 2 port """ os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path_1)) new_table_1=OnePortCalrepModel(file_path=file_path_1,**{"row_end_token":",\n"}) print new_table_1 print("-"*80) print("\n") def test_OnePortRawModel(file_path='OnePortRawTestFile.txt'): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path)) new_table_1=OnePortRawModel(file_path=file_path) print new_table_1 print("-"*80) print("{0} results in {1}:".format('new_table_1.get_column("Frequency")',new_table_1.get_column("Frequency"))) print new_table_1.get_options() print new_table_1.metadata print new_table_1.column_names print('index' in new_table_1.column_names ) def test_TwoPortRawModel(file_path='TestFileTwoPortRaw.txt'): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results 
in:".format(file_path)) new_table_1=TwoPortRawModel(file_path=file_path) print new_table_1 def test_PowerRawModel(file_path='TestFilePowerRaw.txt'): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path)) new_table_1=PowerRawModel(file_path=file_path) print new_table_1 def test_JBSparameter(file_path="ftest6_L1_g5_HF_air"): """Tests the JBSparameter class""" os.chdir(TESTS_DIRECTORY) # open an existing file new_table=JBSparameter(file_path=file_path) print new_table.column_names print new_table.get_frequency_units() old_prefix=new_table.get_frequency_units().replace('Hz','') #new_table.change_unit_prefix(column_selector=0,old_prefix='',new_prefix='G',unit='Hz') new_table.change_unit_prefix(column_selector=0,old_prefix=old_prefix,new_prefix='G',unit='Hz') print new_table.column_names print new_table.get_column(None,0) print new_table.get_frequency_units() print new_table.get_header_string() def test_TwoPortCalrepModel(file_name="922729a.txt"): """Tests the TwoPortCalrepModel model type""" os.chdir(TESTS_DIRECTORY) new_two_port=TwoPortCalrepModel(file_name) for table in new_two_port.tables: print table print new_two_port.joined_table new_two_port.joined_table.save() new_two_port.joined_table.path='N205RV.txt' new_two_port.joined_table.header=None new_two_port.joined_table.column_names=None new_two_port.joined_table.save() def test_PowerCalrep(file_name="700196.asc"): """Tests the TwoPortCalrepModel model type""" os.chdir(TESTS_DIRECTORY) new_power=PowerCalrep(file_name) for table in new_power.tables: print table print new_power.joined_table #----------------------------------------------------------------------------- # Module Runner if __name__ == '__main__': #test_OnePortCalrepModel() #test_OnePortCalrepModel_Ctable(file_path_1='922729c.txt') #test_OnePortRawModel() #test_OnePortRawModel('OnePortRawTestFile_002.txt') #test_TwoPortRawModel() #test_PowerRawModel() #test_JBSparameter() #test_JBSparameter('QuartzRefExample_L1_g10_HF') 
#test_TwoPortCalrepModel() #test_TwoPortCalrepModel('N205RV.asc') test_PowerCalrep()
Module variables
var COMMENT_PATTERN
var CONVERT_S21
var DEFAULT_FILE_NAME
var FORMATS
var FREQUENCY_UNITS
var METHOD_ALIASES
var NUMBER_MATCH_STRING
var ONE_PORT_COLUMN_NAMES
var OPTION_LINE_PATTERN
var PARAMETERS
var POWER_3TERM_COLUMN_NAMES
var POWER_COLUMN_NAMES
var S1P_DB_COLUMN_NAMES
var S1P_MA_COLUMN_NAMES
var S1P_RI_COLUMN_NAMES
var S2P_COMPLEX_COLUMN_NAMES
var S2P_DB_COLUMN_DESCRIPTION
var S2P_DB_COLUMN_NAMES
var S2P_MA_COLUMN_DESCRIPTION
var S2P_MA_COLUMN_NAMES
var S2P_NOISE_PARAMETER_COLUMN_NAMES
var S2P_RI_COLUMN_DESCRIPTION
var S2P_RI_COLUMN_NAMES
var SMITHPLOT
var StringTypes
var TESTS_DIRECTORY
var TOUCHSTONE_KEYWORDS
Functions
def calrep_to_benchmark(
file_path)
Creates a benchmark list given a path to a calrep file, assumes column names are 2 lines after the occurrence of the last /
def calrep_to_benchmark(file_path): """Creates a benchmark list given a path to a calrep file, assumes column names are 2 lines after the occurrence of the last /""" in_file=open(file_path,'r') lines=[] for line in in_file: lines.append(line) block_end=re.compile('/') for index,line in enumerate(lines): if re.match(block_end,line): last_block_comment_line=index header=lines[0:last_block_comment_line+1] columns_line=last_block_comment_line+2 column_names=lines[columns_line].split(' ') data=lines[columns_line+1:None] return [header,column_names,data]
def test_JBSparameter(
file_path='ftest6_L1_g5_HF_air')
Tests the JBSparameter class
def test_JBSparameter(file_path="ftest6_L1_g5_HF_air"): """Tests the JBSparameter class""" os.chdir(TESTS_DIRECTORY) # open an existing file new_table=JBSparameter(file_path=file_path) print new_table.column_names print new_table.get_frequency_units() old_prefix=new_table.get_frequency_units().replace('Hz','') #new_table.change_unit_prefix(column_selector=0,old_prefix='',new_prefix='G',unit='Hz') new_table.change_unit_prefix(column_selector=0,old_prefix=old_prefix,new_prefix='G',unit='Hz') print new_table.column_names print new_table.get_column(None,0) print new_table.get_frequency_units() print new_table.get_header_string()
def test_OnePortCalrepModel(
file_path_1='700437.txt', file_path_2='700437.asc')
def test_OnePortCalrepModel(file_path_1='700437.txt',file_path_2="700437.asc"): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path_1)) new_table_1=OnePortCalrepModel(file_path=file_path_1) print new_table_1 print("-"*80) print("\n") print(" Import of {0} results in:".format(file_path_2)) new_table_2=OnePortCalrepModel(file_path=file_path_2) print new_table_2 print("{0} results in {1}:".format('new_table_1.get_column("Frequency")',new_table_1.get_column("Frequency"))) print new_table_1.get_options()
def test_OnePortCalrepModel_Ctable(
file_path_1='700437.txt')
Tests the OnePortCalrepModel on ctables from 2 port
def test_OnePortCalrepModel_Ctable(file_path_1='700437.txt'): """Tests the OnePortCalrepModel on ctables from 2 port """ os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path_1)) new_table_1=OnePortCalrepModel(file_path=file_path_1,**{"row_end_token":",\n"}) print new_table_1 print("-"*80) print("\n")
def test_OnePortRawModel(
file_path='OnePortRawTestFile.txt')
def test_OnePortRawModel(file_path='OnePortRawTestFile.txt'): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path)) new_table_1=OnePortRawModel(file_path=file_path) print new_table_1 print("-"*80) print("{0} results in {1}:".format('new_table_1.get_column("Frequency")',new_table_1.get_column("Frequency"))) print new_table_1.get_options() print new_table_1.metadata print new_table_1.column_names print('index' in new_table_1.column_names )
def test_PowerCalrep(
file_name='700196.asc')
Tests the PowerCalrep model type
def test_PowerCalrep(file_name="700196.asc"): """Tests the TwoPortCalrepModel model type""" os.chdir(TESTS_DIRECTORY) new_power=PowerCalrep(file_name) for table in new_power.tables: print table print new_power.joined_table
def test_PowerRawModel(
file_path='TestFilePowerRaw.txt')
def test_PowerRawModel(file_path='TestFilePowerRaw.txt'): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path)) new_table_1=PowerRawModel(file_path=file_path) print new_table_1
def test_TwoPortCalrepModel(
file_name='922729a.txt')
Tests the TwoPortCalrepModel model type
def test_TwoPortCalrepModel(file_name="922729a.txt"): """Tests the TwoPortCalrepModel model type""" os.chdir(TESTS_DIRECTORY) new_two_port=TwoPortCalrepModel(file_name) for table in new_two_port.tables: print table print new_two_port.joined_table new_two_port.joined_table.save() new_two_port.joined_table.path='N205RV.txt' new_two_port.joined_table.header=None new_two_port.joined_table.column_names=None new_two_port.joined_table.save()
def test_TwoPortRawModel(
file_path='TestFileTwoPortRaw.txt')
def test_TwoPortRawModel(file_path='TestFileTwoPortRaw.txt'): os.chdir(TESTS_DIRECTORY) print(" Import of {0} results in:".format(file_path)) new_table_1=TwoPortRawModel(file_path=file_path) print new_table_1
Classes
class JBSparameter
JBSparameter is a class that holds data taken and stored using Jim Booth's two port format.
class JBSparameter(AsciiDataTable): """JBSparameter is a class that holds data taken and stored using Jim Booth's two port format. """ def __init__(self,file_path=None,**options): """Initializes the JBSparameter class. JB Sparameter data is very close to s2p, but has # as a comment begin token, and space as a data delimiter. The first line has structured metadata that usually includes date and IFBW""" defaults={"header_begin_line":0,"data_end_line":None,"column_names_delimiter":' ', "column_names_begin_token":'#',"column_names_end_token":'\n',"data_table_element_separator":None, "data_delimiter":' ',"comment_begin":"#", "comment_end":"\n","row_end_token":'\n',"column_types":['float' for i in range(9)], "column_descriptions":["Frequency in Hz", "Real part of S11", "Imaginary part of S11", "Real part of S21", "Imaginary part of S21", "Real part of S12", "Imaginary part of S12", "Real part of S22", "Imaginary part of S22"]} rfs="" for i in range(9): if i==8: rfs=rfs+"{%s:.6g}"%(str(i)) else: rfs=rfs+"{%s:.6g}{delimiter}"%(str(i)) options["row_formatter_string"]=rfs self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: column_name_line=0 in_file=open(file_path) for line in in_file: if line[0] is '#': column_name_line+=1 self.options["header_end_line"]=column_name_line-1 self.options["column_names_begin_line"]=column_name_line-1 self.options["column_names_end_line"]=column_name_line self.options["data_begin_line"]=column_name_line self.path=file_path AsciiDataTable.__init__(self,file_path,**self.options) else: AsciiDataTable.__init__(self,file_path,**self.options) def get_frequency_units(self): """Returns the frequency units by looking at the 0 index element of column names""" pattern='freq\((?P<Frequency_Units>\w+)\)' 
match=re.match(pattern,self.column_names[0]) return match.groupdict()['Frequency_Units']
Ancestors (in MRO)
- JBSparameter
- pyMez.Code.DataHandlers.GeneralModels.AsciiDataTable
Instance variables
var options
Methods
def __init__(
self, file_path=None, **options)
Initializes the JBSparameter class. JB Sparameter data is very close to s2p, but has # as a comment begin token, and space as a data delimiter. The first line has structured metadata that usually includes date and IFBW
def __init__(self,file_path=None,**options): """Initializes the JBSparameter class. JB Sparameter data is very close to s2p, but has # as a comment begin token, and space as a data delimiter. The first line has structured metadata that usually includes date and IFBW""" defaults={"header_begin_line":0,"data_end_line":None,"column_names_delimiter":' ', "column_names_begin_token":'#',"column_names_end_token":'\n',"data_table_element_separator":None, "data_delimiter":' ',"comment_begin":"#", "comment_end":"\n","row_end_token":'\n',"column_types":['float' for i in range(9)], "column_descriptions":["Frequency in Hz", "Real part of S11", "Imaginary part of S11", "Real part of S21", "Imaginary part of S21", "Real part of S12", "Imaginary part of S12", "Real part of S22", "Imaginary part of S22"]} rfs="" for i in range(9): if i==8: rfs=rfs+"{%s:.6g}"%(str(i)) else: rfs=rfs+"{%s:.6g}{delimiter}"%(str(i)) options["row_formatter_string"]=rfs self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: column_name_line=0 in_file=open(file_path) for line in in_file: if line[0] is '#': column_name_line+=1 self.options["header_end_line"]=column_name_line-1 self.options["column_names_begin_line"]=column_name_line-1 self.options["column_names_end_line"]=column_name_line self.options["data_begin_line"]=column_name_line self.path=file_path AsciiDataTable.__init__(self,file_path,**self.options) else: AsciiDataTable.__init__(self,file_path,**self.options)
def add_column(
self, column_name=None, column_type=None, column_data=None, format_string=None)
Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows
def add_column(self,column_name=None,column_type=None,column_data=None,format_string=None): """Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows""" original_column_names=self.column_names[:] try: self.column_names.append(column_name) if self.options["column_types"]: self.options["column_types"]=self.options["column_types"].append(column_type) if len(column_data) == len(self.data): for index,row in enumerate(self.data): #print("{0} is {1}".format('self.data[index]',self.data[index])) #print("{0} is {1}".format('row',row)) new_row=row[:] new_row.append(column_data[index]) self.data[index]=new_row else: for index,row in enumerate(self.data): self.data[index]=row.append(self.options['empty_value']) if column_data is not None: for item in column_data: empty_row=[self.options['empty_value'] for column in original_column_names] empty_row.append(item) self.add_row(empty_row) if self.options["row_formatter_string"] is None: pass else: if format_string is None: self.options["row_formatter_string"]=self.options["row_formatter_string"]+\ '{delimiter}'+"{"+str(len(self.column_names)-1)+"}" else: self.options["row_formatter_string"]=self.options["row_formatter_string"]+format_string #self.update_model() except: self.column_names=original_column_names print("Could not add columns") raise
def add_index(
self)
Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position
def add_index(self): """Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position""" if 'index' in self.column_names: print("Add Index passed") pass else: self.column_names.insert(0,'index') for index,row in enumerate(self.data): self.data[index].insert(0,index) if self.options['column_types']: self.options['column_types'].insert(0,'int') if self.options['row_formatter_string']: temp_formatter_list=self.options['row_formatter_string'].split("{delimiter}") iterated_row_formatter_list=[temp_formatter_list[i].replace(str(i),str(i+1)) for i in range(len(temp_formatter_list))] new_formatter=string_list_collapse(iterated_row_formatter_list,string_delimiter="{delimiter}") self.options['row_formatter_string']='{0}{delimiter}'+new_formatter
def add_inline_comment(
self, comment='', line_number=None, string_position=None)
Adds an inline in the specified location
def add_inline_comment(self,comment="",line_number=None,string_position=None): "Adds an inline in the specified location" try: self.inline_comments.append([comment,line_number,string_position]) except:pass
def add_row(
self, row_data)
Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys
def add_row(self,row_data): """Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys""" if len(row_data) not in [len(self.column_names),len(self.column_names)]: print(" could not add the row, dimensions do not match") return if type(row_data) in [ListType,np.ndarray]: self.data.append(row_data) elif type(row_data) in [DictionaryType]: data_list=[row_data[column_name] for column_name in self.column_names] self.data.append(data_list)
def build_string(
self, **temp_options)
Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model
def build_string(self,**temp_options): """Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model""" # store the original options to be put back after the string is made original_options=self.options for key,value in temp_options.iteritems(): self.options[key]=value section_end=0 next_section_begin=0 if self.options['data_table_element_separator'] is None: inner_element_spacing=0 else: inner_element_spacing=self.options['data_table_element_separator'].count('\n')-1 string_out="" between_section="" if self.options['data_table_element_separator'] is not None: between_section=self.options['data_table_element_separator'] if self.header is None: self.options['header_begin_line']=self.options['header_end_line']=None pass else: self.options["header_begin_line"]=0 if self.data is None and self.column_names is None and self.footer is None: string_out=self.get_header_string() self.options["header_end_line"]=None else: string_out=self.get_header_string()+between_section last_header_line=self.get_header_string().count('\n')+1 self.options["header_end_line"]=last_header_line next_section_begin=last_header_line+inner_element_spacing if self.column_names is None: self.options['column_names_begin_line']=self.options['column_names_end_line']=None pass else: self.options["column_names_begin_line"]=next_section_begin if self.data is None and self.footer is None: self.options["column_names_end_line"]=None string_out=string_out+self.get_column_names_string() else: string_out=string_out+self.get_column_names_string()+between_section last_column_names_line=self.get_column_names_string().count('\n')+\ self.options["column_names_begin_line"]+1 self.options["column_names_end_line"]=last_column_names_line next_section_begin=last_column_names_line+inner_element_spacing if self.data is None: self.options['data_begin_line']=self.options['data_end_line']=None pass else: 
self.options["data_begin_line"]=next_section_begin if self.footer is None: self.options["data_end_line"]=None string_out=string_out+self.get_data_string() else: string_out=string_out+self.get_data_string()+between_section last_data_line=self.get_data_string().count("\n")+\ self.options["data_begin_line"]+1 self.options["data_end_line"]=last_data_line next_section_begin=last_data_line+inner_element_spacing if self.footer is None: self.options['footer_begin_line']=self.options['footer_end_line']=None pass else: self.options["footer_begin_line"]=next_section_begin string_out=string_out+self.get_footer_string() self.options['footer_end_line']=None # set the options back after the string has been made if self.inline_comments is None: pass else: lines=string_out.splitlines() for comment in self.inline_comments: lines=insert_inline_comment(lines,comment=comment[0],line_number=comment[1], string_position=comment[2], begin_token=self.options['inline_comment_begin'], end_token=self.options['inline_comment_end']) string_out=string_list_collapse(lines,string_delimiter='\n') self.options=original_options return string_out
def change_unit_prefix(
self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz')
Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes
def change_unit_prefix(self,column_selector=None,old_prefix=None,new_prefix=None,unit='Hz'): """Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes""" multipliers={"yotta":10.**24,"Y":10.**24,"zetta":10.**21,"Z":10.**21,"exa":10.**18,"E":10.**18,"peta":10.**15, "P":10.**15,"tera":10.**12,"T":10.**12,"giga":10.**9,"G":10.**9,"mega":10.**6,"M":10.**6, "kilo":10.**3,"k":10.**3,"hecto":10.**2,"h":10.**2,"deka":10.,"da":10.,None:1.,"":1., "deci":10.**-1,"d":10.**-1,"centi":10.**-2,"c":10.**-2,"milli":10.**-3,"m":10.**-3, "micro":10.**-6,"mu":10.**-6,u"\u00B5":10.**-6,"nano":10.**-9, "n":10.**-9,"pico":10.**-12,"p":10.**-12,"femto":10.**-15, "f":10.**-15,"atto":10.**-18,"a":10.**-18,"zepto":10.**-21,"z":10.**-21, "yocto":10.**-24,"y":10.**-24} # change column name into column index try: if old_prefix is None: old_prefix="" if new_prefix is None: new_prefix="" old_unit=old_prefix+unit new_unit=new_prefix+unit if column_selector in self.column_names: column_selector=self.column_names.index(column_selector) for index,row in enumerate(self.data): if type(self.data[index][column_selector]) in [FloatType,LongType]: #print "{0:e}".format(multipliers[old_prefix]/multipliers[new_prefix]) self.data[index][column_selector]=\ (multipliers[old_prefix]/multipliers[new_prefix])*self.data[index][column_selector] elif type(self.data[index][column_selector]) in [StringType,IntType]: self.data[index][column_selector]=\ str((multipliers[old_prefix]/multipliers[new_prefix])*float(self.data[index][column_selector])) else: print type(self.data[index][column_selector]) raise if self.options["column_descriptions"] is not None: 
old=self.options["column_descriptions"][column_selector] self.options["column_descriptions"][column_selector]=old.replace(old_unit,new_unit) if self.options["column_units"] is not None: old=self.options["column_units"][column_selector] self.options["column_units"][column_selector]=old.replace(old_unit,new_unit) if re.search(old_unit,self.column_names[column_selector]): old=self.column_names[column_selector] self.column_names[column_selector]=old.replace(old_unit,new_unit) except: print("Could not change the unit prefix of column {0}".format(column_selector)) raise
def find_line(
self, begin_token)
Finds the first line that has begin token in it
def find_line(self,begin_token): """Finds the first line that has begin token in it""" for index,line in enumerate(self.lines): if re.search(begin_token,line): return index
def get_column(
self, column_name=None, column_index=None)
Returns a column as a list given a column name or column index
def get_column(self,column_name=None,column_index=None): """Returns a column as a list given a column name or column index""" if column_name is None: if column_index is None: return else: column_selector=column_index else: column_selector=self.column_names.index(column_name) out_list=[self.data[i][column_selector] for i in range(len(self.data))] return out_list
def get_column_names_string(
self)
Returns the column names as a string using options
def get_column_names_string(self): "Returns the column names as a string using options" string_out="" # This writes the column_names column_name_begin="" column_name_end="" if self.options["column_names_begin_token"] is None: column_name_begin="" else: column_name_begin=self.options["column_names_begin_token"] if self.options["column_names_end_token"] is None: column_name_end="" else: column_name_end=self.options["column_names_end_token"] if self.column_names is None: string_out="" else: if type(self.column_names) is StringType: string_out=self.column_names elif type(self.column_names) is ListType: string_out=list_to_string(self.column_names, data_delimiter=self.options["column_names_delimiter"],end="") #print("{0} is {1}".format('string_out',string_out)) else: string_out=ensure_string(self.column_names) #print column_name_begin,string_out,column_name_end return column_name_begin+string_out+column_name_end
def get_data_dictionary_list(
self, use_row_formatter_string=True)
Returns a python list with a row dictionary of form {column_name:data_column}
def get_data_dictionary_list(self,use_row_formatter_string=True): """Returns a python list with a row dictionary of form {column_name:data_column}""" try: if self.options["row_formatter_string"] is None: use_row_formatter_string=False if use_row_formatter_string: list_formatter=[item.replace("{"+str(index),"{0") for index,item in enumerate(self.options["row_formatter_string"].split("{delimiter}"))] else: list_formatter=["{0}" for i in self.column_names] #print self.column_names #print self.data #print list_formatter #print len(self.column_names)==len(self.data[0]) #print len(list_formatter)==len(self.data[0]) #print type(self.data[0]) out_list=[{self.column_names[i]:list_formatter[i].format(value) for i,value in enumerate(line)} for line in self.data] return out_list except: print("Could not form a data_dictionary_list, check that row_formatter_string is properly defined") #print(out_list) raise
def get_data_string(
self)
Returns the data as a string
def get_data_string(self):
    """Returns the data as a string.

    Handles three shapes of self.data: a single pre-formatted string, a list of
    row strings, or a list of row lists / numpy array.  In each case the result
    is wrapped in data_begin_token / data_end_token when those options are set,
    taking care not to re-apply a token the data already carries.
    NOTE(review): when data is a string and BOTH data_begin_token and
    data_end_token are set, no branch assigns string_out, so "" is returned --
    looks like an unfinished branch; confirm against callers.
    """
    #Todo:refactor to cut out unused lines
    string_out=""
    if self.data is None:
        string_out= ""
    else:
        if type(self.data) is StringType:
            # data is already one big string: just ensure the tokens appear once
            if self.options['data_begin_token'] is None:
                if self.options['data_end_token'] is None:
                    string_out=self.data
                else:
                    if re.search(self.options['data_end_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.data+self.options['data_end_token']
            else:
                if self.options['data_end_token'] is None:
                    if re.match(self.options['data_begin_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.options['data_begin_token']+self.data
        elif type(self.data) in [ListType,np.ndarray]:
            try:
                #If the first row is a string, we should strip all the tokens and add them back in
                if type(self.data[0]) is StringType:
                    # rows are strings: collapse them, wrapping in tokens as needed
                    if self.options['data_begin_token'] is None:
                        string_out=string_list_collapse(self.data)
                    else:
                        if re.match(self.options['data_begin_token'],self.data[0]):
                            if self.options['data_end_token'] is None:
                                string_out=string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=string_list_collapse(self.data)
                                else:
                                    string_out=string_list_collapse(self.data)+self.options['data_end_token']
                        else:
                            if self.options['data_end_token'] is None:
                                string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                                else:
                                    string_out=self.options['data_begin_token']+\
                                               string_list_collapse(self.data)+\
                                               self.options['data_end_token']
                elif type(self.data[0]) in [ListType,np.ndarray]:
                    # rows are lists: delegate formatting to list_list_to_string
                    prefix=""  # NOTE(review): unused local, kept as-is
                    if self.options['data_begin_token'] is None:
                        if self.options['data_end_token'] is None:
                            string_out=list_list_to_string(self.data,data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                    else:
                        if self.options['data_end_token'] is None:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                        else:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=\
                                                           self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])+\
                                       self.options['data_end_token']
                else:
                    # fall back: treat data as a single flat list
                    string_out=list_to_string(self.data,
                                              data_delimiter=self.options['data_delimiter'],
                                              row_formatter_string=self.options['row_formatter_string'],
                                              begin=self.options["row_begin_token"],
                                              end=self.options["row_end_token"])
            except IndexError:
                # empty data list: leave string_out as ""
                pass
        else:
            string_out=ensure_string(self.data)
    return string_out
Returns the footer using options in self.options. If block comment is specified, and the footer is a list it will block comment out the footer. If comment_begin and comment_end are specified it will use those to represent each line of the footer. If footer_begin_token and/or footer_end_token are specified it will wrap the footer in those.
def get_frequency_units(
self)
Returns the frequency units by looking at the 0 index element of column names
def get_frequency_units(self):
    """Returns the frequency units by looking at the 0 index element of column names.

    The first column name is expected to look like ``freq(GHz)``; the text in
    the parentheses is returned (e.g. ``"GHz"``).  Raises AttributeError if the
    first column name does not match that pattern.
    """
    # raw string: the plain 'freq\(' literal produces invalid-escape-sequence
    # warnings on modern Python, the pattern itself is unchanged
    pattern=r'freq\((?P<Frequency_Units>\w+)\)'
    match=re.match(pattern,self.column_names[0])
    return match.groupdict()['Frequency_Units']
def get_header_string(
self)
Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those.
def get_header_string(self):
    """Returns the header using options in self.options. If block comment is
    specified, and the header is a list it will block comment out the header.
    If comment_begin and comment_end are specified it will use those to
    represent each line of the header. If header_begin_token and/or
    header_end_token are specified it will wrap the header in those.
    """
    string_out=""
    header_begin=""
    header_end=""
    # missing wrap tokens are treated as empty strings
    if self.options["header_begin_token"] is None:
        header_begin=""
    else:
        header_begin=self.options["header_begin_token"]
    if self.options["header_end_token"] is None:
        header_end=""
    else:
        header_end=self.options["header_end_token"]
    # This writes the header
    if self.header is None:
        string_out= ""
    elif self.options["header_line_types"] is not None:
        # per-line rendering: each entry of header_line_types says how the
        # corresponding header line should be emitted
        for index,line in enumerate(self.options["header_line_types"]):
            if index == len(self.options["header_line_types"])-1:
                end=''
            else:
                end='\n'
            if line in ['header','header_line','normal']:
                string_out=string_out+self.header[index]+end
            elif line in ['line_comment','comment']:
                string_out=string_out+line_comment_string(self.header[index],
                                                          comment_begin=self.options["comment_begin"],
                                                          comment_end=self.options["comment_end"])+end
            elif line in ['block_comment','block']:
                # consecutive block-comment lines are grouped into a single
                # block; block_comment_begin/end track the current run
                if index-1<0:
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                elif self.options["header_line_types"][index-1] not in ['block_comment','block']:
                    # first line of a new block run
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                else:
                    if index+1>len(self.options["header_line_types"])-1:
                        # run extends to the end of the header: flush it
                        string_out=string_out+line_list_comment_string(self.header[block_comment_begin:],
                                                                       comment_begin=self.options['block_comment_begin'],
                                                                       comment_end=self.options['block_comment_end'],
                                                                       block=True)+end
                    elif self.options["header_line_types"][index+1] in ['block_comment','block']:
                        # run continues on the next line
                        block_comment_end+=1
                    else:
                        # run ends here: flush the accumulated block
                        string_out=string_out+\
                                   line_list_comment_string(self.header[block_comment_begin:block_comment_end],
                                                            comment_begin=self.options['block_comment_begin'],
                                                            comment_end=self.options['block_comment_end'],
                                                            block=True)+end
            else:
                # unrecognized line type: emit the type marker itself
                string_out=string_out+line
    elif self.options['treat_header_as_comment'] in [None,True] and self.options["header_line_types"] in [None]:
        # Just happens if the user has set self.header manually
        # (the header_line_types check is redundant here -- the previous elif
        # already guarantees it is None)
        if type(self.header) is StringType:
            string_out=line_comment_string(self.header,
                                           comment_begin=self.options["comment_begin"],
                                           comment_end=self.options["comment_end"])
        elif type(self.header) is ListType:
            if self.options['block_comment_begin'] is None:
                if self.options['comment_begin'] is None:
                    string_out=string_list_collapse(self.header)
                else:
                    string_out=line_list_comment_string(self.header,comment_begin=self.options['comment_begin'],
                                                        comment_end=self.options['comment_end'])
                    lines_out=string_out.splitlines()  # NOTE(review): unused local, kept as-is
            else:
                string_out=line_list_comment_string(self.header,comment_begin=self.options['block_comment_begin'],
                                                    comment_end=self.options['block_comment_end'],block=True)
    else:
        string_out=ensure_string(self.header,list_delimiter="\n",end_if_list="")
    return header_begin+string_out+header_end
def get_options(
self)
Prints the option list
def get_options(self):
    """Prints the option list, one ``key = value`` line per option."""
    # items() replaces the Python-2-only iteritems(), which raises
    # AttributeError under Python 3; iteration behavior is otherwise identical
    for key,value in self.options.items():
        print("{0} = {1}".format(key,value))
def get_options_by_element(
self, element_name)
returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer
def get_options_by_element(self,element_name):
    """ returns a dictionary of all the options that have to do with element. Element
    must be header,column_names,data, or footer"""
    # select every option whose key mentions element_name, case-insensitively
    matching_keys=[key for key in self.options.keys()
                   if re.search(element_name,str(key),re.IGNORECASE)]
    return {key:self.options[key] for key in matching_keys}
def get_row(
self, row_index=None)
Returns the row as a list specified by row_index
def get_row(self,row_index=None):
    """Returns the row as a list specified by row_index"""
    # no index supplied -> nothing to return
    if row_index is None:
        return None
    return self.data[row_index]
def is_valid(
self)
Returns True if ascii table conforms to its specification given by its own options
def is_valid(self):
    """Returns True if ascii table conforms to its specification given by its own options.

    Round-trips the table: builds its string form, re-parses that string into a
    fresh AsciiDataTable configured with the same options, and compares the
    re-parsed table with self for equality.
    """
    options={}
    # copy the options so the scratch table cannot mutate ours
    # NOTE(review): iteritems() is Python-2-only
    for key,value in self.options.iteritems():
        options[key]=value
    # present elements become empty lists so the scratch table will parse them;
    # absent elements stay None
    for element in self.elements:
        if self.__dict__[element] is None:
            options[element]=None
        else:
            options[element]=[]
    options["validate"]=True
    newtable=AsciiDataTable(None,**options)
    # feed the built string back through the parser, line by line
    lines=self.build_string().splitlines()
    for index,line in enumerate(lines):
        lines[index]=line+"\n"
    newtable.lines=lines
    newtable.__parse__()
    # The new table rows are not being coerced into the right format
    newtable.update_model()
    return self==newtable
def lines_defined(
self)
Returns True if begin_line and end_line are defined for all elements that are not None
def lines_defined(self):
    """Returns True if begin_line and end_line are defined for all elements that are not None.

    Walks self.elements (skipping inline_comments and metadata); for each
    element that is present, both '<element>_begin_line' and
    '<element>_end_line' options must be set.  The end_line of the last present
    element is allowed to be None as long as its begin_line is set.  Returns
    False when any option is missing or no element is present.
    """
    truth_table=[]
    last_element=""
    for index,element in enumerate(self.elements):
        if element not in ['inline_comments','metadata'] and self.__dict__[element] is not None:
            try:
                last_element=element
                if None not in [self.options['%s_begin_line'%element],
                                self.options['%s_end_line'%element]]:
                    truth_table.append(True)
                else:
                    truth_table.append(False)
            except (KeyError,TypeError):
                # a begin/end option is missing entirely
                return False
    # fix: the original indexed truth_table[-1] unconditionally and raised
    # IndexError when no element was present at all
    if not truth_table:
        return False
    # the end_line of the last element may legitimately be None
    if truth_table[-1] is False:
        if self.options['%s_begin_line'%last_element] is not None:
            truth_table[-1]=True
    return False not in truth_table
Moves the DataTable's footer to the header and updates the model
def remove_column(
self, column_name=None, column_index=None)
Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter
def remove_column(self,column_name=None,column_index=None):
    """Removes the column specified by column_name or column_index and updates the
    model. The column is removed from column_names, data and if present
    column_types, column_descriptions and row formatter.

    The original implementation was an unimplemented stub (``pass``); this
    implements the documented contract.  Does nothing when neither selector is
    supplied or the named column does not exist.
    """
    if column_index is None:
        if column_name is None or column_name not in self.column_names:
            return
        column_index=self.column_names.index(column_name)
    # remember the removed column's name for the description lookup below
    column_name=self.column_names[column_index]
    self.column_names.pop(column_index)
    if self.data is not None:
        for row in self.data:
            row.pop(column_index)
    if self.options.get("column_types"):
        self.options["column_types"].pop(column_index)
    descriptions=self.options.get("column_descriptions")
    if isinstance(descriptions,dict) and column_name in descriptions:
        del descriptions[column_name]
    formatter=self.options.get("row_formatter_string")
    if formatter is not None:
        # drop the removed column's piece and renumber {0},{1},... fields
        pieces=formatter.split("{delimiter}")
        pieces.pop(column_index)
        pieces=[re.sub(r'\d+',str(position),piece,count=1)
                for position,piece in enumerate(pieces)]
        self.options["row_formatter_string"]="{delimiter}".join(pieces)
    self.update_model()
def remove_row(
self, row_index)
Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1
def remove_row(self,row_index):
    """Removes the row specified by row_index and updates the model. Note index is
    relative to the data attribute so to remove the first row use row_index=0 and
    the last data row is row_index=-1"""
    # drop the row in place, then resync the derived string/line representations
    del self.data[row_index]
    self.update_model()
def save(
self, path=None, **temp_options)
" Saves the file, to save in another ascii format specify elements in temp_options, the options specified do not permanently change the object's options. If path is supplied it saves the file to that path otherwise uses the object's attribute path to define the saving location
def save(self,path=None,**temp_options):
    """Saves the file; to save in another ascii format specify elements in
    temp_options. The options specified do not permanently change the object's
    options. If path is supplied it saves the file to that path, otherwise it
    uses the object's attribute path to define the saving location.

    Fixes: the original stored a *reference* to self.options instead of a copy,
    so temp_options leaked into the object's options permanently, and the
    output file handle was never closed.
    """
    # shallow snapshot -- restoring the reference alone would keep the
    # temporary keys in self.options
    original_options=dict(self.options)
    try:
        for key,value in temp_options.items():
            self.options[key]=value
        out_string=self.build_string(**temp_options)
        if path is None:
            path=self.path
        with open(path,'w') as file_out:
            file_out.write(out_string)
    finally:
        # restore even if build_string or the write raises
        self.options=original_options
def save_schema(
self, path=None, format=None)
Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves
def save_schema(self,path=None,format=None):
    """Saves the tables options as a text file or pickled dictionary (default).
    If no name is supplied, autonames it and saves.

    format in [None,'python','pickle'] writes a pickle; format in
    ['txt','text','.txt'] writes sorted ``key : value`` lines with newlines
    escaped as ``\\n``.
    """
    if path is None:
        path=auto_name(self.name.replace('.'+self.options["extension"],""),
                       'Schema',self.options["directory"],'txt')
    if format in [None,'python','pickle']:
        # context manager closes the file; the original left it open
        with open(path,'wb') as pickle_file:
            pickle.dump(self.options,pickle_file)
    elif format in ['txt','text','.txt']:
        # sorted() replaces the Python-2-only dict.keys().sort(), which raises
        # AttributeError on Python 3
        with open(path,'w') as file_out:
            for key in sorted(self.options.keys()):
                out_key=str(key).replace("\n","\\n")
                out_value=str(self.options[key]).replace("\n","\\n")
                file_out.write("{0} : {1} \n".format(out_key,out_value))
def update_column_names(
self)
Update column names adds the value x# for any column that exists in self.data that is not named
def update_column_names(self):
    """Update column names adds the value x# for any column that exists in
    self.data that is not named.

    Also splits a delimiter-joined string of column names into a list.  Fixes
    the Python-2-only ``type(...) is StringType`` check, which raises NameError
    on Python 3; ``isinstance(...,str)`` covers the same case.
    """
    if self.data is None:
        return
    elif isinstance(self.column_names,str):
        # a single delimited string -> split it into list form
        self.column_names=split_row(self.column_names,self.options["column_names_delimiter"])
    elif self.column_names is None:
        # no names at all: synthesize x0..xN from the first data row
        self.column_names=["x"+str(index) for index,column in enumerate(self.data[0])]
        return
    elif len(self.column_names)==len(self.data[0]):
        return
    elif len(self.column_names) < len(self.data[0]):
        # fewer names than data columns: pad with x<index>
        for index in range(len(self.column_names),len(self.data[0])):
            self.column_names.append("x"+str(index))
        return
def update_import_options(
self, import_table)
Updates the options in the import table
def update_import_options(self,import_table):
    """Updates the begin/end line and token options from an import table.

    import_table is indexed like [header, column_names, data, footer]; each
    entry is [begin_line, end_line, begin_token, end_token] and is copied into
    the corresponding self.options keys for every element that is present.
    Removed the leftover debug print statements from the original.
    """
    for index,element in enumerate(['header','column_names','data','footer']):
        if self.__dict__[element] is not None:
            [self.options['%s_begin_line'%element],
             self.options['%s_end_line'%element],
             self.options['%s_begin_token'%element],
             self.options['%s_end_token'%element]]=import_table[index][:]
def update_index(
self)
Updates the index column if it exists, otherwise exits quietly
def update_index(self):
    """Updates the index column if it exists, otherwise exits quietly"""
    if 'index' not in self.column_names:
        return
    try:
        # the index column is normally column 0, but look it up to be safe
        position=self.column_names.index('index')
        for row_number,row in enumerate(self.data):
            row[position]=row_number
    except:
        pass
def update_model(
self)
Updates the model after a change has been made. If you add anything to the attributes of the model, or change this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in deliminators etc.
def update_model(self):
    """Updates the model after a change has been made. If you add anything to the
    attributes of the model, or change this updates the values. If the model has
    an index column it will make sure the numbers are correct. In addition, it
    will update the options dictionary to reflect added rows, changes in
    deliminators etc. """
    if self.column_names is not None and 'index' in self.column_names:
        self.update_index()
    # strip stray newlines from the line-oriented elements; data is cleaned on
    # import instead
    for attribute in ["header","column_names","footer"]:
        element_value=self.__dict__[attribute]
        if element_value is not None:
            # in-place slice assignment so aliases of the list stay valid
            element_value[:]=[entry.replace("\n","") for entry in element_value]
    self.update_column_names()
    if self.data is not None:
        self.data=convert_all_rows(self.data,self.options["column_types"])
    # resync the cached string and line representations
    self.string=self.build_string()
    self.lines=self.string.splitlines()
class OnePortCalrepModel
class OnePortCalrepModel(AsciiDataTable):
    """AsciiDataTable subclass for one-port calrep results stored as .asc files
    or comma-delimited .txt tables (11 float columns: frequency, magnitude and
    phase plus their uncertainty terms, see ONE_PORT_COLUMN_NAMES)."""
    def __init__(self,file_path,**options):
        """Initializes the OnePortCalrepModel Class, it is assumed that the file is of the .asc or table type"""
        # This is a general pattern for adding a lot of options
        defaults= {"data_delimiter": ",", "column_names_delimiter": ",",
                   "specific_descriptor": 'One_Port',
                   "general_descriptor": 'Sparameter', "extension": 'txt',
                   "comment_begin": "#", "comment_end": "\n",
                   "column_types": ['float' for i in range(11)],
                   # NOTE(review): several description keys (Magnitude, Phase,
                   # uPhb...) do not match ONE_PORT_COLUMN_NAMES (mag, arg,
                   # uAb...) -- confirm which naming is canonical
                   "column_descriptions": {"Frequency": "Frequency in GHz",
                                           "Magnitude": "Linear magnitude",
                                           "uMb": "Uncertainty in magnitude due to standards",
                                           "uMa": "Uncertainty in magnitude due to electronics",
                                           "uMd": "Uncertainty in magnitude for repeated connects",
                                           "uMg": "Total uncertainty in magnitude",
                                           "Phase": "Phase in degrees",
                                           "uPhb": "Uncertainty in phase due to standards",
                                           "uPha": "Uncertainty in phase due to electronics",
                                           "uPhd": "Uncertainty in phase for repeated connects",
                                           "uPhg": "Total uncertainty in phase"},
                   "header": None,
                   "column_names": ONE_PORT_COLUMN_NAMES,
                   "column_names_end_token": "\n", "data": None,
                   "row_formatter_string": None, "data_table_element_separator": None,
                   "row_begin_token":None, "row_end_token":None, "escape_character":None,
                   "data_begin_token":None, "data_end_token":None}
        self.options={}
        # defaults first, then caller overrides
        # NOTE(review): iteritems() is Python-2-only
        for key,value in defaults.iteritems():
            self.options[key]=value
        for key,value in options.iteritems():
            self.options[key]=value
        # Define Method Aliases if they are available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.path=file_path
            self.__read_and_fix__()
        #build the row_formatting string, the original files have 4 decimals of precision for freq/gamma and 2 for Phase
        row_formatter=""
        for i in range(11):
            if i<6:
                row_formatter=row_formatter+"{"+str(i)+":.4f}{delimiter}"
            elif i==10:
                # last column: no trailing delimiter
                row_formatter=row_formatter+"{"+str(i)+":.2f}"
            else:
                row_formatter=row_formatter+"{"+str(i)+":.2f}{delimiter}"
        self.options["row_formatter_string"]=row_formatter
        AsciiDataTable.__init__(self,None,**self.options)
        if file_path is not None:
            self.path=file_path
    def __read_and_fix__(self):
        """Reads in a 1 port ascii file and fixes any issues with inconsistent delimiters, etc"""
        lines=[]
        table_type=self.path.split(".")[-1]
        # NOTE(review): in_file is never closed
        in_file=open(self.path,'r')
        for line in in_file:
            # skip whitespace-only lines
            if not re.match('[\s]+(?!\w+)',line):
                lines.append(line)
        # Handle the cases in which it is the comma delimited table
        if re.match('txt',table_type,re.IGNORECASE):
            lines=strip_tokens(lines,*[self.options['data_begin_token'],
                                       self.options['data_end_token']])
            self.options["data"]=strip_all_line_tokens(lines,begin_token=self.options["row_begin_token"],
                                                       end_token=self.options["row_end_token"])
            self.options["data"]=split_all_rows(self.options["data"],delimiter=self.options["data_delimiter"],
                                                escape_character=self.options["escape_character"])
            self.options["data"]=convert_all_rows(self.options["data"],self.options["column_types"])
            # the device id is the file root name, e.g. NAME + one of a/b/c + .txt
            # NOTE(review): the '.' before txt is unescaped in this pattern
            root_name_pattern=re.compile('(?P<root_name>\w+)[abc].txt',re.IGNORECASE)
            root_name_match=re.search(root_name_pattern,self.path)
            root_name=root_name_match.groupdict()["root_name"]
            self.options["header"]=["Device_Id = {0}".format(root_name)]
        elif re.match("asc",table_type,re.IGNORECASE):
            # .asc files: the numeric table starts two lines after the " TABLE" marker
            self.lines=lines
            data_begin_line=self.find_line(" TABLE")+2
            data=np.loadtxt(self.path,skiprows=data_begin_line)
            self.options["data"]=data.tolist()
            self.options["header"]=lines[:self.find_line(" TABLE")]
    def show(self):
        """Plots S11 magnitude and phase with their total-uncertainty error bars."""
        fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
        ax0.errorbar(self.get_column('Frequency'),self.get_column('mag'),
                     yerr=self.get_column('uMg'),fmt='k--')
        ax0.set_title('Magnitude S11')
        ax1.errorbar(self.get_column('Frequency'),self.get_column('arg'),
                     yerr=self.get_column('uAg'),fmt='ro')
        ax1.set_title('Phase S11')
        plt.show()
Ancestors (in MRO)
- OnePortCalrepModel
- pyMez.Code.DataHandlers.GeneralModels.AsciiDataTable
Instance variables
var options
Methods
def __init__(
self, file_path, **options)
Initializes the OnePortCalrepModel class; it is assumed that the file is of the .asc or table type
def __init__(self,file_path,**options): "Intializes the OnePortCalrepModel Class, it is assumed that the file is of the .asc or table type" # This is a general pattern for adding a lot of options defaults= {"data_delimiter": ",", "column_names_delimiter": ",", "specific_descriptor": 'One_Port', "general_descriptor": 'Sparameter', "extension": 'txt', "comment_begin": "#", "comment_end": "\n", "column_types": ['float' for i in range(11)], "column_descriptions": {"Frequency": "Frequency in GHz", "Magnitude": "Linear magnitude", "uMb": "Uncertainty in magnitude due to standards", "uMa": "Uncertainty in magnitude due to electronics", "uMd": "Uncertainty in magnitude for repeated connects", "uMg": "Total uncertainty in magnitude", "Phase": "Phase in degrees", "uPhb": "Uncertainty in phase due to standards", "uPha": "Uncertainty in phase due to electronics", "uPhd": "Uncertainty in phase for repeated connects", "uPhg": "Total uncertainty in phase"}, "header": None, "column_names": ONE_PORT_COLUMN_NAMES, "column_names_end_token": "\n", "data": None, "row_formatter_string": None, "data_table_element_separator": None,"row_begin_token":None, "row_end_token":None,"escape_character":None, "data_begin_token":None,"data_end_token":None} self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: self.path=file_path self.__read_and_fix__() #build the row_formatting string, the original files have 4 decimals of precision for freq/gamma and 2 for Phase row_formatter="" for i in range(11): if i<6: row_formatter=row_formatter+"{"+str(i)+":.4f}{delimiter}" elif i==10: row_formatter=row_formatter+"{"+str(i)+":.2f}" else: row_formatter=row_formatter+"{"+str(i)+":.2f}{delimiter}" self.options["row_formatter_string"]=row_formatter AsciiDataTable.__init__(self,None,**self.options) 
if file_path is not None: self.path=file_path
def add_column(
self, column_name=None, column_type=None, column_data=None, format_string=None)
Adds a column with column_name and column_type. If column data is supplied and its length is the same as data (same number of rows) then it is added; else self.options['empty_value'] is added in each spot in the preceding rows
def add_column(self,column_name=None,column_type=None,column_data=None,format_string=None):
    """Adds a column with column_name, and column_type. If column data is supplied
    and its length is the same as data (same number of rows) then it is added,
    else self.options['empty_value'] is added in each spot in the preceding rows
    and any extra column_data values become new, mostly-empty rows.

    Fixes two defects in the original: ``list.append`` returns None, so
    ``x=x.append(...)`` destroyed column_types and the data rows; and omitting
    column_data crashed on ``len(None)`` instead of padding with the empty value.
    """
    original_column_names=self.column_names[:]
    try:
        self.column_names.append(column_name)
        if self.options["column_types"]:
            # append in place; the original assigned the None return value back
            self.options["column_types"].append(column_type)
        if column_data is not None and len(column_data)==len(self.data):
            # one value per existing row: extend each row with its value
            for index,row in enumerate(self.data):
                new_row=row[:]
                new_row.append(column_data[index])
                self.data[index]=new_row
        else:
            # pad existing rows with the empty value ...
            for row in self.data:
                row.append(self.options['empty_value'])
            # ... then add one new row per supplied value, empty elsewhere
            if column_data is not None:
                for item in column_data:
                    empty_row=[self.options['empty_value'] for column in original_column_names]
                    empty_row.append(item)
                    self.add_row(empty_row)
        # keep the row formatter in step with the new column count
        if self.options["row_formatter_string"] is not None:
            if format_string is None:
                self.options["row_formatter_string"]=self.options["row_formatter_string"]+\
                    '{delimiter}'+"{"+str(len(self.column_names)-1)+"}"
            else:
                self.options["row_formatter_string"]=self.options["row_formatter_string"]+format_string
    except:
        # roll back the name change before propagating the error
        self.column_names=original_column_names
        print("Could not add columns")
        raise
def add_index(
self)
Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position
def add_index(self):
    """Adds a column with name index and values that are 0 referenced indices, does
    nothing if there is already a column with name index, always inserts it at the
    0 position"""
    if 'index' in self.column_names:
        print("Add Index passed")
        pass
    else:
        self.column_names.insert(0,'index')
        for index,row in enumerate(self.data):
            self.data[index].insert(0,index)
        if self.options['column_types']:
            self.options['column_types'].insert(0,'int')
        if self.options['row_formatter_string']:
            # shift every positional field {i} up to {i+1} to make room for {0}
            temp_formatter_list=self.options['row_formatter_string'].split("{delimiter}")
            # NOTE(review): str.replace substitutes every occurrence of the
            # digit string, so a format spec that repeats the digits (e.g.
            # "{0:.10f}") would be corrupted -- confirm formatter specs stay simple
            iterated_row_formatter_list=[temp_formatter_list[i].replace(str(i),str(i+1))
                                         for i in range(len(temp_formatter_list))]
            new_formatter=string_list_collapse(iterated_row_formatter_list,string_delimiter="{delimiter}")
            self.options['row_formatter_string']='{0}{delimiter}'+new_formatter
def add_inline_comment(
self, comment='', line_number=None, string_position=None)
Adds an inline comment at the specified location
def add_inline_comment(self,comment="",line_number=None,string_position=None):
    """Adds an inline comment entry [comment, line_number, string_position];
    failures (e.g. inline_comments is None) are silently ignored."""
    try:
        new_entry=[comment,line_number,string_position]
        self.inline_comments.append(new_entry)
    except:
        pass
def add_row(
self, row_data)
Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys
def add_row(self,row_data):
    """Adds a single row given row_data which can be an ordered list/tuple or a
    dictionary with column names as keys.

    Prints a warning and does nothing when the length of row_data does not
    match the number of columns.  Fixes the Python-2-only
    ListType/DictionaryType checks (NameError on Python 3) and accepts tuples,
    which the docstring promised but the original silently dropped.
    """
    if len(row_data) != len(self.column_names):
        print(" could not add the row, dimensions do not match")
        return
    if isinstance(row_data,(list,np.ndarray)):
        self.data.append(row_data)
    elif isinstance(row_data,tuple):
        # store rows as lists for consistency with the rest of the table
        self.data.append(list(row_data))
    elif isinstance(row_data,dict):
        data_list=[row_data[column_name] for column_name in self.column_names]
        self.data.append(data_list)
def build_string(
self, **temp_options)
Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model
def build_string(self,**temp_options):
    """Builds a string representation of the data table based on self.options, or
    temp_options. Passing temp_options does not permanently change the model.

    Emits header, column names, data and footer in order, separated by
    data_table_element_separator, and records each section's begin/end line
    numbers in the options as it goes.  Inline comments are spliced in last.
    """
    # store the original options to be put back after the string is made
    # NOTE(review): this stores a reference to the same dict, not a copy, so
    # temp_options actually persist in self.options -- confirm intent
    original_options=self.options
    # NOTE(review): iteritems() is Python-2-only
    for key,value in temp_options.iteritems():
        self.options[key]=value
    section_end=0  # NOTE(review): unused local, kept as-is
    next_section_begin=0
    # extra blank lines contributed by the section separator, if any
    if self.options['data_table_element_separator'] is None:
        inner_element_spacing=0
    else:
        inner_element_spacing=self.options['data_table_element_separator'].count('\n')-1
    string_out=""
    between_section=""
    if self.options['data_table_element_separator'] is not None:
        between_section=self.options['data_table_element_separator']
    # header section: emit it and record its begin/end lines in the options;
    # an end line of None means the section runs to the end of the file
    if self.header is None:
        self.options['header_begin_line']=self.options['header_end_line']=None
        pass
    else:
        self.options["header_begin_line"]=0
        if self.data is None and self.column_names is None and self.footer is None:
            string_out=self.get_header_string()
            self.options["header_end_line"]=None
        else:
            string_out=self.get_header_string()+between_section
            last_header_line=self.get_header_string().count('\n')+1
            self.options["header_end_line"]=last_header_line
            next_section_begin=last_header_line+inner_element_spacing
    # column names section
    if self.column_names is None:
        self.options['column_names_begin_line']=self.options['column_names_end_line']=None
        pass
    else:
        self.options["column_names_begin_line"]=next_section_begin
        if self.data is None and self.footer is None:
            self.options["column_names_end_line"]=None
            string_out=string_out+self.get_column_names_string()
        else:
            string_out=string_out+self.get_column_names_string()+between_section
            last_column_names_line=self.get_column_names_string().count('\n')+\
                                   self.options["column_names_begin_line"]+1
            self.options["column_names_end_line"]=last_column_names_line
            next_section_begin=last_column_names_line+inner_element_spacing
    # data section
    if self.data is None:
        self.options['data_begin_line']=self.options['data_end_line']=None
        pass
    else:
        self.options["data_begin_line"]=next_section_begin
        if self.footer is None:
            self.options["data_end_line"]=None
            string_out=string_out+self.get_data_string()
        else:
            string_out=string_out+self.get_data_string()+between_section
            last_data_line=self.get_data_string().count("\n")+\
                           self.options["data_begin_line"]+1
            self.options["data_end_line"]=last_data_line
            next_section_begin=last_data_line+inner_element_spacing
    # footer section (always the last section, so its end line is None)
    if self.footer is None:
        self.options['footer_begin_line']=self.options['footer_end_line']=None
        pass
    else:
        self.options["footer_begin_line"]=next_section_begin
        string_out=string_out+self.get_footer_string()
        self.options['footer_end_line']=None
    # splice inline comments into the finished text
    if self.inline_comments is None:
        pass
    else:
        lines=string_out.splitlines()
        for comment in self.inline_comments:
            lines=insert_inline_comment(lines,comment=comment[0],line_number=comment[1],
                                        string_position=comment[2],
                                        begin_token=self.options['inline_comment_begin'],
                                        end_token=self.options['inline_comment_end'])
        string_out=string_list_collapse(lines,string_delimiter='\n')
    # set the options back after the string has been made
    self.options=original_options
    return string_out
def change_unit_prefix(
self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz')
Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes
def change_unit_prefix(self,column_selector=None,old_prefix=None,new_prefix=None,unit='Hz'):
    """Changes the prefix of the units of the column specified by column_selector
    (column name or index). Example usage is
    self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz')
    to change a column from Hz to GHz. It updates the data values,
    column_descriptions, and column_units if they exist, see
    http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes.

    Raises (after printing a notice) if the column contains an unsupported cell
    type or if an option lookup fails."""
    multipliers={"yotta":10.**24,"Y":10.**24,"zetta":10.**21,"Z":10.**21,"exa":10.**18,"E":10.**18,"peta":10.**15,
                 "P":10.**15,"tera":10.**12,"T":10.**12,"giga":10.**9,"G":10.**9,"mega":10.**6,"M":10.**6,
                 "kilo":10.**3,"k":10.**3,"hecto":10.**2,"h":10.**2,"deka":10.,"da":10.,None:1.,"":1.,
                 "deci":10.**-1,"d":10.**-1,"centi":10.**-2,"c":10.**-2,"milli":10.**-3,"m":10.**-3,
                 "micro":10.**-6,"mu":10.**-6,u"\u00B5":10.**-6,"nano":10.**-9,
                 "n":10.**-9,"pico":10.**-12,"p":10.**-12,"femto":10.**-15,
                 "f":10.**-15,"atto":10.**-18,"a":10.**-18,"zepto":10.**-21,"z":10.**-21,
                 "yocto":10.**-24,"y":10.**-24}
    try:
        if old_prefix is None:
            old_prefix=""
        if new_prefix is None:
            new_prefix=""
        old_unit=old_prefix+unit
        new_unit=new_prefix+unit
        # A column name is resolved to its index; an int selector passes through.
        if column_selector in self.column_names:
            column_selector=self.column_names.index(column_selector)
        # Hoist the loop-invariant conversion ratio.
        ratio=multipliers[old_prefix]/multipliers[new_prefix]
        for index,row in enumerate(self.data):
            cell=self.data[index][column_selector]
            # Numeric cells are rescaled in place; string cells are rescaled and
            # kept as strings (mirrors the original FloatType/StringType split).
            # Note: isinstance replaces the python-2 types-module checks, and the
            # former `print type(...)` (a py2 print statement) is removed.
            if isinstance(cell,float) or (isinstance(cell,int) and not isinstance(cell,bool)):
                self.data[index][column_selector]=ratio*cell
            elif isinstance(cell,str):
                self.data[index][column_selector]=str(ratio*float(cell))
            else:
                # Bare `raise` with no active exception is illegal; raise explicitly.
                raise TypeError("Unsupported cell type {0}".format(type(cell)))
        if self.options["column_descriptions"] is not None:
            old=self.options["column_descriptions"][column_selector]
            self.options["column_descriptions"][column_selector]=old.replace(old_unit,new_unit)
        if self.options["column_units"] is not None:
            old=self.options["column_units"][column_selector]
            self.options["column_units"][column_selector]=old.replace(old_unit,new_unit)
        if re.search(old_unit,self.column_names[column_selector]):
            old=self.column_names[column_selector]
            self.column_names[column_selector]=old.replace(old_unit,new_unit)
    except:
        print("Could not change the unit prefix of column {0}".format(column_selector))
        raise
def find_line(
self, begin_token)
Finds the first line that has begin token in it
def find_line(self,begin_token): """Finds the first line that has begin token in it""" for index,line in enumerate(self.lines): if re.search(begin_token,line): return index
def get_column(
self, column_name=None, column_index=None)
Returns a column as a list given a column name or column index
def get_column(self,column_name=None,column_index=None): """Returns a column as a list given a column name or column index""" if column_name is None: if column_index is None: return else: column_selector=column_index else: column_selector=self.column_names.index(column_name) out_list=[self.data[i][column_selector] for i in range(len(self.data))] return out_list
def get_column_names_string(
self)
Returns the column names as a string using options
def get_column_names_string(self): "Returns the column names as a string using options" string_out="" # This writes the column_names column_name_begin="" column_name_end="" if self.options["column_names_begin_token"] is None: column_name_begin="" else: column_name_begin=self.options["column_names_begin_token"] if self.options["column_names_end_token"] is None: column_name_end="" else: column_name_end=self.options["column_names_end_token"] if self.column_names is None: string_out="" else: if type(self.column_names) is StringType: string_out=self.column_names elif type(self.column_names) is ListType: string_out=list_to_string(self.column_names, data_delimiter=self.options["column_names_delimiter"],end="") #print("{0} is {1}".format('string_out',string_out)) else: string_out=ensure_string(self.column_names) #print column_name_begin,string_out,column_name_end return column_name_begin+string_out+column_name_end
def get_data_dictionary_list(
self, use_row_formatter_string=True)
Returns a python list with a row dictionary of form {column_name:data_column}
def get_data_dictionary_list(self,use_row_formatter_string=True): """Returns a python list with a row dictionary of form {column_name:data_column}""" try: if self.options["row_formatter_string"] is None: use_row_formatter_string=False if use_row_formatter_string: list_formatter=[item.replace("{"+str(index),"{0") for index,item in enumerate(self.options["row_formatter_string"].split("{delimiter}"))] else: list_formatter=["{0}" for i in self.column_names] #print self.column_names #print self.data #print list_formatter #print len(self.column_names)==len(self.data[0]) #print len(list_formatter)==len(self.data[0]) #print type(self.data[0]) out_list=[{self.column_names[i]:list_formatter[i].format(value) for i,value in enumerate(line)} for line in self.data] return out_list except: print("Could not form a data_dictionary_list, check that row_formatter_string is properly defined") #print(out_list) raise
def get_data_string(
self)
Returns the data as a string
def get_data_string(self):
    "Returns the data as a string"
    # Todo: refactor to cut out unused lines
    # Dispatches on the runtime type of self.data: a ready-made string, a list
    # (of row strings or of row lists), or anything else (coerced at the end).
    # Begin/end tokens are only prepended/appended when not already present.
    string_out=""
    if self.data is None:
        string_out= ""
    else:
        if type(self.data) is StringType:
            # Data is already one string; only token wrapping may be needed.
            if self.options['data_begin_token'] is None:
                if self.options['data_end_token'] is None:
                    string_out=self.data
                else:
                    if re.search(self.options['data_end_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.data+self.options['data_end_token']
            else:
                # NOTE(review): the case (begin_token set, end_token set) is not
                # handled here -- string_out stays "" on that path; preserved as-is.
                if self.options['data_end_token'] is None:
                    if re.match(self.options['data_begin_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.options['data_begin_token']+self.data
        elif type(self.data) in [ListType,np.ndarray]:
            try:
                # If the first row is a string, we should strip all the tokens
                # and add them back in.
                if type(self.data[0]) is StringType:
                    if self.options['data_begin_token'] is None:
                        string_out=string_list_collapse(self.data)
                    else:
                        if re.match(self.options['data_begin_token'],self.data[0]):
                            if self.options['data_end_token'] is None:
                                string_out=string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=string_list_collapse(self.data)
                                else:
                                    string_out=string_list_collapse(self.data)+self.options['data_end_token']
                        else:
                            if self.options['data_end_token'] is None:
                                string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                                else:
                                    string_out=self.options['data_begin_token']+\
                                               string_list_collapse(self.data)+\
                                               self.options['data_end_token']
                elif type(self.data[0]) in [ListType,np.ndarray]:
                    # Rows are lists/arrays: render with the row formatter options.
                    prefix=""  # NOTE(review): unused variable, preserved as-is
                    if self.options['data_begin_token'] is None:
                        # NOTE(review): (begin_token None, end_token set) is not
                        # handled -- string_out stays "" on that path; preserved as-is.
                        if self.options['data_end_token'] is None:
                            string_out=list_list_to_string(self.data,data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                    else:
                        if self.options['data_end_token'] is None:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                        else:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=\
                                                           self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])+\
                                       self.options['data_end_token']
                else:
                    # Flat list of scalar values: render as a single row.
                    string_out=list_to_string(self.data,
                                              data_delimiter=self.options['data_delimiter'],
                                              row_formatter_string=self.options['row_formatter_string'],
                                              begin=self.options["row_begin_token"],
                                              end=self.options["row_end_token"])
            except IndexError:
                # Empty list: leave string_out as "".
                pass
        else:
            string_out=ensure_string(self.data)
    return string_out
Returns the footer using options in self.options. If block comment is specified, and the footer is a list it will block comment out the footer. If comment_begin and comment_end are specified it will use those to represent each line of the footer. If footer_begin_token and/or footer_end_token are specified it will wrap the footer in those.
def get_header_string(
self)
Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those.
def get_header_string(self):
    """Returns the header using options in self.options. If block comment is
    specified, and the header is a list it will block comment out the header.
    If comment_begin and comment_end are specified it will use those to
    represent each line of the header. If header_begin_token and/or
    header_end_token are specified it will wrap the header in those.
    """
    string_out=""
    header_begin=""
    header_end=""
    # Resolve the optional wrapping tokens to empty strings when unset.
    if self.options["header_begin_token"] is None:
        header_begin=""
    else:
        header_begin=self.options["header_begin_token"]
    if self.options["header_end_token"] is None:
        header_end=""
    else:
        header_end=self.options["header_end_token"]
    # This writes the header
    if self.header is None:
        string_out= ""
    elif self.options["header_line_types"] is not None:
        # Per-line rendering: each entry of header_line_types selects how the
        # corresponding header line is emitted. Consecutive 'block_comment'
        # lines are accumulated (block_comment_begin/end indices) and emitted
        # as one block when the run ends.
        for index,line in enumerate(self.options["header_line_types"]):
            # No trailing newline after the final header line.
            if index == len(self.options["header_line_types"])-1:
                end=''
            else:
                end='\n'
            if line in ['header','header_line','normal']:
                string_out=string_out+self.header[index]+end
            elif line in ['line_comment','comment']:
                string_out=string_out+line_comment_string(self.header[index],
                                                          comment_begin=self.options["comment_begin"],
                                                          comment_end=self.options["comment_end"])+end
            elif line in ['block_comment','block']:
                if index-1<0:
                    # First line of the header starts a new block run.
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                elif self.options["header_line_types"][index-1] not in ['block_comment','block']:
                    # Previous line was not a block line: start a new block run.
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                else:
                    if index+1>len(self.options["header_line_types"])-1:
                        # Block run extends to the end of the header: flush it.
                        string_out=string_out+line_list_comment_string(self.header[block_comment_begin:],
                                                                       comment_begin=self.options['block_comment_begin'],
                                                                       comment_end=self.options['block_comment_end'],
                                                                       block=True)+end
                    elif self.options["header_line_types"][index+1] in ['block_comment','block']:
                        # Run continues on the next line: just grow the window.
                        block_comment_end+=1
                    else:
                        # Next line ends the run: flush the accumulated block.
                        string_out=string_out+\
                                   line_list_comment_string(self.header[block_comment_begin:block_comment_end],
                                                            comment_begin=self.options['block_comment_begin'],
                                                            comment_end=self.options['block_comment_end'],
                                                            block=True)+end
            else:
                # Unknown line type: emit the type marker itself.
                # NOTE(review): this appends `line` (the type string), not
                # self.header[index] -- possibly intentional; preserved as-is.
                string_out=string_out+line
    elif self.options['treat_header_as_comment'] in [None,True] and self.options["header_line_types"] in [None]:
        # Just happens if the user has set self.header manually
        if type(self.header) is StringType:
            string_out=line_comment_string(self.header,
                                           comment_begin=self.options["comment_begin"],
                                           comment_end=self.options["comment_end"])
        elif type(self.header) is ListType:
            if self.options['block_comment_begin'] is None:
                if self.options['comment_begin'] is None:
                    string_out=string_list_collapse(self.header)
                else:
                    string_out=line_list_comment_string(self.header,comment_begin=self.options['comment_begin'],
                                                        comment_end=self.options['comment_end'])
                    lines_out=string_out.splitlines()  # NOTE(review): unused, preserved as-is
            else:
                string_out=line_list_comment_string(self.header,comment_begin=self.options['block_comment_begin'],
                                                    comment_end=self.options['block_comment_end'],block=True)
        else:
            string_out=ensure_string(self.header,list_delimiter="\n",end_if_list="")
    return header_begin+string_out+header_end
def get_options(
self)
Prints the option list
def get_options(self): "Prints the option list" for key,value in self.options.iteritems(): print("{0} = {1}".format(key,value))
def get_options_by_element(
self, element_name)
returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer
def get_options_by_element(self,element_name): """ returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer""" keys_regarding_element=filter(lambda x: re.search(element_name,str(x),re.IGNORECASE),self.options.keys()) out_dictionary={key:self.options[key] for key in keys_regarding_element} #print out_dictionary return out_dictionary
def get_row(
self, row_index=None)
Returns the row as a list specified by row_index
def get_row(self,row_index=None): """Returns the row as a list specified by row_index""" if row_index is None: return else: return self.data[row_index]
def is_valid(
self)
Returns True if ascii table conforms to its specification given by its own options
def is_valid(self): """Returns True if ascii table conforms to its specification given by its own options""" options={} for key,value in self.options.iteritems(): options[key]=value # print("self.options[{0}] is {1} ".format(key,value)) for element in self.elements: if self.__dict__[element] is None: options[element]=None else: options[element]=[] options["validate"]=True newtable=AsciiDataTable(None,**options) lines=self.build_string().splitlines() for index,line in enumerate(lines): lines[index]=line+"\n" newtable.lines=lines newtable.__parse__() # print newtable.data # print newtable.column_names # print newtable #print_comparison(newtable.footer,None) newtable.update_model() # The new table rows are not being coerced into the right format #print newtable #newtable.update_model() #print newtable.options #print self.options #print newtable.data # print newtable.options==self.options # for option_key,option_value in newtable.options.iteritems(): # print("New Table Option {0} is {1} ".format(option_key,option_value)) # print("self.options[{0}] is {1} ".format(option_key,self.options[option_key])) # print_comparison(option_value,self.options[option_key]) # #print self return self==newtable
def lines_defined(
self)
If begin_line and end_line are defined for all elements that are not None, returns True
def lines_defined(self): """If begin_line and end_line for all elements that are None are defined returns True""" truth_table=[] last_element="" output=False for index,element in enumerate(self.elements): if element not in ['inline_comments','metadata'] and self.__dict__[element] is not None: try: last_element=element if not None in [self.options['%s_begin_line'%element],self.options['%s_end_line'%element]]: truth_table.append(True) else: truth_table.append(False) except: return False #print truth_table # The last_line of the last element is fine to be none if truth_table[-1] is False: if self.options['%s_begin_line'%last_element] is not None: truth_table[-1]=True if False in truth_table: output=False else: output=True #print output return output
Moves the DataTable's footer to the header and updates the model
def remove_column(
self, column_name=None, column_index=None)
Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter
def remove_column(self,column_name=None,column_index=None): """Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter""" pass
def remove_row(
self, row_index)
Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1
def remove_row(self,row_index): """Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1""" self.data.pop(row_index) self.update_model()
def save(
self, path=None, **temp_options)
" Saves the file, to save in another ascii format specify elements in temp_options, the options specified do not permanently change the object's options. If path is supplied it saves the file to that path otherwise uses the object's attribute path to define the saving location
def save(self,path=None,**temp_options): """" Saves the file, to save in another ascii format specify elements in temp_options, the options specified do not permanently change the object's options. If path is supplied it saves the file to that path otherwise uses the object's attribute path to define the saving location """ original_options=self.options for key,value in temp_options.iteritems(): self.options[key]=value out_string=self.build_string(**temp_options) if path is None: path=self.path file_out=open(path,'w') file_out.write(out_string) file_out.close() self.options=original_options
def save_schema(
self, path=None, format=None)
Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves
def save_schema(self,path=None,format=None): """Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves""" if path is None: path=auto_name(self.name.replace('.'+self.options["extension"],""),'Schema',self.options["directory"],'txt') if format in [None,'python','pickle']: pickle.dump(self.options,open(path,'wb')) elif format in ['txt','text','.txt']: file_out=open(path,'w') keys=self.options.keys() keys.sort() for key in keys: out_key=str(key).replace("\n","\\n") out_value=str(self.options[key]).replace("\n","\\n") file_out.write("{0} : {1} \n".format(out_key,out_value)) file_out.close()
def show(
self)
def show(self): fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True) ax0.errorbar(self.get_column('Frequency'),self.get_column('mag'), yerr=self.get_column('uMg'),fmt='k--') ax0.set_title('Magnitude S11') ax1.errorbar(self.get_column('Frequency'),self.get_column('arg'), yerr=self.get_column('uAg'),fmt='ro') ax1.set_title('Phase S11') plt.show()
def update_column_names(
self)
Update column names adds the value x# for any column that exists in self.data that is not named
def update_column_names(self): """Update column names adds the value x# for any column that exists in self.data that is not named""" if self.data is None: return elif type(self.column_names) is StringType: self.column_names=split_row(self.column_names,self.options["column_names_delimiter"]) elif self.column_names is None: column_names=[] for index,column in enumerate(self.data[0]): column_names.append("x"+str(index)) self.column_names=column_names return elif len(self.column_names)==len(self.data[0]): return elif len(self.column_names) < len(self.data[0]): for index in range(len(self.column_names),len(self.data[0])): self.column_names.append("x"+str(index)) return
def update_import_options(
self, import_table)
Updates the options in the import table
def update_import_options(self,import_table): """Updates the options in the import table""" for index,element in enumerate(['header','column_names','data','footer']): if self.__dict__[element] is not None: print("The {0} variable is {1}".format('index',index)) print("The {0} variable is {1}".format('element',element)) print("The {0} variable is {1}".format('import_table',import_table)) [self.options['%s_begin_line'%element], self.options['%s_end_line'%element], self.options['%s_begin_token'%element], self.options['%s_end_token'%element]]=import_table[index][:]
def update_index(
self)
Updates the index column if it exists, otherwise exits quietly
def update_index(self): """ Updates the index column if it exits, otherwise exits quietly """ if 'index' not in self.column_names: return else: try: #This should be 0 but just in case index_column_number=self.column_names.index('index') for i in range(len(self.data)): self.data[i][index_column_number]=i except: pass
def update_model(
self)
Updates the model after a change has been made. If you add anything to the attributes of the model, or change them, this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in delimiters, etc.
def update_model(self): """Updates the model after a change has been made. If you add anything to the attributes of the model, or change this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in deliminators etc. """ if self.column_names is not None and 'index' in self.column_names: self.update_index() #make sure there are no "\n" characters in the element lists (if so replace them with "") for data this is # done on import list_types=["header","column_names","footer"] for element in list_types: if self.__dict__[element] is not None: for index,item in enumerate(self.__dict__[element]): self.__dict__[element][index]=item.replace("\n","") self.update_column_names() if self.data is not None: self.data=convert_all_rows(self.data,self.options["column_types"]) self.string=self.build_string() self.lines=self.string.splitlines()
class OnePortRawModel
Class that deals with the OnePort Raw Files after conversion to Ascii using Ron Ginley's converter. These files typically have header information seperated from data by !! Header format is: Line 1: Spid$ - identification of type of system used Line 2: Systemletter$ - letter name indicating which system was used Line 3: Conncal$ - connector type from the system calibration Line 4: Connectors$ - connector type used for the measurement Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power) Line 6: Datea$ - date of measurement Line 7: Timea$ - time of measurement Line 8: Programm$ - name of program used Line 9: Rev$ - program revision Line 10: Opr$ - operator Line 11: Cfile$ - calibration name Line 12: Cdate$ - calibration date Line 13: Sport - identification of which port or direction was used for measurement Line 14: Numconnects ? number of disconnect/reconnect cycles Line 15: Numrepeats ? number of repeat measurements for each connect (usually 1) Line 16: Nbs ? not sure Line 17: Nfreq ? number of frequencies Line 18: Startfreq ? data row pointer for bdat files Line 19: Devicedescript$ - description of device being measured or of test being done Line 20: Devicenum$ - Identifying number for device ? used for file names
class OnePortRawModel(AsciiDataTable):
    """ Class that deals with the OnePort Raw Files after conversion to Ascii using
    Ron Ginley's converter. These files typically have header information separated
    from data by !!
    Header format is:
    Line 1: Spid$ - identification of type of system used
    Line 2: Systemletter$ - letter name indicating which system was used
    Line 3: Conncal$ - connector type from the system calibration
    Line 4: Connectors$ - connector type used for the measurement
    Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power)
    Line 6: Datea$ - date of measurement
    Line 7: Timea$ - time of measurement
    Line 8: Programm$ - name of program used
    Line 9: Rev$ - program revision
    Line 10: Opr$ - operator
    Line 11: Cfile$ - calibration name
    Line 12: Cdate$ - calibration date
    Line 13: Sport - identification of which port or direction was used for measurement
    Line 14: Numconnects - number of disconnect/reconnect cycles
    Line 15: Numrepeats - number of repeat measurements for each connect (usually 1)
    Line 16: Nbs - not sure
    Line 17: Nfreq - number of frequencies
    Line 18: Startfreq - data row pointer for bdat files
    Line 19: Devicedescript$ - description of device being measured or of test being done
    Line 20: Devicenum$ - identifying number for device, used for file names
    """
    def __init__(self,file_path=None,**options):
        """Initializes the OnePortRaw class; if a file_path is specified opens an
        existing file, else creates an empty container."""
        defaults= {"data_delimiter": ",",
                   "column_names_delimiter": ",",
                   "specific_descriptor": 'One_Port_Raw',
                   "general_descriptor": 'Sparameter',
                   "extension": 'txt',
                   "comment_begin": "#",
                   "comment_end": "\n",
                   "column_types": ['float','int','int','float','float','float','float'],
                   "column_descriptions": {"Frequency":"Frequency in GHz",
                                           "Direction":"Direction of connects, may be unused",
                                           "Connect":"Connect number",
                                           "magS11":"Linear magnitude for port 1",
                                           "argS11":"Phase in degrees for port 1",
                                           "magS22":"Linear magnitude for port 2",
                                           "argS22":"Phase in degrees for port 2"},
                   "header": None,
                   "column_names": ["Frequency","Direction","Connect",
                                    "magS11", "argS11","magS22", "argS22"],
                   "column_names_end_token": "\n",
                   "data": None,
                   'row_formatter_string': "{0:.5f}{delimiter}{1}{delimiter}{2}{delimiter}"
                                           "{3:.4f}{delimiter}{4:.2f}{delimiter}{5:.4f}{delimiter}{6:.2f}",
                   "data_table_element_separator": None}
        self.options={}
        # dict.items() -- iteritems() does not exist in python 3
        for key,value in defaults.items():
            self.options[key]=value
        for key,value in options.items():
            self.options[key]=value
        # Define Method Aliases if they are available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.__read_and_fix__(file_path)
        AsciiDataTable.__init__(self,None,**self.options)
        self.path=file_path
        self.structure_metadata()

    def __read_and_fix__(self,file_path=None):
        """Inputs in the raw OnePortRaw file and fixes any problems with
        delimiters, etc. Header lines and data rows are split at the '!!'
        marker line."""
        lines=[]
        # with-statement guarantees the file handle is closed (was leaked before).
        with open(file_path,'r') as in_file:
            for index,line in enumerate(in_file):
                lines.append(line)
                if re.match("!!",line):
                    data_begin_line=index+1
        self.lines=lines
        data=split_all_rows(lines[data_begin_line:],delimiter=", ")
        self.options["data"]=data
        self.options["header"]=lines[:data_begin_line-1]

    def structure_metadata(self):
        """Populate self.metadata with key/value pairs extracted positionally
        from the header lines."""
        keys=["System_Id","System_Letter","Connector_Type_Calibration","Connector_Type_Measurement",
              "Measurement_Type","Measurement_Date","Measurement_Time","Program_Used","Program_Revision","Operator",
              "Calibration_Name","calibration_date","Port_Used","Number_Connects","Number_Repeats","Nbs",
              "Number_Frequencies","Start_Frequency",
              "Device_Description","Device_Id"]
        self.metadata={}
        for index,key in enumerate(keys):
            self.metadata[key]=self.header[index]

    def show(self):
        """Plot S11 magnitude and phase on two stacked axes sharing frequency."""
        fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
        ax0.plot(self.get_column('Frequency'),self.get_column('magS11'),'k--')
        ax0.set_title('Magnitude S11')
        ax1.plot(self.get_column('Frequency'),self.get_column('argS11'),'ro')
        ax1.set_title('Phase S11')
        plt.show()
Ancestors (in MRO)
- OnePortRawModel
- pyMez.Code.DataHandlers.GeneralModels.AsciiDataTable
Instance variables
var options
var path
Methods
def __init__(
self, file_path=None, **options)
Initializes the OnePortRaw class, if a file_path is specified opens an existing file, else creates an empty container
def __init__(self,file_path=None,**options): """Initializes the OnePortRaw class, if a file_path is specified opens an existing file, else creates an empty container""" defaults= {"data_delimiter": ",", "column_names_delimiter": ",", "specific_descriptor": 'One_Port_Raw', "general_descriptor": 'Sparameter', "extension": 'txt', "comment_begin": "#", "comment_end": "\n", "column_types": ['float','int','int','float','float','float','float'], "column_descriptions": {"Frequency":"Frequency in GHz", "Direction":"Direction of connects, may be unused", "Connect":"Connect number", "magS11":"Linear magnitude for port 1", "argS11":"Phase in degrees for port 1", "magS22":"Linear magnitude for port 2", "argS22":"Phase in degrees for port 2"}, "header": None, "column_names": ["Frequency","Direction","Connect", "magS11", "argS11","magS22", "argS22"], "column_names_end_token": "\n", "data": None, 'row_formatter_string': "{0:.5f}{delimiter}{1}{delimiter}{2}{delimiter}" "{3:.4f}{delimiter}{4:.2f}{delimiter}{5:.4f}{delimiter}{6:.2f}", "data_table_element_separator": None} self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: self.__read_and_fix__(file_path) AsciiDataTable.__init__(self,None,**self.options) self.path=file_path self.structure_metadata()
def add_column(
self, column_name=None, column_type=None, column_data=None, format_string=None)
Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows
def add_column(self,column_name=None,column_type=None,column_data=None,format_string=None): """Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows""" original_column_names=self.column_names[:] try: self.column_names.append(column_name) if self.options["column_types"]: self.options["column_types"]=self.options["column_types"].append(column_type) if len(column_data) == len(self.data): for index,row in enumerate(self.data): #print("{0} is {1}".format('self.data[index]',self.data[index])) #print("{0} is {1}".format('row',row)) new_row=row[:] new_row.append(column_data[index]) self.data[index]=new_row else: for index,row in enumerate(self.data): self.data[index]=row.append(self.options['empty_value']) if column_data is not None: for item in column_data: empty_row=[self.options['empty_value'] for column in original_column_names] empty_row.append(item) self.add_row(empty_row) if self.options["row_formatter_string"] is None: pass else: if format_string is None: self.options["row_formatter_string"]=self.options["row_formatter_string"]+\ '{delimiter}'+"{"+str(len(self.column_names)-1)+"}" else: self.options["row_formatter_string"]=self.options["row_formatter_string"]+format_string #self.update_model() except: self.column_names=original_column_names print("Could not add columns") raise
def add_index(
self)
Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position
def add_index(self): """Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position""" if 'index' in self.column_names: print("Add Index passed") pass else: self.column_names.insert(0,'index') for index,row in enumerate(self.data): self.data[index].insert(0,index) if self.options['column_types']: self.options['column_types'].insert(0,'int') if self.options['row_formatter_string']: temp_formatter_list=self.options['row_formatter_string'].split("{delimiter}") iterated_row_formatter_list=[temp_formatter_list[i].replace(str(i),str(i+1)) for i in range(len(temp_formatter_list))] new_formatter=string_list_collapse(iterated_row_formatter_list,string_delimiter="{delimiter}") self.options['row_formatter_string']='{0}{delimiter}'+new_formatter
def add_inline_comment(
self, comment='', line_number=None, string_position=None)
Adds an inline in the specified location
def add_inline_comment(self,comment="",line_number=None,string_position=None): "Adds an inline in the specified location" try: self.inline_comments.append([comment,line_number,string_position]) except:pass
def add_row(
self, row_data)
Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys
def add_row(self, row_data):
    """Adds a single row given row_data, which can be an ordered
    list/tuple/array or a dictionary keyed by column names.

    A row whose length does not match the number of columns is rejected
    with a printed warning (no exception), preserving the table shape.
    The previous version used the Python 2-only types.ListType /
    DictionaryType constants and silently dropped tuples.
    """
    if len(row_data) != len(self.column_names):
        print(" could not add the row, dimensions do not match")
        return
    if isinstance(row_data, dict):
        # order the values by the table's column order
        self.data.append([row_data[column_name] for column_name in self.column_names])
    else:
        self.data.append(row_data)
def build_string(
self, **temp_options)
Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model
def build_string(self,**temp_options):
    """Builds a string representation of the data table based on self.options,
    or temp_options. Passing temp_options does not permanently change the model"""
    # store the original options to be put back after the string is made
    # NOTE(review): this aliases self.options rather than copying it, so the
    # temp_options written below are NOT actually rolled back at the end -
    # confirm whether a dict copy was intended
    original_options=self.options
    # NOTE(review): dict.iteritems() is Python 2 only
    for key,value in temp_options.iteritems():
        self.options[key]=value
    section_end=0  # NOTE(review): assigned but never used
    next_section_begin=0
    # number of blank lines between sections, derived from the separator text
    if self.options['data_table_element_separator'] is None:
        inner_element_spacing=0
    else:
        inner_element_spacing=self.options['data_table_element_separator'].count('\n')-1
    string_out=""
    between_section=""
    if self.options['data_table_element_separator'] is not None:
        between_section=self.options['data_table_element_separator']
    # header section: append its text and record its begin/end line numbers
    if self.header is None:
        self.options['header_begin_line']=self.options['header_end_line']=None
        pass
    else:
        self.options["header_begin_line"]=0
        if self.data is None and self.column_names is None and self.footer is None:
            # header is the last (only) section, so end_line stays None
            string_out=self.get_header_string()
            self.options["header_end_line"]=None
        else:
            string_out=self.get_header_string()+between_section
            last_header_line=self.get_header_string().count('\n')+1
            self.options["header_end_line"]=last_header_line
            next_section_begin=last_header_line+inner_element_spacing
    # column names section
    if self.column_names is None:
        self.options['column_names_begin_line']=self.options['column_names_end_line']=None
        pass
    else:
        self.options["column_names_begin_line"]=next_section_begin
        if self.data is None and self.footer is None:
            self.options["column_names_end_line"]=None
            string_out=string_out+self.get_column_names_string()
        else:
            string_out=string_out+self.get_column_names_string()+between_section
            last_column_names_line=self.get_column_names_string().count('\n')+\
                self.options["column_names_begin_line"]+1
            self.options["column_names_end_line"]=last_column_names_line
            next_section_begin=last_column_names_line+inner_element_spacing
    # data section
    if self.data is None:
        self.options['data_begin_line']=self.options['data_end_line']=None
        pass
    else:
        self.options["data_begin_line"]=next_section_begin
        if self.footer is None:
            self.options["data_end_line"]=None
            string_out=string_out+self.get_data_string()
        else:
            string_out=string_out+self.get_data_string()+between_section
            last_data_line=self.get_data_string().count("\n")+\
                self.options["data_begin_line"]+1
            self.options["data_end_line"]=last_data_line
            next_section_begin=last_data_line+inner_element_spacing
    # footer section: always last, so its end line is left as None
    if self.footer is None:
        self.options['footer_begin_line']=self.options['footer_end_line']=None
        pass
    else:
        self.options["footer_begin_line"]=next_section_begin
        string_out=string_out+self.get_footer_string()
        self.options['footer_end_line']=None
    # re-insert any inline comments into the assembled text
    if self.inline_comments is None:
        pass
    else:
        lines=string_out.splitlines()
        for comment in self.inline_comments:
            lines=insert_inline_comment(lines,comment=comment[0],line_number=comment[1],
                                        string_position=comment[2],
                                        begin_token=self.options['inline_comment_begin'],
                                        end_token=self.options['inline_comment_end'])
        string_out=string_list_collapse(lines,string_delimiter='\n')
    # set the options back after the string has been made
    self.options=original_options
    return string_out
def change_unit_prefix(
self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz')
Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes
def change_unit_prefix(self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz'):
    """Changes the prefix of the units of the column specified by column_selector
    (column name or index). Example usage:
    self.change_unit_prefix(column_selector='Frequency', old_prefix=None, new_prefix='G', unit='Hz')
    changes a column from Hz to GHz. It updates the data values and, when they
    exist, column_descriptions, column_units and the column name itself.
    See http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes.

    Fixes applied: the Python 2 'print type(...)' statement (a syntax error in
    Python 3), the removed types.FloatType/LongType/StringType/IntType names,
    and a bare 'raise' with no active exception.
    """
    multipliers={"yotta":10.**24,"Y":10.**24,"zetta":10.**21,"Z":10.**21,"exa":10.**18,"E":10.**18,"peta":10.**15,
                 "P":10.**15,"tera":10.**12,"T":10.**12,"giga":10.**9,"G":10.**9,"mega":10.**6,"M":10.**6,
                 "kilo":10.**3,"k":10.**3,"hecto":10.**2,"h":10.**2,"deka":10.,"da":10.,None:1.,"":1.,
                 "deci":10.**-1,"d":10.**-1,"centi":10.**-2,"c":10.**-2,"milli":10.**-3,"m":10.**-3,
                 "micro":10.**-6,"mu":10.**-6,u"\u00B5":10.**-6,"nano":10.**-9,
                 "n":10.**-9,"pico":10.**-12,"p":10.**-12,"femto":10.**-15,
                 "f":10.**-15,"atto":10.**-18,"a":10.**-18,"zepto":10.**-21,"z":10.**-21,
                 "yocto":10.**-24,"y":10.**-24}
    try:
        if old_prefix is None:
            old_prefix = ""
        if new_prefix is None:
            new_prefix = ""
        old_unit = old_prefix + unit
        new_unit = new_prefix + unit
        # a column name is converted to its positional index
        if column_selector in self.column_names:
            column_selector = self.column_names.index(column_selector)
        scale = multipliers[old_prefix] / multipliers[new_prefix]
        for index, row in enumerate(self.data):
            value = self.data[index][column_selector]
            # floats stay numeric; strings (and ints, matching the original
            # Python 2 IntType branch) are stored back as strings
            if isinstance(value, float):
                self.data[index][column_selector] = scale * value
            elif isinstance(value, (str, int)):
                self.data[index][column_selector] = str(scale * float(value))
            else:
                print(type(value))
                raise TypeError("Cannot scale column value of type {0}".format(type(value)))
        if self.options["column_descriptions"] is not None:
            # NOTE(review): column_descriptions is keyed by column NAME elsewhere
            # in this file but indexed here by position - confirm the key type
            old = self.options["column_descriptions"][column_selector]
            self.options["column_descriptions"][column_selector] = old.replace(old_unit, new_unit)
        if self.options["column_units"] is not None:
            old = self.options["column_units"][column_selector]
            self.options["column_units"][column_selector] = old.replace(old_unit, new_unit)
        if re.search(old_unit, self.column_names[column_selector]):
            old = self.column_names[column_selector]
            self.column_names[column_selector] = old.replace(old_unit, new_unit)
    except:
        print("Could not change the unit prefix of column {0}".format(column_selector))
        raise
def find_line(
self, begin_token)
Finds the first line that has begin token in it
def find_line(self, begin_token):
    """Returns the index of the first line in self.lines that matches
    begin_token (a regular expression); returns None when nothing matches."""
    for line_number, line_text in enumerate(self.lines):
        if re.search(begin_token, line_text):
            return line_number
def get_column(
self, column_name=None, column_index=None)
Returns a column as a list given a column name or column index
def get_column(self, column_name=None, column_index=None):
    """Returns a column as a list given a column name or column index;
    returns None when neither selector is supplied."""
    if column_name is not None:
        selector = self.column_names.index(column_name)
    elif column_index is not None:
        selector = column_index
    else:
        return None
    return [row[selector] for row in self.data]
def get_column_names_string(
self)
Returns the column names as a string using options
def get_column_names_string(self):
    """Returns the column names as a single string wrapped in the
    column_names_begin_token / column_names_end_token options and joined
    with column_names_delimiter.

    Fix: the Python 2-only types.StringType/ListType checks are replaced
    with isinstance tests so the method works under Python 3.
    """
    if self.options["column_names_begin_token"] is None:
        column_name_begin = ""
    else:
        column_name_begin = self.options["column_names_begin_token"]
    if self.options["column_names_end_token"] is None:
        column_name_end = ""
    else:
        column_name_end = self.options["column_names_end_token"]
    if self.column_names is None:
        string_out = ""
    elif isinstance(self.column_names, str):
        string_out = self.column_names
    elif isinstance(self.column_names, list):
        string_out = list_to_string(self.column_names,
                                    data_delimiter=self.options["column_names_delimiter"],
                                    end="")
    else:
        string_out = ensure_string(self.column_names)
    return column_name_begin + string_out + column_name_end
def get_data_dictionary_list(
self, use_row_formatter_string=True)
Returns a python list with a row dictionary of form {column_name:data_column}
def get_data_dictionary_list(self, use_row_formatter_string=True):
    """Returns a list of row dictionaries of the form {column_name: formatted_value}.

    When use_row_formatter_string is True and the row_formatter_string option
    is set, each value is rendered with its per-column format; otherwise the
    plain '{0}' format is used. On failure a diagnostic is printed and the
    error re-raised."""
    try:
        formatter = self.options["row_formatter_string"]
        if formatter is None:
            use_row_formatter_string = False
        if use_row_formatter_string:
            # rebase every positional field to {0} so each piece formats one value
            per_column = [piece.replace("{" + str(position), "{0")
                          for position, piece in enumerate(formatter.split("{delimiter}"))]
        else:
            per_column = ["{0}"] * len(self.column_names)
        return [{self.column_names[i]: per_column[i].format(cell)
                 for i, cell in enumerate(row)} for row in self.data]
    except:
        print("Could not form a data_dictionary_list, check that row_formatter_string is properly defined")
        raise
def get_data_string(
self)
Returns the data as a string
def get_data_string(self):
    """Returns the data as a string"""
    #Todo:refactor to cut out unused lines
    string_out=""
    if self.data is None:
        string_out= ""
    else:
        # NOTE(review): StringType/ListType below are Python 2 names
        # (presumably provided by the GeneralModels star import)
        if type(self.data) is StringType:
            # data is already one string: add begin/end tokens only if missing
            if self.options['data_begin_token'] is None:
                if self.options['data_end_token'] is None:
                    string_out=self.data
                else:
                    if re.search(self.options['data_end_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.data+self.options['data_end_token']
            else:
                if self.options['data_end_token'] is None:
                    if re.match(self.options['data_begin_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.options['data_begin_token']+self.data
        elif type(self.data) in [ListType,np.ndarray]:
            try:
                #If the first row is a string, we should strip all the tokens and add them back in
                if type(self.data[0]) is StringType:
                    if self.options['data_begin_token'] is None:
                        string_out=string_list_collapse(self.data)
                    else:
                        if re.match(self.options['data_begin_token'],self.data[0]):
                            if self.options['data_end_token'] is None:
                                string_out=string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=string_list_collapse(self.data)
                                else:
                                    string_out=string_list_collapse(self.data)+self.options['data_end_token']
                        else:
                            if self.options['data_end_token'] is None:
                                string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                                else:
                                    string_out=self.options['data_begin_token']+\
                                               string_list_collapse(self.data)+\
                                               self.options['data_end_token']
                elif type(self.data[0]) in [ListType,np.ndarray]:
                    prefix=""  # NOTE(review): assigned but never used
                    if self.options['data_begin_token'] is None:
                        if self.options['data_end_token'] is None:
                            string_out=list_list_to_string(self.data,data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                        # NOTE(review): begin_token None with end_token set
                        # leaves string_out empty - confirm this is intended
                    else:
                        if self.options['data_end_token'] is None:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                        else:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=\
                                                           self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])+\
                                       self.options['data_end_token']
                else:
                    string_out=list_to_string(self.data,
                                              data_delimiter=self.options['data_delimiter'],
                                              row_formatter_string=self.options['row_formatter_string'],
                                              begin=self.options["row_begin_token"],
                                              end=self.options["row_end_token"])
            except IndexError:
                # empty data list: leave string_out as ""
                pass
        else:
            string_out=ensure_string(self.data)
    return string_out
Returns the footer using options in self.options. If block comment is specified, and the footer is a list it will block comment out the footer. If comment_begin and comment_end are specified it will use those to represent each line of the footer. If footer_begin_token and/or footer_end_token are specified it will wrap the footer in those.
def get_header_string(
self)
Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those.
def get_header_string(self):
    """Returns the header using options in self.options. If block comment is specified,
    and the header is a list it will block comment out the header. If comment_begin
    and comment_end are specified it will use those to represent each line of the
    header. If header_begin_token and/or header_end_token are specified it will wrap
    the header in those.
    """
    string_out=""
    header_begin=""
    header_end=""
    if self.options["header_begin_token"] is None:
        header_begin=""
    else:
        header_begin=self.options["header_begin_token"]
    if self.options["header_end_token"] is None:
        header_end=""
    else:
        header_end=self.options["header_end_token"]
    # This writes the header
    if self.header is None:
        string_out= ""
    elif self.options["header_line_types"] is not None:
        # per-line rendering: each entry of header_line_types says how the
        # header line with the same index should be formatted
        for index,line in enumerate(self.options["header_line_types"]):
            if index == len(self.options["header_line_types"])-1:
                end=''
            else:
                end='\n'
            if line in ['header','header_line','normal']:
                string_out=string_out+self.header[index]+end
            elif line in ['line_comment','comment']:
                string_out=string_out+line_comment_string(self.header[index],
                                                          comment_begin=self.options["comment_begin"],
                                                          comment_end=self.options["comment_end"])+end
            elif line in ['block_comment','block']:
                # consecutive 'block' lines are accumulated into one block
                # comment; block_comment_begin/end track the header slice
                if index-1<0:
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                elif self.options["header_line_types"][index-1] not in ['block_comment','block']:
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                else:
                    if index+1>len(self.options["header_line_types"])-1:
                        # block runs to the end of the header
                        string_out=string_out+line_list_comment_string(self.header[block_comment_begin:],
                                                                       comment_begin=self.options['block_comment_begin'],
                                                                       comment_end=self.options['block_comment_end'],
                                                                       block=True)+end
                    elif self.options["header_line_types"][index+1] in ['block_comment','block']:
                        # block continues on the next line; just extend it
                        block_comment_end+=1
                    else:
                        string_out=string_out+\
                            line_list_comment_string(self.header[block_comment_begin:block_comment_end],
                                                     comment_begin=self.options['block_comment_begin'],
                                                     comment_end=self.options['block_comment_end'],
                                                     block=True)+end
            else:
                string_out=string_out+line
    elif self.options['treat_header_as_comment'] in [None,True] and self.options["header_line_types"] in [None]:
        # Just happens if the user has set self.header manually
        # NOTE(review): StringType/ListType are Python 2-only names
        if type(self.header) is StringType:
            string_out=line_comment_string(self.header,
                                           comment_begin=self.options["comment_begin"],
                                           comment_end=self.options["comment_end"])
        elif type(self.header) is ListType:
            if self.options['block_comment_begin'] is None:
                if self.options['comment_begin'] is None:
                    string_out=string_list_collapse(self.header)
                else:
                    string_out=line_list_comment_string(self.header,comment_begin=self.options['comment_begin'],
                                                        comment_end=self.options['comment_end'])
                    lines_out=string_out.splitlines()  # NOTE(review): unused
            else:
                string_out=line_list_comment_string(self.header,comment_begin=self.options['block_comment_begin'],
                                                    comment_end=self.options['block_comment_end'],block=True)
    else:
        string_out=ensure_string(self.header,list_delimiter="\n",end_if_list="")
    return header_begin+string_out+header_end
def get_options(
self)
Prints the option list
def get_options(self):
    """Prints every key = value pair in self.options.

    Fix: dict.iteritems() was removed in Python 3; items() works in both.
    """
    for key, value in self.options.items():
        print("{0} = {1}".format(key, value))
def get_options_by_element(
self, element_name)
returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer
def get_options_by_element(self, element_name):
    """Returns a dictionary of all options whose keys mention element_name
    (case-insensitive regex search). element_name should be header,
    column_names, data, or footer."""
    matching_keys = [key for key in self.options.keys()
                     if re.search(element_name, str(key), re.IGNORECASE)]
    return {key: self.options[key] for key in matching_keys}
def get_row(
self, row_index=None)
Returns the row as a list specified by row_index
def get_row(self, row_index=None):
    """Returns the data row at row_index as a list; returns None when no
    index is supplied."""
    return None if row_index is None else self.data[row_index]
def is_valid(
self)
Returns True if ascii table conforms to its specification given by its own options
def is_valid(self):
    """Returns True if ascii table conforms to its specification given by its own options"""
    # round-trip check: rebuild a fresh AsciiDataTable from this table's own
    # string representation and compare it to self for equality
    options={}
    # NOTE(review): dict.iteritems() is Python 2 only
    for key,value in self.options.iteritems():
        options[key]=value
    # null out / empty the structural elements so the new table parses them
    # from the built string rather than inheriting them
    for element in self.elements:
        if self.__dict__[element] is None:
            options[element]=None
        else:
            options[element]=[]
    options["validate"]=True
    newtable=AsciiDataTable(None,**options)
    lines=self.build_string().splitlines()
    for index,line in enumerate(lines):
        lines[index]=line+"\n"
    newtable.lines=lines
    newtable.__parse__()
    newtable.update_model()
    # The new table rows are not being coerced into the right format
    return self==newtable
def lines_defined(
self)
If begin_line and end_line for all elements that are None are defined returns True
def lines_defined(self):
    """Returns True when every present element (excluding inline_comments
    and metadata) has both its begin_line and end_line options defined;
    the final element only needs its begin_line."""
    checks = []
    final_element = ""
    for element in self.elements:
        if element in ['inline_comments', 'metadata']:
            continue
        if self.__dict__[element] is None:
            continue
        try:
            final_element = element
            begin = self.options['%s_begin_line' % element]
            end = self.options['%s_end_line' % element]
            checks.append(begin is not None and end is not None)
        except:
            return False
    # the last element's end_line may legitimately be undefined
    if checks[-1] is False:
        if self.options['%s_begin_line' % final_element] is not None:
            checks[-1] = True
    return False not in checks
Moves the DataTable's footer to the header and updates the model
def remove_column(
self, column_name=None, column_index=None)
Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter
def remove_column(self, column_name=None, column_index=None):
    """Removes the column specified by column_name or column_index and
    updates the model. The column is removed from column_names, data and,
    if present, column_types, column_descriptions and the row formatter
    string. Does nothing when neither selector is given.

    The previous version was an unimplemented stub (pass) despite its
    docstring promising this behavior.
    """
    if column_name is not None:
        column_index = self.column_names.index(column_name)
    elif column_index is None:
        return
    removed_name = self.column_names.pop(column_index)
    for row in self.data:
        row.pop(column_index)
    if self.options.get("column_types"):
        self.options["column_types"].pop(column_index)
    descriptions = self.options.get("column_descriptions")
    if isinstance(descriptions, dict):
        # descriptions are keyed by column name in this module's defaults
        descriptions.pop(removed_name, None)
    if self.options.get("row_formatter_string"):
        pieces = self.options["row_formatter_string"].split("{delimiter}")
        pieces.pop(column_index)
        # renumber the remaining positional fields so they stay consecutive
        pieces = [re.sub(r'\{\d+', '{' + str(position), piece)
                  for position, piece in enumerate(pieces)]
        self.options["row_formatter_string"] = "{delimiter}".join(pieces)
    self.update_model()
def remove_row(
self, row_index)
Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1
def remove_row(self, row_index):
    """Removes the data row at row_index and updates the model. The index is
    relative to the data attribute, so the first data row is row_index=0 and
    the last is row_index=-1."""
    del self.data[row_index]
    self.update_model()
def save(
self, path=None, **temp_options)
" Saves the file, to save in another ascii format specify elements in temp_options, the options specified do not permanently change the object's options. If path is supplied it saves the file to that path otherwise uses the object's attribute path to define the saving location
def save(self, path=None, **temp_options):
    """Saves the table. To save in another ascii format, specify elements in
    temp_options; the options specified do not permanently change the object's
    options. If path is supplied it saves to that path, otherwise the object's
    path attribute defines the saving location.

    Fixes: self.options was restored from an alias (not a copy), so
    temp_options silently persisted; the file handle is now closed via
    'with'; iteritems() replaced for Python 3.
    """
    # copy, don't alias: restoring self.options below was otherwise a no-op
    original_options = dict(self.options)
    for key, value in temp_options.items():
        self.options[key] = value
    out_string = self.build_string(**temp_options)
    if path is None:
        path = self.path
    with open(path, 'w') as file_out:
        file_out.write(out_string)
    self.options = original_options
def save_schema(
self, path=None, format=None)
Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves
def save_schema(self, path=None, format=None):
    """Saves the table's options as a pickled dictionary (default) or as a
    text file (format in ['txt','text','.txt']). If no path is supplied one
    is auto-generated from the table name.

    Fixes: dict.keys() has no .sort() in Python 3 (now sorted()); files are
    closed via 'with'.
    """
    if path is None:
        path = auto_name(self.name.replace('.' + self.options["extension"], ""),
                         'Schema', self.options["directory"], 'txt')
    if format in [None, 'python', 'pickle']:
        with open(path, 'wb') as pickle_file:
            pickle.dump(self.options, pickle_file)
    elif format in ['txt', 'text', '.txt']:
        with open(path, 'w') as file_out:
            for key in sorted(self.options.keys()):
                # keep keys/values on one line each by escaping newlines
                out_key = str(key).replace("\n", "\\n")
                out_value = str(self.options[key]).replace("\n", "\\n")
                file_out.write("{0} : {1} \n".format(out_key, out_value))
def show(
self)
def show(self):
    """Plots the magnitude and phase of S11 versus frequency in two stacked
    axes sharing the x-axis; blocks until the matplotlib window is closed."""
    fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)
    # magnitude on top ('k--' = black dashed), phase below ('ro' = red circles)
    ax0.plot(self.get_column('Frequency'),self.get_column('magS11'),'k--')
    ax0.set_title('Magnitude S11')
    ax1.plot(self.get_column('Frequency'),self.get_column('argS11'),'ro')
    ax1.set_title('Phase S11')
    plt.show()
def structure_metadata(
self)
Returns a dictionary of key,value pairs extracted from the header
def structure_metadata(self):
    """Builds self.metadata by pairing a fixed list of metadata keys with the
    header lines in order (key i <- header line i)."""
    keys=["System_Id","System_Letter","Connector_Type_Calibration","Connector_Type_Measurement",
          "Measurement_Type","Measurement_Date","Measurement_Time","Program_Used","Program_Revision","Operator",
          "Calibration_Name","calibration_date","Port_Used","Number_Connects","Number_Repeats","Nbs",
          "Number_Frequencies","Start_Frequency",
          "Device_Description","Device_Id"]
    self.metadata = {key: self.header[position] for position, key in enumerate(keys)}
def update_column_names(
self)
Update column names adds the value x# for any column that exists in self.data that is not named
def update_column_names(self):
    """Ensures column_names is a list matching the width of self.data, naming
    any unnamed column x<index>. Does nothing when there is no data.

    Fix: the Python 2-only types.StringType check is replaced with
    isinstance(..., str).
    """
    if self.data is None:
        return
    if isinstance(self.column_names, str):
        # a delimited string of names is split into a list
        self.column_names = split_row(self.column_names, self.options["column_names_delimiter"])
        return
    if self.column_names is None:
        self.column_names = ["x" + str(position) for position in range(len(self.data[0]))]
        return
    if len(self.column_names) < len(self.data[0]):
        # pad with generated names so every data column is named
        for position in range(len(self.column_names), len(self.data[0])):
            self.column_names.append("x" + str(position))
    return
def update_import_options(
self, import_table)
Updates the options in the import table
def update_import_options(self, import_table):
    """Updates the begin/end line and token options for each element
    (header, column_names, data, footer) that is present, from the
    corresponding 4-item row of import_table:
    [begin_line, end_line, begin_token, end_token].

    Fix: removed leftover debug print statements that spammed stdout on
    every import.
    """
    for index, element in enumerate(['header', 'column_names', 'data', 'footer']):
        if self.__dict__[element] is not None:
            [self.options['%s_begin_line' % element],
             self.options['%s_end_line' % element],
             self.options['%s_begin_token' % element],
             self.options['%s_end_token' % element]] = import_table[index][:]
def update_index(
self)
Updates the index column if it exists, otherwise exits quietly
def update_index(self):
    """Re-numbers the 'index' column (if present) so each row's index value
    matches its position; exits quietly when the column is absent or on error."""
    if 'index' not in self.column_names:
        return
    try:
        # normally position 0, but locate it just in case
        position = self.column_names.index('index')
        for row_number, row in enumerate(self.data):
            row[position] = row_number
    except:
        pass
def update_model(
self)
Updates the model after a change has been made. If you add anything to the attributes of the model, or change them, this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in delimiters, etc.
def update_model(self):
    """Updates the model after a change has been made. If you add anything to
    the attributes of the model, or change this updates the values. If the model
    has an index column it will make sure the numbers are correct. In addition,
    it will update the options dictionary to reflect added rows, changes in
    deliminators etc. """
    # renumber the index column first so the rebuilt string is consistent
    if self.column_names is not None and 'index' in self.column_names:
        self.update_index()
    #make sure there are no "\n" characters in the element lists (if so replace them with "") for data this is
    # done on import
    list_types=["header","column_names","footer"]
    for element in list_types:
        if self.__dict__[element] is not None:
            for index,item in enumerate(self.__dict__[element]):
                self.__dict__[element][index]=item.replace("\n","")
    self.update_column_names()
    # coerce every data cell back to its declared column type
    if self.data is not None:
        self.data=convert_all_rows(self.data,self.options["column_types"])
    # regenerate the cached string and line representations
    self.string=self.build_string()
    self.lines=self.string.splitlines()
class PowerCalrep
PowerCalrep is a model that holds data output by analyzing several datafiles using the HPBasic program Calrep. The data is stored in 2 tables: a S11 table, and a power table. The data is in linear magnitude and angle in degrees. There are 2 types of files, one is a single file with .asc extension and 2 files with .txt extension
class PowerCalrep():
    """PowerCalrep is a model that holds data output by analyzing several
    datafiles using the HPBasic program Calrep. The data is stored in 2 tables:
    a S11 table, and a power table. The data is in linear magnitude and angle
    in degrees. There are 2 types of files, one is a single file with .asc
    extension and 2 files with .txt extension"""

    def __init__(self, file_path=None, **options):
        """Intializes the PowerCalrep class, if a file path is specified it
        opens and reads the file"""
        if file_path is None:
            pass
        elif re.match('asc', file_path.split(".")[-1], re.IGNORECASE):
            # single .asc file containing a header plus the S11 and power tables
            self.table_names = ['header', 'S11', 'Efficiency']
            self.row_pattern = make_row_match_string(ONE_PORT_COLUMN_NAMES)
            self.power_row_pattern = make_row_match_string(POWER_COLUMN_NAMES)
            self.path = file_path
            self.__read_and_fix__()
        elif re.match('txt', file_path.split(".")[-1], re.IGNORECASE) or type(file_path) is ListType:
            self.table_names = ['S11', 'Efficiency']
            if type(file_path) is ListType:
                # an explicit [power_file, one_port_file] pair
                self.file_names = file_path
                self.tables = []
                for index, table in enumerate(self.table_names):
                    if index == 0:
                        self.tables.append(PowerModel(self.file_names[index]))
                    elif index == 1:
                        self.tables.append(OnePortCalrepModel(self.file_names[index]))
            else:
                # a single <root>[abc].txt name: derive the a/b file pair
                try:
                    root_name_pattern = re.compile('(?P<root_name>\w+)[abc].txt', re.IGNORECASE)
                    root_name_match = re.search(root_name_pattern, file_path)
                    root_name = root_name_match.groupdict()["root_name"]
                    directory = os.path.dirname(os.path.realpath(file_path))
                    self.file_names = [os.path.join(directory, root_name + end) for end in ['a.txt', 'b.txt']]
                    self.tables = []
                    for index, table in enumerate(self.table_names):
                        if index == 0:
                            self.tables.append(OnePortCalrepModel(self.file_names[index]))
                        elif index == 1:
                            self.tables.append(PowerModel(self.file_names[index]))
                except:
                    print("Could not import {0} please check that the a,b "
                          "tables are all in the same directory".format(file_path))
                    raise
            # prefix every non-frequency column with its table name before joining
            for index, table in enumerate(self.tables):
                for column_number, column in enumerate(table.column_names):
                    # bug fix: 'column is not "Frequency"' compared identity,
                    # which only worked by accident of string interning; compare
                    # values with != instead
                    if column != "Frequency":
                        table.column_names[column_number] = self.table_names[index] + "_" + column
            self.joined_table = ascii_data_table_join("Frequency", self.tables[0], self.tables[1])

    def __read_and_fix__(self):
        """Reads the .asc file and splits it into the header, S11 and power tables."""
        in_file = open(self.path, 'r')
        self.lines = []
        table_locators = ["Table 1", "Table 2"]
        begin_lines = []
        # record the line numbers where each table starts
        for index, line in enumerate(in_file):
            self.lines.append(line)
            for table in table_locators:
                if re.search(table, line, re.IGNORECASE):
                    begin_lines.append(index)
        in_file.close()
        self.table_line_numbers = []
        for index, begin_line in enumerate(begin_lines):
            if index == 0:
                # first marker also delimits the header above it
                header_begin_line = 0
                header_end_line = begin_line - 2
                table_1_begin_line = begin_line + 3
                table_1_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([header_begin_line, header_end_line])
                self.table_line_numbers.append([table_1_begin_line, table_1_end_line])
            elif index > 0 and index < (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([table_begin_line, table_end_line])
            elif index == (len(begin_lines) - 1):
                # last table runs to the end of the file
                table_begin_line = begin_line + 3
                table_end_line = None
                self.table_line_numbers.append([table_begin_line, table_end_line])
        self.tables = []
        for index, name in enumerate(self.table_names):
            self.table_lines = self.lines[self.table_line_numbers[index][0]:self.table_line_numbers[index][1]]
            self.tables.append(self.table_lines)
        for index, table in enumerate(self.table_names):
            if index == 0:
                # by using parse_lines we get a list_list of strings instead of
                # list_string; we can just remove end lines
                self.tables[index] = strip_all_line_tokens(self.tables[index], begin_token=None, end_token='\n')
            elif index == 1:
                column_types = ['float' for i in range(len(ONE_PORT_COLUMN_NAMES))]
                options = {"row_pattern": self.row_pattern, "column_names": ONE_PORT_COLUMN_NAMES,
                           "output": "list_list"}
                options["column_types"] = column_types
                self.tables[index] = parse_lines(self.tables[index], **options)
                table_options = {"data": self.tables[index]}
                self.tables[index] = OnePortCalrepModel(None, **table_options)
            elif index == 2:
                column_types = ['float' for i in range(len(POWER_COLUMN_NAMES))]
                options = {"row_pattern": self.power_row_pattern, "column_names": POWER_COLUMN_NAMES,
                           "output": "list_list"}
                options["column_types"] = column_types
                self.tables[index] = parse_lines(self.tables[index], **options)
                table_options = {"data": self.tables[index]}
                self.tables[index] = PowerModel(None, **table_options)
        # attach the raw header to the S11 table, then join S11 with power
        self.tables[1].header = self.tables[0]
        self.joined_table = ascii_data_table_join("Frequency", self.tables[1], self.tables[2])
Ancestors (in MRO)
Methods
def __init__(
self, file_path=None, **options)
Intializes the PowerCalrep class, if a file path is specified it opens and reads the file
def __init__(self, file_path=None, **options):
    """Intializes the PowerCalrep class, if a file path is specified it opens
    and reads the file"""
    if file_path is None:
        pass
    elif re.match('asc', file_path.split(".")[-1], re.IGNORECASE):
        # single .asc file containing a header plus the S11 and power tables
        self.table_names = ['header', 'S11', 'Efficiency']
        self.row_pattern = make_row_match_string(ONE_PORT_COLUMN_NAMES)
        self.power_row_pattern = make_row_match_string(POWER_COLUMN_NAMES)
        self.path = file_path
        self.__read_and_fix__()
    elif re.match('txt', file_path.split(".")[-1], re.IGNORECASE) or type(file_path) is ListType:
        self.table_names = ['S11', 'Efficiency']
        if type(file_path) is ListType:
            # an explicit [power_file, one_port_file] pair
            self.file_names = file_path
            self.tables = []
            for index, table in enumerate(self.table_names):
                if index == 0:
                    self.tables.append(PowerModel(self.file_names[index]))
                elif index == 1:
                    self.tables.append(OnePortCalrepModel(self.file_names[index]))
        else:
            # a single <root>[abc].txt name: derive the a/b file pair
            try:
                root_name_pattern = re.compile('(?P<root_name>\w+)[abc].txt', re.IGNORECASE)
                root_name_match = re.search(root_name_pattern, file_path)
                root_name = root_name_match.groupdict()["root_name"]
                directory = os.path.dirname(os.path.realpath(file_path))
                self.file_names = [os.path.join(directory, root_name + end) for end in ['a.txt', 'b.txt']]
                self.tables = []
                for index, table in enumerate(self.table_names):
                    if index == 0:
                        self.tables.append(OnePortCalrepModel(self.file_names[index]))
                    elif index == 1:
                        self.tables.append(PowerModel(self.file_names[index]))
            except:
                print("Could not import {0} please check that the a,b "
                      "tables are all in the same directory".format(file_path))
                raise
        # prefix every non-frequency column with its table name before joining
        for index, table in enumerate(self.tables):
            for column_number, column in enumerate(table.column_names):
                # bug fix: 'is not' compared string identity; compare values
                if column != "Frequency":
                    table.column_names[column_number] = self.table_names[index] + "_" + column
        self.joined_table = ascii_data_table_join("Frequency", self.tables[0], self.tables[1])
class PowerCalrepModel
# AsciiDataTable subclass for a NIST power "calrep" table (comma-delimited
# .txt form). __init__ layers caller options over the defaults dict,
# installs method aliases when available, reads/fixes the file when a path
# is given, and builds a row formatter: 4 decimal places for the first 6
# columns, 2 for the rest. __read_and_fix__ re-reads the file, dropping
# blank lines, splitting and type-converting the rows, and derives the
# "Device_Id" header line from the file's root name.
# NOTE(review): "column_descriptions" uses the 3-term uncertainty keys
# (uEs/uEc/uEe, uCs/uCc/uCe) while "column_names" is POWER_COLUMN_NAMES with
# the 4-term keys (uEb/uEa/uEd/uEg, ...) -- the descriptions do not match the
# declared columns; confirm which power model is intended.
# Python 2 only: uses dict.iteritems().
class PowerCalrepModel(AsciiDataTable): def __init__(self,file_path,**options): "Intializes the PowerModel Class, it is assumed that the file is of table type" # This is a general pattern for adding a lot of options defaults= {"data_delimiter": ",", "column_names_delimiter": ",", "specific_descriptor": 'One_Port', "general_descriptor": 'Power', "extension": 'txt', "comment_begin": "#", "comment_end": "\n", "column_types": ['float' for i in range(len(POWER_COLUMN_NAMES))], "column_descriptions": {"Frequency": "Frequency in GHz", "Efficiency":"Effective Efficiency", "uEs": "Uncertainty in efficiency due to standards", "uEc": "Uncertainty in efficiency for repeated connects", "uEe": "Total uncertainty in Efficiency", "Calibration_Factor": "Effective efficiency modified by reflection coefficient", "uCs": "Uncertainty in calibration factor due to standards", "uCc": "Uncertainty in calibration factor for repeated connects", "uCe": "Total uncertainty in calibration factor"}, "header": None, "column_names":POWER_COLUMN_NAMES, "column_names_end_token": "\n", "data": None, "row_formatter_string": None, "data_table_element_separator": None,"row_begin_token":None, "row_end_token":None,"escape_character":None, "data_begin_token":None,"data_end_token":None} self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: self.path=file_path self.__read_and_fix__() #build the row_formatting string, the original files have 4 decimals of precision for freq/gamma and 2 for Phase row_formatter="" for i in range(11): if i<6: row_formatter=row_formatter+"{"+str(i)+":.4f}{delimiter}" elif i==10: row_formatter=row_formatter+"{"+str(i)+":.2f}" else: row_formatter=row_formatter+"{"+str(i)+":.2f}{delimiter}" self.options["row_formatter_string"]=row_formatter 
AsciiDataTable.__init__(self,None,**self.options) if file_path is not None: self.path=file_path def __read_and_fix__(self): """Reads in a 1 port ascii file and fixes any issues with inconsistent delimiters, etc""" lines=[] table_type=self.path.split(".")[-1] in_file=open(self.path,'r') for line in in_file: if not re.match('[\s]+(?!\w+)',line): #print line lines.append(line) # Handle the cases in which it is the comma delimited table if re.match('txt',table_type,re.IGNORECASE): lines=strip_tokens(lines,*[self.options['data_begin_token'], self.options['data_end_token']]) self.options["data"]=strip_all_line_tokens(lines,begin_token=self.options["row_begin_token"], end_token=self.options["row_end_token"]) self.options["data"]=split_all_rows(self.options["data"],delimiter=self.options["data_delimiter"], escape_character=self.options["escape_character"]) self.options["data"]=convert_all_rows(self.options["data"],self.options["column_types"]) #print self.options["data"] root_name_pattern=re.compile('(?P<root_name>\w+)[abc].txt',re.IGNORECASE) root_name_match=re.search(root_name_pattern,self.path) root_name=root_name_match.groupdict()["root_name"] self.options["header"]=["Device_Id = {0}".format(root_name)]
Ancestors (in MRO)
- PowerCalrepModel
- pyMez.Code.DataHandlers.GeneralModels.AsciiDataTable
Instance variables
var options
Methods
def __init__(
self, file_path, **options)
Initializes the PowerModel class; it is assumed that the file is of the table type
# PowerCalrepModel.__init__ (duplicate source listing): merges caller options
# over the defaults dict, installs method aliases when available, reads and
# fixes the file when a path is given, then constructs row_formatter_string
# (4 decimals for the first 6 columns, 2 thereafter) before delegating to
# AsciiDataTable.__init__.
# NOTE(review): column_descriptions (3-term uEs/uEc/uEe keys) does not match
# column_names (4-term POWER_COLUMN_NAMES) -- confirm intended column set.
# Python 2 only: uses dict.iteritems().
def __init__(self,file_path,**options): "Intializes the PowerModel Class, it is assumed that the file is of table type" # This is a general pattern for adding a lot of options defaults= {"data_delimiter": ",", "column_names_delimiter": ",", "specific_descriptor": 'One_Port', "general_descriptor": 'Power', "extension": 'txt', "comment_begin": "#", "comment_end": "\n", "column_types": ['float' for i in range(len(POWER_COLUMN_NAMES))], "column_descriptions": {"Frequency": "Frequency in GHz", "Efficiency":"Effective Efficiency", "uEs": "Uncertainty in efficiency due to standards", "uEc": "Uncertainty in efficiency for repeated connects", "uEe": "Total uncertainty in Efficiency", "Calibration_Factor": "Effective efficiency modified by reflection coefficient", "uCs": "Uncertainty in calibration factor due to standards", "uCc": "Uncertainty in calibration factor for repeated connects", "uCe": "Total uncertainty in calibration factor"}, "header": None, "column_names":POWER_COLUMN_NAMES, "column_names_end_token": "\n", "data": None, "row_formatter_string": None, "data_table_element_separator": None,"row_begin_token":None, "row_end_token":None,"escape_character":None, "data_begin_token":None,"data_end_token":None} self.options={} for key,value in defaults.iteritems(): self.options[key]=value for key,value in options.iteritems(): self.options[key]=value # Define Method Aliases if they are available if METHOD_ALIASES: for command in alias(self): exec(command) if file_path is not None: self.path=file_path self.__read_and_fix__() #build the row_formatting string, the original files have 4 decimals of precision for freq/gamma and 2 for Phase row_formatter="" for i in range(11): if i<6: row_formatter=row_formatter+"{"+str(i)+":.4f}{delimiter}" elif i==10: row_formatter=row_formatter+"{"+str(i)+":.2f}" else: row_formatter=row_formatter+"{"+str(i)+":.2f}{delimiter}" self.options["row_formatter_string"]=row_formatter AsciiDataTable.__init__(self,None,**self.options) if file_path is 
not None: self.path=file_path
def add_column(
self, column_name=None, column_type=None, column_data=None, format_string=None)
Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows
def add_column(self, column_name=None, column_type=None, column_data=None, format_string=None):
    """Adds a column with column_name and column_type. If column_data is
    supplied and its length matches the current number of rows it is added
    row-by-row; otherwise self.options['empty_value'] pads the preceding
    rows and any column_data items are appended as new rows. On failure the
    original column names are restored and the error is re-raised.

    Fixes relative to the original:
      * list.append returns None, so `x = x.append(...)` clobbered
        options['column_types'] (and each padded data row) with None --
        append in place instead.
      * column_data=None previously raised TypeError inside len(); it now
        takes the padding branch, as the docstring promises.
    """
    original_column_names = self.column_names[:]
    try:
        self.column_names.append(column_name)
        if self.options["column_types"]:
            self.options["column_types"].append(column_type)
        if column_data is not None and len(column_data) == len(self.data):
            # One value per existing row: extend a copy of each row.
            for index, row in enumerate(self.data):
                new_row = row[:]
                new_row.append(column_data[index])
                self.data[index] = new_row
        else:
            # Pad all existing rows, then add any supplied values as new rows.
            for row in self.data:
                row.append(self.options['empty_value'])
            if column_data is not None:
                for item in column_data:
                    empty_row = [self.options['empty_value'] for _ in original_column_names]
                    empty_row.append(item)
                    self.add_row(empty_row)
        if self.options["row_formatter_string"] is not None:
            if format_string is None:
                # Default formatter for the new last column.
                self.options["row_formatter_string"] = self.options["row_formatter_string"] + \
                    '{delimiter}' + "{" + str(len(self.column_names) - 1) + "}"
            else:
                self.options["row_formatter_string"] = self.options["row_formatter_string"] + format_string
    except:
        self.column_names = original_column_names
        print("Could not add columns")
        raise
def add_index(
self)
Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position
def add_index(self):
    """Insert a zero-referenced 'index' column at position 0; no-op (with a
    console note) when an 'index' column already exists. Updates
    column_types and row_formatter_string to match."""
    if 'index' in self.column_names:
        print("Add Index passed")
        return
    self.column_names.insert(0, 'index')
    for position, _ in enumerate(self.data):
        self.data[position].insert(0, position)
    if self.options['column_types']:
        self.options['column_types'].insert(0, 'int')
    if self.options['row_formatter_string']:
        pieces = self.options['row_formatter_string'].split("{delimiter}")
        # Shift every positional field number up by one to make room for {0}.
        shifted = [piece.replace(str(i), str(i + 1)) for i, piece in enumerate(pieces)]
        new_formatter = string_list_collapse(shifted, string_delimiter="{delimiter}")
        self.options['row_formatter_string'] = '{0}{delimiter}' + new_formatter
def add_inline_comment(
self, comment='', line_number=None, string_position=None)
Adds an inline comment at the specified location
def add_inline_comment(self, comment="", line_number=None, string_position=None):
    """Record an inline comment as [comment, line_number, string_position]
    on self.inline_comments; any failure (e.g. no such list) is silently
    ignored, matching the original best-effort behavior."""
    try:
        entry = [comment, line_number, string_position]
        self.inline_comments.append(entry)
    except:
        pass
def add_row(
self, row_data)
Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys
def add_row(self, row_data):
    """Adds a single row given row_data, which can be an ordered
    list/tuple/ndarray or a dictionary keyed by column name. Rows whose
    length does not match the number of columns are rejected with a console
    message (original behavior preserved).

    Fixes relative to the original:
      * the length check was `not in [len(cols), len(cols)]` -- the
        duplicated entry made it a plain inequality, written as such now.
      * isinstance replaces exact type() comparisons (also accepts tuples,
        which the docstring already promised, and subclasses).
    """
    if len(row_data) != len(self.column_names):
        print(" could not add the row, dimensions do not match")
        return
    if isinstance(row_data, (list, tuple, np.ndarray)):
        self.data.append(row_data)
    elif isinstance(row_data, dict):
        # Reorder dictionary values to match the column order.
        data_list = [row_data[column_name] for column_name in self.column_names]
        self.data.append(data_list)
def build_string(
self, **temp_options)
Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model
# Renders the whole table (header, column names, data, footer) to a single
# string while recording each section's begin/end line numbers in
# self.options; temp_options are applied for the duration of the call, and
# inline comments are spliced back into the finished string.
# NOTE(review): `original_options=self.options` keeps a reference, not a
# copy, so temp_options mutate the "restored" dict too -- confirm whether the
# "does not permanently change the model" promise is intended to hold.
# Python 2 only: uses dict.iteritems().
def build_string(self,**temp_options): """Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model""" # store the original options to be put back after the string is made original_options=self.options for key,value in temp_options.iteritems(): self.options[key]=value section_end=0 next_section_begin=0 if self.options['data_table_element_separator'] is None: inner_element_spacing=0 else: inner_element_spacing=self.options['data_table_element_separator'].count('\n')-1 string_out="" between_section="" if self.options['data_table_element_separator'] is not None: between_section=self.options['data_table_element_separator'] if self.header is None: self.options['header_begin_line']=self.options['header_end_line']=None pass else: self.options["header_begin_line"]=0 if self.data is None and self.column_names is None and self.footer is None: string_out=self.get_header_string() self.options["header_end_line"]=None else: string_out=self.get_header_string()+between_section last_header_line=self.get_header_string().count('\n')+1 self.options["header_end_line"]=last_header_line next_section_begin=last_header_line+inner_element_spacing if self.column_names is None: self.options['column_names_begin_line']=self.options['column_names_end_line']=None pass else: self.options["column_names_begin_line"]=next_section_begin if self.data is None and self.footer is None: self.options["column_names_end_line"]=None string_out=string_out+self.get_column_names_string() else: string_out=string_out+self.get_column_names_string()+between_section last_column_names_line=self.get_column_names_string().count('\n')+\
self.options["column_names_begin_line"]+1 self.options["column_names_end_line"]=last_column_names_line next_section_begin=last_column_names_line+inner_element_spacing if self.data is None: self.options['data_begin_line']=self.options['data_end_line']=None pass else: 
self.options["data_begin_line"]=next_section_begin if self.footer is None: self.options["data_end_line"]=None string_out=string_out+self.get_data_string() else: string_out=string_out+self.get_data_string()+between_section last_data_line=self.get_data_string().count("\n")+\
self.options["data_begin_line"]+1 self.options["data_end_line"]=last_data_line next_section_begin=last_data_line+inner_element_spacing if self.footer is None: self.options['footer_begin_line']=self.options['footer_end_line']=None pass else: self.options["footer_begin_line"]=next_section_begin string_out=string_out+self.get_footer_string() self.options['footer_end_line']=None # set the options back after the string has been made if self.inline_comments is None: pass else: lines=string_out.splitlines() for comment in self.inline_comments: lines=insert_inline_comment(lines,comment=comment[0],line_number=comment[1], string_position=comment[2], begin_token=self.options['inline_comment_begin'], end_token=self.options['inline_comment_end']) string_out=string_list_collapse(lines,string_delimiter='\n') self.options=original_options return string_out
def change_unit_prefix(
self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz')
Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes
# Rescales one column between SI prefixes (e.g. Hz -> GHz) by multiplying
# every value by old_multiplier/new_multiplier, then rewrites the matching
# unit text inside column_descriptions, column_units and the column name.
# Numeric cells are scaled in place; string/int cells become scaled-float
# strings. On any failure the error is printed and re-raised.
# Python 2 only: uses FloatType/LongType/StringType/IntType and the
# `print type(...)` statement form.
def change_unit_prefix(self,column_selector=None,old_prefix=None,new_prefix=None,unit='Hz'): """Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes""" multipliers={"yotta":10.**24,"Y":10.**24,"zetta":10.**21,"Z":10.**21,"exa":10.**18,"E":10.**18,"peta":10.**15, "P":10.**15,"tera":10.**12,"T":10.**12,"giga":10.**9,"G":10.**9,"mega":10.**6,"M":10.**6, "kilo":10.**3,"k":10.**3,"hecto":10.**2,"h":10.**2,"deka":10.,"da":10.,None:1.,"":1., "deci":10.**-1,"d":10.**-1,"centi":10.**-2,"c":10.**-2,"milli":10.**-3,"m":10.**-3, "micro":10.**-6,"mu":10.**-6,u"\u00B5":10.**-6,"nano":10.**-9, "n":10.**-9,"pico":10.**-12,"p":10.**-12,"femto":10.**-15, "f":10.**-15,"atto":10.**-18,"a":10.**-18,"zepto":10.**-21,"z":10.**-21, "yocto":10.**-24,"y":10.**-24} # change column name into column index try: if old_prefix is None: old_prefix="" if new_prefix is None: new_prefix="" old_unit=old_prefix+unit new_unit=new_prefix+unit if column_selector in self.column_names: column_selector=self.column_names.index(column_selector) for index,row in enumerate(self.data): if type(self.data[index][column_selector]) in [FloatType,LongType]: #print "{0:e}".format(multipliers[old_prefix]/multipliers[new_prefix]) self.data[index][column_selector]=\
(multipliers[old_prefix]/multipliers[new_prefix])*self.data[index][column_selector] elif type(self.data[index][column_selector]) in [StringType,IntType]: self.data[index][column_selector]=\
str((multipliers[old_prefix]/multipliers[new_prefix])*float(self.data[index][column_selector])) else: print type(self.data[index][column_selector]) raise if self.options["column_descriptions"] is not None: 
old=self.options["column_descriptions"][column_selector] self.options["column_descriptions"][column_selector]=old.replace(old_unit,new_unit) if self.options["column_units"] is not None: old=self.options["column_units"][column_selector] self.options["column_units"][column_selector]=old.replace(old_unit,new_unit) if re.search(old_unit,self.column_names[column_selector]): old=self.column_names[column_selector] self.column_names[column_selector]=old.replace(old_unit,new_unit) except: print("Could not change the unit prefix of column {0}".format(column_selector)) raise
def find_line(
self, begin_token)
Finds the first line that has begin token in it
def find_line(self, begin_token):
    """Return the index of the first line in self.lines containing
    begin_token (treated as a regular-expression pattern); implicitly
    returns None when no line matches."""
    for position, text in enumerate(self.lines):
        if re.search(begin_token, text):
            return position
def get_column(
self, column_name=None, column_index=None)
Returns a column as a list given a column name or column index
def get_column(self, column_name=None, column_index=None):
    """Return one column of self.data as a list, selected by name
    (preferred) or by positional index; returns None when neither selector
    is given."""
    if column_name is not None:
        selector = self.column_names.index(column_name)
    elif column_index is not None:
        selector = column_index
    else:
        return None
    return [row[selector] for row in self.data]
def get_column_names_string(
self)
Returns the column names as a string using options
def get_column_names_string(self):
    """Build the column-names line as a string, wrapped in the
    column_names_begin/end tokens from self.options when those are set.
    A string column_names passes through unchanged; a list is joined with
    the configured delimiter; anything else is coerced via ensure_string."""
    begin = "" if self.options["column_names_begin_token"] is None \
        else self.options["column_names_begin_token"]
    end = "" if self.options["column_names_end_token"] is None \
        else self.options["column_names_end_token"]
    names = self.column_names
    if names is None:
        body = ""
    elif type(names) is StringType:
        body = names
    elif type(names) is ListType:
        body = list_to_string(names,
                              data_delimiter=self.options["column_names_delimiter"],
                              end="")
    else:
        body = ensure_string(names)
    return begin + body + end
def get_data_dictionary_list(
self, use_row_formatter_string=True)
Returns a python list with a row dictionary of form {column_name:data_column}
def get_data_dictionary_list(self, use_row_formatter_string=True):
    """Return the data as a list of row dictionaries
    {column_name: formatted_value}. Values are rendered with the table's
    row_formatter_string when available (and requested); otherwise with
    plain "{0}" formatting. Errors are reported and re-raised."""
    try:
        if self.options["row_formatter_string"] is None:
            use_row_formatter_string = False
        if use_row_formatter_string:
            pieces = self.options["row_formatter_string"].split("{delimiter}")
            # Re-key each positional field to {0} so it can format one value.
            formatters = [piece.replace("{" + str(position), "{0")
                          for position, piece in enumerate(pieces)]
        else:
            formatters = ["{0}"] * len(self.column_names)
        rows = []
        for line in self.data:
            row = {self.column_names[i]: formatters[i].format(value)
                   for i, value in enumerate(line)}
            rows.append(row)
        return rows
    except:
        print("Could not form a data_dictionary_list, check that row_formatter_string is properly defined")
        raise
def get_data_string(
self)
Returns the data as a string
# Renders self.data to a string, honoring data_begin/end tokens, row tokens,
# the data delimiter and row_formatter_string; handles data held as a single
# string, a list of strings, or a list of rows (lists/ndarrays), falling back
# to ensure_string for anything else.
# NOTE(review): this listing has lost its indentation, so the exact nesting
# of the begin/end-token branches in the list-of-rows case cannot be read
# with certainty here -- verify against the pre-extraction source before
# changing it.
def get_data_string(self): "Returns the data as a string" #Todo:refactor to cut out unused lines string_out="" if self.data is None: string_out= "" else: if type(self.data) is StringType: if self.options['data_begin_token'] is None: if self.options['data_end_token'] is None: string_out=self.data else: if re.search(self.options['data_end_token'],self.data): string_out=self.data else: string_out=self.data+self.options['data_end_token'] else: if self.options['data_end_token'] is None: if re.match(self.options['data_begin_token'],self.data): string_out=self.data else: string_out=self.options['data_begin_token']+self.data elif type(self.data) in [ListType,np.ndarray]: try: #If the first row is a string, we should strip all the tokens and add them back in if type(self.data[0]) is StringType: if self.options['data_begin_token'] is None: string_out=string_list_collapse(self.data) else: if re.match(self.options['data_begin_token'],self.data[0]): if self.options['data_end_token'] is None: string_out=string_list_collapse(self.data) else: if re.search(self.options['data_end_token'],self.data[-1]): string_out=string_list_collapse(self.data) else: string_out=string_list_collapse(self.data)+self.options['data_end_token'] else: if self.options['data_end_token'] is None: string_out=self.options['data_begin_token']+string_list_collapse(self.data) else: if re.search(self.options['data_end_token'],self.data[-1]): string_out=self.options['data_begin_token']+string_list_collapse(self.data) else: string_out=self.options['data_begin_token']+\
string_list_collapse(self.data)+\
self.options['data_end_token'] elif type(self.data[0]) in [ListType,np.ndarray]: prefix="" if self.options['data_begin_token'] is None: if self.options['data_end_token'] is None: string_out=list_list_to_string(self.data,data_delimiter=self.options['data_delimiter'], row_formatter_string=self.options['row_formatter_string'], line_begin=self.options["row_begin_token"], line_end=self.options["row_end_token"]) else: if 
self.options['data_end_token'] is None: string_out=self.options['data_begin_token']+\
list_list_to_string(self.data, data_delimiter=self.options['data_delimiter'], row_formatter_string=self.options['row_formatter_string'], line_begin=self.options["row_begin_token"], line_end=self.options["row_end_token"]) else: string_out=self.options['data_begin_token']+\
list_list_to_string(self.data, data_delimiter=self.options['data_delimiter'], row_formatter_string=\
self.options['row_formatter_string'], line_begin=self.options["row_begin_token"], line_end=self.options["row_end_token"])+\
self.options['data_end_token'] else: string_out=list_to_string(self.data, data_delimiter=self.options['data_delimiter'], row_formatter_string=self.options['row_formatter_string'], begin=self.options["row_begin_token"], end=self.options["row_end_token"]) except IndexError: pass else: string_out=ensure_string(self.data) return string_out
Returns the footer using options in self.options. If block comment is specified, and the footer is a list it will block comment out the footer. If comment_begin and comment_end are specified it will use those to represent each line of the footer. If footer_begin_token and/or footer_end_token are specified it will wrap the footer in those.
def get_header_string(
self)
Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those.
# Renders self.header to a string wrapped in the header begin/end tokens.
# When header_line_types is set, each line is emitted per its declared type
# (plain line, line comment, or a run of block-comment lines gathered into
# one block). Otherwise, when treat_header_as_comment is None/True, the whole
# header is rendered as line comments or one block comment depending on
# which comment tokens are configured; any other case is coerced through
# ensure_string.
# Python 2 only: relies on the StringType/ListType names from the types
# module.
def get_header_string(self): """Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those. """ string_out="" header_begin="" header_end="" if self.options["header_begin_token"] is None: header_begin="" else: header_begin=self.options["header_begin_token"] if self.options["header_end_token"] is None: header_end="" else: header_end=self.options["header_end_token"] # This writes the header if self.header is None: string_out= "" elif self.options["header_line_types"] is not None: for index,line in enumerate(self.options["header_line_types"]): if index == len(self.options["header_line_types"])-1: end='' else: end='\n' if line in ['header','header_line','normal']: string_out=string_out+self.header[index]+end elif line in ['line_comment','comment']: string_out=string_out+line_comment_string(self.header[index], comment_begin=self.options["comment_begin"], comment_end=self.options["comment_end"])+end elif line in ['block_comment','block']: if index-1<0: block_comment_begin=index block_comment_end=index+2 continue elif self.options["header_line_types"][index-1] not in ['block_comment','block']: block_comment_begin=index block_comment_end=index+2 continue else: if index+1>len(self.options["header_line_types"])-1: string_out=string_out+line_list_comment_string(self.header[block_comment_begin:], comment_begin=self.options['block_comment_begin'], comment_end=self.options['block_comment_end'], block=True)+end elif self.options["header_line_types"][index+1] in ['block_comment','block']: block_comment_end+=1 else: string_out=string_out+\
line_list_comment_string(self.header[block_comment_begin:block_comment_end], comment_begin=self.options['block_comment_begin'], 
comment_end=self.options['block_comment_end'], block=True)+end else: string_out=string_out+line elif self.options['treat_header_as_comment'] in [None,True] and self.options["header_line_types"] in [None]: # Just happens if the user has set self.header manually if type(self.header) is StringType: string_out=line_comment_string(self.header, comment_begin=self.options["comment_begin"], comment_end=self.options["comment_end"]) #string_out=re.sub('\n','',string_out,count=1) elif type(self.header) is ListType: if self.options['block_comment_begin'] is None: if self.options['comment_begin'] is None: string_out=string_list_collapse(self.header) else: string_out=line_list_comment_string(self.header,comment_begin=self.options['comment_begin'], comment_end=self.options['comment_end']) lines_out=string_out.splitlines() # if re.search('\n',self.options['comment_end']): # string_out=re.sub('\n','',string_out,count=1) #self.options["header_line_types"]=["line_comment" for line in self.header] else: string_out=line_list_comment_string(self.header,comment_begin=self.options['block_comment_begin'], comment_end=self.options['block_comment_end'],block=True) #self.options["header_line_types"]=["block_comment" for line in self.header] else: string_out=ensure_string(self.header,list_delimiter="\n",end_if_list="") return header_begin+string_out+header_end
def get_options(
self)
Prints the option list
def get_options(self):
    """Print every key = value pair in self.options, one per line."""
    # .items() (rather than the Python-2-only .iteritems()) works under both
    # Python 2 and Python 3.
    for key, value in self.options.items():
        print("{0} = {1}".format(key, value))
def get_options_by_element(
self, element_name)
returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer
def get_options_by_element(self, element_name):
    """Return the subset of self.options whose keys mention element_name
    (case-insensitive regex match on the stringified key). element_name is
    normally one of header, column_names, data, or footer."""
    return {key: value
            for key, value in self.options.items()
            if re.search(element_name, str(key), re.IGNORECASE)}
def get_row(
self, row_index=None)
Returns the row as a list specified by row_index
def get_row(self, row_index=None):
    """Return the data row at row_index as a list; None when no index is
    given."""
    if row_index is None:
        return None
    return self.data[row_index]
def is_valid(
self)
Returns True if ascii table conforms to its specification given by its own options
# Round-trip validation: builds this table's string, re-parses it into a
# fresh AsciiDataTable constructed with the same options (element values
# reset to [] / None), then compares the reparsed table to self for
# equality. Expensive -- a full rebuild plus reparse per call.
# Python 2 only: uses dict.iteritems().
def is_valid(self): """Returns True if ascii table conforms to its specification given by its own options""" options={} for key,value in self.options.iteritems(): options[key]=value # print("self.options[{0}] is {1} ".format(key,value)) for element in self.elements: if self.__dict__[element] is None: options[element]=None else: options[element]=[] options["validate"]=True newtable=AsciiDataTable(None,**options) lines=self.build_string().splitlines() for index,line in enumerate(lines): lines[index]=line+"\n" newtable.lines=lines newtable.__parse__() # print newtable.data # print newtable.column_names # print newtable #print_comparison(newtable.footer,None) newtable.update_model() # The new table rows are not being coerced into the right format #print newtable #newtable.update_model() #print newtable.options #print self.options #print newtable.data # print newtable.options==self.options # for option_key,option_value in newtable.options.iteritems(): # print("New Table Option {0} is {1} ".format(option_key,option_value)) # print("self.options[{0}] is {1} ".format(option_key,self.options[option_key])) # print_comparison(option_value,self.options[option_key]) # #print self return self==newtable
def lines_defined(
self)
Returns True if begin_line and end_line are defined for all elements that are not None
# True when every non-None element (excluding inline_comments/metadata) has
# both its %s_begin_line and %s_end_line options set; the final such element
# is allowed to lack an end line as long as its begin line is set.
# NOTE(review): `truth_table[-1]` raises IndexError when no element
# qualifies (all None) -- confirm callers guarantee at least one element.
def lines_defined(self): """If begin_line and end_line for all elements that are None are defined returns True""" truth_table=[] last_element="" output=False for index,element in enumerate(self.elements): if element not in ['inline_comments','metadata'] and self.__dict__[element] is not None: try: last_element=element if not None in [self.options['%s_begin_line'%element],self.options['%s_end_line'%element]]: truth_table.append(True) else: truth_table.append(False) except: return False #print truth_table # The last_line of the last element is fine to be none if truth_table[-1] is False: if self.options['%s_begin_line'%last_element] is not None: truth_table[-1]=True if False in truth_table: output=False else: output=True #print output return output
Moves the DataTable's footer to the header and updates the model
def remove_column(
self, column_name=None, column_index=None)
Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter
def remove_column(self, column_name=None, column_index=None):
    """Removes the column specified by column_name (preferred) or
    column_index and updates the model. The column is removed from
    column_names, every data row, and (if present) options['column_types'].
    Does nothing when no selector is given or the name is not found.

    The original was an unimplemented `pass` stub; this implements the
    behavior its docstring already promised.
    """
    if column_name is not None:
        try:
            column_index = self.column_names.index(column_name)
        except ValueError:
            print("Could not remove column {0}, name not found".format(column_name))
            return
    if column_index is None:
        return
    self.column_names.pop(column_index)
    for row in self.data:
        row.pop(column_index)
    if self.options.get("column_types"):
        self.options["column_types"].pop(column_index)
    # The positional row formatter is now stale; drop it so string building
    # falls back to the generic per-value path instead of referencing a
    # missing field number.
    self.options["row_formatter_string"] = None
    self.update_model()
def remove_row(
self, row_index)
Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1
def remove_row(self, row_index):
    """Delete the data row at row_index (0 for the first data row, -1 for
    the last) and refresh the model."""
    del self.data[row_index]
    self.update_model()
def save(
self, path=None, **temp_options)
" Saves the file, to save in another ascii format specify elements in temp_options, the options specified do not permanently change the object's options. If path is supplied it saves the file to that path otherwise uses the object's attribute path to define the saving location
def save(self, path=None, **temp_options):
    """Save the table to disk. temp_options are applied only for the
    duration of this call and do not permanently change the object's
    options. When path is None the object's own path attribute is used.

    Fixes relative to the original:
      * self.options is snapshot-copied before applying temp_options -- the
        original kept only a reference, so "temporary" options leaked
        permanently into self.options.
      * the file handle is managed with a context manager and the options
        are restored in a finally block, so neither leaks when
        build_string or the write raises.
      * dict.items() replaces the Python-2-only dict.iteritems().
    """
    original_options = dict(self.options)
    for key, value in temp_options.items():
        self.options[key] = value
    try:
        out_string = self.build_string(**temp_options)
        if path is None:
            path = self.path
        with open(path, 'w') as file_out:
            file_out.write(out_string)
    finally:
        self.options = original_options
def save_schema(
self, path=None, format=None)
Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves
def save_schema(self, path=None, format=None):
    """Saves the table's options as a pickled dictionary (default) or, for
    format in ['txt','text','.txt'], as a sorted "key : value" text file
    with newlines escaped. When path is None, a name is generated
    automatically via auto_name.

    Fixes relative to the original:
      * sorted(...) replaces keys.sort() -- Python 3's dict_keys view has no
        .sort() method (sorted() behaves identically on Python 2 lists).
      * both file handles are managed with context managers; the original
        never closed the pickle file.
    """
    if path is None:
        path = auto_name(self.name.replace('.' + self.options["extension"], ""),
                         'Schema', self.options["directory"], 'txt')
    if format in [None, 'python', 'pickle']:
        with open(path, 'wb') as pickle_file:
            pickle.dump(self.options, pickle_file)
    elif format in ['txt', 'text', '.txt']:
        with open(path, 'w') as file_out:
            for key in sorted(self.options.keys()):
                # Escape newlines so each option stays on one line.
                out_key = str(key).replace("\n", "\\n")
                out_value = str(self.options[key]).replace("\n", "\\n")
                file_out.write("{0} : {1} \n".format(out_key, out_value))
def update_column_names(
self)
Update column names adds the value x# for any column that exists in self.data that is not named
def update_column_names(self):
    """Ensure column_names is a list consistent with self.data: split a
    delimited string, synthesize x0..xN names when absent, and pad with
    x<i> for any unnamed trailing columns. No-op when there is no data.

    Fix relative to the original: isinstance(..., str) replaces the
    Python-2-only `type(...) is StringType` comparison (identical behavior
    on Python 2, where StringType is str; works on Python 3).
    """
    if self.data is None:
        return
    if isinstance(self.column_names, str):
        self.column_names = split_row(self.column_names, self.options["column_names_delimiter"])
        return
    if self.column_names is None:
        # No names at all: synthesize one per column of the first data row.
        self.column_names = ["x" + str(index) for index, _ in enumerate(self.data[0])]
        return
    if len(self.column_names) < len(self.data[0]):
        # Fewer names than columns: pad the tail with generated names.
        for index in range(len(self.column_names), len(self.data[0])):
            self.column_names.append("x" + str(index))
def update_import_options(
self, import_table)
Updates the options in the import table
def update_import_options(self, import_table):
    """Updates the begin/end line and token options from an import table.

    import_table is indexed in element order (header, column_names, data,
    footer); each entry holds [begin_line, end_line, begin_token, end_token]
    for that element. Entries for elements that are currently None are
    skipped.

    The leftover debug prints of the loop variables were removed.
    """
    for index, element in enumerate(['header', 'column_names', 'data', 'footer']):
        if self.__dict__[element] is not None:
            [self.options['%s_begin_line' % element],
             self.options['%s_end_line' % element],
             self.options['%s_begin_token' % element],
             self.options['%s_end_token' % element]] = import_table[index][:]
def update_index(
self)
Updates the index column if it exists, otherwise exits quietly
def update_index(self):
    """Renumbers the 'index' column to 0..n-1 if it exists; otherwise exits
    quietly."""
    if 'index' not in self.column_names:
        return
    try:
        # Normally column 0, but look it up to be safe.
        position = self.column_names.index('index')
        for row_number, row in enumerate(self.data):
            row[position] = row_number
    except:
        # Best-effort by design: any failure leaves the table untouched.
        pass
def update_model(
self)
Updates the model after a change has been made. If you add anything to the attributes of the model, or change them, this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in delimiters, etc.
# Re-syncs all derived state after an edit: renumbers the 'index' column,
# strips stray newlines from header/column_names/footer entries, pads or
# regenerates column names, re-coerces data cell types via
# convert_all_rows, and rebuilds self.string / self.lines from
# build_string().
# NOTE(review): relies on the sibling methods update_index /
# update_column_names and the project helper convert_all_rows.
def update_model(self): """Updates the model after a change has been made. If you add anything to the attributes of the model, or change this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in deliminators etc. """ if self.column_names is not None and 'index' in self.column_names: self.update_index() #make sure there are no "\n" characters in the element lists (if so replace them with "") for data this is # done on import list_types=["header","column_names","footer"] for element in list_types: if self.__dict__[element] is not None: for index,item in enumerate(self.__dict__[element]): self.__dict__[element][index]=item.replace("\n","") self.update_column_names() if self.data is not None: self.data=convert_all_rows(self.data,self.options["column_types"]) self.string=self.build_string() self.lines=self.string.splitlines()
class PowerRawModel
Class that deals with the PowerRaw Files after conversion to Ascii using Ron Ginley's converter. These files typically have header information seperated from data by !! Header format is: Line 1: Spid$ - identification of type of system used Line 2: Systemletter$ - letter name indicating which system was used Line 3: Conncal$ - connector type from the system calibration Line 4: Connectors$ - connector type used for the measurement Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power) Line 6: Datea$ - date of measurement Line 7: Timea$ - time of measurement Line 8: Programm$ - name of program used Line 9: Rev$ - program revision Line 10: Opr$ - operator Line 11: Cfile$ - calibration name Line 12: Cdate$ - calibration date Line 13: Sport - identification of which port or direction was used for measurement Line 14: Numconnects ? number of disconnect/reconnect cycles Line 15: Numrepeats ? number of repeat measurements for each connect (usually 1) Line 16: Nbs ? not sure Line 17: Nfreq ? number of frequencies Line 18: Startfreq ? data row pointer for bdat files Line 19: Devicedescript$ - description of device being measured or of test being done Line 20: Devicenum$ - Identifying number for device ? used for file names
class PowerRawModel(AsciiDataTable):
    """Handles PowerRaw files after conversion to ascii using Ron Ginley's
    converter.

    Header information is separated from the data section by a line
    containing ``!!``. The twenty header lines hold, in order: system id,
    system letter, calibration connector type, measurement connector type,
    measurement type, date, time, program name, program revision, operator,
    calibration name, calibration date, port/direction used, number of
    disconnect/reconnect cycles, number of repeats per connect, Nbs, number
    of frequencies, start frequency (bdat row pointer), device description
    and device id.
    """

    def __init__(self, file_path=None, **options):
        """Initializes the PowerRaw class; opens an existing file when
        file_path is given, else creates an empty container."""
        defaults = {"data_delimiter": ",",
                    "column_names_delimiter": ",",
                    "specific_descriptor": 'Raw',
                    "general_descriptor": 'Power',
                    "extension": 'txt',
                    "comment_begin": "#",
                    "comment_end": "\n",
                    "column_types": ['float', 'int', 'int', 'float', 'float', 'float', 'float'],
                    "column_descriptions": {"Frequency": "Frequency in GHz",
                                            "Direction": "Direction of connects, may be unused",
                                            "Connect": "Connect number",
                                            "magS11": "Linear magnitude for S11",
                                            "argS11": "Phase in degrees for S11",
                                            "Efficiency": "Effective Efficiency",
                                            "Calibration_Factor": "Effective efficiency modified by reflection coefficient"},
                    "header": None,
                    "column_names": ["Frequency", "Direction", "Connect", "magS11",
                                     "argS11", "Efficiency", "Calibration_Factor"],
                    "column_names_end_token": "\n",
                    "data": None,
                    'row_formatter_string': "{0:.5g}{delimiter}{1}{delimiter}{2}{delimiter}"
                                            "{3:.5g}{delimiter}{4:.3f}{delimiter}{5:.5g}{delimiter}{6:.5g}",
                    "data_table_element_separator": None}
        self.options = {}
        for option_key, option_value in defaults.iteritems():
            self.options[option_key] = option_value
        for option_key, option_value in options.iteritems():
            self.options[option_key] = option_value
        # Define method aliases if the Alias module is available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.__read_and_fix__(file_path)
        AsciiDataTable.__init__(self, None, **self.options)
        self.path = file_path
        self.structure_metadata()

    def __read_and_fix__(self, file_path=None):
        """Reads in the PowerRaw file and fixes any problems with
        delimiters etc., splitting the header from the data at the line
        containing !!."""
        lines = []
        in_file = open(file_path, 'r')
        for line_number, line in enumerate(in_file):
            lines.append(line)
            if re.search("!!", line):
                # data starts on the line after the !! marker
                data_begin_line = line_number + 1
        self.lines = lines
        parse_options = {"delimiter": ", ", "row_end_token": '\n'}
        data = parse_lines(lines[data_begin_line:], **parse_options)
        self.options["data"] = data
        self.options["header"] = lines[:data_begin_line - 1]

    def structure_metadata(self):
        """Builds self.metadata, a dictionary of key,value pairs extracted
        from the twenty header lines in order."""
        keys = ["System_Id", "System_Letter", "Connector_Type_Calibration",
                "Connector_Type_Measurement", "Measurement_Type", "Measurement_Date",
                "Measurement_Time", "Program_Used", "Program_Revision", "Operator",
                "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
                "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
                "Device_Description", "Device_Id"]
        self.metadata = {}
        for header_position, metadata_key in enumerate(keys):
            self.metadata[metadata_key] = self.header[header_position]
Ancestors (in MRO)
- PowerRawModel
- pyMez.Code.DataHandlers.GeneralModels.AsciiDataTable
Instance variables
var options
var path
Methods
def __init__(
self, file_path=None, **options)
Initializes the PowerRaw class, if a file_path is specified opens an existing file, else creates an empty container
def __init__(self, file_path=None, **options):
    """Initializes the PowerRaw class; opens an existing file when
    file_path is given, else creates an empty container."""
    defaults = {"data_delimiter": ",",
                "column_names_delimiter": ",",
                "specific_descriptor": 'Raw',
                "general_descriptor": 'Power',
                "extension": 'txt',
                "comment_begin": "#",
                "comment_end": "\n",
                "column_types": ['float', 'int', 'int', 'float', 'float', 'float', 'float'],
                "column_descriptions": {"Frequency": "Frequency in GHz",
                                        "Direction": "Direction of connects, may be unused",
                                        "Connect": "Connect number",
                                        "magS11": "Linear magnitude for S11",
                                        "argS11": "Phase in degrees for S11",
                                        "Efficiency": "Effective Efficiency",
                                        "Calibration_Factor": "Effective efficiency modified by reflection coefficient"},
                "header": None,
                "column_names": ["Frequency", "Direction", "Connect", "magS11",
                                 "argS11", "Efficiency", "Calibration_Factor"],
                "column_names_end_token": "\n",
                "data": None,
                'row_formatter_string': "{0:.5g}{delimiter}{1}{delimiter}{2}{delimiter}"
                                        "{3:.5g}{delimiter}{4:.3f}{delimiter}{5:.5g}{delimiter}{6:.5g}",
                "data_table_element_separator": None}
    # defaults first, then caller overrides
    self.options = {}
    for option_key, option_value in defaults.iteritems():
        self.options[option_key] = option_value
    for option_key, option_value in options.iteritems():
        self.options[option_key] = option_value
    # Define method aliases if the Alias module is available
    if METHOD_ALIASES:
        for command in alias(self):
            exec(command)
    if file_path is not None:
        self.__read_and_fix__(file_path)
    AsciiDataTable.__init__(self, None, **self.options)
    self.path = file_path
    self.structure_metadata()
def add_column(
self, column_name=None, column_type=None, column_data=None, format_string=None)
Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows
def add_column(self, column_name=None, column_type=None, column_data=None, format_string=None):
    """Adds a column with column_name and column_type.

    If column_data is supplied and its length matches the current number of
    rows it is appended row by row; otherwise existing rows are padded with
    self.options['empty_value'] and any column_data items are appended as
    new rows. On failure the original column names are restored and the
    exception re-raised.

    Bug fixes: list.append() returns None, so the original assignments
    ``column_types = column_types.append(...)`` and
    ``data[index] = row.append(...)`` destroyed the very lists they meant to
    extend; both now append in place. A None column_data no longer crashes
    the length comparison.
    """
    original_column_names = self.column_names[:]
    try:
        self.column_names.append(column_name)
        if self.options["column_types"]:
            # append in place; the original reassigned the None return value
            self.options["column_types"].append(column_type)
        if column_data is not None and len(column_data) == len(self.data):
            for index, row in enumerate(self.data):
                new_row = row[:]
                new_row.append(column_data[index])
                self.data[index] = new_row
        else:
            # pad existing rows, then add any leftover data as new rows
            for row in self.data:
                row.append(self.options['empty_value'])
            if column_data is not None:
                for item in column_data:
                    empty_row = [self.options['empty_value'] for _ in original_column_names]
                    empty_row.append(item)
                    self.add_row(empty_row)
        if self.options["row_formatter_string"] is not None:
            if format_string is None:
                self.options["row_formatter_string"] += \
                    '{delimiter}' + "{" + str(len(self.column_names) - 1) + "}"
            else:
                self.options["row_formatter_string"] += format_string
    except:
        self.column_names = original_column_names
        print("Could not add columns")
        raise
def add_index(
self)
Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position
def add_index(self):
    """Inserts a 0-referenced 'index' column at position 0; does nothing
    (beyond a notice) when one already exists."""
    if 'index' in self.column_names:
        print("Add Index passed")
        return
    self.column_names.insert(0, 'index')
    for row_number, row in enumerate(self.data):
        row.insert(0, row_number)
    if self.options['column_types']:
        self.options['column_types'].insert(0, 'int')
    if self.options['row_formatter_string']:
        # shift every positional field up by one and prepend a {0} field
        pieces = self.options['row_formatter_string'].split("{delimiter}")
        shifted = [pieces[i].replace(str(i), str(i + 1)) for i in range(len(pieces))]
        rebuilt = string_list_collapse(shifted, string_delimiter="{delimiter}")
        self.options['row_formatter_string'] = '{0}{delimiter}' + rebuilt
def add_inline_comment(
self, comment='', line_number=None, string_position=None)
Adds an inline in the specified location
def add_inline_comment(self, comment="", line_number=None, string_position=None):
    """Appends [comment, line_number, string_position] to the table's
    inline-comment list."""
    try:
        entry = [comment, line_number, string_position]
        self.inline_comments.append(entry)
    except:
        # Quietly ignore tables without an inline_comments list.
        pass
def add_row(
self, row_data)
Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys
def add_row(self, row_data):
    """Adds a single row given row_data, which can be an ordered
    list/array or a dictionary with column names as keys.

    Rows whose length does not match the number of columns are rejected
    with a message (original behavior). The redundant duplicated length
    check was collapsed to a single comparison and the Py2-only
    type-object membership tests replaced with isinstance().
    """
    if len(row_data) != len(self.column_names):
        print(" could not add the row, dimensions do not match")
        return
    if isinstance(row_data, (list, np.ndarray)):
        self.data.append(row_data)
    elif isinstance(row_data, dict):
        # order the values by column name
        self.data.append([row_data[column_name] for column_name in self.column_names])
def build_string(
self, **temp_options)
Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model
# Serializes header / column_names / data / footer into one string,
# updating the *_begin_line / *_end_line options as each section is
# emitted, optionally separated by data_table_element_separator, then
# splices in any inline comments via insert_inline_comment.
# NOTE(review): original_options=self.options stores a reference, not a
# copy, so the temp_options written into self.options are NOT rolled back
# by the restore at the end -- confirm and fix with dict(self.options).
def build_string(self,**temp_options): """Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model""" # store the original options to be put back after the string is made original_options=self.options for key,value in temp_options.iteritems(): self.options[key]=value section_end=0 next_section_begin=0 if self.options['data_table_element_separator'] is None: inner_element_spacing=0 else: inner_element_spacing=self.options['data_table_element_separator'].count('\n')-1 string_out="" between_section="" if self.options['data_table_element_separator'] is not None: between_section=self.options['data_table_element_separator'] if self.header is None: self.options['header_begin_line']=self.options['header_end_line']=None pass else: self.options["header_begin_line"]=0 if self.data is None and self.column_names is None and self.footer is None: string_out=self.get_header_string() self.options["header_end_line"]=None else: string_out=self.get_header_string()+between_section last_header_line=self.get_header_string().count('\n')+1 self.options["header_end_line"]=last_header_line next_section_begin=last_header_line+inner_element_spacing if self.column_names is None: self.options['column_names_begin_line']=self.options['column_names_end_line']=None pass else: self.options["column_names_begin_line"]=next_section_begin if self.data is None and self.footer is None: self.options["column_names_end_line"]=None string_out=string_out+self.get_column_names_string() else: string_out=string_out+self.get_column_names_string()+between_section last_column_names_line=self.get_column_names_string().count('\n')+\ self.options["column_names_begin_line"]+1 self.options["column_names_end_line"]=last_column_names_line next_section_begin=last_column_names_line+inner_element_spacing if self.data is None: self.options['data_begin_line']=self.options['data_end_line']=None pass else: 
self.options["data_begin_line"]=next_section_begin if self.footer is None: self.options["data_end_line"]=None string_out=string_out+self.get_data_string() else: string_out=string_out+self.get_data_string()+between_section last_data_line=self.get_data_string().count("\n")+\ self.options["data_begin_line"]+1 self.options["data_end_line"]=last_data_line next_section_begin=last_data_line+inner_element_spacing if self.footer is None: self.options['footer_begin_line']=self.options['footer_end_line']=None pass else: self.options["footer_begin_line"]=next_section_begin string_out=string_out+self.get_footer_string() self.options['footer_end_line']=None # set the options back after the string has been made if self.inline_comments is None: pass else: lines=string_out.splitlines() for comment in self.inline_comments: lines=insert_inline_comment(lines,comment=comment[0],line_number=comment[1], string_position=comment[2], begin_token=self.options['inline_comment_begin'], end_token=self.options['inline_comment_end']) string_out=string_list_collapse(lines,string_delimiter='\n') self.options=original_options return string_out
def change_unit_prefix(
self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz')
Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes
def change_unit_prefix(self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz'):
    """Changes the SI prefix of the units of the column specified by
    column_selector (column name or index).

    Example: self.change_unit_prefix(column_selector='Frequency',
    old_prefix=None, new_prefix='G', unit='Hz') converts a column from Hz to
    GHz. Data values are rescaled, and column_descriptions, column_units and
    the column name itself are updated where present. See
    http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes.

    Bug fix: the debugging ``print type(...)`` Python-2 print STATEMENT
    (a syntax error under Python 3, and inconsistent with the print() calls
    used everywhere else in this module) is now a print() function call.
    """
    multipliers = {"yotta": 10.**24, "Y": 10.**24, "zetta": 10.**21, "Z": 10.**21,
                   "exa": 10.**18, "E": 10.**18, "peta": 10.**15, "P": 10.**15,
                   "tera": 10.**12, "T": 10.**12, "giga": 10.**9, "G": 10.**9,
                   "mega": 10.**6, "M": 10.**6, "kilo": 10.**3, "k": 10.**3,
                   "hecto": 10.**2, "h": 10.**2, "deka": 10., "da": 10., None: 1., "": 1.,
                   "deci": 10.**-1, "d": 10.**-1, "centi": 10.**-2, "c": 10.**-2,
                   "milli": 10.**-3, "m": 10.**-3, "micro": 10.**-6, "mu": 10.**-6,
                   u"\u00B5": 10.**-6, "nano": 10.**-9, "n": 10.**-9,
                   "pico": 10.**-12, "p": 10.**-12, "femto": 10.**-15, "f": 10.**-15,
                   "atto": 10.**-18, "a": 10.**-18, "zepto": 10.**-21, "z": 10.**-21,
                   "yocto": 10.**-24, "y": 10.**-24}
    try:
        if old_prefix is None:
            old_prefix = ""
        if new_prefix is None:
            new_prefix = ""
        old_unit = old_prefix + unit
        new_unit = new_prefix + unit
        # change a column name into a column index
        if column_selector in self.column_names:
            column_selector = self.column_names.index(column_selector)
        scale = multipliers[old_prefix] / multipliers[new_prefix]
        for index, row in enumerate(self.data):
            if type(self.data[index][column_selector]) in [FloatType, LongType]:
                self.data[index][column_selector] = scale * self.data[index][column_selector]
            elif type(self.data[index][column_selector]) in [StringType, IntType]:
                # string/int cells are stored back as strings (original behavior)
                self.data[index][column_selector] = \
                    str(scale * float(self.data[index][column_selector]))
            else:
                # unsupported cell type: report it and bail out via the except below
                print(type(self.data[index][column_selector]))
                raise
        if self.options["column_descriptions"] is not None:
            old = self.options["column_descriptions"][column_selector]
            self.options["column_descriptions"][column_selector] = old.replace(old_unit, new_unit)
        if self.options["column_units"] is not None:
            old = self.options["column_units"][column_selector]
            self.options["column_units"][column_selector] = old.replace(old_unit, new_unit)
        if re.search(old_unit, self.column_names[column_selector]):
            old = self.column_names[column_selector]
            self.column_names[column_selector] = old.replace(old_unit, new_unit)
    except:
        print("Could not change the unit prefix of column {0}".format(column_selector))
        raise
def find_line(
self, begin_token)
Finds the first line that has begin token in it
def find_line(self, begin_token):
    """Returns the index of the first line containing begin_token, or None
    when no line matches."""
    for position, text in enumerate(self.lines):
        if re.search(begin_token, text):
            return position
def get_column(
self, column_name=None, column_index=None)
Returns a column as a list given a column name or column index
def get_column(self, column_name=None, column_index=None):
    """Returns a column as a list, selected by name (takes precedence) or
    by index; returns None when neither selector is given."""
    if column_name is not None:
        selector = self.column_names.index(column_name)
    elif column_index is not None:
        selector = column_index
    else:
        return
    return [row[selector] for row in self.data]
def get_column_names_string(
self)
Returns the column names as a string using options
def get_column_names_string(self):
    """Renders the column names as a single string using the
    column_names_* options (begin/end tokens and delimiter)."""
    begin = "" if self.options["column_names_begin_token"] is None \
        else self.options["column_names_begin_token"]
    end = "" if self.options["column_names_end_token"] is None \
        else self.options["column_names_end_token"]
    if self.column_names is None:
        body = ""
    elif type(self.column_names) is StringType:
        body = self.column_names
    elif type(self.column_names) is ListType:
        body = list_to_string(self.column_names,
                              data_delimiter=self.options["column_names_delimiter"],
                              end="")
    else:
        body = ensure_string(self.column_names)
    return begin + body + end
def get_data_dictionary_list(
self, use_row_formatter_string=True)
Returns a python list with a row dictionary of form {column_name:data_column}
def get_data_dictionary_list(self, use_row_formatter_string=True):
    """Returns a list of row dictionaries of the form
    {column_name: formatted_value}, formatting each cell with the row
    formatter when available."""
    try:
        if self.options["row_formatter_string"] is None:
            use_row_formatter_string = False
        if use_row_formatter_string:
            # turn each positional field "{n...}" into a standalone "{0...}"
            pieces = self.options["row_formatter_string"].split("{delimiter}")
            formatters = [piece.replace("{" + str(position), "{0")
                          for position, piece in enumerate(pieces)]
        else:
            formatters = ["{0}" for _ in self.column_names]
        return [{self.column_names[i]: formatters[i].format(cell)
                 for i, cell in enumerate(row)} for row in self.data]
    except:
        print("Could not form a data_dictionary_list, check that row_formatter_string is properly defined")
        raise
def get_data_string(
self)
Returns the data as a string
# Renders self.data as one string. Handles three storage shapes: a single
# string, a list of row strings, and a list of rows (list/ndarray cells,
# rendered via list_list_to_string with the row formatter). When
# data_begin_token / data_end_token options are set the result is wrapped
# in them, avoiding double-wrapping when the data already carries them.
def get_data_string(self): "Returns the data as a string" #Todo:refactor to cut out unused lines string_out="" if self.data is None: string_out= "" else: if type(self.data) is StringType: if self.options['data_begin_token'] is None: if self.options['data_end_token'] is None: string_out=self.data else: if re.search(self.options['data_end_token'],self.data): string_out=self.data else: string_out=self.data+self.options['data_end_token'] else: if self.options['data_end_token'] is None: if re.match(self.options['data_begin_token'],self.data): string_out=self.data else: string_out=self.options['data_begin_token']+self.data elif type(self.data) in [ListType,np.ndarray]: try: #If the first row is a string, we should strip all the tokens and add them back in if type(self.data[0]) is StringType: if self.options['data_begin_token'] is None: string_out=string_list_collapse(self.data) else: if re.match(self.options['data_begin_token'],self.data[0]): if self.options['data_end_token'] is None: string_out=string_list_collapse(self.data) else: if re.search(self.options['data_end_token'],self.data[-1]): string_out=string_list_collapse(self.data) else: string_out=string_list_collapse(self.data)+self.options['data_end_token'] else: if self.options['data_end_token'] is None: string_out=self.options['data_begin_token']+string_list_collapse(self.data) else: if re.search(self.options['data_end_token'],self.data[-1]): string_out=self.options['data_begin_token']+string_list_collapse(self.data) else: string_out=self.options['data_begin_token']+\ string_list_collapse(self.data)+\ self.options['data_end_token'] elif type(self.data[0]) in [ListType,np.ndarray]: prefix="" if self.options['data_begin_token'] is None: if self.options['data_end_token'] is None: string_out=list_list_to_string(self.data,data_delimiter=self.options['data_delimiter'], row_formatter_string=self.options['row_formatter_string'], line_begin=self.options["row_begin_token"], line_end=self.options["row_end_token"]) else: if 
self.options['data_end_token'] is None: string_out=self.options['data_begin_token']+\ list_list_to_string(self.data, data_delimiter=self.options['data_delimiter'], row_formatter_string=self.options['row_formatter_string'], line_begin=self.options["row_begin_token"], line_end=self.options["row_end_token"]) else: string_out=self.options['data_begin_token']+\ list_list_to_string(self.data, data_delimiter=self.options['data_delimiter'], row_formatter_string=\ self.options['row_formatter_string'], line_begin=self.options["row_begin_token"], line_end=self.options["row_end_token"])+\ self.options['data_end_token'] else: string_out=list_to_string(self.data, data_delimiter=self.options['data_delimiter'], row_formatter_string=self.options['row_formatter_string'], begin=self.options["row_begin_token"], end=self.options["row_end_token"]) except IndexError: pass else: string_out=ensure_string(self.data) return string_out
Returns the footer using options in self.options. If block comment is specified, and the footer is a list it will block comment out the footer. If comment_begin and comment_end are specified it will use those to represent each line of the footer. If footer_begin_token and/or footer_end_token are specified it will wrap the footer in those.
def get_header_string(
self)
Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those.
# Renders self.header as one string. When header_line_types is set, each
# header line is emitted per its marker ('header'/'comment'/'block'...),
# accumulating consecutive 'block' lines into one block comment. Otherwise,
# when treat_header_as_comment is None/True, the whole header is rendered
# as line comments or one block comment depending on the comment options.
# The result is wrapped in header_begin_token / header_end_token.
def get_header_string(self): """Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those. """ string_out="" header_begin="" header_end="" if self.options["header_begin_token"] is None: header_begin="" else: header_begin=self.options["header_begin_token"] if self.options["header_end_token"] is None: header_end="" else: header_end=self.options["header_end_token"] # This writes the header if self.header is None: string_out= "" elif self.options["header_line_types"] is not None: for index,line in enumerate(self.options["header_line_types"]): if index == len(self.options["header_line_types"])-1: end='' else: end='\n' if line in ['header','header_line','normal']: string_out=string_out+self.header[index]+end elif line in ['line_comment','comment']: string_out=string_out+line_comment_string(self.header[index], comment_begin=self.options["comment_begin"], comment_end=self.options["comment_end"])+end elif line in ['block_comment','block']: if index-1<0: block_comment_begin=index block_comment_end=index+2 continue elif self.options["header_line_types"][index-1] not in ['block_comment','block']: block_comment_begin=index block_comment_end=index+2 continue else: if index+1>len(self.options["header_line_types"])-1: string_out=string_out+line_list_comment_string(self.header[block_comment_begin:], comment_begin=self.options['block_comment_begin'], comment_end=self.options['block_comment_end'], block=True)+end elif self.options["header_line_types"][index+1] in ['block_comment','block']: block_comment_end+=1 else: string_out=string_out+\ line_list_comment_string(self.header[block_comment_begin:block_comment_end], comment_begin=self.options['block_comment_begin'], 
comment_end=self.options['block_comment_end'], block=True)+end else: string_out=string_out+line elif self.options['treat_header_as_comment'] in [None,True] and self.options["header_line_types"] in [None]: # Just happens if the user has set self.header manually if type(self.header) is StringType: string_out=line_comment_string(self.header, comment_begin=self.options["comment_begin"], comment_end=self.options["comment_end"]) #string_out=re.sub('\n','',string_out,count=1) elif type(self.header) is ListType: if self.options['block_comment_begin'] is None: if self.options['comment_begin'] is None: string_out=string_list_collapse(self.header) else: string_out=line_list_comment_string(self.header,comment_begin=self.options['comment_begin'], comment_end=self.options['comment_end']) lines_out=string_out.splitlines() # if re.search('\n',self.options['comment_end']): # string_out=re.sub('\n','',string_out,count=1) #self.options["header_line_types"]=["line_comment" for line in self.header] else: string_out=line_list_comment_string(self.header,comment_begin=self.options['block_comment_begin'], comment_end=self.options['block_comment_end'],block=True) #self.options["header_line_types"]=["block_comment" for line in self.header] else: string_out=ensure_string(self.header,list_delimiter="\n",end_if_list="") return header_begin+string_out+header_end
def get_options(
self)
Prints the option list
def get_options(self):
    """Prints every ``key = value`` pair in self.options.

    Uses dict.items() instead of the Python-2-only iteritems() so the
    method also runs under Python 3, consistent with the print() function
    calls used throughout this module.
    """
    for key, value in self.options.items():
        print("{0} = {1}".format(key, value))
def get_options_by_element(
self, element_name)
returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer
def get_options_by_element(self, element_name):
    """Returns the subset of self.options whose keys mention element_name
    (case-insensitive). element_name should be header, column_names, data
    or footer."""
    matching_keys = [key for key in self.options.keys()
                     if re.search(element_name, str(key), re.IGNORECASE)]
    return {key: self.options[key] for key in matching_keys}
def get_row(
self, row_index=None)
Returns the row as a list specified by row_index
def get_row(self, row_index=None):
    """Returns the data row at row_index as a list, or None when no index
    is given."""
    if row_index is None:
        return None
    return self.data[row_index]
def is_valid(
self)
Returns True if ascii table conforms to its specification given by its own options
def is_valid(self):
    """Returns True when the table round-trips: a fresh AsciiDataTable
    parsed from this table's own string output must compare equal to self."""
    # Copy current options, blanking each present element so the new table
    # parses it from the string instead of inheriting the value.
    round_trip_options = {}
    for key, value in self.options.iteritems():
        round_trip_options[key] = value
    for element in self.elements:
        round_trip_options[element] = None if self.__dict__[element] is None else []
    round_trip_options["validate"] = True
    newtable = AsciiDataTable(None, **round_trip_options)
    rebuilt_lines = [line + "\n" for line in self.build_string().splitlines()]
    newtable.lines = rebuilt_lines
    newtable.__parse__()
    newtable.update_model()
    return self == newtable
def lines_defined(
self)
If begin_line and end_line for all elements that are None are defined returns True
def lines_defined(self):
    """Return True when every non-None element (excluding inline_comments and
    metadata) has both its begin and end line numbers defined in self.options.

    The final element is allowed to have an undefined end line as long as its
    begin line is defined. Returns False when an expected line-number option is
    missing, or when no element is defined at all (the original raised
    IndexError on an empty truth table).
    """
    truth_table = []
    last_element = ""
    for element in self.elements:
        # inline comments and metadata carry no begin/end line options
        if element in ['inline_comments', 'metadata'] or self.__dict__[element] is None:
            continue
        try:
            last_element = element
            bounds = [self.options['%s_begin_line' % element],
                      self.options['%s_end_line' % element]]
            truth_table.append(None not in bounds)
        except KeyError:
            # an expected option key is missing entirely
            return False
    if not truth_table:
        # no element is present at all -- nothing is "defined"
        return False
    # the last element may legitimately run to the end of the file, so an
    # undefined end line is fine provided its begin line is known
    if not truth_table[-1] and self.options['%s_begin_line' % last_element] is not None:
        truth_table[-1] = True
    return False not in truth_table
Moves the DataTable's footer to the header and updates the model
def remove_column(
self, column_name=None, column_index=None)
Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter
def remove_column(self, column_name=None, column_index=None):
    """Removes the column specified by column_name or column_index and updates
    the model. The column is removed from column_names, data and, if present,
    from column_types, column_descriptions and the row formatter string.

    The original implementation was an unimplemented `pass` stub; this
    implements the documented behavior. Does nothing when neither argument
    is supplied.
    """
    if column_index is None:
        if column_name is None:
            return
        column_index = self.column_names.index(column_name)
    else:
        column_name = self.column_names[column_index]
    self.column_names.pop(column_index)
    if self.data is not None:
        for row in self.data:
            row.pop(column_index)
    if self.options.get("column_types"):
        self.options["column_types"].pop(column_index)
    descriptions = self.options.get("column_descriptions")
    if isinstance(descriptions, dict):
        descriptions.pop(column_name, None)
    if self.options.get("row_formatter_string"):
        pieces = self.options["row_formatter_string"].split("{delimiter}")
        if column_index < len(pieces):
            pieces.pop(column_index)
            # renumber the leading positional field of each remaining piece
            for position, piece in enumerate(pieces):
                pieces[position] = re.sub(r'\d+', str(position), piece, count=1)
            self.options["row_formatter_string"] = "{delimiter}".join(pieces)
    self.update_model()
def remove_row(
self, row_index)
Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1
def remove_row(self, row_index):
    """Delete the data row at row_index and refresh the model.

    row_index is relative to the data attribute: 0 removes the first data
    row and -1 removes the last one."""
    del self.data[row_index]
    self.update_model()
def save(
self, path=None, **temp_options)
Saves the file. To save in another ascii format, specify elements in temp_options; the options specified do not permanently change the object's options. If path is supplied it saves the file to that path, otherwise it uses the object's path attribute to define the saving location.
def save(self, path=None, **temp_options):
    """Saves the file. To save in another ascii format specify elements in
    temp_options; the options specified do not permanently change the object's
    options. If path is supplied the file is saved there, otherwise the
    object's path attribute defines the saving location.

    Fixes: the original bound original_options to the SAME dict object, so
    restoring it was a no-op and temp_options leaked permanently. A copy is
    taken instead, and the restore happens in a finally block so options are
    reset even if building or writing fails.
    """
    original_options = self.options.copy()
    try:
        for key, value in temp_options.items():
            self.options[key] = value
        out_string = self.build_string(**temp_options)
        if path is None:
            path = self.path
        with open(path, 'w') as file_out:
            file_out.write(out_string)
    finally:
        # always restore the pre-save options
        self.options = original_options
def save_schema(
self, path=None, format=None)
Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves
def save_schema(self, path=None, format=None):
    """Saves the table's options as a text file or pickled dictionary
    (default). If no path is supplied one is generated automatically.

    Fixes: sorted(...) replaces the Python-2-only dict.keys().sort(), and
    both output files are opened with `with` so handles are always closed
    (the pickle handle was previously leaked).
    """
    if path is None:
        path = auto_name(self.name.replace('.' + self.options["extension"], ""),
                         'Schema', self.options["directory"], 'txt')
    if format in [None, 'python', 'pickle']:
        with open(path, 'wb') as file_out:
            pickle.dump(self.options, file_out)
    elif format in ['txt', 'text', '.txt']:
        with open(path, 'w') as file_out:
            # escape newlines so each option occupies exactly one line
            for key in sorted(self.options.keys()):
                out_key = str(key).replace("\n", "\\n")
                out_value = str(self.options[key]).replace("\n", "\\n")
                file_out.write("{0} : {1} \n".format(out_key, out_value))
def structure_metadata(
self)
Returns a dictionary of key,value pairs extracted from the header
def structure_metadata(self):
    """Populate self.metadata by pairing a fixed sequence of metadata key
    names with the corresponding lines of self.header, in file order."""
    metadata_keys = ["System_Id", "System_Letter", "Connector_Type_Calibration",
                     "Connector_Type_Measurement", "Measurement_Type", "Measurement_Date",
                     "Measurement_Time", "Program_Used", "Program_Revision", "Operator",
                     "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
                     "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
                     "Device_Description", "Device_Id"]
    self.metadata = {}
    for position, metadata_key in enumerate(metadata_keys):
        self.metadata[metadata_key] = self.header[position]
def update_column_names(
self)
Update column names adds the value x# for any column that exists in self.data that is not named
def update_column_names(self):
    """Ensure self.column_names is a list matching the width of self.data.

    A delimited string is split into a list; a missing list is synthesized as
    x0..xN; a short list is padded with x<i> names. Does nothing when data is
    None or the names already match.

    Fix: isinstance(..., str) replaces the Python-2-only `type(x) is
    StringType` check (types.StringType does not exist on Python 3 and the
    identity test misses str subclasses).
    """
    if self.data is None:
        return
    elif isinstance(self.column_names, str):
        # a delimited string was supplied; split it into a proper list
        self.column_names = split_row(self.column_names,
                                      self.options["column_names_delimiter"])
    elif self.column_names is None:
        column_names = []
        for index, column in enumerate(self.data[0]):
            column_names.append("x" + str(index))
        self.column_names = column_names
        return
    elif len(self.column_names) == len(self.data[0]):
        return
    elif len(self.column_names) < len(self.data[0]):
        # pad missing names so every data column is addressable
        for index in range(len(self.column_names), len(self.data[0])):
            self.column_names.append("x" + str(index))
        return
def update_import_options(
self, import_table)
Updates the options in the import table
def update_import_options(self, import_table):
    """Copy begin/end line numbers and tokens for each defined element out of
    import_table into self.options.

    import_table[i] is expected to be [begin_line, end_line, begin_token,
    end_token] for the i-th element in (header, column_names, data, footer);
    entries for elements that are None on this table are ignored.

    Fix: removed leftover debug print statements that spammed stdout on
    every import.
    """
    for index, element in enumerate(['header', 'column_names', 'data', 'footer']):
        if self.__dict__[element] is not None:
            [self.options['%s_begin_line' % element],
             self.options['%s_end_line' % element],
             self.options['%s_begin_token' % element],
             self.options['%s_end_token' % element]] = import_table[index][:]
def update_index(
self)
Updates the index column if it exists, otherwise exits quietly
def update_index(self):
    """Renumber the 'index' column so it holds 0..n-1; exits quietly when
    there is no such column or the renumbering fails."""
    if 'index' not in self.column_names:
        return
    try:
        # the index column is normally first, but locate it to be safe
        position = self.column_names.index('index')
        for row_number, row in enumerate(self.data):
            row[position] = row_number
    except:
        pass
def update_model(
self)
Updates the model after a change has been made. If you add anything to the attributes of the model, or change this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in deliminators etc.
def update_model(self):
    """Updates the model after a change has been made. If the model has an
    index column it renumbers it, strips stray newlines from the list
    elements, syncs column names to the data width, re-coerces the data to
    the declared column types and rebuilds the cached string/lines
    representations.
    """
    if self.column_names is not None and 'index' in self.column_names:
        self.update_index()
    # make sure there are no "\n" characters in the element lists (if so
    # replace them with ""); for data this is done on import
    list_types=["header","column_names","footer"]
    for element in list_types:
        if self.__dict__[element] is not None:
            for index,item in enumerate(self.__dict__[element]):
                self.__dict__[element][index]=item.replace("\n","")
    self.update_column_names()
    if self.data is not None:
        # coerce every row back to the declared column types after edits
        self.data=convert_all_rows(self.data,self.options["column_types"])
    # refresh the cached serialized forms so they reflect the new state
    self.string=self.build_string()
    self.lines=self.string.splitlines()
class SwitchTermsPort
class SwitchTermsPort():
    """Placeholder model for switch-term measurements taken on a port.

    NOTE(review): not yet implemented -- currently an empty stub.
    """
    pass
Ancestors (in MRO)
class TwoPortCalrepModel
TwoPortCalrepModel is a model that holds data output by analyzing several datafiles using the HPBasic program Calrep. The data is stored in 3 tables: a S11 table, a S21 table and a S22 table. The data is in linear magnitude and angle in degrees. There are 2 types of files, one is a single file with .asc extension and 3 files with .txt extension
class TwoPortCalrepModel():
    """TwoPortCalrepModel is a model that holds data output by analyzing several
    datafiles using the HPBasic program Calrep. The data is stored in 3 tables: a
    S11 table, a S21 table and a S22 table. The data is in linear magnitude and
    angle in degrees. There are 2 types of files: a single file with .asc
    extension, or 3 files (a/b/c) with .txt extension.

    Fixes relative to the original: string comparisons use != instead of the
    identity test `is not "Frequency"` (which only worked through CPython
    interning); the kwargs parameter `options` is no longer clobbered inside the
    table-loading loop; map() results are materialized as lists so indexing works
    on Python 3 as well; the duplicated table-loading loop is extracted into the
    private helper __open_tables__.
    """

    def __init__(self, file_path=None, **options):
        """Intializes the TwoPortCalrepModel class; if a file path is specified
        it opens and reads the file (either a single .asc file or the a/b/c .txt
        triple, which may also be passed directly as a list of paths)."""
        defaults = {"specific_descriptor": 'Two_Port_Calrep'}
        self.options = {}
        for key, value in defaults.items():
            self.options[key] = value
        for key, value in options.items():
            self.options[key] = value
        if file_path is None:
            pass
        elif re.match('asc', file_path.split(".")[-1], re.IGNORECASE):
            # single .asc file: a header block followed by three tables
            self.table_names = ['header', 'S11', 'S22', 'S21']
            self.row_pattern = make_row_match_string(ONE_PORT_COLUMN_NAMES)
            self.path = file_path
            self.__read_and_fix__()
        elif re.match('txt', file_path.split(".")[-1], re.IGNORECASE) or isinstance(file_path, list):
            self.table_names = ['S11', 'S22', 'S21']
            if isinstance(file_path, list):
                self.file_names = file_path
                self.__open_tables__()
            else:
                try:
                    # derive the a/b/c file names from whichever one was given
                    root_name_pattern = re.compile(r'(?P<root_name>\w+)[abc].txt', re.IGNORECASE)
                    root_name_match = re.search(root_name_pattern, file_path)
                    root_name = root_name_match.groupdict()["root_name"]
                    directory = os.path.dirname(os.path.realpath(file_path))
                    self.file_names = [os.path.join(directory, root_name + end)
                                       for end in ['a.txt', 'b.txt', 'c.txt']]
                    self.__open_tables__()
                except:
                    print("Could not import {0} please check that the a,b,c "
                          "tables are all in the same directory".format(file_path))
                    raise
            # suffix every non-Frequency column with its table name so the
            # joined table has unique column names
            for index, table in enumerate(self.tables):
                column_names = []
                for column_number, column in enumerate(table.column_names):
                    if column != "Frequency":
                        column_names.append(column + self.table_names[index])
                    else:
                        column_names.append(column)
                table.column_names = column_names
            if CONVERT_S21:
                # S21 is stored in dB; convert magnitude and its four
                # uncertainty columns to linear magnitude
                for row_number, row in enumerate(self.tables[2].data):
                    new_S21 = self.tables[2].data[row_number][1]
                    new_S21 = 10. ** (-1 * new_S21 / 20.)
                    new_value = [self.tables[2].data[row_number][i] for i in range(2, 6)]
                    new_value = [abs((1 / np.log10(np.e)) * new_S21 * x / 20.) for x in new_value]
                    self.tables[2].data[row_number][1] = new_S21
                    for i in range(2, 6):
                        self.tables[2].data[row_number][i] = new_value[i - 2]
            for key, value in self.options.items():
                self.tables[0].options[key] = value
            self.joined_table = ascii_data_table_join("Frequency", self.tables[0], self.tables[2])
            self.joined_table = ascii_data_table_join("Frequency", self.joined_table, self.tables[1])

    def __open_tables__(self):
        """Open the three one-port tables listed in self.file_names. The third
        (c) table carries an extra trailing comma on each row that must be
        stripped via a temporary row_end_token."""
        self.tables = []
        for index, table_name in enumerate(self.table_names):
            if index == 2:
                # fixes a problem with the c tables, extra comma at the end
                table_options = {"row_end_token": ',\n'}
                self.tables.append(OnePortCalrepModel(self.file_names[index], **table_options))
                self.tables[2].options["row_end_token"] = None
            else:
                self.tables.append(OnePortCalrepModel(self.file_names[index]))

    def __read_and_fix__(self):
        """Read a single .asc file and split it into a header plus the three
        one-port tables, then join them into self.joined_table."""
        in_file = open(self.path, 'r')
        self.lines = []
        table_locators = ["Table 1", "Table 2", "Table 3"]
        begin_lines = []
        for index, line in enumerate(in_file):
            self.lines.append(line)
            for table in table_locators:
                if re.search(table, line, re.IGNORECASE):
                    begin_lines.append(index)
        in_file.close()
        # work out [begin, end] line numbers for the header and each table;
        # the last table runs to the end of the file (end_line None)
        self.table_line_numbers = []
        for index, begin_line in enumerate(begin_lines):
            if index == 0:
                header_begin_line = 0
                header_end_line = begin_line - 2
                table_1_begin_line = begin_line + 3
                table_1_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([header_begin_line, header_end_line])
                self.table_line_numbers.append([table_1_begin_line, table_1_end_line])
            elif index > 0 and index < (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = begin_lines[index + 1] - 1
                self.table_line_numbers.append([table_begin_line, table_end_line])
            elif index == (len(begin_lines) - 1):
                table_begin_line = begin_line + 3
                table_end_line = None
                self.table_line_numbers.append([table_begin_line, table_end_line])
        self.tables = []
        for index, name in enumerate(self.table_names):
            self.table_lines = self.lines[self.table_line_numbers[index][0]:
                                          self.table_line_numbers[index][1]]
            self.tables.append(self.table_lines)
        for index, table in enumerate(self.table_names):
            if index == 0:
                # the header stays a list of strings; just remove end-of-line tokens
                self.tables[index] = strip_all_line_tokens(self.tables[index],
                                                          begin_token=None, end_token='\n')
            else:
                column_types = ['float' for i in range(len(ONE_PORT_COLUMN_NAMES))]
                parse_options = {"row_pattern": self.row_pattern,
                                 "column_names": ONE_PORT_COLUMN_NAMES,
                                 "output": "list_list",
                                 "column_types": column_types}
                self.tables[index] = parse_lines(self.tables[index], **parse_options)
        # put S21 magnitude (and its uncertainties) into linear magnitude
        if CONVERT_S21:
            for row_number, row in enumerate(self.tables[3]):
                new_S21 = self.tables[3][row_number][1]
                new_S21 = 10. ** (-1 * new_S21 / 20.)
                new_value = [self.tables[3][row_number][i] for i in range(2, 6)]
                new_value = [abs((1 / np.log10(np.e)) * new_S21 * x / 20) for x in new_value]
                self.tables[3][row_number][1] = new_S21
                for i in range(2, 6):
                    self.tables[3][row_number][i] = new_value[i - 2]
        for index, table in enumerate(self.tables):
            if index == 0:
                pass
            else:
                table_options = {"data": self.tables[index]}
                self.tables[index] = OnePortCalrepModel(None, **table_options)
                column_names = []
                for column_number, column in enumerate(self.tables[index].column_names):
                    if column != "Frequency":
                        column_names.append(column + self.table_names[index])
                    else:
                        column_names.append(column)
                self.tables[index].column_names = column_names
        self.tables[1].header = self.tables[0]
        for key, value in self.options.items():
            self.tables[1].options[key] = value
        self.joined_table = ascii_data_table_join("Frequency", self.tables[1], self.tables[3])
        self.joined_table = ascii_data_table_join("Frequency", self.joined_table, self.tables[2])

    def __str__(self):
        """Return the serialized joined table."""
        return self.joined_table.build_string()

    def show(self):
        """Plot magnitude and phase of S11, S21 and S22 with total-uncertainty
        error bars in a 3x2 grid."""
        fig, axes = plt.subplots(nrows=3, ncols=2)
        ax0, ax1, ax2, ax3, ax4, ax5 = axes.flat
        panels = [(ax0, 'magS11', 'uMgS11', 'k-o', 'Magnitude S11'),
                  (ax1, 'argS11', 'uAgS11', 'ro', 'Phase S11'),
                  (ax2, 'magS21', 'uMgS21', 'k-o', 'Magnitude S21'),
                  (ax3, 'argS21', 'uAgS21', 'ro', 'Phase S21'),
                  (ax4, 'magS22', 'uMgS22', 'k-o', 'Magnitude S22'),
                  (ax5, 'argS22', 'uAgS22', 'ro', 'Phase S22')]
        frequency = self.joined_table.get_column('Frequency')
        for axis, column, error_column, fmt, title in panels:
            axis.errorbar(frequency, self.joined_table.get_column(column),
                          yerr=self.joined_table.get_column(error_column), fmt=fmt)
            axis.set_title(title)
        plt.tight_layout()
        plt.show()
Ancestors (in MRO)
Instance variables
var options
Methods
def __init__(
self, file_path=None, **options)
Intializes the TwoPortCalrepModel class, if a file path is specified it opens and reads the file
def __init__(self,file_path=None,**options):
    """Intializes the TwoPortCalrepModel class, if a file path is specified it opens and reads
    the file. Accepts either a single .asc file, one of the a/b/c .txt files (the other two are
    located in the same directory), or an explicit list of the three .txt paths."""
    defaults= {"specific_descriptor": 'Two_Port_Calrep'}
    self.options={}
    # NOTE(review): iteritems is Python-2 only
    for key,value in defaults.iteritems():
        self.options[key]=value
    for key,value in options.iteritems():
        self.options[key]=value
    if file_path is None:
        pass
    elif re.match('asc',file_path.split(".")[-1],re.IGNORECASE):
        # single .asc file: header block plus three tables
        self.table_names=['header','S11','S22','S21']
        self.row_pattern=make_row_match_string(ONE_PORT_COLUMN_NAMES)
        self.path=file_path
        self.__read_and_fix__()
    elif re.match('txt',file_path.split(".")[-1],re.IGNORECASE) or type(file_path) is ListType:
        self.table_names=['S11','S22','S21']
        if type(file_path) is ListType:
            self.file_names=file_path
            self.tables=[]
            for index,table in enumerate(self.table_names):
                if index==2:
                    #fixes a problem with the c tables, extra comma at the end
                    # NOTE(review): this rebinding shadows the **options kwargs parameter
                    options={"row_end_token":',\n'}
                    self.tables.append(OnePortCalrepModel(self.file_names[index],**options))
                    self.tables[2].options["row_end_token"]=None
                else:
                    self.tables.append(OnePortCalrepModel(self.file_names[index]))
        else:
            try:
                # derive the a/b/c triple from whichever file name was handed in
                root_name_pattern=re.compile('(?P<root_name>\w+)[abc].txt',re.IGNORECASE)
                root_name_match=re.search(root_name_pattern,file_path)
                root_name=root_name_match.groupdict()["root_name"]
                directory=os.path.dirname(os.path.realpath(file_path))
                self.file_names=[os.path.join(directory,root_name+end) for end in ['a.txt','b.txt','c.txt']]
                self.tables=[]
                for index,table in enumerate(self.table_names):
                    if index==2:
                        #fixes a problem with the c tables, extra comma at the end
                        options={"row_end_token":',\n'}
                        self.tables.append(OnePortCalrepModel(self.file_names[index],**options))
                        self.tables[2].options["row_end_token"]=None
                    else:
                        self.tables.append(OnePortCalrepModel(self.file_names[index]))
            except:
                print("Could not import {0} please check that the a,b,c "
                      "tables are all in the same directory".format(file_path))
                raise
        # suffix non-Frequency columns with their table name so the join has unique names
        for index,table in enumerate(self.tables):
            column_names=[]
            for column_number,column in enumerate(table.column_names):
                # NOTE(review): `is not` compares identity, not equality; works only via
                # CPython string interning and should be !=
                if column is not "Frequency":
                    column_names.append(column+self.table_names[index])
                else:
                    column_names.append(column)
            table.column_names=column_names
        if CONVERT_S21:
            # S21 is stored in dB; convert magnitude and its uncertainties to linear
            for row_number,row in enumerate(self.tables[2].data):
                new_S21=self.tables[2].data[row_number][1]
                new_S21=10.**(-1*new_S21/20.)
                new_value=[self.tables[2].data[row_number][i] for i in range(2,6)]
                # NOTE(review): on Python 3 map() returns an iterator; indexing below would fail
                new_value=map(lambda x:abs((1/np.log10(np.e))*new_S21*x/20.),new_value)
                self.tables[2].data[row_number][1]=new_S21
                for i in range(2,6):
                    self.tables[2].data[row_number][i]=new_value[i-2]
        for key,value in self.options.iteritems():
            self.tables[0].options[key]=value
        self.joined_table=ascii_data_table_join("Frequency",self.tables[0],self.tables[2])
        self.joined_table=ascii_data_table_join("Frequency",self.joined_table,self.tables[1])
def show(self):
    """Plot magnitude and phase of S11, S21 and S22 from the joined table,
    with total-uncertainty error bars, in a 3x2 grid of axes."""
    fig, axes = plt.subplots(nrows=3, ncols=2)
    ax0, ax1, ax2, ax3, ax4, ax5 = axes.flat
    # magnitudes use black connected markers, phases use red markers
    ax0.errorbar(self.joined_table.get_column('Frequency'),self.joined_table.get_column('magS11'),
                 yerr=self.joined_table.get_column('uMgS11'),fmt='k-o')
    ax0.set_title('Magnitude S11')
    ax1.errorbar(self.joined_table.get_column('Frequency'),self.joined_table.get_column('argS11'),
                 yerr=self.joined_table.get_column('uAgS11'),fmt='ro')
    ax1.set_title('Phase S11')
    ax2.errorbar(self.joined_table.get_column('Frequency'),self.joined_table.get_column('magS21'),
                 yerr=self.joined_table.get_column('uMgS21'),fmt='k-o')
    ax2.set_title('Magnitude S21')
    ax3.errorbar(self.joined_table.get_column('Frequency'),self.joined_table.get_column('argS21'),
                 yerr=self.joined_table.get_column('uAgS21'),fmt='ro')
    ax3.set_title('Phase S21')
    ax4.errorbar(self.joined_table.get_column('Frequency'),self.joined_table.get_column('magS22'),
                 yerr=self.joined_table.get_column('uMgS22'),fmt='k-o')
    ax4.set_title('Magnitude S22')
    ax5.errorbar(self.joined_table.get_column('Frequency'),self.joined_table.get_column('argS22'),
                 yerr=self.joined_table.get_column('uAgS22'),fmt='ro')
    ax5.set_title('Phase S22')
    plt.tight_layout()
    plt.show()
class TwoPortRawModel
Class that deals with the TwoPort Raw Files after conversion to Ascii using Ron Ginley's converter. These files typically have header information separated from data by !! Header format is: Line 1: Spid$ - identification of type of system used Line 2: Systemletter$ - letter name indicating which system was used Line 3: Conncal$ - connector type from the system calibration Line 4: Connectors$ - connector type used for the measurement Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power) Line 6: Datea$ - date of measurement Line 7: Timea$ - time of measurement Line 8: Programm$ - name of program used Line 9: Rev$ - program revision Line 10: Opr$ - operator Line 11: Cfile$ - calibration name Line 12: Cdate$ - calibration date Line 13: Sport - identification of which port or direction was used for measurement Line 14: Numconnects - number of disconnect/reconnect cycles Line 15: Numrepeats - number of repeat measurements for each connect (usually 1) Line 16: Nbs - not sure Line 17: Nfreq - number of frequencies Line 18: Startfreq - data row pointer for bdat files Line 19: Devicedescript$ - description of device being measured or of test being done Line 20: Devicenum$ - identifying number for device, used for file names
class TwoPortRawModel(AsciiDataTable):
    """ Class that deals with the TwoPort Raw Files after conversion to Ascii using
    Ron Ginley's converter. These files typically have header information separated
    from data by !!
    Header format is:
    Line 1: Spid$ - identification of type of system used
    Line 2: Systemletter$ - letter name indicating which system was used
    Line 3: Conncal$ - connector type from the system calibration
    Line 4: Connectors$ - connector type used for the measurement
    Line 5: Meastype$ - type of measurement (basically 1-port, 2-port or power)
    Line 6: Datea$ - date of measurement
    Line 7: Timea$ - time of measurement
    Line 8: Programm$ - name of program used
    Line 9: Rev$ - program revision
    Line 10: Opr$ - operator
    Line 11: Cfile$ - calibration name
    Line 12: Cdate$ - calibration date
    Line 13: Sport - identification of which port or direction was used for measurement
    Line 14: Numconnects - number of disconnect/reconnect cycles
    Line 15: Numrepeats - number of repeat measurements for each connect (usually 1)
    Line 16: Nbs - not sure
    Line 17: Nfreq - number of frequencies
    Line 18: Startfreq - data row pointer for bdat files
    Line 19: Devicedescript$ - description of device being measured or of test being done
    Line 20: Devicenum$ - identifying number for device, used for file names

    Fix relative to the original: the input file in __read_and_fix__ is now
    closed via a with-statement (it was previously leaked).
    """

    def __init__(self, file_path=None, **options):
        """Initializes the TwoPortRaw class; if a file_path is specified opens an
        existing file, else creates an empty container."""
        defaults = {"data_delimiter": ",",
                    "column_names_delimiter": ",",
                    "specific_descriptor": 'Two_Port_Raw',
                    "general_descriptor": 'Sparameter',
                    "extension": 'txt',
                    "comment_begin": "#",
                    "comment_end": "\n",
                    "column_types": ['float', 'int', 'int', 'float', 'float',
                                     'float', 'float', 'float', 'float'],
                    "column_descriptions": {"Frequency": "Frequency in GHz",
                                            "Direction": "Direction of connects, may be unused",
                                            "Connect": "Connect number",
                                            "magS11": "Linear magnitude for S11",
                                            "argS11": "Phase in degrees for S11",
                                            "magS21": "Linear magnitude for S21",
                                            "argS21": "Phase in degrees for S21",
                                            "magS22": "Linear magnitude for S22",
                                            "argS22": "Phase in degrees for S22"},
                    "header": None,
                    "column_names": ["Frequency", "Direction", "Connect", "magS11",
                                     "argS11", "magS21", "argS21", "magS22", "argS22"],
                    "column_names_end_token": "\n",
                    "data": None,
                    'row_formatter_string': "{0:.5f}{delimiter}{1}{delimiter}{2}"
                                            "{delimiter}{3:.4f}{delimiter}{4:.2f}{delimiter}"
                                            "{5:.4f}{delimiter}{6:.2f}{delimiter}"
                                            "{7:.4f}{delimiter}{8:.2f}",
                    "data_table_element_separator": None}
        self.options = {}
        for key, value in defaults.items():
            self.options[key] = value
        for key, value in options.items():
            self.options[key] = value
        # Define Method Aliases if they are available
        if METHOD_ALIASES:
            for command in alias(self):
                exec(command)
        if file_path is not None:
            self.__read_and_fix__(file_path)
        AsciiDataTable.__init__(self, None, **self.options)
        self.path = file_path
        # NOTE(review): called even when file_path is None; assumes the base
        # class leaves self.header subscriptable in that case -- confirm
        self.structure_metadata()

    def __read_and_fix__(self, file_path=None):
        """Inputs in the raw OnePortRaw file and fixes any problems with
        delimiters, etc. Header and data are split at the '!!' marker line;
        assumes the marker is present in the file."""
        lines = []
        with open(file_path, 'r') as in_file:
            for index, line in enumerate(in_file):
                lines.append(line)
                if re.search("!!", line):
                    data_begin_line = index + 1
        self.lines = lines
        parse_options = {"delimiter": ", ", "row_end_token": '\n'}
        data = parse_lines(lines[data_begin_line:], **parse_options)
        self.options["data"] = data
        self.options["header"] = lines[:data_begin_line - 1]

    def structure_metadata(self):
        """Returns a dictionary of key,value pairs extracted from the header."""
        keys = ["System_Id", "System_Letter", "Connector_Type_Calibration",
                "Connector_Type_Measurement", "Measurement_Type", "Measurement_Date",
                "Measurement_Time", "Program_Used", "Program_Revision", "Operator",
                "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
                "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
                "Device_Description", "Device_Id"]
        self.metadata = {}
        for index, key in enumerate(keys):
            self.metadata[key] = self.header[index]

    def show(self):
        """Plot magnitude and phase of S11, S21 (dB) and S22 in a 3x2 grid."""
        fig, axes = plt.subplots(nrows=3, ncols=2)
        ax0, ax1, ax2, ax3, ax4, ax5 = axes.flat
        panels = [(ax0, 'magS11', 'k-o', 'Magnitude S11'),
                  (ax1, 'argS11', 'ro', 'Phase S11'),
                  (ax2, 'magS21', 'k-o', 'Magnitude S21 in dB'),
                  (ax3, 'argS21', 'ro', 'Phase S21'),
                  (ax4, 'magS22', 'k-o', 'Magnitude S22'),
                  (ax5, 'argS22', 'ro', 'Phase S22')]
        for axis, column, fmt, title in panels:
            axis.plot(self.get_column('Frequency'), self.get_column(column), fmt)
            axis.set_title(title)
        plt.tight_layout()
        plt.show()
Ancestors (in MRO)
- TwoPortRawModel
- pyMez.Code.DataHandlers.GeneralModels.AsciiDataTable
Instance variables
var options
var path
Methods
def __init__(
self, file_path=None, **options)
Initializes the TwoPortRaw class, if a file_path is specified opens an existing file, else creates an empty container
def __init__(self,file_path=None,**options):
    """Initializes the TwoPortRaw class, if a file_path is specified opens an existing file,
    else creates an empty container"""
    # default schema for a two-port raw ascii file: 9 comma-delimited columns
    defaults= {"data_delimiter": ",",
               "column_names_delimiter": ",",
               "specific_descriptor": 'Two_Port_Raw',
               "general_descriptor": 'Sparameter',
               "extension": 'txt',
               "comment_begin": "#",
               "comment_end": "\n",
               "column_types": ['float','int','int','float','float','float','float','float','float'],
               "column_descriptions": {"Frequency":"Frequency in GHz",
                                       "Direction":"Direction of connects, may be unused",
                                       "Connect":"Connect number",
                                       "magS11":"Linear magnitude for S11",
                                       "argS11":"Phase in degrees for S11",
                                       "magS21":"Linear magnitude for S21",
                                       "argS21":"Phase in degrees for S21",
                                       "magS22":"Linear magnitude for S22",
                                       "argS22":"Phase in degrees for S22"},
               "header": None,
               "column_names": ["Frequency","Direction","Connect", "magS11",
                                "argS11","magS21","argS21","magS22","argS22"],
               "column_names_end_token": "\n",
               "data": None,
               'row_formatter_string': "{0:.5f}{delimiter}{1}{delimiter}{2}"
                                       "{delimiter}{3:.4f}{delimiter}{4:.2f}{delimiter}"
                                       "{5:.4f}{delimiter}{6:.2f}{delimiter}"
                                       "{7:.4f}{delimiter}{8:.2f}",
               "data_table_element_separator": None}
    self.options={}
    # NOTE(review): iteritems is Python-2 only
    for key,value in defaults.iteritems():
        self.options[key]=value
    for key,value in options.iteritems():
        self.options[key]=value
    # Define Method Aliases if they are available
    if METHOD_ALIASES:
        for command in alias(self):
            exec(command)
    # reading happens before the base-class init so the parsed header/data
    # land in self.options and are consumed by AsciiDataTable.__init__
    if file_path is not None:
        self.__read_and_fix__(file_path)
    AsciiDataTable.__init__(self,None,**self.options)
    self.path=file_path
    self.structure_metadata()
def add_column(
self, column_name=None, column_type=None, column_data=None, format_string=None)
Adds a column with column_name, and column_type. If column data is supplied and it's length is the same as data(same number of rows) then it is added, else self.options['empty_character'] is added in each spot in the preceding rows
def add_column(self, column_name=None, column_type=None, column_data=None, format_string=None):
    """Adds a column with column_name and column_type. If column_data is
    supplied and its length matches the number of data rows it is used as the
    column's values; otherwise self.options['empty_value'] is placed in each
    preceding row (and any column_data items are then appended as new rows).

    Fixes relative to the original: list.append returns None, so the old
    assignments `column_types = column_types.append(...)` and
    `self.data[index] = row.append(...)` silently replaced column_types and
    every data row with None; also len(column_data) crashed when column_data
    was None.
    """
    original_column_names = self.column_names[:]
    try:
        self.column_names.append(column_name)
        if self.options["column_types"]:
            # append in place -- never assign the (None) return value
            self.options["column_types"].append(column_type)
        if column_data is not None and len(column_data) == len(self.data):
            for index, row in enumerate(self.data):
                new_row = row[:]
                new_row.append(column_data[index])
                self.data[index] = new_row
        else:
            # pad existing rows, then add any supplied values as new rows
            for index, row in enumerate(self.data):
                row.append(self.options['empty_value'])
            if column_data is not None:
                for item in column_data:
                    empty_row = [self.options['empty_value'] for column in original_column_names]
                    empty_row.append(item)
                    self.add_row(empty_row)
        if self.options["row_formatter_string"] is not None:
            if format_string is None:
                self.options["row_formatter_string"] = (self.options["row_formatter_string"]
                                                        + '{delimiter}'
                                                        + "{" + str(len(self.column_names) - 1) + "}")
            else:
                self.options["row_formatter_string"] = self.options["row_formatter_string"] + format_string
    except:
        # roll back the name change before re-raising
        self.column_names = original_column_names
        print("Could not add columns")
        raise
def add_index(
self)
Adds a column with name index and values that are 0 referenced indices, does nothing if there is already a column with name index, always inserts it at the 0 position
def add_index(self):
    """Adds a column named 'index' at position 0 holding 0-referenced row
    numbers; does nothing if an 'index' column already exists.

    Fixes relative to the original: removed the debug print on the no-op
    path, and the formatter renumbering now bumps only each piece's leading
    positional field number (the old str.replace(str(i), str(i+1)) also
    corrupted precision digits, e.g. "{0:.10f}" -> "{1:.11f}").
    """
    if 'index' in self.column_names:
        return
    self.column_names.insert(0, 'index')
    for index, row in enumerate(self.data):
        self.data[index].insert(0, index)
    if self.options['column_types']:
        self.options['column_types'].insert(0, 'int')
    if self.options['row_formatter_string']:
        pieces = self.options['row_formatter_string'].split("{delimiter}")
        # shift every positional field up by one to make room for {0}
        renumbered = [re.sub(r'\d+', str(position + 1), piece, count=1)
                      for position, piece in enumerate(pieces)]
        self.options['row_formatter_string'] = '{0}{delimiter}' + "{delimiter}".join(renumbered)
def add_inline_comment(
self, comment='', line_number=None, string_position=None)
Adds an inline in the specified location
def add_inline_comment(self, comment="", line_number=None, string_position=None):
    """Record an inline comment as [comment, line_number, string_position];
    silently does nothing when the table has no inline_comments list."""
    entry = [comment, line_number, string_position]
    try:
        self.inline_comments.append(entry)
    except:
        pass
def add_row(
self, row_data)
Adds a single row given row_data which can be an ordered list/tuple or a dictionary with column names as keys
def add_row(self, row_data):
    """Adds a single row given row_data, which can be an ordered list/tuple
    (or numpy array) matching column order, or a dictionary keyed by column
    name. Rows of the wrong width are rejected with a message.

    Fixes relative to the original: the dimension check compared against a
    list containing len(column_names) twice (a copy/paste artifact), tuples
    were silently dropped despite the docstring allowing them, and the
    Python-2-only ListType/DictionaryType checks are replaced by isinstance.
    """
    if len(row_data) != len(self.column_names):
        print(" could not add the row, dimensions do not match")
        return
    if isinstance(row_data, dict):
        # reorder the dict values into column order
        data_list = [row_data[column_name] for column_name in self.column_names]
        self.data.append(data_list)
    else:
        self.data.append(row_data)
def build_string(
self, **temp_options)
Builds a string representation of the data table based on self.options, or temp_options. Passing temp_options does not permanently change the model
def build_string(self,**temp_options):
    """Builds a string representation of the data table based on self.options, or temp_options. Passing
    temp_options does not permanently change the model"""
    # store the original options to be put back after the string is made
    original_options=self.options
    for key,value in temp_options.iteritems():
        self.options[key]=value
    section_end=0
    next_section_begin=0
    # blank-line spacing between sections is derived from the separator text
    if self.options['data_table_element_separator'] is None:
        inner_element_spacing=0
    else:
        inner_element_spacing=self.options['data_table_element_separator'].count('\n')-1
    string_out=""
    between_section=""
    if self.options['data_table_element_separator'] is not None:
        between_section=self.options['data_table_element_separator']
    # header section: emit it and record its begin/end line numbers in options
    if self.header is None:
        self.options['header_begin_line']=self.options['header_end_line']=None
        pass
    else:
        self.options["header_begin_line"]=0
        if self.data is None and self.column_names is None and self.footer is None:
            # header is the only section, so its end line is left open
            string_out=self.get_header_string()
            self.options["header_end_line"]=None
        else:
            string_out=self.get_header_string()+between_section
            last_header_line=self.get_header_string().count('\n')+1
            self.options["header_end_line"]=last_header_line
            next_section_begin=last_header_line+inner_element_spacing
    # column-names section
    if self.column_names is None:
        self.options['column_names_begin_line']=self.options['column_names_end_line']=None
        pass
    else:
        self.options["column_names_begin_line"]=next_section_begin
        if self.data is None and self.footer is None:
            # last section present: end line left open
            self.options["column_names_end_line"]=None
            string_out=string_out+self.get_column_names_string()
        else:
            string_out=string_out+self.get_column_names_string()+between_section
            last_column_names_line=self.get_column_names_string().count('\n')+\
                                   self.options["column_names_begin_line"]+1
            self.options["column_names_end_line"]=last_column_names_line
            next_section_begin=last_column_names_line+inner_element_spacing
    # data section
    if self.data is None:
        self.options['data_begin_line']=self.options['data_end_line']=None
        pass
    else:
        self.options["data_begin_line"]=next_section_begin
        if self.footer is None:
            self.options["data_end_line"]=None
            string_out=string_out+self.get_data_string()
        else:
            string_out=string_out+self.get_data_string()+between_section
            last_data_line=self.get_data_string().count("\n")+\
                           self.options["data_begin_line"]+1
            self.options["data_end_line"]=last_data_line
            next_section_begin=last_data_line+inner_element_spacing
    # footer section (always last, so footer_end_line stays None)
    if self.footer is None:
        self.options['footer_begin_line']=self.options['footer_end_line']=None
        pass
    else:
        self.options["footer_begin_line"]=next_section_begin
        string_out=string_out+self.get_footer_string()
        self.options['footer_end_line']=None
    # splice any stored inline comments into the rendered lines
    if self.inline_comments is None:
        pass
    else:
        lines=string_out.splitlines()
        for comment in self.inline_comments:
            lines=insert_inline_comment(lines,comment=comment[0],line_number=comment[1],
                                        string_position=comment[2],
                                        begin_token=self.options['inline_comment_begin'],
                                        end_token=self.options['inline_comment_end'])
        string_out=string_list_collapse(lines,string_delimiter='\n')
    # set the options back after the string has been made
    # NOTE(review): original_options aliases self.options (no copy), so the
    # temp_options applied above actually persist in the dict -- the
    # "restoration" here is a no-op. Verify whether that is intended.
    self.options=original_options
    return string_out
def change_unit_prefix(
self, column_selector=None, old_prefix=None, new_prefix=None, unit='Hz')
Changes the prefix of the units of the column specified by column_selector (column name or index) example usage is self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz') to change a column from Hz to GHz. It updates the data values, column_descriptions, and column_units if they exist, see http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes
def change_unit_prefix(self,column_selector=None,old_prefix=None,new_prefix=None,unit='Hz'):
    """Changes the prefix of the units of the column specified by column_selector (column name or index).

    Example: self.change_unit_prefix(column_selector='Frequency',old_prefix=None,new_prefix='G',unit='Hz')
    changes a column from Hz to GHz. It rescales the data values and, when present, rewrites the
    column_descriptions and column_units options and the column name itself.
    See http://www.nist.gov/pml/wmd/metric/prefixes.cfm for possible prefixes.

    Fixes: replaced a Python-2-only ``print type(...)`` statement with print(),
    and a bare ``raise`` (no active exception) with an explicit TypeError.
    """
    multipliers={"yotta":10.**24,"Y":10.**24,"zetta":10.**21,"Z":10.**21,"exa":10.**18,"E":10.**18,"peta":10.**15,
                 "P":10.**15,"tera":10.**12,"T":10.**12,"giga":10.**9,"G":10.**9,"mega":10.**6,"M":10.**6,
                 "kilo":10.**3,"k":10.**3,"hecto":10.**2,"h":10.**2,"deka":10.,"da":10.,None:1.,"":1.,
                 "deci":10.**-1,"d":10.**-1,"centi":10.**-2,"c":10.**-2,"milli":10.**-3,"m":10.**-3,
                 "micro":10.**-6,"mu":10.**-6,u"\u00B5":10.**-6,"nano":10.**-9,
                 "n":10.**-9,"pico":10.**-12,"p":10.**-12,"femto":10.**-15,
                 "f":10.**-15,"atto":10.**-18,"a":10.**-18,"zepto":10.**-21,"z":10.**-21,
                 "yocto":10.**-24,"y":10.**-24}
    try:
        # normalize so that None behaves like the empty (no-prefix) string
        if old_prefix is None:
            old_prefix=""
        if new_prefix is None:
            new_prefix=""
        old_unit=old_prefix+unit
        new_unit=new_prefix+unit
        # change column name into column index
        if column_selector in self.column_names:
            column_selector=self.column_names.index(column_selector)
        scale=multipliers[old_prefix]/multipliers[new_prefix]
        for index,row in enumerate(self.data):
            # numeric cells are rescaled in place; string/int cells are
            # rescaled and stored back as strings (established behavior)
            if type(self.data[index][column_selector]) in [FloatType,LongType]:
                self.data[index][column_selector]=scale*self.data[index][column_selector]
            elif type(self.data[index][column_selector]) in [StringType,IntType]:
                self.data[index][column_selector]=\
                    str(scale*float(self.data[index][column_selector]))
            else:
                print(type(self.data[index][column_selector]))
                raise TypeError("unsupported cell type for unit conversion")
        if self.options["column_descriptions"] is not None:
            # NOTE(review): class defaults show column_descriptions as a dict
            # keyed by column NAME; indexing it with an integer would raise
            # here and be reported below. Verify the expected container type.
            old=self.options["column_descriptions"][column_selector]
            self.options["column_descriptions"][column_selector]=old.replace(old_unit,new_unit)
        if self.options["column_units"] is not None:
            old=self.options["column_units"][column_selector]
            self.options["column_units"][column_selector]=old.replace(old_unit,new_unit)
        if re.search(old_unit,self.column_names[column_selector]):
            old=self.column_names[column_selector]
            self.column_names[column_selector]=old.replace(old_unit,new_unit)
    except:
        print("Could not change the unit prefix of column {0}".format(column_selector))
        raise
def find_line(
self, begin_token)
Finds the first line that has begin token in it
def find_line(self, begin_token):
    """Return the index of the first line in self.lines that matches
    begin_token (regex search); returns None when no line matches."""
    for position, text in enumerate(self.lines):
        if re.search(begin_token, text):
            return position
def get_column(
self, column_name=None, column_index=None)
Returns a column as a list given a column name or column index
def get_column(self, column_name=None, column_index=None):
    """Return one column of the table as a list.

    Selection is by column_name (preferred) or column_index; returns
    None when neither selector is supplied.
    """
    if column_name is not None:
        selector = self.column_names.index(column_name)
    elif column_index is not None:
        selector = column_index
    else:
        return None
    return [row[selector] for row in self.data]
def get_column_names_string(
self)
Returns the column names as a string using options
def get_column_names_string(self):
    """Render the column-names section as a single string, honoring the
    begin/end tokens and the delimiter configured in self.options."""
    begin_token = self.options["column_names_begin_token"]
    if begin_token is None:
        begin_token = ""
    end_token = self.options["column_names_end_token"]
    if end_token is None:
        end_token = ""
    if self.column_names is None:
        body = ""
    elif type(self.column_names) is StringType:
        # already a rendered string
        body = self.column_names
    elif type(self.column_names) is ListType:
        body = list_to_string(self.column_names,
                              data_delimiter=self.options["column_names_delimiter"],
                              end="")
    else:
        body = ensure_string(self.column_names)
    return begin_token + body + end_token
def get_data_dictionary_list(
self, use_row_formatter_string=True)
Returns a python list with a row dictionary of form {column_name:data_column}
def get_data_dictionary_list(self, use_row_formatter_string=True):
    """Return the data as a list of row dictionaries of the form
    {column_name: formatted_value}.

    When use_row_formatter_string is True and the option is defined,
    each value is formatted with its per-column piece of
    row_formatter_string; otherwise plain "{0}" formatting is used.
    Re-raises after printing a hint when formatting fails.
    """
    try:
        if self.options["row_formatter_string"] is None:
            use_row_formatter_string = False
        if use_row_formatter_string:
            pieces = self.options["row_formatter_string"].split("{delimiter}")
            formatters = [piece.replace("{" + str(position), "{0")
                          for position, piece in enumerate(pieces)]
        else:
            formatters = ["{0}" for _ in self.column_names]
        rows = []
        for line in self.data:
            rows.append({self.column_names[i]: formatters[i].format(value)
                         for i, value in enumerate(line)})
        return rows
    except:
        print("Could not form a data_dictionary_list, check that row_formatter_string is properly defined")
        raise
def get_data_string(
self)
Returns the data as a string
def get_data_string(self):
    "Returns the data as a string"
    #Todo:refactor to cut out unused lines
    string_out=""
    if self.data is None:
        string_out= ""
    else:
        if type(self.data) is StringType:
            # data is already rendered text: just ensure begin/end tokens
            # are present without duplicating them
            if self.options['data_begin_token'] is None:
                if self.options['data_end_token'] is None:
                    string_out=self.data
                else:
                    if re.search(self.options['data_end_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.data+self.options['data_end_token']
            else:
                if self.options['data_end_token'] is None:
                    if re.match(self.options['data_begin_token'],self.data):
                        string_out=self.data
                    else:
                        string_out=self.options['data_begin_token']+self.data
                # NOTE(review): the begin-token AND end-token combination is
                # not handled for string data -- string_out stays "" there.
        elif type(self.data) in [ListType,np.ndarray]:
            try:
                #If the first row is a string, we should strip all the tokens and add them back in
                if type(self.data[0]) is StringType:
                    if self.options['data_begin_token'] is None:
                        string_out=string_list_collapse(self.data)
                    else:
                        if re.match(self.options['data_begin_token'],self.data[0]):
                            if self.options['data_end_token'] is None:
                                string_out=string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=string_list_collapse(self.data)
                                else:
                                    string_out=string_list_collapse(self.data)+self.options['data_end_token']
                        else:
                            if self.options['data_end_token'] is None:
                                string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                            else:
                                if re.search(self.options['data_end_token'],self.data[-1]):
                                    string_out=self.options['data_begin_token']+string_list_collapse(self.data)
                                else:
                                    string_out=self.options['data_begin_token']+\
                                               string_list_collapse(self.data)+\
                                               self.options['data_end_token']
                elif type(self.data[0]) in [ListType,np.ndarray]:
                    # rows of values: render each row with the row formatter
                    prefix=""
                    if self.options['data_begin_token'] is None:
                        if self.options['data_end_token'] is None:
                            string_out=list_list_to_string(self.data,data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                    else:
                        if self.options['data_end_token'] is None:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])
                        else:
                            string_out=self.options['data_begin_token']+\
                                       list_list_to_string(self.data,
                                                           data_delimiter=self.options['data_delimiter'],
                                                           row_formatter_string=\
                                                           self.options['row_formatter_string'],
                                                           line_begin=self.options["row_begin_token"],
                                                           line_end=self.options["row_end_token"])+\
                                       self.options['data_end_token']
                else:
                    # flat list of scalar values
                    string_out=list_to_string(self.data,
                                              data_delimiter=self.options['data_delimiter'],
                                              row_formatter_string=self.options['row_formatter_string'],
                                              begin=self.options["row_begin_token"],
                                              end=self.options["row_end_token"])
            except IndexError:
                # empty data list: fall through with string_out unchanged
                pass
        else:
            string_out=ensure_string(self.data)
    return string_out
Returns the footer using options in self.options. If block comment is specified, and the footer is a list it will block comment out the footer. If comment_begin and comment_end are specified it will use those to represent each line of the footer. If footer_begin_token and/or footer_end_token are specified it will wrap the footer in those.
def get_header_string(
self)
Returns the header using options in self.options. If block comment is specified, and the header is a list it will block comment out the header. If comment_begin and comment_end are specified it will use those to represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap the header in those.
def get_header_string(self):
    """Returns the header using options in self.options. If block comment is specified, and the header is a list it
    will block comment out the header. If comment_begin and comment_end are specified it will use those to
    represent each line of the header. If header_begin_token and/or header_end_token are specified it will wrap
    the header in those. """
    string_out=""
    header_begin=""
    header_end=""
    if self.options["header_begin_token"] is None:
        header_begin=""
    else:
        header_begin=self.options["header_begin_token"]
    if self.options["header_end_token"] is None:
        header_end=""
    else:
        header_end=self.options["header_end_token"]
    # This writes the header
    if self.header is None:
        string_out= ""
    elif self.options["header_line_types"] is not None:
        # per-line rendering: each entry of header_line_types dictates how
        # the header line with the same index is written
        for index,line in enumerate(self.options["header_line_types"]):
            # no trailing newline after the very last header line
            if index == len(self.options["header_line_types"])-1:
                end=''
            else:
                end='\n'
            if line in ['header','header_line','normal']:
                string_out=string_out+self.header[index]+end
            elif line in ['line_comment','comment']:
                string_out=string_out+line_comment_string(self.header[index],
                                                          comment_begin=self.options["comment_begin"],
                                                          comment_end=self.options["comment_end"])+end
            elif line in ['block_comment','block']:
                # consecutive block lines are accumulated between
                # block_comment_begin/block_comment_end and emitted when the
                # run of block lines ends (or the header ends)
                if index-1<0:
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                elif self.options["header_line_types"][index-1] not in ['block_comment','block']:
                    block_comment_begin=index
                    block_comment_end=index+2
                    continue
                else:
                    if index+1>len(self.options["header_line_types"])-1:
                        # run extends to the end of the header
                        string_out=string_out+line_list_comment_string(self.header[block_comment_begin:],
                                                                       comment_begin=self.options['block_comment_begin'],
                                                                       comment_end=self.options['block_comment_end'],
                                                                       block=True)+end
                    elif self.options["header_line_types"][index+1] in ['block_comment','block']:
                        block_comment_end+=1
                    else:
                        string_out=string_out+\
                                   line_list_comment_string(self.header[block_comment_begin:block_comment_end],
                                                            comment_begin=self.options['block_comment_begin'],
                                                            comment_end=self.options['block_comment_end'],
                                                            block=True)+end
            else:
                # unrecognized line type: emit the type marker itself
                string_out=string_out+line
    elif self.options['treat_header_as_comment'] in [None,True] and self.options["header_line_types"] in [None]:
        # Just happens if the user has set self.header manually
        if type(self.header) is StringType:
            string_out=line_comment_string(self.header,
                                           comment_begin=self.options["comment_begin"],
                                           comment_end=self.options["comment_end"])
            #string_out=re.sub('\n','',string_out,count=1)
        elif type(self.header) is ListType:
            if self.options['block_comment_begin'] is None:
                if self.options['comment_begin'] is None:
                    string_out=string_list_collapse(self.header)
                else:
                    string_out=line_list_comment_string(self.header,comment_begin=self.options['comment_begin'],
                                                        comment_end=self.options['comment_end'])
                    lines_out=string_out.splitlines()
            else:
                string_out=line_list_comment_string(self.header,comment_begin=self.options['block_comment_begin'],
                                                    comment_end=self.options['block_comment_end'],block=True)
    else:
        # header is not to be commented: stringify it as-is
        string_out=ensure_string(self.header,list_delimiter="\n",end_if_list="")
    return header_begin+string_out+header_end
def get_options(
self)
Prints the option list
def get_options(self):
    """Print every key = value pair in the option dictionary.

    Fix: uses .items() (valid on both Python 2 and 3) instead of the
    Python-2-only .iteritems().
    """
    for key, value in self.options.items():
        print("{0} = {1}".format(key, value))
def get_options_by_element(
self, element_name)
returns a dictionary of all the options that have to do with element. Element must be header,column_names,data, or footer
def get_options_by_element(self, element_name):
    """Return the subset of self.options whose keys mention element_name
    (case-insensitive substring match). element_name must be header,
    column_names, data, or footer."""
    matches = {}
    for key, value in self.options.items():
        if re.search(element_name, str(key), re.IGNORECASE):
            matches[key] = value
    return matches
def get_row(
self, row_index=None)
Returns the row as a list specified by row_index
def get_row(self, row_index=None):
    """Return the data row at row_index as a list, or None when no index
    is supplied."""
    if row_index is None:
        return None
    return self.data[row_index]
def is_valid(
self)
Returns True if ascii table conforms to its specification given by its own options
def is_valid(self):
    """Returns True if ascii table conforms to its specification given by its own options"""
    # copy the current options so a scratch table can be built with them
    options={}
    for key,value in self.options.iteritems():
        options[key]=value
    # blank out the element content (header, data, ...) while preserving
    # which elements are present (None vs empty list)
    for element in self.elements:
        if self.__dict__[element] is None:
            options[element]=None
        else:
            options[element]=[]
    options["validate"]=True
    # round-trip check: render this table to text, re-parse it into a fresh
    # table, and compare the result with self
    newtable=AsciiDataTable(None,**options)
    lines=self.build_string().splitlines()
    for index,line in enumerate(lines):
        lines[index]=line+"\n"
    newtable.lines=lines
    newtable.__parse__()
    newtable.update_model()
    # The new table rows are not being coerced into the right format
    return self==newtable
def lines_defined(
self)
Returns True if begin_line and end_line are defined for every element that is not None
def lines_defined(self):
    """Return True when every populated element (ignoring inline_comments
    and metadata) has both its begin_line and end_line options defined.

    The final element's end_line may legitimately be None as long as its
    begin_line is set. Returns False when an element's line options are
    missing entirely.

    Fix: the original indexed truth_table[-1] unconditionally and raised
    IndexError when no element contributed an entry; that case now
    returns True (vacuously defined).
    """
    truth_table = []
    last_element = ""
    for element in self.elements:
        if element not in ['inline_comments', 'metadata'] and self.__dict__[element] is not None:
            try:
                last_element = element
                if None not in [self.options['%s_begin_line' % element],
                                self.options['%s_end_line' % element]]:
                    truth_table.append(True)
                else:
                    truth_table.append(False)
            except:
                # missing option keys mean the lines cannot be defined
                return False
    if not truth_table:
        return True
    # the end_line of the last element is fine to be None
    if truth_table[-1] is False and self.options['%s_begin_line' % last_element] is not None:
        truth_table[-1] = True
    return False not in truth_table
Moves the DataTable's footer to the header and updates the model
def remove_column(
self, column_name=None, column_index=None)
Removes the column specified by column_name or column_index and updates the model. The column is removed from column_names, data and if present column_types, column_descriptions and row formatter
def remove_column(self, column_name=None, column_index=None):
    """Remove the column given by column_name or column_index and keep
    the model consistent (implements the previously empty stub).

    The column is removed from column_names and from every data row,
    and, when present in self.options, from 'column_types',
    'column_descriptions' (dict keyed by name, or a parallel list) and
    'row_formatter_string' (remaining positional fields are
    renumbered). Does nothing when neither selector is supplied.
    """
    if column_index is None:
        if column_name is None:
            return
        column_index = self.column_names.index(column_name)
    removed_name = self.column_names.pop(column_index)
    for row in self.data:
        row.pop(column_index)
    if self.options.get('column_types'):
        self.options['column_types'].pop(column_index)
    descriptions = self.options.get('column_descriptions')
    if descriptions:
        if isinstance(descriptions, dict):
            descriptions.pop(removed_name, None)
        else:
            descriptions.pop(column_index)
    if self.options.get('row_formatter_string'):
        # drop this column's format piece and shift higher field indices down
        pieces = self.options['row_formatter_string'].split("{delimiter}")
        del pieces[column_index]
        renumbered = [re.sub(r'\{(\d+)',
                             lambda match: ('{' + str(int(match.group(1)) - 1))
                             if int(match.group(1)) > column_index
                             else match.group(0),
                             piece)
                      for piece in pieces]
        self.options['row_formatter_string'] = "{delimiter}".join(renumbered)
def remove_row(
self, row_index)
Removes the row specified by row_index and updates the model. Note index is relative to the data attribute so to remove the first row use row_index=0 and the last data row is row_index=-1
def remove_row(self, row_index):
    """Delete the data row at row_index (0 is the first data row, -1 the
    last) and refresh the model afterwards."""
    del self.data[row_index]
    self.update_model()
def save(
self, path=None, **temp_options)
Saves the file; to save in another ascii format specify elements in temp_options. The options specified do not permanently change the object's options. If path is supplied it saves the file to that path, otherwise it uses the object's path attribute to define the saving location
def save(self, path=None, **temp_options):
    """Save the table to disk.

    temp_options are applied only for this save and the object's options
    are restored afterwards; when path is None the object's own ``path``
    attribute is used.

    Fixes: malformed four-quote docstring opener; Python-2-only
    iteritems(); the file handle is now context-managed (it leaked on a
    write error); options are restored from a shallow COPY -- the
    original kept a reference, so the "restoration" silently made
    temp_options permanent.
    """
    original_options = self.options.copy()
    for key, value in temp_options.items():
        self.options[key] = value
    out_string = self.build_string(**temp_options)
    if path is None:
        path = self.path
    with open(path, 'w') as file_out:
        file_out.write(out_string)
    self.options = original_options
def save_schema(
self, path=None, format=None)
Saves the tables options as a text file or pickled dictionary (default). If no name is supplied, autonames it and saves
def save_schema(self, path=None, format=None):
    """Save self.options as a pickled dictionary (default) or a text file.

    path: destination file; auto-generated (auto_name) when None.
    format: None/'python'/'pickle' writes a pickle; 'txt'/'text'/'.txt'
    writes one "key : value" line per option with newlines escaped.

    Fixes: dict.keys() has no .sort() on Python 3 -- use sorted();
    file handles are now context-managed (the pickle handle was never
    closed).
    """
    if path is None:
        path = auto_name(self.name.replace('.' + self.options["extension"], ""),
                         'Schema', self.options["directory"], 'txt')
    if format in [None, 'python', 'pickle']:
        with open(path, 'wb') as dump_file:
            pickle.dump(self.options, dump_file)
    elif format in ['txt', 'text', '.txt']:
        with open(path, 'w') as file_out:
            for key in sorted(self.options.keys()):
                out_key = str(key).replace("\n", "\\n")
                out_value = str(self.options[key]).replace("\n", "\\n")
                file_out.write("{0} : {1} \n".format(out_key, out_value))
def show(
self)
def show(self):
    """Plot magnitude and phase of S11, S21 and S22 against frequency in
    a 3x2 grid of subplots and display the figure."""
    fig, axes = plt.subplots(nrows=3, ncols=2)
    panels = [('magS11', 'k-o', 'Magnitude S11'),
              ('argS11', 'ro', 'Phase S11'),
              ('magS21', 'k-o', 'Magnitude S21 in dB'),
              ('argS21', 'ro', 'Phase S21'),
              ('magS22', 'k-o', 'Magnitude S22'),
              ('argS22', 'ro', 'Phase S22')]
    frequency = self.get_column('Frequency')
    for axis, (column, line_format, title) in zip(axes.flat, panels):
        axis.plot(frequency, self.get_column(column), line_format)
        axis.set_title(title)
    plt.tight_layout()
    plt.show()
def structure_metadata(
self)
Returns a dictionary of key,value pairs extracted from the header
def structure_metadata(self):
    """Populate self.metadata by pairing a fixed, ordered list of NIST
    metadata keys with the corresponding lines of self.header."""
    keys = ["System_Id", "System_Letter", "Connector_Type_Calibration", "Connector_Type_Measurement",
            "Measurement_Type", "Measurement_Date", "Measurement_Time", "Program_Used", "Program_Revision",
            "Operator", "Calibration_Name", "calibration_date", "Port_Used", "Number_Connects",
            "Number_Repeats", "Nbs", "Number_Frequencies", "Start_Frequency",
            "Device_Description", "Device_Id"]
    self.metadata = {key: self.header[position] for position, key in enumerate(keys)}
def update_column_names(
self)
Update column names adds the value x# for any column that exists in self.data that is not named
def update_column_names(self):
    """Make column_names consistent with the width of self.data.

    - no data: nothing to do
    - column_names is None: invent names x0, x1, ...
    - column_names is a delimited string: split it into a list
    - fewer names than data columns: pad with x<position>
    """
    if self.data is None:
        return
    if self.column_names is None:
        self.column_names = ["x" + str(position)
                             for position in range(len(self.data[0]))]
        return
    if type(self.column_names) is StringType:
        self.column_names = split_row(self.column_names,
                                      self.options["column_names_delimiter"])
        return
    width = len(self.data[0])
    if len(self.column_names) < width:
        for position in range(len(self.column_names), width):
            self.column_names.append("x" + str(position))
    return
def update_import_options(
self, import_table)
Updates the options in the import table
def update_import_options(self, import_table):
    """Copy begin/end line numbers and tokens for each populated element
    (header, column_names, data, footer) from import_table into
    self.options.

    import_table is indexed by element position; each entry is a
    sequence [begin_line, end_line, begin_token, end_token].

    Fix: removed leftover debugging print statements.
    """
    for position, element in enumerate(['header', 'column_names', 'data', 'footer']):
        if self.__dict__[element] is not None:
            (self.options['%s_begin_line' % element],
             self.options['%s_end_line' % element],
             self.options['%s_begin_token' % element],
             self.options['%s_end_token' % element]) = import_table[position][:]
def update_index(
self)
Updates the index column if it exists, otherwise exits quietly
def update_index(self):
    """Rewrite the 'index' column (when present) so its values are the
    row numbers 0..n-1; quietly does nothing otherwise."""
    if 'index' not in self.column_names:
        return
    try:
        # normally position 0, but look it up just in case
        position = self.column_names.index('index')
        for row_number, row in enumerate(self.data):
            row[position] = row_number
    except:
        pass
def update_model(
self)
Updates the model after a change has been made. If you add anything to the attributes of the model, or change this updates the values. If the model has an index column it will make sure the numbers are correct. In addition, it will update the options dictionary to reflect added rows, changes in deliminators etc.
def update_model(self):
    """Re-synchronize derived state after the table has been edited:
    refresh the index column, strip stray newlines from header /
    column_names / footer entries, reconcile column names with the
    data width, re-coerce data cell types, and rebuild the cached
    string and lines representations."""
    if self.column_names is not None and 'index' in self.column_names:
        self.update_index()
    # newline characters inside these list elements would corrupt the
    # rebuilt string, so strip them (data newlines are handled on import)
    for element in ["header", "column_names", "footer"]:
        current = self.__dict__[element]
        if current is not None:
            for position, item in enumerate(current):
                current[position] = item.replace("\n", "")
    self.update_column_names()
    if self.data is not None:
        self.data = convert_all_rows(self.data, self.options["column_types"])
    self.string = self.build_string()
    self.lines = self.string.splitlines()