def email(self):
    """Load and parse the CSV variables file, exiting with a clear message on failure.

    Raises
    ------
    SystemExit
        If ``self.csv_file`` does not exist, or if it cannot be parsed as CSV.
    """
    try:
        variables = Variables(self.csv_file)
        variables.read()
    except IOError:
        raise SystemExit("%s not found" % self.csv_file)
    except csv.Error as e:
        # "except X, e" and "e.message" are Python-2-only; "as e" / str(e)
        # work on Python 2.6+ and Python 3 alike.
        raise SystemExit(str(e))
def substitute(self):
    """Re-run variable substitution over every configured request part,
    using the body of the previous response as the variable source.

    Does nothing if no response has been received yet.
    """
    from variables import Variables

    if not self._last_response:
        return
    resolver = Variables(self._last_response.body)
    if self._url:
        self.url(resolver.substitute(self._url))
    if self._header:
        self.headers(resolver.substitute(self._header))
    if self._params:
        self.params(resolver.substitute(self._params))
    if self._data:
        self.data(resolver.substitute(self._data))
def set_equations(self, conf_equations=None, user=None, keep_solvers=False,
                  make_virtual=False):
    """
    Set equations of the problem using the `equations` problem
    description entry.

    Fields and Regions have to be already set. Rebuilds variables,
    materials and integrals as a side effect; unless `keep_solvers`
    is True, any existing solvers are invalidated.
    """
    conf_equations = get_default(conf_equations,
                                 self.conf.get_default_attr('equations', None))
    # Order matters: variables must exist before equations are assembled.
    self.set_variables()
    variables = Variables.from_conf(self.conf_variables, self.fields)
    self.set_materials()
    materials = Materials.from_conf(self.conf_materials, self.functions)
    self.integrals = self.get_integrals()
    equations = Equations.from_conf(conf_equations, variables,
                                    self.domain.regions, materials,
                                    self.integrals, user=user,
                                    make_virtual=make_virtual)
    self.equations = equations
    if not keep_solvers:
        # Solvers were built against the old equations; drop them.
        self.solvers = None
def set_variables(self, conf_variables=None):
    """Build problem variables from configuration and reset dependent state.

    Any previously assembled matrix, solvers and equations are invalidated,
    since they were built against the old variables.
    """
    conf_variables = get_default(conf_variables, self.conf.variables)
    new_variables = Variables.from_conf(conf_variables, self.fields)
    # Must run after fields.setup_global_base().
    new_variables.setup_dof_info()
    self.variables = new_variables
    # Invalidate everything derived from the previous variables.
    self.mtx_a = None
    self.solvers = None
    self.clear_equations()
def create_variables(self, var_names=None):
    """
    Create variables with names in `var_names`.

    Their definitions have to be present in `self.conf.variables`.

    Notes
    -----
    This method does not change `self.equations`, so it should not
    have any side effects.
    """
    conf = (self.conf.variables if var_names is None
            else self.select_variables(var_names, only_conf=True))
    return Variables.from_conf(conf, self.fields)
import threading
import camera_control
import database_update
import relay_control
import printer_values
from variables import Variables

# Shared state object handed to every worker thread.
cam_data = Variables()

# One worker per subsystem, all sharing cam_data; started in the same
# order they are listed.
workers = [
    threading.Thread(target=camera_control.camera, args=(cam_data, )),
    threading.Thread(target=database_update.update, args=(cam_data, )),
    threading.Thread(target=relay_control.relay, args=(cam_data, )),
    threading.Thread(target=printer_values.printer_val, args=(cam_data, )),
]
for worker in workers:
    worker.start()
# Tail of an enclosing method (its ``def`` lies above this chunk): put the
# rendered pixmap on the scene and stop the busy spinner.
self.scene.addPixmap(pixmap)
self.spinner.stop()

def show_template_manager(self):
    """Show the template manager window."""
    self.template_manager.show()

@pyqtSlot(int)
def update_template(self, an_id: TemplateID):
    """Recompile the document for the template identified by ``an_id``."""
    self.compile(template_id=an_id)

if __name__ == "__main__":
    import sys
    from db_utils import my_connect
    from variables import Variables

    app = QApplication(sys.argv)
    a_conn = my_connect()
    # region: necessary data for the variable-details widget
    app_var = Variables(my_conn=a_conn)
    # NOTE(review): record id 6 looks like a hard-coded demo record — confirm.
    a_record = app_var.record(6)
    # endregion
    dlg = LatexWidget()
    dlg.set_latex_data(a_record.latex_obj)
    sys.exit(app.exec_())
from variables import Variables


def command_exec(cmd):
    """Executes the command"""
    if cmd == '/exit':
        print('Bye!')
        quit()
    elif cmd == '/help':
        print('This program calculates all operations. | eg: 1 + 2 * 6 / 5 + (2 - 3) ^ 2')
        print('You can also store values in variables. | eg: x = 2')
    else:
        print('Unknown command')


# Main
calculator = Variables()
print('WELCOME TO THE SMORT CALCULATOR\n\n')
while True:
    usr_input = input('Please enter your expression: ')
    if usr_input.startswith('/'):
        # A slash prefix marks a command rather than an expression.
        command_exec(usr_input)
    elif usr_input:
        # Anything non-empty is treated as an expression/assignment.
        calculator.main(usr_input)
    # Blank input is simply ignored and the prompt repeats.
def amdock_load(self):
    """Load a saved .amdock project file and populate the results tables."""
    # Human-readable names for the completeness flags of a docking project.
    elements = {
        0: 'Working Directory',
        1: 'Input Directory',
        2: 'Results Directory',
        3: 'PDBQT of Target Protein',
        4: 'All Poses File of Target Result',
        5: 'Best Pose File of Target Result',
        6: 'PDBQT of Off-Target Protein',
        7: 'All Poses File of Off-Target Result',
        8: 'Best Pose File of Off-Target Result'
    }
    # Same idea for score-only projects (mode 2).
    elements_score = {
        0: 'Working Directory',
        1: 'Input Directory',
        2: 'Results Directory',
        3: 'PDBQT of Target Protein',
        4: 'PDBQT of Ligand'
    }
    self.AMDock.statusbar.showMessage(" Loading .amdock file...", 2000)
    # if self.
    self.data = self.AMDock.loader.load_amdock_file()
    if self.data:
        if self.AMDock.project.mode == 1:
            # Mode 1 carries both target and off-target result tables.
            self.table1 = self.data[0]
            self.complete = self.data[1]
            self.table2 = self.data[2]
        else:
            self.table1 = self.data[0]
            self.complete = self.data[1]
        errlist = ''
        errlist2 = []
        if self.AMDock.project.mode != 2:
            # Collect every element flagged '1' (missing / inaccessible).
            for index in range(0, len(self.complete)):
                if self.complete[index] == '1':
                    errlist += '\n-%s' % elements[index]
            if len(errlist) != 0:
                QtGui.QMessageBox.critical(
                    self, 'Error',
                    'Some files defined in .amdock file were not found or '
                    'they are inaccessible.\nMissing elements:%s' % errlist)
                self.import_text.clear()
                # Reset the widget state back to its defaults.
                Variables.__init__(self.AMDock)
            else:
                os.chdir(self.AMDock.project.results)
                self.prot_label.setText('Target: %s' % self.AMDock.target.name)
                self.prot_label_sel.setText('%s' % self.AMDock.target.name)
                self.result_table.setRowCount(len(self.table1))
                self.sele1.setRange(1, len(self.table1))
                f = 0
                for x in self.table1:
                    c = 0
                    for item in x:
                        item = str(item)
                        self.result_table.setItem(
                            f, c, QtGui.QTableWidgetItem(item))
                        self.result_table.item(
                            f, c).setTextAlignment(QtCore.Qt.AlignHCenter |
                                                   QtCore.Qt.AlignVCenter)
                        # Column 4 is ligand efficiency; highlight favorable
                        # values (<= -0.3) in green.
                        if c == 4:
                            item_v = float(item)
                            if item_v <= -0.3:
                                self.result_table.item(
                                    f, c).setBackgroundColor(
                                        QtGui.QColor(0, 255, 128, 200))
                        c += 1
                    f += 1
                self.value1 = float(self.result_table.item(0, 1).text())
                self.result_table.item(0, 1).setBackgroundColor(
                    QtGui.QColor('darkGray'))
                selection_model = self.result_table.selectionModel()
                selection_model.select(
                    self.result_table.model().index(0, 0),
                    QtGui.QItemSelectionModel.ClearAndSelect)
                if self.AMDock.project.bsd_mode_target == 0:
                    self.result_table.setHorizontalHeaderLabels(
                        QtCore.QString(
                            "Binding Site;Affinity(kcal/mol);Estimated Ki;Ki Units;Ligand Efficiency"
                        ).split(";"))
                else:
                    self.result_table.setHorizontalHeaderLabels(
                        QtCore.QString(
                            "Pose;Affinity(kcal/mol);Estimated Ki;Ki Units;Ligand Efficiency"
                        ).split(";"))
                if self.AMDock.project.mode == 1:
                    # Off-target pane: mirror everything for the second protein.
                    self.prot_labelB.setText('Off-Target: %s' %
                                             self.AMDock.offtarget.name)
                    self.prot_label_selB.setText(
                        '%s' % self.AMDock.offtarget.name)
                    self.result_tableB.show()
                    self.selectivity_value_text.show()
                    self.selectivity.show()
                    self.sele1.show()
                    self.sele2.show()
                    self.prot_label_sel.show()
                    self.prot_label_selB.show()
                    self.div.show()
                    self.equal.show()
                    self.prot_labelB.show()
                    self.result_tableB.setRowCount(len(self.table2))
                    self.sele2.setRange(1, len(self.table2))
                    f = 0
                    for x in self.table2:
                        c = 0
                        for item in x:
                            item = str(item)
                            self.result_tableB.setItem(
                                f, c, QtGui.QTableWidgetItem(item))
                            self.result_tableB.item(f, c).setTextAlignment(
                                QtCore.Qt.AlignHCenter |
                                QtCore.Qt.AlignVCenter)
                            if c == 4:
                                item_v = float(item)
                                if item_v <= -0.3:
                                    self.result_tableB.item(
                                        f, c).setBackgroundColor(
                                            QtGui.QColor(0, 255, 128, 200))
                            c += 1
                        f += 1
                    self.value2 = float(
                        self.result_tableB.item(0, 1).text())
                    self.result_tableB.item(0, 1).setBackgroundColor(
                        QtGui.QColor('darkGray'))
                    selection_model = self.result_tableB.selectionModel()
                    selection_model.select(
                        self.result_tableB.model().index(0, 0),
                        QtGui.QItemSelectionModel.ClearAndSelect)
                    if self.AMDock.project.bsd_mode_offtarget == 0:
                        self.result_tableB.setHorizontalHeaderLabels(
                            QtCore.QString(
                                "Binding Site;Affinity(kcal/mol);Estimated Ki;Ki Units;Ligand Efficiency"
                            ).split(";"))
                    else:
                        self.result_tableB.setHorizontalHeaderLabels(
                            QtCore.QString(
                                "Pose;Affinity(kcal/mol);Estimated Ki;Ki Units;Ligand Efficiency"
                            ).split(";"))
                    # Selectivity = exp(dG difference / RT) with
                    # R = 0.001987207 kcal/(mol*K) at T = 298 K.
                    self.selectivity_value = math.exp(
                        (self.value2 - self.value1) / (0.001987207 * 298))
                    self.selectivity_value_text.setText(
                        '%.01f' % self.selectivity_value)
        else:
            # Score-only project (mode 2).
            for index in range(0, len(self.complete)):
                if self.complete[index] == '1':
                    errlist += '\n-%s' % elements_score[index]
                    errlist2.append(index)
            if len(errlist) != 0:
                QtGui.QMessageBox.critical(
                    self, 'Error',
                    'Some files defined in .amdock file were not found or '
                    'they are inaccessible.\nMissing elements:%s' % errlist)
                self.import_text.clear()
                Variables.__init__(self.AMDock)
            else:
                os.chdir(self.AMDock.project.results)
                self.prot_label.setText('Target: %s' % self.AMDock.target.name)
                self.result_table.setRowCount(len(self.table1))
                self.sele1.setRange(1, len(self.table1))
                f = 0
                for x in self.table1:
                    c = 0
                    for item in x:
                        item = str(item)
                        self.result_table.setItem(
                            f, c, QtGui.QTableWidgetItem(item))
                        self.result_table.item(
                            f, c).setTextAlignment(QtCore.Qt.AlignHCenter |
                                                   QtCore.Qt.AlignVCenter)
                        if c == 4:
                            item_v = float(item)
                            if item_v <= -0.3:
                                self.result_table.item(
                                    f, c).setBackgroundColor(
                                        QtGui.QColor(0, 255, 128, 200))
                        c += 1
                    f += 1
                self.result_table.item(0, 1).setBackgroundColor(
                    QtGui.QColor('darkGray'))
                selection_model = self.result_table.selectionModel()
                selection_model.select(
                    self.result_table.model().index(0, 0),
                    QtGui.QItemSelectionModel.ClearAndSelect)
        # Open the project log file if one exists.
        # NOTE(review): placement of this tail inside `if self.data:` is
        # inferred from the collapsed source — confirm against the original.
        if os.path.exists(
                os.path.join(self.AMDock.project.WDIR,
                             self.AMDock.project.name + '.log')):
            self.AMDock.project.log = os.path.join(
                self.AMDock.project.WDIR,
                self.AMDock.project.name + '.log')
        if not self.AMDock.project.log:
            return
        if os.path.exists(self.AMDock.project.log):
            msg = QtGui.QMessageBox.information(
                self.AMDock, 'Information', 'This project has a log file.'
                ' Do you want to open it?',
                QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
            if msg == QtGui.QMessageBox.Yes:
                self.AMDock.log_widget.textedit.append(
                    Ft('Opening AMDock Log File...').process())
                ofile = open(self.AMDock.project.log)
                for line in ofile:
                    line = line.strip('\n')
                    self.AMDock.log_widget.textedit.append(line)
                ofile.close()
                self.AMDock.log_widget.textedit.append(
                    Ft('Opening AMDock Log File... Done.').process())
# Tail of an enclosing close-event handler (its ``def`` and the matching
# ``if`` lie above this chunk): best-effort shutdown of the embedded PyMOL
# process before accepting or rejecting the close event.
    try:
        self.result_tab.pymol.force_finished()
    except:
        pass
    event.accept()
else:
    event.ignore()

from splash_screen import SplashScreen
# from variables import Objects as ob

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    app_icon = QtGui.QIcon()
    v = Variables()
    # dw = QtGui.QDesktopWidget()
    # Register the application icon at several sizes.
    app_icon.addFile(v.app_icon, QtCore.QSize(16, 20))
    app_icon.addFile(v.app_icon, QtCore.QSize(24, 30))
    app_icon.addFile(v.app_icon, QtCore.QSize(32, 40))
    app_icon.addFile(v.app_icon, QtCore.QSize(48, 60))
    app_icon.addFile(v.app_icon, QtCore.QSize(223, 283))
    # app.setStyle("cleanlooks")
    app.setWindowIcon(app_icon)
    app.setApplicationName(
        'AMDock: Assisted Molecular Docking for AutoDock and AutoDock Vina')
    # Show the splash screen until the main window is ready.
    splash = SplashScreen(QtGui.QPixmap(v.splashscreen_path), app)
    main = AMDock()
    splash.finish(main)
    main.setWindowState(QtCore.Qt.WindowMaximized)
    # main.setMinimumSize(1080, 740)
def add_note(self, note, input_event):
    """Insert (or replace) a note at the current cursor position in the score.

    `note` is the key/character entered; `input_event` is truthy when the
    call came from a keyboard event (which already inserted a character that
    must be stripped back off).
    """
    # find insertion index of the note
    insert_index = self.index(tkinter.INSERT)
    # if the inputted key is a `, then run the break apart event
    if note == "`":
        self.break_note(insert_index)
        return
    # check if at end of the score
    if int(insert_index.split(".")[1]) == len(self.get(1.0, "end-1c")):  # PROBLEM HERE
        # if it was a keyboard event, there was an attempt to add text to the
        # text area, remove 2 characters; if not keyboard, remove one
        # character from the end (just whitespace)
        if input_event:
            string = self.get(1.0, "end-2c")
        else:
            string = self.get(1.0, "end-1c")
        # convert to upper case for easy keyboard parsing
        current_note = note.upper()
        # nuke the current text to make sure nothing unwanted is added
        self.delete("1.0", tkinter.END)
        self.insert(tkinter.END, string)
        # check validity of octave 2 (only high A is allowed; 65 == ord('A'))
        if Variables.octave == 2 and ord(current_note) > 65:
            return
        # check if current note fits in the measure
        if Variables.current_measure_length + (1.0 / float(Variables.note_length)) > Variables.get_measure_length():
            messagebox.showerror("Invalid Configuration",
                                 "ERROR: This note is too long to fit in this measure!")
            return
        # check if the note exists in the naturals table (cross-reference
        # keyboard input to see if valid input)
        if current_note in note_lookup.naturals:
            # check if rest, then add the rest symbol from the lookup class if so
            if current_note == "R":
                append = LookupNote.get_rest(Variables.note_length)
            # if not a rest, append the note from the symbol generation in the
            # lookup class
            else:
                append = LookupNote.get_note(current_note, Variables.octave,
                                             Variables.note_length,
                                             Variables.accidental,
                                             Variables.key_sig)
            # insert note
            self.insert(tkinter.END, append)
        else:
            return
        # play tone so the user knows what they just added
        Playback.play_tone(current_note, Variables.octave, Variables.accidental)
        # update the current measure length
        Variables.current_measure_length += 1.0 / float(Variables.note_length)
        # compare to see if the measure length was just finished, if so add a
        # measure line
        if math.isclose(Variables.get_measure_length(), Variables.current_measure_length):
            Variables.current_measure_length = 0.0
            self.insert(tkinter.END, "!=")
        # save the new text so it can be compared again if needed on next
        # input event
        self.save_last_text()
    else:
        # note is not at the end of the score:
        # get the current character at the insertion index
        split = insert_index.split(".")
        next_index = split[0] + "." + str(int(split[1]) + 1)
        character = self.get(insert_index, next_index)
        # if the note is one that can be replaced
        if LookupNote.replaceable(character):
            # parse the insertion index
            first_half = self.get(1.0, split[0] + "." + str(int(split[1]) - 1))
            second_half = self.get(split[0] + "." + str(int(split[1]) + 2),
                                   "end-1c")
            # generate new note from the lookup note class using current settings
            new_note = LookupNote.get_note(note.upper(), Variables.octave,
                                           LookupNote.get_note_length(character),
                                           Variables.accidental,
                                           Variables.key_sig)
            # if there is an accidental symbol (code points 208-254), remove it
            if 208 <= ord(first_half[-1]) <= 254:
                first_half = first_half[0:len(first_half) - 1]
            # delete the old text, insert the new generated text with the new
            # symbol in the insertion index
            self.delete(1.0, tkinter.END)
            self.insert(tkinter.END, first_half + new_note + second_half)
            # play the tone so they know what they inputted
            Playback.play_tone(note.upper(), Variables.octave,
                               Variables.accidental)
            # save the text for future comparisons
            self.save_last_text()
        else:
            # cancel the event if replacement is impossible
            self.cancel_event()
def _parse(self, template):
    """Render ``template`` as a Jinja2 template against the variable store."""
    compiled = jinja2.Template(template)
    return compiled.render(Variables())
def getCurrentOutsidetemp(self):
    """
    Get Current outside temperature from OpenWeatherMap using the API

    http://192.168.0.13:3480/data_request?id=status&output_format=xml&DeviceNum=113
    http://192.168.0.13:3480/data_request?id=variableget&DeviceNum=112&serviceId=urn:upnp-org:serviceId:TemperatureSensor1&Variable=CurrentTemperature
    http://192.168.0.13:3480/data_request?id=variableset&DeviceNum=103&serviceId=urn:upnp-org:serviceId:TemperatureSensor1&Variable=CurrentTemperature&Value=11.4
    VeraGetData,http://{}:{}/data_request?id=variableget&DeviceNum={}&serviceId={}&Variable={}

    Returns the temperature string read from the Vera sensor, or None when
    the Vera reading is disabled, stale (older than 15 minutes), or fails.
    """
    logger = logging.getLogger("main.max.getCurrentOutsidetemp")
    Vera_Address, Vera_Port, VeraGetData, VeraOutsideTempID, VeraOutsideTempService, VeraTemp = Variables(
    ).readVariables([
        'VeraIP', 'VeraPort', 'VeraGetData', 'VeraOutsideTempID',
        'VeraOutsideTempService', 'VeraTemp'
    ])
    if VeraTemp:
        try:
            f = urllib2.urlopen(
                VeraGetData.format(Vera_Address, Vera_Port, VeraOutsideTempID,
                                   VeraOutsideTempService,
                                   'CurrentTemperature'))
            temp_c = f.read()
            t = urllib2.urlopen(
                "http://{}:{}/data_request?id=status&output_format=xml&DeviceNum={}"
                .format(Vera_Address, Vera_Port, VeraOutsideTempID))
            t_xml = t.read()
            # The epoch timestamp sits 19 characters after "LastUpdate" in
            # the status XML; take the 10-digit seconds value.
            time_index = int(t_xml.find("LastUpdate")) + 19
            update_time = int(t_xml[time_index:(time_index + 10)])
            current_time = time.time()
            # Only trust readings newer than 15 minutes.
            if current_time - update_time <= (15 * 60):
                logger.info('Vera Outside Temperature is %s' % temp_c)
                return temp_c
            else:
                logger.info('Vera Outside Temperature out of date')
        # "except Exception, err" was Python-2-only syntax; "as err" works on
        # Python 2.6+ and Python 3.
        except Exception as err:
            logger.exception("No Vera temp data %s" % err)
def switchHeat(self):
    """Decide whether the boiler should run and push that state to Vera.

    Reads thresholds and overrides from Variables, computes the desired
    boiler state from per-room valve positions, records temperatures in the
    database and rebuilds the UI pages.
    """
    module_logger.info("main.max.switchHeat running")
    logTime = time.time()
    boilerEnabled, veraControl, Vera_Address, Vera_Port, \
        Vera_Device, singleRadThreshold, multiRadThreshold, \
        multiRadCount, AllValveTotal, ManualHeatingSwitch, boilerOverride = \
        Variables().readVariables(['BoilerEnabled', 'VeraControl', 'VeraIP',
                                   'VeraPort', 'VeraDevice',
                                   'SingleRadThreshold', 'MultiRadThreshold',
                                   'MultiRadCount', 'AllValveTotal',
                                   'ManualHeatingSwitch', 'BoilerOverride'])
    roomTemps = CreateUIPage().createRooms()
    outsideTemp = self.getCurrentOutsidetemp()
    # Add outside temp to each rooms information
    for i in range(len(roomTemps)):
        roomTemps[i] = roomTemps[i] + (str(outsideTemp), )
    # Calculate if heat is required: element 4 of each room tuple is the
    # valve position.
    valveList = []
    for room in roomTemps:
        valveList.append(room[4])
    # Heat if any single valve is wide open, enough valves are partly open,
    # or the summed valve demand is high enough.
    singleRadOn = sum(i >= singleRadThreshold for i in valveList)
    multiRadOn = sum(i >= multiRadThreshold for i in valveList)
    totalRadOn = sum(valveList)
    if singleRadOn >= 1 or multiRadOn >= multiRadCount or totalRadOn >= AllValveTotal:
        boilerState = 1
    else:
        boilerState = 0
    # Boiler Override: explicit 1/0 forces the state either way.
    if boilerOverride == 1:
        module_logger.info('Boiler overridden ON')
        boilerState = 1
    if boilerOverride == 0:
        boilerState = 0
        module_logger.info('Boiler overridden OFF')
    module_logger.info("main.max.switchHeat Boiler is %s, Heating is %s" %
                       (boilerEnabled, boilerState))
    # Update Temps database
    DbUtils().insertTemps(roomTemps)
    if boilerEnabled:
        # Best-effort push to Vera; VeraOK records whether it succeeded.
        try:
            _ = requests.get(veraControl.format(Vera_Address, Vera_Port,
                                                Vera_Device,
                                                str(boilerState)),
                             timeout=5)
            Variables().writeVariable([['VeraOK', 1]])
            module_logger.info('message sent to Vera')
        except:
            Variables().writeVariable([['VeraOK', 0]])
            module_logger.info("vera is unreachable")
        # Set Manual Boiler Switch if enabled
        # if ManualHeatingSwitch:
        #     module_logger.info("Switching local Relay %s" %boilerState)
        #     relayHeating(boilerState)
    else:
        # Boiler disabled: always command the off state.
        boilerState = 0
        try:
            _ = requests.get(veraControl.format(Vera_Address, Vera_Port,
                                                Vera_Device, boilerState),
                             timeout=5)
            Variables().writeVariable([['VeraOK', 1]])
            module_logger.info("Boiler is Disabled")
        except:
            Variables().writeVariable([['VeraOK', 0]])
            module_logger.info("vera is unreachable")
        # Set Manual Boiler Switch if enabled
        # if ManualHeatingSwitch:
        #     module_logger.info("Switching local Relay %s" %boilerState)
        #     relayHeating(boilerState)
    # Record a state change in the database; 9 is a sentinel for "unknown".
    try:
        boilerOn = DbUtils().getBoiler()[2]
    except:
        boilerOn = 9
    if boilerState != boilerOn:
        msg = (logTime, boilerState)
        DbUtils().updateBoiler(msg)
        boilerOn = boilerState
    # Create UI Pages
    CreateUIPage().saveUI(roomTemps)
def maxCmd_M(self, line, refresh):
    """ process M response Rooms and Devices

    Decodes the base64 metadata payload of a MAX! cube 'M' message and
    updates the module-level `rooms` and `devices` maps plus the database.
    (Python 2: `es` is a byte string, hence the ord() indexing.)
    """
    logger = logging.getLogger("main.max.maxCmd_M")
    expectedRoomNo = int(Variables().readVariables(['ExpectedNoOfRooms']))
    line = line.split(",")
    # Payload is base64; decode to the raw metadata bytes.
    es = base64.b64decode(line[2])
    room_num = ord(es[2])
    logger.info("Number of rooms found : {}".format(room_num))
    # Check number of rooms
    if room_num != expectedRoomNo:
        Variables().writeVariable([['RoomsCorrect', 0]])
        logger.info("RoomsCorrect set to : {}".format(0))
    else:
        Variables().writeVariable([['RoomsCorrect', 1]])
        logger.info("RoomsCorrect set to : {}".format(1))
    es_pos = 3
    this_now = datetime.datetime.now()
    for _ in range(0, room_num):
        # Each room record: id, name length, name, 3-byte RF address.
        room_id = str(ord(es[es_pos]))
        room_len = ord(es[es_pos + 1])
        es_pos += 2
        room_name = es[es_pos:es_pos + room_len]
        es_pos += room_len
        room_adr = es[es_pos:es_pos + 3]
        es_pos += 3
        if room_id not in rooms or refresh:
            # id :0room_name, 1room_address, 2is_win_open
            rooms.update(
                {room_id: [room_name, self.hexify(room_adr), False]})
    # Device records follow the room records.
    # NOTE(review): loop nesting reconstructed from collapsed source —
    # confirm the device loop is not inside the room loop.
    dev_num = ord(es[es_pos])
    es_pos += 1
    for _ in range(0, dev_num):
        dev_type = ord(es[es_pos])
        es_pos += 1
        dev_adr = self.hexify(es[es_pos:es_pos + 3])
        es_pos += 3
        dev_sn = es[es_pos:es_pos + 10]
        es_pos += 10
        dev_len = ord(es[es_pos])
        es_pos += 1
        dev_name = es[es_pos:es_pos + dev_len]
        es_pos += dev_len
        dev_room = ord(es[es_pos])
        es_pos += 1
        if dev_adr not in devices or refresh:
            # 0type 1serial 2name 3room 4OW,5OW_time, 6status,
            # 7info, 8temp offset
            devices.update({
                dev_adr: [
                    dev_type, dev_sn, dev_name, dev_room, 0, this_now, 0, 0, 7
                ]
            })
    # Persist rooms, then devices, to the database.
    dbMessage = []
    for keys in rooms:
        roomText = str(rooms[keys][0])
        # For testing ' in names
        # if roomText == "Bathroom":
        #     roomText = "Bathroom's"
        dbList = keys, roomText, rooms[keys][1]
        dbMessage.append(dbList)
    print dbMessage
    DbUtils().updateRooms(dbMessage)
    dbMessage = []
    for keys in devices:
        dbList = keys, devices[keys][0], devices[keys][1], devices[keys][2], devices[keys][3], devices[keys][5]
        dbMessage.append(dbList)
    DbUtils().updateDevices(dbMessage)
class MaxInterface():
    """Interface to the eQ-3 MAX! cube: connect, fetch data, drive heating."""

    # def __init__(self):
    #     useNeoPixel = Variables().readVariables(['UseNeoPixel'])
    #     if useNeoPixel:
    #         self.initialiseNeoPixel()

    def initialiseNeoPixel(self):
        """Start the Arduino serial worker process for the NeoPixel strip."""
        logger = logging.getLogger("main.max.initialiseNeoPixel")
        logger.info("Initialising NeoPixel Serial connection")
        self.input_queue = multiprocessing.Queue()
        self.output_queue = multiprocessing.Queue()
        self.arduino = neopixelserial.SerialProcess(self.input_queue,
                                                    self.output_queue)
        # Daemonize so the worker dies with the main process.
        self.arduino.daemon = True
        self.arduino.start()
        logger.info(self.arduino)

    def checkHeat(self, input_queue):
        """Fetch cube data, switch the heating, and update the NeoPixels."""
        useNeoPixel = Variables().readVariables(['UseNeoPixel'])
        MAXData, validData = self.getData()
        if validData:
            self.parseData(MAXData)
            self.switchHeat()
            if useNeoPixel:
                self.setNeoPixel(1, input_queue)

    def setNeoPixel(self, doChase, input_queue, *args):
        """Send the current system state to the NeoPixel controller queue."""
        # Optional trailing positional argument overrides the light value.
        light_arg = 0
        if args is not None:
            for arg in args:
                light_arg = arg
        logger = logging.getLogger("main.max.setNeoPixel")
        logger.info("Setting NeoPixel Lights")
        cube_state, vera_state, boiler_enabled, interval, boiler_override, rooms_Ok = Variables(
        ).readVariables([
            'CubeOK', 'VeraOK', 'BoilerEnabled', 'Interval', 'BoilerOverride',
            'RoomsCorrect'
        ])
        heating_state = DbUtils().getBoiler()[2]
        # overide CubeState if rooms wrong
        if not rooms_Ok:
            cube_state = 2
        sendString = '%s,%s,%s,%s,%s,%s,%s,%s\n\r' % (
            doChase, heating_state, interval, boiler_enabled, cube_state,
            vera_state, boiler_override, light_arg)
        logger.info("Sending NeoPixel %s" % sendString)
        input_queue.put(sendString)

    def createSocket(self):
        """Create a TCP socket with a 1 s timeout; exit the process on failure."""
        logger = logging.getLogger("main.max.createSocket")
        # Create Socket
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(1)
            logger.info('Socket Created')
            return s
        except socket.error:
            logger.exception("unable to create socket")
            s.close()
            sys.exit()

    def getData(self):
        """Connect to cube 1 (falling back to cube 2) and read raw MAX data.

        Returns a (message, validData) tuple; validData is True only when a
        non-empty message was received.
        """
        logger = logging.getLogger("main.max.getData")
        validData = False
        message = ""
        Max_IP, Max_IP2, Max_Port = Variables().readVariables(
            ['MaxIP', 'MaxIP2', 'MaxPort'])
        logger.info('Max Connection Starting on : IP1-%s or IP2-%s on port %s '
                    % (Max_IP, Max_IP2, Max_Port))
        cube1 = 0
        cube2 = 0
        success = False
        try:
            # Connect to Max Cube1: up to three attempts.
            while cube1 < 3 and not success:
                try:
                    logger.info('tyring to connect to Max1 on ip %s on try %s'
                                % (Max_IP, cube1))
                    s = self.createSocket()
                    s.connect((Max_IP, int(Max_Port)))
                    logger.info('Socket Connected to Max1 on ip %s on try %s'
                                % (Max_IP, cube1))
                    Variables().writeVariable([['ActiveCube', 1]])
                    Variables().writeVariable([['CubeOK', 1]])
                    success = True
                except Exception, err:
                    s.close()
                    Variables().writeVariable([['CubeOK', 0]])
                    logger.exception(
                        "unable to make connection, trying later %s" % err)
                    time.sleep(2)
                    cube1 += 1
            if not success:
                raise Exception("Cube 1 fail")
        except Exception, err:
            logger.exception(
                "unable to make connection to Cube 1, trying Cube2 %s" % err)
            # Connect to Max Cube2: same retry scheme as cube 1.
            while cube2 < 3 and not success:
                try:
                    logger.info('tyring to connect to Max2 on ip %s on try %s'
                                % (Max_IP2, cube2))
                    s = self.createSocket()
                    s.connect((Max_IP2, int(Max_Port)))
                    logger.info('Socket Connected to Max2 on ip %s' % Max_IP2)
                    Variables().writeVariable([['ActiveCube', 2]])
                    Variables().writeVariable([['CubeOK', 1]])
                    success = True
                except Exception, err:
                    s.close()
                    Variables().writeVariable([['CubeOK', 0]])
                    logger.exception(
                        "unable to make connection, trying later %s" % err)
                    time.sleep(2)
                    cube2 += 1
                    # NOTE(review): the four statements below duplicate the
                    # ones just above; this looks like a duplicated span from
                    # the source extraction — confirm against the original.
                    Variables().writeVariable([['CubeOK', 0]])
                    logger.exception(
                        "unable to make connection, trying later %s" % err)
                    time.sleep(2)
                    cube2 += 1
        # Read until the 1 s socket timeout fires, which ends the message.
        try:
            while 1:
                reply = s.recv(1024)
                message += reply
        except:
            logger.info("Message ended")
        if message != "":
            Variables().writeVariable([['CubeOK', 1]])
            validData = True
        s.close()
        logger.debug(message)
        return (message, validData)

    def parseData(self, message):
        """Split the raw cube message into lines and dispatch by record type."""
        print_on = int(Variables().readVariables(['PrintData']))
        message = message.split('\r\n')
        for lines in message:
            # print lines
            try:
                if lines[0] == 'H':
                    self.maxCmd_H(lines)
                    # NOTE(review): chunk is truncated here — the matching
                    # except clause and remaining dispatch cases are not
                    # visible in this view.
def setUp(self):
    """Create fresh constraint and variable stores for each test case."""
    size = 8
    self.constraints = Constraints(size)
    self.variables = Variables(size)
def __init__(self, embs, data, stage, model_dir=None):
    """Run a full transition-based parse of one sentence.

    Depending on `stage`, transitions are chosen by an oracle
    ("ORACLETEST", "TRAIN", "COLLECT") or by the trained classifier
    (parsing); the applied actions and their features are recorded in
    `self.history`.
    """
    if model_dir is not None:
        self._classify = Classify(model_dir)
        self._labels = [item.strip() for item in
                        open(model_dir + "/relations.txt").read().splitlines()]
    else:
        self._labels = None
    if stage == "ORACLETEST":
        assert(len(data) == 4)
        hooks = False
        tokens, dependencies, relations, alignments = data
        lemmas = None
        relations2 = []
        self.gold = relations
        # Rewrite :sntN relations as :top edges and keep only relations
        # whose endpoints are aligned to tokens.
        for r in relations:
            if r[1].startswith(":snt"):
                r2 = (Node(True), ":top", r[2])
            else:
                r2 = (r[0], r[1], r[2])
            if (r2[0].token is not None or r2[1] == ":top") and r2[2].token is not None:
                relations2.append(r2)
        oracle = Oracle(relations2)
        self.variables = Variables()
    elif stage == "TRAIN" or stage == "COLLECT":
        assert(len(data) == 4)
        hooks = False
        tokens, dependencies, relations, alignments = data
        lemmas = None
        relations2 = []
        # Same gold-relation filtering as above, but no variable store.
        for r in relations:
            if r[1].startswith(":snt"):
                r2 = (Node(True), ":top", r[2])
            else:
                r2 = (r[0], r[1], r[2])
            if (r2[0].token is not None or r2[1] == ":top") and r2[2].token is not None:
                relations2.append(r2)
        oracle = Oracle(relations2)
        self.variables = None
    else:  # PARSING
        assert(len(data) == 2)
        hooks = True
        tokens, dependencies = data
        relations2 = None
        alignments = None
        oracle = None
        self.variables = Variables()
    self.state = State(embs, relations2, tokens, dependencies, alignments,
                       oracle, hooks, self.variables, stage,
                       Rules(self._labels))
    self.history = History()
    # Apply transitions until the parser state is terminal (or the action
    # source runs dry).
    while self.state.isTerminal() == False:
        # print self.state
        tok = copy.deepcopy(self.state.buffer.peek())
        if oracle is not None:
            action = oracle.valid_actions(self.state)
        else:
            action = self.classifier()
        # print action
        # raw_input()
        if action is not None:
            f_rel = []
            f_lab = []
            f_reentr = []
            # During training, collect features for the action-specific
            # sub-classifiers before the action mutates the state.
            if stage == "TRAIN":
                f_rel = self.state.rel_features()
                if action.name == "larc" or action.name == "rarc":
                    f_lab = self.state.lab_features()
                if action.name == "reduce":
                    f_reentr = self.state.reentr_features()
            self.state.apply(action)
            self.history.add((f_rel, f_lab, f_reentr), action, tok)
        else:
            break
    assert (self.state.stack.isEmpty() == True and self.state.buffer.isEmpty() == True)
class Dakota():
    """Thin driver around the Dakota optimisation toolkit.

    Writes a ``dakota.in`` input file from the study parameters, then runs
    the ``dakota`` executable inside the configured working directory.
    """

    def __init__(self, params):
        # Study layout and execution settings pulled from the parameter dict.
        self.workdir = params['general']['workdir']
        self.outputfile = params['study']['outputfile']
        self.method = get_method(params)
        self.njobs = params['study']['njobs']
        self.variables = Variables(params)
        self.restart = params['study']['restart']

    def run(self):
        """Generate the input file and execute Dakota, blocking until done."""
        self.generate_input_file()
        dakota_command = ['dakota', '-i', 'dakota.in', '-o', 'dakota.log',
                          '-w', 'dakota.rst', '-e', 'dakota.err']
        if self.restart:
            # Bug fix: append() added ['-r', 'dakota.rst'] as one nested
            # list element, which subprocess.Popen cannot execute; extend()
            # appends the two strings as separate command-line arguments.
            dakota_command.extend(['-r', 'dakota.rst'])
        with open(self.workdir + "/SSOPT.log", "w+") as logfile:
            with open(self.workdir + "/SSOPT.err", 'w+') as errfile:
                sp.Popen(dakota_command, cwd=self.workdir, stderr=errfile,
                         stdout=logfile).wait()

    def generate_input_file(self):
        """Write all sections of dakota.in into a fresh working directory."""
        self.create_workdir()
        with open(self.workdir + '/dakota.in', 'w') as f:
            self.print_header(f)
            self.print_environment(f)
            self.print_model(f)
            self.method.print_method(f)
            self.print_interface(f)
            self.variables.print_variables(f)
            self.method.print_responses(f)

    def create_workdir(self):
        # Intentionally raises if the directory already exists, so an old
        # study is never silently overwritten.
        os.mkdir(self.workdir)

    @staticmethod
    def print_header(inputfile):
        """Write the shared file header."""
        inputfile.writelines(file_header)

    def print_environment(self, inputfile):
        """Write the environment section (tabular output file)."""
        inputfile.writelines(["environment\n", " tabular_data\n",
                              " tabular_data_file = '" + self.outputfile +
                              "'\n\n"])

    @staticmethod
    def print_model(inputfile):
        """Write the (single) model section."""
        inputfile.writelines("model\n single\n\n")

    def print_interface(self, inputfile):
        """Write the interface section describing the analysis driver."""
        inputfile.writelines(["interface\n", " fork\n",
                              " analysis_driver = 'runiteration.py'\n",
                              " work_directory\n", " named = './iteration'\n",
                              " copy_files = '0/' 'constant/' 'system/'\n",
                              " directory_tag\n", " directory_save\n",
                              " failure_capture recover "])
        # One NaN recovery value per response.
        for i in range(0, self.method.number_of_responses):
            inputfile.writelines(['NaN '])
        inputfile.writelines("\n")
        inputfile.writelines(" asynchronous\n evaluation_concurr = ")
        inputfile.writelines(str(self.njobs) + "\n\n")
def setNeoPixel(self, doChase, input_queue, *args):
    """Push a comma-separated status string to the NeoPixel serial queue.

    An optional trailing positional argument overrides the light value
    (defaults to 0).
    """
    logger = logging.getLogger("main.max.setNeoPixel")
    logger.info("Setting NeoPixel Lights")
    # The last extra positional argument (if any) selects the light.
    light_arg = args[-1] if args else 0
    state = Variables().readVariables([
        'CubeOK', 'VeraOK', 'BoilerEnabled', 'Interval', 'BoilerOverride',
        'RoomsCorrect'
    ])
    cube_state, vera_state, boiler_enabled, interval, boiler_override, rooms_Ok = state
    heating_state = DbUtils().getBoiler()[2]
    if not rooms_Ok:
        # Wrong room count forces the cube indicator into the error state.
        cube_state = 2
    sendString = '%s,%s,%s,%s,%s,%s,%s,%s\n\r' % (
        doChase, heating_state, interval, boiler_enabled, cube_state,
        vera_state, boiler_override, light_arg)
    logger.info("Sending NeoPixel %s" % sendString)
    input_queue.put(sendString)
# Visualise the training data together with the separating line y = -x + 5.
features, labels = data[0], data[1]
plt.scatter(features[:, 0], features[:, 1], c=labels, cmap='coolwarm')
line_x = np.linspace(0, 11, 10)
line_y = -line_x + 5
plt.plot(line_x, line_y)

g = Graph()
graphObject = g.set_as_default()

# Build the computation w*x + b with w = [1, 1] and b = -5.
x = Placeholder()
graphObject.placeholders.append(x)  # register placeholder x

w = Variables([1, 1])
graphObject.variables.append(w)  # register variable w

b = Variables(-5)
graphObject.variables.append(b)  # register variable b

z = Addition(MatrixMultiplication(w, x, graphObject), b, graphObject)

# Squash the linear output through a sigmoid activation.
a = Sigmoid(z, graphObject)

# Execute the network once on a sample input and show the plot.
sess = Session()
print(sess.run(a, {x: [0, -10]}))
plt.show()
def NN(input_list, results_path, seed=123, hidden_layer_sizes=(30, 30, 30),
       activation='relu', solver='adam', regularization=0.0001,
       batch_size='auto', learning_rate_sch='constant',
       learning_rate_init=0.001, max_iter=100, tol=1e-4, momentum=0.9,
       nesterovs_momentum=True, early_stopping=False,
       validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
       n_iter_no_change=10):
    """
    1. Train a neural network to classify X based on Y
    2. Get classification report
    3. Get heatmap of confusion matrix
    4. Get ROC curve

    Arguments:
    - input_list: list, length = 4
        Absolute path to [X_train, Y_train, X_test, Y_test]
    - results_path: str
        Absolute path to the directory where the files will be saved
    - seed: int, optional, default = 123
        Random seed
    - hidden_layer_sizes: tuple, length = n_layers-2, default = (30,30,30)
        Number of neurons in each layer
    - activation: {'identity', 'logistic', 'tanh', 'relu'}, optional, default = 'relu'
        Activation function
    - solver: {'lbfgs', 'sgd', 'adam'}, optional, default = 'adam'
        Solver
    - regularization: float, optional, default = 0.0001
        L2 regularization term
    - batch_size: int, optional, default = 'auto'
        Minibatch size
    - learning_rate_sch: {'constant', 'invscaling', 'adaptive'}, optional, default = 'constant'
        Learning rate schedules
    - learning_rate_init: float, optional, default = 0.001
        Initial learning rate used
    - max_iter: int, optional, default = 100
        Maximum number of iterations
    - tol: float, optional, default = 1e-4
        Tolerance for the optimization
    - momentum: float, optional, default = 0.9
        Momentum for gradient descent update
    - nesterovs_momentum: boolean, optional, default = True
        Whether to use Nesterov's momentum
    - early_stopping: bool, optional, default = False
        Whether to use early stopping to terminate training when
        validation score is not improving
    - validation_fraction: float, optional, default = 0.1
        Proportion of training data to set aside as validation set
        for early stopping
    - beta_1: float in [0,1), optional, default = 0.9
        Exponential decay rate for estimates of first moment vector in adam
    - beta_2: float in [0,1), optional, default = 0.999
        Exponential decay rate for estimates of second moment vector in adam
    - n_iter_no_change: int, optional, default = 10
        Maximum number of epochs to not meet tol improvement

    Returns:
    - Trained neural network
    """
    h = Helpers()
    # CLEANUP: removed unused local `v = Variables()` — it was instantiated
    # and never read (same dead pattern as in LR).

    # Diagnostics: results dir must exist and all four inputs must be files.
    h.check_dir(results_path)
    if len(input_list) != 4:
        logger.error("{0} files found in input_list, expected 4".format(len(input_list)))
        h.error()
    X_train, Y_train, X_test, Y_test = input_list
    h.check_file_existence(X_train)
    h.check_file_existence(Y_train)
    h.check_file_existence(X_test)
    h.check_file_existence(Y_test)

    # Import datasets (paths are replaced by the loaded data).
    X_train = h.import_dataset(X_train)
    Y_train = h.import_dataset(Y_train)
    X_test = h.import_dataset(X_test)
    Y_test = h.import_dataset(Y_test)

    # Train NN
    nn = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes,
                       activation=activation,
                       solver=solver,
                       alpha=regularization,
                       batch_size=batch_size,
                       learning_rate=learning_rate_sch,
                       learning_rate_init=learning_rate_init,
                       max_iter=max_iter,
                       random_state=seed,
                       tol=tol,
                       momentum=momentum,
                       nesterovs_momentum=nesterovs_momentum,
                       early_stopping=early_stopping,
                       validation_fraction=validation_fraction,
                       beta_1=beta_1,
                       beta_2=beta_2,
                       n_iter_no_change=n_iter_no_change)
    nn.fit(X_train, Y_train)

    # get accuracy, confusion matrix and ROC AUC
    h.get_metrics(nn, [X_train, Y_train, X_test, Y_test], results_path)
    return nn
class Phase():
    '''A number of rows of text, and a stop condition (string).'''

    def __init__(self, label, stop_condition_str, lineno):
        self.label = label
        self.stop_condition_str = stop_condition_str
        self.lineno = lineno
        self.lines = list()  # List of tuples (line, lineno)

        # Set in parse
        self.stimulus_elements = None
        self.behaviors = None
        self.global_variables = None
        self.local_variables = None
        self.linelabels = list()
        self.end_condition = None
        self.phase_lines = dict()  # Keys are phase line labels, values are PhaseLine objects
        self.first_label = None
        self.curr_lineobj = None
        self.event_counter = None
        self.is_first_line = True
        self.is_inherited = False
        self.is_parsed = False
        self.first_stimulus_presented = False

    def append_line(self, line, lineno):
        # Raw lines are buffered here; parse() interprets them later.
        self.lines.append((line, lineno))

    def parse(self, parameters, global_variables):
        """Validate the buffered lines and build PhaseLine objects.

        Raises ParseException on label-only lines, labels clashing with
        stimulus elements/behaviors, and duplicate labels (unless this
        phase was inherited).
        """
        self.parameters = parameters
        self.global_variables = global_variables
        stimulus_elements = parameters.get(STIMULUS_ELEMENTS)
        behaviors = parameters.get(BEHAVIORS)
        phase_lines_afterlabel = list()
        linenos = list()
        # First iteration through lines: Create list of lines (and labels)
        for line_lineno in self.lines:
            line, lineno = line_lineno
            label, afterlabel = ParseUtil.split1_strip(line)
            if afterlabel is None:
                raise ParseException(lineno, "Phase line contains only label.")
            coincide_err = f"The phase line label '{label}' coincides with the name of a "
            if label in stimulus_elements:
                raise ParseException(lineno, coincide_err + "stimulus element.")
            elif label in behaviors:
                raise ParseException(lineno, coincide_err + "behavior.")
            if label in self.linelabels and not self.is_inherited:
                raise ParseException(lineno, f"Duplicate of phase line label '{label}'.")
            self.linelabels.append(label)
            phase_lines_afterlabel.append(afterlabel)
            linenos.append(lineno)
            if self.first_label is None:
                # Set self.first_label to the label of the first line
                self.first_label = label
        # Second iteration: Create PhaseLine objects and put in the dict self.phase_lines
        for label, after_label, lineno in zip(self.linelabels, phase_lines_afterlabel,
                                              linenos):
            self.phase_lines[label] = PhaseLine(self, lineno, label, after_label,
                                                self.linelabels, self.parameters,
                                                self.global_variables)
            if label == "new_trial":
                # Change self.first_label to the new_trial line
                self.first_label = label
        self.initialize_local_variables()
        self.event_counter = PhaseEventCounter(self.linelabels, self.parameters)
        self.subject_reset()
        self.is_parsed = True

    def initialize_local_variables(self):
        # Fresh, empty set of phase-local variables.
        self.local_variables = Variables()

    def subject_reset(self):
        """Reset all per-subject run state so the phase can be replayed."""
        self.event_counter = PhaseEventCounter(self.linelabels, self.parameters)
        self.stop_condition = EndPhaseCondition(self.lineno, self.stop_condition_str)
        self._make_current_line(self.first_label)
        self.prev_linelabel = None
        self.is_first_line = True
        self.initialize_local_variables()
        self.first_stimulus_presented = False

    def next_stimulus(self, response, ignore_response_increment=False,
                      preceeding_help_lines=None, omit_learn=None):
        """Advance the phase state machine one step and return
        (stimulus, row_label, preceeding_help_lines, omit_learn).

        Returns (None, None, help_lines, None) when the stop condition is
        met. Help lines (lines without a stimulus) perform their action and
        recurse to the next line with response counting suppressed.
        """
        # if not self.is_parsed:
        #     raise Exception("Internal error: Cannot call Phase.next_stimulus" +
        #                     " before Phase.parse().")
        if preceeding_help_lines is None:
            preceeding_help_lines = list()
        if not ignore_response_increment:
            # if not self.is_first_line:
            if response is not None:
                self.event_counter.increment_count(response)
                self.event_counter.increment_count_line(response)
                self.event_counter.set_last_response(response)
        if self.first_stimulus_presented:
            variables_both = Variables.join(self.global_variables, self.local_variables)
            if self.stop_condition.is_met(variables_both, self.event_counter):
                return None, None, preceeding_help_lines, None
        if self.is_first_line:
            # assert(response is None)
            rowlbl = self.first_label
            self.is_first_line = False
            omit_learn = True  # Since response is None, there is no learning (updating of v-values) to do
        else:
            rowlbl, omit_learn = self.curr_lineobj.next_line(response,
                                                             self.global_variables,
                                                             self.local_variables,
                                                             self.event_counter)
            self.prev_linelabel = self.curr_lineobj.label
            self._make_current_line(rowlbl)
        stimulus = self.phase_lines[rowlbl].stimulus
        if stimulus is not None:
            for element, intensity in stimulus.items():
                if type(intensity) is str:  # element[var] where var is a (local) variable
                    variables_both = Variables.join(self.global_variables,
                                                    self.local_variables)
                    stimulus[element], err = ParseUtil.evaluate(intensity,
                                                                variables=variables_both)
                    if err:
                        raise ParseException(self.phase_lines[rowlbl].lineno, err)
        if rowlbl != self.prev_linelabel:
            # Entered a new phase line: restart its per-line counters.
            self.event_counter.reset_count_line()
            self.event_counter.line_label = rowlbl
        self.event_counter.increment_count(rowlbl)
        self.event_counter.increment_count_line(rowlbl)
        if stimulus is None:  # Help line
            action = self.phase_lines[rowlbl].action
            lineno = self.phase_lines[rowlbl].lineno
            self._perform_action(lineno, action)
            preceeding_help_lines.append(rowlbl)
            next_stimulus_out = self.next_stimulus(response,
                                                   ignore_response_increment=True,
                                                   preceeding_help_lines=preceeding_help_lines,
                                                   omit_learn=omit_learn)
            stimulus, rowlbl, preceeding_help_lines, omit_learn_help = next_stimulus_out
            omit_learn = (omit_learn or omit_learn_help)
        else:
            for stimulus_element in stimulus:
                self.event_counter.increment_count(stimulus_element)
                self.event_counter.increment_count_line(stimulus_element)
            self.first_stimulus_presented = True
        return stimulus, rowlbl, preceeding_help_lines, omit_learn

    def _make_current_line(self, label):
        # self.curr_linelabel = label
        self.curr_lineobj = self.phase_lines[label]
        # self.endphase_obj.update_itemfreq(label)

    def perform_actions(self, lineno, actions):
        """Run each action; return True if any of them requested @omit_learn."""
        any_omit_learn = False
        for action in actions:
            omit_learn = self._perform_action(lineno, action)
            any_omit_learn = (any_omit_learn or omit_learn)
        return any_omit_learn

    def _perform_action(self, lineno, action):
        """
        Sets a variable (x:3) or count_reset(event).
        Returns True iff the action was "@omit_learn".
        """
        omit_learn = False

        # No action to perform
        if len(action) == 0:
            pass

        # Setting a variable
        elif action.count(':') == 1 or action.count('=') == 1:
            if action.count('=') == 1:
                sep = '='
            else:
                sep = ':'
            var_name, value_str = ParseUtil.split1_strip(action, sep=sep)
            variables_join = Variables.join(self.global_variables, self.local_variables)
            value, err = ParseUtil.evaluate(value_str, variables_join)
            if err:
                raise ParseException(lineno, err)
            # Assignments always go to the phase-local variables.
            err = self.local_variables.set(var_name, value, self.parameters)
            if err:
                raise ParseException(lineno, err)

        # count_reset
        elif action.startswith("count_reset(") and action.endswith(")"):
            event = action[12:-1]  # 12 == len("count_reset(")
            self.event_counter.reset_count(event)

        # omit_learn
        elif action == "@omit_learn":
            omit_learn = True

        else:
            # Should have been caught during Phase.parse()
            raise ParseException(lineno, "Internal error.")
        return omit_learn

    def is_phase_label(self, label):
        return label in self.linelabels

    def check_deprecated_syntax(self):
        # Return the first deprecation message found in any line, else None.
        for _, phase_line in self.phase_lines.items():
            msg = phase_line.check_deprecated_syntax()
            if msg is not None:
                return msg
        return None

    def copy(self):
        # Deep copy; stop_condition is re-copied (redundant after deepcopy of
        # self, but kept to guarantee full independence of the copy).
        cpy = copy.deepcopy(self)
        cpy.stop_condition = copy.deepcopy(self.stop_condition)
        return cpy
def getVariables(self):
    """Return the Variables wrapper for this object, creating and caching
    it on first access (lazy initialization)."""
    # FIX: compare to None with 'is', not '==' (PEP 8); identity comparison
    # cannot be hijacked by a custom __eq__ on the cached object.
    if self.__variables is None:
        self.__variables = Variables(self)
    return self.__variables
def LR(input_list, results_path, seed=123, k_folds=10):
    """
    1. Perform k-fold logistic regression on X and Y
    2. Get heatmap of confusion matrix
    3. Get ROC curve

    Arguments:
    - input_list: list, length = 2 or 4
        Absolute path to [X,Y] or [X_train, Y_train, X_test, Y_test]
    - results_path: str
        Absolute path to the directory where the figures must be saved
    - seed: int, optional, default = 123
        Random seed
    - k_folds: int, optional, default = 10
        Number of folds for cross-validation (only used for 2-file input)

    Returns:
    - Trained logistic regression model
    """
    h = Helpers()
    # CLEANUP: removed unused local `v = Variables()` — instantiated but
    # never read anywhere in this function.

    # Diagnostics: results dir must exist; 2 files => CV mode, 4 => split mode.
    h.check_dir(results_path)
    num_files = len(input_list)
    if num_files == 2:
        X, Y = input_list
        h.check_file_existence(X)
        h.check_file_existence(Y)
    elif num_files == 4:
        X_train, Y_train, X_test, Y_test = input_list
        h.check_file_existence(X_train)
        h.check_file_existence(Y_train)
        h.check_file_existence(X_test)
        h.check_file_existence(Y_test)
    else:
        logger.error(
            "{0} files found in input_list, expected 2 or 4".format(num_files))
        h.error()

    # Import datasets (paths are replaced by the loaded data).
    if num_files == 2:
        X = h.import_dataset(X)
        Y = h.import_dataset(Y)
    else:
        X_train = h.import_dataset(X_train)
        Y_train = h.import_dataset(Y_train)
        X_test = h.import_dataset(X_test)
        Y_test = h.import_dataset(Y_test)

    # Train LR model
    if num_files == 2:
        # Cross-validated fit on the single dataset.
        lr = LogisticRegressionCV(solver='liblinear', cv=k_folds,
                                  random_state=seed)
        lr.fit(X, Y)
        # get accuracy, classification report, confusion matrix and ROC AUC
        h.get_metrics(lr, [X, Y], results_path)
    else:
        # Plain fit on the provided train split, evaluated on the test split.
        lr = LogisticRegression(solver='liblinear', random_state=seed)
        lr.fit(X_train, Y_train)
        # get accuracy, classification matrix, confusion matrix and ROC AUC
        h.get_metrics(lr, [X_train, Y_train, X_test, Y_test], results_path)
    return lr
import time from database import DbUtils from variables import Variables from veratemps import VeraVirtualTemps import logging DB = DbUtils() VAR = Variables() VT = VeraVirtualTemps() class CreateUIPage(): def __init__(self): """ Create UI Webpage """ def updateWebUI(self): roomTemps = self.createRooms() self.saveUI(roomTemps) self.saveAdminUI() def saveUI(self, roomTemps): # print 'SAVE UI' # self.dutyCycle() pageTop = self.pageTop() pageHeaderMain = self.pageHeader('Main UI') pageHeaderAdmin = self.pageHeader('Admin') buttonLayout = self.buttonLayout() filler = self.filler()
def get_config(self):
    """Serialize application state: I/O driver and viewer configurations."""
    config = {}
    config['io_drivers'] = self.io_driver_list.get_config()
    config['viewers'] = self.viewers.get_config()
    return config

def set_config(self, config):
    """Restore state produced by get_config(); absent keys are skipped.
    Variables are cleared so stale values do not survive the reload."""
    if 'io_drivers' in config:
        self.io_driver_list.set_config(config['io_drivers'])
    if 'viewers' in config:
        self.viewers.set_config(config['viewers'])
    self.variables.clear()

# Script entry: wire Variables -> Viewers -> IODriverList into a PlotOMatic
# session, run its trait UI, and stop cleanly afterwards. (Python 2 code.)
vs = Variables()
viewers = Viewers(variables=vs)
iodl = IODriverList(variables=vs, viewers_instance=viewers)
proj = PlotOMatic(io_driver_list=iodl, variables=vs, viewers=viewers)
proj.start()
proj.configure_traits()  # blocks until the Traits UI is closed
proj.stop()

if PROFILE:
    print "Generating Statistics"
    yappi.stop()
    stats = yappi.get_stats(yappi.SORTTYPE_TSUB,
                            yappi.SORTORDER_DESCENDING,
                            300)  #yappi.SHOW_ALL)
    # NOTE(review): chunk is truncated here — the body of this loop lies
    # beyond this excerpt.
    for stat in stats:
class scheduler_test1(unittest.TestCase):
    """Tests for Variables: loading from vmap, lookup, add/update/delete."""

    def setUp(self):
        self.v = Variables(vmap)

    def test1_check_loading(self):
        """ Check that the loading of the variables went OK """
        self.assertEqual(len(self.v.variables), 11)
        self.assertEqual(self.v.variables["test"], "False")
        self.assertEqual(self.v.variables["housemode"], "At home")

    def test2_check_get_variable(self):
        """ Check get_variable """
        val = self.v.get_variable("test")
        self.assertEqual(val, "False")

    def test3_check_get_variable_negative(self):
        """ Check get_variable, negative test case """
        val = self.v.get_variable("should-not-be-there")
        self.assertIsNone(val)

    def test4_add_del_update(self):
        """ Check adding, changing and deleting a variable """
        self.assertEqual(self.v.variables["housemode"], "At home")
        self.v.set_variable("housemode", "Away")
        self.assertEqual(self.v.variables["housemode"], "Away")
        # BUG FIX: `self.assertRaises(KeyError)` with no callable asserts
        # nothing (it just returns an unused context manager). get_variable
        # returns None for missing keys (see test3), so assert that instead.
        val = self.v.get_variable("should-not-be-there")
        self.assertIsNone(val)
        self.v.set_variable("should-not-be-there", "now it's there")
        val = self.v.get_variable("should-not-be-there")
        self.assertEqual(val, "now it's there")
        self.v.del_variable("should-not-be-there")
        val = self.v.get_variable("should-not-be-there")
        self.assertIsNone(val)  # OK, it's gone again

    def tearDown(self):
        pass
def __init__(self):
    """Build the AMDock main window: streams, helpers, tabs, dock widgets
    and status bar. Statement order matters (widgets reference earlier ones).
    """
    super(AMDock, self).__init__()
    # NOTE(review): QMainWindow.__init__ is likely already run by the
    # super() call above; this explicit re-init is kept as-is — confirm
    # against the class's MRO before removing.
    QtGui.QMainWindow.__init__(self)
    Variables.__init__(self)
    # redirect stderr to log window
    sys.stderr = EmittingStream(textWritten=self.normalOutputWritten)
    # Helper/controller objects, each holding a back-reference to the window.
    self.checker = Checker(self)
    self.output2file = OutputFile(self)
    self.loader = Loader(self)
    # Current project and the three molecule slots.
    self.project = PROJECT()
    self.target = BASE()
    self.offtarget = BASE()
    self.ligand = BASE()
    self.log_thread = QtCore.QThreadPool()
    self.numeric_version = [1, 4, 99]
    self.version = "{}.{}.{} For Windows and Linux".format(
        *self.numeric_version)
    # Grid spacings for the two docking engines.
    self.spacing_autoligand = 1.0
    self.spacing_autodock = 0.375
    self.pH = 7.40
    self.state = 0  # 0 not running, 2 running
    self.section = -1  # -1 only PD selected, 0 project, 1 input files, 2 bsd, 3 docking
    # Apply the application stylesheet (self.style_file comes from Variables).
    with open(self.style_file) as f:
        self.setStyleSheet(f.read())
    # Home icon in all relevant widget states (white variant when selected).
    self.icon = QtGui.QIcon()
    self.icon.addPixmap(QtGui.QPixmap(self.home_icon),
                        QtGui.QIcon.Active, QtGui.QIcon.Off)
    self.icon.addPixmap(QtGui.QPixmap(self.home_icon),
                        QtGui.QIcon.Normal, QtGui.QIcon.Off)
    self.icon.addPixmap(QtGui.QPixmap(self.home_icon),
                        QtGui.QIcon.Selected, QtGui.QIcon.Off)
    self.icon.addPixmap(QtGui.QPixmap(self.home_icon),
                        QtGui.QIcon.Disabled, QtGui.QIcon.Off)
    self.icon.addPixmap(QtGui.QPixmap(self.home_icon_white),
                        QtGui.QIcon.Selected, QtGui.QIcon.On)
    self.icon.addPixmap(QtGui.QPixmap(self.home_icon_white),
                        QtGui.QIcon.Active, QtGui.QIcon.On)
    ##--TABS
    self.main_window = QtGui.QTabWidget(self)
    self.setCentralWidget(self.main_window)
    ##--Tabs for Docking Options
    self.lobby = Lobby(self)
    self.main_window.addTab(self.lobby, self.icon, "")
    self.program_body = Program_body(self)
    self.main_window.addTab(self.program_body, "Docking Options")
    self.main_window.setTabEnabled(1, False)  # enabled once a project exists
    ##Tabs for result analysis
    self.result_tab = Results(self)
    self.main_window.addTab(self.result_tab, "Results Analysis")
    self.main_window.setTabEnabled(2, False)  # enabled once results exist
    # **Configurations_Tab
    self.configuration_tab = Configuration_tab(self)
    self.main_window.addTab(self.configuration_tab, "Configuration")
    # ** Help_Tab
    self.help_tab = Help(self)
    self.main_window.addTab(self.help_tab, "Info")
    # ** log dockwidget
    self.log_widget = LogWindow(self)
    self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.log_widget)
    # Honor the user's "show log" preference from the configuration tab.
    if self.configuration_tab.log_view.isChecked():
        self.log_widget.show()
    else:
        self.log_widget.hide()
    self.statusbar = QtGui.QStatusBar(self)
    self.statusbar.setObjectName("statusbar")
    self.setStatusBar(self.statusbar)
    QtCore.QMetaObject.connectSlotsByName(self)
    self.version_label = QtGui.QLabel("Version: %s" % self.version)
    self.statusbar.addPermanentWidget(self.version_label)
class ScriptParser():
    """Line-oriented parser for simulation scripts: parameters, @VARIABLES,
    @PHASE blocks, @RUN statements and post-commands (@figure/@plot/...)."""

    def __init__(self, script):
        self.lines = clean_script(script)
        self.variables = Variables()
        self.parameters = Parameters()
        self.phases = Phases()
        self.runs = Runs()
        self.all_run_labels = list()
        self.postcmds = PostCmds()

        # Used to label unlabelled @run-statements with "run1", "run2", ...
        self.unnamed_run_cnt = 1

        # Used to label unlabelled @phase-statements with "phase1", "phase2", ...
        self.unnamed_phase_cnt = 1

    def parse(self):
        """Parse the whole script, dispatching each line on its type.

        Raises ParseException (with a 1-based line number) on any error.
        """
        if len(self.lines) == 0:
            raise ParseException(1, "Script is empty.")
        prop = None
        curr_phase_label = None
        # Continuation flags: a comma-terminated line continues on the next.
        in_prop = False
        in_variables = False
        in_phase = False
        for lineno0, line_orig in enumerate(self.lines):
            line = line_orig
            lineno = lineno0 + 1

            # Handle empty line
            if len(line) == 0:
                continue

            line_endswith_comma = line.endswith(',')
            if line_endswith_comma:
                line = line[:-1]
            line_parser = LineParser(line, self.variables)
            line_parser.parse()
            linesplit_colon = line_parser.linesplit_colon
            linesplit_equal = line_parser.linesplit_equal
            linesplit_space = line_parser.linesplit_space

            # Continuation handling for multi-line parameters, @VARIABLES
            # and @PHASE bodies.
            if in_prop or in_variables or in_phase:
                parse_this_line_done = True
                err = None
                if in_prop:
                    in_prop = line_endswith_comma
                    err = self.parameters.str_append(prop, line, self.variables,
                                                     self.phases,
                                                     self.all_run_labels,
                                                     line_endswith_comma)
                elif in_variables:
                    in_variables = line_endswith_comma
                    err = self.variables.add_cs_varvals(line, self.parameters)
                elif in_phase:
                    # A phase body ends at the first line that parses as a
                    # known line type; that line is then processed below.
                    in_phase = line_parser.line_type is None
                    if in_phase:
                        self.phases.append_line(curr_phase_label, line, lineno)
                    else:
                        parse_this_line_done = False
                        # Phase with label curr_phase_label is complete, parse it
                        self.phases.parse_phase(curr_phase_label, self.parameters,
                                                self.variables)
                if err:
                    raise ParseException(lineno, err)
                if parse_this_line_done:
                    continue

            if line_parser.line_type == LineParser.PARAMETER:
                # Handle line that sets a parameter (e.g. "prop : val")
                prop = line_parser.param
                if len(linesplit_colon) == 1 and len(linesplit_equal) == 1:
                    raise ParseException(
                        lineno, f"Parameter '{prop}' is not specified.")
                # Take the value after whichever separator (':' or '=') comes
                # first; the other may appear inside the value itself.
                first_colon_index = line_orig.find(':')
                first_equal_index = line_orig.find('=')
                if first_equal_index > 0 and first_colon_index > 0:
                    if first_colon_index < first_equal_index:  # u : a=1, b=2
                        possible_val = linesplit_colon[1].strip()
                    else:  # u = a:1, b:2
                        possible_val = linesplit_equal[1].strip()
                elif first_equal_index > 0 and first_colon_index < 0:  # u = 2
                    possible_val = linesplit_equal[1].strip()
                elif first_colon_index > 0 and first_equal_index < 0:  # u : 2
                    possible_val = linesplit_colon[1].strip()
                # NOTE(review): if neither separator is found in line_orig,
                # possible_val is unbound here — presumably PARAMETER lines
                # always contain one; confirm against LineParser.
                if len(possible_val) == 0:
                    raise ParseException(
                        lineno, f"Parameter '{prop}' is not specified.")
                if line_endswith_comma:
                    if not self.parameters.may_end_with_comma(prop):
                        raise ParseException(
                            lineno,
                            "Value for {} may not end by comma.".format(prop))
                    # Only csv-style parameters continue on the next line.
                    in_prop = self.parameters.is_csv(prop)
                err = self.parameters.str_set(prop, possible_val, self.variables,
                                              self.phases, self.all_run_labels,
                                              line_endswith_comma)
                if err:
                    raise ParseException(lineno, err)
                continue

            elif line_parser.line_type == LineParser.VARIABLES:
                if len(linesplit_space) == 1:
                    raise ParseException(lineno, "@VARIABLES not specified.")
                in_variables = line_endswith_comma
                cs_varvals = linesplit_space[1].strip()
                err = self.variables.add_cs_varvals(cs_varvals, self.parameters)
                if err:
                    raise ParseException(lineno, err)
                else:
                    continue

            elif line_parser.line_type == LineParser.PREV_DEFINED_VARIABLE:
                # Re-assignment of an already defined variable.
                err = self.variables.add_cs_varvals(line.strip(), self.parameters)
                if err:
                    raise ParseException(lineno, err)
                else:
                    continue

            elif line_parser.line_type == LineParser.RUN:
                if len(linesplit_space) == 1:
                    raise ParseException(
                        lineno,
                        "@RUN line must have the form '@RUN phases [runlabel:label].")
                after_run = linesplit_space[1].strip()
                run_label, run_phase_labels = self._parse_run(after_run, lineno)
                world = self.phases.make_world(run_phase_labels)
                run_parameters = copy.deepcopy(
                    self.parameters)  # Params may change betweeen runs
                mechanism_obj, err = run_parameters.make_mechanism_obj()
                if err:
                    raise ParseException(lineno, err)
                n_subjects = run_parameters.get(kw.N_SUBJECTS)
                bind_trials = run_parameters.get(kw.BIND_TRIALS)
                is_ok, err, err_lineno = mechanism_obj.check_compatibility_with_world(
                    world)
                if err:
                    raise ParseException(err_lineno, err)
                run = Run(run_label, world, mechanism_obj, n_subjects, bind_trials)
                self.runs.add(run, run_label, lineno)
                continue

            elif line_parser.line_type == LineParser.PHASE:
                gen_err = "@PHASE line must have the form '@PHASE label stop:condition'."
                if len(linesplit_space) == 1:
                    raise ParseException(lineno, gen_err)
                lbl_and_stopcond = linesplit_space[1].strip()
                curr_phase_label, stop_condition = ParseUtil.split1(
                    lbl_and_stopcond)
                # "label(parent)" means the phase inherits from parent.
                inherited_from = None
                if '(' in curr_phase_label and curr_phase_label.endswith(')'):
                    lind = curr_phase_label.index('(')
                    inherited_from = curr_phase_label[(lind + 1):-1]
                    curr_phase_label = curr_phase_label[0:lind]
                    if not self.phases.contains(inherited_from):
                        raise ParseException(
                            lineno, f"Invalid phase label '{inherited_from}'.")
                if self.phases.contains(curr_phase_label):
                    raise ParseException(
                        lineno, f"Redefinition of phase '{curr_phase_label}'.")
                if not curr_phase_label.isidentifier():
                    raise ParseException(
                        lineno,
                        f"Phase label '{curr_phase_label}' is not a valid identifier.")
                if stop_condition is None:
                    raise ParseException(lineno, gen_err)
                stop, condition = ParseUtil.split1_strip(stop_condition, ':')
                if stop != "stop" or condition is None or len(condition) == 0:
                    raise ParseException(
                        lineno,
                        "Phase stop condition must have the form 'stop:condition'.")
                in_phase = True
                if inherited_from:
                    self.phases.inherit_from(inherited_from, curr_phase_label,
                                             condition, lineno)
                else:
                    self.phases.add_phase(curr_phase_label, condition, lineno)
                continue

            elif line_parser.line_type == LineParser.FIGURE:
                figure_title, mpl_prop = self._parse_figure(lineno,
                                                            linesplit_space)
                figure_cmd = FigureCmd(figure_title, mpl_prop)
                self.postcmds.add(figure_cmd)

            elif line_parser.line_type == LineParser.SUBPLOT:
                subplotspec, subplot_title, mpl_prop = self._parse_subplot(
                    lineno, linesplit_space)
                subplot_cmd = SubplotCmd(subplotspec, subplot_title, mpl_prop)
                self.postcmds.add(subplot_cmd)

            elif line_parser.line_type == LineParser.PLOT:
                expr, mpl_prop, expr0 = self._parse_plot(lineno,
                                                         linesplit_space)
                cmd = linesplit_space[0].lower()
                plot_parameters = copy.deepcopy(
                    self.parameters)  # Params may change betweeen plot
                self._evalparse(lineno, plot_parameters)
                plot_cmd = PlotCmd(cmd, expr, expr0, plot_parameters, mpl_prop)
                self.postcmds.add(plot_cmd)

            elif line_parser.line_type == LineParser.LEGEND:
                mpl_prop = self._parse_legend(lineno, linesplit_space)
                legend_cmd = LegendCmd(mpl_prop)
                self.postcmds.add(legend_cmd)

            elif line_parser.line_type == LineParser.EXPORT:
                expr, filename, expr0 = self._parse_export(lineno,
                                                           linesplit_space)
                cmd = linesplit_space[0].lower()
                export_parameters = copy.deepcopy(
                    self.parameters)  # Params may change betweeen exports
                self._evalparse(lineno, export_parameters)
                export_parameters.val[
                    kw.FILENAME] = filename  # If filename only given on export line
                export_cmd = ExportCmd(lineno, cmd, expr, expr0,
                                       export_parameters)
                self.postcmds.add(export_cmd)

            else:
                raise ParseException(lineno, f"Invalid expression '{line}'.")

    def check_deprecated_syntax(self):
        # Delegates to the phases; returns the first message found, or None.
        return self.phases.check_deprecated_syntax()

    def _evalparse(self, lineno, parameters):
        """Handles parameters that depend on currently defined runs."""
        if len(self.all_run_labels) == 0:
            raise ParseException(lineno, "There is no @RUN.")
        run_label = parameters.get(kw.EVAL_RUNLABEL)
        if len(run_label) == 0:
            # Default to the most recently defined run.
            run_label = self.all_run_labels[-1]
            parameters.val[kw.EVAL_RUNLABEL] = run_label
        else:
            if run_label not in self.all_run_labels:
                raise ParseException(lineno,
                                     f"Unknown run label '{run_label}'.")

    def _parse_legend(self, lineno, linesplit_space):
        """Parse '@legend' or '@legend {mpl_prop}'; return the prop dict."""
        if len(linesplit_space) == 1:  # @legend
            mpl_prop = dict()
        elif len(linesplit_space) == 2:  # @legend {mpl_prop}
            arg = linesplit_space[1]
            is_dict, mpl_prop = ParseUtil.is_dict(arg)
            if not is_dict:
                raise ParseException(lineno,
                                     f"Invalid @legend argument {arg}.")
        # NOTE(review): linesplit_space appears to be a split-once result
        # (len <= 2), so no other arity is expected here — confirm in
        # LineParser.
        return mpl_prop

    def _parse_export(self, lineno, linesplit_space):
        """
        @export expr
        @export expr filename
        @hexport
        @hexport filename
        """
        cmd = linesplit_space[0]
        filename_param = self.parameters.get(kw.FILENAME)
        if cmd == kw.HEXPORT:
            if len(linesplit_space) == 1:  # @hexport
                if len(filename_param) == 0:
                    raise ParseException(lineno, f"Invalid {cmd} command.")
                else:
                    filename = filename_param
            else:  # @hexport filename
                filename = linesplit_space[1]
            # @hexport exports everything; there is no expression to parse.
            return None, filename, None

        if len(linesplit_space) == 1:  # @export
            raise ParseException(lineno, f"Invalid {cmd} command.")
        args = linesplit_space[1]
        expr0, filename = ParseUtil.split1_strip(args)
        expr = expr0
        if filename is None:
            # Fall back to the filename parameter when none is on the line.
            if len(filename_param) == 0:
                raise ParseException(lineno, f"No filename given to {cmd}.")
            else:
                filename = filename_param
        all_stimulus_elements = self.parameters.get(kw.STIMULUS_ELEMENTS)
        all_behaviors = self.parameters.get(kw.BEHAVIORS)
        err = None
        if cmd == kw.VEXPORT:
            expr, err = ParseUtil.parse_element_behavior(
                expr0, all_stimulus_elements, all_behaviors)
        elif cmd == kw.VSSEXPORT:
            expr, err = ParseUtil.parse_element_element(
                expr0, all_stimulus_elements)
        elif cmd == kw.PEXPORT:
            stimulus, behavior, err = ParseUtil.parse_stimulus_behavior(
                expr0, all_stimulus_elements, all_behaviors, self.variables)
            expr = (stimulus, behavior)
        elif cmd == kw.NEXPORT:
            expr, err = ParseUtil.parse_chain(expr0, all_stimulus_elements,
                                              all_behaviors)
        if err:
            raise ParseException(lineno, err)
        return expr, filename, expr0

    def _parse_plot(self, lineno, linesplit_space):
        """
        @plot expr {mpl_prop}
        """
        cmd = linesplit_space[0]
        if len(linesplit_space) == 1:  # @plot
            raise ParseException(lineno, f"Invalid {cmd} command.")
        expr0, mpl_prop = ParseUtil.get_ending_dict(linesplit_space[1])
        if mpl_prop is None:
            mpl_prop = dict()
        expr = expr0
        all_stimulus_elements = self.parameters.get(kw.STIMULUS_ELEMENTS)
        all_behaviors = self.parameters.get(kw.BEHAVIORS)
        err = None
        # Each plot variant parses its expression differently.
        if cmd == kw.VPLOT:
            expr, err = ParseUtil.parse_element_behavior(
                expr0, all_stimulus_elements, all_behaviors)
        elif cmd == kw.VSSPLOT:
            expr, err = ParseUtil.parse_element_element(
                expr0, all_stimulus_elements)
        elif cmd == kw.PPLOT:
            stimulus, behavior, err = ParseUtil.parse_stimulus_behavior(
                expr0, all_stimulus_elements, all_behaviors, self.variables)
            expr = (stimulus, behavior)
        elif cmd == kw.NPLOT:
            expr, err = ParseUtil.parse_chain(expr0, all_stimulus_elements,
                                              all_behaviors)
        if err:
            raise ParseException(lineno, err)
        return expr, mpl_prop, expr0

    def _parse_subplot(self, lineno, linesplit_space):
        """
        @subplot
        @subplot subplotspec
        @subplot subplotspec title
        @subplot subplotspec {mpl_prop}
        @subplot subplotspec title {mpl_prop}
        """
        title_param = self.parameters.get(kw.SUBPLOTTITLE)
        if len(linesplit_space) == 1:  # @subplot
            subplotspec = '111'
            title = title_param
            mpl_prop = dict()
        elif len(linesplit_space) == 2:  # @subplot ...
            args, mpl_prop = ParseUtil.get_ending_dict(linesplit_space[1])
            if mpl_prop is None:
                mpl_prop = dict()
            subplotspec, title_line = ParseUtil.split1_strip(args)
            if title_line is None:  # @subplot subplotspec
                title = title_param
            else:
                title = title_line
        return subplotspec, title, mpl_prop

    def _parse_figure(self, lineno, linesplit_space):
        """
        @figure
        @figure title
        @figure {mpl_prop}
        @figure title {mpl_prop}
        """
        title = self.parameters.get(kw.TITLE)
        if len(linesplit_space) == 1:  # @figure
            mpl_prop = dict()
        elif len(linesplit_space) == 2:  # @figure args
            title, mpl_prop = ParseUtil.get_ending_dict(linesplit_space[1])
            if mpl_prop is None:
                mpl_prop = dict()
        return title, mpl_prop

    def _parse_run(self, after_run, lineno):
        """
        Parses a @RUN line ("@RUN phase1,phase2,... [runlabel:lbl]") and returns
        the run label and a list of phase labels.
        """
        match_objs_iterator = re.finditer(r' runlabel[\s]*:', after_run)
        match_objs = tuple(match_objs_iterator)
        n_matches = len(match_objs)
        if n_matches == 0:
            # Unlabelled run: generate run1, run2, ...
            label = f'run{self.unnamed_run_cnt}'
            self.unnamed_run_cnt += 1
            phases_str = after_run
        elif n_matches == 1:
            match_obj = match_objs[0]
            start_index = match_obj.start() + 1  # Index of "l" in "label"
            end_index = match_obj.end()  # Index of character after ":"
            label = after_run[end_index:].strip()
            phases_str = after_run[0:start_index].strip()
        else:
            raise ParseException(
                lineno,
                f"Maximum one instance of 'runlabel:' on a {kw.RUN} line.")
        if label in self.all_run_labels:
            raise ParseException(lineno,
                                 f"Duplication of run label '{label}'.")
        else:
            self.all_run_labels.append(label)
        phase_labels = phases_str.strip(',').split(',')
        phase_labels = [phase_label.strip() for phase_label in phase_labels]
        for phase_label in phase_labels:
            if not self.phases.contains(phase_label):
                raise ParseException(lineno,
                                     f"Phase {phase_label} undefined.")
        return label, phase_labels
def next_stimulus(self, response, ignore_response_increment=False,
                  preceeding_help_lines=None, omit_learn=None):
    """
    Advance the phase state machine by one step and return the next stimulus.

    Parameters:
    - response: the subject's last response (None on the very first call)
    - ignore_response_increment: internal flag used by the recursive call for
      help lines, so the same response is not counted twice
    - preceeding_help_lines: accumulator of help-line labels traversed before
      reaching a stimulus line (created fresh when None)
    - omit_learn: carried through recursion; True means the caller should
      skip the learning update for this step

    Returns a tuple (stimulus, rowlbl, preceeding_help_lines, omit_learn).
    stimulus and rowlbl are None when the phase's stop condition is met.

    Raises ParseException if a variable-valued stimulus intensity fails to
    evaluate.
    """
    if preceeding_help_lines is None:
        preceeding_help_lines = list()
    if not ignore_response_increment:
        # Count the response once per "real" (non-recursive) step.
        if response is not None:
            self.event_counter.increment_count(response)
            self.event_counter.increment_count_line(response)
    self.event_counter.set_last_response(response)
    # Only test the stop condition once at least one stimulus has been
    # presented; it is evaluated against global+local variables combined.
    if self.first_stimulus_presented:
        variables_both = Variables.join(self.global_variables,
                                        self.local_variables)
        if self.stop_condition.is_met(variables_both, self.event_counter):
            return None, None, preceeding_help_lines, None
    if self.is_first_line:
        rowlbl = self.first_label
        self.is_first_line = False
        omit_learn = True  # Since response is None, there is no learning (updating of v-values) to do
    else:
        rowlbl, omit_learn = self.curr_lineobj.next_line(
            response, self.global_variables, self.local_variables,
            self.event_counter)
        self.prev_linelabel = self.curr_lineobj.label
    self._make_current_line(rowlbl)
    stimulus = self.phase_lines[rowlbl].stimulus
    if stimulus is not None:
        # Resolve variable-valued intensities, e.g. "element[var]" where var
        # is a (local) variable.
        for element, intensity in stimulus.items():
            if type(intensity) is str:
                variables_both = Variables.join(self.global_variables,
                                                self.local_variables)
                stimulus[element], err = ParseUtil.evaluate(
                    intensity, variables=variables_both)
                if err:
                    raise ParseException(self.phase_lines[rowlbl].lineno, err)
    if rowlbl != self.prev_linelabel:
        # Entering a new phase line: restart the per-line counters.
        self.event_counter.reset_count_line()
        self.event_counter.line_label = rowlbl
    self.event_counter.increment_count(rowlbl)
    self.event_counter.increment_count_line(rowlbl)
    if stimulus is None:
        # Help line (no stimulus): perform its action, remember it, and
        # recurse to find the next actual stimulus line. The recursive call
        # must not re-count the response (ignore_response_increment=True).
        action = self.phase_lines[rowlbl].action
        lineno = self.phase_lines[rowlbl].lineno
        self._perform_action(lineno, action)
        preceeding_help_lines.append(rowlbl)
        next_stimulus_out = self.next_stimulus(
            response,
            ignore_response_increment=True,
            preceeding_help_lines=preceeding_help_lines,
            omit_learn=omit_learn)
        stimulus, rowlbl, preceeding_help_lines, omit_learn_help = next_stimulus_out
        # Skip learning if either this step or any traversed help line says so.
        omit_learn = (omit_learn or omit_learn_help)
    else:
        for stimulus_element in stimulus:
            self.event_counter.increment_count(stimulus_element)
            self.event_counter.increment_count_line(stimulus_element)
        self.first_stimulus_presented = True
    return stimulus, rowlbl, preceeding_help_lines, omit_learn
def apply(self, action): if action.name == "shift": #pred = self.nextSubgraph() token = self.buffer.consume() sg = action.argv.get() #pred2 = pred.get_str(None, Variables()) #sg2 = sg.get_str(None, Variables()) #if sg2 == pred2: # print "MATCH" #else: # print "MISMATCH" if self.stage == "COLLECT": Resources.phrasetable[token.word + "_" + token.pos][action.argv.get( None, Variables())] += 1 if token.ne in ["ORGANIZATION", "ORG" ] and token.word not in Resources.seen_org: Resources.seen_org.append(token.word) Resources.forg.write(token.word) for node in sg.nodes: if node.isConst == False and node.concept.strip( ) != "": Resources.forg.write(" " + node.concept) Resources.forg.write("\n") test = [] for n in sg.nodes: if len([r for r in sg.relations if r[1] == n]) == 0: # push only root self.stack.push(n) test.append(n) break tmprels = Relations() for n1, n2, label in sg.relations: self.stack.relations.add(n1, n2, label) tmprels.add(n1, n2, label) self.counter += 1 if len(sg.nodes) == 0: graph = "NULL" elif tmprels == Relations(): graph = "(" + sg.nodes[0].concept + ")" else: graph, _, _ = tostring.to_string(tmprels.triples(), "TOP") #print self.sentence, "|||", self.counter - 1, "|||", " ".join(graph.replace("\n","").split()) elif action.name == "reduce": node = self.stack.pop() if action.argv is not None: s, label, _ = action.argv self.stack.relations.add(node, s, label) elif action.name == "larc": label = action.argv child = self.stack.get(1) top = self.stack.top() assert (top is not None and child is not None) self.stack.relations.add(top, child, label) self.stack.pop(1) elif action.name == "rarc": label = action.argv child = self.stack.get(1) top = self.stack.top() assert (top is not None and child is not None) self.stack.relations.add(child, top, label) else: raise ValueError("action not defined")
def setUp(self): self.v = Variables(vmap)
class Helpers:
    """
    Private helper functions shared by the pipeline scripts: console/log
    error reporting, file management, dataset I/O, input validation and
    metric/plot generation. Fatal problems are logged and routed through
    error(), which terminates the process.
    """

    # Shared configuration object; provides paths such as `log_f` and
    # `features_f` (declared in the Variables module).
    v = Variables()

    def error(self):
        """
        Print onto the console a message if an error is encountered during
        execution, then terminate with a non-zero exit status.
        """
        print("Execution terminated. Check {0} for details".format(
            self.v.log_f))
        sys.exit(1)

    def create_file(self, filename):
        """
        Create `filename` as an empty file if it does not already exist.

        Arguments:
        - filename: str
            Absolute path of the file that has to be created

        Returns:
        - No return values
        """
        # BUG FIX: the original wrapped os.path.exists() in try/except
        # IOError, but os.path.exists() returns a bool and never raises
        # IOError -- the creation branch was unreachable, so missing files
        # were never created. Test the boolean instead.
        if not os.path.exists(filename):
            logger.info("Attempting to create {0}".format(filename))
            try:
                open(filename, 'w').close()
            except IOError:
                logger.error("Unable to create {0}".format(filename))
                self.error()
            logger.info("{0} successfully created".format(filename))
        return None

    def done(self):
        """
        Print onto the console a message that execution has completed.
        """
        print("Execution completed")
        return None

    def check_file_existence(self, filename):
        """
        Abort (via self.error) if `filename` does not exist.

        Arguments:
        - filename: str
            Absolute path of the file whose existence is to be determined
        """
        # Plain `if` instead of assert: asserts are stripped under -O,
        # which would silently disable this validation.
        if not os.path.exists(filename):
            logger.error("{0} does not exist".format(filename))
            self.error()
        return None

    def check_year(self, start, end):
        """
        Check that both bounds are integers and that `end` > `start`.

        Arguments:
        - start: int  Starting year
        - end: int    Ending year
        """
        self.check_integer(start)
        self.check_integer(end)
        if not end > start:
            logger.error("End year must be greater than start year")
            self.error()
        return None

    def check_extension(self, filename, ext):
        """
        Abort unless `filename` has the extension `ext`.

        Arguments:
        - filename: str  Name of the file
        - ext: str       Expected extension (without the dot)
        """
        current = filename.split('.')[-1]
        if current != ext:
            logger.error("Expected extension {0}, got {1}".format(
                ext, current))
            self.error()

    def import_dataset(self, abs_path):
        """
        Import a csv/xls dataset as a pandas dataframe.

        Arguments:
        - abs_path: str  Absolute path to the dataset file

        Returns:
        - data: pandas dataframe
        """
        self.check_file_existence(abs_path)
        logger.info("Reading {0}".format(abs_path))
        ext = abs_path.split('.')[-1]
        if ext == 'csv':
            data = pd.read_csv(abs_path)
        elif ext == 'xls':
            data = pd.read_excel(abs_path)
        else:
            logger.error("Expected extensions csv or xls, got {0}".format(ext))
            self.error()
        logger.info("{0} has {1} rows and {2} columns".format(
            abs_path, data.shape[0], data.shape[1]))
        return data

    def write_dataset(self, dataset, abs_path):
        """
        Write `dataset` to a csv/xls file at `abs_path`.

        Arguments:
        - dataset: pandas dataframe
        - abs_path: str  Destination path (extension selects the format)
        """
        logger.info("Creating {0}".format(abs_path))
        ext = abs_path.split('.')[-1]
        if ext == 'csv':
            dataset.to_csv(abs_path, index=False)
        elif ext == 'xls':
            dataset.to_excel(abs_path, index=False)
        else:
            logger.error("Expected extensions csv or xls, got {0}".format(ext))
            self.error()
        logger.info("{0} successfully generated".format(abs_path))
        return None

    def read_feature_file(self, filename):
        """
        Read newline-separated feature names from `filename`.

        Returns:
        - features: list  List of feature-name strings
        """
        with open(filename, 'r') as f:
            features = [line.split('\n')[0] for line in f.readlines()]
        return features

    def isfloat(self, inp):
        """
        Return True if `inp` can be parsed as a float, False otherwise.
        """
        try:
            float(inp)
            return True
        except ValueError:
            return False

    def check_integer(self, number):
        """
        Abort unless `number` parses as an integer.
        """
        # BUG FIX: str.isnumeric() rejects negative values such as "-1";
        # int() accepts any well-formed integer literal (backward
        # compatible: a superset of previously accepted inputs).
        try:
            int(str(number))
        except ValueError:
            # BUG FIX: the format arguments were swapped, producing
            # "Expected type <actual>, got type int for ...".
            logger.error("Expected type {0}, got type {1} for {2}".format(
                'int', type(number), number))
            self.error()

    def check_float(self, number):
        """
        Abort unless `number` parses as a floating-point number.
        """
        if not self.isfloat(number):
            # Log at error level (the original used info) with the
            # expected/actual format arguments in the right order, matching
            # check_integer above.
            logger.error("Expected type {0}, got type {1} for {2}".format(
                'float', type(number), number))
            self.error()

    def confmat_heatmap(self, cm, score, path):
        """
        Save a heatmap of confusion matrix `cm` to `path`, with the
        accuracy `score` in the title.
        """
        plt.figure(figsize=(9, 9))
        sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square=True,
                    cmap='Blues_r')
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        all_sample_title = 'Accuracy Score: {0}'.format(score)
        plt.title(all_sample_title, size=15)
        plt.savefig(path)
        logger.info("{0} successfully generated".format(path))
        return None

    def roc_auc(self, fpr, tpr, auc, path):
        """
        Save a ROC curve built from `fpr`/`tpr` (AUC shown in the title)
        to `path`.
        """
        plt.figure(figsize=(9, 9))
        plt.plot(fpr, tpr, color='red', lw=2, label='ROC curve')
        plt.plot([0, 1], [0, 1], color='blue', lw=2, linestyle='--')
        plt.xlabel('FPR')
        plt.ylabel('TPR')
        plt.title('ROC curve with AUC: {0}'.format(auc), size=15)
        plt.savefig(path)
        logger.info("{0} successfully generated".format(path))
        return None

    def get_metrics(self, model, data_list, results_path, accuracy=True,
                    class_report=True, confmat=True, roc_auc=True):
        """
        Compute the accuracy, classification report, confusion matrix and
        ROC AUC curve for `model`.

        Arguments:
        - model: fitted classifier exposing score/predict/predict_proba
        - data_list: list of length 2 ([X, Y]) or length 4
            ([X_train, Y_train, X_test, Y_test])
        - results_path: str  Directory receiving the metric files
        - accuracy, class_report, confmat, roc_auc: bool flags selecting
            which artifacts to produce

        Returns:
        - No return values
        """
        if len(data_list) == 2:
            X, Y = data_list
            score = model.score(X, Y)
            pred = model.predict(X)
            if accuracy:
                logger.info("accuracy = {0}".format(score))
            if class_report:
                cr = classification_report(Y, pred)
                cr_file = os.path.join(results_path, 'class_report.txt')
                self.create_file(cr_file)
                with open(cr_file, 'w') as f:
                    f.write(cr)
                logger.info(
                    "Classification report generated: {0}".format(cr_file))
            if confmat:
                cm = confusion_matrix(Y, pred)
                self.confmat_heatmap(cm, score,
                                     os.path.join(results_path, 'confmat.png'))
            if roc_auc:
                # predict_proba[:, 1] = probability of the positive class.
                fpr, tpr, _ = roc_curve(Y, model.predict_proba(X)[:, 1])
                auc_score = auc(fpr, tpr)
                logger.info("ROC AUC = {0}".format(auc_score))
                self.roc_auc(fpr, tpr, auc_score,
                             os.path.join(results_path, 'roc_auc.png'))
        elif len(data_list) == 4:
            X_train, Y_train, X_test, Y_test = data_list
            train_score = model.score(X_train, Y_train)
            test_score = model.score(X_test, Y_test)
            train_pred = model.predict(X_train)
            test_pred = model.predict(X_test)
            if accuracy:
                logger.info("Training accuracy = {0}".format(train_score))
                logger.info("Testing accuracy = {0}".format(test_score))
            if class_report:
                train_cr = classification_report(Y_train, train_pred)
                train_cr_file = os.path.join(results_path,
                                             'class_report_train.txt')
                self.create_file(train_cr_file)
                with open(train_cr_file, 'w') as f:
                    f.write(train_cr)
                logger.info(
                    "Training classification report generated: {0}".format(
                        train_cr_file))
                test_cr = classification_report(Y_test, test_pred)
                test_cr_file = os.path.join(results_path,
                                            'class_report_test.txt')
                self.create_file(test_cr_file)
                with open(test_cr_file, 'w') as f:
                    f.write(test_cr)
                logger.info(
                    "Testing classification report generated: {0}".format(
                        test_cr_file))
            if confmat:
                train_cm = confusion_matrix(Y_train, train_pred)
                self.confmat_heatmap(
                    train_cm, train_score,
                    os.path.join(results_path, 'confmat_train.png'))
                test_cm = confusion_matrix(Y_test, test_pred)
                self.confmat_heatmap(
                    test_cm, test_score,
                    os.path.join(results_path, 'confmat_test.png'))
            if roc_auc:
                train_fpr, train_tpr, _ = roc_curve(
                    Y_train, model.predict_proba(X_train)[:, 1])
                train_auc = auc(train_fpr, train_tpr)
                logger.info("Training ROC AUC = {0}".format(train_auc))
                self.roc_auc(train_fpr, train_tpr, train_auc,
                             os.path.join(results_path, 'roc_auc_train.png'))
                test_fpr, test_tpr, _ = roc_curve(
                    Y_test, model.predict_proba(X_test)[:, 1])
                test_auc = auc(test_fpr, test_tpr)
                logger.info("Testing ROC AUC = {0}".format(test_auc))
                self.roc_auc(test_fpr, test_tpr, test_auc,
                             os.path.join(results_path, 'roc_auc_test.png'))
        else:
            logger.error("expected list of length 2 or 4, got {0}".format(
                len(data_list)))
            self.error()
        return None

    def decision_tree_viz(self, model, path):
        """
        Generate a PNG visualization of a fitted decision tree.

        Arguments:
        - model: Decision tree model
        - path: str  Destination .png file
        """
        dot_data = StringIO()
        export_graphviz(model,
                        out_file=dot_data,
                        filled=True,
                        rounded=True,
                        special_characters=True,
                        feature_names=self.read_feature_file(
                            self.v.features_f),
                        class_names=['0', '1'])
        graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
        self.check_extension(path, 'png')
        graph.write_png(path)
        logger.info("{0} successfully generated".format(path))
        return None

    def get_one_roc_curve(self, data_list, results_path, logit, dec_tree,
                          neural_net):
        """
        Plot the ROC curves of up to three models in one figure
        (final_roc.png in `results_path`).

        Arguments:
        - data_list: list, length = 2
            Paths to the test set files [X_test, Y_test]
        - results_path: str  Output directory
        - logit, dec_tree, neural_net: fitted models (any may be None to
            skip its curve)
        """
        if len(data_list) != 2:
            logger.error("Expected array of length 2, got {0}".format(
                len(data_list)))
            self.error()
        plt.figure(figsize=(9, 9))
        X, Y = data_list
        self.check_file_existence(X)
        self.check_file_existence(Y)
        X = self.import_dataset(X)
        Y = self.import_dataset(Y)
        if logit is not None:  # idiom fix: `is not None` instead of `!= None`
            logit_fpr, logit_tpr, _ = roc_curve(Y,
                                                logit.predict_proba(X)[:, 1])
            logit_auc_score = auc(logit_fpr, logit_tpr)
            plt.plot(
                logit_fpr,
                logit_tpr,
                color='red',
                lw=2,
                label='Logistic regression, AUC = {0}'.format(logit_auc_score))
        if dec_tree is not None:
            dec_tree_fpr, dec_tree_tpr, _ = roc_curve(
                Y, dec_tree.predict_proba(X)[:, 1])
            dec_tree_auc_score = auc(dec_tree_fpr, dec_tree_tpr)
            plt.plot(
                dec_tree_fpr,
                dec_tree_tpr,
                color='blue',
                lw=2,
                label='Decision tree, AUC = {0}'.format(dec_tree_auc_score))
        if neural_net is not None:
            neural_net_fpr, neural_net_tpr, _ = roc_curve(
                Y, neural_net.predict_proba(X)[:, 1])
            neural_net_auc_score = auc(neural_net_fpr, neural_net_tpr)
            plt.plot(
                neural_net_fpr,
                neural_net_tpr,
                color='green',
                lw=2,
                label='Neural network, AUC = {0}'.format(neural_net_auc_score))
        plt.plot([0, 1], [0, 1], color='yellow', lw=2, linestyle='--')
        plt.xlabel('FPR')
        plt.ylabel('TPR')
        # BUG FIX: the original called 'ROC curves'.format(auc) -- a no-op
        # (the string has no placeholders) that misleadingly referenced the
        # sklearn `auc` function.
        plt.title('ROC curves', size=15)
        plt.legend(loc='upper left')
        reqd_file = os.path.join(results_path, "final_roc.png")
        plt.savefig(reqd_file)
        logger.info("{0} successfully generated".format(reqd_file))
        return None
def split(dataset, target, test_size, output_direc, seed=123):
    """
    1. Import dataset
    2. Take only individuals where target is not NaN in dataset
    3. X: Data for required features
    4. Y: Data for target
    5. Split X and Y into train and test (with feature scaling fitted on
       the training partition only)
    6. X_unknown and Y_unknown where Y is missing
    7. Save X, Y, X_train, Y_train, X_test, Y_test, X_unknown, Y_unknown
       to output_direc

    Arguments:
    - dataset: str
        Absolute path to the preprocessed dataset which has to be split
    - target: str
        The target feature
    - test_size: float
        Size of the test set (percentage)
    - output_direc: str
        Directory where training and test files must be placed (created if
        it does not exist)
    - seed: int, optional, default = 123
        Random seed value

    Returns:
    - No return value
    """
    h = Helpers()
    v = Variables()

    # Diagnostics
    h.check_file_existence(dataset)
    h.check_float(test_size)
    h.check_integer(seed)
    # BUG FIX: the original also called h.check_file_existence(output_direc),
    # which aborts when the output directory does not yet exist and thereby
    # made the directory-creation path inside check_dir() unreachable.
    # check_dir() alone both validates and (if needed) creates it.
    h.check_dir(output_direc)

    # Import dataset
    data = h.import_dataset(dataset)

    # Keep only individuals whose target attribute is present.
    data_nonNaN = data[data[target].notnull()]
    logger.info(
        "{0} individuals do not have missing values in their target attribute".
        format(data_nonNaN.shape[0]))
    data_NaN = data[data[target].isnull()]

    # Read features from features file
    features = h.read_feature_file(v.features_f)

    # Get X and Y
    X = data_nonNaN[features]
    Y = data_nonNaN[target]

    # Split X and Y into training and testing data
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=test_size,
                                                        random_state=seed)

    # Feature scaling: fit on the training partition, apply to both.
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)

    # Individuals with a missing target (to be predicted later).
    X_unknown = data_NaN[features]
    Y_unknown = data_NaN[target]

    # Convert numpy arrays / series back to pandas dataframes for CSV output.
    X = pd.DataFrame(X, columns=features)
    Y = pd.DataFrame(Y, columns=[target])
    X_train = pd.DataFrame(X_train, columns=features)
    Y_train = pd.DataFrame(Y_train, columns=[target])
    X_test = pd.DataFrame(X_test, columns=features)
    Y_test = pd.DataFrame(Y_test, columns=[target])
    X_unknown = pd.DataFrame(X_unknown, columns=features)
    Y_unknown = pd.DataFrame(Y_unknown, columns=[target])

    # Save train, test and unknown partitions to output_direc.
    h.write_dataset(X, os.path.join(output_direc, 'X.csv'))
    h.write_dataset(Y, os.path.join(output_direc, 'Y.csv'))
    h.write_dataset(X_train, os.path.join(output_direc, 'X_train.csv'))
    h.write_dataset(Y_train, os.path.join(output_direc, 'Y_train.csv'))
    h.write_dataset(X_test, os.path.join(output_direc, 'X_test.csv'))
    h.write_dataset(Y_test, os.path.join(output_direc, 'Y_test.csv'))
    h.write_dataset(X_unknown, os.path.join(output_direc, 'X_unknown.csv'))
    h.write_dataset(Y_unknown, os.path.join(output_direc, 'Y_unknown.csv'))
    return None
def initialize_local_variables(self): self.local_variables = Variables()