def salvar(self, widget):
    """Prompt for a destination and save the current network as a .net file.

    widget: the Gtk widget that fired the callback (unused).
    Side effects: updates the status bar; on failure the traceback is
    appended to trace.log.
    """
    try:
        if self.net:
            self.dialog = Gtk.FileChooserDialog(
                "Selecione o local e informe o nome do arquivo",
                None, Gtk.FileChooserAction.SAVE,
                (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                 Gtk.STOCK_SAVE, Gtk.ResponseType.OK))
            response = self.dialog.run()
            if response == Gtk.ResponseType.OK:
                nome = self.dialog.get_filename() + ".net"
                self.net.save(nome)
                # show the saved file name on the status bar
                contexto = self.statusFile.get_context_id("fileSave")
                self.statusFile.push(contexto, str(self.dialog.get_filename()))
            # destroy the chooser on Cancel as well (the original leaked it)
            self.dialog.destroy()
        else:
            contexto = self.status.get_context_id("EmptyNet")
            self.status.push(contexto, "Não existe RNA para ser salva.")
    except Exception:
        # log the traceback instead of crashing the GUI
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
        # the dialog may not exist if the failure happened before/while
        # constructing it
        if getattr(self, "dialog", None) is not None:
            self.dialog.destroy()
def pegarRedeSalva(self, widget):
    """Load a previously saved .net network chosen through a file dialog.

    On success stores the network in self.net, refreshes the list stores
    and shows the file name on the status bar; on failure reports via
    feedStatus and appends the traceback to trace.log.
    """
    dialog = None
    try:
        dialog = Gtk.FileChooserDialog(
            "Por favor, escolha um arquivo .net", None,
            Gtk.FileChooserAction.OPEN,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
             Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        response = dialog.run()
        if response == Gtk.ResponseType.OK:
            # load the saved network
            self.net = neurolab.load(dialog.get_filename())
            # refresh the stores from the current targets (one value per row)
            numTargets = [float(t[0]) for t in self.targets]
            self._setListStore(self.inputs, numTargets, True)
            self.storeRA.append([str(self.net.trainf)])
            # show the opened file name on the status bar
            contexto = self.statusFile.get_context_id("fileAbrir")
            self.statusFile.push(contexto, str(dialog.get_filename()))
    except Exception:
        self.feedStatus.gerarStatus(self.feedStatus.contexto_load)
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
    finally:
        # the original raised NameError here when the dialog constructor
        # itself failed; guard against that
        if dialog is not None:
            dialog.destroy()
def __init__(self, fileName, maxCountInFile, keyword):
    # Split a large DBLP XML dump into numbered chunk files under xmls\,
    # each holding up to maxCountInFile <keyword>...</keyword> records.
    # fileName: path of the source XML dump.
    # maxCountInFile: number of records per output chunk.
    # keyword: element name delimiting one record (e.g. "article").
    inputFile = file(fileName, "r")
    fileCount = 0
    keyword2 = "<" + keyword          # opening-tag prefix
    keyword = "</" + keyword + ">"    # full closing tag
    current = ""
    # skip the file header until the first opening tag
    while current[: len(keyword2)] != keyword2:
        current = inputFile.readline()
    # NOTE(review): "<\dblp>" contains a literal backslash, so this
    # comparison never matches "</dblp>"; termination effectively relies
    # on the fileCount <= 1000 bound — confirm intent.
    while current != "<\dblp>" and fileCount <= 1000:
        toFile = current
        articleCount = 0
        while articleCount < maxCountInFile:
            current = ""
            # accumulate lines until the record's closing tag.
            # NOTE(review): readline() returns "" at EOF, so this inner
            # loop can spin forever past end of file — confirm inputs.
            while current[: len(keyword)] != keyword:
                current = inputFile.readline()
                toFile += current
            articleCount += 1
            current = ""
        # write one chunk wrapped as a standalone dblp document
        f = file("xmls\\" + str(fileCount) + ".xml", "w")
        fileCount += 1
        print(fileCount)
        f.write(
            """<?xml version="1.0" encoding="ISO-8859-1"?> \n <!DOCTYPE dblp SYSTEM "dblp.dtd"> \n<dblp>\n"""
            + toFile + "</dblp>"
        )
        f.close()
    # flush the final partial chunk.
    # NOTE(review): this trailing chunk lacks the XML header/<dblp> prefix
    # the earlier chunks get — confirm whether that is intended.
    f = file("xmls\\" + str(fileCount) + ".xml", "w")
    fileCount += 1
    f.write(toFile + "</dblp>")
    f.close()
def __init__(self, fileName, maxCountInFile, keyword):
    # Split a large DBLP XML dump into numbered chunk files under xmls\.
    # NOTE(review): duplicate of an identical splitter elsewhere in this
    # file (differs only in quote style) — consider deduplicating.
    # fileName: source dump path; maxCountInFile: records per chunk;
    # keyword: element name delimiting one record.
    inputFile = file(fileName, 'r')
    fileCount = 0
    keyword2 = '<' + keyword          # opening-tag prefix
    keyword = '</' + keyword + '>'    # full closing tag
    current = ''
    # skip the header until the first opening tag
    while (current[:len(keyword2)] != keyword2):
        current = inputFile.readline()
    # NOTE(review): '<\dblp>' has a literal backslash and never matches
    # '</dblp>'; termination relies on the fileCount <= 1000 bound.
    while (current != '<\dblp>' and fileCount <= 1000):
        toFile = current
        articleCount = 0
        while (articleCount < maxCountInFile):
            current = ''
            # gather lines until the record's closing tag.
            # NOTE(review): readline() yields '' at EOF, so this can spin
            # forever past end of file — confirm inputs.
            while (current[:len(keyword)] != keyword):
                current = inputFile.readline()
                toFile += current
            articleCount += 1
            current = ''
        # write one chunk wrapped as a standalone dblp document
        f = file('xmls\\' + str(fileCount) + '.xml', 'w')
        fileCount += 1
        print(fileCount)
        f.write(
            """<?xml version="1.0" encoding="ISO-8859-1"?> \n <!DOCTYPE dblp SYSTEM "dblp.dtd"> \n<dblp>\n"""
            + toFile + "</dblp>")
        f.close()
    # flush the final partial chunk (note: no XML header prefix here)
    f = file('xmls\\' + str(fileCount) + '.xml', 'w')
    fileCount += 1
    f.write(toFile + "</dblp>")
    f.close()
def daemonize(self):
    """
    Do the UNIX double-fork magic, see Stevens' "Advanced Programming
    in the UNIX Environment" for details (ISBN 0201563177)
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    """
    try:
        pid = os.fork()
        if pid > 0:
            # Exit first parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Decouple from parent environment
    os.chdir(self.home_dir)
    os.setsid()
    os.umask(self.umask)

    # Do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # Exit from second parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    if sys.platform != 'darwin':  # This block breaks on OS X
        # Redirect standard file descriptors (open() replaces the removed
        # Python 2 `file` builtin)
        sys.stdout.flush()
        sys.stderr.flush()
        si = open(self.stdin, 'r')
        so = open(self.stdout, 'a+')
        if self.stderr:
            # buffering=0 keeps stderr unbuffered so crash output is not
            # lost (Python 2 semantics; py3 needs binary mode for this)
            se = open(self.stderr, 'a+', 0)
        else:
            se = so
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

    def sigtermhandler(signum, frame):
        # flip the run flag; the daemon loop is expected to poll it
        self.daemon_alive = False
    signal.signal(signal.SIGTERM, sigtermhandler)
    signal.signal(signal.SIGINT, sigtermhandler)

    if self.verbose >= 1:
        print("Started!")

    # Write pidfile; make sure it is removed again when we quit
    atexit.register(self.delpid)
    pid = str(os.getpid())
    with open(self.pidfile, 'w+') as pf:
        pf.write("%s\n" % pid)
def aprender(self):
    """Train the current network with the algorithm selected in the UI.

    Reads the hyper-parameters from the widgets, runs the selected
    training rule, then refreshes the error list, the learning-rule
    store and the weight/bias display.  Failures are reported on the
    status bar and appended to trace.log.  Always stops the spinner.
    """
    try:
        tr = treino.Treinamento(
            self.net, self.inputs, self.targets, self.epocas, self.show,
            self.objetivo, self.taxaAprendizado, self.taxaIncremento,
            self.taxaDecremento, self.impulso, self.taxaRegularizacao)
        # map each radio button onto its training-rule name
        algoritmos = (
            (self.delta, "delta"), (self.gd, "gd"), (self.gdm, "gdm"),
            (self.gda, "gda"), (self.gdx, "gdx"), (self.rprop, "rprop"),
            (self.bfgs, "bfgs"))
        for botao, nome in algoritmos:
            if botao.get_active():
                self.errors = tr.treinar(nome)
                break
        # list the error every `show` epochs
        passo = self.show.get_value_as_int()
        for i in range(passo - 1, len(self.errors), passo):
            self.storeErro.append((i + 1, self.errors[i]))
        # record the learning rule that was used
        self.storeRA.append([str(self.net.trainf)])
        # refresh the weights/bias display
        numTargets = [float(t[0]) for t in self.targets]
        self._setListStore(self.inputs, numTargets, True)
        # stopped before the epoch limit => goal reached
        if len(self.errors) < self.epocas.get_value_as_int():
            self.feedStatus.gerarStatus(self.feedStatus.contexto_treinado)
        else:
            self.feedStatus.gerarStatus(self.feedStatus.contexto_max)
        if self.switchAnimacao.get_active() and len(self.errors) <= 100:
            self._animarErro()
    except Exception:
        self.feedStatus.gerarStatus(self.feedStatus.contexto_train)
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
    finally:
        self.spinner.stop()
def _animarErro(self):
    """Show the training-error animation in a separate process."""
    p = None
    try:
        p = Process(target=anima.Animacao, args=(self.errors,))
        p.start()
        p.join()
    except Exception:
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
    finally:
        # the original raised NameError here when Process() itself failed
        if p is not None:
            p.terminate()
def extrair_dados(self, caminho):
    """Read the spreadsheet at *caminho* and return its non-empty rows.

    On failure the traceback is appended to trace.log and None is
    returned implicitly.
    """
    try:
        data = get_data(caminho)
        # keep only the truthy (non-empty) rows, preserving order
        return [data[i] for i in range(len(data)) if data[i]]
    except Exception:
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
def tratarDadosSimulacao(self, widget):
    """Load the simulation input file selected in the chooser into
    self.inputSimulador.  Failures are printed and logged to trace.log."""
    try:
        caminho = self.butOpenSimulador.get_filename()
        self.inputSimulador = self.extrair_dados(caminho)
    except Exception:
        trace = traceback.format_exc()
        # keep a readable copy on stdout and persist it to trace.log
        print("Ocorreu um erro: \n" + trace)
        with open("trace.log", "a") as log:
            log.write(trace)
def daemonize(pidfile, nopidfile):
    """Detach the current process from the terminal (double fork).

    pidfile: path where the daemon pid is written unless nopidfile is
    truthy.  Exits the intermediate parents; on failure writes to stderr
    and exits with status 1.  Finally redirects stdio to /dev/null.
    """
    # Make a non-session-leader child process
    try:
        pid = os.fork()  # @UndefinedVariable - only available in UNIX
        if pid != 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)
    os.setsid()  # @UndefinedVariable - only available in UNIX

    # Make sure I can read my own files and shut out others
    prev = os.umask(0)
    os.umask(prev and int('077', 8))

    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork()  # @UndefinedVariable - only available in UNIX
        if pid != 0:
            os._exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # Write pid
    if not nopidfile:
        pid = str(os.getpid())
        try:
            # with-statement guarantees the handle is closed (the
            # original used the removed py2 `file` builtin and leaked it)
            with open(pidfile, 'w') as pf:
                pf.write("%s\n" % pid)
            print(pidfile)
        except IOError as e:
            sys.stderr.write(u"Unable to write PID file: " + pidfile +
                             ". Error: " + str(e.strerror) +
                             " [" + str(e.errno) + "]")
            sys.exit(1)
    else:
        print("no pid file")

    # Redirect all output to the null device
    sys.stdout.flush()
    sys.stderr.flush()
    devnull = getattr(os, 'devnull', '/dev/null')
    stdin = open(devnull, 'r')
    stdout = open(devnull, 'a+')
    stderr = open(devnull, 'a+')
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())
    os.dup2(stderr.fileno(), sys.stderr.fileno())
def _fileContents(filename=None, path=None, file=None, length=None):
    # Return up to *length* bytes from either a path or an open file
    # object.  Exactly one of (filename|path) or file must be supplied;
    # filename is a legacy alias for path.  Path reads are serialised
    # under the "dsfileLock" named section and fall back to the temp-file
    # twin (tempPathForPath) when the real path does not exist yet.
    assert not (filename and path)
    if filename:
        path = filename
    assert not (path and file)
    if length:
        params = (length,)   # forwarded to read(): partial read
    else:
        params = ()          # read the whole file
    if path:
        path = os.path.normpath(path)
        #dsunittest.trace(">dsfile.fileContents file '%s'" % (path))
        dsthread.blockEnterNamedSection(name="dsfileLock")
        data = ""
        try:
            if not os.path.exists(path):
                # the contents may only exist as the in-progress temp twin
                realPath = tempPathForPath(path=path)
            else:
                realPath = path
            # __builtin__.file: the builtin is shadowed by the `file`
            # parameter of this function
            f = __builtin__.file(realPath, "rb")
            data = f.read(*params)
            f.close()
        finally:
            dsthread.leaveNamedSection(name="dsfileLock")
        #dsunittest.trace("<dsfile.fileContents file '%s', length %u" % (path, len(data)))
        return data
    elif file:
        # file-like object: always read from the start of the stream
        file.seek(0)
        return file.read(*params)
    else:
        assert 0, "Pass path or file"
def start(self, *args, **kwargs):
    """
    Start the daemon: refuse to run twice (based on the pidfile),
    daemonize, then invoke self.run(*args, **kwargs).
    """
    if self.verbose >= 1:
        print("Starting...")

    # Check for a pidfile to see if the daemon already runs
    try:
        with open(self.pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except IOError:
        # no pidfile: not running
        pid = None
    except ValueError:
        # pidfile exists but holds garbage (the original only caught the
        # unreachable SystemExit here and crashed on this case)
        pid = None

    if pid:
        message = "pidfile %s already exists. Is it already running?\n"
        sys.stderr.write(message % self.pidfile)
        sys.exit(1)

    # Start the daemon
    self.daemonize()
    self.run(*args, **kwargs)
def model(x_train, x_test, y_train, y_test, clf):
    """Fit *clf* on the training split and report its quality.

    Prints the estimator and a per-class precision/recall report, and
    appends the estimator, the test accuracy and the report to the file
    at the module-level `outpath`.

    clf: any scikit-learn style estimator (fit/predict).
    """
    print(clf)
    clf.fit(x_train, y_train)
    answer = clf.predict(x_test)
    # fraction of correct predictions on the test split
    avrage = np.mean(answer == y_test)
    # NOTE(review): precision_recall_curve expects scores/probabilities;
    # passing predict() output yields a single-threshold curve — confirm.
    precision, recall, thresholds = precision_recall_curve(
        y_test, clf.predict(x_test))
    report = classification_report(y_test, answer, target_names=['女', '男'])
    print(report)
    # same output order as before: estimator, accuracy, report
    with open(outpath, 'a+') as f:
        f.write(str(clf))
        f.write("\n")
        f.write(str(avrage))
        f.write("\n")
        f.write(str(report))
        f.write("\n\n")
def load_data(load_file_path, query_action_type='1'):
    """Load one of the csv inputs into a dict.

    For paths matching *actions.csv*: returns {song_id: {day: count}}
    counting only rows whose action type equals *query_action_type*
    (day = epoch seconds // 86400).
    For paths matching *songs.csv*: returns {song_id: artist_id}.
    """
    print("Load data " + str(load_file_path) +
          "  query_action_type " + str(query_action_type))
    res = {}
    gmt_max = 0
    with open(load_file_path, 'r') as handle:
        fd_stdin = csv.reader(handle)
        if re.search('actions.csv', load_file_path):
            for line in fd_stdin:
                #user_id = line[0]
                song_id = line[1]
                # integer day index since the epoch (`//` keeps the
                # Python 2 integer-division semantics of the original)
                gmt_create = int(line[2]) // 3600 // 24
                gmt_max = max(gmt_create, gmt_max)
                action_type = line[3]
                if query_action_type != action_type:
                    continue
                res.setdefault(song_id, {})
                res[song_id].setdefault(gmt_create, 0)
                res[song_id][gmt_create] += 1
        if re.search('songs.csv', load_file_path):
            for line in fd_stdin:
                song_id = line[0]
                artist_id = line[1]
                #song_init_plays = int(line[3])
                #Language = int(line[4])
                #Gender = line[5]
                res[song_id] = artist_id
    print("down load complete!")
    return res
def parse_tagged_file(tfile):
    """Parse a word/TAG formatted file.

    Each line is split on single spaces; every token is then split on
    '/' into its word/tag parts.  Returns (sentences, words) where
    sentences is one [[word, tag], ...] list per line.

    NOTE(review): `words` is never incremented and is always returned
    as 0 — confirm whether a token count was intended.
    """
    linelistTagged = []
    words = 0
    with open(tfile) as handle:
        for line in handle:
            tokens = line.split(" ")
            linelistTagged.append(
                [token.strip('\n').split('/') for token in tokens])
    return linelistTagged, words
def pdf(request, filename):
    """Serve the PDF at PDF_PATH/filename as an attachment download."""
    fullpath = os.path.join(PDF_PATH, filename)
    # read in binary mode: PDFs are not text (the original leaked the
    # handle and read in text mode)
    with open(fullpath, 'rb') as handle:
        response = HttpResponse(handle.read())
    response['Content-Type'] = 'application/pdf'
    response['Content-disposition'] = 'attachment'
    return response
def setInitFile(tempFilePath, filePath, authFormat):
    """Render the init-script template at *tempFilePath* into *filePath*.

    Substitutes the {{...}} placeholders (python executable, install
    path, bind port, entry point, auth line) using the module-level
    `mainPath`/`server` values.  For the remote server a passphrase is
    prompted for (random when left empty).  Returns the chosen port.
    """
    with open(tempFilePath, "r") as f:
        InitFileContent = f.read()
    port = raw_input("Enter Bind Port(empty for default):")
    if not port:
        port = "-1"
    InitFileContent = InitFileContent.replace("{{python}}", sys.executable)
    InitFileContent = InitFileContent.replace("{{path-to-DDDProxy}}", mainPath)
    InitFileContent = InitFileContent.replace("{{port-setting}}", port)
    InitFileContent = InitFileContent.replace("{{entry-point}}", "%s.py" % (server))
    InitFileContent = InitFileContent.replace("{{server-name}}", "dddproxy." + server)
    if server == "remoteServer":
        serverPassword = ""
        while True:
            serverPassword = getpass.getpass(
                "Enter passphrase(empty for random):")
            if not serverPassword:
                # generated password needs no confirmation; show it once
                serverPassword = pwGen(20)
                print("Server password: " + serverPassword)
                break
            # NOTE(review): this confirmation flow was scrubbed/garbled in
            # the original source ('print "Server password:"******...');
            # reconstructed from context — verify against upstream.
            if serverPassword != getpass.getpass("Enter same passphrase again:"):
                print("Passphrases do not match. try again")
            else:
                break
        InitFileContent = InitFileContent.replace(
            "{{auth}}", authFormat % (serverPassword))
    else:
        InitFileContent = InitFileContent.replace("{{auth}}", "")
    if os.path.exists(filePath):
        overwrite = raw_input(filePath + " already exists.\nOverwrite (y/n)?")
        if overwrite != "y":
            exit(1)
    print("Write file " + filePath)
    with open(filePath, "w+") as f:
        f.write(InitFileContent)
    return port
def daemonize(self):
    """
    do the UNIX double-fork magic, see Stevens' "Advanced Programming
    in the UNIX Environment" for details (ISBN 0201563177)
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    """
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            sys.exit(0)
    except OSError as e:
        sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        sys.exit(1)

    # redirect standard file descriptors (open() replaces the removed
    # Python 2 `file` builtin)
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(self.stdin, 'r')
    so = open(self.stdout, 'a+')
    # buffering=0 keeps stderr unbuffered (Python 2 semantics; py3 would
    # require binary mode for unbuffered output)
    se = open(self.stderr, 'a+', 0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # write pidfile; delpid removes it again at interpreter exit
    atexit.register(self.delpid)
    pid = str(os.getpid())
    with open(self.pidfile, 'w+') as pf:
        pf.write("%s\n" % pid)
def create_input_source(source=None, publicID=None, location=None, file=None, data=None, format=None): """ Return an appropriate InputSource instance for the given parameters. """ # TODO: test that exactly one of source, location, file, and data # is not None. input_source = None if source is not None: if isinstance(source, InputSource): input_source = source else: if isinstance(source, _StringTypes): location = source elif hasattr(source, "read") and not isinstance(source, Namespace): f = source input_source = InputSource() input_source.setByteStream(f) if hasattr(f, "name"): input_source.setSystemId(f.name) else: raise Exception("Unexpected type '%s' for source '%s'" % (type(source), source)) if location is not None: base = urljoin("file:", "%s/" % pathname2url(os.getcwd())) absolute_location = URIRef(location, base=base).defrag() if absolute_location.startswith("file:///"): filename = url2pathname(absolute_location.replace("file:///", "/")) file = __builtin__.file(filename, "rb") else: input_source = URLInputSource(absolute_location, format) publicID = publicID or absolute_location if file is not None: input_source = FileInputSource(file) if data is not None: if isinstance(data, unicode): data = data.encode('utf-8') input_source = StringInputSource(data) if input_source is None: raise Exception("could not create InputSource") else: if publicID: input_source.setPublicId(publicID) # TODO: what motivated this bit? id = input_source.getPublicId() if id is None: input_source.setPublicId("") return input_source
def get_pid(self):
    """Return the pid stored in self.pidfile, or None when the file
    cannot be read or does not hold an integer."""
    try:
        with open(self.pidfile, 'r') as pf:
            pid = int(pf.read().strip())
    except IOError:
        # no pidfile: daemon not running
        pid = None
    except ValueError:
        # pidfile exists but is empty/corrupt (the original only caught
        # the unreachable SystemExit here and crashed on this case)
        pid = None
    return pid
def create_data_file(dataset_file_path, data_type, gmt_start=16495, predict_gmt=16738):  # 16738, 16626
    """Write one csv row per key of *data_type*.

    Each row is the key followed by its per-day counts over the days
    [gmt_start, predict_gmt); missing days are filled in with 0 (the
    fill also mutates *data_type* in place, as the original did).
    The defaults are epoch-day indices.
    """
    with open(dataset_file_path, 'w') as handle:
        dataset_file = csv.writer(handle)
        for key, actions_type in data_type.items():
            line = [key]
            for gmt_time in range(gmt_start, predict_gmt):
                # zero-fill gaps so every row has the same width
                actions_type.setdefault(gmt_time, 0)
                line.append(actions_type[gmt_time])
            dataset_file.writerow([str(i) for i in line])
def setFileContents(data, path=None, filename=None, file=None):
    """Atomically write *data* to a path or file object.

    Exactly one of (path|filename) or file must be given; filename is a
    legacy alias for path.  Path writes are serialised under the
    "dsfileLock" named section, go through a temp file + rename, and are
    verified by reading the result back.
    """
    assert not (filename and path)
    if filename:
        path = filename
    assert not (path and file)
    if path:
        path = os.path.normpath(path)
        dsthread.blockEnterNamedSection(name="dsfileLock")
        try:
            # first check if the file is actually changed before writing
            # it out again
            if os.path.exists(path):
                f = __builtin__.file(path, "rb")
                readData = f.read()
                f.close()
                if readData == data:
                    return
            # write to the temp twin first, then rename over the target
            tempFile = tempPathForPath(path=path)
            f = __builtin__.file(tempFile, "wb")
            f.write(data)
            f.close()
            if os.path.exists(path):
                deleteFileOrDirectory(path=path)
            os.rename(tempFile, path)
            if os.path.exists(tempFile):
                deleteFileOrDirectory(path=tempFile)
            # === Check that the file can be read back correctly.
            f = __builtin__.file(path, "rb")
            readData = f.read()
            f.close()
            if readData != data:
                # the original raised a plain string here, which is a
                # TypeError on any Python >= 2.6
                raise IOError(
                    "dsfile.setFileContents: error, datas do not match for file '%s'.\ndata = '%s'\nreadData = '%s'"
                    % (path, data, readData))
        finally:
            dsthread.leaveNamedSection(name="dsfileLock")
    elif file:
        # file object: rewrite its whole contents in place
        file.seek(0)
        file.write(data)
        file.truncate()
    else:
        assert 0, "Pass path or file"
def bailBecauseA11yIsDisabled():
    """Abort the process with a help message when AT-SPI accessibility
    support is disabled.  Doc generators (pydoc/epydoc) are exempted so
    documentation builds do not die on import."""
    if sys.argv[0].endswith("pydoc"):
        return  # pragma: no cover
    try:
        # best effort: detect an epydoc run via our own cmdline (/proc is
        # Linux-only, hence the broad swallow below)
        with open("/proc/%s/cmdline" % os.getpid()) as cmdline:
            if cmdline.read().find('epydoc') != -1:
                return  # pragma: no cover
    except Exception:  # pragma: no cover
        pass  # pragma: no cover
    logger.log("Dogtail requires that Assistive Technology support be enabled."
               "\nYou can enable accessibility with sniff or by running:\n"
               "'gsettings set org.gnome.desktop.interface toolkit-accessibility true'\nAborting...")
    sys.exit(1)
def simular(self, widget):
    """Run the network over the chosen input set and display the outputs.

    Uses the file selected in the simulation chooser when present,
    otherwise falls back to the training inputs.  Errors are reported
    via feedStatus and appended to trace.log.
    """
    try:
        if self.butOpenSimulador.get_filename():
            self.saidaSimulador = self.net.sim(self.inputSimulador)
            # external simulation data has no known targets
            numTargets = [0 for _ in range(len(self.inputSimulador))]
            self._setListStore(self.inputSimulador, numTargets, False)
        else:
            self.saidaSimulador = self.net.sim(self.inputs)
            numTargets = [float(t[0]) for t in self.targets]
            self._setListStore(self.inputs, numTargets, False)
        self.feedStatus.gerarStatus(self.feedStatus.contexto_simulacao)
    except Exception:
        self.feedStatus.gerarStatus(self.feedStatus.contexto_erroSim)
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
def download(request, file_name):
    """Stream MEDIA_ROOT/file_name back as an attachment (X-Sendfile)."""
    from wsgiref.util import FileWrapper
    import mimetypes
    from django.http import HttpResponse
    from django.utils.encoding import smart_str
    file_path = settings.MEDIA_ROOT + '/' + file_name
    # guess_type returns a (type, encoding) tuple; the original passed
    # the whole tuple as content_type — use just the mime type
    file_mimetype = mimetypes.guess_type(file_path)[0]
    file_wrapper = FileWrapper(open(file_path, 'rb'))
    response = HttpResponse(file_wrapper, content_type=file_mimetype)
    response['X-Sendfile'] = file_path
    response['Content-Length'] = os.stat(file_path).st_size
    response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(
        file_name)
    return response
def criarRede(self, widget):
    """Build a new neural network from the current UI settings.

    The input range is fixed to [-100, 100] per input column; the
    activation function comes from the selected radio button.  Errors
    are reported via feedStatus and appended to trace.log.
    """
    try:
        # one [-100, 100] interval per input feature
        intervalo = [[-100, 100] for _ in range(len(self.inputs[0]))]
        rede = rna.Rede(intervalo, self.linear.get_active(),
                        self.heaviside.get_active(),
                        self.tangente.get_active(),
                        self.sigmoide.get_active(), self.listaNeuronios)
        self.net = rede.criarRede()
        numTargets = [float(t[0]) for t in self.targets]
        self._setListStore(self.inputs, numTargets, True)
        self.feedStatus.gerarStatus(self.feedStatus.contexto_redeCriada)
    except Exception:
        self.feedStatus.gerarStatus(self.feedStatus.contexto_rede)
        trace = traceback.format_exc()
        with open("trace.log", "a") as log:
            log.write(trace)
def bailBecauseA11yIsDisabled():
    # Abort with a help message when accessibility (AT-SPI) is disabled.
    # NOTE(review): duplicate of an identical helper elsewhere in this
    # file — consider keeping only one copy.
    if sys.argv[0].endswith("pydoc"):
        return  # pragma: no cover  (doc generation: do not abort)
    try:
        # best-effort epydoc detection via /proc (Linux only; py2 `file`
        # builtin); failures are deliberately swallowed below
        if file("/proc/%s/cmdline" % os.getpid()).read().find('epydoc') != -1:
            return  # pragma: no cover
    except:  # pragma: no cover
        pass  # pragma: no cover
    logger.log(
        "Dogtail requires that Assistive Technology support be enabled."
        "\nYou can enable accessibility with sniff or by running:\n"
        "'gsettings set org.gnome.desktop.interface toolkit-accessibility true'\nAborting..."
    )
    sys.exit(1)
def file(fName, output=False, notNone=False, append=False) :
    """Return a file object for the given file name.

    fName is the full path to a file.  fName = "-" is interpreted as
    standard input (or standard output when output=True).
    output = True opens the file for writing.
    notNone = True returns a file opened on /dev/null instead of None
    when fName = "".
    append opens the file in append mode; must be used with output=True.

    NOTE: this helper deliberately shadows the Python 2 builtin `file`,
    which it reaches through __builtin__ below.
    """
    import __builtin__
    if append :
        wMode = "a"
    else :
        wMode = "w"
    # empty file name: /dev/null or None, depending on notNone
    if fName == "" :
        if notNone :
            if output :
                return file("/dev/null", wMode)
            else :
                return file("/dev/null")
        else :
            return None
    elif fName == "-" :
        # standard output or standard input
        if output :
            return sys.stdout
        else :
            return sys.stdin
    else :
        # a regular file
        if output :
            return __builtin__.file(fName, wMode)
        else :
            return __builtin__.file(fName)
def tratarEntrada(self, widget):
    """Load the training spreadsheet and split it into inputs/targets.

    The last column of every row becomes the target (wrapped as a
    one-element list); the remaining columns are the inputs.  Errors
    are printed and appended to trace.log.
    """
    try:
        self.inputs = list()
        self.targets = list()
        caminho = self.butOpenEntradas.get_filename()
        novaf = self.extrair_dados(caminho)
        if novaf:
            # last column is the target; strip it from the inputs in place
            for linha in novaf:
                self.targets.append([linha[-1]])
                del linha[-1]
            self.inputs = novaf
    except Exception:
        trace = traceback.format_exc()
        print("Ocorreu um erro: \n" + trace)
        with open("trace.log", "a") as log:
            log.write(trace)
def closeEditSelected(self, rc):
    """Close handler for the macro edit dialog; rc is truthy when the
    user confirmed.

    On confirm, writes the edited lines back to self.editFileName (one
    per line, trailing whitespace stripped) and enables the save button.
    """
    if rc:
        data = self.editDlg.getData()
    self.editDlg.Destroy()
    self.bEditSel.Enable(True)
    self.bRename.Enable(True)
    self.bChgFile.Enable(True)
    if not rc:
        # cancelled: nothing to write
        return
    # with-statement closes the handle even on write errors
    with open(self.editFileName, 'w') as fp:
        for ln in data:
            fp.write("%s\n" % ln.rstrip())
    self.bSave.Enable(True)
def closeEditSelected(self, rc):
    # Close handler for the macro edit dialog; rc is truthy when the user
    # confirmed.  NOTE(review): near-duplicate of an identical handler in
    # this file (semicolon/formatting only) — consider deduplicating.
    if rc:
        data = self.editDlg.getData()
    self.editDlg.Destroy();
    self.bEditSel.Enable(True)
    self.bRename.Enable(True)
    self.bChgFile.Enable(True)
    if not rc:
        # cancelled: nothing to write
        return
    # py2 `file` builtin; write the edited lines back out
    fp = file(self.editFileName, 'w')
    for ln in data:
        fp.write("%s\n" % ln.rstrip())
    fp.close()
    self.bSave.Enable(True)
def closeNewFile(self, rc):
    """Close handler for the new-macro dialog; rc is truthy when
    confirmed.

    Prompts for a destination (appending .gcode when no extension is
    given), remembers the chosen directory in the settings, and writes
    the edited lines out.
    """
    if rc:
        data = self.editDlg.getData()
    self.editDlg.Destroy()
    self.bNewFile.Enable(True)
    if not rc:
        # cancelled: nothing to save
        return
    dlg = wx.FileDialog(self, message="Save as ...",
                        defaultDir=self.settings.lastmacrodirectory,
                        defaultFile="", wildcard=wildcard,
                        style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    val = dlg.ShowModal()
    if val != wx.ID_OK:
        dlg.Destroy()
        return
    path = dlg.GetPath()
    # remember the last directory used
    mdir = os.path.split(path)[0]
    if mdir != self.settings.lastmacrodirectory:
        self.settings.lastmacrodirectory = mdir
        self.settings.setModified()
    dlg.Destroy()
    # default to a .gcode extension
    ext = os.path.splitext(os.path.basename(path))[1]
    if ext == "":
        path += ".gcode"
    # with-statement closes the handle even on write errors
    with open(path, 'w') as fp:
        for ln in data:
            fp.write("%s\n" % ln.rstrip())
    self.bSave.Enable(True)
def closeNewFile(self, rc):
    # Close handler for the new-macro dialog; rc is truthy when confirmed.
    # NOTE(review): near-duplicate of an identical handler in this file
    # (semicolon/formatting only) — consider deduplicating.
    if rc:
        data = self.editDlg.getData()
    self.editDlg.Destroy();
    self.bNewFile.Enable(True)
    if not rc:
        # cancelled: nothing to save
        return
    # ask where to save the macro
    dlg = wx.FileDialog(
        self, message="Save as ...",
        defaultDir=self.settings.lastmacrodirectory,
        defaultFile="", wildcard=wildcard,
        style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
    val = dlg.ShowModal()
    if val != wx.ID_OK:
        dlg.Destroy()
        return
    path = dlg.GetPath()
    # remember the last directory used
    mdir = os.path.split(path)[0]
    if mdir != self.settings.lastmacrodirectory:
        self.settings.lastmacrodirectory = mdir
        self.settings.setModified()
    dlg.Destroy()
    # default to a .gcode extension
    ext = os.path.splitext(os.path.basename(path))[1]
    if ext == "":
        path += ".gcode"
    # py2 `file` builtin; write the macro lines out
    fp = file(path, 'w')
    for ln in data:
        fp.write("%s\n" % ln.rstrip())
    fp.close()
    self.bSave.Enable(True)
def plain(node, fileName=None):
    """
    Plain-text dump.  The hierarchy is represented through indentation.
    Writes to *fileName* when given, otherwise to stdout.
    """
    def crawl(item, depth):
        # dump the node itself, then its actions, then recurse
        dump(item, depth)
        for action in item.actions.values():
            dump(action, depth + 1)
        for child in item.children:
            crawl(child, depth + 1)

    def dumpFile(item, depth):
        _file.write(spacer * depth + str(item) + '\n')

    def dumpStdOut(item, depth):
        print(spacer * depth + str(item))

    if fileName:
        dump = dumpFile
        # the original opened the file and never closed it
        with open(fileName, 'w') as _file:
            crawl(node, 0)
    else:
        dump = dumpStdOut
        crawl(node, 0)
def get_artist_id_file(artist_id_file_path, data_type):
    """Write every key of *data_type* as one csv row (one key per line)."""
    with open(artist_id_file_path, 'w') as handle:
        artist_id_file = csv.writer(handle)
        # a dict iterates its keys directly; no .keys() needed
        for key in data_type:
            artist_id_file.writerow([key])
from gi.repository import Gtk from gi.repository import Gio from pyexcel_ods import get_data from pyexcel_ods import save_data import pylab import neurolab import pygame, sys from pygame.locals import * except: #pega a excecao gerada trace = traceback.format_exc() #imprime print "Ocorreu um erro: \n",trace #salva em arquivo file("trace.log","a").write(trace) #encerra programa raise SystemExit class Interface (object): ''' Descrição: Classe principal para o programa neuroIFC. Classe que ficará responsável pela chamada do Gtk, versão do Debian 8, com Gnome 3.14. Utilização: Para iniciar o programa basta chamar essa classe com o comando: python Interface.py ''' def __init__(self): gladeXML = "Interface.glade" builder = Gtk.Builder()
def _check(self, patchpath):
    # Validate one patch file against self.sourcedir while rewriting its
    # hunk headers into "new.patch" and collecting edit logs in "log.txt".
    # Returns the number of errors found (0 on success, -1 for a binary
    # patch, which is skipped).
    self._misc_msg('\nPATCH: "%s"' % patchpath)
    patchpath = ut.join_path(self.patchdir, patchpath)
    # print patchpath
    pdata = Patch(patchpath)
    if (len(pdata.diffs) == 0):
        self._info_msg('skipping empty/commented patch', 1)
        return 0
    if (pdata.patch_type == 'binary'):
        self._info_msg('skipping binary patch', 1)
        return -1
    errors = 0
    context = []   # rewritten diff texts, one entry per diff
    logs = ""
    for diff in pdata.diffs:
        # print "a path :" + diff.a_path
        # print "old path :" + diff.old_path
        # print "new path :" + diff.new_path
        self._misc_msg('DIFF: "%s"' % diff.spec, 1)
        if (not self._check_paths(diff)):
            errors += 1
            continue
        if (diff.old_path == '/dev/null'):
            # Can't fail on adding lines to a new file
            continue
        # print "old file path :" + ut.join_path(self.sourcedir, diff.old_path)
        old_lines = ut.read_strings(
            ut.join_path(self.sourcedir, diff.old_path))
        new_hunks = []
        new_diff = []
        for hunk in diff.hunks:
            self._misc_msg('HUNK: "%s"' % hunk.spec, 2)
            edits = hunk.edits
            start = hunk.old_start
            count = hunk.old_count
            tag = 'old'
            note = hunk.note
            if (not self._check_hunk_format(start, count, len(old_lines),
                                            tag)):
                errors += 1
                continue
            # print "begin to change hunk edits"
            # errors += self._check_hunk_edits(diff.old_path, edits, start, count, note, old_lines)
            # collect the rewritten hunk information
            new_hunks.append(
                self._patch_hunk_edits(diff.old_path, edits, start, count,
                                       note, old_lines))
            # print "end for this hunk"
        # emit the header line for the current diff
        new_diff.append(diff.spec + "\n")
        # for (new_edits,new_start,context_start,new_count,context_count) in new_hunks:
        #     new_diff.append("@@ " + str(new_start) + "," + str(context_count) + " " + str(context_start) + "," + str(new_count) + "@@\n")
        #     for edit in new_edits:
        #         new_diff.append(edit + "\n")
        #
        # context.append("".join(new_diff))
        for (new_place, before_start, before_count, after_start,
             after_count, log) in new_hunks:
            new_diff.append("@@ -" + str(before_start) + "," +
                            str(before_count) + " +" + str(after_start) +
                            "," + str(after_count) + "@@\n")
            for (edit, new_line_current, new_line_edited) in new_place:
                new_diff.append(edit + "\n")
            logs += log
        context.append("".join(new_diff))
        # print new_diff
    # write the rewritten diffs out (py2 `file` builtin)
    f = file("new.patch", "w")
    # print context
    f.write("".join(context))
    f.close()
    logfile = file("log.txt", "w")
    logfile.write("".join(logs))
    logfile.close()
    self._info_msg("%d patch errors" % errors, 1)
    return errors
def test_get_scores(self):
    """Smoke test: extract scores from a saved Rotten Tomatoes page."""
    # open() replaces the removed py2 `file` builtin and closes the handle
    with open('/home/sarath/start-internet-idea/tvguide/resources/rt_page_2.html', 'r') as handle:
        page = handle.read()
    print(self.rt.get_scores(page))
def test_parse_search_url(self):
    """Smoke test: parse a saved Rotten Tomatoes search results page."""
    # open() replaces the removed py2 `file` builtin and closes the handle
    with open('/home/sarath/start-internet-idea/tvguide/resources/RT_search.html', 'r') as handle:
        page = handle.read()
    print(self.rt.parse_search_page(page))
def test_get_scores(self):
    """Smoke test: extract scores from a saved IMDB title page."""
    # open() replaces the removed py2 `file` builtin and closes the handle
    with open('/home/sarath/start-internet-idea/tvguide/resources/imdb_title.html', 'r') as handle:
        page = handle.read()
    print(self.imdb.get_scores(page))
def test_get_scores(self):
    """Smoke test: extract scores from a saved IMDB title page.
    NOTE(review): duplicate of an identical test elsewhere in this file."""
    # open() replaces the removed py2 `file` builtin and closes the handle
    with open(
            '/home/sarath/start-internet-idea/tvguide/resources/imdb_title.html',
            'r') as handle:
        page = handle.read()
    print(self.imdb.get_scores(page))
result_list = self.graph_api.request( '/search', { 'q': tv_listing.show_name, 'type': 'page', 'fields': 'likes,id,name,talking_about_count', 'limit': 1 }) data_ = result_list['data'] if data_: result = data_[0] result['fans'] = result.pop('likes') return result time.sleep(2) return {} if __name__ == '__main__': search = FbSearch() listing_file = file(sys.argv[1], 'r') scores = open(sys.argv[2], 'a') for line in listing_file: value = line.strip() print(value) listing = Listing(value) try: score = search.search_show(listing) scores.write(value + '\t' + ujson.dumps(score) + "\n") except Exception as e: print(value, e) time.sleep(30 * 60) scores.close()
def test_parse_search_url(self):
    """Smoke test: parse a saved Rotten Tomatoes search results page.
    NOTE(review): duplicate of an identical test elsewhere in this file."""
    # open() replaces the removed py2 `file` builtin and closes the handle
    with open(
            '/home/sarath/start-internet-idea/tvguide/resources/RT_search.html',
            'r') as handle:
        page = handle.read()
    print(self.rt.parse_search_page(page))
def test_get_scores(self):
    """Smoke test: extract scores from a saved Rotten Tomatoes page.
    NOTE(review): duplicate of an identical test elsewhere in this file."""
    # open() replaces the removed py2 `file` builtin and closes the handle
    with open(
            '/home/sarath/start-internet-idea/tvguide/resources/rt_page_2.html',
            'r') as handle:
        page = handle.read()
    print(self.rt.get_scores(page))
def __init__(self, configFilePath, configFile):
    """
    Initialize the SimpleIOHubRuntime object: load the experiment
    configuration .yaml file and prepare the attributes used to launch
    the ioHub server process and the client-side device interface.

    Note on timing: the ioHub timer uses a ctypes QPC implementation on
    win32 and timeit.default_timer elsewhere, so the psychopy process
    and the ioHub process share the exact same timebase (no per-process
    start offset), which greatly simplifies synchronization as long as
    both run on the same PC.

    Args:
        configFilePath (str): absolute path to the directory holding the
            experiment configuration .yaml file (by default the directory
            the experiment script runs from).
        configFile (str): name of the experiment configuration .yaml
            file (default 'experiment_config.yaml').

    Return: None
    """
    self.currentTime = computer.currentSec
    self.configFilePath = configFilePath
    self.configFileName = configFile
    self.fullPath = os.path.join(self.configFilePath, self.configFileName)

    # load the experiment config settings from the experiment_config.yaml
    # file; it must be in the same directory as the experiment script.
    # (open() replaces the removed py2 `file` builtin; the original
    # never closed the handle it passed to load())
    with open(self.fullPath, u'r') as config_fp:
        self.configuration = load(config_fp, Loader=Loader)

    self.experimentConfig = dict()
    self._experimentConfigKeys = [
        'title', 'code', 'version', 'description', 'total_sessions_to_run'
    ]
    for key in self._experimentConfigKeys:
        if key in self.configuration:
            self.experimentConfig[key] = self.configuration[key]

    self.experimentSessionDefaults = self.configuration['session_defaults']
    self.sessionUserVariables = self.experimentSessionDefaults[
        'user_variables']
    del self.experimentSessionDefaults['user_variables']

    # self.hub will hold the ioHubClient reference used to access the
    # ioHub server process and devices.
    self.hub = None
    # holds events collected from the ioHub during periods like msecWait()
    self.allEvents = None
    # indicates if the experiment is in high priority mode.  Do not set
    # directly; see enableHighPriority() and disableHighPriority().
    self._inHighPriorityMode = False
    self.sysutil = ioHub.devices.computer
    # initialize the experiment based on the configuration settings
    self._initalizeConfiguration()
# Script: find the five most frequent words in input.txt, then print every
# sentence that contains at least one of them.
f = open("input.txt")
list = []    # NOTE(review): shadows the builtin `list`
word = []
words = []
files = []
s = ""
line = f.readline()


# top five words
def tokens(text):
    # lowercase alphabetic runs only
    return re.findall('[a-z]+', text.lower())


a = tokens(file('input.txt').read())
counts = collections.Counter(a)
words = counts.most_common(5)   # [(word, count), ...]
a = file("input.txt").read()
files = sent_tokenize(a)        # nltk sentence split
filess = [word_tokenize(t) for t in files]
# collect each sentence containing one of the top-5 words
for i in files:
    for j in i.split(" "):
        if (j == words[0][0]) or (j == words[1][0]) or (j == words[2][0]) or (
                j == words[3][0]) or (j == words[4][0]):
            s = s + i + "\n"
            break
print(s)
# NOTE(review): the body of this loop continues beyond the visible span
while line:
def _initalizeConfiguration(self):
    """
    Based on the configuration data in the experiment_config.yaml and
    iohub_config.yaml, configure the experiment environment and ioHub
    process environments. This method is called by the class init and
    should not be called directly.

    (Name keeps the historical 'initalize' spelling because __init__ and
    any external callers reference it by this name.)

    Return:
        The ioHubClient instance (self.hub), or None if ioHub is disabled.
    """
    if 'ioHub' in self.configuration and self.configuration['ioHub'][
            'enable'] is True:
        from ioHub.client import ioHubClient

        ioHubConfigFileName = unicode(
            self.configuration['ioHub']['config'])
        ioHubConfigAbsPath = os.path.join(self.configFilePath,
                                          unicode(ioHubConfigFileName))
        # Use a context manager so the config file handle is closed; the
        # previous code passed an open file() object to load() and never
        # closed it (resource leak).
        with open(ioHubConfigAbsPath, u'r') as config_fp:
            self.ioHubConfig = load(config_fp, Loader=Loader)

        self.hub = ioHubClient(self.ioHubConfig, ioHubConfigAbsPath)
        self.hub.startServer()
        self.hub._calculateClientServerTimeOffset(500)

    # Is ioHub configured to be run in experiment?
    if self.hub:
        # Display a read-only dialog verifying the experiment parameters
        # (based on the experiment .yaml file) to be run. User can hit OK
        # to continue, or Cancel to end the session if the wrong
        # experiment was started.
        exitExperiment = self.displayExperimentSettingsDialog()
        if exitExperiment:
            print("User Cancelled Experiment Launch.")
            self._close()
            import sys
            sys.exit(1)

        # send experiment info and set exp. id
        self.hub.sendExperimentInfo(self.experimentConfig)

        # Display an editable session-variable dialog with the ioHub
        # required session variables and any user defined session
        # variables (as specified in the experiment config .yaml file).
        # User can enter correct values and hit OK to continue, or Cancel
        # to end the experiment session.
        exitExperiment = self.displayExperimentSessionSettingsDialog()
        if exitExperiment:
            print("User Cancelled Experiment Launch.")
            self._close()
            import sys
            sys.exit(1)

        # send session data to ioHub and get session ID (self.hub.sessionID)
        tempdict = self.experimentSessionDefaults
        tempdict['user_variables'] = self.sessionUserVariables
        self.hub.sendSessionInfo(tempdict)

        # Create a local 'thin' representation of the registered ioHub
        # devices, allowing such things as device level event access (if
        # supported) and transparent IPC calls of public device methods
        # and return value access. Devices are available as
        # hub.devices.[device_name], where device_name is the name given
        # to the device in the ioHub .yaml config file; i.e.
        # hub.devices.ExperimentPCkeyboard would access the experiment PC
        # keyboard device if the default name was being used.
        self.hub._createDeviceList()

        # A circular buffer used to hold events retrieved from
        # self.getEvents() during self.delay() calls. self.getEvents()
        # appends any events in the allEvents buffer to the result of the
        # hub.getEvents() call that is made.
        self.allEvents = deque(
            maxlen=self.configuration['event_buffer_length'])
    else:
        print("** ioHub is Disabled (or should be). Why are you using "
              "this utility class then? ;) **")

    # Set process affinities based on config file settings; each affinity
    # defaults to all CPUs, and an empty list in the config also means
    # "all CPUs".
    cpus = range(computer.cpuCount)
    experiment_process_affinity = cpus
    other_process_affinity = cpus
    iohub_process_affinity = cpus
    if 'process_affinity' in self.configuration:
        experiment_process_affinity = self.configuration[
            'process_affinity']
        if len(experiment_process_affinity) == 0:
            experiment_process_affinity = cpus
    if 'remaining_processes_affinity' in self.configuration:
        other_process_affinity = self.configuration[
            'remaining_processes_affinity']
        if len(other_process_affinity) == 0:
            other_process_affinity = cpus
    if self.hub and 'process_affinity' in self.configuration['ioHub']:
        iohub_process_affinity = self.configuration['ioHub'][
            'process_affinity']
        if len(iohub_process_affinity) == 0:
            iohub_process_affinity = cpus

    # Only call the affinity setters when something is actually restricted
    # below the full CPU set.
    if len(experiment_process_affinity) < len(cpus) or len(
            iohub_process_affinity) < len(cpus):
        self.setProcessAffinities(experiment_process_affinity,
                                  iohub_process_affinity)

    if len(other_process_affinity) < len(cpus):
        # Never re-pin this process or the ioHub server process itself.
        ignore = [
            computer.currentProcessID,
        ]
        if self.hub:
            ignore.append(self.hub.server_pid)
        computer.setAllOtherProcessesAffinity(other_process_affinity,
                                              ignore)
    return self.hub