def time(self, name):
    rt = RunTimer()
    before = time.time()
    yield rt  # Can call .set_result()
    after = time.time()
    elapsed = after - before
    self.log(name, "in {0:.2f} secs".format(elapsed), rt.result)
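# A usage sketch for the timing helper above, assuming the generator is
# wrapped with contextlib.contextmanager and that `metrics` is an instance of
# the class it belongs to (both `metrics` and `fetch_users` are hypothetical):
with metrics.time("fetch-users") as rt:
    rows = fetch_users()      # hypothetical work being timed
    rt.set_result(len(rows))  # optional; rt.result ends up in the log line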
def connect(self, address):
    if isinstance(address, tuple) and len(address) == 2:
        address = gethostbyname(address[0]), address[1]
    if self.timeout == 0.0:
        return self._sock.connect(address)
    sock = self._sock
    if self.timeout is None:
        while True:
            err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
            if err:
                raise error(err, strerror(err))
            result = sock.connect_ex(address)
            if not result or result == EISCONN:
                break
            elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                wait_readwrite(sock.fileno())
            else:
                raise error(result, strerror(result))
    else:
        end = time.time() + self.timeout
        while True:
            err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
            if err:
                raise error(err, strerror(err))
            result = sock.connect_ex(address)
            if not result or result == EISCONN:
                break
            elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                timeleft = end - time.time()
                if timeleft <= 0:
                    raise timeout('timed out')
                wait_readwrite(sock.fileno(), timeout=timeleft)
            else:
                raise error(result, strerror(result))
def run(clients, servers, startup=10):
    port = 10000
    server_procs = []
    for server in servers:
        print "starting", server
        proc = subprocess.Popen([sys.executable, server, str(port)],
                                stdout=subprocess.PIPE)
        _PROCS.append(proc)
        server_procs.append(proc)
        proc.port = port
        proc.server_name = server
        start = time.time()
        while time.time() - start < startup:
            try:
                socket.create_connection(('localhost', port))
                break
            except socket.error:
                pass
        else:  # didn't break
            raise EnvironmentError(
                "server {0} on port {1} didn't come ready within {2}s".format(
                    server, port, startup))
        port += 1
    for serv in server_procs:
        print "SERVER", serv.server_name
        for client in clients:
            print "  CLIENT", client.__name__, client(serv.port)
        serv.kill()
def _wait_read(self):
    assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
    self.__readable = AsyncResult()
    # timeout is because libzmq cannot always be trusted to play nice with
    # libevent. I can only confirm that this actually happens for send, but
    # let's be symmetrical with our dirty hacks.
    # this is effectively a maximum poll interval of 1s
    tic = time.time()
    dt = self._gevent_bug_timeout
    if dt:
        timeout = gevent.Timeout(seconds=dt)
    else:
        timeout = None
    try:
        if timeout:
            timeout.start()
        self.__readable.get(block=True)
    except gevent.Timeout as t:
        if t is not timeout:
            raise
        toc = time.time()
        # gevent bug: get can raise timeout even on clean return
        # don't display zmq bug warning for gevent bug (this is getting ridiculous)
        if self._debug_gevent and timeout and toc - tic > dt and \
                self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
            print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD,
                  file=sys.stderr)
    finally:
        if timeout:
            timeout.cancel()
        self.__readable.set()
def attach_volume(self, local_dev_timeout=120):
    new_device_name = None
    if not self.volume:
        raise FailureWithCode('This import does not have a volume',
                              INPUT_DATA_FAILURE)
    instance_id = self.instance_id
    devices_before = get_block_devices()
    device_name = self.next_device_name(devices_before)
    log.debug('Attaching volume {0} to {1} as {2}'.format(
        self.volume.id, instance_id, device_name), self.task_id)
    self.ec2_conn.attach_volume_and_wait(self.volume.id, instance_id,
                                         device_name)
    elapsed = 0
    start = time.time()
    while elapsed < local_dev_timeout and not new_device_name:
        new_block_devices = get_block_devices()
        log.debug('Waiting for local dev for volume: "{0}", '
                  'elapsed:{1}'.format(self.volume.id, elapsed), self.task_id)
        diff_list = list(set(new_block_devices) - set(devices_before))
        if diff_list:
            for dev in diff_list:
                # If this is virtio attempt to verify vol to dev mapping
                # using serial number field info
                if not os.path.basename(dev).startswith('vd'):
                    try:
                        self.verify_virtio_volume_block_device(
                            volume_id=self.volume.id, blockdev=dev)
                    except ValueError, ex:
                        raise FailureWithCode(ex, ATTACH_VOLUME_FAILURE)
                new_device_name = dev
                break
        elapsed = time.time() - start
        if elapsed < local_dev_timeout:
            time.sleep(2)
def watch_server_pids(server_pids, interval=1, **kwargs):
    """Monitor a collection of server pids, yielding back those pids that
    aren't responding to signals.

    :param server_pids: a dict, lists of pids [int, ...] keyed on
                        Server objects
    """
    status = {}
    start = time.time()
    end = start + interval
    server_pids = dict(server_pids)  # make a copy
    while True:
        for server, pids in server_pids.items():
            for pid in pids:
                try:
                    # let pid stop if it wants to
                    os.waitpid(pid, os.WNOHANG)
                except OSError, e:
                    if e.errno not in (errno.ECHILD, errno.ESRCH):
                        raise  # else no such child/process
            # check running pids for server
            status[server] = server.get_running_pids(**kwargs)
            for pid in pids:
                # original pids no longer in running pids!
                if pid not in status[server]:
                    yield server, pid
            # update active pids list using running_pids
            server_pids[server] = status[server]
        if not [p for server, pids in status.items() for p in pids]:
            # no more running pids
            break
        if time.time() > end:
            break
        else:
            time.sleep(0.1)
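# A usage sketch for watch_server_pids(): drain the generator and report each
# (server, pid) pair that stopped responding within the interval. The
# `server_pids` mapping and the 30-second interval are illustrative.
for server, killed_pid in watch_server_pids(server_pids, interval=30):
    print '%s (%s) appears to have stopped' % (server, killed_pid)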
def find_proxy(url, timeout, testing_url):
    try:
        response = urllib.urlopen(url)
    except:
        if Debug: print "Request to get proxy failed."
        return (False, False)
    result = response.getcode()
    content = response.read()
    data = json.loads(content)
    if Debug: print data['curl']
    start_time = time.time()
    try:
        response = urllib.urlopen(testing_url, proxies={'http': data['curl']})
    except:
        if Debug: print "Proxy test request failed."
        return (False, False)
    result = response.getcode()
    request_time = time.time() - start_time
    if result == 200:
        if Debug: print "\n\nGot test url with %d in %f seconds" % (result, request_time)
        return (data['curl'], request_time)
    else:
        if Debug: print "Failed with %d" % result
        return (False, False)
def main():
    print("Code to look at runtime for insertion sort vs. Python's list sort.")
    numDig = 5  # number of digits to output

    # large list with numElements elements
    numElements = 10000
    data = []
    for i in range(numElements):
        data.append(randint(1, numElements))
    print("\nSorting list with " + str(len(data)) + " elements.\n")
    start = time.time()
    insertionSort(data)
    end = time.time()
    print("Insertion sort -> " + str(round(end - start, numDig)) + " seconds.")

    # large list with numElements elements
    numElements = 10000
    data = []
    for i in range(numElements):
        data.append(randint(1, numElements))
    start = time.time()
    data.sort()
    end = time.time()
    print("Python's sort -> " + str(round(end - start, numDig)) + " seconds.")
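# main() above calls insertionSort() without defining it. A minimal sketch of
# a standard in-place insertion sort is given below; the name and in-place
# behavior are assumptions inferred from how main() uses it.
def insertionSort(data):
    """Sort `data` in place using insertion sort (hypothetical helper)."""
    for i in range(1, len(data)):
        key = data[i]
        j = i - 1
        # shift larger elements right until key's slot is found
        while j >= 0 and data[j] > key:
            data[j + 1] = data[j]
            j -= 1
        data[j + 1] = key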
def reader(self):
    # Syncing All Worker Threads
    # self.barrier.await()
    time_to_stop = time.time() + time_to_run
    self.request = Test(grinder.threadNumber, "r").wrap(self.request)
    while time.time() < time_to_stop:
        data = urllib.urlencode({
            'author': self.roomInfo.user,
            'body': self.roomInfo.user + " Grind Test " + datetime.now().strftime("%H:%M %s"),
            'cmo': self.roomInfo.cmo,
            'cookie': self.roomInfo.thacook,
            'film': self.roomInfo.film,
            'instance': self.roomInfo.instance,
            'ishost': self.roomInfo.ishost,
            'mdt': self.roomInfo.mdt,
            'room': self.roomInfo.room,
            'type': self.roomInfo.chat,
            'user_image': self.roomInfo.user_image,
            'u': self.roomInfo.user,
            's': "0",
            'a': "0",
            'c': "6",
            't': time.time(),
            'p': self.roomInfo.p
        })
        self.request.POST(domain + '/services/chat/update', data)
def writer(self):
    # Syncing All Worker Threads
    # self.barrier.await()
    time_to_stop = time.time() + time_to_run
    self.request = Test(grinder.threadNumber, "w").wrap(self.request)
    while time.time() < time_to_stop:
        data = urllib.urlencode({
            'author': self.roomInfo.user,
            'body': self.roomInfo.user + " Grind Test " + datetime.now().strftime("%H:%M %s"),
            'cmo': self.roomInfo.cmo,
            'cookie': self.roomInfo.thacook,
            'film': self.roomInfo.film,
            'instance': self.roomInfo.instance,
            'ishost': self.roomInfo.ishost,
            'mdt': self.roomInfo.mdt,
            'room': self.roomInfo.room,
            'type': self.roomInfo.chat,
            'user_image': self.roomInfo.user_image,
            'p': self.roomInfo.p
        })
        self.request.POST(domain + '/services/chat/post', data)
        # Since number of clients is the same, vary the write load in linear progression
        time.sleep((time_to_stop - time.time()) / time_to_run)
def update_follower2leveldb():
    # Update users' follower-count data in leveldb from leveldb.
    # test: 0.15 seconds per 10000 users, total 22670000 users, ~0.09 h
    users = xapian_search_user.iter_all_docs(fields=['user', 'followers_count'])
    count = 0
    ts = te = time.time()
    for k, v in user_followers_count_leveldb.RangeIter():
        uid = int(k)
        follower = int(v)
        try:
            active, important, _follower, domain = \
                daily_identify_aifd_bucket.Get(str(uid)).split('_')
        except KeyError:
            active = 0
            important = 0
            domain = 20
        daily_identify_aifd_bucket.Put(str(uid), str(active) + '_' + str(important) + '_' +
                                       str(follower) + '_' + str(domain))
        if count % 10000 == 0:
            te = time.time()
            print count, '%s sec' % (te - ts), ' identify person follower', now_datestr
            ts = te
        count += 1
def simple_find_in_context(self, ref, context):
    """
    Like simple_find, but limits the search to a specific context. Useful
    for when you want to (e.g.) make sure you only look in the webview.

    :param ref: an identifier for an element; id, class name, partial link
        text, etc.
    :param context: the context in which we're looking; typically WEBVIEW
        or NATIVE_APP
    :rtype: WebElement
    """
    # speed up the implicit wait, because with the default time this takes
    # way too long because of all the possible permutations
    self.implicitly_wait(HackedWebDriver.QuickImplicitWait_sec)
    # wrap this all in a try so we can restore the default implicit wait if
    # and when this block exits
    try:
        timeout = time.time() + self.MaxSmartSearchTime_sec
        while time.time() < timeout:
            element = self._simple_find_core(ref, context)
            if element:
                return element
            log.debug(u'exhausted all search methods, looping until we timeout here')
    finally:
        # restore the default implicit wait
        self.implicitly_wait(HackedWebDriver.ImplicitWait_sec)
    assert False, u'couldn\'t find {}!'.format(ref)
def update_domain2leveldb():
    # Update users' domain-membership data in leveldb from leveldb.
    # test: 0.15 seconds per 10000 users, total 22670000 users, ~0.09 h
    count = 0
    ts = te = time.time()
    for k, v in domain_leveldb.RangeIter():
        uid, datestr = k.split('_')
        domainid = DOMAIN_LIST.index(v)
        try:
            active, important, follower, _domain = \
                daily_identify_aifd_bucket.Get(str(uid)).split('_')
        except KeyError:
            active = 0
            important = 0
            follower = 0
        # the freshly computed domain always replaces the stored one
        domain = domainid
        daily_identify_aifd_bucket.Put(str(uid), str(active) + '_' + str(important) + '_' +
                                       str(follower) + '_' + str(domain))
        if count % 10000 == 0:
            te = time.time()
            print count, '%s sec' % (te - ts), ' identify person domain', now_datestr
            ts = te
        count += 1
def main():
    # Process CLI arguments.
    try:
        execname, host, port, mode = sys.argv
    except ValueError:
        execname = sys.argv[0]
        print >>sys.stderr, '%s: incorrect number of arguments' % execname
        print >>sys.stderr, 'usage: %s hostname port [sit|const|wild]' % sys.argv[0]
        sys.exit(-1)

    bzrc = BZRC(host, int(port))
    cur_time = time.time()
    agent = PigeonAgent(bzrc, mode, cur_time)

    # Run the agent
    try:
        while True:
            cur_time = time.time()
            agent.behave(cur_time)
    except KeyboardInterrupt:
        print "Exiting due to keyboard interrupt."
        agent.stop()
        bzrc.close()
def __init__(self, avoid_exit=False):
    self._interpreter = Interpreter(self)
    self._symbol_table = {}
    self._globals = {}

    self.nilObject = None
    self.trueObject = None
    self.falseObject = None
    self.objectClass = None
    self.classClass = None
    self.metaclassClass = None
    self.nilClass = None
    self.integerClass = None
    self.arrayClass = None
    self.methodClass = None
    self.symbolClass = None
    self.primitiveClass = None
    self.systemClass = None
    self.blockClass = None
    self.blockClasses = None
    self.stringClass = None
    self.doubleClass = None

    self._last_exit_code = 0
    self._avoid_exit = avoid_exit
    self._dump_bytecodes = False
    self.classpath = None
    self.start_time = time.time()  # a float of the time in seconds
    self.random = Random(abs(int(time.clock() * time.time())))

    CURRENT = self
def _wait_write(self):
    assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
    self.__writable = AsyncResult()
    # timeout is because libzmq cannot be trusted to properly signal a new send event:
    # this is effectively a maximum poll interval of 1s
    tic = time.time()
    dt = self._gevent_bug_timeout
    if dt:
        timeout = gevent.Timeout(seconds=dt)
    else:
        timeout = None
    try:
        if timeout:
            timeout.start()
        self.__writable.get(block=True)
    except gevent.Timeout as t:
        if t is not timeout:
            raise
        toc = time.time()
        # gevent bug: get can raise timeout even on clean return
        # don't display zmq bug warning for gevent bug (this is getting ridiculous)
        if self._debug_gevent and timeout and toc - tic > dt and \
                self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
            print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD,
                  file=sys.stderr)
    finally:
        if timeout:
            timeout.cancel()
        self.__writable.set()
def main():
    image = image_pro()
    game = game_body()
    game.gui_start()
    start, end, score = 0, 1, 0
    speech.say("Welcome to swords")
    speech.say("press any key to begin")
    while end:
        start, end = game.start(), game.end()
        #start = game.start()
        #print end
        while start:
            #profile.run('main()')
            time1 = time.time()
            im, posX, posY = image.image_core()
            game.sword_center_draw(im, posX, posY)
            score = game.game_core(posX, posY)
            time2 = time.time()
            #print ((time2 - time1) * 1000.0), 'sec'
            end = game.end()
            start = end
        # option for retry
    game.gui_end()
    speech.say("final score is " + str(score))
    cv2.destroyAllWindows()
    pygame.quit()
def tree_constructor():
    """Times the construction of a suffix tree object from file."""
    t0 = time.time()
    tree.tree_by_file(DATA_FILE, False)
    t1 = time.time()
    return t1 - t0
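# tree_constructor() measures a single run; a small sketch that averages
# several runs to smooth out timer noise (the repeat count is illustrative):
def average_construction_time(runs=5):
    return sum(tree_constructor() for _ in range(runs)) / runs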
def findPaths(keys):
    paths = dict([(key, findPath(search_paths[key])) for key in keys])
    for key in paths.keys():
        if paths[key] is None:
            # Download file and install program if available
            if key in download_paths.keys():
                (FN, command, path) = download_paths[key]
                # Check that it has not already been downloaded
                if os.path.isfile(path):
                    paths[key] = os.path.abspath(path)
                else:
                    import time
                    download_start_time = time.time()
                    print 'Downloading and installing ' + key
                    os.system('wget --no-check-certificate http://stash.osgconnect.net/+daveminh/%s' % (FN))
                    os.system('tar -xvf %s' % FN)
                    if command != '':
                        command = command.replace('$WORK_DIR', os.getcwd())
                        os.system(command)
                    if os.path.isfile(path):
                        print key + ' downloaded and installed in %d s' % (
                            time.time() - download_start_time)
                        paths[key] = os.path.abspath(path)
                    else:
                        print 'Could not download ' + key
                        raise Exception('Could not download ' + key)
            else:
                raise Exception('Missing file for ' + key)
    return paths
def perform(self, node, inputs, output_storage):
    start_time = time.time()
    log_posteriors, seq_lengths = inputs

    if numpy.isnan(log_posteriors).any():
        print >> log.v1, 'SprintErrorSigOp: log_posteriors contain NaN!'
    if numpy.isinf(log_posteriors).any():
        print >> log.v1, 'SprintErrorSigOp: log_posteriors contain Inf!'
        #numpy.set_printoptions(threshold=numpy.nan)
        print >> log.v1, 'SprintErrorSigOp: log_posteriors:', log_posteriors

    if self.sprint_instance_pool is None:
        print >> log.v3, "SprintErrorSigOp: Starting Sprint %r" % self.sprint_opts
        self.sprint_instance_pool = SprintInstancePool.get_global_instance(sprint_opts=self.sprint_opts)

    loss, errsig = self.sprint_instance_pool.get_batch_loss_and_error_signal(log_posteriors, seq_lengths)
    #print >> log.v4, 'loss:', loss, 'errsig:', errsig
    output_storage[0][0] = loss
    output_storage[1][0] = errsig

    print >> log.v5, 'SprintErrorSigOp: avg frame loss for segments:', loss.sum() / seq_lengths.sum()
    end_time = time.time()
    if self.debug_perform_time is None:
        from Config import get_global_config
        config = get_global_config()
        self.debug_perform_time = config.bool("debug_SprintErrorSigOp_perform_time", False)
    if self.debug_perform_time:
        print >> log.v1, "SprintErrorSigOp perform time:", end_time - start_time
        from Device import deviceInstance
        assert deviceInstance.is_device_proc()
        forward_time = start_time - deviceInstance.compute_start_time
        print >> log.v1, "SprintErrorSigOp forward time:", forward_time
def readData():
    global rdObj
    rdObj.hostTemp = get_temperature()
    for i in range(60):
        timebegin = time.time()
        get_per_sec_info()
        # sleep out the remainder of the second so each iteration takes ~1s;
        # clamp at 0 in case the call itself overran the second
        time.sleep(max(0, 1 - (time.time() - timebegin)))
def transform(self, data):
    assert np.isfinite(data).all()
    ntest = len(data)
    data = data.copy()
    data.shape = ntest, -1
    assert np.isfinite(data).all()

    print ">>> Computing traintest linear kernel"
    start = time.time()
    kernel_traintest = np.dot(data, self._train_data.T)
    assert not np.isnan(kernel_traintest).any()
    assert not np.isinf(kernel_traintest).any()
    kernel_traintest /= self._ktrace
    assert not np.isnan(kernel_traintest).any()
    assert not np.isinf(kernel_traintest).any()
    end = time.time()
    print "Time: %s" % (end - start)

    return self._clf.decision_function(kernel_traintest).ravel()
def add_engines(n=1, profile='iptest', total=False):
    """add a number of engines to a given profile.

    If total is True, then already running engines are counted, and only
    the additional engines necessary (if any) are started.
    """
    rc = Client(profile=profile)
    base = len(rc)

    if total:
        n = max(n - base, 0)

    eps = []
    for i in range(n):
        ep = TestProcessLauncher()
        ep.cmd_and_args = ipengine_cmd_argv + [
            '--profile=%s' % profile,
            '--InteractiveShell.colors=nocolor'
        ]
        ep.start()
        launchers.append(ep)
        eps.append(ep)
    tic = time.time()
    while len(rc) < base + n:
        if any([ep.poll() is not None for ep in eps]):
            raise RuntimeError("A test engine failed to start.")
        elif time.time() - tic > 15:
            raise RuntimeError("Timeout waiting for engines to connect.")
        time.sleep(.1)
    rc.close()
    return eps
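# Usage sketch for add_engines(): bring the 'iptest' profile up to four
# engines in total, counting any that are already connected (the count of
# four is illustrative).
engines = add_engines(4, profile='iptest', total=True)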
def __check_ssh_agent():
    """Check that an ssh-agent is present and has at least one valid
    looking identity loaded into it."""

    # There's no way to do this w/ putty/pageant and that's OK because
    # they don't hang up on prompting for passwords
    if sys.platform == 'win32':
        return True

    app = wingapi.gApplication
    if not app.fSingletons.fFileAttribMgr[_kCheckSSHAgent]:
        return True

    cmd = 'ssh-add'
    handler = app.AsyncExecuteCommandLine(cmd, os.getcwd(), '-l')
    end = time.time() + 1.0
    while not handler.Iterate() and time.time() < end:
        time.sleep(0.01)
    stdout, stderr, err, status = handler.Terminate()
    if err is None:
        out = stdout + stderr
        if len(out) > 0 and not out.find('no identities') >= 0 and not out.find('not open') >= 0:
            return True

    return False
def wait_for_completion(self):
    if self.is_old:
        self.old.wait_for_completion()
        return

    end = time.time() + self.timeout
    while db.guest_get_status(self.task_id) == "running":
        log.debug("%s: analysis still processing", self.vmid)
        time.sleep(1)

        # If the analysis hits the critical timeout, just return straight
        # away and try to recover the analysis results from the guest.
        if time.time() > end:
            raise CuckooGuestError(
                "The analysis hit the critical timeout, terminating.")

        try:
            status = self.get("/status", timeout=5).json()
        except Exception as e:
            log.info("Virtual Machine /status failed (%r)", e)
            # this might fail due to timeouts or just temporary network
            # issues, thus we don't want to abort the analysis just yet and
            # wait for things to recover
            continue

        if status["status"] == "complete":
            log.info("%s: analysis completed successfully", self.vmid)
            return
        elif status["status"] == "exception":
            log.info("%s: analysis caught an exception\n%s",
                     self.vmid, status["description"])
            return
def wait(self, status):
    """Waiting for status.
    @param status: status.
    @return: always True.
    """
    log.debug("%s: waiting for status 0x%.04x", self.id, status)

    end = time.time() + self.timeout
    self.server._set_timeout(self.timeout)

    while db.guest_get_status(self.task_id) == "starting":
        # Check if we've passed the timeout.
        if time.time() > end:
            raise CuckooGuestError("{0}: the guest initialization hit the "
                                   "critical timeout, analysis "
                                   "aborted.".format(self.id))

        try:
            # If the server returns the given status, break the loop
            # and return.
            if self.server.get_status() == status:
                log.debug("%s: status ready", self.id)
                break
        except:
            pass

        log.debug("%s: not ready yet", self.id)
        time.sleep(1)

    self.server._set_timeout(None)
    return True
def testFolder(inputfolder, outputfolder, decisionThreshold=cfg.decision_threshold, applyNMS=True):
    fileList = os.listdir(inputfolder)
    imagesList = filter(lambda element: '.jpg' in element, fileList)

    print 'Start processing ' + inputfolder
    start = time()

    for filename in imagesList:
        imagepath = inputfolder + '/' + filename
        print 'Processing ' + imagepath

        # Test the current image
        bboxes, scores = testImage(imagepath, decisionThreshold=decisionThreshold, applyNMS=applyNMS)

        # Store the result in a dictionary
        result = dict()
        result['imagepath'] = imagepath
        result['bboxes'] = bboxes
        result['scores'] = scores

        # Save the features to a file using pickle
        outputFile = open(outputfolder + '/' + filename + '_' + '-'.join(cfg.featuresToExtract) + '_' + cfg.model + '.results', "wb")
        pickle.dump(result, outputFile)
        outputFile.close()

    elapsed_time = time() - start
    print('Time elapsed using regular function: ', elapsed_time)
def run_proc_rr(self, process_table, memory_pool, placement_algo):
    # handle process queue
    # stats
    self.switch_count = 0
    avg_burst_time = 0.0
    self.avg_wait_time = 0.0
    print "time 0ms: Simulator started for RR and %s" % placement_algo
    self.t0 = time.time()
    # at the very beginning, when no process has arrived
    process_queue = Queue()
    self.new_arrival_proc(process_table, process_queue, memory_pool, placement_algo)
    while len(process_table):
        self.proc_rr_loop(process_table, process_queue, memory_pool, placement_algo)
    print "time %sms: Simulator for RR and %s ended" % (
        int(1000 * (time.time() - self.t0)) + self.t_pseudo_elapsed, placement_algo)
    # stats
    burst_num = 0
    for val in self.process_table.values():
        avg_burst_time += (val['burst_time'] * val['num_burst'])
        burst_num += val['num_burst']
    self.avg_turnaround_time = (self.avg_wait_time + self.t_cs * self.switch_count +
                                avg_burst_time + self.t_pseudo_elapsed) / burst_num
    avg_burst_time /= burst_num
    self.avg_wait_time /= burst_num
    print "Algorithm RR and %s" % placement_algo
    print "-- average CPU burst time: %.2f ms" % avg_burst_time
    print "-- average wait time: %.2f ms" % self.avg_wait_time
    print "-- average turnaround time: %.2f ms" % self.avg_turnaround_time
    print "-- total number of context switches: %s" % self.switch_count
def train(self, x, y, param_names, **kwargs):
    start = time.time()
    scaled_x = self._set_and_preprocess(x=x, param_names=param_names)

    # Check that each input is between 0 and 1
    self._check_scaling(scaled_x=scaled_x)

    if self._debug:
        print "Shape of training data: ", scaled_x.shape
        print "Param names: ", self._used_param_names
        print "First training sample\n", scaled_x[0]
        print "Encode: ", self._encode

    # Do a random search
    max_features, learning_rate, max_depth, min_samples_leaf, n_estimators = \
        self._random_search(random_iter=100, x=scaled_x, y=y)

    # Now train model
    gb = GradientBoostingRegressor(loss='ls',
                                   learning_rate=learning_rate,
                                   n_estimators=n_estimators,
                                   subsample=1.0,
                                   min_samples_split=2,
                                   min_samples_leaf=min_samples_leaf,
                                   max_depth=max_depth,
                                   init=None,
                                   random_state=self._rng,
                                   max_features=max_features,
                                   alpha=0.9,
                                   verbose=0)
    gb.fit(scaled_x, y)
    self._model = gb

    duration = time.time() - start
    self._training_finished = True
    return duration
def train():
    print('Begin training')
    train_feeder = DataIterator(data_dir='./data/train/')
    test_feeder = DataIterator(data_dir='./data/test/')
    with tf.Session() as sess:
        train_images, train_labels = train_feeder.input_pipeline(
            batch_size=FLAGS.batch_size, aug=True)
        test_images, test_labels = test_feeder.input_pipeline(
            batch_size=FLAGS.batch_size)
        graph = build_graph(top_k=1)
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver = tf.train.Saver()

        train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
        test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/val')
        start_step = 0
        if FLAGS.restore:
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
            if ckpt:
                saver.restore(sess, ckpt)
                print("restore from the checkpoint {0}".format(ckpt))
                start_step += int(ckpt.split('-')[-1])

        logger.info(':::Training Start:::')
        try:
            while not coord.should_stop():
                start_time = time.time()
                train_images_batch, train_labels_batch = sess.run(
                    [train_images, train_labels])
                feed_dict = {
                    graph['images']: train_images_batch,
                    graph['labels']: train_labels_batch,
                    graph['keep_prob']: 0.8
                }
                _, loss_val, train_summary, step = sess.run(
                    [
                        graph['train_op'], graph['loss'],
                        graph['merged_summary_op'], graph['global_step']
                    ],
                    feed_dict=feed_dict)
                train_writer.add_summary(train_summary, step)
                end_time = time.time()
                logger.info("the step {0} takes {1} loss {2}".format(
                    step, end_time - start_time, loss_val))
                if step > FLAGS.max_steps:
                    break
                if step % FLAGS.eval_steps == 1:
                    test_images_batch, test_labels_batch = sess.run(
                        [test_images, test_labels])
                    feed_dict = {
                        graph['images']: test_images_batch,
                        graph['labels']: test_labels_batch,
                        graph['keep_prob']: 1.0
                    }
                    accuracy_test, test_summary = sess.run(
                        [graph['accuracy'], graph['merged_summary_op']],
                        feed_dict=feed_dict)
                    test_writer.add_summary(test_summary, step)
                    logger.info('===============Eval a batch=======================')
                    logger.info('the step {0} test accuracy: {1}'.format(
                        step, accuracy_test))
                    logger.info('===============Eval a batch=======================')
                if step % FLAGS.save_steps == 1:
                    logger.info('Save the ckpt of {0}'.format(step))
                    saver.save(sess,
                               os.path.join(FLAGS.checkpoint_dir, 'my-model'),
                               global_step=graph['global_step'])
        except tf.errors.OutOfRangeError:
            logger.info('==================Train Finished================')
            saver.save(sess,
                       os.path.join(FLAGS.checkpoint_dir, 'my-model'),
                       global_step=graph['global_step'])
        finally:
            coord.request_stop()
            coord.join(threads)
if False:
    # full scale test, takes quite a long time
    batch = batch[:1000]
    res = 25
else:
    # quick demo, takes 1-2 minutes
    batch = batch[:10]
    res = 5

recovery_thresh = 0.01
ms = np.linspace(0, 1, num=res)
sampled = {}
recoveries = {}

# Reshape the flat vectors into a 28x28 matrix
Ms = [np.reshape(images[i], (28, 28)) for i in batch]
rs = np.linalg.matrix_rank(Ms)

start = time.time()
data = {}
for r in np.unique(rs):
    print("unique rank:", r)
    data[int(r)] = []
for i, (M, r) in enumerate(zip(Ms, rs)):
    data[int(r)] += [[]]
    for j, m in enumerate(ms):
        # remove pixels from the image
        corrupted, omega = corrupt_image(M, m)
        # recover the matrix and round to the nearest integer value
        recovered = np.round(complete_matrix(corrupted, omega))
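# corrupt_image() and complete_matrix() are assumed but not shown above. A
# minimal sketch of the corruption step, inferred from its call site (m is the
# fraction of pixels to drop; omega marks the entries that survive):
def corrupt_image(M, m):
    """Zero out roughly a fraction m of M's entries at random; return the
    corrupted matrix and the boolean keep-mask omega (an assumed API)."""
    omega = np.random.rand(*M.shape) >= m  # True where the pixel is kept
    return M * omega, omega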
#tlist = []
ltopic = len(tlist)
t3 = 0
t4 = 0
print("Article analysis - topic detection in progress...\n")
# Process the articles 3000 at a time so as not to use too much RAM
while True:
    articoli = articlecursor.fetchmany(3000)
    # Break out of the while loop when the fetch returns no new articles
    if not articoli:
        break
    for a in articoli:
        t0 = time()
        # Initialize the blob object; creating it automatically analyzes the text
        text = TextBlob(a[1], analyzer=NaiveBayesAnalyzer())
        if text.detect_language() == "it":
            # Since noun-phrase extraction works much worse in Italian than in
            # English, translate first; the result is more reliable
            text = text.translate(to="en")
        # Collect the analysis results from the object's fields
        s = text.sentiment
        t1 = time() - t0
        raw = text.noun_phrases
        # remove exact duplicate topics
        unique = list(set(raw))
def validation():
    print('validation')
    test_feeder = DataIterator(data_dir='./data/test/')

    final_predict_val = []
    final_predict_index = []
    groundtruth = []

    with tf.Session() as sess:
        test_images, test_labels = test_feeder.input_pipeline(
            batch_size=FLAGS.batch_size, num_epochs=1)
        graph = build_graph(3)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())  # initialize test_feeder's inside state

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
        if ckpt:
            saver.restore(sess, ckpt)
            print("restore from the checkpoint {0}".format(ckpt))

        logger.info(':::Start validation:::')
        try:
            i = 0
            acc_top_1, acc_top_k = 0.0, 0.0
            while not coord.should_stop():
                i += 1
                start_time = time.time()
                test_images_batch, test_labels_batch = sess.run(
                    [test_images, test_labels])
                feed_dict = {
                    graph['images']: test_images_batch,
                    graph['labels']: test_labels_batch,
                    graph['keep_prob']: 1.0
                }
                batch_labels, probs, indices, acc_1, acc_k = sess.run(
                    [
                        graph['labels'], graph['predicted_val_top_k'],
                        graph['predicted_index_top_k'], graph['accuracy'],
                        graph['accuracy_top_k']
                    ],
                    feed_dict=feed_dict)
                final_predict_val += probs.tolist()
                final_predict_index += indices.tolist()
                groundtruth += batch_labels.tolist()
                acc_top_1 += acc_1
                acc_top_k += acc_k
                end_time = time.time()
                logger.info(
                    "the batch {0} takes {1} seconds, accuracy = {2}(top_1) {3}(top_k)"
                    .format(i, end_time - start_time, acc_1, acc_k))
        except tf.errors.OutOfRangeError:
            logger.info('==================Validation Finished================')
            acc_top_1 = acc_top_1 * FLAGS.batch_size / test_feeder.size
            acc_top_k = acc_top_k * FLAGS.batch_size / test_feeder.size
            logger.info('top 1 accuracy {0} top k accuracy {1}'.format(
                acc_top_1, acc_top_k))
        finally:
            coord.request_stop()
            coord.join(threads)
    return {
        'prob': final_predict_val,
        'indices': final_predict_index,
        'groundtruth': groundtruth
    }
for frame in range(int(seq_dets[:, 0].max())):
    frame += 1  # detection and frame numbers begin at 1
    dets = seq_dets[seq_dets[:, 0] == frame, 2:7]
    dets[:, 2:4] += dets[:, 0:2]  # convert [x1,y1,w,h] to [x1,y1,x2,y2]
    total_frames += 1

    if (display):
        fn = 'mot_benchmark/%s/%s/img1/%06d.jpg' % (phase, seq, frame)
        im = io.imread(fn)
        ax1.imshow(im)
        plt.title(seq + ' Tracked Targets')

    start_time = time.time()
    trackers = mot_tracker.update(dets)
    cycle_time = time.time() - start_time
    total_time += cycle_time

    for d in trackers:
        print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (frame, d[4], d[0], d[1], d[2] - d[0], d[3] - d[1]),
              file=out_file)
        if (display):
            d = d.astype(np.int32)
            ax1.add_patch(
                patches.Rectangle((d[0], d[1]), d[2] - d[0], d[3] - d[1],
                                  fill=False,
def train_model(model, dataloaders, criterion, optimizer, device, checkpoint_save_path,
                num_epoch_to_stop_if_no_better, fold_idx=0, num_epochs=25,
                is_inception=False, is_resume=False):
    since = time.time()
    # logger = Logger('./logs')

    if is_resume:
        print('Loading weights from previous checkpoint...')
        checkpoint = torch.load(checkpoint_save_path + '_' + str(fold_idx))
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # epoch = checkpoint['epoch']
        # loss = checkpoint['loss']

    val_acc_history = []
    # best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    same_acc_epoch_num = 0
    last_epoch = 0
    last_loss = -1

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    # Special case for inception because in training it has an
                    # auxiliary output. In train mode we calculate the loss by
                    # summing the final output and the auxiliary output, but in
                    # testing we only consider the final output.
                    if is_inception and phase == 'train':
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val':
                # # Tensorboard Logging - Log scalar values (scalar summary)
                # info = {'loss': epoch_loss, 'accuracy': epoch_acc}
                # for tag, value in info.items():
                #     logger.scalar_summary(tag, value, epoch)

                # save improved weights
                if epoch_acc > best_acc:
                    best_acc = epoch_acc
                    # best_model_wts = copy.deepcopy(model.state_dict())
                    same_acc_epoch_num = 0
                    # save a checkpoint
                    torch.save({
                        'epoch': epoch,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': epoch_loss,
                    }, checkpoint_save_path + '_' + str(fold_idx))
                else:
                    same_acc_epoch_num += 1

                val_acc_history.append(epoch_acc)

        print()
        last_epoch = epoch
        last_loss = epoch_loss
        if same_acc_epoch_num > num_epoch_to_stop_if_no_better:
            break

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    # model.load_state_dict(best_model_wts)

    # save a General Checkpoint for Inference and/or Resuming Training
    torch.save({
        'epoch': last_epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': last_loss,
    }, checkpoint_save_path + '_' + str(last_epoch) + '_' + str(fold_idx))

    return model, val_acc_history, best_acc
def min_cut(edges, verts):
    """
    Calls N times min_cut_basic keeping the best result (smallest cut)
    Returns (minimum_cut_size, min_cut)
    min cut is a list containing the two blocks (vertices are represented
    as a str with '_'-separated vertex names)
    """
    n = len(verts)
    N = int(math.ceil(math.log(n) * n * n))  # error prob = 1/n
    # N = n * n * n  # less error prob
    smallest_cut = len(edges)
    cut_verts = []
    print "first choice: %s" % random.choice(edges.keys())  # debug
    time_in_its = 0
    for i in xrange(N):
        ts = time.time()
        e = copy.deepcopy(edges)
        v = copy.deepcopy(verts)
        min_cut_basic(e, v)
        if len(e) < smallest_cut:
            smallest_cut = len(e)
            cut_verts = v.keys()
            print "\nNew min cut: %s\n" % smallest_cut
        time_in_its += time.time() - ts
        if i % n == 0:
            print "iteration %s of %s" % (i, N)
            av_time = time_in_its / (i + 1)
            print "remaining time: %s s" % (av_time * (N - i))
    return smallest_cut, cut_verts
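# min_cut_basic() is assumed but not shown above. A minimal sketch of Karger's
# random-contraction step is given below, under assumed representations
# inferred from how min_cut() uses them: `edges` maps an edge id to a pair of
# endpoint names, `verts` maps each vertex name to an arbitrary value, len(e)
# is the cut size, and v.keys() yields the two '_'-joined blocks.
def min_cut_basic(edges, verts):
    """Contract random edges in place until only two super-vertices remain."""
    while len(verts) > 2 and edges:
        # pick a random edge and merge its endpoints into one super-vertex
        eid = random.choice(edges.keys())
        u, w = edges[eid]
        merged = u + '_' + w
        verts[merged] = None
        del verts[u]
        del verts[w]
        # rewire every edge, dropping self-loops created by the contraction
        for k in edges.keys():
            a, b = edges[k]
            a = merged if a in (u, w) else a
            b = merged if b in (u, w) else b
            if a == b:
                del edges[k]
            else:
                edges[k] = (a, b)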