def compute_time_train(model, loss_fun):
    """Computes precise model forward + backward time using dummy data."""
    # Use train mode
    model.train()
    # Generate a dummy mini-batch and copy data to GPU
    im_size, batch_size = cfg.TRAIN.IM_SIZE, int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS)
    inputs = torch.rand(batch_size, 3, im_size, im_size).cuda(non_blocking=False)
    labels = torch.zeros(batch_size, dtype=torch.int64).cuda(non_blocking=False)
    # Cache BatchNorm2D running stats
    bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]
    bn_stats = [[bn.running_mean.clone(), bn.running_var.clone()] for bn in bns]
    # Compute precise forward backward pass time
    fw_timer, bw_timer = Timer(), Timer()
    total_iter = cfg.PREC_TIME.NUM_ITER + cfg.PREC_TIME.WARMUP_ITER
    for cur_iter in range(total_iter):
        # Reset the timers after the warmup phase
        if cur_iter == cfg.PREC_TIME.WARMUP_ITER:
            fw_timer.reset()
            bw_timer.reset()
        # Forward
        fw_timer.tic()
        _, preds, _ = model(inputs)
        loss = loss_fun(preds, labels)
        torch.cuda.synchronize()
        fw_timer.toc()
        # Backward
        bw_timer.tic()
        loss.backward()
        torch.cuda.synchronize()
        bw_timer.toc()
    # Restore BatchNorm2D running stats
    for bn, (mean, var) in zip(bns, bn_stats):
        bn.running_mean, bn.running_var = mean, var
    return fw_timer.average_time, bw_timer.average_time
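# Hedged usage sketch for compute_time_train. Everything here is an
# assumption: the call above expects a model whose forward returns a 3-tuple
# with the logits in the middle, and CrossEntropyLoss is only an illustrative
# criterion, not necessarily the one used by the surrounding project.
def example_time_train(model):
    loss_fun = torch.nn.CrossEntropyLoss().cuda()
    fw_time, bw_time = compute_time_train(model.cuda(), loss_fun)
    print('train fw: %.4fs  bw: %.4fs' % (fw_time, bw_time))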
async def main_async(data_path, num_top, num_sites, http_timeout):
    '''Main entry point of the program.'''
    with Timer() as program_timer:
        with Timer() as gather_timer:
            site_info = await Gatherer.get_site_info_async(
                data_path, num_sites, http_timeout)
        logging.info(f'--- gather duration: {gather_timer.interval} seconds ---')

        with Timer() as analysis_timer:
            site_report = Analyzer.get_site_report(site_info)
        logging.info(
            f'--- site analysis duration: {analysis_timer.interval} seconds ---')
        logging.info(f'--- site analysis report: --- \n{site_report}')

        with Timer() as header_timer:
            header_report = Analyzer.get_header_report(site_info, num_top)
        logging.info(
            f'--- header analysis duration: {header_timer.interval} seconds ---')
        logging.info(f'--- header analysis report: --- \n{header_report}')

    logging.info(f'--- program duration: {program_timer.interval} seconds ---')
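# Hedged usage sketch: driving main_async from a synchronous entry point via
# asyncio.run (Python 3.7+). The argument values are illustrative assumptions.
def example_run_main():
    import asyncio
    asyncio.run(main_async(data_path='data/sites.csv', num_top=10,
                           num_sites=100, http_timeout=5))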
def __init__(self, n_in, n_hids, low_gru_size, n_out, inps=None,
             n_layers=None, dropout=None, seq_len=None, learning_rule=None,
             weight_initializer=None, bias_initializer=None, activ=None,
             use_cost_mask=True, noise=False, use_hint_layer=False,
             use_average=False, theano_function_mode=None,
             use_positional_encoding=False, use_inv_cost_mask=False,
             batch_size=32, use_noise=False, name=None):
    self.n_in = n_in
    self.n_hids = n_hids
    self.n_out = n_out
    self.low_gru_size = low_gru_size
    self.n_layers = n_layers
    self.inps = inps
    self.noise = noise
    self.seq_len = seq_len
    self.use_cost_mask = use_cost_mask
    self.learning_rule = learning_rule
    self.dropout = dropout
    self.use_average = use_average
    self.batch_size = batch_size
    self.use_noise = use_noise
    self.train_timer = Timer("Training function")
    self.grads_timer = Timer("Computing the grads")
    self.theano_function_mode = theano_function_mode
    self.weight_initializer = weight_initializer
    self.bias_initializer = bias_initializer
    self.use_positional_encoding = use_positional_encoding
    self.use_inv_cost_mask = use_inv_cost_mask
    self.eps = 1e-8
    self.activ = activ
    self.out_layer_in = self.n_hids

    if name is None:
        raise ValueError("name should not be empty.")

    self.reset()
    self.name = name
def setUp(self):
    self.memory = Memory(0x1000)
    self.sound_timer = Timer(freq=60)
    self.delay_timer = Timer(freq=60)
    self.cpu = Cpu(self.memory, None,
                   delay_timer=self.delay_timer,
                   sound_timer=self.sound_timer)
def __init__(self):
    self._memory = Memory(0x1000)
    self._display = Display(64, 32)
    self._delay_timer = Timer(freq=60)
    self._sound_timer = Timer(freq=60)
    self._sound = Sound(self._sound_timer)
    self._cpu = Cpu(self._memory, self._display,
                    delay_timer=self._delay_timer,
                    sound_timer=self._sound_timer)
    self._fps_time = datetime.now()
    pygame.init()
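# The Timer(freq=60) used by the two snippets above is not defined in this
# section. As a hedged sketch only (the project's actual Timer may differ),
# a CHIP-8-style countdown timer decrementing at `freq` Hz could look like:
class CountdownTimer:
    """Counts a register down to zero at `freq` ticks per second."""

    def __init__(self, freq=60):
        self.freq = freq
        self.value = 0  # set by the CPU, decremented once per tick

    def tick(self):
        """Called by the main loop every 1/freq seconds."""
        if self.value > 0:
            self.value -= 1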
def build_model(self, configs):
    timer = Timer()
    timer.start()

    for layer in configs['model']['layers']:
        neurons = layer.get('neurons')
        dropout_rate = layer.get('rate')
        activation = layer.get('activation')
        return_seq = layer.get('return_seq')
        input_timesteps = layer.get('input_timesteps')
        input_dim = layer.get('input_dim')

        if layer['type'] == 'dense':
            self.model.add(Dense(neurons, activation=activation))
        elif layer['type'] == 'lstm':
            self.model.add(LSTM(neurons,
                                input_shape=(input_timesteps, input_dim),
                                return_sequences=return_seq))
        elif layer['type'] == 'dropout':
            self.model.add(Dropout(dropout_rate))

    self.model.compile(loss=configs['model']['loss'],
                       optimizer=configs['model']['optimizer'])
    print('[Model] Model Compiled')
    timer.stop()
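# Hedged example of the `configs` structure build_model consumes; the keys
# follow from the lookups above, but every value here is an assumption.
EXAMPLE_CONFIGS = {
    'model': {
        'loss': 'mse',
        'optimizer': 'adam',
        'layers': [
            {'type': 'lstm', 'neurons': 100, 'input_timesteps': 49,
             'input_dim': 2, 'return_seq': True},
            {'type': 'dropout', 'rate': 0.2},
            {'type': 'dense', 'neurons': 1, 'activation': 'linear'},
        ],
    },
}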
def __init__(self, n_in, n_out, bow_size, weight_initializer=None,
             use_index_jittering=False, bias_initializer=None,
             max_fact_len=12, max_seq_len=250, dropout=None,
             batch_size=None, learning_rule=None,
             share_inp_out_weights=False, n_steps=1, inps=None,
             use_noise=False, theano_function_mode=None, rng=None,
             name=None):
    self.n_in = n_in
    self.n_out = n_out
    self.bow_size = bow_size
    self.use_index_jittering = use_index_jittering
    self.weight_initializer = weight_initializer
    self.bias_initializer = bias_initializer
    self.share_inp_out_weights = share_inp_out_weights
    self.rng = rng
    self.inps = inps
    self.dropout = dropout
    self.batch_size = batch_size
    self.learning_rule = learning_rule
    self.theano_function_mode = theano_function_mode
    self.eps = 1e-7
    self.max_fact_len = max_fact_len
    self.max_seq_len = max_seq_len
    self.n_steps = n_steps
    self.use_noise = use_noise
    self.name = name

    assert n_steps > 0, "Illegal value has been provided for n_steps."

    self.train_timer = Timer("Training function")
    self.grads_timer = Timer("Computing the grads")
    self.updates = {}
def __init__(self, max_iter):
    self.max_iter = max_iter
    self.iter_timer = Timer()
    # Current minibatch errors (smoothed over a window)
    self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
    self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
    # Min errors (over the full test set)
    self.min_top1_err = 100.0
    self.min_top5_err = 100.0
    # Number of misclassified examples
    self.num_top1_mis = 0
    self.num_top5_mis = 0
    self.num_samples = 0
def compute_time_loader(data_loader):
    """Computes loader time."""
    timer = Timer()
    loader.shuffle(data_loader, 0)
    data_loader_iterator = iter(data_loader)
    total_iter = cfg.PREC_TIME.NUM_ITER + cfg.PREC_TIME.WARMUP_ITER
    total_iter = min(total_iter, len(data_loader))
    for cur_iter in range(total_iter):
        if cur_iter == cfg.PREC_TIME.WARMUP_ITER:
            timer.reset()
        timer.tic()
        next(data_loader_iterator)
        timer.toc()
    return timer.average_time
def __init__(self):
    super().__init__()
    # Load pre-trained models for predicting two different periodic functions.
    self.forward_model = keras.models.load_model("../model/sine.h5")
    self.backward_model = keras.models.load_model("../model/spiked.h5")

    # Combine the three input streams and remember the latest combined value.
    self.input_danger.pipe(ops.zip(self.input_opportunity, self.input_flip))\
        .subscribe(lambda x: setattr(self, "last_input", x))

    # On every muscle tick, turn the latest input into muscle stimuli.
    Timer().ticks.pipe(
        ops.filter(lambda x: x % self.muscle_tick_rate == 0),
        ops.map(lambda x: self.input_to_prediction(self.last_input)),
        ops.map(lambda x: self.prediction_to_stimuli(x)))\
        .subscribe(lambda x: self.output_muscle_stimuli.on_next(x))
def __init__(self, demo_mode=False):
    """
    Sets up the environment.

    :param demo_mode: activate demo_mode to cycle through the images in a set
        order instead of randomly. Useful for plotting an initial demo graph
        and for verifying the correctness of decisions made by the agent.
    """
    self.demo_mode = demo_mode
    # Set up visual feedback by periodically providing a random cat/dog image.
    Timer().ticks\
        .pipe(
            ops.filter(lambda x: x % self.image_tick_rate == 0),
            ops.map(lambda x: self.read_random_image())
        )\
        .subscribe(lambda img: self.visual_feedback.on_next(img))
def compute_time_eval(model):
    """Computes precise model forward test time using dummy data."""
    # Use eval mode
    model.eval()
    # Generate a dummy mini-batch and copy data to GPU
    im_size, batch_size = cfg.TRAIN.IM_SIZE, int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS)
    inputs = torch.zeros(batch_size, 3, im_size, im_size).cuda(non_blocking=False)
    # Compute precise forward pass time
    timer = Timer()
    total_iter = cfg.PREC_TIME.NUM_ITER + cfg.PREC_TIME.WARMUP_ITER
    for cur_iter in range(total_iter):
        # Reset the timer after the warmup phase
        if cur_iter == cfg.PREC_TIME.WARMUP_ITER:
            timer.reset()
        # Forward
        timer.tic()
        model(inputs)
        torch.cuda.synchronize()
        timer.toc()
    return timer.average_time
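# Hedged sketch tying the three timing helpers in this section together;
# `model`, `loss_fun`, and `data_loader` are assumed to be built elsewhere.
def example_benchmark(model, loss_fun, data_loader):
    fw_time, bw_time = compute_time_train(model, loss_fun)
    eval_time = compute_time_eval(model)
    loader_time = compute_time_loader(data_loader)
    print('train fw: %.4fs  bw: %.4fs  eval fw: %.4fs  loader: %.4fs' %
          (fw_time, bw_time, eval_time, loader_time))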
def __init__(self, epoch_iters):
    self.epoch_iters = epoch_iters
    self.max_iter = cfg.OPTIM.MAX_EPOCH * epoch_iters
    self.iter_timer = Timer()
    self.desc_loss = ScalarMeter(cfg.LOG_PERIOD)
    self.desc_loss_total = 0.0
    self.att_loss = ScalarMeter(cfg.LOG_PERIOD)
    self.att_loss_total = 0.0
    self.lr = None
    # Current minibatch errors (smoothed over a window)
    self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
    self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
    self.mb_att_top1_err = ScalarMeter(cfg.LOG_PERIOD)
    self.mb_att_top5_err = ScalarMeter(cfg.LOG_PERIOD)
    # Number of misclassified examples
    self.num_top1_mis = 0
    self.num_top5_mis = 0
    self.num_att_top1_mis = 0
    self.num_att_top5_mis = 0
    self.num_samples = 0
def train(self, x, y, epochs, batch_size, save_dir):
    timer = Timer()
    timer.start()
    print('[Model] Training Started')
    print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

    save_fname = os.path.join(
        save_dir, '%s-e%s.h5' %
        (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=2),
        ModelCheckpoint(filepath=save_fname, monitor='val_loss',
                        save_best_only=True)
    ]
    self.model.fit(x, y, epochs=epochs, batch_size=batch_size,
                   callbacks=callbacks)
    self.model.save(save_fname)

    print('[Model] Training Completed. Model saved as %s' % save_fname)
    timer.stop()
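# Hedged usage sketch for train(); the shapes and hyper-parameters are
# illustrative assumptions (windows of 49 timesteps, 2 features, 1 target),
# matching nothing in particular from the original code.
def example_train(model):
    import numpy as np
    x = np.random.rand(1000, 49, 2).astype('float32')
    y = np.random.rand(1000, 1).astype('float32')
    model.train(x, y, epochs=2, batch_size=32, save_dir='saved_models')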
def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                    save_dir):
    timer = Timer()
    timer.start()
    print('[Model] Training Started')
    print('[Model] %s epochs, %s batch size, %s batches per epoch' %
          (epochs, batch_size, steps_per_epoch))

    save_fname = os.path.join(
        save_dir, '%s-e%s.h5' %
        (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
    callbacks = [
        ModelCheckpoint(filepath=save_fname, monitor='loss',
                        save_best_only=True)
    ]
    self.model.fit(data_gen, steps_per_epoch=steps_per_epoch, epochs=epochs,
                   callbacks=callbacks, workers=1)

    print('[Model] Training Completed. Model saved as %s' % save_fname)
    timer.stop()
def __init__(self, n_in, n_hids, n_out, mem_size, mem_nel, deep_out_size,
             bow_size=40, inps=None, dropout=None, predict_bow_out=False,
             seq_len=None, n_read_heads=1, n_layers=1, n_write_heads=1,
             train_profile=False, erase_activ=None, content_activ=None,
             l1_pen=None, l2_pen=None, use_reinforce=False,
             use_reinforce_baseline=False, n_reading_steps=2,
             use_gru_inp_rep=False, use_simple_rnn_inp_rep=False,
             use_nogru_mem2q=False, sub_mb_size=40, lambda1_rein=2e-4,
             lambda2_rein=2e-5, baseline_reg=1e-2, anticorrelation=None,
             use_layer_norm=False, recurrent_dropout_prob=-1,
             correlation_ws=None, hybrid_att=True, max_fact_len=7,
             use_dice_val=False, use_qmask=False, renormalization_scale=4.8,
             w2v_embed_scale=0.42, emb_scale=0.32, use_soft_att=False,
             use_hard_att_eval=False, use_batch_norm=False,
             learning_rule=None, use_loc_based_addressing=True,
             smoothed_diff_weights=False, use_multiscale_shifts=True,
             use_ff_controller=False, use_gate_quad_interactions=False,
             permute_order=False, wpenalty=None, noise=None,
             w2v_embed_path=None, glove_embed_path=None, learn_embeds=True,
             use_last_hidden_state=False, use_adv_indexing=False,
             use_bow_input=True, use_out_mem=True, use_deepout=True,
             use_q_mask=False, use_inp_content=True, rnd_indxs=None,
             address_size=0, learn_h0=False, use_context=False, debug=False,
             controller_activ=None, mem_gater_activ=None,
             weight_initializer=None, bias_initializer=None,
             use_cost_mask=True, use_bow_cost_mask=True,
             theano_function_mode=None, batch_size=32, use_noise=False,
             reinforce_decay=0.9, softmax=False, use_mask=False,
             name="ntm_model", **kwargs):

    assert deep_out_size is not None, ("Size of the deep output "
                                       "should not be None.")

    if sub_mb_size is None:
        sub_mb_size = batch_size

    assert sub_mb_size <= batch_size, \
        "sub_mb_size should not be greater than batch_size."

    self.hybrid_att = hybrid_att
    self.state = locals()
    self.use_context = use_context
    self.eps = 1e-8
    self.use_mask = use_mask
    self.l1_pen = l1_pen
    self.l2_pen = l2_pen
    self.l2_penalizer = None
    self.emb_scale = emb_scale
    self.w2v_embed_path = w2v_embed_path
    self.glove_embed_path = glove_embed_path
    self.learn_embeds = learn_embeds
    self.exclude_params = {}
    self.use_gate_quad_interactions = use_gate_quad_interactions
    self.reinforce_decay = reinforce_decay
    self.max_fact_len = max_fact_len
    self.lambda1_reinf = lambda1_rein
    self.lambda2_reinf = lambda2_rein
    self.use_reinforce_baseline = use_reinforce_baseline
    self.use_reinforce = use_reinforce
    self.use_gru_inp_rep = use_gru_inp_rep
    self.use_simple_rnn_inp_rep = use_simple_rnn_inp_rep
    self.use_q_mask = use_q_mask
    self.use_inp_content = use_inp_content
    self.rnd_indxs = rnd_indxs
    self.use_layer_norm = use_layer_norm
    self.recurrent_dropout_prob = recurrent_dropout_prob
    self.n_reading_steps = n_reading_steps
    self.sub_mb_size = sub_mb_size
    self.predict_bow_out = predict_bow_out
    self.correlation_ws = correlation_ws
    self.smoothed_diff_weights = smoothed_diff_weights
    self.use_soft_att = use_soft_att
    self.use_hard_att_eval = use_hard_att_eval

    if anticorrelation and n_read_heads < 2:
        raise ValueError("Anti-correlation of the attention weights "
                         "requires at least two read heads.")

    self.anticorrelation = anticorrelation

    if self.predict_bow_out:
        if len(inps) <= 4:
            raise ValueError("The number of inputs should be greater than 4.")

    if l2_pen:
        self.l2_penalizer = L2Penalty(self.l2_pen)

    # assert use_bow_input ^ use_gru_inp_rep ^ self.use_simple_rnn_inp_rep, \
    #     "You should either use GRU or BOW input."
    self.renormalization_scale = renormalization_scale
    self.w2v_embed_scale = w2v_embed_scale
    self.baseline_reg = baseline_reg
    self.inps = inps
    self.erase_activ = erase_activ
    self.use_ff_controller = use_ff_controller
    self.content_activ = content_activ
    self.use_bow_cost_mask = use_bow_cost_mask
    self.ntm_outs = None
    self.theano_function_mode = theano_function_mode
    self.n_in = n_in
    self.dropout = dropout
    self.wpenalty = wpenalty
    self.noise = noise
    self.bow_size = bow_size
    self.use_last_hidden_state = use_last_hidden_state
    self.use_loc_based_addressing = use_loc_based_addressing
    self.train_profile = train_profile
    self.use_nogru_mem2q = use_nogru_mem2q
    self.use_qmask = use_qmask
    self.permute_order = permute_order
    self.use_batch_norm = use_batch_norm

    # Only effective with a feed-forward controller:
    self.n_layers = n_layers

    if self.use_reinforce:
        reinforceCls = REINFORCE
        if not self.use_reinforce_baseline:
            reinforceCls = REINFORCEBaselineExt

        self.Reinforce = reinforceCls(lambda1_reg=self.lambda1_reinf,
                                      lambda2_reg=self.lambda2_reinf,
                                      decay=self.reinforce_decay)

        self.ReaderReinforce = \
            ReinforcePenalty(reinf_level=self.lambda1_reinf,
                             maxent_level=self.lambda2_reinf,
                             use_reinforce_baseline=self.use_reinforce_baseline)

    self.dice_val = None
    if use_dice_val:
        self.dice_val = sharedX(1.)
    self.use_dice_val = use_dice_val

    if bow_size is None:
        raise ValueError("bow_size should be specified.")

    if name is None:
        raise ValueError("name should not be empty.")

    self.n_hids = n_hids
    self.mem_size = mem_size
    self.use_deepout = use_deepout
    self.mem_nel = mem_nel
    self.n_out = n_out
    self.use_out_mem = use_out_mem
    self.use_multiscale_shifts = use_multiscale_shifts
    self.address_size = address_size
    self.n_read_heads = n_read_heads
    self.n_write_heads = n_write_heads
    self.learn_h0 = learn_h0
    self.use_adv_indexing = use_adv_indexing
    self.softmax = softmax
    self.use_bow_input = use_bow_input
    self.use_cost_mask = use_cost_mask
    self.deep_out_size = deep_out_size
    self.controller_activ = controller_activ
    self.mem_gater_activ = mem_gater_activ
    self.weight_initializer = weight_initializer
    self.bias_initializer = bias_initializer

    if batch_size:
        self.batch_size = batch_size
    else:
        self.batch_size = inps[0].shape[1]

    # assert self.batch_size >= self.sub_mb_size, ("Minibatch size should be "
    #     "greater than the sub minibatch size")

    self.comp_grad_fn = None
    self.name = name
    self.use_noise = use_noise

    self.train_timer = Timer("Training function")
    self.gradfn_timer = Timer("Gradient function")
    self.grads_timer = Timer("Computing the grads")

    self.reset()
    self.seq_len = TT.iscalar('seq_len')
    self.__convert_inps_to_list()

    if debug:
        if self.use_gru_inp_rep or self.use_bow_input:
            self.seq_len.tag.test_value = self.inps[0].tag.test_value.shape[1]
        else:
            self.seq_len.tag.test_value = self.inps[0].tag.test_value.shape[0]

    self.learning_rule = learning_rule

    if self.predict_bow_out:
        self.bow_out_w = TT.fscalar("bow_out_w")
        if debug:
            self.bow_out_w.tag.test_value = np.float32(1.0)
    else:
        self.bow_out_w = 0
class Logging:
    """Used to log data streams to a CSV file."""

    file = None
    timer = Timer()
    # A list of Loggers
    loggers = []

    def __init__(self, file_path="../log/logfile.txt", flush_rate=10):
        """
        Initializes the Logging class.

        :param file_path: file path of the CSV file to log to
        :param flush_rate: rate in ticks after which the logger should write
            to the CSV file
        """
        self.file_path = file_path
        self.flush_rate = flush_rate

    class Logger:
        """Holds an observable of logged values."""

        name = ""          # descriptive label for logged values
        last_val = ""      # last logged value
        observable = None  # stream of logged values

        def __init__(self, name, obs):
            self.name = name
            self.observable = obs
            obs.subscribe(lambda x: setattr(self, "last_val", x))

    def on_tick(self, tick):
        self.write_line(tick)
        if tick % self.flush_rate == 0:
            self.file.flush()

    def start_logging(self):
        """Start logging to console and file."""
        self.file = open(self.file_path, "w")
        self.write_header()
        self.timer.ticks.subscribe(lambda x: self.on_tick(x))

    def add_logger(self, logger):
        """Add a logger. Imagine it as another column in the CSV file."""
        self.loggers.append(logger)

    def write_header(self):
        """Write the header of the CSV file, based on the Loggers' names."""
        header = "tick;"
        header += ';'.join(map(lambda x: x.name, self.loggers))
        self.file.write(header + "\n")
        print(header)

    def write_line(self, tick):
        """Write a single line to the log CSV file."""
        line = f"{tick};"
        line += ';'.join(map(lambda x: str(x.last_val), self.loggers))
        # reset last values so stale readings are not repeated
        for logger in self.loggers:
            logger.last_val = ""
        self.file.write(line + "\n")
        print(line)
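# Hedged usage sketch for the Logging class above; rx.subject.Subject is
# assumed as the stream type, matching the rx operators used elsewhere here.
def example_logging():
    from rx.subject import Subject
    signal = Subject()
    log = Logging("../log/example.txt", flush_rate=10)
    log.add_logger(Logging.Logger("my_signal", signal))
    log.start_logging()  # header "tick;my_signal", then one row per tick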
def __init__(self):
    self.timer = Timer()
    self.scheduler = Scheduler()
    self.memory = RAM()
    self.processes = []
    self.cpus = [CPU(1)]
def process_muscle_stimulus(ms):
    env.contract_anterior(ms[0])
    env.contract_posterior(ms[1])


rpg.output_invert_movement.subscribe(lambda i: cpg.input_flip.on_next(i))
cpg.output_muscle_stimuli.subscribe(lambda ms: process_muscle_stimulus(ms))

# ========== configure logging ==========
log_path = "../log/demo.txt" if demo_mode else "../log/logfile.txt"
logging = Logging(log_path, 10)
logging.add_logger(Logging.Logger("vis_feedback", env.visual_feedback_label))
logging.add_logger(Logging.Logger("prediction", vis.output_prediction_label))
logging.add_logger(Logging.Logger("danger", eval.output_danger_level))
logging.add_logger(Logging.Logger("opportunity", eval.output_opportunity_level))
logging.add_logger(Logging.Logger("invert", rpg.output_invert_movement))
logging.add_logger(Logging.Logger("muscle_f_ant",
                                  cpg.output_muscle_stimuli.pipe(ops.map(lambda x: x[0]))))
logging.add_logger(Logging.Logger("muscle_f_post",
                                  cpg.output_muscle_stimuli.pipe(ops.map(lambda x: x[1]))))
logging.start_logging()

# ========= start timer ticks ===========
Timer().start()

# prevent termination of the streams
while True:
    time.sleep(10)