Example #1
    def POST(self):
        auth = web.ctx.env.get('HTTP_AUTHORIZATION')
        authreq = False

        if auth is None:
            authreq = True
        else:
            auth = re.sub('^Basic ', '', auth)
            username, password = base64.decodestring(auth).split(':')

            if (username, password) in allowed:
                web.header('Content-Type', 'application/json')
                data = json.loads(web.data())
                feedTime = data["time"]
                feed = Feeder(float(feedTime))
                result = feed.feed()
                successDate = feed.getDate()
                jsonResponse = {'result': result, 'date': successDate}
                return json.dumps(jsonResponse)
            else:
                authreq = True

        if authreq:
            web.header('WWW-Authenticate', 'Basic realm="Cat Feeder"')
            web.ctx.status = '401 Unauthorized'
            return
Example #2
def lambda_handler(event, context):
    id = context.aws_request_id
    logger.info('started lambda_handler with id %s' % id)
    feeder = Feeder(event['url'])
    while not feeder.done():
        time.sleep(Config.sleep_interval)
    return "finished lambda_handler with id %s" % id
Example #3
def load_data():
    print("==> loading train data")
    data_path = os.path.join(args.dataset_dir,
                             'train_data' + args.modality + '.npy')
    label_path = os.path.join(args.dataset_dir, 'train_label.pkl')
    valid_frame_path = os.path.join(args.dataset_dir, 'train_num_frame.npy')
    train_loader = torch.utils.data.DataLoader(
        dataset=Feeder(data_path,
                       label_path,
                       valid_frame_path,
                       normalization=args.normalization,
                       ftrans=args.ftrans),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
    )

    print("==> loading test data")
    data_path = os.path.join(args.dataset_dir,
                             'test_data' + args.modality + '.npy')
    label_path = os.path.join(args.dataset_dir, 'test_label.pkl')
    valid_frame_path = os.path.join(args.dataset_dir, 'test_num_frame.npy')
    test_loader = torch.utils.data.DataLoader(
        dataset=Feeder(data_path,
                       label_path,
                       valid_frame_path,
                       normalization=args.normalization,
                       ftrans=args.ftrans),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
    )
    return (train_loader, test_loader)
Example #4
def simulate(in_runs, in_pairs, in_visitors):
	target = [ [2,2,0],[2,2,1],[2,2,2] ]
	genes  = [ [1,1,0],[1,2,0],[2,0,0],[2,1,0] ] # simulating t110 x t110
	probs  = [ 4/9,    2/9,    1/9,    2/9     ] # 25/12.5/6.25/12.5
	genemap = len(probs)
	pairs_results = []
	turtle_results = []

	for run in range(in_runs):
		turtle_day = -1
		pairs_day = -1
		turtle = Field("turtle", turtle_layout)
		pairs  = Field("pairs", pairs_layout)
		pairer = PairBreeder()
		feeder = Feeder(genes, probs, visit=in_visitors, pairs=in_pairs)
		for day in range(1, 365):
			# get the days flowers
			feed = feeder.feed()
			# print("day",day,feed)

			# check for output from the pairer, put them into the pairs field
			for new_pair in pairer.breed():
				pairs.place(new_pair[0], new_pair[1])

			# add the day's feed into the pairer and the turtle
			for new_flower in feed:
				pairer.place(new_flower)
				turtle.place(new_flower)

			# everything above this point technically "runs" the day before
			# run the fields
			if pairs_day == -1:
				harvest  = pairs.run()
				# print("pairs:",  pairs_harvest)
				for flower in harvest:
					if flower.get_genes() in target:
						pairs_day = day
			if turtle_day == -1:
				harvest = turtle.run()
				# print("turtle:", turtle_harvest)
				for flower in harvest:
					if flower.get_genes() in target:
						turtle_day = day

			# break out if they both finish
			if turtle_day != -1 and pairs_day != -1:
				break
		if turtle_day != -1 and pairs_day != -1:
			# print("run", run, "ended on day", day, "turtle", turtle_day, "pairs", pairs_day)
			pairs_results.append(pairs_day)
			turtle_results.append(turtle_day)

	print("{}\t{}\tpairs\t{}\t{}\t{}\t{}\t{}".format(in_visitors, in_pairs, np.amin(pairs_results), np.amax(pairs_results), np.mean(pairs_results), np.median(pairs_results), np.percentile(pairs_results, 95)))
	print("{}\t{}\tturtle\t{}\t{}\t{}\t{}\t{}".format(in_visitors, in_pairs, np.amin(turtle_results), np.amax(turtle_results), np.mean(turtle_results), np.median(turtle_results), np.percentile(turtle_results, 95)))
	return
Example #5
class TradingServer:
    """
    TradingServer holds two socket servers: a feeder and a matching engine.
    Feeder will stream referential (instruments that can be traded) and order books (orders placed by traders)
    Matching engine will handle orders received and send match confirmations (deal).
    Storage will contain traders credentials for authentication (if enabled by client_authentication parameter)
    """
    def __init__(self,
                 storage: AbstractStorage,
                 client_authentication: bool,
                 marshaller,
                 feeder_port: int,
                 matching_engine_port: int,
                 uptime_in_seconds: Optional[int]):
        self.storage = storage
        self.client_authentication = client_authentication
        self.feeder = Feeder(marshaller=marshaller, port=feeder_port)
        self.matching_engine = MatchingEngine(self.storage, client_authentication, marshaller, port=matching_engine_port)
        self.matching_engine.initialize_order_books(referential=self.feeder.get_referential())
        self.start_time = None
        self.stop_time = None
        if uptime_in_seconds:
            self.start_time = time.time()
            self.stop_time = self.start_time + uptime_in_seconds

    def reached_uptime(self):
        if self.stop_time:
            return time.time() >= self.stop_time
        return False

    def print_listen_messages(self):
        if self.start_time and self.stop_time:
            duration = self.stop_time - self.start_time
            logger.info(f"Feeder listening on port [{self.feeder.port}] for [{duration}] seconds")
            logger.info(f"Matching engine listening on port [{self.matching_engine.port}] for [{duration}] seconds")
        else:
            logger.info(f"Feeder listening on port [{self.feeder.port}]")
            logger.info(f"Matching engine listening on port [{self.matching_engine.port}]")

    def start(self):
        try:
            self.feeder.listen()
            self.matching_engine.listen()
            self.print_listen_messages()
            while not self.reached_uptime():
                self.matching_engine.process_sockets()
                self.feeder.process_sockets()
                order_books = self.matching_engine.get_order_books()
                self.feeder.send_all_order_books(order_books)
        except KeyboardInterrupt:
            logger.info("Stopped by user")
        except socket.error as exception:
            logger.error(f"Trading server socket error [{exception}]")
            logger.exception(exception)
        finally:
            self.feeder.cleanup()
            self.matching_engine.cleanup()
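The docstring above describes how the feeder and matching-engine sockets are wired together and pumped from a single loop. A minimal usage sketch, not taken from the original project: InMemoryStorage and JsonMarshaller are hypothetical stand-ins for whatever AbstractStorage implementation and marshaller the codebase actually provides, and the port numbers are arbitrary.

# Hypothetical usage of the TradingServer class above; the storage/marshaller
# classes and port numbers are placeholders, not part of the original source.
storage = InMemoryStorage()
server = TradingServer(storage=storage,
                       client_authentication=False,
                       marshaller=JsonMarshaller(),
                       feeder_port=8000,
                       matching_engine_port=8001,
                       uptime_in_seconds=60)
server.start()  # listens on both ports, pumps sockets until uptime elapses, then cleans up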
Example #6
def lambda_handler(event, context):
    id = context.aws_request_id
    logger.info('started lambda_handler with id %s' % id)
    monitor = Monitor(event['query_url'], event['access_id'],
                      event['access_key'])
    while not monitor.done():
        time.sleep(Config.sleep_interval)
    logger.info('finished monitor query')
    feeder = Feeder(event['send_url'], event['deployment'],
                    monitor.performance)
    feeder.send()
    logger.info('finished monitor send')
    return "finished lambda_handler with id %s" % id
Example #7
class TradingServer:
    def __init__(self, storage, marshaller, feeder_port, matching_engine_port,
                 uptime_in_seconds):
        self.logger = logging.getLogger(__name__)
        self.storage = storage
        self.feeder = Feeder(marshaller=marshaller, port=feeder_port)
        self.matching_engine = MatchingEngine(
            storage=self.storage,
            referential=self.feeder.get_referential(),
            marshaller=marshaller,
            port=matching_engine_port)
        self.start_time = None
        self.stop_time = None
        if uptime_in_seconds:
            self.start_time = time.time()
            self.stop_time = self.start_time + uptime_in_seconds

    def reached_uptime(self):
        if self.stop_time:
            return time.time() >= self.stop_time
        return False

    def print_listen_messages(self):
        if self.start_time and self.stop_time:
            duration = self.stop_time - self.start_time
            self.logger.info(
                'Feeder listening on port [{}] for [{}] seconds'.format(
                    self.feeder.port, duration))
            self.logger.info(
                'Matching engine listening on port [{}] for [{}] seconds'.
                format(self.matching_engine.port, duration))
        else:
            self.logger.info('Feeder listening on port [{}]'.format(
                self.feeder.port))
            self.logger.info('Matching engine listening on port [{}]'.format(
                self.matching_engine.port))

    def start(self):
        try:
            self.feeder.listen()
            self.matching_engine.listen()
            self.print_listen_messages()
            while not self.reached_uptime():
                self.matching_engine.process_sockets()
                self.feeder.process_sockets()
                self.feeder.send_all_order_books(
                    self.matching_engine.get_order_books())
        except KeyboardInterrupt:
            self.logger.info('Stopped by user')
        except socket.error as exception:
            self.logger.error(
                'Trading server socket error [{}]'.format(exception))
            self.logger.error(traceback.print_exc())
        finally:
            self.feeder.cleanup()
            self.matching_engine.cleanup()
Example #8
 def __init__(self, storage, marshaller, feeder_port, matching_engine_port,
              uptime_in_seconds):
     self.logger = logging.getLogger(__name__)
     self.storage = storage
     self.feeder = Feeder(marshaller=marshaller, port=feeder_port)
     self.matching_engine = MatchingEngine(
         storage=self.storage,
         referential=self.feeder.get_referential(),
         marshaller=marshaller,
         port=matching_engine_port)
     self.start_time = None
     self.stop_time = None
     if uptime_in_seconds:
         self.start_time = time.time()
         self.stop_time = self.start_time + uptime_in_seconds
Example #9
def main():
    feeder = Feeder(schedule=None)

    # main app loop

    while True:
        # check if button is pressed
        if random.random() < 0.01:
            feeder.button_is_pressed = True

        # update and check schedule
        feeder.update()

        # now sleep for a second so the loop doesn't run needlessly often
        time.sleep(1)
Example #10
 def __init__(self,
              inputFileName,
              numBits=512,
              numOnBits=10,
              seed=37,
              ):
     Feeder.__init__(self, numBits, numOnBits)
     self.inputFileName = inputFileName
     self.char_list = [char for char in open(inputFileName).read()]
     asc_chars = [chr(i) for i in range(128)]
     self.char_sdr = SDR(asc_chars,
                         numBits=numBits,
                         numOnBits=numOnBits,
                         seed=seed)
     self.readIndex = -1
Example #11
    def robotInit(self):
        super().__init__()
        # Instances of classes

        # Instantiate Subsystems
        #XXX DEBUGGING
        self.drivetrain = Drivetrain(self)
        self.shooter = Shooter(self)
        self.carrier = Carrier(self)
        self.feeder = Feeder(self)
        self.intake = Intake(self)
        #self.winch = Winch(self)
        #self.climber = Climber(self)
        #self.climber_big = Climber_Big(self)
        #self.climber_little = Climber_Little(self)

        # Instantiate Joysticks
        self.left_joy = wpilib.Joystick(1)
        self.right_joy = wpilib.Joystick(2)
        # Instantiate Xbox
        self.xbox = wpilib.Joystick(3)

        # Instantiate OI; must be AFTER joysticks are inited
        self.oi = OI(self)

        self.timer = wpilib.Timer()
Example #12
 def __init__(self,
              storage: AbstractStorage,
              client_authentication: bool,
              marshaller,
              feeder_port: int,
              matching_engine_port: int,
              uptime_in_seconds: Optional[int]):
     self.storage = storage
     self.client_authentication = client_authentication
     self.feeder = Feeder(marshaller=marshaller, port=feeder_port)
     self.matching_engine = MatchingEngine(self.storage, client_authentication, marshaller, port=matching_engine_port)
     self.matching_engine.initialize_order_books(referential=self.feeder.get_referential())
     self.start_time = None
     self.stop_time = None
     if uptime_in_seconds:
         self.start_time = time.time()
         self.stop_time = self.start_time + uptime_in_seconds
Example #13
    def __init__(self, opt):
        self.opt =  opt
        if opt.path_load is not None and (opt.path_load.endswith('.yaml') or opt.path_load.endswith('.yml')):
            self._model = JointScorer(opt)
        else:
            self._model = Scorer(opt)
        if opt.path_load is not None:
            self._model.load(opt.path_load)
        self.parallel()

        if opt.task != 'play':
            if opt.fld_data is not None:
                self.feeder = Feeder(opt)

        if opt.task == 'train':
            opt.save()
            os.makedirs(opt.fld_out + '/ckpt', exist_ok=True)
            self.path_log = self.opt.fld_out + '/log.txt'
        else:
            self.path_log = self.opt.fld_out + '/log_infer.txt'
Example #14
 def _add_fecther(self, fetcher):
     logger.debug("add a fether %s" % str(fetcher))
     name = fetcher.name
     if name in self.feeders:
         logger.warning("fetcher %s in already in." % name)
         if self.feeders[name].addr != fetcher.addr:
             logger.error("fetcher %s has old addr %s, an new is %s" %
                          (name, self.feeders[name].addr, fetcher.addr))
             self.feeders[name].reset(fetcher.addr)
             self.dao.update_fetcher(fetcher)
     else:
         logger.info("add a fetcher %s with addr %s" % (name, fetcher.addr))
         self.feeders[name] = Feeder(name, fetcher.addr)
         self.dao.add_fetcher(fetcher)
Example #15
def load_input_data():
    print("==> loading train data")
    data_path = os.path.join(args.dataset_dir, 'train_data.npy')
    label_path = os.path.join(args.dataset_dir, 'train_label.pkl')
    valid_frame_path = os.path.join(args.dataset_dir, 'train_num_frame.npy')
    train_feeder = Feeder(data_path,
                          label_path,
                          valid_frame_path,
                          normalization=args.normalization,
                          ftrans=args.ftrans,
                          reshape=True)

    print("==> loading test data")
    data_path = os.path.join(args.dataset_dir, 'test_data.npy')
    label_path = os.path.join(args.dataset_dir, 'test_label.pkl')
    valid_frame_path = os.path.join(args.dataset_dir, 'test_num_frame.npy')
    test_feeder = Feeder(data_path,
                         label_path,
                         valid_frame_path,
                         normalization=args.normalization,
                         ftrans=args.ftrans,
                         reshape=True)
    return (train_feeder, test_feeder)
Example #16
def load_train_data():
    print("==> loading train data")
    data_path = os.path.join(args.dataset_dir, 'all_data.npy')
    label_path = os.path.join(args.dataset_dir, 'all_label.pkl')
    valid_frame_path = os.path.join(args.dataset_dir, 'all_num_frame.npy')
    train_loader = torch.utils.data.DataLoader(
        dataset=Feeder(data_path,
                       label_path,
                       valid_frame_path,
                       normalization=args.normalization,
                       ftrans=args.ftrans,
                       reshape=True),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4,
    )

    return train_loader
Example #17
    def POST(self):
        auth = web.ctx.env.get('HTTP_AUTHORIZATION')
        authreq = False

        if auth is None:
            authreq = True
        else:
            auth = re.sub('^Basic ', '', auth)
            username, password = base64.decodestring(auth).split(':')
            if (username, password) in allowed:
                web.header('Content-Type', 'application/json')
                jsonResponse = {'date': Feeder.getDate()}
                return json.dumps(jsonResponse)
            else:
                authreq = True

        if authreq:
            web.header('WWW-Authenticate', 'Basic realm="Cat Feeder"')
            web.ctx.status = '401 Unauthorized'
            return
Example #18
class WaveGlow():
    def __init__(self, sess):
        self.sess = sess
        self.build_model()

    def build_model(self):
        self.global_step = tf.get_variable('global_step',
                                           initializer=0,
                                           dtype=tf.int32,
                                           trainable=False)
        self.mels = tf.placeholder(tf.float32, [None, args.n_mel, None])
        self.wavs = tf.placeholder(
            tf.float32,
            [None, args.squeeze_size, args.wav_time_step // args.squeeze_size])
        self.placeholders = [self.mels, self.wavs]

        self.conditions = mydeconv1d(self.mels,
                                     args.n_mel,
                                     filter_size=args.step_per_mel * 4,
                                     stride=args.step_per_mel,
                                     scope='upsample',
                                     reuse=False)
        self.conditions = tf.transpose(self.conditions, perm=[0, 2, 1])
        self.conditions = tf.reshape(self.conditions, [
            -1,
            tf.shape(self.conditions)[1] // args.squeeze_size,
            args.squeeze_size * args.n_mel
        ])
        self.conditions = tf.transpose(self.conditions, perm=[0, 2, 1])

        self.z = []
        self.layer = self.wavs
        self.logdets, self.logss = 0, 0
        for i in range(args.n_flows):
            self.layer, logs, logdet = conv_afclayer(self.layer,
                                                     self.conditions,
                                                     reverse=False,
                                                     scope='afc_' + str(i + 1),
                                                     reuse=False)
            self.logdets += logdet
            self.logss += logs
            if (i + 1) % args.early_output_every == 0 and (i +
                                                           1) != args.n_flows:
                self.z.append(self.layer[:, :args.early_output_size])
                self.layer = self.layer[:, args.early_output_size:]
        self.z.append(self.layer)
        self.z = tf.concat(self.z, axis=1)

        total_size = tf.cast(tf.size(self.z), tf.float32)
        self.logdet_loss = -tf.reduce_sum(self.logdets) / total_size
        self.logs_loss = -tf.reduce_sum(self.logss) / total_size
        self.prior_loss = tf.reduce_sum(self.z**2 /
                                        (2 * args.sigma**2)) / total_size
        self.loss = self.prior_loss + self.logs_loss + self.logdet_loss

        self.t_vars = tf.trainable_variables()
        #        print ([v.name for v in self.t_vars])
        self.numpara = 0
        for var in self.t_vars:
            varshape = var.get_shape().as_list()
            self.numpara += np.prod(varshape)
        print("Total number of parameters: %r" % (self.numpara))

        ########INFERENCE#########
        self.output = tf.truncated_normal([
            tf.shape(self.conditions)[0], args.output_remain,
            tf.shape(self.conditions)[2]
        ],
                                          dtype=tf.float32)
        for i in reversed(range(args.n_flows)):
            if (i + 1) % args.early_output_every == 0 and (i +
                                                           1) != args.n_flows:
                self.newz = tf.truncated_normal([
                    tf.shape(self.conditions)[0], args.early_output_size,
                    tf.shape(self.conditions)[2]
                ],
                                                stddev=args.infer_sigma)
                self.output = tf.concat([self.newz, self.output], axis=1)
            self.output = conv_afclayer(self.output,
                                        self.conditions,
                                        reverse=True,
                                        scope='afc_' + str(i + 1),
                                        reuse=tf.AUTO_REUSE)

        ########INFERENCE#########

    def train(self):
        coord = tf.train.Coordinator()

        self.data_generator = Feeder(coord, args)
        self.validation_data_generator = self.data_generator

        self.lr = tf.train.exponential_decay(args.lr, self.global_step,
                                             args.lr_decay_steps,
                                             args.lr_decay_rate)
        self.lr = tf.minimum(tf.maximum(self.lr, 0.0000001), 0.001)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        self.grad = self.optimizer.compute_gradients(self.loss,
                                                     var_list=self.t_vars)
        self.op = self.optimizer.apply_gradients(self.grad,
                                                 global_step=self.global_step)

        varset = list(set(tf.global_variables()) | set(tf.local_variables()))
        self.saver = tf.train.Saver(var_list=varset, max_to_keep=5)
        num_batch = self.data_generator.n_examples // args.batch_size
        do_initialzie = True
        if args.loading_path:
            if self.load():
                start_epoch = self.global_step.eval() // num_batch
                do_initialzie = False
            else:
                print("Error Loading Model! Training From Initial State...")
        if do_initialzie:
            init_op = tf.global_variables_initializer()
            start_epoch = 0
            self.sess.run(init_op)

        self.writer = tf.summary.FileWriter(args.summary_dir, None)
        with tf.name_scope("summaries"):
            self.s_logdet_loss = tf.summary.scalar('logdet_loss',
                                                   self.logdet_loss)
            self.s_logs_loss = tf.summary.scalar('logs_loss', self.logs_loss)
            self.s_prior_loss = tf.summary.scalar('prior_loss',
                                                  self.prior_loss)
            self.s_loss = tf.summary.scalar('total_loss', self.loss)
            self.merged = tf.summary.merge([
                self.s_logdet_loss, self.s_logs_loss, self.s_prior_loss,
                self.s_loss
            ])

        # self.procs = self.data_generator.start_enqueue()
        # self.val_procs = self.validation_data_generator.start_enqueue()
        # self.procs += self.val_procs
        self.data_generator.start(self.sess)

        self.sample(0)
        try:
            for epoch in range(start_epoch, args.epoch):
                clr = self.sess.run(self.lr)
                print("Current learning rate: %.6e" % clr)
                loss_names = [
                    "Total Loss", "LogS Loss", "LogDet Loss", "Prior Loss"
                ]
                buffers = buff(loss_names)
                for batch in tqdm(range(num_batch)):
                    input_data = self.data_generator.dequeue()
                    feed_dict = dict(zip(self.placeholders, input_data))
                    _, loss, logs_loss, logdet_loss, prior_loss, summary, step = self.sess.run(
                        [
                            self.op, self.loss, self.logs_loss,
                            self.logdet_loss, self.prior_loss, self.merged,
                            self.global_step
                        ],
                        feed_dict=feed_dict)
                    self.gate_add_summary(summary, step)
                    buffers.put([loss, logs_loss, logdet_loss, prior_loss],
                                [0, 1, 2, 3])

                    if (batch + 1) % args.display_step == 0:
                        buffers.printout([epoch + 1, batch + 1, num_batch])

                if (epoch + 1) % args.saving_epoch == 0 and args.saving_path:
                    try:
                        self.save(epoch + 1)
                    except:
                        print("Failed saving model, maybe no space left...")
                        traceback.print_exc()
                if (epoch + 1) % args.sample_epoch == 0 and args.sampling_path:
                    self.sample(epoch + 1)

        except KeyboardInterrupt:
            print("KeyboardInterrupt")
        except:
            traceback.print_exc()
        finally:
            '''
            for x in self.procs:
                x.terminate()
            '''

    def save(self, epoch):
        name = 'Model_Epoch_' + str(epoch)
        saving_path = os.path.join(args.saving_path, name)
        print("Saving Model to %r" % saving_path)
        step = self.sess.run(self.global_step)
        self.saver.save(self.sess, saving_path, global_step=step)

    def load(self):
        ckpt = tf.train.get_checkpoint_state(args.loading_path)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            print("Loading Model From %r" %
                  os.path.join(args.loading_path, ckpt_name))
            self.saver.restore(self.sess,
                               os.path.join(args.loading_path, ckpt_name))
            return True
        return False

    def sample(self, epoch):
        print("Sampling to %r" % args.sampling_path)
        try:
            for i in tqdm(range(args.sample_num)):
                name = 'Epoch_%r-%r.wav' % (epoch, i + 1)
                outpath = os.path.join(args.sampling_path, name)
                print('Sampling to %r ...' % outpath)
                mels = self.validation_data_generator.dequeue()
                output = self.sess.run(self.output,
                                       feed_dict={self.mels: [mels]})
                output = np.transpose(output[0])
                output = np.reshape(output, [-1])
                writewav(outpath, output)
        except KeyboardInterrupt:
            print("KeyboardInterrupt")
        except:
            '''
            traceback.print_exc()
            for x in self.procs:
                x.terminate()
            '''

    def infer(self):
        print("Performing Inference from %r" % args.infer_mel_dir)
        self.saver = tf.train.Saver()
        try:
            if self.load():
                step = self.global_step.eval()
            else:
                print('Error loading model at inference state!')
                raise RuntimeError
            i = 0
            for mels in mel_iter(args.infer_mel_dir, size=1):
                outputs = self.sess.run(self.output,
                                        feed_dict={self.mels: mels})
                for output in outputs:
                    name = 'Infer_Step_%r-%r.wav' % (step, i + 1)
                    outpath = os.path.join(args.infer_path, name)
                    print('Synthesizing to %r ...' % outpath)
                    output = np.transpose(output)
                    output = np.reshape(output, [-1])
                    writewav(outpath, output)
                    i += 1
                if i > 32:
                    break
        except KeyboardInterrupt:
            print("KeyboardInterrupt")

    def gate_add_summary(self, summary, step):
        try:
            self.writer.add_summary(summary, step)
        except:
            print("Failed adding summary, maybe no space left...")
Example #19
def main():

    # Hyperparameters

    parser = argparse.ArgumentParser()

    # in_dir = ~/wav
    parser.add_argument("--in_dir",
                        type=str,
                        required=True,
                        help="input data(pickle) dir")
    parser.add_argument(
        "--ckpt_dir",
        type=str,
        required=True,
        help="checkpoint to save/ start with for train/inference")
    parser.add_argument("--mode",
                        default="train",
                        choices=["train", "test", "infer"],
                        help="setting mode for execution")

    # Saving Checkpoints, Data... etc
    parser.add_argument("--max_step",
                        type=int,
                        default=500000,
                        help="maximum steps in training")
    parser.add_argument("--checkpoint_freq",
                        type=int,
                        default=100,
                        help="how often save checkpoint")

    # Data
    parser.add_argument("--segment_length",
                        type=float,
                        default=1.6,
                        help="segment length in seconds")
    parser.add_argument("--spectrogram_scale",
                        type=int,
                        default=40,
                        help="scale of the input spectrogram")

    # Ininitialization
    parser.add_argument("--init_type",
                        type=str,
                        default="uniform",
                        help="type of initializer")
    parser.add_argument("--init_weight_range",
                        type=float,
                        default=0.1,
                        help="initial weight ranges from -0.1 to 0.1")

    # Optimization
    parser.add_argument("--loss_type",
                        default="softmax",
                        choices=["softmax", "contrast"],
                        help="loss type for optimization")
    parser.add_argument("--optimizer",
                        type=str,
                        default="sgd",
                        help="type of optimizer")
    parser.add_argument("--learning_rate",
                        type=float,
                        default=0.01,
                        help="learning rate")
    parser.add_argument("--l2_norm_clip",
                        type=float,
                        default=3.0,
                        help="L2-norm of gradient is clipped at")

    # Train
    parser.add_argument("--num_spk_per_batch",
                        type=int,
                        default=64,
                        help="N speakers of batch size N*M")
    parser.add_argument("--num_utt_per_batch",
                        type=int,
                        default=10,
                        help="M utterances of batch size N*M")

    # LSTM
    parser.add_argument("--lstm_proj_clip",
                        type=float,
                        default=0.5,
                        help="Gradient scale for projection node in LSTM")
    parser.add_argument("--num_lstm_stacks",
                        type=int,
                        default=3,
                        help="number of LSTM stacks")
    parser.add_argument("--num_lstm_cells",
                        type=int,
                        default=768,
                        help="number of LSTM cells")
    parser.add_argument("--dim_lstm_projection",
                        type=int,
                        default=256,
                        help="dimension of LSTM projection")

    # Scaled Cosine similarity
    parser.add_argument(
        "--scale_clip",
        type=float,
        default=0.01,
        help="Gradient scale for scale values in scaled cosine similarity")

    # Collect hparams
    args = parser.parse_args()

    # Set up Queue
    global_queue = queue.Queue()
    # Set up Feeder
    libri_feeder = Feeder(args, "train", "libri")
    libri_feeder.set_up_feeder(global_queue)

    vox1_feeder = Feeder(args, "train", "vox1")
    vox1_feeder.set_up_feeder(global_queue)

    vox2_feeder = Feeder(args, "train", "vox2")
    vox2_feeder.set_up_feeder(global_queue)

    # Set up Model

    model = GE2E(args)
    graph = model.set_up_model("train")

    # Training
    with graph.as_default():
        saver = tf.train.Saver()

    with tf.Session(graph=graph) as sess:

        train_writer = tf.summary.FileWriter(args.ckpt_dir, sess.graph)
        ckpt = tf.train.get_checkpoint_state(args.ckpt_dir)
        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
            print('Restoring Variables from {}'.format(
                ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)
            start_step = sess.run(model.global_step)

        else:
            print('start from 0')
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            start_step = 1

        for num_step in range(start_step, args.max_step + 1):

            print("current step: " + str(num_step) + "th step")

            batch = global_queue.get()

            summary, training_loss, _ = sess.run(
                [model.sim_mat_summary, model.total_loss, model.optimize],
                feed_dict={
                    model.input_batch: batch[0],
                    model.target_batch: batch[1]
                })
            train_writer.add_summary(summary, num_step)
            print("batch loss:" + str(training_loss))

            if num_step % args.checkpoint_freq == 0:
                save_path = saver.save(sess,
                                       args.ckpt_dir + "/model.ckpt",
                                       global_step=model.global_step)
                print("model saved in file: %s / %d th step" %
                      (save_path, sess.run(model.global_step)))
Example #20
# for NNs
import tensorflow as tf

# pre-made custom layers
from tensorflow_layers import fc_layer, conv_layer, flatten_2d

# constants:
CLASSES = 24
IMG_SIZE = 40
IMG_SIZE_FLAT = IMG_SIZE * IMG_SIZE
TRAIN_BATCH_SIZE = 128
MODELNUM = 3
TENSORBOARD_DIR = './tmp/{}/'.format(MODELNUM)

# load in the data saved in './data/warped_40x40/warped_data_240k.csv'
data = Feeder(file_path='./data/warped_40x40/warped_data_240k.csv',
              classes=CLASSES)


def feed_dict(train=True, all_test_data=False):
    '''
    Return the feed_dict for train or testing mode (since it is called a lot)
    ``all_test_data`` returns all of the testing data, which may be slow to
    evaluate.
    '''
    if train:
        xs, ys = data.next_batch(TRAIN_BATCH_SIZE)
        p = 0.5
    # here we use testing data, but do we use ``all_test_data``?
    elif not all_test_data:
        xs, ys = (data.test[0])[:500], (data.test[1])[:500]
        p = 1.0
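The snippet above is cut off before the remaining branch of feed_dict and its return statement. A plausible continuation is sketched below; the placeholder names x, y_ and keep_prob are assumptions for illustration and do not come from the original file.

    # hypothetical continuation: all_test_data=True evaluates on the full test set
    else:
        xs, ys = data.test[0], data.test[1]
        p = 1.0
    # x, y_ and keep_prob are assumed tf.placeholder handles defined elsewhere
    return {x: xs, y_: ys, keep_prob: p}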
Example #21
    # model = ResNet(Bottleneck, layers=[3, 4, 6, 3], num_classes=args.num_classes)
    # model = Inception3(num_classes=10, aux_logits=False)
    model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16),
                      num_init_features=64, bn_size=4, drop_rate=0.3, num_classes=args.num_classes)
    # model = Wide_ResNet(16, 8, 0.3, args.num_classes)

    if args.cuda:
        model = model.cuda()
        print('cuda_weights')

    if not os.path.isdir(args.save_dir):
        os.mkdir(args.save_dir)



    train_dl = torch.utils.data.DataLoader(dataset=Feeder(args.train_folder_path),
                                            batch_size=32,
                                            shuffle=True,
                                            num_workers=4,
                                            drop_last=True)


    test_dl = torch.utils.data.DataLoader(dataset=Feeder(args.test_folder_path, is_training=False),
                                            batch_size=16,
                                            shuffle=False,
                                            num_workers=1,
                                            drop_last=False)

    test_nothot_dl = torch.utils.data.DataLoader(dataset=Feeder(args.test_folder_path, is_training=False, target_index=0),
                                                batch_size=16,
                                                shuffle=False,
Example #22
'''
Created on 16 Sep 2017

@author: pingshiyu
'''
from PIL import Image
import numpy as np
from feeder import Feeder
from scipy.misc import toimage


# for debug purposes
def to_2d_image(array):
    '''
    1d flattened array of a square image, converted to image object
    Returns the image object
    '''
    side_length = int(np.sqrt(array.size))
    np_2d_arr = np.reshape(array, (side_length, side_length))

    return Image.fromarray(np_2d_arr, 'L')


if __name__ == '__main__':
    data = Feeder('./data/cleanData.csv')

    xs, ys = data.next_batch(10)
    for x, y in zip(xs, ys):
        toimage(np.reshape(x, (45, 45))).show()
        print(y)
        wait = input('')
Example #23
    model = DenseNet(growth_rate=32,
                     block_config=(6, 12, 24, 16),
                     num_init_features=64,
                     bn_size=4,
                     drop_rate=0.3,
                     num_classes=args.num_classes)
    # model = Wide_ResNet(16, 8, 0.3, args.num_classes)

    if args.cuda:
        model = model.cuda()
        print('cuda_weights')

    if not os.path.isdir(args.save_dir):
        os.mkdir(args.save_dir)

    train_dl = torch.utils.data.DataLoader(dataset=Feeder(
        args.train_folder_path),
                                           batch_size=32,
                                           shuffle=True,
                                           num_workers=4,
                                           drop_last=True)

    test_dl = torch.utils.data.DataLoader(dataset=Feeder(args.test_folder_path,
                                                         is_training=False),
                                          batch_size=16,
                                          shuffle=False,
                                          num_workers=1,
                                          drop_last=False)

    test_sensitive_dl = torch.utils.data.DataLoader(dataset=Feeder(
        args.test_folder_path, is_training=False, target_index=0),
                                                    batch_size=16,
Example #24
from feeder import Feeder
from datetime import datetime, timedelta

datetime_object = datetime.strptime(Feeder.getDate(), '%Y-%m-%d %H:%M:%S')

if datetime_object < datetime.now() - timedelta(hours=4):
    feed = Feeder()
    feed.feed()
Example #25
from params import *
from feeder import Feeder
from model import GAN

################################
# Main
################################

if __name__ == '__main__':

    # Create and train the GAN
    model = GAN(noise_dim=NOISE_DIM,
                image_dim=IMAGE_DIM,
                name='gan',
                debug=False)
    feed = Feeder(IMAGES_BASE_FOLDER, LABEL_BASE_FOLDER, batch_size=BATCH_SIZE)
    model.train(feed, epochs=EPOCHS)

    # Save submission
    with ZipFile(OUTPUT_ZIP_NAME, 'w') as zip:
        batch_size = 100
        num_batches = int(10000 / batch_size)
        for idx_batch in tqdm(range(num_batches), desc='Writing test images'):
            images_gen = (
                model.generate(num_images=batch_size, seed=idx_batch + 1) *
                255).astype(np.uint8)
            for idx_image, image_gen in enumerate(images_gen):
                image_name = '{}.png'.format((idx_batch + 1) * batch_size +
                                             idx_image)
                cv2.imwrite(image_name, image_gen)
                zip.write(image_name)
Example #26
def main():

    # Hyperparameters

    parser = argparse.ArgumentParser()

    # Path

    # wav name formatting: id_clip_uttnum.wav
    parser.add_argument("--in_dir", type=str, required=True, help="input dir")
    parser.add_argument("--out_dir", type=str, required=True, help="out dir")
    parser.add_argument("--batch_inference",
                        action="store_true",
                        help="set whether to use the batch inference")
    parser.add_argument("--dataset", type=str, default="libri", help="out dir")
    parser.add_argument("--in_wav1", type=str, help="input wav1 dir")
    parser.add_argument("--in_wav2",
                        default="temp.wav",
                        type=str,
                        help="input wav2 dir")
    #/home/hdd2tb/ninas96211/dev_wav_set
    parser.add_argument("--mode",
                        default="infer",
                        choices=["train", "test", "infer"],
                        help="setting mode for execution")

    parser.add_argument("--ckpt_file",
                        type=str,
                        default='./xckpt/model.ckpt-58100',
                        help="checkpoint to start with for inference")

    # Data
    #parser.add_argument("--window_length", type=int, default=160, help="sliding window length(frames)")
    parser.add_argument("--segment_length",
                        type=float,
                        default=1.6,
                        help="segment length in seconds")
    parser.add_argument("--overlap_ratio",
                        type=float,
                        default=0.5,
                        help="overlaping percentage")
    parser.add_argument("--spectrogram_scale",
                        type=int,
                        default=40,
                        help="scale of the input spectrogram")
    # Enrol
    parser.add_argument("--num_spk_per_batch",
                        type=int,
                        default=5,
                        help="N speakers of batch size N*M")
    parser.add_argument("--num_utt_per_batch",
                        type=int,
                        default=10,
                        help="M utterances of batch size N*M")

    # LSTM
    parser.add_argument("--num_lstm_stacks",
                        type=int,
                        default=3,
                        help="number of LSTM stacks")
    parser.add_argument("--num_lstm_cells",
                        type=int,
                        default=768,
                        help="number of LSTM cells")
    parser.add_argument("--dim_lstm_projection",
                        type=int,
                        default=256,
                        help="dimension of LSTM projection")
    parser.add_argument('--gpu', default='0',
                        help='GPU id(s) to expose via CUDA_VISIBLE_DEVICES')
    parser.add_argument('--gpu_num',
                        default=4,
                        help='number of GPUs')

    # Collect hparams
    args = parser.parse_args()

    import os
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

    feeder = Feeder(args)
    feeder.set_up_feeder()

    model = GE2E(args)
    graph = model.set_up_model()

    #Training
    with graph.as_default():
        saver = tf.train.Saver()

    #num_gpu=4
    #sess_arr=[]
    #for i in range(num_gpu):
    #    gpu_options = tf.GPUOptions(visible_device_list=str(i))
    #    sess_arr.append(tf.Session(graph=graph, config=tf.ConfigProto(gpu_options=gpu_options)))
    #    saver.restore(sess_arr[i], args.ckpt_file)

    #    t = Thread(target=worker)
    #    t.daemon = True
    #    t.start()

    #    save_dvector_of_dir_parallel(sess_arr, feeder, model, args)

    with tf.Session(graph=graph) as sess:
        # restore from checkpoints

        saver.restore(sess, args.ckpt_file)

        #get_dvector_of_dir(sess, feeder, model, args)

        t = Thread(target=worker)
        t.daemon = True
        t.start()
        save_dvector_of_dir_parallel(sess, feeder, model, args)
Example #27
 def _load_fetchers(self):
     """Load fetchers from db when start"""
     fetchers = self.dao.load_fetchers()
     for fetcher in fetchers:
         self.feeders[fetcher.name] = Feeder(fetcher.name, fetcher.addr)
     logger.info("load %s fetchers from db." % len(self.feeders))
Example #28
#symbols = ['INFY','DIVISLAB','HCLTECH','BPCL','PNB']
for dt in rrule(DAILY, dtstart=start_date, until=end_date, byweekday=(MO,TU,WE,TH,FR)):
    vehicle_lst = []

    def on_feedend_callback():
        print '---------------------------------------------------------------------------------'
        djobs.stop()

    for s in symbols:
        vehicle_lst.append(Vehicle(s,POOL))

    dt = dt.replace(hour=9,minute=15)
    rf = RedisFeeder(POOL,keymask="{0}:m:price")
    rf.run(vehicle_lst,from_time=dt - datetime.timedelta(20), feeder_time=dt - datetime.timedelta(1))

    print 'hist done'

    rf = RedisFeeder(POOL,keymask="{0}:m:price")
    djobs = DataJobs(vehicle_lst)
    fd = Feeder(rf,vehicle_lst,feeder_time=dt, feed_increment=(1),on_feed_cancel=on_feedend_callback)
    fd.run(1)
    djobs.run(75,'update_afp',run_this=(Vehicle.publish_afp,(vehicle_lst,POOL)))

    #djobplot = DataJobs(vehicle_lst)
    #djobs.run(30,run_this=(Vehicle.generate_plot,(vehicle_lst,POOL,'./webclient/img/{0}.png')))

    break


Example #29
if __name__ == '__main__':
    args = parse_args()


    model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16),
                      num_init_features=64, bn_size=4, drop_rate=0.3, num_classes=args.num_classes)


    if args.cuda:
        model = model.cuda()
        print('cuda_weights')

    if not os.path.isdir(args.save_dir):
        os.mkdir(args.save_dir)

    train_dl = torch.utils.data.DataLoader(dataset=Feeder(args.train_folder_path),
                                            batch_size=16,
                                            shuffle=True,
                                            num_workers=1,
                                            drop_last=True)

    test_dl = torch.utils.data.DataLoader(dataset=Feeder(args.test_folder_path, is_training=False),
                                            batch_size=16,
                                            shuffle=False,
                                            num_workers=1,
                                            drop_last=False)



    if args.mode == 'train':
Example #30
class Master:
    def __init__(self, opt):
        self.opt = opt
        if opt.path_load is not None and (opt.path_load.endswith('.yaml')
                                          or opt.path_load.endswith('.yml')):
            self._model = JointScorer(opt)
        else:
            self._model = Scorer(opt)
        if opt.path_load is not None:
            self._model.load(opt.path_load)
        self.parallel()

        if opt.task != 'play':
            if opt.fld_data is not None:
                self.feeder = Feeder(opt)

        if opt.task == 'train':
            opt.save()
            os.makedirs(opt.fld_out + '/ckpt', exist_ok=True)
            self.path_log = self.opt.fld_out + '/log.txt'
        else:
            self.path_log = self.opt.fld_out + '/log_infer.txt'

    def print(self, s=''):
        try:
            print(s)
        except UnicodeEncodeError:
            print('[UnicodeEncodeError]')
            pass
        with open(self.path_log, 'a', encoding='utf-8') as f:
            f.write(s + '\n')

    def parallel(self):
        if self.opt.cuda:
            self._model = self._model.cuda()
        n_gpu = torch.cuda.device_count()
        if self.opt.cuda and n_gpu > 1:
            print('paralleling on %i GPU' % n_gpu)
            self.model = torch.nn.DataParallel(self._model)
            # after DataParallel, a warning about RNN weights shows up every batch
            warnings.filterwarnings("ignore")
            # after DataParallel, attr of self.model become attr of self.model.module
            self._model = self.model.module
            self.model.core = self.model.module.core
        else:
            self.model = self._model
        if self.opt.task == 'train':
            self.optimizer = torch.optim.Adam(self._model.parameters(),
                                              lr=self.opt.lr)

    def train(self):
        vali_loss, best_acc = self.vali()
        best_trained = 0
        step = 0
        n_trained = 0
        t0 = time.time()

        list_trained = [0]
        list_train_loss = [np.nan]
        list_train_acc = [np.nan]
        list_vali_loss = [vali_loss]
        list_vali_acc = [best_acc]
        acc_history = []

        while step < self.opt.step_max:
            self.model.train()
            self.optimizer.zero_grad()
            batch = self.feeder.get_batch(self.opt.batch)
            pred = self.model.forward(batch)
            loss = self.loss(pred)
            loss = loss.mean()  # in case of parallel-training

            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                           self.opt.clip)
            self.optimizer.step()

            acc = (pred > 0.5).float().mean().item()
            acc_history.append(acc)
            if len(acc_history) > self.opt.len_acc:
                acc_history.pop(0)
            avg_train_acc = np.mean(acc_history)
            step += 1
            n_trained += self.opt.batch
            info = 'step %i trained %.3f best %.2f' % (step, n_trained / 1e6,
                                                       best_acc)

            if step % self.opt.step_print == 0:
                speed = (n_trained / 1e6) / ((time.time() - t0) / 3600)

                self.print(
                    '%s speed %.2f hr_gap %.2f score_gap %.2f rank_gap %.2f loss %.4f acc %.3f'
                    % (
                        info,
                        speed,
                        np.median(batch['hr_gap']),
                        (np.array(batch['score_pos']) -
                         np.array(batch['score_neg'])).mean(),
                        (np.array(batch['rank_pos']) -
                         np.array(batch['rank_neg'])).mean(),
                        loss,
                        avg_train_acc,
                    ))

            if step % self.opt.step_vali == 0:
                vali_loss, vali_acc = self.vali(info)
                if vali_acc > best_acc:
                    self.save(self.opt.fld_out + '/ckpt/best.pth')
                    best_acc = vali_acc
                    best_trained = n_trained
                sys.stdout.flush()

                list_trained.append(n_trained / 1e6)
                list_train_loss.append(loss.item())
                list_train_acc.append(avg_train_acc)
                list_vali_loss.append(vali_loss)
                list_vali_acc.append(vali_acc)
                _, axs = plt.subplots(3, 1, sharex=True)

                axs[0].plot(list_trained, list_train_loss, 'b', label='train')
                axs[0].plot(list_trained, list_vali_loss, 'r', label='vali')
                axs[0].legend(loc='best')
                axs[0].set_ylabel('loss')

                axs[1].plot(list_trained, list_train_acc, 'b', label='train')
                axs[1].plot(list_trained, list_vali_acc, 'r', label='vali')
                axs[1].plot([best_trained / 1e6, n_trained / 1e6],
                            [best_acc, best_acc], 'k:')
                axs[1].set_ylabel('acc')

                axs[-1].set_xlabel('trained (M)')
                axs[0].set_title(self.opt.fld_out + '\n' + self.opt.fld_data +
                                 '\nbest_acc = %.4f' % best_acc)
                plt.tight_layout()
                plt.savefig(self.opt.fld_out + '/log.png')
                plt.close()

            if step % self.opt.step_save == 0:
                self.save(self.opt.fld_out + '/ckpt/last.pth')

    def loss(self, pred):
        return -torch.log(pred).mean()

    def vali(self, info=''):
        assert (self.opt.min_rank_gap is not None)
        assert (self.opt.min_score_gap is not None)

        n_print = min(self.opt.batch, self.opt.vali_print)
        self.model.eval()
        loss = 0
        acc = 0
        hr_gap = 0
        score_gap = 0
        rank_gap = 0
        n_batch = int(self.opt.vali_size / self.opt.batch)
        self.feeder.reset('vali')

        for _ in range(n_batch):
            batch = self.feeder.get_batch(self.opt.batch,
                                          sub='vali',
                                          min_score_gap=self.opt.min_score_gap,
                                          min_rank_gap=self.opt.min_rank_gap)
            with torch.no_grad():
                pred = self.model.forward(batch)
                loss += self.loss(pred)
            acc += (pred > 0.5).float().mean()
            score_gap += (np.array(batch['score_pos']) -
                          np.array(batch['score_neg'])).mean()
            rank_gap += (np.array(batch['rank_pos']) -
                         np.array(batch['rank_neg'])).mean()
            hr_gap += np.median(batch['hr_gap'])

        loss /= n_batch
        acc /= n_batch
        score_gap /= n_batch
        rank_gap /= n_batch
        hr_gap /= n_batch
        s = '%s hr_gap %.2f score_gap %.2f rank_gap %.2f loss %.4f acc %.3f' % (
            info,
            hr_gap,
            score_gap,
            rank_gap,
            loss,
            acc,
        )
        s = '[vali] ' + s.strip()
        if not n_print:
            self.print(s)
            return loss.mean().item(), acc

        with torch.no_grad():
            pred_pos = self.model.core(batch['ids_pos'], batch['len_pos'])
            pred_neg = self.model.core(batch['ids_neg'], batch['len_neg'])

        def to_np(ids):
            if self.opt.cuda:
                ids = ids.cpu()
            return ids.detach().numpy()

        ids_pos = to_np(batch['ids_pos'])
        ids_neg = to_np(batch['ids_neg'])

        for j in range(n_print):
            l_cxt = batch['len_cxt'][j]
            cxt = self.model.tokenizer.decode(ids_pos[j, :l_cxt])
            pos = self.model.tokenizer.decode(
                ids_pos[j, l_cxt:]).strip('<|ndoftext|>')
            neg = self.model.tokenizer.decode(
                ids_neg[j, l_cxt:]).strip('<|ndoftext|>')
            self.print(cxt)
            self.print('hr_gap %s' % batch['hr_gap'][j])
            self.print('%s\t%.2f\t%.3f\t%s' %
                       (batch['score_pos'][j], batch['rank_pos'][j],
                        pred_pos[j], pos))
            self.print('%s\t%.2f\t%.3f\t%s' %
                       (batch['score_neg'][j], batch['rank_neg'][j],
                        pred_neg[j], neg))
            self.print()

        self.print(s)
        return loss.mean().item(), acc

    def save(self, path):
        torch.save(self._model.state_dict(), path)
        self.print('saved to ' + path)
Example #31
    def train(self):
        coord = tf.train.Coordinator()

        self.data_generator = Feeder(coord, args)
        self.validation_data_generator = self.data_generator

        self.lr = tf.train.exponential_decay(args.lr, self.global_step,
                                             args.lr_decay_steps,
                                             args.lr_decay_rate)
        self.lr = tf.minimum(tf.maximum(self.lr, 0.0000001), 0.001)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        self.grad = self.optimizer.compute_gradients(self.loss,
                                                     var_list=self.t_vars)
        self.op = self.optimizer.apply_gradients(self.grad,
                                                 global_step=self.global_step)

        varset = list(set(tf.global_variables()) | set(tf.local_variables()))
        self.saver = tf.train.Saver(var_list=varset, max_to_keep=5)
        num_batch = self.data_generator.n_examples // args.batch_size
        do_initialzie = True
        if args.loading_path:
            if self.load():
                start_epoch = self.global_step.eval() // num_batch
                do_initialzie = False
            else:
                print("Error Loading Model! Training From Initial State...")
        if do_initialzie:
            init_op = tf.global_variables_initializer()
            start_epoch = 0
            self.sess.run(init_op)

        self.writer = tf.summary.FileWriter(args.summary_dir, None)
        with tf.name_scope("summaries"):
            self.s_logdet_loss = tf.summary.scalar('logdet_loss',
                                                   self.logdet_loss)
            self.s_logs_loss = tf.summary.scalar('logs_loss', self.logs_loss)
            self.s_prior_loss = tf.summary.scalar('prior_loss',
                                                  self.prior_loss)
            self.s_loss = tf.summary.scalar('total_loss', self.loss)
            self.merged = tf.summary.merge([
                self.s_logdet_loss, self.s_logs_loss, self.s_prior_loss,
                self.s_loss
            ])

        # self.procs = self.data_generator.start_enqueue()
        # self.val_procs = self.validation_data_generator.start_enqueue()
        # self.procs += self.val_procs
        self.data_generator.start(self.sess)

        self.sample(0)
        try:
            for epoch in range(start_epoch, args.epoch):
                clr = self.sess.run(self.lr)
                print("Current learning rate: %.6e" % clr)
                loss_names = [
                    "Total Loss", "LogS Loss", "LogDet Loss", "Prior Loss"
                ]
                buffers = buff(loss_names)
                for batch in tqdm(range(num_batch)):
                    input_data = self.data_generator.dequeue()
                    feed_dict = dict(zip(self.placeholders, input_data))
                    _, loss, logs_loss, logdet_loss, prior_loss, summary, step = self.sess.run(
                        [
                            self.op, self.loss, self.logs_loss,
                            self.logdet_loss, self.prior_loss, self.merged,
                            self.global_step
                        ],
                        feed_dict=feed_dict)
                    self.gate_add_summary(summary, step)
                    buffers.put([loss, logs_loss, logdet_loss, prior_loss],
                                [0, 1, 2, 3])

                    if (batch + 1) % args.display_step == 0:
                        buffers.printout([epoch + 1, batch + 1, num_batch])

                if (epoch + 1) % args.saving_epoch == 0 and args.saving_path:
                    try:
                        self.save(epoch + 1)
                    except:
                        print("Failed saving model, maybe no space left...")
                        traceback.print_exc()
                if (epoch + 1) % args.sample_epoch == 0 and args.sampling_path:
                    self.sample(epoch + 1)

        except KeyboardInterrupt:
            print("KeyboardInterrupt")
        except:
            traceback.print_exc()
        finally:
            '''