Example #1
def start_bot():
    from apps.shopwatcher.bot import dp
    from aiogram import executor
    from routes import setup_routes

    scheduler = Scheduler(schedule)
    scheduler.start()

    setup_routes(dp)
    executor.start_polling(dp,
                           on_shutdown=on_shutdown,
                           on_startup=on_startup,
                           skip_updates=True)
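
# Hypothetical startup/shutdown hooks assumed by the snippet above; in aiogram 2.x
# the executor passes the Dispatcher to these callbacks (names are illustrative only).
async def on_startup(dp):
    print('Bot polling started')


async def on_shutdown(dp):
    # close the FSM storage cleanly before the event loop stops
    await dp.storage.close()
    await dp.storage.wait_closed()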
Example #3
def create_app(test_config=None):
    db = MongoEngine()
    db_uri = "mongodb+srv://{user}:{passw}@cluster0-czbyg.mongodb.net/GuelphAlerts?retryWrites=true&w=majority"

    sched = Scheduler()
    if Scheduler.sched is None:
        sched.startScheduler()
        sched = Scheduler.sched

    # create and configure the app
    app = Flask(__name__, instance_relative_config=True)

    if test_config is None:
        # load the instance config, if it exists, when not testing
        app.config.from_pyfile('config.py', silent=True)
    else:
        # load the test config if passed in
        app.config.from_mapping(test_config)

    app.config['MONGODB_SETTINGS'] = {
        'db': 'GuelphAlerts',
        'host': db_uri.format(user=os.environ['DB_USERNAME'],
                              passw=os.environ['DB_PASSWORD'])
    }

    db.init_app(app)

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    app.register_blueprint(home.bp)
    app.register_blueprint(courses.courses_bp)
    app.register_blueprint(cart.bp)

    sched.initScheduler()

    return app
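
# Hypothetical entry point for the factory above (not part of the original snippet);
# DB_USERNAME and DB_PASSWORD must be set in the environment for the Mongo URI to build.
if __name__ == '__main__':
    create_app().run(debug=True)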
Example #4
        pb2 = get_price(r2['ex']['availableToBack'])

        pl1 = get_price(r1['ex']['availableToLay'])
        pl2 = get_price(r2['ex']['availableToLay'])
        curr = (ts, pb1, pl1, pt1, pt2, pb2, pl2)

        prev = self.cache[-1] if self.cache else (None, None)
        self.cache.append(curr)

        if curr[1:] != prev[1:]:
            print "{} R1:{}/{} ({}-{}) R2:{}/{}".format(*curr)

        if now - self.prev_dump > dt.timedelta(minutes=5):
            mode = 'a'
            """
			with open(fn, mode) as fp:
				for sample in self.cache:
					line="{}|{}|{}".format(*sample)
					fp.write(line+"\n")
			"""
            self.prev_dump = now
            self.cache = []


dump = Dump(marketId, fn)
now = dt.datetime.now()
sc = Scheduler()
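# Assumed Event signature: (start_time, callback, callback_arg, interval, end_time),
# i.e. call dump.fetch_prices(marketId) every second for the next ten minutes.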
e = Event(now, dump.fetch_prices, marketId, dt.timedelta(seconds=1),
          now + dt.timedelta(minutes=10))
sc.register(e)
sc.run()
Example #5
class TwoStreamVAN(BaseEnviron):
    """
    The environment for TwoStreamVAN: training, testing, visualization, saving/loading checkpoints, etc.
    Also covers the TwoStreamVAN-C and TwoStreamVAN-M variants.
    """
    def __init__(self, gen_args, checkpoint_dir, log_dir, output_dir, video_len, action_set, actor_set, is_eval=False,
                 dis_args=None, loss_weights=None, pretrain_iters=0):
        """
        :param gen_args: dict, all the parameters/settings for the generator network
        :param checkpoint_dir: str, the path to save/load checkpoints
        :param log_dir: str, the path to save the log file
        :param output_dir: str, the path to save the generated videos
        :param video_len: int, the desired length of the generated videos
        :param action_set: list, the action set
        :param actor_set: list, the actor set
        :param is_eval: bool, specify for evaluation
        :param dis_args: dict, all the parameters/settings for the discriminator network
        :param loss_weights: dict, the weights for losses
        :param pretrain_iters: int, the number of iters for pretraining the content stream
        """
        super(TwoStreamVAN, self).__init__(gen_args, checkpoint_dir, log_dir, output_dir, video_len, action_set,
                                           actor_set, is_eval=is_eval)

        self.joint = gen_args['joint']
        self.layers = gen_args['kernel_layer']

        # define the generator and discriminator networks
        self.define_networks(gen_args, dis_args=dis_args)
        if torch.cuda.is_available():
            self.move_to_gpu()

        if not is_eval:
            # define the optimizers
            self.define_opts()

            # define the mechanism of the scheduled sampling
            self.schedule_sampler = Scheduler(1, 0, 150000 + pretrain_iters, 400000 + pretrain_iters, mode='linear')

            # define the loss weights
            self.motion_kl_weight = Scheduler(loss_weights['vid_m_kl_start'], loss_weights['vid_m_kl_end'], 150000 + pretrain_iters,
                                              400000 + pretrain_iters, mode='linear')
            # scheduler for c_kl is only activated in pre-training
            self.c_kl = Scheduler(loss_weights['c_kl_start'], loss_weights['c_kl_end'], 0, 100000, mode='linear')
            self.pred_scale_feat = loss_weights['pred_scale_feat']
            self.video_scale_feat = loss_weights['video_scale_feat']
            self.img_m_kl = loss_weights['img_m_kl']
            self.c_img_dis = loss_weights['c_img_dis']
            # TODO: add xp_vs_xtilde to the loss weights, 1 as default, 0.01 for syn-action
            self.xp_vs_xtilde = loss_weights['xp_vs_xtilde']

    def define_networks(self, gen_args, dis_args=None):
        """
        Define the architecture of networks
        :param gen_args: the args for the generator
        :param dis_args: the args for the discriminators
        """
        self.networks['generator'] = Generator(gen_args['num_categories'], gen_args['n_channels'],
                                               motion_dim=gen_args['motion_dim'], cont_dim=gen_args['cont_dim'],
                                               no_mask=gen_args['no_mask'], joint=gen_args['joint'],
                                               ac_kernel=gen_args['ac_kernel'], gf_dim=gen_args['gf_dim'],
                                               use_bn=gen_args['use_bn'], kernel_layer=gen_args['kernel_layer'])

        if not self.is_eval:
            self.networks['img_discriminator'] = PatchImageDiscriminator(dis_args['n_channels'])
            self.networks['vid_discriminator'] = CategoricalVideoDiscriminator(dis_args['n_channels'], dis_args['num_categories'])

    def define_opts(self):
        """
        Define Optimizers
        """
        # optimizer for content encoder
        contEnc_params = list(self.networks['generator'].contEnc.parameters()) + \
                         list(self.networks['generator'].contSampler.parameters())

        self.optimizers['optimize_contEnc'] = optim.Adam(contEnc_params, lr=0.0002, betas=(0.5, 0.999),
                                                         weight_decay=0.00001)

        # optimizer for motion encoder
        motionEnc_params = list(self.networks['generator'].motionEnc.parameters()) + \
                         list(self.networks['generator'].motionSampler.parameters())
        self.optimizers['optimize_motionEnc'] = optim.Adam(motionEnc_params, lr=0.0002, betas=(0.5, 0.999),
                                                           weight_decay=0.00001)
        motionDec_params = list(self.networks['generator'].trajGenerator.parameters()) + \
                           list(self.networks['generator'].kernelGen.parameters())

        # optimizer for motion generator
        if self.joint:
            motionDec_params += list(self.networks['generator'].contMotionStateGen.fc_motion.parameters()) + \
                                list(self.networks['generator'].contMotionStateGen.fc_comb.parameters())
        self.optimizers['optimize_motionDec'] = optim.Adam(motionDec_params, lr=0.0002, betas=(0.5, 0.999), weight_decay=0.00001)
        contDec_params = list(self.networks['generator'].videoDec.parameters()) + \
                         list(self.networks['generator'].contMotionStateGen.fc_cont1.parameters()) + \
                         list(self.networks['generator'].contMotionStateGen.fc_cont2.parameters())

        # optimizer for content generator
        self.optimizers['optimize_contDec'] = optim.Adam(contDec_params, lr=0.0002, betas=(0.5, 0.999), weight_decay=0.00001)

        # optimizer for discriminators
        self.optimizers['optimize_d_img'] = optim.Adam(self.networks['img_discriminator'].parameters(), lr=0.0002, betas=(0.5, 0.999), weight_decay=0.00001)
        self.optimizers['optimize_d_vid'] = optim.Adam(self.networks['vid_discriminator'].parameters(), lr=0.0002, betas=(0.5, 0.999), weight_decay=0.00001)

    def get_category(self, cls_id=None, batch_size=32):
        """
        get category variable for the specific class or random classes
        :param cls_id: int, specify if the video belongs to the certain classes
        :param batch_size: int, the batch size
        """
        if cls_id is None:
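            # cover every category evenly (batch_size // num_categories copies of
            # each label), then pad the remainder with random class ids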
            num = batch_size // self.num_categories
            random_labels = np.expand_dims(np.arange(self.num_categories), axis=0).repeat(num, 1).reshape(-1)
            offset = batch_size % self.num_categories
            offset_labels = np.random.randint(low=0, high=self.num_categories - 1, size=[offset, ])
            random_labels = np.concatenate((random_labels, offset_labels), axis=0)
        else:
            random_labels = cls_id * np.ones([batch_size, ]).astype('int')
        if torch.cuda.is_available():
            self.categories = Variable(torch.from_numpy(random_labels)).cuda()
        else:
            self.categories = Variable(torch.from_numpy(random_labels))
        self.first_img = None
        self.images = None

    # ************************************************************************
    # ************************** Forward Methods *****************************
    # ************************************************************************

    def reconstruct_forward(self, ae_mode='sample', is_eval=False):
        """
        Forward method for the content stream
        :param ae_mode: the mode of the latent variable
                        'random': sample from the standard normal distribution N(0, 1)
                        'sample': sample from the posterior distribution q(z_c|x)
                        'mean': use the mean of q(z_c|x)
        :param is_eval: specify when validating or evaluating
        """
        self.task = 'recons'

        # select a random frame to reconstruct
        rand_idx = np.random.randint(low=0, high=self.video_len - 1)
        self.recons_img = self.images[:, :, rand_idx]

        if is_eval:
            torch.set_grad_enabled(False)

        # the ae pass
        self.recons_x_tilde, self.cont_mean, self.cont_logvar, _ = \
            self.networks['generator'].reconstruct_one_frame(self.categories, self.recons_img, mode=ae_mode)

        # the gan pass
        self.recons_x_p, _, _, _ = self.networks['generator'].reconstruct_one_frame(self.categories, mode='random')

        if is_eval:
            torch.set_grad_enabled(True)

    def predict_forward(self, ae_mode='sample', is_eval=False):
        """
        Forward method for the easier task in the motion stream
        :param ae_mode: the mode of the latent variable
                        'random': sample from the standard normal distribution N(0, 1)
                        'sample': sample from the posterior distribution q(z_m|delta_x)
                        'mean': use the mean of q(z_m|delta_x)
        :param is_eval: specify when validating or evaluating
        """
        self.task = 'pred'

        # pick up predict timestep
        timestep = np.random.randint(low=1, high=self.video_len - 1)

        # prepare inputs
        self.prev_img = self.images[:, :, timestep-1]
        self.pred_target = self.images[:, :, timestep]
        diff_img = (self.images[:, :, : -1] - self.images[:, :, 1:])/2

        if is_eval:
            torch.set_grad_enabled(False)

        # the ae pass
        self.pred_x_tilde, self.motion_mean, self.motion_logvar, self.motion_scale_feats = \
            self.networks['generator'].predict_next_frame(self.prev_img, diff_img, timestep, self.categories, mode=ae_mode)

        # get the ground truth of self.scale_feats
        with torch.set_grad_enabled(False):
            _, _, _, self.pred_scale_feats_gt = self.networks['generator'].reconstruct_one_frame(self.categories, self.pred_target, mode='mean')

        # the gan pass
        self.pred_x_p, _, _, _ = self.networks['generator'].predict_next_frame(self.prev_img, diff_img, timestep, self.categories, mode='random')

        if is_eval:
            torch.set_grad_enabled(True)

    def video_forward(self, eplison, ae_mode='sample', is_eval=False):
        """
         Forward method for the easier task in the motion stream
         :param ae_mode: the mode of the latent variable
                         'random': sample from the standard normal distribution N(0, 1)
                         'sample': sample from the posterior distribution q(z_m|delta_x)
                         'mean': use the mean of q(z_m|delta_x)
         :param is_eval: specify when validating or evaluating
        """
        self.task = 'video_generate'

        # prepare inputs
        self.first_img = self.images[:, :, 0]
        diff_img = (self.images[:, :, : -1] - self.images[:, :, 1:]) / 2

        if is_eval:
            torch.set_grad_enabled(False)

        # the ae pass
        self.video_x_tilde, self.video_mean, self.video_logvar, self.video_scale_feats =\
            self.networks['generator'].reconstruct_seq(self.images, self.categories, diff_img, eplison, mode=ae_mode)

        # the gan pass
        self.video_x_p, _, _, _ = self.networks['generator'].reconstruct_seq(self.images, self.categories, diff_img,
                                                                             eplison, mode='random')

        # get the ground truth of self.scale_feats
        with torch.set_grad_enabled(False):
            video_len = self.images.shape[2]
            self.video_scale_feats_gt = []
            for idx in range(video_len-1):
                _, _, _,  scale_feat_gt = self.networks['generator'].reconstruct_one_frame(self.categories, self.images[:, :, idx+1],
                                                                                          mode='mean')
                self.video_scale_feats_gt += scale_feat_gt

        if is_eval:
            torch.set_grad_enabled(True)

    # ************************************************************************
    # ************************** Loss  Functions *****************************
    # ************************************************************************

    def get_recons_pretrain_losses(self, c_kl):
        """
        VAE losses for pre-training the content stream
        :param c_kl: the weight of KL distance in the content training
        """
        ############################ loss for encoder ############################
        # kl divergence and reconstruction
        loss = {}
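        # kl_loss is assumed to be the usual Gaussian KL to N(0, I),
        # i.e. -0.5 * sum(1 + logvar - mean^2 - exp(logvar))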
        loss['kld'] = self.kl_loss((self.cont_mean, self.cont_logvar))
        loss['recons_mse'] = self.mse_loss(self.recons_x_tilde, self.recons_img)
        loss['l_enc_recons'] = c_kl * loss['kld'] + 10000 * loss['recons_mse']
        self.losses['cont_enc'] = loss

        ########################### loss for decoder #############################
        # reconstruction loss
        loss = {}
        loss['recons_mse'] = self.mse_loss(self.recons_x_tilde, self.recons_img)
        loss['l_dec_recons'] = 10000 * loss['recons_mse']
        self.losses['cont_dec'] = loss

    def get_recons_losses(self, c_kl):
        """
        VAE and GAN losses for the content learning
        :param c_kl: the weight of KL distance in the content training
        """
        ############################ loss for encoder ############################
        # kl divergence and reconstruction loss
        loss = {}
        loss['kld'] = self.kl_loss((self.cont_mean, self.cont_logvar))
        loss['recons_mse'] = self.mse_loss(self.recons_x_tilde, self.recons_img)
        # total loss
        loss['l_enc_recons'] = c_kl * loss['kld'] + 10000 * loss['recons_mse']
        self.losses['cont_enc'] = loss

        ########################### loss for decoder #############################
        # Reconstruction loss and discriminator loss
        loss = {}
        # Reconstruction loss
        loss['recons_mse'] = self.mse_loss(self.recons_x_tilde, self.recons_img)
        # Discriminator loss
        # # ae pass
        fake_score = self.networks['img_discriminator'](self.recons_x_tilde)
        all_ones = self.ones_like(fake_score.data)
        loss['recons_gan_x_tilde'] = self.gan_loss(fake_score, all_ones)
        # # gan pass
        fake_score = self.networks['img_discriminator'](self.recons_x_p)
        all_ones = self.ones_like(fake_score.data)
        loss['recons_gan_x_p'] = self.gan_loss(fake_score, all_ones)
        # total loss
        loss['l_dec_recons'] = 10000 * loss['recons_mse'] + \
                               self.c_img_dis * (loss['recons_gan_x_tilde'] + self.xp_vs_xtilde * loss['recons_gan_x_p'])
        self.losses['cont_dec'] = loss

        ######################## loss for discriminator #########################
        loss = {}
        # # ae pass
        fake_score = self.networks['img_discriminator'](self.recons_x_tilde.detach())
        all_zeros = self.zeros_like(fake_score.data)
        loss['recons_gan_x_tilde'] = self.gan_loss(fake_score, all_zeros)
        # # gan pass
        fake_score = self.networks['img_discriminator'](self.recons_x_p.detach())
        all_zeros = self.zeros_like(fake_score.data)
        loss['recons_gan_x_p'] = self.gan_loss(fake_score, all_zeros)
        # # real data
        real_score = self.networks['img_discriminator'](self.recons_img)
        all_ones = self.ones_like(real_score.data)
        loss['gan_x'] = self.gan_loss(real_score, all_ones)
        # total loss
        loss['l_dis_recons'] = loss['recons_gan_x_tilde'] + self.xp_vs_xtilde * loss['recons_gan_x_p'] + loss['gan_x']
        self.losses['img_dis'] = loss

    def get_pred_losses(self):
        """
        VAE and GAN losses for the easier motion learning
        """
        ################################# loss for encoder #######################################
        # kl divergence loss, L2 loss for video frames and the modified content feature map
        loss = {}
        loss['kld'] = self.kl_loss((self.motion_mean, self.motion_logvar))
        loss['pred_mse'] = self.mse_loss(self.pred_x_tilde, self.pred_target)

        loss['scale_feat_loss'] = 0
        for (scale_feat, scale_feat_gt) in zip(self.motion_scale_feats, self.pred_scale_feats_gt):
            loss['scale_feat_loss'] += self.mse_loss(scale_feat, scale_feat_gt.detach())

        loss['l_enc_pred'] = self.img_m_kl * loss['kld'] + 10000 * loss['pred_mse'] + self.pred_scale_feat * loss['scale_feat_loss']
        self.losses['motion_enc'] = loss

        ################################# loss for decoder #####################################
        # L2 loss for video frames and the modified content feature map, discriminator loss
        loss = {}
        loss['pred_mse'] = self.mse_loss(self.pred_x_tilde, self.pred_target)

        loss['scale_feat_loss'] = 0
        for (scale_feat, scale_feat_gt) in zip(self.motion_scale_feats, self.pred_scale_feats_gt):
            loss['scale_feat_loss'] += self.mse_loss(scale_feat, scale_feat_gt.detach())

        # # ae pass
        fake_score = self.networks['img_discriminator'](self.pred_x_tilde)
        all_ones = self.ones_like(fake_score.data)
        loss['pred_gan_x_tilde'] = self.gan_loss(fake_score, all_ones)

        # # gan pass
        fake_score = self.networks['img_discriminator'](self.pred_x_p)
        all_ones = self.ones_like(fake_score.data)
        loss['pred_gan_x_p'] = self.gan_loss(fake_score, all_ones)

        loss['l_dec_pred'] = 10000 * loss['pred_mse'] + 10 * (loss['pred_gan_x_tilde'] + loss['pred_gan_x_p']) + \
                                 self.pred_scale_feat * loss['scale_feat_loss']
        self.losses['motion_dec'] = loss

        ############################### loss for discriminator ################################
        loss = {}
        # # ae pass
        fake_score = self.networks['img_discriminator'](self.pred_x_tilde.detach())
        all_zeros = self.zeros_like(fake_score.data)
        loss['pred_gan_x_tilde'] = self.gan_loss(fake_score, all_zeros)
        # # GAN pass
        fake_score = self.networks['img_discriminator'](self.pred_x_p.detach())
        all_zeros = self.zeros_like(fake_score.data)
        loss['pred_gan_x_p'] = self.gan_loss(fake_score, all_zeros)
        # # real data pass
        real_score = self.networks['img_discriminator'](self.pred_target)
        all_ones = self.ones_like(real_score.data)
        loss['gan_x'] = self.gan_loss(real_score, all_ones)

        loss['l_dis_pred'] = loss['pred_gan_x_tilde'] + loss['pred_gan_x_p'] + loss['gan_x']

        self.losses['img_dis'] = loss

    def get_video_loss(self, m_kl):
        """
        VAE and GAN losses for the harder motion learning
        :param m_kl: the weight of KL distance in the motion training
        """
        ############################### loss for encoder ######################################
        # kl divergence loss, L2 loss for video frames and the modified content feature map
        loss = {}
        loss['kld'] = self.kl_loss((self.video_mean, self.video_logvar))
        loss['pred_mse'] = self.mse_loss(self.video_x_tilde, self.images)

        loss['scale_feat_loss'] = 0
        for (scale_feat, scale_feat_gt) in zip(self.video_scale_feats, self.video_scale_feats_gt):
            loss['scale_feat_loss'] += self.mse_loss(scale_feat, scale_feat_gt)

        # make the scale feat loss independent of the video length
        video_len = self.images.shape[2]
        loss['scale_feat_loss'] /= (video_len - 1)
        loss['l_enc'] = m_kl * loss['kld'] + 10000 * loss['pred_mse'] + self.video_scale_feat * loss['scale_feat_loss']

        self.losses['vid_motion_enc'] = loss

        ################################# loss for decoder ####################################
        # L2 loss for video frames and the modified content feature map, discriminator loss
        loss = {}
        loss['pred_mse'] = self.mse_loss(self.video_x_tilde, self.images)

        loss['scale_feat_loss'] = 0
        for (scale_feat, scale_feat_gt) in zip(self.video_scale_feats, self.video_scale_feats_gt):
            loss['scale_feat_loss'] += self.mse_loss(scale_feat, scale_feat_gt)
        video_len = self.images.shape[2]
        loss['scale_feat_loss'] /= (video_len - 1)

        # # ae pass
        fake_score, fake_categories = self.networks['vid_discriminator'](self.video_x_tilde)
        all_ones = self.ones_like(fake_score.data)
        loss['vid_gan_x_tilde'] = self.gan_loss(fake_score, all_ones)
        loss['vid_cat_x_tilde'] = self.category_criterion(fake_categories, self.categories)

        # # GAN pass
        fake_score, fake_categories = self.networks['vid_discriminator'](self.video_x_p)
        all_ones = self.ones_like(fake_score.data)
        loss['vid_gan_x_p'] = self.gan_loss(fake_score, all_ones)
        loss['vid_cat_x_p'] = self.category_criterion(fake_categories, self.categories)

        loss['l_dec'] = 10000 * loss['pred_mse'] + 1 * (loss['vid_gan_x_tilde'] + loss['vid_gan_x_p']) + \
                        loss['vid_cat_x_tilde'] + loss['vid_cat_x_p'] + self.video_scale_feat * loss['scale_feat_loss']
        self.losses['vid_dec'] = loss

        ############################################ loss for discriminator ####################################
        loss = {}
        # # ae pass
        fake_score, fake_categories = self.networks['vid_discriminator'](self.video_x_tilde.detach())
        all_zeros = self.zeros_like(fake_score.data)
        loss['vid_gan_x_tilde'] = self.gan_loss(fake_score, all_zeros)
        # # GAN pass
        fake_score, fake_categories = self.networks['vid_discriminator'](self.video_x_p.detach())
        all_zeros = self.zeros_like(fake_score.data)
        loss['vid_gan_x_p'] = self.gan_loss(fake_score, all_zeros)
        # # real data
        real_score, real_categories = self.networks['vid_discriminator'](self.images)
        all_ones = self.ones_like(real_score.data)
        loss['vid_gan_x'] = self.gan_loss(real_score, all_ones)
        loss['vid_cat_x'] = self.category_criterion(real_categories, self.categories)

        loss['l_dis'] = loss['vid_gan_x_tilde'] + loss['vid_gan_x_p'] + loss['vid_gan_x'] + loss['vid_cat_x']
        self.losses['vid_dis'] = loss

    # ************************************************************************
    # ************************** Backward Methods ****************************
    # ************************************************************************

    def backward_motion_dec(self):
        """
        Backward method for the motion generator
        """
        self.optimizers['optimize_motionDec'].zero_grad()
        if self.task == 'pred':
            self.losses['motion_dec']['l_dec_pred'].backward()
        elif self.task == 'video_generate':
            self.losses['vid_dec']['l_dec'].backward()
        else:
            raise ValueError('task %s is not supported' % self.task)
        self.optimizers['optimize_motionDec'].step()

    def backward_cont_dec(self):
        """
        Backward method for the content generator
        """
        self.optimizers['optimize_contDec'].zero_grad()
        self.losses['cont_dec']['l_dec_recons'].backward()
        self.optimizers['optimize_contDec'].step()

    # ************************************************************************
    # ************************** Optimization Framework **********************
    # ************************************************************************

    def optimize_pred_parameters(self):
        """
        Optimization framework for the easier motion learning
        """
        self.freeze_cont_stream()

        # forward
        self.predict_forward()

        # get loss
        self.get_pred_losses()
        real_cost = np.mean([self.losses['img_dis']['gan_x'].data])
        fake_cost = np.mean([self.losses['img_dis']['pred_gan_x_tilde'].data, self.losses['img_dis']['pred_gan_x_p'].data])

        # backward
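        # Adaptive update rule (as coded below): when either GAN cost falls below
        # equilibrium - margin the discriminator is winning, so its update is skipped;
        # when either cost exceeds equilibrium + margin the decoder update is skipped;
        # if both would be skipped, both are updated instead.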
        equilibrium = 0.68
        margin = 0.4
        dec_update = True
        dis_update = True
        if margin is not None:
            if real_cost < equilibrium - margin or fake_cost < equilibrium - margin:
                dis_update = False
            if real_cost > equilibrium + margin or fake_cost > equilibrium + margin:
                dec_update = False
            if not (dec_update or dis_update):
                dec_update = True
                dis_update = True
        self.backward_motion_enc(retain=dec_update)
        if dec_update:
            self.backward_motion_dec()
        else:
            self.del_motion_dec()
        if dis_update:
            self.backward_img_dis()
        else:
            self.del_img_dis()

        self.free_cont_stream()

    def optimize_recons_pretrain_parameters(self, current_iter):
        """
        Optimization framework for pre-training content stream
        :param current_iter: the current number of iteration
        """
        # forward
        self.reconstruct_forward()
        # get loss
        c_kl = self.c_kl.get_value(current_iter)
        self.get_recons_pretrain_losses(c_kl)
        # backward
        self.backward_content_enc(retain=True)
        self.backward_cont_dec()

    def optimize_recons_parameters(self, current_iter):
        """
        Optimization framework for the content learning
        :param current_iter: the current number of iteration
        """
        # forward
        self.reconstruct_forward()

        # get losses
        c_kl = self.c_kl.get_value(current_iter)
        self.get_recons_losses(c_kl)
        real_cost = np.mean([self.losses['img_dis']['gan_x'].data])
        fake_cost = np.mean([2*(1-self.xp_vs_xtilde) * self.losses['img_dis']['recons_gan_x_tilde'].data, 2*self.xp_vs_xtilde*self.losses['img_dis']['recons_gan_x_p'].data])

        # backward
        equilibrium = 0.68
        margin = 0.5
        dec_update = True
        dis_update = True
        if margin is not None:
            if real_cost < equilibrium - margin or fake_cost < equilibrium - margin:
                dis_update = False
            if real_cost > equilibrium + margin or fake_cost > equilibrium + margin:
                dec_update = False
            if not (dec_update or dis_update):
                dec_update = True
                dis_update = True
        self.backward_content_enc(retain=dec_update)
        if dec_update:
            self.backward_cont_dec()
        else:
            self.del_cont_dec()
        if dis_update:
            self.backward_img_dis()
        else:
            self.del_img_dis()

    def optimize_vid_parameters(self, current_iter):
        """
        Optimization framework for the harder motion learning
        :param current_iter: the current number of iteration
        """
        self.freeze_cont_stream()
        # forward
        epsilon = self.schedule_sampler.get_value(current_iter)
        self.video_forward(epsilon, ae_mode='sample')

        # get losses
        motion_kld_weight = self.motion_kl_weight.get_value(current_iter)
        self.get_video_loss(motion_kld_weight)
        real_cost = np.mean([self.losses['vid_dis']['vid_gan_x'].data])
        fake_cost = np.mean([self.losses['vid_dis']['vid_gan_x_tilde'].data, self.losses['vid_dis']['vid_gan_x_p'].data])

        # backward
        equilibrium = 0.68
        margin = 0.5
        dec_update = True
        dis_update = True
        if margin is not None:
            if real_cost < equilibrium - margin or fake_cost < equilibrium - margin:
                dis_update = False
            if real_cost > equilibrium + margin or fake_cost > equilibrium + margin:
                dec_update = False
            if not (dec_update or dis_update):
                dec_update = True
                dis_update = True
        self.backward_motion_enc(retain=dec_update)
        if dec_update:
            self.backward_motion_dec()
        else:
            self.del_motion_dec()
        if dis_update:
            self.backward_vid_dis()
        else:
            self.del_vid_dis()
        self.free_cont_stream()

    # ************************************************************************
    # ******************************** Test **********************************
    # ************************************************************************

    def full_test(self, cls_id, batch_size, video_len, current_iter, var_name, start_idx=0, is_eval=False, rm_npy=False,
                  get_seq=False, get_mask=False):
        """
        :param cls_id:  int, the action index at test
        :param batch_size: int
        :param video_len: int, the desired length of the video
        :param current_iter: int, the current iteration so far
        :param var_name: str, the variable name for saving or tensorboard visualizing
        :param start_idx: int, the start index of the current batch
        :param is_eval: bool, specify when evaluating
        :param rm_npy: bool, specify to remove all npy files in the output folder
        :param get_seq: bool, specify to save the video sequence
        :param get_mask: bool, specify to visualize the mask
        :return: output_dir: str, the output path
        """
        # create the category matrix for the test class
        cat = cls_id * np.ones((batch_size,)).astype('int')
        if torch.cuda.is_available():
            self.categories = Variable(torch.from_numpy(cat)).cuda()
        else:
            self.categories = Variable(torch.from_numpy(cat))

        # generate the video with size [batch_size, video_len, c, h, w]
        torch.set_grad_enabled(False)
        video, masks = self.networks['generator'].full_test(self.categories, video_len+2)
        torch.set_grad_enabled(True)
        # the first two frames only warm up the generator; drop them
        video = video[:, 2:]

        # create the output directory
        if is_eval:
            output_dir = os.path.join(self.output_dir, 'evaluation', str(current_iter))
        else:
            output_dir = os.path.join(self.output_dir, 'validation', str(current_iter))
        makedir(output_dir)

        # remove the existing npy files
        if rm_npy:
            os.system('rm %s' % os.path.join(output_dir, '*.npy'))

        # save original output to npy file
        # video_np [batch_size, video_len, c,  h, w]
        video_np = video.cpu().data.numpy().clip(-1, 1)
        self.save_video(video_np, output_dir, self.categories, start_idx=start_idx)

        # saving to tensorboard during the validation
        if not is_eval:
            # save to tensorboard
            # [batch_size, video_len, c, h, w]
            video = torch.clamp((video.permute(0, 2, 1, 3, 4) + 1)/2, 0, 1)
            self.writer.add_video(var_name, video, current_iter)

        # save the video sequences to the output folder
        if get_seq:
            video_seqs = ((video_np.transpose(0, 1, 3, 4, 2) + 1)/2 * 255).astype('uint8')
            video_seqs = np.concatenate(np.split(video_seqs, video_len, axis=1), axis=3).squeeze()

            img_dir = os.path.join(output_dir, 'imgs')
            makedir(img_dir)
            for v_idx, seq in enumerate(video_seqs):
                filename = os.path.join(img_dir, '%s_%03d.png' % (var_name, start_idx + v_idx))
                cv2.imwrite(filename, seq[:, :, ::-1])

        # save masks to the output folder
        if get_mask:
            mask_8 = []
            mask_16 = []
            mask_32 = []
            mask_64 = []
            for frame_mask in masks:
                if self.layers >= 4:
                    mask_8.append(frame_mask[0].cpu().numpy().squeeze().clip(0, 1))
                if self.layers >= 3:
                    mask_16.append(frame_mask[1].cpu().numpy().squeeze().clip(0, 1))
                if self.layers >= 2:
                    mask_32.append(frame_mask[2].cpu().numpy().squeeze().clip(0, 1))
                if self.layers >= 1:
                    mask_64.append(frame_mask[3].cpu().numpy().squeeze().clip(0, 1))
            if self.layers >= 4:
                mask_8 = np.concatenate(mask_8[2:], axis=2)
            if self.layers >= 3:
                mask_16 = np.concatenate(mask_16[2:], axis=2)
            if self.layers >= 2:
                mask_32 = np.concatenate(mask_32[2:], axis=2)
            if self.layers >= 1:
                mask_64 = np.concatenate(mask_64[2:], axis=2)
            mask_dir = os.path.join(output_dir, 'masks')
            makedir(mask_dir)
            for v_idx in range(batch_size):
                if self.layers >= 4:
                    filename = os.path.join(mask_dir, '%s_%03d_mask_8.png' % (var_name, start_idx + v_idx))
                    cv2.imwrite(filename, (mask_8[v_idx] * 255).astype('uint8'))
                if self.layers >= 3:
                    filename = os.path.join(mask_dir, '%s_%03d_mask_16.png' % (var_name, start_idx + v_idx))
                    cv2.imwrite(filename, (mask_16[v_idx] * 255).astype('uint8'))
                if self.layers >= 2:
                    filename = os.path.join(mask_dir, '%s_%03d_mask_32.png' % (var_name, start_idx + v_idx))
                    cv2.imwrite(filename, (mask_32[v_idx] * 255).astype('uint8'))
                if self.layers >= 1:
                    filename = os.path.join(mask_dir, '%s_%03d_mask_64.png' % (var_name, start_idx + v_idx))
                    cv2.imwrite(filename, (mask_64[v_idx] * 255).astype('uint8'))

        return output_dir

    def freeze_cont_stream(self):
        """
        freeze the content params during the motion learning
        """
        params = list(self.networks['generator'].contEnc.parameters()) + \
                 list(self.networks['generator'].contSampler.parameters()) + \
                 list(self.networks['generator'].videoDec.parameters()) + \
                 list(self.networks['generator'].contMotionStateGen.fc_cont1.parameters()) + \
                 list(self.networks['generator'].contMotionStateGen.fc_cont2.parameters())
        for param in params:
            param.requires_grad = False

    def free_cont_stream(self):
        """
        free the content params after the motion learning
        """
        params = list(self.networks['generator'].contEnc.parameters()) + \
                 list(self.networks['generator'].contSampler.parameters()) + \
                 list(self.networks['generator'].videoDec.parameters()) + \
                 list(self.networks['generator'].contMotionStateGen.fc_cont1.parameters()) + \
                 list(self.networks['generator'].contMotionStateGen.fc_cont2.parameters())
        for param in params:
            param.requires_grad = True

    def del_motion_dec(self):
        """
        delete the graph of the motion decoder
        """
        if self.task == 'pred':
            del self.losses['motion_dec']['l_dec_pred']
        elif self.task == 'video_generate':
            del self.losses['vid_dec']['l_dec']
        else:
            raise ValueError('task %s is not supported' % self.task)

    def del_cont_dec(self):
        """
        delete the graph of the content decoder
        """
        del self.losses['cont_dec']['l_dec_recons']

    def del_vid_dis(self):
        """
        delete the graph of the video discriminator
        """
        del self.losses['vid_dis']['l_dis']

    def del_img_dis(self):
        """
        delete the graph of the image discriminator
        """
        if self.task == 'pred':
            del self.losses['img_dis']['l_dis_pred']
        elif self.task == 'recons':
            del self.losses['img_dis']['l_dis_recons']
        else:
            raise ValueError('task %s is not supported' % self.task)
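
# The Scheduler(start, end, start_iter, end_iter, mode='linear') used above for
# scheduled sampling and the KL weights is not shown; a minimal sketch of the
# assumed behaviour (hold `start`, ramp linearly, then hold `end`):
class LinearRampScheduler:
    def __init__(self, start, end, start_iter, end_iter, mode='linear'):
        self.start, self.end = start, end
        self.start_iter, self.end_iter = start_iter, end_iter

    def get_value(self, current_iter):
        # constant before the ramp starts
        if current_iter <= self.start_iter:
            return self.start
        # constant once the ramp is finished
        if current_iter >= self.end_iter:
            return self.end
        frac = (current_iter - self.start_iter) / float(self.end_iter - self.start_iter)
        return self.start + frac * (self.end - self.start)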
Example #6
from flask import (current_app, Blueprint, flash, g, redirect, render_template,
                   request, session, url_for, json)
from datetime import (datetime, timedelta)

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore

from utils.db import (emails, courses)
from utils.communication import Communication
from utils.worker import course_polling
from utils.helper import cleanup_email
from utils.scheduler import Scheduler

comm = Communication()

sched = Scheduler()
if Scheduler.sched is None:
    sched.startScheduler()
    sched = Scheduler.sched

sched.add_job(cleanup_email, trigger='cron', hour='4', minute='20')

bp = Blueprint('cart', __name__, url_prefix='/cart')


@bp.route('/', methods={'GET'})
def cart():
    return render_template('cart.html', title="Cart")


@bp.route('/submit', methods={'POST'})
Example #7
                    nargs='?',
                    const=True,
                    help='Re-builds schedule before attending sessions')
parser.add_argument(
    '-mic',
    '--block_chrome_mic_camera',
    type=str2bool,
    default=os.environ['BLOCK_CHROME_MIC_CAM'],
    nargs='?',
    const=True,
    help=
    'Block Chrome access to Mic and Camera. If set user cannot manually give access to camera or mic'
)
parser.add_argument(
    '-mute',
    '--mute_chrome_audio',
    type=str2bool,
    default=os.environ['MUTE_CHROME_AUDIO'],
    nargs='?',
    const=True,
    help=
    'Mutes all audio from the Chrome window. If set user cannot manually un-mute'
)
args = parser.parse_args()

scheduler = Scheduler(launch_interval=args.launch_interval,
                      build_schedule=args.build_schedule,
                      block_chrome_mic_camera=args.block_chrome_mic_camera,
                      mute_chrome_audio=args.mute_chrome_audio)
scheduler.attendLecture()
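
# The str2bool converter referenced above is not shown; a common implementation is
# assumed here (argparse is already required by the snippet):
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')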
Example #8
def main():
    try:
        s = Scheduler()
        s.run()
    except Exception:
        # restart the scheduler on any error (note: the recursive call grows the
        # stack if failures keep repeating)
        main()
Example #9
def run(config, source_code=None, user_funcs=None):
    env = Environment(config)
    persist_helper = None
    init_succeed = False
    mod_handler = ModHandler()

    try:
        if source_code is not None:
            env.set_strategy_loader(SourceCodeStrategyLoader(source_code))
        elif user_funcs is not None:
            env.set_strategy_loader(UserFuncStrategyLoader(user_funcs))
        else:
            env.set_strategy_loader(
                FileStrategyLoader(config.base.strategy_file))
        env.set_global_vars(GlobalVars())
        mod_handler.set_env(env)
        mod_handler.start_up()

        if not env.data_source:
            env.set_data_source(BaseDataSource(config.base.data_bundle_path))

        env.set_data_proxy(DataProxy(env.data_source))
        Scheduler.set_trading_dates_(env.data_source.get_trading_calendar())
        scheduler = Scheduler(config.base.frequency)
        mod_scheduler._scheduler = scheduler

        env._universe = StrategyUniverse()

        _adjust_start_date(env.config, env.data_proxy)

        _validate_benchmark(env.config, env.data_proxy)

        broker = env.broker
        assert broker is not None
        env.portfolio = broker.get_portfolio()
        env.benchmark_portfolio = create_benchmark_portfolio(env)

        event_source = env.event_source
        assert event_source is not None

        bar_dict = BarMap(env.data_proxy, config.base.frequency)
        env.set_bar_dict(bar_dict)

        if env.price_board is None:
            from core.bar_dict_price_board import BarDictPriceBoard
            env.price_board = BarDictPriceBoard()

        ctx = ExecutionContext(const.EXECUTION_PHASE.GLOBAL)
        ctx._push()

        # FIXME
        start_dt = datetime.datetime.combine(config.base.start_date,
                                             datetime.datetime.min.time())
        env.calendar_dt = start_dt
        env.trading_dt = start_dt

        env.event_bus.publish_event(Event(EVENT.POST_SYSTEM_INIT))

        scope = create_base_scope()
        scope.update({"g": env.global_vars})

        apis = api_helper.get_apis(config.base.account_list)
        scope.update(apis)

        scope = env.strategy_loader.load(scope)

        if env.config.extra.enable_profiler:
            enable_profiler(env, scope)

        ucontext = StrategyContext()
        user_strategy = Strategy(env.event_bus, scope, ucontext)
        scheduler.set_user_context(ucontext)

        if not config.extra.force_run_init_when_pt_resume:
            with run_with_user_log_disabled(disabled=config.base.resume_mode):
                user_strategy.init()

        if config.extra.context_vars:
            for k, v in six.iteritems(config.extra.context_vars):
                setattr(ucontext, k, v)

        if config.base.persist:
            persist_provider = env.persist_provider
            persist_helper = PersistHelper(persist_provider, env.event_bus,
                                           config.base.persist_mode)
            persist_helper.register('core', CoreObjectsPersistProxy(scheduler))
            persist_helper.register('user_context', ucontext)
            persist_helper.register('global_vars', env.global_vars)
            persist_helper.register('universe', env._universe)
            if isinstance(event_source, Persistable):
                persist_helper.register('event_source', event_source)
            persist_helper.register('portfolio', env.portfolio)
            if env.benchmark_portfolio:
                persist_helper.register('benchmark_portfolio',
                                        env.benchmark_portfolio)
            for name, module in six.iteritems(env.mod_dict):
                if isinstance(module, Persistable):
                    persist_helper.register('mod_{}'.format(name), module)
            # broker will restore open orders from account
            if isinstance(broker, Persistable):
                persist_helper.register('broker', broker)

            persist_helper.restore()
            env.event_bus.publish_event(Event(EVENT.POST_SYSTEM_RESTORED))

        init_succeed = True

        # When force_run_init_when_pt_resume is active,
        # `init` should run after the persisted data has been restored
        if config.extra.force_run_init_when_pt_resume:
            assert config.base.resume_mode
            with run_with_user_log_disabled(disabled=False):
                user_strategy.init()

        from core.executor import Executor
        Executor(env).run(bar_dict)
        # print(env.__dict__)
        if env.profile_deco:
            output_profile_result(env)

        result = mod_handler.tear_down(const.EXIT_CODE.EXIT_SUCCESS)
        system_log.debug(_(u"strategy run successfully, normal exit"))
        return result
    except CustomException as e:
        if init_succeed and env.config.base.persist and persist_helper:
            persist_helper.persist()

        user_detail_log.exception(_(u"strategy execute exception"))
        user_system_log.error(e.error)

        better_exceptions.excepthook(e.error.exc_type, e.error.exc_val,
                                     e.error.exc_tb)

        mod_handler.tear_down(const.EXIT_CODE.EXIT_USER_ERROR, e)
    except Exception as e:
        if init_succeed and env.config.base.persist and persist_helper:
            persist_helper.persist()

        exc_type, exc_val, exc_tb = sys.exc_info()
        user_exc = create_custom_exception(exc_type, exc_val, exc_tb,
                                           config.base.strategy_file)

        better_exceptions.excepthook(exc_type, exc_val, exc_tb)
        user_system_log.error(user_exc.error)
        code = const.EXIT_CODE.EXIT_USER_ERROR
        if not is_user_exc(exc_val):
            system_log.exception(_(u"strategy execute exception"))
            code = const.EXIT_CODE.EXIT_INTERNAL_ERROR
        else:
            user_detail_log.exception(_(u"strategy execute exception"))

        mod_handler.tear_down(code, user_exc)
Example #10
def main():
    torch.set_num_threads(1)
    # ********************************************************************
    # ****************** create folders and print options ****************
    # ********************************************************************
    opt, gen_args, dis_args, loss_weights = TrainOptions().parse()
    makedir(opt.checkpoint_dir)
    makedir(opt.log_dir)
    makedir(opt.output_dir)
    listopt(opt)
    with open(os.path.join(opt.log_dir, 'train_opt.txt'), 'w+') as f:
        listopt(opt, f)

    # ********************************************************************
    # ******************** Prepare the dataloaders ***********************
    # ********************************************************************
    image_transforms = transforms.Compose([
        transforms.ToTensor(),
        lambda x: x[:opt.n_channels, ::],
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    video_transforms = functools.partial(video_transform, image_transform=image_transforms)

    if opt.dataset == 'Weizmann':
        trainset = WeizmannDataset(opt.dataroot, opt.textroot, opt.video_length, opt.image_size, opt.every_nth, opt.crop,
                                   'Train', mini_clip=opt.miniclip)
        valset = WeizmannDataset(opt.dataroot, opt.textroot, opt.video_length, opt.image_size, opt.every_nth, False,
                                 'Test', mini_clip=opt.miniclip)
    elif opt.dataset == 'MUG':
        trainset = MUGDataset(opt.dataroot, opt.textroot, opt.video_length, opt.image_size, opt.every_nth, opt.crop,
                              'Train')
        valset = MUGDataset(opt.dataroot, opt.textroot, opt.video_length, opt.image_size, opt.every_nth, False, 'Test')
    elif opt.dataset == 'SynAction':
        trainset = SynActionDataset(opt.dataroot, opt.textroot, opt.video_length, opt.image_size, opt.crop, 'Train')
        valset = SynActionDataset(opt.dataroot, opt.textroot, opt.video_length, opt.image_size, False, 'Test')
    else:
        raise NotImplementedError('%s dataset is not supported' % opt.dataset)

    # get the validate dataloader
    video_trainset = VideoDataset(trainset, opt.video_length, every_nth=opt.every_nth, transform=video_transforms)
    video_train_loader = DataLoader(video_trainset, batch_size=opt.batch_size, drop_last=True, num_workers=2, shuffle=True)

    video_valset = VideoDataset(valset, opt.video_length, every_nth=opt.every_nth, transform=video_transforms)
    video_val_loader = DataLoader(video_valset, batch_size=opt.batch_size, drop_last=True, num_workers=2, shuffle=False)

    # ********************************************************************
    # ******************** Create the environment ************************
    # ********************************************************************
    gen_args['num_categories'] = len(trainset.action_set)
    dis_args['num_categories'] = len(trainset.action_set)

    if opt.model == 'SGVAN':
        environ = SGVAN(gen_args, opt.checkpoint_dir, opt.log_dir, opt.output_dir, opt.video_length,
                        trainset.action_set, trainset.actor_set, is_eval=False, dis_args=dis_args,
                        loss_weights=loss_weights, pretrain_iters=opt.pretrain_iters)
    elif opt.model == 'TwoStreamVAN':
        environ = TwoStreamVAN(gen_args, opt.checkpoint_dir, opt.log_dir, opt.output_dir, opt.video_length,
                               trainset.action_set, trainset.actor_set, is_eval=False, dis_args=dis_args,
                               loss_weights=loss_weights, pretrain_iters=opt.pretrain_iters)
    else:
        raise ValueError('Model %s is not implemented' % opt.model)

    current_iter = 0
    if opt.resume:
        current_iter = environ.load(opt.which_iter)
    else:
        environ.weight_init()
    environ.train()

    # ********************************************************************
    # ******************** Set the training ratio ************************
    # ********************************************************************
    # content vs motion
    cont_scheduler = Scheduler(opt.cont_ratio_start, opt.cont_ratio_end,
                               opt.cont_ratio_iter_start + opt.pretrain_iters, opt.cont_ratio_iter_end + opt.pretrain_iters,
                               mode='linear')
    # easier vs harder motion
    m_img_scheduler = Scheduler(opt.motion_ratio_start, opt.motion_ratio_end,
                                opt.motion_ratio_iter_start + opt.pretrain_iters, opt.motion_ratio_iter_end + opt.pretrain_iters,
                                mode='linear')

    # ********************************************************************
    # ***************************  Training  *****************************
    # ********************************************************************
    recons_c, pred_c, vid_c = 0, 0, 0
    video_enumerator = enumerate(video_train_loader)
    while current_iter < opt.total_iters:
        start_time = time.time()
        current_iter += 1
        batch_idx, batch = next(video_enumerator)
        environ.set_inputs(batch)

        if current_iter < opt.pretrain_iters:
            # ********************** Pre-train the Content Stream **************
            environ.optimize_recons_pretrain_parameters(current_iter)

            # print loss to the screen and save intermediate results to tensorboard
            if current_iter % opt.print_freq == 0:
                environ.print_loss(current_iter, start_time)
                environ.visual_batch(current_iter, name='%s_current_batch' % environ.task)

            # validation
            if current_iter % opt.val_freq == 0:
                environ.eval()
                # validation of the content generation
                for idx, batch in enumerate(video_val_loader):
                    environ.set_inputs(batch)
                    environ.reconstruct_forward(ae_mode='mean', is_eval=True)
                    if idx == 0:
                        environ.visual_batch(current_iter, name='val_recons')
                # save the current checkpoint
                environ.save('latest', current_iter)
                environ.train()
        else:
            # ********************* Jointly train the Content & Motion *************
            ep1 = cont_scheduler.get_value(current_iter)
            ep2 = m_img_scheduler.get_value(current_iter)
            recons = (random.random() > ep1)
            img_level = (random.random() > ep2)
            if recons:
                # content training
                recons_c += 1
                environ.optimize_recons_parameters(current_iter)
            else:
                if img_level:
                    # easier motion training
                    pred_c += 1
                    environ.optimize_pred_parameters()
                else:
                    # harder motion training
                    vid_c += 1
                    environ.optimize_vid_parameters(current_iter)

            # print loss to the screen and save intermediate results to tensorboard
            if current_iter % opt.print_freq == 0:
                environ.print_loss(current_iter, start_time)
                environ.visual_batch(current_iter, name='%s_current_batch' % environ.task)
                print('recons: %d, pred: %d, vid: %d' % (recons_c, pred_c, vid_c))
                recons_c, pred_c, vid_c = 0, 0, 0

            # validation and save checkpoint
            if current_iter % opt.val_freq == 0:
                environ.eval()
                for idx, batch in enumerate(video_val_loader):
                    environ.set_inputs(batch)

                    # content stream validation
                    environ.reconstruct_forward(ae_mode='mean', is_eval=True)
                    if idx == 0:
                        environ.visual_batch(current_iter, name='val_recons')

                    # easier motion stream validation
                    environ.predict_forward(ae_mode='mean', is_eval=True)
                    if idx == 0:
                        environ.visual_batch(current_iter, name='val_pred')

                    # harder motion stream validation
                    environ.video_forward(eplison=0, ae_mode='mean', is_eval=True)
                    if idx == 0:
                        environ.visual_batch(current_iter, name='val_video')

                # generate videos for different class
                for idx, cls_name in enumerate(valset.action_set):
                    environ.get_category(cls_id=idx)
                    output_dir = environ.full_test(idx, 32, 10, current_iter, cls_name)
                metrics = eval(opt, output_dir)
                environ.print_loss(current_iter, start_time, metrics)

                # remove the generated video
                rm_cmd = 'rm -r %s' % output_dir
                os.system(rm_cmd)

                # save the latest checkpoint
                environ.save('latest', current_iter)
                environ.train()

        # save the checkpoint
        if current_iter % opt.save_freq == 0:
            environ.save(current_iter, current_iter)

        # get a new enumerator
        if batch_idx == len(video_train_loader) - 1:
            video_enumerator = enumerate(video_train_loader)
Example #11
            break
    return res


def workersHaveFinished():
    for name in config['workers']:
        log.info("Result of %s: %s" %
                 (workers[name]['instance'].getDisplayName(),
                  workers[name]['instance'].getResult()))
    return True


if __name__ == "__main__":
    with open(CFG_FILE) as c:
        config = json.load(c)
        scheduler = Scheduler()
        scheduler.start()
        signal.signal(signal.SIGINT, signal_handler)
        print(
            text2art('-----------------------\n    Welcome   to   ' +
                     config['general']['name'] + "   " +
                     config['general']['version'] +
                     '\n-----------------------'))
        log.debug("Starting %s v%s" %
                  (config['general']['name'], config['general']['version']))
        # Main loop waiting & polling
        workers = {}
        for worker in config['workers']:
            workers[worker] = {
                'import': None,
                'config': None,
Example #12
		pb1=get_price(r1['ex']['availableToBack'])
		pb2=get_price(r2['ex']['availableToBack']) 
		
		pl1=get_price(r1['ex']['availableToLay'])
		pl2=get_price(r2['ex']['availableToLay'])
		curr=(ts, pb1, pl1, pt1, pt2, pb2, pl2)
		
		prev= self.cache[-1] if self.cache else (None,None)
		self.cache.append(curr)

		if curr[1:] != prev[1:]:
			print "{} R1:{}/{} ({}-{}) R2:{}/{}".format(*curr)


		if now-self.prev_dump > dt.timedelta(minutes=5):
			mode='a'  
			"""
			with open(fn, mode) as fp:
				for sample in self.cache:
					line="{}|{}|{}".format(*sample)
					fp.write(line+"\n")
			"""
			self.prev_dump=now
			self.cache=[]

dump=Dump(marketId, fn)
now=dt.datetime.now()
sc=Scheduler()
e=Event(now, dump.fetch_prices, marketId, dt.timedelta(seconds=1), now+dt.timedelta(minutes=10))
sc.register(e)
sc.run()