Example #1
0
def test_feeder():
    """Smoke-test the data feeder: parse config.yaml, build a Loader,
    and print one served (input, ground-truth) pair."""
    with open("config.yaml") as cnf:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the file.
        cfg = yaml.safe_load(cnf)
    loader = utils.Loader(cfg)
    x, gt = loader.serve()
    print(x)
    print(gt)
Example #2
0
def test():
    """Run the restored model over 200 test batches and dump results.

    Requires cfg['load'] == "True": evaluating an untrained graph is
    meaningless, so anything else is a hard error.
    """
    cfg = get_config()
    # Placeholders for the observed trajectory and the (extended) target.
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    mod = model.rec_model(cfg)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    loader = utils.Loader(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])

    # Gradient ops mirror the training setup (clip to [-1, 1]); they are
    # built but never run in this evaluation loop.
    gvs = optimizer.compute_gradients(mod.loss)
    capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()
    # Output dir: <last 4 digits of time>_test_<run name taken from load_path>.
    newp = str(time.time())[-4:] + "_test_" + cfg["load_path"].split("/")[-3]
    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    total = []
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
        else:
            # The original had a bare (no-op) string literal here; fail
            # loudly instead of silently running an uninitialized model.
            raise RuntimeError("TESTING must have load=true")

        print("OK")
        for i in range(0, 200):
            x, gt, f, box, info, img = loader.serve_test()

            # Prepend the last pred_ext observed steps to the ground truth
            # to form the full target sequence.
            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
            o = sess.run(mod.out,
                         feed_dict={
                             inpts: x,
                             mod.image: img,
                             outs: tot,
                             mod.target: tot,
                             mod.box: box,
                             mod.inputs: x,
                             mod.feats: f
                         })
            for k in range(0, len(info)):
                # NOTE(review): extend() with a string adds individual
                # characters to `total`; append() was probably intended —
                # confirm against the original project.
                total.extend(" ".join(str(info[k])))
                # NOTE(review): `frame`, `names`, `l` and `df` are undefined
                # in this scope — this dump cannot run as written; confirm
                # against the original source.
                with open(newp + "/data/test" + frame + names[l] + ".json",
                          "w") as out_f:
                    json.dump(df, out_f, sort_keys=True)

        # Context manager guarantees the file is closed even on error
        # (original used open/write/close).
        with open(newp + "/dat", "w") as dat_f:
            dat_f.write(str(set(total)))
def main(_):
    """Entry point: extract features, load trigger data, train the trigger model."""
    config = flags.FLAGS
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu
    # Silence TensorFlow C++ logging (3 = errors only).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    extractor = utils.Extractor()
    extractor.Extract()
    # NOTE(review): `cut_len` is not defined anywhere in this scope — this
    # raises NameError as written; confirm the intended argument.
    loader = utils.Loader(cut_len)
    t_data = loader.load_trigger()
    trigger = Trigger_Model(t_data, loader.maxlen, loader.wordemb, config.mode)
    trigger.train_trigger()
Example #4
0
def main(_):
    """Two-stage DMCNN pipeline: train the trigger model first, then feed
    its processed output into the argument model and train that."""
    cfg = flags.FLAGS
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpu

    feature_extractor = utils.Extractor()
    feature_extractor.Extract()

    data_loader = utils.Loader()
    trigger_data = data_loader.load_trigger()
    argument_data = data_loader.load_argument()

    # Stage 1: trigger detection; its output becomes the argument input.
    trigger_model = DMCNN(trigger_data, argument_data, data_loader.maxlen,
                          data_loader.max_argument_len, data_loader.wordemb)
    processed_arguments = trigger_model.train_trigger()

    # Stage 2: argument classification in the configured mode.
    argument_model = DMCNN(trigger_data, processed_arguments,
                           data_loader.maxlen, data_loader.max_argument_len,
                           data_loader.wordemb, stage=cfg.mode,
                           classify=cfg.classify)
    argument_model.train_argument()
Example #5
0
def main(args):
    """Train and evaluate the KASR recommender.

    Builds the data loaders and the model, then runs ``args.epoch`` epochs:
    each epoch does a full training pass (average loss printed) followed by
    an evaluation pass printing hr/ndcg/mrr @20.
    """
    # load data
    print('-- load data...')
    loader = utils.Loader(args)
    train_dl = loader.train_dl
    valid_dl = loader.valid_dl

    def adjust_learning_rate(optimizer, epoch):
        # Step decay: divide the base lr by 10 every 10 epochs.
        # (Currently unused — the call site below is commented out.)
        scaled = args.lr * (0.1 ** (epoch // 10))
        for group in optimizer.param_groups:
            group['lr'] = scaled

    # construct model
    print('-- construct model...')
    model = KASR.KASR(args, loader.n_entity, loader.n_relation,
                      torch.Tensor(loader.D_node).to(device),
                      torch.Tensor(loader.adj_entity).long().to(device),
                      torch.Tensor(loader.adj_relation).long().to(device))
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.lr_dc)

    # train & eval
    for epoch in range(args.epoch):
        print('\n[Epoch {}]'.format(epoch))
        # adjust_learning_rate(optimizer, epoch)

        epoch_loss = 0.
        started = time.time()
        for batch in tqdm(train_dl, desc='train', ncols=80):
            epoch_loss += _update(model, optimizer, batch)
        print('loss:{:.2f}\tseconds:{:.2f}'.format(epoch_loss / len(train_dl),
                                                   time.time() - started))

        # eval: accumulate the 6-element metric vector over all batches.
        metrics = np.array([0.] * 6)
        for batch in tqdm(valid_dl, desc='eval ', ncols=80):
            metrics += _inference(model, batch, loader.all_items_list)
        metrics = metrics / len(valid_dl)
        print('hr@20:{:.4f}\tndcg@20:{:.4f}\tmrr@20:{:.4f}'.format(
            metrics[3], metrics[4], metrics[5]))
Example #6
0
def train():
    """Main (non-adversarial) training loop.

    Builds the recurrent model, optionally clips gradients, creates a run
    directory whose name encodes the config, then trains for cfg['epochs']
    epochs, periodically evaluating on test batches, writing TensorBoard
    summaries, saving sample images and checkpoints.
    """
    cfg = get_config()
    # Placeholders: observed trajectory and (pred_ext + future) target.
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])

    #mod=model.rec_model(cfg)
    mod = model.rec_model(cfg)
    # opti= tf.train.RMSPropOptimizer(cfg['lr'], decay=0.9, momentum=0.5)
    opti = tf.train.AdamOptimizer(cfg['lr'])
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    if (cfg['clipping']):
        # None-safe per-gradient clipping before the update.
        gvs = opti.compute_gradients(mod.loss)
        capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gvs]
        mini = opti.apply_gradients(capped_gvs)
    else:
        mini = opti.minimize(mod.loss)

    loader = utils.Loader(cfg)

    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    # Run-directory name: timestamp fragment, prefix, model-type tag(s),
    # lengths and sizes.  NOTE(review): `nms` and `name_generator` are
    # module-level names not visible in this snippet — assumed defined
    # elsewhere in the file.
    if (cfg['type'] != 3):
        newp = str(time.time()).split(
            ".")[0][-5:] + "_" + cfg['prefix'] + "_" + "-".join(
                nms[i] for i in range(cfg['type'] + 1)) + str(
                    cfg['prev_leng']) + "_" + str(cfg['fut_leng']) + "_" + str(
                        cfg['units']) + "_" + str(
                            cfg['lat_size']) + "_" + "_".join(
                                name_generator.get_combo())
    else:
        newp = str(time.time()).split(
            ".")[0][-5:] + "_" + cfg['prefix'] + "_" + nms[cfg['type']] + str(
                cfg['prev_leng']) + "_" + str(cfg['fut_leng']) + "_" + str(
                    cfg['units']) + "_" + str(
                        cfg['lat_size']) + "_" + "_".join(
                            name_generator.get_combo())

    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    # Snapshot the config next to the run artifacts for reproducibility.
    shutil.copy("config.yaml", newp + "/data/" + "config.yaml")
    tf.summary.scalar("loss", mod.loss)
    tf.summary.scalar("leng_loss", mod.leng_loss)
    tf.summary.scalar("dirs_loss", mod.dirs_loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    merge = tf.summary.merge_all()
    with tf.Session(config=config) as sess:
        # Either resume from a checkpoint or start fresh.
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
            print("LOADED MODEL at " + cfg['load_path'])
        else:
            sess.run(init)

        train_writer = tf.summary.FileWriter("logs/" + newp, sess.graph)
        #test_writer = tf.summary.FileWriter(newp + "/data", sess.graph)
        print(newp)
        for e in range(cfg['epochs']):
            # One warm-up evaluation batch at the start of each epoch
            # (dropout disabled via mod.drop = 1.0).
            x, gt, f, box, info, img = loader.serve_test()
            # Target = last pred_ext observed steps prepended to ground truth.
            tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
            summary, ls, o = sess.run(
                [merge, mod.loss, mod.out],
                feed_dict={
                    inpts: x,
                    outs: tot,
                    mod.target: tot,
                    mod.inputs: x,
                    mod.drop: 1.0,
                    mod.feats: f,
                    mod.box: box,
                    mod.image: img
                })
            print(info[0])

            # for k in range(min(16,len(info))):
            #     im=drawer.draw_points(o[k],x[k],gt[k],cfg,info[k])
            #     im.save(newp+"/"+str(e)+"_"+str(k)+".png")
            for i in range(0, loader.total_data // cfg['batch']):
                # print(str(e)+" _ "+str(i))
                # Training step (dropout keep-prob 0.7).
                x, gt, f, box, info, img = loader.serve()
                tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                ls, _, o = sess.run(
                    [mod.loss, mini, mod.out],
                    feed_dict={
                        inpts: x,
                        outs: tot,
                        mod.target: tot,
                        mod.image: img,
                        mod.inputs: x,
                        mod.box: box,
                        mod.feats: f,
                        mod.drop: 0.7
                    })
                if (i % 400 == 0):
                    print("TRAIN ", ls)
                if (i % 400 == 0):
                    # Periodic evaluation: average loss over 20 test batches
                    # and dump a few rendered predictions.
                    summ = 0
                    for tst in range(0, 20):
                        x, gt, f, box, info, img = loader.serve_test()

                        tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                        summary, ls, o = sess.run(
                            [merge, mod.loss, mod.out],
                            feed_dict={
                                inpts: x,
                                mod.image: img,
                                mod.drop: 1.0,
                                outs: tot,
                                mod.target: tot,
                                mod.box: box,
                                mod.inputs: x,
                                mod.feats: f
                            })
                        summ += ls
                        for k in range(4):
                            im = drawer.draw_points(o[k], x[k], gt[k], cfg,
                                                    info[k], box[k])
                            im.save(newp + "/" + str(e) + "_" + str(tst) +
                                    "_" + str(k) + ".png")
                        # print(x[0])
                    # NOTE(review): the step index mixes loader.total_data
                    # (samples) with i (batches) — confirm intended x-axis.
                    train_writer.add_summary(summary,
                                             (loader.total_data * e) + i)
                    print(
                        str(summ / 20.0) + " iteration " + str(i) + "of " +
                        str(loader.total_data // cfg['batch']) +
                        " ,at epoch " + str(e) + " of " + str(cfg['epochs']))
                    # x, gt, f, box, info, img = loader.serve_test()
                    #
                    # tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    # summary, ls, o = sess.run([merge, mod.loss, mod.out],
                    #                           feed_dict={inpts: x, mod.image: img, outs: tot, mod.target: tot,
                    #                                      mod.inputs: x, mod.feats: f, mod.box: box})
                    # print(info[0])

                    #drawer.points_alone(o[k],x[k],gt[k],k,newp)
                    # Checkpoint every 4000 iterations (nested so it only
                    # fires on evaluation iterations).
                    if (i % 4000 == 0):
                        print("SAVING " + newp)
                        saver.save(sess, newp + "/model/model.ckpt")

            # if(e%3==0):
            #     x, gt, f, box,info,img = loader.serve_test()
            #
            #     tot = np.concatenate([x[:,-cfg['pred_ext']:], gt], -2)
            #     summary, ls, o = sess.run([merge, mod.loss, mod.out],
            #                               feed_dict={inpts: x,mod.image:img, outs: tot, mod.target: tot, mod.inputs: x, mod.feats: f,mod.box:box})
            #     print(info[0])
            #     for k in range(min(16,len(info))):
            #         im=drawer.draw_points(o[k],x[k],gt[k],cfg,info[k])
            #
            #         im.save(newp+"/"+str(e)+"_"+str(k)+".png")#"#str("_".join(info[i]))+".png")
            # End-of-epoch checkpoint.
            print("SAVING " + newp)
            saver.save(sess, newp + "/model/model.ckpt")
Example #7
0
def train_GAN():
    """Adversarial training loop.

    Builds the recurrent generator plus a discriminator over
    (input, output, image) triples, then alternates several discriminator
    steps with one clipped generator step, periodically logging averaged
    losses, saving rendered predictions and checkpoints.
    """
    cfg = get_config()
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['GPU'])
    # Placeholders: observed trajectory, (pred_ext + future) target, and
    # the real image batch fed to the discriminator.
    inpts = tf.placeholder(tf.float32,
                           [cfg['batch'], cfg['prev_leng'], cfg['dims']])
    outs = tf.placeholder(
        tf.float32,
        [cfg['batch'], cfg['pred_ext'] + cfg['fut_leng'], cfg['dims']])
    real_imgs = tf.placeholder(tf.float32, shape=[cfg['batch'], 128, 256, 5])

    mod = model.rec_model(cfg)
    optimizer = tf.train.AdamOptimizer(cfg['lr'])

    # Discriminator logits on the real target and on the generator output
    # (shared weights via reuse=True).
    r_logits = mod.discrim(inpts, outs, real_imgs)
    f_logits = mod.discrim(inpts, mod.out, real_imgs, reuse=True)

    # Plain real/fake losses, reported for diagnostics only.
    r_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logits,
                                                labels=tf.ones_like(r_logits)))
    f_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=f_logits, labels=tf.zeros_like(f_logits)))
    # Discriminator objective with one-sided label smoothing (real = 0.9).
    disc_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=r_logits, labels=tf.ones_like(r_logits) * 0.9) +
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=f_logits, labels=tf.zeros_like(f_logits)))
    d_opti = tf.train.AdamOptimizer(cfg['d_lr'])
    dim_opti = tf.train.AdamOptimizer(cfg['im_d_lr'])
    # Generator's adversarial loss (also label-smoothed).
    gen_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits,
                                                labels=tf.ones_like(f_logits) *
                                                0.9))
    wei = cfg['wei']
    # alpha scales the adversarial term in the generator objective.
    alpha = tf.placeholder(dtype=tf.float32)
    # Separate update ops per variable scope: trajectory discriminator
    # (DISCR), image discriminator (DIM), generator (GEN).
    d_step = d_opti.minimize(disc_loss,
                             var_list=tf.get_collection(
                                 tf.GraphKeys.GLOBAL_VARIABLES, scope='DISCR'))
    dim_step = dim_opti.minimize(disc_loss,
                                 var_list=tf.get_collection(
                                     tf.GraphKeys.GLOBAL_VARIABLES,
                                     scope='DIM'))
    # Generator step: reconstruction loss + alpha-weighted adversarial
    # loss, with None-safe gradient clipping.
    gvs = optimizer.compute_gradients(mod.loss + gen_loss * alpha,
                                      var_list=tf.get_collection(
                                          tf.GraphKeys.GLOBAL_VARIABLES,
                                          scope='GEN'))
    print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='GEN'))
    print("============================================================")
    print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='DISCR'))
    capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gvs]
    mini = optimizer.apply_gradients(capped_gvs)
    loader = utils.Loader(cfg)
    # (The original created init/saver twice in a row; once is enough.)
    init = tf.initializers.global_variables()
    saver = tf.train.Saver()

    # Run-directory name: timestamp fragment, prefix, model-type tags and
    # lengths.  NOTE(review): `nms` and `name_generator` are module-level
    # names not visible in this snippet.
    newp = str(time.time(
    )).split(".")[0][-4:] + "_" + cfg['prefix'] + "_" + "-".join(
        nms[i]
        for i in range(cfg['type'] + 1)) + str(cfg['prev_leng']) + "_" + str(
            cfg['fut_leng']) + "_" + "_".join(name_generator.get_combo())

    os.mkdir(newp)
    os.mkdir(newp + "/model")
    os.mkdir(newp + "/data")
    # Snapshot the config next to the run artifacts for reproducibility.
    shutil.copy("config.yaml", newp + "/data/" + "config.yaml")
    tf.summary.scalar("loss", mod.loss)
    tf.summary.scalar("leng_loss", mod.leng_loss)
    tf.summary.scalar("dirs_loss", mod.dirs_loss)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    merge = tf.summary.merge_all()
    with tf.Session(config=config) as sess:
        # Either resume from a checkpoint or start fresh.
        if (cfg['load'] == "True"):
            saver.restore(sess, cfg['load_path'])
            print("LOADED MODEL at " + cfg['load_path'])
        else:
            sess.run(init)
        train_writer = tf.summary.FileWriter("logs/" + newp, sess.graph)
        print("OK")
        for e in range(cfg['epochs']):
            # Ramp the adversarial weight up by 0.1 per epoch, capped at
            # 1.0.  (Was max(), which pins the weight at >= 1.0 from the
            # first epoch and defeats the ramp.)
            wei = min(wei + 0.1, 1.0)
            for i in range(0, loader.total_data // cfg['batch']):
                # Several discriminator updates per generator update.
                for k in range(cfg["disc_runs_per_gen_runs"]):
                    x, gt, f, box, info, img = loader.serve()
                    # Target = last pred_ext observed steps + ground truth.
                    tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    sess.run(
                        [d_step, dim_step],
                        feed_dict={
                            inpts: x,
                            outs: tot,
                            mod.target: tot,
                            mod.image: img,
                            mod.inputs: x,
                            mod.box: box,
                            mod.feats: f,
                            real_imgs: img
                        })
                # Generator update (reuses the last discriminator batch).
                ls, _, o = sess.run(
                    [mod.loss, mini, mod.out],
                    feed_dict={
                        inpts: x,
                        outs: tot,
                        mod.target: tot,
                        mod.image: img,
                        mod.inputs: x,
                        mod.box: box,
                        mod.feats: f,
                        real_imgs: img,
                        alpha: wei
                    })
                if (i % 200 == 0):
                    # Average generator/discriminator losses over 20 test
                    # batches.
                    summ = 0
                    d_summ = 0
                    fake_loss = 0
                    real_loss = 0
                    g_l = 0
                    for tst in range(0, 20):
                        x, gt, f, box, info, img = loader.serve_test()
                        tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                        summary, ls, o, gl = sess.run(
                            [merge, mod.loss, mod.out, gen_loss],
                            feed_dict={
                                inpts: x,
                                mod.image: img,
                                outs: tot,
                                mod.target: tot,
                                mod.box: box,
                                mod.inputs: x,
                                mod.feats: f,
                                real_imgs: img
                            })
                        rl, fl, dls, r_l, f_l = sess.run(
                            [r_logits, f_logits, disc_loss, r_loss, f_loss],
                            feed_dict={
                                inpts: x,
                                outs: tot,
                                mod.target: tot,
                                mod.image: img,
                                mod.inputs: x,
                                mod.box: box,
                                mod.feats: f,
                                real_imgs: img
                            })

                        summ += ls + gl
                        g_l += gl
                        d_summ += dls
                        fake_loss += f_l
                        real_loss += r_l
                    train_writer.add_summary(summary,
                                             (loader.total_data * e) + i)
                    print("fake: " + str(fake_loss / 20.0) + " real: " +
                          str(real_loss / 20.0))
                    # Report the accumulated adversarial loss g_l (the
                    # original printed `gl`, i.e. the last batch only).
                    print("GEN_TOTAL: " + str(summ / 20.0) + " DISC: " +
                          str(d_summ / 20.0) + " GEN_ADVERSARIAL:" +
                          str(g_l / 20.0) + " iteration " + str(i) + "of " +
                          str(loader.total_data // cfg['batch']) +
                          " ,at epoch " + str(e) + " of " + str(cfg['epochs']))
                if (i % 200 == 0):
                    # Render a handful of predictions on a fresh test batch.
                    x, gt, f, box, info, img = loader.serve_test()

                    tot = np.concatenate([x[:, -cfg['pred_ext']:], gt], -2)
                    summary, ls, o = sess.run(
                        [merge, mod.loss, mod.out],
                        feed_dict={
                            inpts: x,
                            mod.image: img,
                            outs: tot,
                            mod.target: tot,
                            mod.inputs: x,
                            mod.feats: f,
                            mod.box: box,
                            real_imgs: img
                        })
                    for k in range(min(16, len(info))):
                        im = drawer.draw_points(o[k], x[k], gt[k], cfg,
                                                info[k])
                        im.save(newp + "/" + str(e) + "_" + str(k) +
                                ".png")  # "#str("_".join(info[i]))+".png")
                if (i % 2000 == 0):
                    saver.save(sess, newp + "/model/model.ckpt")
Example #8
0
    def __init__(self, adj_mx, **kwargs):
        """Set up training state from a nested kwargs config.

        Args:
            adj_mx: adjacency matrix used by the model (stored as-is).
            **kwargs: nested config with 'data', 'model' and 'train'
                sections plus top-level 'log_dir', 'save_dir',
                'model_name' and optional 'log_level' entries.
        """
        self._kwargs = kwargs
        self._data_kwargs = kwargs.get('data')
        self._model_kwargs = kwargs.get('model')
        self._train_kwargs = kwargs.get('train')
        # Dataset name = last path component of the dataset directory.
        self.dataset_name = self._data_kwargs['dataset_dir'].split('/')[-1]
        self.adj_mx = adj_mx
        self.model_params = dict()
        self.model_params['seq_len'] = 30
        # Cutoffs for top-K evaluation metrics.
        self.K = [1, 5, 10, 20, 50, 100]

        # Model name is hard-coded here, overriding the config value.
        model_name = 'net_act_orig'  # self._kwargs['model_name']
        self.log_file_name = utils.get_log_dir(log_dir=self._kwargs['log_dir'],
                                               model_name=model_name,
                                               dataset_name=self.dataset_name)
        # Create the save_dir/<dataset>/<model_name> hierarchy if missing.
        if not os.path.exists(self._kwargs['save_dir']):
            os.makedirs(self._kwargs['save_dir'])
        if not os.path.exists(
                os.path.join(self._kwargs['save_dir'], self.dataset_name)):
            os.makedirs(
                os.path.join(self._kwargs['save_dir'], self.dataset_name))
        if not os.path.exists(
                os.path.join(self._kwargs['save_dir'], self.dataset_name,
                             self._kwargs['model_name'])):
            os.makedirs(
                os.path.join(self._kwargs['save_dir'], self.dataset_name,
                             self._kwargs['model_name']))

        log_level = self._kwargs.get('log_level', 'INFO')
        self._logger = utils.get_logger(self.log_file_name,
                                        name=__name__,
                                        level=log_level)
        self._writer = tf.summary.FileWriter(self.log_file_name)
        # Record the full config in the log for reproducibility.
        self._logger.info(json.dumps(kwargs, indent=2))
        self._saved_file_name = 'best_model.ckpt'

        # Bidirectional id <-> index maps for users and items.
        user_id, reverse_user_id, item_id, reverse_item_id = \
            utils.load_ids(self._data_kwargs['dataset_dir'], self._data_kwargs['ids_file_name'])
        print(len(user_id), len(reverse_user_id), len(item_id),
              len(reverse_item_id))

        self.n_users = len(user_id)
        self.n_context = self._model_kwargs['context_size']

        data_examples, self.user_history, num_bins = utils.load_dataset_timestamp(
            self._data_kwargs['dataset_dir'],
            self._data_kwargs['dataset_name'], self.n_users, self.n_context,
            self.model_params['seq_len'])
        self.num_bins = num_bins

        # Flatten the relevant config values into the model-params dict.
        self.model_params['batch_size'] = self._data_kwargs['batch_size']
        self.model_params['user_size'] = self.n_users
        self.model_params['item_size'] = len(item_id)
        self.model_params['state_size'] = self._model_kwargs['state_size']
        self.model_params['emb_size'] = self._model_kwargs['emb_size']
        self.model_params['lr'] = self._train_kwargs['base_lr']
        self.model_params['n_bins'] = self.num_bins
        self.model_params['context_size'] = self.n_context
        # NOTE(review): despite its name, 'start_lr' is the number of
        # batches per epoch (examples // batch_size) — presumably a decay
        # schedule step count; confirm against the model code.
        self.model_params['start_lr'] = len(
            data_examples) // self._data_kwargs['batch_size']
        self.model_params['min_lr'] = self._train_kwargs['min_learning_rate']
        self.model_params['use_attn'] = self._model_kwargs['use_attn']
        self.model_params['normalize'] = self._model_kwargs['normalize']
        self.model_params['max_diff'] = self._model_kwargs['max_diff']
        # n_samples == -1 means "use the full item catalogue".
        if self._model_kwargs['n_samples'] == -1:
            self.model_params['n_samples'] = len(item_id)
        else:
            self.model_params['n_samples'] = self._model_kwargs['n_samples']
        self.model_params['comb'] = self._model_kwargs['comb']

        self.data_iterator = utils.Loader(data_examples,
                                          options=self.model_params)
Example #9
0
def test():
    """Smoke-test: parse config.yaml and construct the data Loader."""
    with open("config.yaml") as config_raw:
        # safe_load: yaml.load without an explicit Loader is deprecated and
        # can construct arbitrary Python objects from the file.
        cfg = yaml.safe_load(config_raw)
    # The Loader spawns the feeder processes.
    loader = utils.Loader(cfg)
Example #10
0
    # NOTE(review): this is the body of a function whose `def` line is not
    # visible in this snippet; `options` and `log` come from that scope.
    log.setLevel(logging.DEBUG)

    data_path = join(options['data_dir'], options['dataset_name'])
    # utils.write_seen_nodes(join(options['data_dir'], options['dataset_name']), 30)
    # Build the node -> index map from the graph file.
    node_index = utils.load_graph(data_path)
    options['node_size'] = len(node_index)
    # print(nx.info(G))
    # Load fixed-length cascade instances for both splits (limit=-1: no cap).
    train_instances, max_diff_train = utils.load_instances(data_path,
                                                           'train',
                                                           node_index,
                                                           options['seq_len'],
                                                           limit=-1)
    test_instances, max_diff_test = utils.load_instances(data_path,
                                                         'test',
                                                         node_index,
                                                         options['seq_len'],
                                                         limit=-1)
    # Normalization constant is taken from the training split only.
    options['max_diff'] = max_diff_train
    print(len(train_instances), len(test_instances))
    options['n_train'] = len(train_instances)

    train_loader = utils.Loader(train_instances, options)
    test_loader = utils.Loader(test_instances, options)

    log.info('running glimpse attention model')
    log.info('using attention:' + str(options['use_attention']))
    log.info(options)
    glimpse_ins = GlimpseAttentionModel(options, options['use_attention'],
                                        options['n_train'])
    glimpse_ins.run_model(train_loader, test_loader, options)
Example #11
0
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import net
import utils

# set some global parameters
learning_rate = 0.01
batch_size = 32
epoch = 1000
# NOTE(review): torch.optim.Adam requires the model parameters as its first
# argument — optim.Adam(lr=...) raises TypeError as written; the optimizer
# should be constructed after the network is built (see train() below).
optimizer = optim.Adam(lr=learning_rate)
# NOTE(review): `nn` is not imported in this snippet (only
# torch.nn.functional as F) — this line raises NameError as written.
loss_function = nn.CrossEntropyLoss()
# Placeholder dataset root; replace with the real path.
root_dir = 'xxxxx/xxx/'
use_cuda = torch.cuda.is_available()
# load your train and test data
# NOTE(review): `train_infor` / `test_infor` are undefined here — assumed
# to come from elsewhere in the original project.
train_data = utils.Loader(root_dir, train_infor, batch_size)
test_data = utils.Loader(root_dir, test_infor, batch_size)


def train(save_path):
    """
    train your network.

    Args:
        save_path: the path where your model saved
    """

    #set your network  net = yournet()
    net = vgg19()
    if use_cuda:
        net = net.cuda()