Example #1
File: test.py Project: jacklu2016/sli_rec
def test(train_file="data/train_data",
         test_file="data/test_data",
         save_path="saved_model/",
         model_type=MODEL_TYPE,
         seed=SEED):
    tf.set_random_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    with tf.Session() as sess:
        train_data, test_data = Iterator(train_file), Iterator(test_file)
        user_number, item_number, cate_number = train_data.get_id_numbers()

        if model_type in MODEL_DICT:
            cur_model = MODEL_DICT[model_type]
        else:
            print("{0} is not implemented".format(model_type))
            return
        model = cur_model(user_number, item_number, cate_number, EMBEDDING_DIM,
                          HIDDEN_SIZE, ATTENTION_SIZE)
        model_path = save_path + model_type
        model.restore(sess, model_path)
        test_auc, test_loss, test_acc = evaluate_epoch(sess, test_data, model)
        print(
            "test_auc: {0}, testing loss = {1}, testing accuracy = {2}".format(
                test_auc, test_loss, test_acc))
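The constants referenced above (MODEL_TYPE, MODEL_DICT, EMBEDDING_DIM, HIDDEN_SIZE, ATTENTION_SIZE) come from the project's settings module, which is not shown. A minimal sketch of the dict-based dispatch with hypothetical keys and stand-in classes, just to make the lookup pattern concrete:

class ModelA: pass  # stand-ins for the real model classes in the repo
class ModelB: pass

MODEL_DICT = {"model_a": ModelA, "model_b": ModelB}  # hypothetical keys
MODEL_TYPE = "model_a"

if MODEL_TYPE in MODEL_DICT:
    model = MODEL_DICT[MODEL_TYPE]()
else:
    print("{0} is not implemented".format(MODEL_TYPE))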
Example #2
File: wuzzer.py Project: 5l1v3r1/wuzzer
    def run(self):
        try:
            jobs = 0
            for method in self.methods:
                for mode in self.modes:
                    if self.ext_config is not None:
                        raw_requests = parse_file(self.ext_config)
                        for raw in raw_requests:
                            iterator = Iterator(self.host, mode, method, raw,
                                                self.proxy)  # proxy was undefined here; assumed to be stored on self
                            # yields (self.current_payload, self.current_parameter, self.request.assemble_request())
                            for index, request in enumerate(iterator):
                                task = Task(index, request[1], request[0],
                                            request[2])
                                jobs += 1
                                self.task_queue.put(task)
                    else:
                        iterator = Iterator(self.host, mode, method, None,
                                            self.proxy)
                        # yields (self.current_payload, self.current_parameter, self.request.assemble_request())
                        for index, request in enumerate(iterator):
                            task = Task(index, request[1], request[0],
                                        request[2])
                            jobs += 1
                            self.task_queue.put(task)
            for _ in range(self.workers_count):
                self.task_queue.put(None)

        except KeyboardInterrupt:
            pass
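run() is the producer half of a producer/consumer setup: it fills task_queue and then enqueues one None sentinel per worker so each consumer knows when to stop. A self-contained sketch of the matching consumer side, assuming the workers are plain threads (the print is a stand-in for sending the actual request):

import queue
import threading

def worker(task_queue):
    while True:
        task = task_queue.get()
        if task is None:  # sentinel: no more work for this worker
            break
        print("processing", task)  # stand-in for dispatching the request

task_queue = queue.Queue()
workers = [threading.Thread(target=worker, args=(task_queue,)) for _ in range(4)]
for w in workers:
    w.start()
for job in range(10):
    task_queue.put(job)
for _ in range(4):  # one sentinel per worker, mirroring run() above
    task_queue.put(None)
for w in workers:
    w.join()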
Example #3
File: train.py Project: jacklu2016/sli_rec
def train(train_file="data/train_data",
          test_file="data/test_data",
          save_path="saved_model/",
          model_type=MODEL_TYPE,
          seed=SEED):
    tf.set_random_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    with tf.Session() as sess:
        if model_type in MODEL_DICT:
            cur_model = MODEL_DICT[model_type]
        else:
            print("{0} is not implemented".format(model_type))
            return

        train_data, test_data = Iterator(train_file), Iterator(test_file)
        user_number, item_number, cate_number = train_data.get_id_numbers()
        model = cur_model(user_number, item_number, cate_number, EMBEDDING_DIM,
                          HIDDEN_SIZE, ATTENTION_SIZE)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        itr = 0
        learning_rate = LR
        best_auc = 0.0
        best_model_path = save_path + model_type
        for i in range(MAX_EPOCH):
            train_loss_sum = 0.0
            train_accuracy_sum = 0.0
            for src, tgt in train_data:
                user, targetitem, targetcategory, item_history, cate_history, timeinterval_history, timelast_history, timenow_history, mid_mask, label, seq_len = prepare_data(
                    src, tgt)
                train_loss, train_acc = model.train(sess, [
                    user, targetitem, targetcategory, item_history,
                    cate_history, timeinterval_history, timelast_history,
                    timenow_history, mid_mask, label, seq_len, learning_rate
                ])
                train_loss_sum += train_loss
                train_accuracy_sum += train_acc
                itr += 1
                if (itr % TEST_FREQ) == 0:
                    print(
                        "Iter: {0}, training loss = {1}, training accuracy = {2}"
                        .format(itr, train_loss_sum / TEST_FREQ,
                                train_accuracy_sum / TEST_FREQ))

                    test_auc, test_loss, test_acc = evaluate_epoch(
                        sess, test_data, model)
                    print(
                        "test_auc: {0}, testing loss = {1}, testing accuracy = {2}"
                        .format(test_auc, test_loss, test_acc))

                    if test_auc > best_auc:
                        best_auc = test_auc
                        model.save(sess, best_model_path)
                        print("Model saved in {0}".format(best_model_path))

                    train_loss_sum = 0.0
                    train_accuracy_sum = 0.0
Example #4
    def train(self):
        iter = 0
        train_iter = Iterator(time_interval=c.RAINY_TRAIN,
                              sample_mode="random",
                              seq_len=c.IN_SEQ + c.OUT_SEQ)
        # Merge all summary ops: merge_all collects every summary so it can be
        # written to disk and displayed in TensorBoard; unless you have special
        # requirements, this one call is enough to surface the usual training
        # information.
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter("/extend/rain_data/Logs",
                                       self.model.sess.graph)
        while iter < c.MAX_ITER:
            data, *_ = train_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = data[:, :c.IN_SEQ, ...]
            gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl = self.model.train_step(in_data, gt_data)
            logging.info(
                f"Iter {iter}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")

            if (iter + 1) % c.SAVE_ITER == 0:

                self.model.save_model(iter)

            if (iter + 1) % c.VALID_ITER == 0:
                self.run_benchmark(iter)
            iter += 1
Example #5
    def train(self):
        train_iter = Iterator(time_interval=c.RAINY_TRAIN,
                              sample_mode="random",
                              seq_len=self._in_seq + self._out_seq,
                              stride=1)
        while self.global_step < c.MAX_ITER:

            if c.ADVERSARIAL and self.global_step > c.ADV_INVOLVE:
                print("start d_model")
                in_data, gt_data = self.get_train_batch(train_iter)
                d_loss, *_ = self.d_model.train_step(in_data, gt_data,
                                                     self.g_model)
            else:
                d_loss = 0

            in_data, gt_data = self.get_train_batch(train_iter)
            g_loss, mse, gd_loss, global_step = self.g_model.train_step(
                in_data, gt_data, self.d_model)

            self.global_step = global_step

            self.logger.info(f"Iter {self.global_step}: \n\t "
                             f"g_loss: {g_loss:.4f} \n\t"
                             f"mse: {mse:.4f} \n\t "
                             f"mse_real: {gd_loss:.4f} \n\t"
                             f"d_loss: {d_loss:.4f}")

            if (self.global_step + 1) % c.SAVE_ITER == 0:
                self.save_model()

            if (self.global_step + 1) % c.VALID_ITER == 0:
                self.run_benchmark(global_step, mode="Valid")
Example #6
    def train(self):
        iter = 350000
        train_iter = Iterator(time_interval=c.RAINY_TRAIN,
                              sample_mode="random",
                              seq_len=c.IN_SEQ + c.OUT_SEQ)
        try:
            SummaryWriter = tf.train.SummaryWriter
        except AttributeError:  # renamed to tf.summary.FileWriter in TF >= 1.0
            SummaryWriter = tf.summary.FileWriter
        writer = SummaryWriter(c.SAVE_SUMMARY, self.model.sess.graph)
        while iter < c.MAX_ITER:
            data, *_ = train_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = data[:, :c.IN_SEQ, ...]
            gt_data = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl, summary = self.model.train_step(in_data, gt_data)

            logging.info(
                f"Iter {iter}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            # merged=self.model.sess.run(merged,feed_dict={self.model.in_data_480:in_data_480,self.model.gt_data_480:gt_data})
            writer.add_summary(summary, iter)
            if (iter + 1) % c.SAVE_ITER == 0:
                self.model.save_model(iter)

            if (iter + 1) % c.VALID_ITER == 0:
                self.run_benchmark(iter)
            # if (iter + 1) % c.TEST_ITER == 0:
            #         self.test(iter)
            iter += 1
Example #7
    def run_benchmark(self, iter, mode="Valid"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
            stride = 20
        else:
            time_interval = c.RAINY_TEST
            stride = 1
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=1)
        evaluator = Evaluator(iter)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)
            in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H, c.W, c.IN_CHANEL))
            gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H, c.W, 1))
            if isinstance(data, list):
                break
            in_data[...] = data[:, :c.IN_SEQ, ...]

            if c.IN_CHANEL == 3:
                gt_data[...] = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, :, :, 1:-1]
            elif c.IN_CHANEL == 1:
                gt_data[...] = data[:, c.IN_SEQ:c.IN_SEQ + c.OUT_SEQ, ...]
            else:
                raise NotImplementedError

            # in_date = date_clip[0][:c.IN_SEQ]

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)

            mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
            evaluator.evaluate(gt_data, pred)
            logging.info(f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}")
            i += 1
            if i % stride == 0:
                if c.IN_CHANEL == 3:
                    in_data = in_data[:, :, :, :, 1:-1]

                for b in range(c.BATCH_SIZE):
                    predict_date = date_clip[b][c.IN_SEQ]
                    logging.info(f"Save {predict_date} results")
                    if mode == "Valid":
                        save_path = os.path.join(c.SAVE_VALID, str(iter), predict_date.strftime("%Y%m%d%H%M"))
                    else:
                        save_path = os.path.join(c.SAVE_TEST, str(iter), predict_date.strftime("%Y%m%d%H%M"))

                    path = os.path.join(save_path, "in")
                    save_png(in_data[b], path)

                    path = os.path.join(save_path, "pred")
                    save_png(pred[b], path)

                    path = os.path.join(save_path, "out")
                    save_png(gt_data[b], path)
        evaluator.done()
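normalize_frames is imported from the project's utilities and is not shown here. A plausible minimal version, assuming it simply rescales raw radar-echo pixel values into [0, 1] (the exact scaling in the repository may differ):

import numpy as np

def normalize_frames(frames, max_value=255.0):
    # Hypothetical: scale raw pixel intensities into [0, 1].
    return np.asarray(frames, dtype=np.float32) / max_value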
Example #8
 def __init__(self, structure, **kwargs):
     kw_grid = dict()
     kw_ham  = dict()
     self.structure = structure
     self.grid      = Grid(structure, 30, **kw_grid)
     self.ham       = Hamiltonian(self.structure, self.grid, **kw_ham)
     self.it        = Iterator(self.ham)
     self.solved    = False
Example #9
    def __init__(self, opt):
        self.opt = opt

        absa_dataset = ABSADatesetReader(dataset=opt.dataset,
                                         embed_dim=opt.embed_dim)
        self.model = opt.model_class(absa_dataset.embedding_matrix,
                                     opt).to(opt.device)

        self.train_data_loader = Iterator(data=absa_dataset.train_data,
                                          batch_size=opt.batch_size,
                                          shuffle=True)
        self.test_data_loader = Iterator(data=absa_dataset.test_data,
                                         batch_size=opt.batch_size,
                                         shuffle=False)

        self._print_args()
        self.global_f1 = 0.

        if torch.cuda.is_available():
            print('cuda memory allocated:',
                  torch.cuda.memory_allocated(device=opt.device.index))
Example #10
 def test_return_odd_items(self):
     tt = [
         (['a', 'b', 'c', 'd'], ['a', 'c']),
         ('abababab', ['a', 'a', 'a', 'a']),
         (range(5), [0, 2, 4]),
         ((v for v in [9, 8, 7, 6, 5, 4]), [9, 7, 5]),
     ]
     for t in tt:
         data = t[0]
         wait = t[1]
         it = Iterator(data)
         self.assertEqual([v for v in it], wait,
                         'test failed with data: {}'.format(repr(t)))
Example #11
 def test_return_even_items(self):
     tt = [
         (['a', 'b', 'c', 'd'], ['b', 'd']),
         ('abababab', ['b', 'b', 'b', 'b']),
         (range(5), [1, 3]),
         ((v for v in [9, 8, 7, 6, 5, 4]), [8, 6, 4]),
     ]
     for t in tt:
         data = t[0]
         wait = t[1]
         it = Iterator(data, even=True)
         self.assertEqual([v for v in it], wait,
                         'test failed with data: {}'.format(repr(t)))
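Taken together, the two tests pin down the Iterator's contract: by default it yields the items at 0-based positions 0, 2, 4, ... ("odd items" in 1-based counting), and with even=True it yields positions 1, 3, 5, ... A minimal sketch that satisfies both tests, assuming the constructor signature shown above:

class Iterator:
    def __init__(self, data, even=False):
        self._data = iter(data)
        self._even = even

    def __iter__(self):
        # Keep 1-based even positions when even=True, odd positions otherwise.
        for index, value in enumerate(self._data):
            if (index % 2 == 1) == self._even:
                yield value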
Example #12
 def train(self):
     iterator = Iterator("201401010000", "201808010000")
     iter = 1
     while iter < c.ITER:
         in_data, gt_data = iterator.random_sample()
         l2, mse, rmse, mae = self.model.train_step(in_data, gt_data)
         logging.info(
             "Iter {}: \n\t l2: {} \n\t mse:{} \n\t rmse:{} \n\t mae:{}".
             format(iter, l2, mse, rmse, mae))
         if iter % c.SAVE_ITER == 0:
             self.model.save_model(iter)
         if iter % c.VALID_ITER == 0:
             self.valid(iter)
         iter += 1
Example #13
File: utils.py Project: Dutil/IFT6266
def generate_and_show_sample(fn,
                             nb=1,
                             seed=1993,
                             it=None,
                             verbose=True,
                             n_split=1,
                             return_64_64=False,
                             replace_middle=False):

    if it is None:
        it = Iterator(img_path="val2014",
                      load_caption=False,
                      process_text=True)

    choice = list(range(len(it)))
    if seed > 0:
        np.random.seed(seed)
        np.random.shuffle(choice)

    choice = choice[:nb] * 5

    #try:
    xs, ys, cs = zip(*[it[i] for i in choice])
    loss, preds = fn(xs, ys, cs)

    figs = []

    for pl in np.array_split(np.arange(nb), n_split):
        figs.append(
            show_sample([xs[i] for i in pl], [ys[i] for i in pl],
                        [preds[i] for i in pl],
                        len(pl),
                        return_64_64=return_64_64,
                        replace_middle=replace_middle))
    #except Exception as e:
    #    print e
    #    print "Oups!"

    caps = []

    try:
        if verbose and it.mapping is not None:
            for img in cs:
                sentence = [it.mapping[idx] for idx in img[0]]
                caps.append(' '.join(sentence))

    except AttributeError:
        pass

    return figs, caps
Example #14
    def test(self, iter):
        iterator = Iterator("201808010000", "201812310000", mode="valid")
        for in_data, out_data, date, save in iterator.sequent_sample():
            *_, result = self.model.valid(in_data, out_data)
            logging.info("Save {} results".format(date))
            save_path = c.SAVE_TEST_DIR + str(iter) + "/" + date[-12:] + "/"
            print(save_path)

            path = os.path.join(save_path, "in")
            save_png(in_data[0], path)

            path = os.path.join(save_path, "pred")
            save_png(result[0], path)

            path = os.path.join(save_path, "out")
            save_png(out_data[0], path)
Example #15
    def valid(self, iter):
        iterator = Iterator("201808010000", "201812310000", mode="valid")
        num = 1
        t_l2 = 0
        t_mse = 0
        t_rmse = 0
        t_mae = 0
        count = 0
        for in_data, out_data, date, save in iterator.sequent_sample():
            l2, mse, rmse, mae, result = self.model.valid(in_data, out_data)
            t_mse += mse
            t_rmse += rmse
            t_mae += mae
            t_l2 += l2
            count += 1

            logging.info("Valid {} {}: \n\t l2:{} \n\t mse:{} \n\t ".format(
                num, date, l2, mse) + "rmse:{} \n\t mae:{}".format(rmse, mae))

            if save:
                logging.info("Save {} results".format(date))
                save_path = c.SAVE_VALID_DIR + str(
                    iter) + "/" + date[-12:] + "/"

                path = os.path.join(save_path, "in")
                save_png(in_data[0], path)

                path = os.path.join(save_path, "out")
                save_png(result[0], path)

                path = os.path.join(save_path, "gt")
                save_png(out_data[0], path)
            num += 1

        t_mse /= count
        t_rmse /= count
        t_mae /= count
        t_l2 /= count
        logging.info("Valid in {}: \n\t l2 \n\t mse:{} \n\t ".format(
            iter, t_l2, t_mse) + "rmse:{} \n\t mae:{}".format(t_rmse, t_mae))
        logging.info("#" * 30)
Example #16
File: window.py Project: ozhenchuk/SysAn
 def start(self):
     for widget in self.graphics:
         widget.hide()
         widget.destroy()
     self.graphicstab.clear()
     dimensions = [self.dimensions[i].value() for i in range(3)]
     degrees = [self.polinomdegree[i].value() for i in range(3)]
     if self.lambdamethod[0].isChecked():
         lambda_flag = 0
     else:
         lambda_flag = 1
     mod = Iterator(self.samplevolume.value(), dimensions,
                    self.dimensions[3].value(), self.filename[0].text(),
                    self.polinomtype.currentIndex(), degrees, lambda_flag)
     mod.normalization()
     n_array = np.arange(float(self.samplevolume.value()))
     ydim = self.dimensions[3].value()
     for i in range(ydim):
         self.graphics.append(PlotManager(self))
         self.graphicstab.addTab(self.graphics[i], 'Y' + str(i))
     for i in range(ydim):
         self.graphics.append(PlotManager(self))
         self.graphicstab.addTab(self.graphics[ydim + i], 'res' + str(i))
     mod.approximate(self.filename[1].text())
     mod.denormalization()
     for i in range(ydim):
         self.graphics[i].ax.clear()
         self.graphics[i].ax.set_facecolor('#dddddd')
         self.graphics[i].ax.plot(n_array, mod.y[i], 'b', n_array,
                                  mod.y_cnt[i], '#D53206')
         self.graphics[i].canvas.draw()
     for i in range(ydim):
         resid = (mod.y[i] - mod.y_cnt[i]) / max(mod.y[i])
         for j in range(len(resid)):
             resid[j] = np.fabs(resid[j])
         print(mod.y[i], mod.y_cnt[i], resid)
         self.graphics[ydim + i].ax.clear()
         self.graphics[ydim + i].ax.set_facecolor('#dddddd')
         self.graphics[ydim + i].ax.plot(n_array, resid, '#0D6806')
         self.graphics[ydim + i].canvas.draw()
Example #17
def train(args):
    texts = utility.readlines_from_filepath('./test_texts.txt')
    labels = utility.readlines_from_filepath('./test_labels.txt')
    for text in texts:
        vocaburaly.new(text)

    vocab_num = len(vocaburaly)
    D = args.D
    optimizer = optimizers.SGD()
    doc_num = len(labels)
    batch_size = 10
    texts_int = []
    for text in texts:
        text_int = []
        for word in text.strip().split():
            word = word.lower()
            text_int.append(vocaburaly.w2i[word])
        texts_int.append(text_int)
    labels_int = list(range(doc_num))
    window_size = args.window_size
    epoch = args.epoch

    model = NLMBase(vocab_num, D, doc_num)
    optimizer.setup(model)

    for e in range(epoch):
        iterator = Iterator(batch_size, texts_int, labels_int, window_size)
        loss_acc = 0
        for i in tqdm(iterator):
            label = model.prepare_input([i[0]], dtype=np.int32)
            center = model.prepare_input([i[1]], dtype=np.int32)
            context = model.prepare_input(i[2], dtype=np.int32)

            model.cleargrads()
            loss = model(label, context, center)
            loss_acc += float(loss.data)
            loss.backward()
            optimizer.update()
        print(loss_acc / len(labels_int) / batch_size)
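The vocaburaly object used above is defined elsewhere in the project; from its usage, new(text) registers every word of a text and w2i maps each word to an integer id. A hypothetical stand-in with that interface:

class Vocabulary:
    """Hypothetical stand-in for the example's `vocaburaly` object."""

    def __init__(self):
        self.w2i = {}  # word -> integer id

    def new(self, text):
        for word in text.strip().split():
            word = word.lower()
            if word not in self.w2i:
                self.w2i[word] = len(self.w2i)

    def __len__(self):
        return len(self.w2i)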
Example #18
def train(args, model, data, val_data):
    dirname = 'save-vrnn/'
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    with open(os.path.join(dirname, 'config.pkl'), 'w') as f:
        cPickle.dump(args, f)

    ckpt = tf.train.get_checkpoint_state(dirname) #check if there exists a previously trained model in the checkpoint

    Xtrain,ytrain = data
    Xval, yval = val_data


    shape1 = np.shape(Xtrain)
    df1 = pd.DataFrame(np.reshape(Xtrain,(shape1[0],-1)))
    shape2 = np.shape(ytrain)
    df2 = pd.DataFrame(np.reshape(ytrain,(shape2[0],-1)))
    print("\nXtrain")
    print(df1.describe())
    print('\nytrain')
    print(df2.describe())

    train = Iterator(Xtrain,ytrain,batch_size = args.batch_size,n_steps=args.seq_length,shape_diff=True) #to split data into batches
    n_batches = train.nbatches
    Xtrain,ytrain = train.get_split()

    

    #split validation data into batches
    validate = Iterator(Xval,yval,batch_size = args.batch_size,n_steps=args.seq_length,shape_diff=True)
    val_nbatches = validate.nbatches
    Xval, yval = validate.get_split()

    myFile = open(dirname+'/outputValidation.csv', 'w')
    writer = csv.writer(myFile)
    writer.writerows([["Epoch","Train_Loss","MAE","MSE"]])

    mae = []
    mse = []
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter('logs/' + datetime.now().isoformat().replace(':', '-'), sess.graph)
        check = tf.add_check_numerics_ops()
        merged = tf.summary.merge_all()
        tf.global_variables_initializer().run() #initialize all variables in the graph as defined
        saver = tf.train.Saver(tf.global_variables())
        if ckpt:
            saver.restore(sess, ckpt.model_checkpoint_path) #restore previously saved model
            print "Loaded model"
        start = time.time()
        state_c = None
        state_h = None

        logs = []
        for e in xrange(args.num_epochs):
            #assign learning rate
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))

            #get the initial state of lstm cell
            state = model.initial_state_c, model.initial_state_h
            mae.append([])
            mse.append([])

            prior_mean = []
            phi_mean = []
            if (e + 1) % 10 != 0:
                for b in xrange(n_batches):
                    x = Xtrain[b]
                    y = ytrain[b]
                    feed = {model.input_x: x, model.input_y: y, model.target_data: y}  # input data: x and y; target data: y

                    #train the model on this batch of data
                    train_loss, _, cr, summary, sigma, mu, inp, target, state_c, state_h, pred, prior_mu, phi_mu = sess.run(
                        [model.cost, model.train_op, check, merged, model.sigma, model.mu, model.flat_input,
                         model.target, model.final_state_c, model.final_state_h, model.output,
                         model.prior_mu, model.phi_mu], feed)

                    prior_mean.append(prior_mu)
                    phi_mean.append(phi_mu)

                    summary_writer.add_summary(summary, e * n_batches + b)

                    pred = np.concatenate(pred, axis=1)
                    sigma = np.concatenate(sigma, axis=1)
                    mu = np.concatenate(mu, axis=1)

                    #the output from the model has shape [50000,1]; reshape to 3D (batch_size, time_steps, n_app)
                    pred = np.array(np.reshape(pred, [args.batch_size, args.seq_length, -1])).astype(float)
                    label = np.array(y).astype(float)

                    #compute mae and mse for the output
                    mae_i = np.reshape(np.absolute(label - pred), [-1, ]).mean()
                    mse_i = np.reshape((label - pred) ** 2, [-1, ]).mean()

                    mae[e].append(mae_i)
                    mse[e].append(mse_i)

                    #save the model every save_every global steps
                    if (e * n_batches + b) % args.save_every == 0 and (e * n_batches + b) > 0:
                        checkpoint_path = os.path.join(dirname, 'model_' + str(args.num_epochs) + '_' + str(args.learning_rate) + '.ckpt')
                        saver.save(sess, checkpoint_path, global_step=e * n_batches + b)
                        print "model saved to {}".format(checkpoint_path)

                    end = time.time()

                    print "{}/{} (epoch {}), train_loss = {:.6f}, time/batch = {:.1f}, std = {:.3f}" \
                        .format(e * n_batches + b,
                                args.num_epochs * n_batches,
                                e, args.chunk_samples * train_loss, end - start, sigma.mean(axis=0).mean(axis=0))
                    start = time.time()
            else:  #pass validation data
                print("\nValidation Data\n")
                loss = 0
                for b in xrange(val_nbatches):
                    x = Xval[b]
                    y = yval[b]
                    feed = {model.input_x: x, model.input_y: y, model.target_data: y}  # input data: x and y; target data: y

                    #evaluate the model on this batch of validation data
                    train_loss, cr, summary, sigma, mu, inp, target, state_c, state_h, pred = sess.run(
                        [model.cost, check, merged, model.sigma, model.mu, model.flat_input, model.target,
                         model.final_state_c, model.final_state_h, model.output], feed)
                    loss += train_loss
                    summary_writer.add_summary(summary, e * n_batches + b)

                    pred = np.concatenate(pred, axis=1)
                    sigma = np.concatenate(sigma, axis=1)
                    mu = np.concatenate(mu, axis=1)

                    #the output from the model has shape [50000,1]; reshape to 3D (batch_size, time_steps, n_app)
                    pred = np.array(np.reshape(pred, [args.batch_size, args.seq_length, -1])).astype(float)
                    label = np.array(y).astype(float)

                    #compute mae and mse for the output
                    mae_i = np.reshape(np.absolute(label - pred), [-1, ]).mean()
                    mse_i = np.reshape((label - pred) ** 2, [-1, ]).mean()

                    mae[e].append(mae_i)
                    mse[e].append(mse_i)

                    #save the model every save_every global steps
                    if (e * n_batches + b) % args.save_every == 0 and (e * n_batches + b) > 0:
                        checkpoint_path = os.path.join(dirname, 'model_' + str(args.num_epochs) + '_' + str(args.learning_rate) + '.ckpt')
                        saver.save(sess, checkpoint_path, global_step=e * n_batches + b)
                        print "model saved to {}".format(checkpoint_path)

                    end = time.time()

                    print "{}/{} (epoch {}), train_loss = {:.6f}, time/batch = {:.1f}, std = {:.3f}" \
                        .format(e * n_batches + b,
                                args.num_epochs * n_batches,
                                e, args.chunk_samples * train_loss, end - start, sigma.mean(axis=0).mean(axis=0))
                    start = time.time()
                logs.append([e, loss / val_nbatches, sum(mae[e]) / len(mae[e]), sum(mse[e]) / len(mse[e])])

            #the average mae, mse values in every epoch
            print "Epoch {}, mae = {:.3f}, mse = {:.3f}".format(e, sum(mae[e]) / len(mae[e]), sum(mse[e]) / len(mse[e]))

            print("prior_mu_mean:", np.mean(prior_mean))
            print("phi_mu_mean: ", np.mean(phi_mean))

        writer.writerows(logs)

        #path to save the final model
        checkpoint_path = os.path.join(dirname, 'final_model_' + str(args.num_epochs) + '_' + str(args.learning_rate) + '.ckpt')

        saver2 = tf.train.Saver()
        saver2.save(sess, checkpoint_path)

        print "model saved to {}".format(checkpoint_path)
Example #19
def train(args, model, data):
    dirname = 'save-vrnn/' + args.appliance
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    with open(os.path.join(dirname, 'config.pkl'), 'w') as f:
        cPickle.dump(args, f)

    ckpt = tf.train.get_checkpoint_state(
        dirname
    )  #check if there exists a previously trained model in the checkpoint

    Xtrain, ytrain = data
    train = Iterator(Xtrain,
                     ytrain,
                     batch_size=args.batch_size,
                     n_steps=args.seq_length)  #to split data into batches
    n_batches = train.nbatches
    Xtrain, ytrain = train.get_split()
    mae = []
    mse = []
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(
            'logs/' + datetime.now().isoformat().replace(':', '-'), sess.graph)
        check = tf.add_check_numerics_ops()
        merged = tf.summary.merge_all()
        tf.global_variables_initializer().run(
        )  #initialize all variables in the graph as defined
        saver = tf.train.Saver(tf.global_variables())
        if ckpt:
            saver.restore(
                sess,
                ckpt.model_checkpoint_path)  #restore previously saved model
            print "Loaded model"
        start = time.time()
        state_c = None
        state_h = None
        for e in xrange(args.num_epochs):
            #assign learning rate
            sess.run(
                tf.assign(model.lr, args.learning_rate * (args.decay_rate**e)))

            #get the initial state of lstm cell
            state = model.initial_state_c, model.initial_state_h
            mae.append([])
            mse.append([])
            for b in xrange(n_batches):
                x = Xtrain[b]
                y = ytrain[b]
                feed = {
                    model.input_x: x,
                    model.input_y: y,
                    model.target_data: y
                }  # input data : x and y ; target data : y

                #train the model on this batch of data
                train_loss, _, cr, summary, sigma, mu, inp, target, state_c, state_h, pred = sess.run(
                    [
                        model.cost, model.train_op, check, merged, model.sigma,
                        model.mu, model.flat_input, model.target,
                        model.final_state_c, model.final_state_h, model.output
                    ], feed)

                summary_writer.add_summary(summary, e * n_batches + b)

                #the output from the model is in the shape [50000,1] reshape to 3D (batch_size, time_steps, n_app)
                pred = np.array(np.reshape(pred, [250, 200, -1])).astype(float)
                label = np.array(y).astype(float)

                #compute mae and mse for the output
                mae_i = np.reshape(np.absolute((label - pred)), [
                    -1,
                ]).mean()
                mse_i = np.reshape((label - pred)**2, [
                    -1,
                ]).mean()

                mae[e].append(mae_i)
                mse[e].append(mse_i)

                #save the model after every 800 (monitoring_freq) epochs
                if (e * n_batches + b) % args.save_every == 0 and (
                    (e * n_batches + b) > 0):
                    checkpoint_path = os.path.join(
                        dirname, 'model_' + str(args.num_epochs) + '_' +
                        str(args.learning_rate) + '.ckpt')
                    saver.save(sess,
                               checkpoint_path,
                               global_step=e * n_batches + b)
                    print "model saved to {}".format(checkpoint_path)

                end = time.time()

                print "{}/{} (epoch {}), train_loss = {:.6f}, time/batch = {:.1f}, std = {:.3f}" \
                    .format(e * n_batches + b,
                            args.num_epochs * n_batches,
                            e, args.chunk_samples * train_loss, end - start, sigma.mean(axis=0).mean(axis=0))
                start = time.time()

            #the average mae,mse values in every epoch
            print "Epoch {}, mae = {:.3f}, mse = {:.3f}".format(
                e,
                sum(mae[e]) / len(mae[e]),
                sum(mse[e]) / len(mse[e]))

        #path to save the final model
        checkpoint_path = os.path.join(
            dirname, 'final_model_' + str(args.num_epochs) + '_' +
            str(args.learning_rate) + '.ckpt')

        saver2 = tf.train.Saver()
        saver2.save(sess, checkpoint_path)

        print "model saved to {}".format(checkpoint_path)
Example #20
from iterator import Iterator
import os
import sys

source_dir = "/Volumes/chillydisk/ds_video2"
"""
   This just takes all the images in the source directory and numbers them
   so that you can run ffmpeg on them.
"""

iterator = Iterator()
images = iterator.iterate(source_dir)

x = 0
for image in images:
    new_name = "/".join(image.split("/")[:-1]) + ("/image_%04d.jpg" % x)
    x += 1
    os.rename(image, new_name)

# ffmpeg -f image2 -framerate 30 -pattern_type sequence -start_number 0 -r 15 -i image_%04d.jpg -s 1080x608 -vcodec libx264 -b 5000k video.avi
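The same renumbering can be done without the project's Iterator, assuming the frames should be taken in sorted filename order (whether Iterator.iterate guarantees that order is not shown):

import glob
import os

source_dir = "/Volumes/chillydisk/ds_video2"  # as above

for x, image in enumerate(sorted(glob.glob(os.path.join(source_dir, "*.jpg")))):
    # Rename each frame to a zero-padded sequence number in place.
    new_name = os.path.join(os.path.dirname(image), "image_%04d.jpg" % x)
    os.rename(image, new_name)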
Example #21
def tf_example(args):
    """Build, train, and test the discriminator using TensorFlow frontend."""
    dtype = tf.float32

    # Get one-hot encoded English and German/French words.
    words, labels = text.get_data(args.length, args.language, True)
    data = Dataset(words, labels, args.validation_split)
    iterator = Iterator(data, args.n_epochs, args.batch_size)

    # Model input and output.
    with tf.name_scope('io'):
        x = tf.placeholder(dtype, [words.shape[0], None, words.shape[2]], 'x')
        y = tf.placeholder(dtype, [None, 1], 'y')

    # The neural network.
    with tf.variable_scope('lstm0'):
        lstm0 = tf.contrib.rnn.LSTMBlockFusedCell(args.n_state)
        lstm0_output, lstm0_state = lstm0(x, dtype=dtype)

    with tf.variable_scope('lstm1'):
        lstm1 = tf.contrib.rnn.LSTMBlockFusedCell(args.n_state)
        lstm1_output, lstm1_state = lstm1(lstm0_output, dtype=dtype)

    with tf.variable_scope('dense'):
        # Apply the sigmoid in the loss, not in the dense layer.
        logits = tf.layers.dense(lstm1_output[-1, :, :], 1, name='logits')

    # The training loss.
    with tf.name_scope('loss'):
        loss = tf.losses.sigmoid_cross_entropy(y, logits)

    # Classification accuracy.  y = 1 iff logits > 0.
    with tf.name_scope('accuracy'):
        correct = tf.cast(tf.equal(tf.cast(y, tf.bool), tf.greater(logits, 0)),
                          dtype)
        accuracy = tf.reduce_mean(correct)

    # The optimizer.
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer()
        global_step = tf.Variable(0, False, name='global_step')

    # The training operation.
    with tf.name_scope('train'):
        train_op = optimizer.minimize(loss, global_step)

    # Label inputs.
    with tf.name_scope('predict'):
        label = tf.sigmoid(logits, 'label')

    # One-hot encoded words to label.
    test_words = text.get_test_data(args.language)
    words_encoded = text.one_hot(test_words, args.length, True)

    # Run the training and testing.
    with tf.Session() as session:
        tf.global_variables_initializer().run()

        # Run through the minibatches.
        for data_x, data_y in iterator:
            # Run the training operation and get the loss.
            train_op.run({x: data_x, y: data_y})

            # Report diagnostics at every epoch.
            if iterator.new_epoch:
                # Report training loss and accuracy.
                train_loss, train_accuracy = session.run([loss, accuracy], {
                    x: data_x,
                    y: data_y
                })
                # Report validation loss and accuracy.
                val_loss, val_accuracy = session.run([loss, accuracy], {
                    x: data.x['val'],
                    y: data.y['val']
                })

                print('Epoch {}:'.format(iterator.epoch))
                print('    Train: loss = {:.6f}, accuracy = {:.6f}'.format(
                    train_loss, train_accuracy))
                print(
                    '    Validation: loss = {:.6f}, accuracy = {:.6f}'.format(
                        val_loss, val_accuracy))

        # Label words.
        test_labels = label.eval({x: words_encoded})

        # Save all variables.
        saver = tf.train.Saver()
        saver.save(session, os.getcwd() + '/tf_example.ckpt')

    # Print predictions.
    print('\nWord: P({})'.format(args.language.capitalize()))

    for word, label in zip(test_words, test_labels):
        print('{}: {:.3f}'.format(word, float(label)))
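One detail worth noting: tf.contrib.rnn.LSTMBlockFusedCell consumes time-major input, [time, batch, features], which is why the x placeholder above puts the sequence length (words.shape[0]) first and leaves the batch dimension as the None middle axis. A shape-only sanity check with hypothetical sizes:

import numpy as np

seq_len, batch, features = 10, 32, 27  # hypothetical sizes
x_batch = np.zeros((seq_len, batch, features), dtype=np.float32)
print(x_batch.shape)  # (10, 32, 27): time-major, as the fused cell expects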
Example #22
File: main.py Project: RickNelen/KBE
    # Return the choice True or False for wheels
    wheels_choice = contents[17] in (options[0], options[1])

# -----------------------------------------------------------------------------
# Run the KBE app
# -----------------------------------------------------------------------------

if __name__ == '__main__':
    from parapy.gui import display

    pav = Iterator(label='PAV',
                   iterate=True,
                   n_passengers=passengers,
                   range_in_km=range_in_km,
                   max_span=max_span,
                   quality_choice=quality_choice,
                   wheels_choice=wheels_choice,
                   cruise_speed=cruise_speed,
                   primary_colour=primary_colour_in,
                   secondary_colour=secondary_colour_in)

    # As the client is assumed to be a non-expert, they are not provided
    # with the AVL analysis that is being run behind the scenes. They get
    # the GUI as clean as possible. If the client does not need to see the
    # vehicle at all, the following line can be commented out.

    display(pav)

    # However, if required, one of the following lines can be uncommented to
    # either show the AVL results for the initial aircraft or the iterated
    # aircraft
예제 #23
0
'''
    PFN internship 2019 coding task
    machine learning
    task-3
    Issei NAKASONE
'''

import datasets as D
import optimizers as op
from gnn import GNN, TrainGNN
from iterator import Iterator

dirpath = '../datasets/train/'
batch_size = 128

train, test = D.get_dataset(dirpath, test_ratio=0.25)
train_iter = Iterator(train, batch_size)
test_iter = Iterator(test, batch_size)

model = GNN()
optimizer = op.SGD()
#optimizer = op.MomentumSGD()
optimizer.setup(model)
trainer = TrainGNN(optimizer, train_iter, test_iter)
trainer.start(epoch=50)
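The Iterator here wraps a dataset and a batch size. As a reference for what such a wrapper typically does, a minimal hypothetical mini-batch iterator over an in-memory dataset (the task's real Iterator may shuffle or pad differently):

import random

class MiniBatchIterator:
    """Hypothetical minimal mini-batch iterator over an in-memory dataset."""

    def __init__(self, dataset, batch_size, shuffle=True):
        self._data = list(dataset)
        self._batch_size = batch_size
        self._shuffle = shuffle

    def __iter__(self):
        order = list(range(len(self._data)))
        if self._shuffle:
            random.shuffle(order)
        for start in range(0, len(order), self._batch_size):
            yield [self._data[i] for i in order[start:start + self._batch_size]]

batches = list(MiniBatchIterator(range(10), batch_size=4))
print([len(b) for b in batches])  # e.g. [4, 4, 2]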
Example #24
    def run_benchmark(self, iter, mode="Valid"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
        else:
            time_interval = c.RAINY_TEST
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=10,
                             mode=mode)
        i = 1
        while not test_iter.use_up:

            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)

            data = np.array(data)
            if data.shape[0] == 0:
                break
            print(data.shape)
            if mode == 'Valid':
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
                in_data[:, :, :, :, :] = data[:, :c.IN_SEQ, :, :, :]
                gt_data[:, :, :, :, :] = data[:, c.IN_SEQ:c.IN_SEQ +
                                              c.OUT_SEQ, :, :, :]
            else:
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.DISPLAY_IN_SEQ,
                                          c.H_TEST, c.W_TEST, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TEST,
                                          c.W_TEST, c.IN_CHANEL))
                in_data[:, :, :, :, :] = data[:, :c.DISPLAY_IN_SEQ, :, :, :]
                gt_data[:, :, :, :, :] = data[
                    :, c.DISPLAY_IN_SEQ:c.DISPLAY_IN_SEQ + c.OUT_SEQ, :, :, :]

            if isinstance(data, list):
                break

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            if mode == 'Valid':
                mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
                logging.info(
                    f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}"
                )
            else:
                pred = self.model.pred_step(in_data[:, 5:10])
            i += 1
            for b in range(c.BATCH_SIZE):
                predict_date = date_clip[b]
                logging.info(f"Save {predict_date} results")
                if mode == "Valid":
                    save_path = os.path.join(
                        c.SAVE_VALID, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    display_path = os.path.join(
                        c.SAVE_DISPLAY, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    save_in_data = in_data[b]
                    save_out_data = gt_data[b]
                    save_pred_data = pred[b]
                else:
                    display_path = os.path.join(
                        c.SAVE_DISPLAY, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    save_path = os.path.join(
                        c.SAVE_TEST, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                    save_in_data = np.zeros((c.DISPLAY_IN_SEQ, 900, 900, 1))
                    save_out_data = np.zeros((c.OUT_SEQ, 900, 900, 1))
                    save_pred_data = np.zeros((c.PREDICT_LENGTH, 900, 900, 1))
                    save_in_data[:, 90:-90, :, :] = in_data[b]
                    save_out_data[:, 90:-90, :, :] = gt_data[b]
                    save_pred_data[:, 90:-90, :, :] = pred[b]

                path = os.path.join(save_path, "in")
                save_png(save_in_data, path)
                if mode != 'Valid':
                    multi_process_transfer(path, display_path + '/in')

                path = os.path.join(save_path, "pred")
                save_png(save_pred_data, path)
                if mode != 'Valid':
                    os.system(r'./post_processing/postprocessing' + ' ' +
                              save_path)
                    pred_display_dir = os.path.join(display_path, 'pred')
                    multi_process_transfer(path, pred_display_dir)
                # multi_process_transfer(path, display_path + 'pred')

                path = os.path.join(save_path, "out")
                save_png(save_out_data, path)
                if mode != 'Valid':
                    multi_process_transfer(path, display_path + '/out')
Example #25
 def __iter__(self):
     return Iterator(self)
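This one-liner is the classic external-iterator pattern: the container's __iter__ delegates to a separate Iterator object that tracks its own position. A self-contained sketch of both halves (Collection and its _items are hypothetical names):

class Iterator:
    def __init__(self, collection):
        self._collection = collection
        self._index = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Walk the container's items, then signal exhaustion.
        if self._index >= len(self._collection._items):
            raise StopIteration
        item = self._collection._items[self._index]
        self._index += 1
        return item

class Collection:
    def __init__(self, items):
        self._items = list(items)

    def __iter__(self):
        return Iterator(self)

print(list(Collection("abc")))  # ['a', 'b', 'c']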
Example #26
import os

import theano.tensor as T

from iterator import Iterator
import ae_nets as autoencoder

filename = 'best_model'
learning_rate = 0.01
weight_decay = 0
batch_size = 512
extract_center = True
load_caption = True
n_filters = 32
nb_var = 750
filter_size = 3
pool_factor = 2

training_iteration = Iterator(which_set='train',
                              batch_size=batch_size,
                              extract_center=extract_center,
                              load_caption=load_caption)
valid_iteration = Iterator(which_set='valid',
                           batch_size=batch_size,
                           extract_center=extract_center,
                           load_caption=load_caption)
test_iteration = None
training_batches = training_iteration.n_batches
validation_batches = valid_iteration.n_batches
n_batches_test = test_iteration.n_batches if test_iteration is not None else 0
savingpath = '/Users/Omar/anaconda/lib/python3.6/site-packages/spyder/utils/site/load_models/'
loadingpath = '/Users/Omar/anaconda/lib/python3.6/site-packages/spyder/utils/site/load_models/'
weight_path = loadingpath
weight_path = os.path.join(weight_path, filename, 'best_model.npz')  # original joined an undefined WEIGHTS_PATH; loadingpath appears to be the intended base
model_image_input = T.tensor4('image input')
model_captions_input = T.matrix('captions input')
Example #27
import tensorflow as tf
from tensorflow import keras as ks
import matplotlib.pyplot as plt
import numpy as np
from model import Model
from mnistdataset import MnistDataSet
from iterator import Iterator
from variance import Variance

path = ""

model = Model(path + "/modelosimple.h5")
dataset = MnistDataSet()
iterator = Iterator(model, dataset)

variance = Variance(iterator)

variance.compute(10, 2)

plt.figure()
plt.title("Transformational Variance")
plt.plot(model.layers_names, variance.variance_layers)
print(variance.variance_layers)

model = Model(path + "/modelosimple.h5")
dataset = MnistDataSet()
dataset.transpose()
iterator = Iterator(model, dataset)

variance = Variance(iterator)
    def run_benchmark(self, iter, mode="Test"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
            stride = 5
        else:
            time_interval = c.RAINY_TEST
            stride = 1
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=self._in_seq + self._out_seq,
                             stride=1)
        evaluator = Evaluator(iter, length=self._out_seq, mode=mode)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=self._batch)
            in_data = np.zeros(shape=(self._batch, self._in_seq, self._h,
                                      self._w, c.IN_CHANEL))
            gt_data = np.zeros(shape=(self._batch, self._out_seq, self._h,
                                      self._w, 1))
            if isinstance(data, list):
                break
            in_data[...] = data[:, :self._in_seq, :, :, :]

            if c.IN_CHANEL == 3:
                gt_data[...] = data[:, self._in_seq:self._in_seq +
                                    self._out_seq, :, :, :]
            elif c.IN_CHANEL == 1:
                gt_data[...] = data[:, self._in_seq:self._in_seq +
                                    self._out_seq, :, :, :]
            else:
                raise NotImplementedError

            # in_date = date_clip[0][:c.IN_SEQ]

            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            in_data = crop_img(in_data)
            gt_data = crop_img(gt_data)
            mse, mae, gdl, pred = self.g_model.valid_step(in_data, gt_data)
            evaluator.evaluate(gt_data, pred)
            self.logger.info(
                f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}"
            )
            i += 1
            if i % stride == 0:
                if c.IN_CHANEL == 3:
                    in_data = in_data[:, :, :, :, 1:-1]

                for b in range(self._batch):
                    predict_date = date_clip[b][self._in_seq - 1]
                    self.logger.info(f"Save {predict_date} results")
                    if mode == "Valid":
                        save_path = os.path.join(
                            c.SAVE_VALID, str(iter),
                            predict_date.strftime("%Y%m%d%H%M"))
                    else:
                        save_path = os.path.join(
                            c.SAVE_TEST, str(iter),
                            predict_date.strftime("%Y%m%d%H%M"))

                    path = os.path.join(save_path, "in")
                    save_png(in_data[b], path)

                    path = os.path.join(save_path, "pred")
                    save_png(pred[b], path)

                    path = os.path.join(save_path, "out")
                    save_png(gt_data[b], path)
        evaluator.done()
        self.notifier.eval(iter, evaluator.result_path)
Example #29
def test(args, data):
    '''
    This function performs the testing on new data using the weights and
    biases from the trained model. It generates predictions with a new model,
    defined in test_VRNN, and computes the MSE and MAE of those predictions.

    Arguments
    args: the saved arguments of the trained model.
    data: test data (unseen by the model)
    '''

    #directory where the trained model is saved
    dirname = 'save-vrnn'

    #testing data
    Xtest, ytest = data

    #Iterator object to split the data into batches
    test = Iterator(Xtest,
                    ytest,
                    batch_size=args.batch_size,
                    n_steps=args.seq_length,
                    shape_diff=True)
    n_batches = test.nbatches
    Xtest, ytest = test.get_split()

    #create a new session to get the stored layers from trained model and also to run testing
    with tf.Session() as sess:

        old_params = []

        #import the trained model's graph into this session
        new_saver = tf.train.import_meta_graph('save-vrnn/final_model_' +
                                               str(args.num_epochs) + '_' +
                                               str(args.learning_rate) +
                                               '.ckpt.meta')
        new_saver.restore(sess, tf.train.latest_checkpoint('save-vrnn/'))

        g1 = tf.get_default_graph()

        #layers from trained model whose weights and biases are required in the test model
        test_layers = [
            "rnn/VartiationalRNNCell/x_1/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/x_1/Linear/bias:0",
            "rnn/VartiationalRNNCell/Prior/hidden/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Prior/hidden/Linear/bias:0",
            "rnn/VartiationalRNNCell/Prior/mu/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Prior/mu/Linear/bias:0",
            "rnn/VartiationalRNNCell/Prior/sigma/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Prior/sigma/Linear/bias:0",
            "rnn/VartiationalRNNCell/z_1/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/z_1/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/hidden/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/hidden/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/mu_1/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/mu_1/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/sigma_1/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/sigma_1/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/coeff_1/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/coeff_1/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/mu_2/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/mu_2/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/sigma_2/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/sigma_2/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/coeff_2/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/coeff_2/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/mu_3/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/mu_3/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/sigma_3/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/sigma_3/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/coeff_3/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/coeff_3/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/mu_4/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/mu_4/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/sigma_4/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/sigma_4/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/coeff_4/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/coeff_4/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/mu_5/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/mu_5/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/sigma_5/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/sigma_5/Linear/bias:0",
            "rnn/VartiationalRNNCell/Theta/coeff_5/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/Theta/coeff_5/Linear/bias:0",
            "rnn/VartiationalRNNCell/y_1/Linear/Matrix:0",
            "rnn/VartiationalRNNCell/y_1/Linear/bias:0",
            "rnn/VartiationalRNNCell/lstm_cell/weights:0",
            "rnn/VartiationalRNNCell/lstm_cell/biases:0"
        ]

        #get required tensors and variables for the test model
        for layer in test_layers:
            tensor = g1.get_tensor_by_name(layer)
            old_params.append(tensor)

        print("Loaded Model Weights and Biases!")

        #create an object of test model
        model = test_VRNN(args)

        #get the trainable variables from the graph
        trainable = tf.trainable_variables()

        #The last 46 variables in all belong to the test model (rest are from the train model that was imported)
        trainable = trainable[-len(test_layers):]

        num_train_var = len(trainable)

        #assign the weight and bias tensors in test model
        for i in range(num_train_var):
            assign_op = trainable[i].assign(old_params[i])
            sess.run(assign_op)

        print("Assigned weights to Test Model")

        mae = []
        mse = []

        timesteps = [x for x in range(1, 201)]

        #run the testing through all the batches in the test data
        for b in xrange(n_batches):
            x = Xtest[b]
            feed = {model.input_x: x}  #input to test model
            pred = sess.run(model.pred, feed)

            #reshape the predicted data to [batch_size, timesteps, n_app] size
            pred = np.concatenate(pred, axis=1)
            pred = np.reshape(pred, [args.batch_size, args.seq_length, -1])
            label = np.array(ytest[b]).astype(float)
            pred = np.array(pred).astype(float)

            #compute the mse and mae values on the predictions
            mae_i = np.reshape(np.absolute((label - pred)), [
                -1,
            ]).mean()
            mse_i = np.reshape((label - pred)**2, [
                -1,
            ]).mean()
            mae.append(mae_i)
            mse.append(mse_i)
            '''if((b+1)%5 == 0):
				se_idx = np.argmin(np.reshape((label - pred)**2,[-1,]))
				line1, = plt.plot(timesteps, np.reshape(pred[se_idx//args.seq_length],(-1,)),label="pred")
				line2, = plt.plot(timesteps, np.reshape(np.reshape(x,(250,200,-1))[se_idx//args.seq_length],(-1,)),label="grd_truth")
				plt.legend(handles=[line1, line2])
				plt.title("Prediction vs Groundtruth for "+args.appliance+", batch "+str(b+1))
				plt.xlabel("Timesteps")
				plt.ylabel("Power(kW)")
				plt.show()
				plt.savefig("output/plots/Pred10_"+args.appliance+"_batch"+str(b+1)+".png")'''

        print("MAE:", sum(mae) / len(mae))
        print("MSE:", sum(mse) / len(mse))
Example #30
    def run_benchmark(self, iter, mode="Valid"):
        if mode == "Valid":
            time_interval = c.RAINY_VALID
        else:
            time_interval = c.RAINY_TEST
        test_iter = Iterator(time_interval=time_interval,
                             sample_mode="sequent",
                             seq_len=c.IN_SEQ + c.OUT_SEQ,
                             stride=20,
                             mode=mode)
        i = 1
        while not test_iter.use_up:
            data, date_clip, *_ = test_iter.sample(batch_size=c.BATCH_SIZE)

            if mode == 'Valid':
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TRAIN,
                                          c.W_TRAIN, c.IN_CHANEL))
            else:
                in_data = np.zeros(shape=(c.BATCH_SIZE, c.IN_SEQ, c.H_TEST,
                                          c.W_TEST, c.IN_CHANEL))
                gt_data = np.zeros(shape=(c.BATCH_SIZE, c.OUT_SEQ, c.H_TEST,
                                          c.W_TEST, c.IN_CHANEL))

            if isinstance(data, list):
                break
            in_data[:, :, :, :, :] = data[:, :c.IN_SEQ, :, :, :]
            gt_data[:, :, :, :, :] = data[:, c.IN_SEQ:c.IN_SEQ +
                                          c.OUT_SEQ, :, :, :]
            if c.NORMALIZE:
                in_data = normalize_frames(in_data)
                gt_data = normalize_frames(gt_data)
            if mode == 'Valid':
                mse, mae, gdl, pred = self.model.valid_step(in_data, gt_data)
                logging.info(
                    f"Iter {iter} {i}: \n\t mse:{mse} \n\t mae:{mae} \n\t gdl:{gdl}"
                )
            else:
                pred = self.model.pred_step(in_data)
            i += 1
            for b in range(c.BATCH_SIZE):
                predict_date = date_clip[b]
                logging.info(f"Save {predict_date} results")
                if mode == "Valid":
                    save_path = os.path.join(
                        c.SAVE_VALID, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))
                else:
                    save_path = os.path.join(
                        c.SAVE_TEST, str(iter),
                        predict_date.strftime("%Y%m%d%H%M"))

                path = os.path.join(save_path, "in")
                save_png(in_data[0], path)

                path = os.path.join(save_path, "pred")
                save_png(pred[0], path)

                path = os.path.join(save_path, "out")
                save_png(gt_data[0], path)