Beispiel #1
0
def measure():
    """Take a batch of ultrasound distance readings and persist the average.

    Re-measures in a loop (instead of the original recursion, which could
    grow the stack without bound on a persistently noisy sensor) until both
    the minimum and maximum deviation from the batch average fall within
    DIFF_TOLERANCE, then saves the average distance and triggers a volume
    update.
    """
    while True:
        print("MEASUREMENTS - MEASURE")
        ULTRASOUND.setup()
        measurements = [ULTRASOUND.getDistance()
                        for _ in range(NUMBER_OF_MEASUREMENTS)]

        print("MEASUREMENTS - MEASURE - VALUES:")
        print(measurements)

        average = sum(measurements) / float(len(measurements))
        print("MEASUREMENTS - MEASURE - AVERAGE = [%.3f]" % average)

        minOffset = average - min(measurements)
        print("MEASUREMENTS - MEASURE - MIN OFFSET = [%.3f]" % minOffset)
        maxOffset = max(measurements) - average
        print("MEASUREMENTS - MEASURE - MAX OFFSET = [%.3f]" % maxOffset)

        # Accept only a stable batch; otherwise take a fresh batch.
        if minOffset <= DIFF_TOLERANCE and maxOffset <= DIFF_TOLERANCE:
            DATA.save_distance(average)
            VOLUME.update()
            return
Beispiel #2
0
def update(enabled,name,date,ml,skipDays):
	"""Persist every field of the watering-schedule configuration."""
	print("SCHEDULE - UPDATE")
	savers = (
		(DATA.save_enabled, enabled),
		(DATA.save_name, name),
		(DATA.save_date, date),
		(DATA.save_ml, ml),
		(DATA.save_skipDays, skipDays),
	)
	for save, value in savers:
		save(value)
Beispiel #3
0
def lstm_stock(fname):
    """Train an LSTM on a stock's 'Open' price series and plot actual
    vs. predicted prices.

    fname: path to a CSV file with a header row that includes an 'Open'
    column. Prices are normalised by the maximum open price before
    training and de-normalised again for the chart.
    """
    df = pd.read_csv(fname, header=0)
    print(df.columns)
    MAX_PRICE = df.Open.max()
    print('MAX_PRICE', MAX_PRICE)
    # Drop rows with a zero (missing) open price, keep only 'Open',
    # and scale into [0, 1].
    df = df[df.Open != 0][['Open']]
    df.Open = df.Open / MAX_PRICE
    # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
    # 1.0; to_numpy() is the supported equivalent.
    xy = df.to_numpy()
    #xy = MinMaxScaler(xy)

    train, validation, test = DATA.split_data(xy)
    WINDOWSIZE = 60
    train_x, train_y = DATA.getSeriesData(train, WINDOWSIZE, elementdim=1)
    valid_x, valid_y = DATA.getSeriesData(validation, WINDOWSIZE, elementdim=1)
    test_x, test_y = DATA.getSeriesData(test, WINDOWSIZE, elementdim=1)

    print('TRAIN', train.shape)
    print('TEST', test.shape)
    print('TRAIN X', train_x.shape)
    print('TRAIN Y', train_y.shape)

    lstm = LSTM.LSTM(1, WINDOWSIZE, 4, 1, loss='square', opt='adam')
    lstm.set_validation_data(valid_x, valid_y, valid_stop=0.0001)
    lstm.run(train_x,
             train_y,
             batch_size=int(train_x.shape[0] / 20),
             epochs=1000)
    lstm.do_test(test_x, test_y)
    predict_y = lstm.predict(test_x)
    # De-normalise back to price units for plotting.
    chart = PLOT.LineChart()
    chart.line(test_y[:, 0] * MAX_PRICE, 'Actual')
    chart.line(predict_y[:, 0] * MAX_PRICE, 'ByNN')
    chart.show()
def test(Data, model, n = 6):
    """
    Data:'MNIST' or 'CIFAR'
    Show n originals next to their reconstructions from *model*.
    """
    if Data == 'MNIST':
        _, test_loader = DATA.MNISTLoader(p, download=False, batch_size=batch_size)
    elif Data == 'CIFAR':
        _, test_loader = DATA.CIFAR10Loader(p, download=False, batch_size=batch_size)
    else:
        raise ValueError("Data set not support")

    model = model.eval()
    with torch.no_grad():
        # Take the first batch only.
        _, (batch, _) = next(enumerate(test_loader))
        batch = batch.to(device)
        recon, mu, logvar = model.forward(batch)
        if Data == 'MNIST':
            side_by_side = torch.cat([batch[:n], recon.view(batch_size, 1, 28, 28)[:n]])
            plt.pause(0.001)
            plt.imshow(side_by_side.view(28 * n * 2, 28 * 1).cpu())
        elif Data == 'CIFAR':
            side_by_side = torch.cat([batch[:n], recon.view(batch_size, 3, 32, 32)[:n]])
            plt.pause(0.001)
            plt.imshow(np.moveaxis(side_by_side.cpu().numpy(), 1, 3).reshape(n * 2 * 32, 32, 3))
Beispiel #5
0
def lstm_with_sin_cos():
    """Fit a two-feature LSTM on paired sin/cos series and scatter-plot
    actual vs. predicted points."""
    NUM_DATA = 1200
    axis = np.linspace(0, 40, NUM_DATA, dtype=np.float32)
    # Two offset/scaled waves stacked into a (NUM_DATA, 2) array.
    merge = np.stack((np.sin(axis) + 0.1, np.cos(axis) * 2), axis=-1)
    print(merge[:3])

    train, validation, test = DATA.split_data(merge)
    WINDOWSIZE = 40
    train_x, train_y = DATA.getSeriesData(train, WINDOWSIZE, elementdim=2)
    valid_x, valid_y = DATA.getSeriesData(validation, WINDOWSIZE, elementdim=2)
    test_x, test_y = DATA.getSeriesData(test, WINDOWSIZE, elementdim=2)

    for label, arr in (('TRAIN', train), ('TEST', test),
                       ('TRAIN X', train_x), ('TRAIN Y', train_y)):
        print(label, arr.shape)

    lstm = LSTM.LSTM(2, WINDOWSIZE, 2, 2, loss='square', opt='adam')
    lstm.set_validation_data(valid_x, valid_y, valid_stop=0.0001)
    lstm.run(train_x, train_y,
             batch_size=int(train_x.shape[0] / 20),
             epochs=1000)
    lstm.do_test(test_x, test_y)
    predict_y = lstm.predict(test_x)
    chart = PLOT.LineChart()
    chart.scatter(test_y[:, 0], test_y[:, 1], 'g', 'Actual')
    chart.scatter(predict_y[:, 0], predict_y[:, 1], 'r', 'ByNN')
    chart.show()
Beispiel #6
0
def saveToken():
    """Store the push-notification token delivered in a POST body."""
    print("API - SAVE TOKEN XX")
    if request.method != "POST":
        return {"data": {}}
    print("SAVE TOKEN, GET FROM REQUEST = ")
    token = request.data.get("token")
    print(token)
    DATA.save_token(token)
    return {"data": {}}
Beispiel #7
0
    def __init__(self, dataset, splitratio=0.9, startline=0):
        """Wrap *dataset* and split it into train/test Dataset objects.

        splitratio: fraction of rows going to the training split.
        startline: first row of the split window.
        """
        self.dataset = DS.Dataset(dataset)
        split = self.dataset.splitdataset(splitratio, startline)
        self.trainData, self.testData = split
        if Global_V.PRINTPAR == 3:
            print(self.trainData, '\n', self.testData)
        self.trainDataSet = DS.Dataset(self.trainData)
        self.testDataSet = DS.Dataset(self.testData)
Beispiel #8
0
 def __init__(self, dataset, splitratio=0.9, startline=0):
     """Wrap *dataset*, split it into train/test parts, and precompute the
     conditional mutual information (CMI) matrix on the training split —
     presumably for the TAN scheme used elsewhere in this file."""
     self.dataset = DS.Dataset(dataset)
     self.trainData, self.testData = self.dataset.splitdataset(splitratio, startline)
     if Global_V.PRINTPAR == 3:
         print(self.trainData, '\n', self.testData)
     self.trainDataSet = DS.Dataset(self.trainData)
     self.testDataSet = DS.Dataset(self.testData)
     # How to make self.trainDataSet / self.testDataSet also be dataset objects??? (original author's open question)
     self.parents = []
     # Zero NxN matrix (N = attribute count) to be filled by getCondMutInf.
     self.cmi_temp = [[0] * self.trainDataSet.getNoAttr() for i in range(self.trainDataSet.getNoAttr())]
     self.cmi = self.trainDataSet.getCondMutInf(self.cmi_temp)  # get the cmi with the trainDataSet
def main():
    """Render two interpolation grids between validation face images.

    Saves (1) a DxD bilinear interpolation grid spanning four chosen
    validation images and (2) a D-step diagonal interpolation between two
    of them. Python 2 code (xrange).
    """
    # Constants.
    OUTPUT_DIR = './100-NET12-VIS/'
    FOLD_FOR_VAL = 0

    # Load the cross-validation dataset.
    [TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y, VAL_Z] = DATA.load_cv(FOLD_FOR_VAL)

    # Select the four images used as interpolation corners.
    # assumes VAL_X is (sample, H, W, C) with 7 images per subject — TODO confirm
    SUB_1 = 12
    SUB_2 = 10
    VAL_1_1 = VAL_X[7*SUB_1+3, :, :, 0]
    VAL_1_2 = VAL_X[7*SUB_1+1, :, :, 0]
    VAL_2_1 = VAL_X[7*SUB_2+3, :, :, 0]
    VAL_2_2 = VAL_X[7*SUB_2+1, :, :, 0]
    del TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y, VAL_Z  # free the large arrays early

    D = 5
    plt.figure(num=1, figsize=(D * 2, D * 2))
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    for i in xrange(D):
        for j in xrange(D):
            # Bilinear blend of the four corner images.
            y_ratio = i / (D - 1.0)
            z_ratio = j / (D - 1.0)
            val_1 = VAL_1_1 * (1.0 - y_ratio) + VAL_1_2 * y_ratio
            val_2 = VAL_2_1 * (1.0 - y_ratio) + VAL_2_2 * y_ratio
            val = val_1 * (1.0 - z_ratio) + val_2 * z_ratio

            # Visualise.
            plt.subplot(D, D, i * D + j + 1)
            plt.imshow(val.T, cmap='gray', vmin=0.0, vmax=255.0)
            plt.axis('off')
            # plt.title("%.1f %.1f" % (y_ratio, z_ratio), y=0.1)

    plt.savefig(OUTPUT_DIR + 'fig-interp-x-4.pdf')
    plt.close()

    D = 5
    plt.figure(num=1, figsize=(D*2, D*2))
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    # Fill the whole grid with black placeholders first.
    for i in xrange(D):
        for j in xrange(D):
            # Visualise.
            plt.subplot(D, D, i * D + j + 1)
            plt.imshow(np.zeros([64,64]), cmap='gray', vmin=0.0, vmax=255.0)
            plt.axis('off')

    # Then draw the diagonal interpolation between the two corner images.
    for i in xrange(D):
        ratio = i / (D-1.0)
        val = VAL_1_1 * (1.0 - ratio) + VAL_2_2 * ratio

        # Visualise.
        plt.subplot(D, D, i * D + i + 1)
        plt.imshow(val.T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')
        # plt.title("%.1f %.1f" % (y_ratio, z_ratio), y=0.1)

    plt.savefig(OUTPUT_DIR + 'fig-interp-x-2.pdf')
    plt.close()
Beispiel #10
0
 def __init__(self):
     """Build the U-Net, open a TF session, and restore the newest
     checkpoint from ./save/ if one exists."""
     self.data = DATA.CADF()
     self.net = Net.Unet(False)
     self.sess = tf.Session()
     # Restore all global variables from the latest checkpoint, if any.
     self.variable_to_restore = tf.global_variables()
     self.saver = tf.train.Saver(self.variable_to_restore)
     self.ckpt = tf.train.get_checkpoint_state('./save/')
     if self.ckpt and self.ckpt.model_checkpoint_path:
         self.saver.restore(self.sess, self.ckpt.model_checkpoint_path)
         print("OK")
Beispiel #11
0
def boot():
    """Initialise GPIO and peripherals, blink the LEDs while waiting for
    connectivity, then hand off to the main loop."""
    print("*[CUBE - BOOT]*")
    IO.setmode(IO.BCM)
    IO.setwarnings(0)
    for peripheral in (ULTRASOUND, PUMP, LED):
        peripheral.setup()

    blink_seconds = DATA.load_waitForInternetConnection() + BOOTING_EXTRATIME
    LED.bootBlinking(blink_seconds)

    start()
Beispiel #12
0
def pourIfNeeded():
	"""Run one tick of the watering schedule.

	If the schedule is enabled and the stored date matches the current
	minute, pour the configured amount (when enough water is available)
	and advance the schedule. If the stored date is already in the past
	(device was offline), log every missed pour and fast-forward the
	schedule to the next future slot. Otherwise do nothing.
	"""
	print("SCHEDULE - POUR IF NEEDED")
	isScheduleEnabled = DATA.load_enabled()

	if isScheduleEnabled == 1:
		# Renamed from 'next' to avoid shadowing the builtin next().
		nextPour = TIMES.dateFrom(DATA.load_date())
		print("SCHEDULE - POUR IF NEEDED - NEXT:")
		print(nextPour)

		now = datetime.datetime.now()
		print("SCHEDULE - POUR IF NEEDED - NOW:")
		print(now)

		skipDays = DATA.load_skipDays()
		ml = DATA.load_ml()

		if now.date() == nextPour.date() and now.time().hour == nextPour.time().hour and now.time().minute == nextPour.time().minute:
			print("SCHEDULE - POUR IF NEEDED - [IT IS TIME!]")

			volume = DATA.load_volume()

			if volume >= ml:
				# Pouring...
				HISTORY.save_automaticPour(ml)
				duration = CONVERTOR.getDurationFrom(ml)
				PUMP.start(duration)
			else:
				HISTORY.save_automaticPourNotPossible(ml)  # NOT ENOUGH WATER

			# Prepare the next scheduled pour.
			scheduled = nextPour + datetime.timedelta(days=skipDays+1)
			DATA.save_date(TIMES.stringFrom(scheduled))

		elif now > nextPour:
			print("SCHEDULE - POUR IF NEEDED - [NEED TO UPDATE AUTOMATIC_POUR_SCHEDULED DATE]")

			# Log every pour missed while the device was offline.
			while nextPour <= now:
				HISTORY.save_automaticPourNotPossibleWithDate(ml, nextPour)  # DEVICE OFFLINE
				nextPour = nextPour + datetime.timedelta(days=skipDays+1)
			DATA.save_date(TIMES.stringFrom(nextPour))

		else:
			print("SCHEDULE - POUR IF NEEDED - [IT IS NOT TIME YET]")
Beispiel #13
0
def lstm_with_sin():
    """Fit a single-feature LSTM on a sine wave and line-plot actual vs.
    predicted values."""
    axis = np.linspace(0, 40, 1200, dtype=np.float32)
    wave = np.sin(axis)

    train, validation, test = DATA.split_data(wave)
    WINDOWSIZE = 80
    train_x, train_y = DATA.getSeriesData(train, WINDOWSIZE)
    valid_x, valid_y = DATA.getSeriesData(validation, WINDOWSIZE)
    test_x, test_y = DATA.getSeriesData(test, WINDOWSIZE)

    for label, arr in (('TRAIN', train), ('TEST', test),
                       ('TRAIN X', train_x), ('TRAIN Y', train_y)):
        print(label, arr.shape)

    lstm = LSTM.LSTM(1, WINDOWSIZE, 4, 1, loss='square', opt='rms')
    lstm.set_validation_data(valid_x, valid_y, valid_stop=0.0001)
    lstm.run(train_x, train_y, batch_size=100, epochs=1000)
    lstm.do_test(test_x, test_y)
    predict_y = lstm.predict(test_x)
    chart = PLOT.LineChart()
    chart.line(test_y, 'Actual')
    chart.line(predict_y, 'ByNN')
    chart.show()
Beispiel #14
0
def sendNotification():
    """Push a low-water-level APNs alert to the stored device token."""
    print("SEND NOTIFICATION...............................................")
    token = DATA.load_token()

    print("TOKEN =")
    print(token)

    # time.sleep(10)
    # print("SEND NOTIFICATION...............................................REALLY NOW")

    alert = 'Bonsai: Water level below 20%, please refill.'

    client = APNSSandboxClient(
        certificate=CERTIFICATE,
        default_error_timeout=15,
        default_expiration_offset=2592000,
        default_batch_size=100,
        default_retries=12,
    )

    expired_tokens = client.get_expired_tokens()
    print("EXPIRED TOKENS = ")
    print(expired_tokens)

    # Send to a single device; keyword arguments are optional.
    res = client.send(
        token,
        alert,
        badge='1',
        sound='sound to play',
        category='category',
        content_available=True,
        title='WARNING',
        title_loc_key='t_loc_key',
        title_loc_args='t_loc_args',
        action_loc_key='a_loc_key',
        loc_key='loc_key',
        launch_image='path/to/image.jpg',
        extra={'custom': 'data'},
    )

    print(res.errors)
    print(res.token_errors)

    client.close()
Beispiel #15
0
import DATA as DT

# Load the pre-pickled instance list for the configured test file.
data_initial = pickle.load(
    open('./pickleFile/' + Global_V.TESTFILE + '_insts_list_file.pkl', 'rb'))
data_len = len(data_initial)
if Global_V.PRINTPAR == 3:
    print('\nThere are', data_len, 'instance in this data.\n'
          )  # print the row count to verify the data loaded correctly
loss01 = []  # 0-1 loss of each cross-validation fold
data_array = np.array(data_initial)

# 10-fold cross-validation over the instances (Python 2 code below).
kf = KFold(n_splits=10)
fold_count = 0

for train, test in kf.split(data_initial):
    TrainSet = DT.DataSet(data_array[train].tolist())
    TestSet = DT.DataSet(data_array[test].tolist())
    print TestSet.totalcount
    print TrainSet.totalcount

    if Global_V.SCHEME.upper() == 'TAN':
        # Steps: 1. load data  2. train  3. classify  4. evaluate results
        '''
        1. 载入数据
        2. 训练数据
        3. 分类
        4. 输出结果评估
           '''
        result = []
        fold_count += 1
        print "fold", fold_count
        p_c_fold = []
    def __init__(self):
        """Set up the game window, resources, generated world, entities,
        HUD and inventory."""
        # Colour palette (RGB).
        self.YELLOW = (255, 255, 0)
        self.BLUE = (10, 10, 220)
        self.WHITE = (255, 255, 255)
        self.BLACK = (0, 0, 0)
        self.GREEN = (0, 255, 0)
        self.RED = (255, 0, 0)
        # Fonts and gameplay tuning constants.
        self.FPS_FONT = pygame.font.Font("freesansbold.ttf", 11)
        self.SCORE_FONT = pygame.font.Font("freesansbold.ttf", 18)
        self.MOVE_SPEED = 2
        self.MAPSIZE = 40
        self.FPS = 60
        self.NUM_ENEMIES = 50
        self.PLAYER_DAMAGE = 5
        self.ARROW_SPEED = 10

        self.score = 0
        self.game_running = True

        self.screen = pygame.display.set_mode(SCREENSIZE)

        self.screen.fill(self.BLACK)

        self.items = DATA.load_items()

        # Show a loading message while the world is generated.
        loading = "LOADING..."
        text = self.SCORE_FONT.render(loading, 1, self.WHITE)
        self.screen.blit(text, (600, 500))
        pygame.display.flip()

        self.clock = pygame.time.Clock()

        # Generate the dungeon map (a grid of integer tile codes).
        dg = generation.Generator(width=self.MAPSIZE, height=self.MAPSIZE)
        dg.gen_level()
        self.world_map = dg.return_tiles()

        self.TILES = {}
        load_tiles = ["empty", "floor", "wall"]
        for tile in load_tiles:
            self.TILES[tile] = pygame.image.load("graphics/world/" + tile +
                                                 ".png")

        self.tiles_list = pygame.sprite.Group()
        self.all_sprites = pygame.sprite.Group()
        self.walls = pygame.sprite.Group()
        self.enemies = pygame.sprite.Group()
        self.projectiles = pygame.sprite.Group()

        self.spawned_tiles = []

        # Instantiate a Tile sprite (50px grid) for every map cell.
        # Tile codes: 0 = empty, 1 = wall (blocking), 2 = floor.
        y_v = 0
        x_v = 0
        i = 0
        for y in range(len(self.world_map)):
            for x in self.world_map[y]:
                if x == 0:
                    new = ENTITIES.Tile(self.TILES["empty"], x_v, y_v, True,
                                        [i, y], SCREENSIZE)
                elif x == 1:
                    new = ENTITIES.Tile(self.TILES["wall"], x_v, y_v, True,
                                        [i, y], SCREENSIZE)
                    self.walls.add(new)
                elif x == 2:
                    new = ENTITIES.Tile(self.TILES["floor"], x_v, y_v, False,
                                        [i, y], SCREENSIZE)
                x_v += 50
                self.tiles_list.add(new)
                self.all_sprites.add(new)
                i += 1
            y_v += 50
            x_v = 0
            i = 0
        # Player, enemies, HUD and inventory.
        self.player = ENTITIES.Player(30, 500, self.PLAYER_DAMAGE, SCREENSIZE)
        self.spawn_enemies(self.NUM_ENEMIES)

        self.all_sprites.add(self.player)

        self.spawn_player()
        colors = {"red": self.RED, "green": self.GREEN}
        self.hud = ENTITIES.Hud(self.screen, colors, self.player)
        self.all_sprites.add(self.hud)
        self.inventory = ENTITIES.Inventory(self.player)
        self.all_sprites.add(self.inventory)
        self.inventory.add_item(self.items["sword"])

        # Animation driver for the player sprite.
        player_anim = DATA.load_player_anim()
        self.player_animator = ANIMATIONS.Animator(self.player, player_anim)

        self.animators = []
        self.animators.append(self.player_animator)
def main():
    """Save three PDF contact sheets of validation-set images.

    Python 2 code (xrange). Indexing assumes 7 images per subject in
    VAL_X with (sample, H, W, C) layout — TODO confirm against
    DATA.load_cv.
    """
    # Constants.
    OUTPUT_DIR = './100-NET12-VIS/'
    FOLD_FOR_VAL = 0

    # Load the cross-validation dataset.
    [TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y,
     VAL_Z] = DATA.load_cv(FOLD_FOR_VAL)

    # Inspect VAL_X: rows for two chosen subjects plus the leading images.
    SUB_1 = 12
    SUB_2 = 10
    plt.figure(num=1, figsize=(14, 8))
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    for i in xrange(4 * 7):
        # Visualise.
        plt.subplot(4, 7, i + 1)
        if (i < 7):
            plt.imshow(VAL_X[SUB_1 * 7 + i, :, :, :].squeeze().T,
                       cmap='gray',
                       vmin=0.0,
                       vmax=255.0)
        elif (i < 14):
            plt.imshow(VAL_X[SUB_2 * 7 + (i - 7), :, :, :].squeeze().T,
                       cmap='gray',
                       vmin=0.0,
                       vmax=255.0)
        else:
            plt.imshow(VAL_X[i, :, :, :].squeeze().T,
                       cmap='gray',
                       vmin=0.0,
                       vmax=255.0)
        plt.axis('off')
    plt.savefig(OUTPUT_DIR + 'fig-VAL_X-1.pdf')
    plt.close()

    # Inspect VAL_X: image #4 of the first 100 subjects.
    plt.figure(num=1, figsize=(20, 20))
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    for i in xrange(100):
        # Visualise.
        plt.subplot(10, 10, i + 1)
        plt.imshow(VAL_X[7 * i + 4, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')
    plt.savefig(OUTPUT_DIR + 'fig-VAL_X-2.pdf')
    plt.close()

    # Inspect VAL_X: image #4 of the first 400 subjects, titled by index.
    plt.figure(num=1, figsize=(40, 40))
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)
    for i in xrange(400):
        # Visualise.
        plt.subplot(20, 20, i + 1)
        plt.imshow(VAL_X[7 * i + 4, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')
        plt.title(i, y=0.05)
    plt.savefig(OUTPUT_DIR + 'fig-VAL_X-3.pdf')
    plt.close()
def train_val():
    """Train and validate the network, logging TF summaries and saving
    periodic .mat dumps plus on-request snapshots.

    Python 2 / TF-0.x-era code (xrange, print statements, integer '/',
    SummaryWriter, initialize_all_variables).
    """
    # Constants.
    OUTPUT_DIR = './100-4/'
    MB = 100
    SNAPSHOT_RESUME_FROM = 0
    EPOCH_MAX = 10000
    # SNAPSHOT_INTERVAL = 1
    FOLD_FOR_VAL = 4

    # Load the cross-validation dataset.
    [TRAIN_X, TRAIN_Y, VAL_X, VAL_Y] = DATA.load_cv(FOLD_FOR_VAL)

    # Build the computation graph.
    with tf.Graph().as_default():
        # Fix random seeds for reproducibility.
        # Results differ across TF versions; within one version, CPU and
        # GPU agree. May conflict with the snapshot feature.
        seed = 1
        np.random.seed(seed)
        tf.set_random_seed(seed)
        random.seed(seed)

        # Train and val graphs share variables via the 'GRAPH' scope.
        with tf.variable_scope('GRAPH', reuse=None):
            [
                train_x, train_y, train_loss, _, train_acc, train_op,
                train_summary, train_crop_x, train_mask
            ] = build_graph(True)
        with tf.variable_scope('GRAPH', reuse=True):
            [
                val_x, val_y, val_loss, _, val_acc, _, val_summary, val_crop_x,
                val_mask
            ] = build_graph(False)

        # Create the session.
        with tf.Session() as sess:
            # Create the summary writer.
            summary_writer = tf.train.SummaryWriter(OUTPUT_DIR, sess.graph)
            summary_writer.flush()

            # Fresh initialisation, or resume from a snapshot.
            if SNAPSHOT_RESUME_FROM == 0:
                tf.initialize_all_variables().run()
            else:
                tf.train.Saver().restore(
                    sess, OUTPUT_DIR + 'snapshot-' + str(SNAPSHOT_RESUME_FROM))
                print 'load snapshot'

            # Training loop:
            # epochs 1..EPOCH_MAX, or SNAPSHOT_RESUME_FROM+1..EPOCH_MAX.
            for epoch in xrange(SNAPSHOT_RESUME_FROM + 1, EPOCH_MAX + 1):
                print '---------- epoch %d ----------' % epoch
                t = time.time()
                mean_train_loss = 0.0
                mean_train_acc = 0.0
                mean_train_count = 0
                mean_val_loss = 0.0
                mean_val_acc = 0.0
                mean_val_count = 0

                # Shuffle the training set.
                idx = np.random.permutation(TRAIN_X.shape[0])
                TRAIN_X = TRAIN_X[idx, :, :]
                TRAIN_Y = TRAIN_Y[idx, :]

                # Train. The tail of the training set is dropped so the
                # minibatch size stays constant (a varying MB could bias
                # ADAM / BATCHNORM statistics).
                ITER_COUNT = TRAIN_X.shape[0] / MB
                TRAIN_CROP_X_VAL = np.zeros([TRAIN_X.shape[0], 112, 144, 1])
                TRAIN_MASK_VAL = np.zeros([TRAIN_X.shape[0], 7, 9, 1])
                for itr in xrange(ITER_COUNT):
                    train_x_val = TRAIN_X[itr * MB:itr * MB + MB, :, :]
                    train_y_val = TRAIN_Y[itr * MB:itr * MB + MB, :]
                    [
                        _, train_loss_val, train_acc_val, train_summary_val,
                        train_crop_x_val, train_mask_val
                    ] = sess.run([
                        train_op, train_loss, train_acc, train_summary,
                        train_crop_x, train_mask
                    ],
                                 feed_dict={
                                     train_x: train_x_val,
                                     train_y: train_y_val
                                 })
                    mean_train_loss += train_loss_val * MB
                    mean_train_acc += train_acc_val * MB
                    mean_train_count += MB

                    summary_writer.add_summary(train_summary_val, epoch)
                    summary_writer.flush()
                    TRAIN_CROP_X_VAL[itr * MB:itr * MB +
                                     MB, :, :] = train_crop_x_val
                    TRAIN_MASK_VAL[itr * MB:itr * MB +
                                   MB, :, :, :] = train_mask_val

                print 'mean train loss %g, mean train acc %g' % (
                    mean_train_loss / mean_train_count,
                    mean_train_acc / mean_train_count)

                # Validate. The validation tail is kept: the last
                # minibatch may be smaller (mb <= MB).
                ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
                VAL_CROP_X_VAL = np.zeros([VAL_X.shape[0], 112, 144, 1])
                VAL_MASK_VAL = np.zeros([VAL_X.shape[0], 7, 9, 1])
                for itr in xrange(ITER_COUNT):
                    mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                    val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :]
                    val_y_val = VAL_Y[itr * MB:itr * MB + mb, :]
                    [
                        val_loss_val, val_acc_val, val_summary_val,
                        val_crop_x_val, val_mask_val
                    ] = sess.run(
                        [val_loss, val_acc, val_summary, val_crop_x, val_mask],
                        feed_dict={
                            val_x: val_x_val,
                            val_y: val_y_val
                        })
                    mean_val_loss += val_loss_val * mb
                    mean_val_acc += val_acc_val * mb
                    mean_val_count += mb

                    summary_writer.add_summary(val_summary_val, epoch)
                    summary_writer.flush()
                    VAL_CROP_X_VAL[itr * MB:itr * MB +
                                   mb, :, :] = val_crop_x_val
                    VAL_MASK_VAL[itr * MB:itr * MB +
                                 mb, :, :, :] = val_mask_val

                print 'mean val loss %g, mean val acc %g' % (
                    mean_val_loss / mean_val_count,
                    mean_val_acc / mean_val_count)

                # save X-Y-MASK-100.mat
                if ((epoch % 100) == 0):
                    with h5py.File(
                            OUTPUT_DIR + 'X-Y-MASK-' + str(epoch) + '.mat',
                            'w') as h5:
                        h5['TRAIN_CROP_X_VAL'] = TRAIN_CROP_X_VAL
                        h5['TRAIN_Y'] = TRAIN_Y
                        h5['TRAIN_MASK_VAL'] = TRAIN_MASK_VAL
                        h5['VAL_CROP_X_VAL'] = VAL_CROP_X_VAL
                        h5['VAL_Y'] = VAL_Y
                        h5['VAL_MASK_VAL'] = VAL_MASK_VAL

                # Manually requested snapshot save: creating a
                # 'requestsave' file in OUTPUT_DIR triggers one save.
                # NOTE(review): bare except swallows all errors, including
                # failed saves — intentional best-effort? confirm.
                try:
                    os.remove(OUTPUT_DIR + 'requestsave')
                    tf.train.Saver().save(
                        sess, OUTPUT_DIR + 'snapshot-' + str(epoch))
                    print 'save snapshot'
                except:
                    pass

                # Scheduled snapshot save (disabled).
                # if (epoch in []) or ((epoch % SNAPSHOT_INTERVAL) == 0):
                #     tf.train.Saver().save(sess, OUTPUT_DIR+'snapshot-'+str(epoch))
                #     print 'save snapshot'

                print 't %g' % (time.time() - t)
Beispiel #19
0
# Model 5: binary muon-vs-non-muon classifier setup.
print("\nBegin Model 5")

# Sample counts for the small-pt run.
# (The 1-40 pt run used 10000 for each of dymu2/dyU2/tmu2/tU2.)
dymu2, dyU2 = 3000, 3000
tmu2, tU2 = 3000, 3000

# Sampling for the benchmark.
Tdymu2, TdyU2 = 200000, 200000
Ttmu2, TtU2 = 200000, 200000

# Train and test datasets for the two processes.
dataset_DY = DATA(dypath, "Drell-Yan", train_vars)
T_dataset_DY = DATA(T_dypath, "TEST_Drell-Yan", train_vars)
dataset_TT = DATA(ttpath, "TTJets", train_vars)
T_dataset_TT = DATA(T_ttpath, "TEST_ttJets", train_vars)

# PDG-id -> one-hot label (13 = muon; every other id is "not a muon").
mdict = {13: [1, 0], 999: [0, 1], 211: [0, 1], 321: [0, 1], 2212: [0, 1]}
bdict = {'mu': [1, 0], 'U': [0, 1]}

modelDesc = "Model trained only on true muons vs unmatched with non muons EXCLUDING electrons in both test and in training, binary classification"

m = NN("model5", modelDesc, train_vars, mdict, eval_tag)

lastChunk = 35
nepoch = 3
Beispiel #20
0
def getDevice():
    """Return the full device status payload for a GET request.

    Collects configuration, current water state, schedule settings and the
    event history. For a non-GET method an empty payload is returned; the
    original built the response from locals that were only bound inside
    the GET branch, so any other method raised UnboundLocalError.
    """
    print("API - DEVICE")
    if request.method != "GET":
        return {"data": {}}

    # Dict literals evaluate in order, so the DATA.load_* calls happen in
    # the same sequence as the original implementation.
    return {
        "data": {
            "volumeMax": DATA.load_volumeMax(),
            "warningPercentage": DATA.load_warningPercentage(),
            "warningDaysLeft": DATA.load_warningDaysLeft(),
            "pouringInProgress": DATA.load_pouringInProgress(),
            "percentage": DATA.load_percentage(),
            "volume": DATA.load_volume(),
            "daysLeft": DATA.load_daysLeft(),
            "name": DATA.load_name(),
            "date": DATA.load_date(),
            "ml": DATA.load_ml(),
            "skipDays": DATA.load_skipDays(),
            "events": HISTORY.load_allEvents()
        }
    }
Beispiel #21
0
def boot():
    """Sleep until the network is expected to be up, then launch the
    server start script."""
    wait_seconds = DATA.load_waitForInternetConnection()
    time.sleep(wait_seconds)
    SUB.call(['/home/pi/CUBE3/WaterCube2/serverStart.sh'])
Beispiel #22
0
 def data_initialization(self):
     """Build a DATA reader for the configured time window and return the
     cleaned, grouped data package."""
     t = self.timeIndex
     reader = DATA(t[0], t[1], t[2], t[3], t[4], t[5],
                   self.fileDict, 'time', 'power (W)')
     return reader.group_clean(self.group)
Beispiel #23
0
# Location of the dataset cache under the working directory.
p = os.path.join(work_dir, 'DATA')
import DATA
import GANs

# Training hyper-parameters.
batch_size = 800
discriminator_steps = 200  ##
z_dim = 150
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
one = torch.FloatTensor([1]).to(device)
generator_cold_time = 1  #wgan10 gan1
detector_cold_time = 1


# NOTE(review): overrides the generator_cold_time = 1 above — confirm which value is intended.
generator_cold_time = 8

train_loader,_ = DATA.CIFAR10Loader(p, download = False, batch_size=batch_size)
#idx, (X,_) = next(enumerate(train_loader))


# NOTE(review): 'device' is re-assigned with the same expression as above (redundant).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Generator/discriminator pair for CIFAR-10.
generator = GANs.Generator1(z_dim = z_dim,Data = 'CIFAR').to(device)
discriminator = GANs.Discriminator1(Data = 'CIFAR').to(device)

# Optimisers: Adam for the generator, plain SGD for the discriminator.
gen_optimizer = optim.Adam(generator.parameters(), lr=0.0005, betas=(0.5, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)
disc_optimizer = optim.SGD(discriminator.parameters(), lr=0.0005)

# Loss and per-step logging accumulators.
CEloss = nn.BCELoss()
JSD_log = []
D_reals = []
D_fakes = []
Beispiel #24
0
def main():
    """Run NET1 then NET2 over the validation set, dump the intermediate and
    reconstructed outputs to ``dump.mat`` (MATLAB 7.3 / HDF5 layout) and save
    a comparison figure.

    NOTE(review): Python 2 / TF1-era code (``print`` statements, ``xrange``,
    integer division in ITER_COUNT).
    """
    # Constants
    OUTPUT_DIR = './100-NET12-DUMP/'
    MB = 25
    FOLD_FOR_VAL = 0

    # Load the dataset (cross-validation fold FOLD_FOR_VAL held out)
    [TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y, VAL_Z] = DATA.load_cv(FOLD_FOR_VAL)

    # Compute MEAN / STD over the training images (used for input normalization)
    MEAN = np.mean(TRAIN_X, dtype=np.float32)
    STD = np.std(TRAIN_X, dtype=np.float32)
    print 'MEAN: ', MEAN
    print 'STD: ', STD

    # Disabled code kept verbatim as a string literal: truncates the dataset
    # to a small subset for expression-swap visualization.
    '''
    # 缩短数据集 只计算少量数据 用于表情置换可视化
    TRAIN_X = TRAIN_X[:MB, :, :, :]
    TRAIN_Y = TRAIN_Y[:MB, :]
    TRAIN_Z = TRAIN_Z[:MB, :, :, :]
    VAL_X = VAL_X[7*2:7*2+MB, :, :, :]
    VAL_Y = VAL_Y[7*2:7*2+MB, :]
    VAL_Z = VAL_Z[7*2:7*2+MB, :, :, :]
    '''

    # Build the NET1 computation graph
    with tf.Graph().as_default():
        # Build the network
        with tf.variable_scope('GRAPH', reuse=None):
            [val_x, val_y, val_z, val_l_rec, val_l_cls, val_l, val_z_hat, val_y_hat, val_acc, _] = train_val_net1.build_graph('val')

        # Create the session
        sess_config = tf.ConfigProto()
        with tf.Session(config=sess_config) as sess:
            # Load the NET1 snapshot
            saver = tf.train.Saver(max_to_keep=1000000)
            print 'load snapshot'
            saver.restore(sess, './100-NET1/snapshot-2000')

            # Validation
            # Keep the validation-set tail (the last partial minibatch is processed)
            VAL_Z_HAT = np.zeros(np.shape(VAL_Z))
            VAL_Y_HAT = np.zeros(np.shape(VAL_Y))
            mean_val_l_rec = 0.0
            mean_val_l_cls = 0.0
            mean_val_l = 0.0
            mean_val_acc = 0.0
            mean_val_count = 0
            ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # Prepare the minibatch (the last one may have fewer than MB samples)
                mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = VAL_Y[itr * MB:itr * MB + mb, :]
                val_z_val = VAL_Z[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_rec_val, val_l_cls_val, val_l_val, val_z_hat_val, val_y_hat_val, val_acc_val] = \
                    sess.run([val_l_rec, val_l_cls, val_l, val_z_hat, val_y_hat, val_acc], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l_rec += val_l_rec_val * mb
                mean_val_l_cls += val_l_cls_val * mb
                mean_val_l += val_l_val * mb
                mean_val_acc += val_acc_val * mb
                mean_val_count += mb
                VAL_Z_HAT[itr * MB:itr * MB + mb, :, :, :] = val_z_hat_val * STD + MEAN
                VAL_Y_HAT[itr * MB:itr * MB + mb, :] = val_y_hat_val
                # print val_l_rec_val, val_l_cls_val, val_acc_val
            print 'mean_val_l_rec %g, mean_val_l_cls %g, mean_val_l %g, mean_val_acc %g' % \
                  (mean_val_l_rec / mean_val_count, mean_val_l_cls / mean_val_count, mean_val_l / mean_val_count, mean_val_acc / mean_val_count)

            # Disabled code kept verbatim as a string literal: the same
            # validation pass, run over the training set.
            '''
            # 验证 on training set
            # 保留验证集尾部
            TRAIN_Z_HAT = np.zeros(np.shape(TRAIN_Z))
            TRAIN_Y_HAT = np.zeros(np.shape(TRAIN_Y))
            mean_val_l_rec = 0.0
            mean_val_l_cls = 0.0
            mean_val_l = 0.0
            mean_val_acc = 0.0
            mean_val_count = 0
            ITER_COUNT = ((TRAIN_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # 准备MB
                mb = min(itr * MB + MB, TRAIN_X.shape[0]) - itr * MB
                val_x_val = TRAIN_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = TRAIN_Y[itr * MB:itr * MB + mb, :]
                val_z_val = TRAIN_Z[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_rec_val, val_l_cls_val, val_l_val, val_z_hat_val, val_y_hat_val, val_acc_val] = \
                    sess.run([val_l_rec, val_l_cls, val_l, val_z_hat, val_y_hat, val_acc], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l_rec += val_l_rec_val * mb
                mean_val_l_cls += val_l_cls_val * mb
                mean_val_l += val_l_val * mb
                mean_val_acc += val_acc_val * mb
                mean_val_count += mb
                TRAIN_Z_HAT[itr * MB:itr * MB + mb, :, :, :] = val_z_hat_val * STD + MEAN
                TRAIN_Y_HAT[itr * MB:itr * MB + mb, :] = val_y_hat_val
                # print val_l_rec_val, val_l_cls_val, val_acc_val
            print 'mean_val_l_rec %g, mean_val_l_cls %g, mean_val_l %g, mean_val_acc %g' % \
                  (mean_val_l_rec / mean_val_count, mean_val_l_cls / mean_val_count, mean_val_l / mean_val_count, mean_val_acc / mean_val_count)
            '''

    # Build the NET2 computation graph
    with tf.Graph().as_default():
        # Build the network
        with tf.variable_scope('GRAPH', reuse=None):
            [val_x, val_y, val_z, val_l, val_x_hat, _, val_h2] = val_net12_dump_train_val_net2.build_graph('val')

        # Create the session
        with tf.Session() as sess:
            # Load the NET2 snapshot
            saver = tf.train.Saver(max_to_keep=1000000)
            print 'load snapshot'
            saver.restore(sess, './100-NET2/snapshot-2000')

            # Validation
            # Keep the validation-set tail
            VAL_X_HAT = np.zeros(np.shape(VAL_X))
            VAL_H2_HAT = np.zeros([np.shape(VAL_X)[0], 512])
            mean_val_l = 0.0
            mean_val_count = 0
            ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # Prepare the minibatch
                mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = VAL_Y_HAT[itr * MB:itr * MB + mb, :]  # expression reconstruction
                # val_y_val = np.zeros([mb, 7])  # expression swap
                # val_y_val[:, 5] = 1.0  # expression swap
                val_z_val = VAL_Z_HAT[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_val, val_x_hat_val, val_h2_val] = \
                    sess.run([val_l, val_x_hat, val_h2], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l += val_l_val * mb
                mean_val_count += mb
                VAL_X_HAT[itr * MB:itr * MB + mb, :, :, :] = val_x_hat_val * STD + MEAN
                VAL_H2_HAT[itr * MB:itr * MB + mb, :] = np.reshape(val_h2_val, [np.shape(val_h2_val)[0], np.shape(val_h2_val)[3]])
                # print val_l_val
            print 'mean_val_l %g' % (mean_val_l / mean_val_count)

            # Disabled code kept verbatim as a string literal: the same
            # NET2 pass, run over the training set.
            '''
            # 验证 on trainnin set
            # 保留验证集尾部
            TRAIN_X_HAT = np.zeros(np.shape(TRAIN_X))
            mean_val_l = 0.0
            mean_val_count = 0
            ITER_COUNT = ((TRAIN_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # 准备MB
                mb = min(itr * MB + MB, TRAIN_X.shape[0]) - itr * MB
                val_x_val = TRAIN_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = TRAIN_Y_HAT[itr * MB:itr * MB + mb, :]  # 表情还原
                # val_y_val = np.zeros([mb, 7])  # 表情置换
                # val_y_val[:, 5] = 1.0  # 表情置换
                val_z_val = TRAIN_Z_HAT[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_val, val_x_hat_val] = \
                    sess.run([val_l, val_x_hat], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l += val_l_val * mb
                mean_val_count += mb
                TRAIN_X_HAT[itr * MB:itr * MB + mb, :, :, :] = val_x_hat_val * STD + MEAN
                # print val_l_val
            print 'mean_val_l %g' % (mean_val_l / mean_val_count)
            '''

    # Visualization: 7x8 grid (right half only — the training-set columns are disabled)
    plt.figure(num=1, figsize=(24, 13.5))
    for i in xrange(7):
        '''
        plt.subplot(7, 8, i * 8 + 1)
        plt.imshow(TRAIN_X[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')

        plt.subplot(7, 8, i * 8 + 2)
        plt.imshow(TRAIN_Z[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')
        plt.title(TRAIN_Y[i, :])

        plt.subplot(7, 8, i * 8 + 3)
        plt.imshow(TRAIN_Z_HAT[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')
        plt.title(TRAIN_Y_HAT[i, :])

        plt.subplot(7, 8, i * 8 + 4)
        plt.imshow(TRAIN_X_HAT[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')
        '''

        plt.subplot(7, 8, i * 8 + 5)
        plt.imshow(VAL_X[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')

        plt.subplot(7, 8, i * 8 + 6)
        plt.imshow(VAL_Z[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')
        plt.title(VAL_Y[i, :])

        plt.subplot(7, 8, i * 8 + 7)
        plt.imshow(VAL_Z_HAT[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')
        plt.title(VAL_Y_HAT[i, :])

        plt.subplot(7, 8, i * 8 + 8)
        plt.imshow(VAL_X_HAT[i, :, :, :].squeeze().T, cmap='gray', vmin=0.0, vmax=255.0)
        plt.axis('off')

    plt.savefig(OUTPUT_DIR + 'fig.pdf')
    plt.close()

    # save dump.mat
    # The HDF5 userblock is overwritten below with a hand-crafted 512-byte
    # MATLAB 7.3 header so MATLAB accepts the file as a .mat file.
    MAT_FN = OUTPUT_DIR + 'dump.mat'
    with h5py.File(MAT_FN, 'w', userblock_size=512) as h5:
        h5['VAL_X'] = VAL_X
        h5['VAL_Z_HAT'] = np.float32(VAL_Z_HAT)
        h5['VAL_Y_HAT'] = np.float32(VAL_Y_HAT)
        h5['VAL_H2_HAT'] = np.float32(VAL_H2_HAT)
        h5['VAL_X_HAT'] = np.float32(VAL_X_HAT)
    with open(MAT_FN, "rb+") as f:
        MAT_HEAD = 'MATLAB 7.3 MAT-file, Platform: PCWIN64, Created on: Thu Oct 13 23:51:09 2016 HDF5 schema 1.00 .                     \x00\x00\x00\x00\x00\x00\x00\x00\x00\x02IM\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        assert len(MAT_HEAD) == 512
        f.write(MAT_HEAD)
def train_val():
    """Train and validate the expression/person classifier for one
    cross-validation fold, printing per-epoch loss and accuracy.

    NOTE(review): Python 2 / TF1-era code (``print`` statements, ``xrange``).
    """
    # Constants
    OUTPUT_DIR = './100_g-3/'
    MB = 30
    SNAPSHOT_RESUME_FROM = 0
    EPOCH_MAX = 130
    FOLD_FOR_VAL = 3

    # Load the dataset (EXP = expression labels, PSE = person labels — TODO confirm)
    [TRAIN_X, TRAIN_Y_EXP, TRAIN_Y_PSE, VAL_X, VAL_Y_EXP,
     VAL_Y_PSE] = DATA.load_cv(FOLD_FOR_VAL)

    # Build the computation graph
    with tf.Graph().as_default():
        # Fixed random seed for reproducibility.
        # Results differ across TF versions; within one version cpu/gpu agree.
        # May conflict with the snapshot feature.
        seed = 1
        np.random.seed(seed)
        tf.set_random_seed(seed)
        random.seed(seed)

        # Build the training and (variable-sharing) validation graphs
        with tf.variable_scope('GRAPH', reuse=None):
            [
                train_x, train_y_exp, train_y_pse, train_loss, train_acc_exp,
                train_acc_pse, train_op
            ] = build_graph(True)
        with tf.variable_scope('GRAPH', reuse=True):
            [
                val_x, val_y_exp, val_y_pse, val_loss, val_acc_exp,
                val_acc_pse, _
            ] = build_graph(False)

        # Create the session
        with tf.Session() as sess:
            # Initialize training or resume from a snapshot
            if SNAPSHOT_RESUME_FROM == 0:
                tf.global_variables_initializer().run()
            else:
                tf.train.Saver().restore(
                    sess, OUTPUT_DIR + 'snapshot-' + str(SNAPSHOT_RESUME_FROM))
                print 'load snapshot'

            # Training loop
            # epochs 1..EPOCH_MAX, or SNAPSHOT_RESUME_FROM+1..EPOCH_MAX on resume
            for epoch in xrange(SNAPSHOT_RESUME_FROM + 1, EPOCH_MAX + 1):
                print '---------- epoch %d ----------' % epoch
                t = time.time()
                mean_train_loss = 0.0
                mean_train_acc_exp = 0.0
                mean_train_acc_pse = 0.0
                mean_train_count = 0
                mean_val_loss = 0.0
                mean_val_acc_exp = 0.0
                mean_val_acc_pse = 0.0
                mean_val_count = 0

                # Shuffle the training set
                idx = np.random.permutation(TRAIN_X.shape[0])
                TRAIN_X = TRAIN_X[idx, :, :]
                TRAIN_Y_EXP = TRAIN_Y_EXP[idx, :]
                TRAIN_Y_PSE = TRAIN_Y_PSE[idx, :]

                # Training
                # Drop the training-set tail: a varying minibatch size might
                # affect ADAM / BATCHNORM statistics.
                ITER_COUNT = TRAIN_X.shape[0] / MB
                for itr in xrange(ITER_COUNT):
                    train_x_val = TRAIN_X[itr * MB:itr * MB + MB, :, :]
                    train_y_exp_val = TRAIN_Y_EXP[itr * MB:itr * MB + MB, :]
                    train_y_pse_val = TRAIN_Y_PSE[itr * MB:itr * MB + MB, :]
                    [_, train_loss_val, train_acc_exp_val, train_acc_pse_val] =\
                        sess.run([train_op, train_loss, train_acc_exp, train_acc_pse], feed_dict={train_x: train_x_val, train_y_exp: train_y_exp_val, train_y_pse: train_y_pse_val})
                    mean_train_loss += train_loss_val * MB
                    mean_train_acc_exp += train_acc_exp_val * MB
                    mean_train_acc_pse += train_acc_pse_val * MB
                    mean_train_count += MB

                print 'mean train loss %g, mean train acc exp %g, mean train acc pse %g' % (
                    mean_train_loss / mean_train_count, mean_train_acc_exp /
                    mean_train_count, mean_train_acc_pse / mean_train_count)

                # Validation
                # Keep the validation-set tail (last partial minibatch included)
                ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
                for itr in xrange(ITER_COUNT):
                    mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                    val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :]
                    val_y_exp_val = VAL_Y_EXP[itr * MB:itr * MB + mb, :]
                    val_y_pse_val = VAL_Y_PSE[itr * MB:itr * MB + mb, :]
                    [val_loss_val, val_acc_exp_val, val_acc_pse_val] =\
                        sess.run([val_loss, val_acc_exp, val_acc_pse], feed_dict={val_x: val_x_val, val_y_exp: val_y_exp_val, val_y_pse: val_y_pse_val})
                    mean_val_loss += val_loss_val * mb
                    mean_val_acc_exp += val_acc_exp_val * mb
                    mean_val_acc_pse += val_acc_pse_val * mb
                    mean_val_count += mb

                print 'mean val loss %g, mean val acc exp %g, mean val acc pse %g' % (
                    mean_val_loss / mean_val_count, mean_val_acc_exp /
                    mean_val_count, mean_val_acc_pse / mean_val_count)

                print 't %g' % (time.time() - t)
def main():
    """Run NET1 and NET2 on seven samples of a single subject (training and
    validation splits) and save an 7x8 grid figure comparing inputs against
    reconstructions.

    NOTE(review): Python 2 / TF1-era code (``print`` statements, ``xrange``).
    """
    # Constants
    OUTPUT_DIR = './100-NET12-VIS/'
    MB = 25
    FOLD_FOR_VAL = 0

    # Load the dataset (cross-validation fold FOLD_FOR_VAL held out)
    [TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y,
     VAL_Z] = DATA.load_cv(FOLD_FOR_VAL)

    # Compute MEAN / STD over the training images (used for normalization)
    MEAN = np.mean(TRAIN_X, dtype=np.float32)
    STD = np.std(TRAIN_X, dtype=np.float32)
    print 'MEAN: ', MEAN
    print 'STD: ', STD

    # REC: restrict to the 7 consecutive samples of subject SUB
    SUB = 12
    TRAIN_X = TRAIN_X[7 * SUB:7 * SUB + 7, :, :, :]
    TRAIN_Y = TRAIN_Y[7 * SUB:7 * SUB + 7, :]
    TRAIN_Z = TRAIN_Z[7 * SUB:7 * SUB + 7, :, :, :]
    VAL_X = VAL_X[7 * SUB:7 * SUB + 7, :, :, :]
    VAL_Y = VAL_Y[7 * SUB:7 * SUB + 7, :]
    VAL_Z = VAL_Z[7 * SUB:7 * SUB + 7, :, :, :]

    # Build the NET1 computation graph
    with tf.Graph().as_default():
        # Build the network
        with tf.variable_scope('GRAPH', reuse=None):
            [
                val_x, val_y, val_z, val_l_rec, val_l_cls, val_l, val_z_hat,
                val_y_hat, val_acc, _
            ] = train_val_net1.build_graph('val')

        # Create the session
        sess_config = tf.ConfigProto()
        with tf.Session(config=sess_config) as sess:
            # Load the NET1 snapshot
            saver = tf.train.Saver(max_to_keep=1000000)
            print 'load snapshot'
            saver.restore(sess, './100-NET1/snapshot-2000')

            # Validation
            # Keep the validation-set tail (last partial minibatch included)
            VAL_Z_HAT = np.zeros(np.shape(VAL_Z))
            VAL_Y_HAT = np.zeros(np.shape(VAL_Y))
            mean_val_l_rec = 0.0
            mean_val_l_cls = 0.0
            mean_val_l = 0.0
            mean_val_acc = 0.0
            mean_val_count = 0
            ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # Prepare the minibatch (the last one may have fewer than MB samples)
                mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = VAL_Y[itr * MB:itr * MB + mb, :]
                val_z_val = VAL_Z[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_rec_val, val_l_cls_val, val_l_val, val_z_hat_val, val_y_hat_val, val_acc_val] = \
                    sess.run([val_l_rec, val_l_cls, val_l, val_z_hat, val_y_hat, val_acc], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l_rec += val_l_rec_val * mb
                mean_val_l_cls += val_l_cls_val * mb
                mean_val_l += val_l_val * mb
                mean_val_acc += val_acc_val * mb
                mean_val_count += mb
                VAL_Z_HAT[itr * MB:itr * MB +
                          mb, :, :, :] = val_z_hat_val * STD + MEAN
                VAL_Y_HAT[itr * MB:itr * MB + mb, :] = val_y_hat_val
                # print val_l_rec_val, val_l_cls_val, val_acc_val
            print 'mean_val_l_rec %g, mean_val_l_cls %g, mean_val_l %g, mean_val_acc %g' % \
                  (mean_val_l_rec / mean_val_count, mean_val_l_cls / mean_val_count, mean_val_l / mean_val_count, mean_val_acc / mean_val_count)

            # Validation on the training set
            # Keep the tail (last partial minibatch included)
            TRAIN_Z_HAT = np.zeros(np.shape(TRAIN_Z))
            TRAIN_Y_HAT = np.zeros(np.shape(TRAIN_Y))
            mean_val_l_rec = 0.0
            mean_val_l_cls = 0.0
            mean_val_l = 0.0
            mean_val_acc = 0.0
            mean_val_count = 0
            ITER_COUNT = ((TRAIN_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # Prepare the minibatch
                mb = min(itr * MB + MB, TRAIN_X.shape[0]) - itr * MB
                val_x_val = TRAIN_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = TRAIN_Y[itr * MB:itr * MB + mb, :]
                val_z_val = TRAIN_Z[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_rec_val, val_l_cls_val, val_l_val, val_z_hat_val, val_y_hat_val, val_acc_val] = \
                    sess.run([val_l_rec, val_l_cls, val_l, val_z_hat, val_y_hat, val_acc], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l_rec += val_l_rec_val * mb
                mean_val_l_cls += val_l_cls_val * mb
                mean_val_l += val_l_val * mb
                mean_val_acc += val_acc_val * mb
                mean_val_count += mb
                TRAIN_Z_HAT[itr * MB:itr * MB +
                            mb, :, :, :] = val_z_hat_val * STD + MEAN
                TRAIN_Y_HAT[itr * MB:itr * MB + mb, :] = val_y_hat_val
                # print val_l_rec_val, val_l_cls_val, val_acc_val
            print 'mean_val_l_rec %g, mean_val_l_cls %g, mean_val_l %g, mean_val_acc %g' % \
                  (mean_val_l_rec / mean_val_count, mean_val_l_cls / mean_val_count, mean_val_l / mean_val_count, mean_val_acc / mean_val_count)

    # Build the NET2 computation graph
    with tf.Graph().as_default():
        # Build the network
        with tf.variable_scope('GRAPH', reuse=None):
            [val_x, val_y, val_z, val_l, val_x_hat,
             _] = train_val_net2.build_graph('val')

        # Create the session
        with tf.Session() as sess:
            # Load the NET2 snapshot
            saver = tf.train.Saver(max_to_keep=1000000)
            print 'load snapshot'
            saver.restore(sess, './100-NET2/snapshot-2000')

            # Validation
            # Keep the validation-set tail
            VAL_X_HAT = np.zeros(np.shape(VAL_X))
            mean_val_l = 0.0
            mean_val_count = 0
            ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # Prepare the minibatch
                mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = VAL_Y_HAT[itr * MB:itr * MB + mb, :]  # REC
                val_z_val = VAL_Z_HAT[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_val, val_x_hat_val] = \
                    sess.run([val_l, val_x_hat], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l += val_l_val * mb
                mean_val_count += mb
                VAL_X_HAT[itr * MB:itr * MB +
                          mb, :, :, :] = val_x_hat_val * STD + MEAN
                # print val_l_val
            print 'mean_val_l %g' % (mean_val_l / mean_val_count)

            # Validation on the training set
            # Keep the tail
            TRAIN_X_HAT = np.zeros(np.shape(TRAIN_X))
            mean_val_l = 0.0
            mean_val_count = 0
            ITER_COUNT = ((TRAIN_X.shape[0] - 1) / MB) + 1
            for itr in xrange(ITER_COUNT):
                # Prepare the minibatch
                mb = min(itr * MB + MB, TRAIN_X.shape[0]) - itr * MB
                val_x_val = TRAIN_X[itr * MB:itr * MB + mb, :, :, :]
                val_y_val = TRAIN_Y_HAT[itr * MB:itr * MB + mb, :]  # REC
                val_z_val = TRAIN_Z_HAT[itr * MB:itr * MB + mb, :, :, :]
                val_x_val = (val_x_val - MEAN) / STD
                val_z_val = (val_z_val - MEAN) / STD

                # run
                [val_l_val, val_x_hat_val] = \
                    sess.run([val_l, val_x_hat], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                mean_val_l += val_l_val * mb
                mean_val_count += mb
                TRAIN_X_HAT[itr * MB:itr * MB +
                            mb, :, :, :] = val_x_hat_val * STD + MEAN
                # print val_l_val
            print 'mean_val_l %g' % (mean_val_l / mean_val_count)

    # Visualization: columns 1-4 training set, columns 5-8 validation set
    plt.figure(num=1, figsize=(24, 13.5))
    for i in xrange(7):
        plt.subplot(7, 8, i * 8 + 1)
        plt.imshow(TRAIN_X[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')

        plt.subplot(7, 8, i * 8 + 2)
        plt.imshow(TRAIN_Z[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')
        plt.title(TRAIN_Y[i, :])

        plt.subplot(7, 8, i * 8 + 3)
        plt.imshow(TRAIN_Z_HAT[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')
        plt.title(TRAIN_Y_HAT[i, :])

        plt.subplot(7, 8, i * 8 + 4)
        plt.imshow(TRAIN_X_HAT[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')

        plt.subplot(7, 8, i * 8 + 5)
        plt.imshow(VAL_X[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')

        plt.subplot(7, 8, i * 8 + 6)
        plt.imshow(VAL_Z[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')
        plt.title(VAL_Y[i, :])

        plt.subplot(7, 8, i * 8 + 7)
        plt.imshow(VAL_Z_HAT[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')
        plt.title(VAL_Y_HAT[i, :])

        plt.subplot(7, 8, i * 8 + 8)
        plt.imshow(VAL_X_HAT[i, :, :, :].squeeze().T,
                   cmap='gray',
                   vmin=0.0,
                   vmax=255.0)
        plt.axis('off')

    plt.savefig(OUTPUT_DIR + 'fig-rec.pdf')
    plt.close()
Beispiel #27
0
def update():
    """Recompute the fill percentage and volume from the latest ultrasound
    distance reading, persist them, and drive the status LED.

    Reads the calibration distances (full/empty) and the current distance
    from DATA; maps the distance linearly onto [0, 1]; saves the derived
    volume/percentage; then shows a red LED below the warning threshold
    (optionally arming a notification), blue otherwise.
    """
    print("VOLUME - UPDATE")

    distanceFull = DATA.load_distanceFull()
    distanceEmpty = DATA.load_distanceEmpty()
    distance = DATA.load_distance()

    # Usable sensor span between the "empty" and "full" calibration points.
    # Renamed from `range`, which shadowed the builtin of the same name.
    span = distanceEmpty - distanceFull

    percentage = 0.0

    # Guard span > 0 so a degenerate calibration (empty <= full) cannot
    # raise ZeroDivisionError; percentage then simply stays 0.0.
    if distance < distanceEmpty and span > 0:
        diffFromEmpty = distanceEmpty - distance
        percentage = diffFromEmpty / span
        print("VOLUME - UPDATE - PERCENTAGE = [%.2f]" % percentage)

    volumeMax = DATA.load_volumeMax()
    volume = volumeMax * percentage
    print("VOLUME - UPDATE - VOLUME = [%d]" % volume)

    DATA.save_volume(volume)
    DATA.save_percentage(percentage)

    warningPercentage = DATA.load_warningPercentage()

    LED.setup()
    if percentage < warningPercentage:
        LED.red()

        shouldSend = DATA.load_shouldSend()
        print("********************************************************** SHOULD SEND = ")
        print(shouldSend)

        if shouldSend:
            print("********************************************************** SEND NOTIF")

            # Notification sending is currently disabled; the original
            # best-effort flow is kept below for reference.
            # sending = DATA.load_sending()
            # print("********************************************************** SENDING = ")
            # print(sending)
            # if not sending:
                # DATA.save_sending(1)

            # DATA.save_shouldSend(0)
            # APNS.sendNotification()

    else:
        # Re-arm the notification once the level is comfortably back above half.
        if percentage > 0.5:
            print("********************************************************** SHOULD SEND SET TO 1")
            DATA.save_shouldSend(1)
        LED.blue()
    def run(self,
            training_x,
            training_y,
            epochs=1000,
            batch_size=0,
            display_step=100):
        '''
        Run the training loop of the LSTM model.

        Training and validation data can be supplied separately. Each batch
        step draws `batch_size` input/output samples from the training data;
        every `display_step` steps the current accuracy and learning rate
        are printed and the validation metric is computed. Training stops
        after `epochs` steps, or earlier when the training metric drops
        below `self.training_stop` or the validation metric drops below
        `self.valid_stop`.

        :param training_x: training inputs
        :param training_y: training targets
        :param epochs: maximum number of batch steps
        :param batch_size: samples per batch; 0 means 5% of the training set
        :param display_step: interval (in steps) between evaluations
        :return: (acc, valid_res) — last training and validation metrics
        '''
        self.max_epochs = epochs
        if batch_size == 0:
            batch_size = int(training_x.shape[0] * 0.05)
        self.display_step = display_step

        # Run the initializer
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        #training_y =  training_y.reshape((-1, self.predict_size * self.output_dim))
        training = DATA.BatchDataGen(training_x, training_y)

        # Pre-initialize so the return statement cannot raise NameError when
        # training stops before the first evaluation/validation run.
        acc = None
        valid_res = None
        for step in range(1, self.max_epochs + 1):
            batch_x, batch_y = training.next_batch(batch_size)

            self.sess.run(self.train_op,
                          feed_dict={
                              self.X: batch_x,
                              self.Y: batch_y
                          })
            if step % self.display_step == 0 or step == 1:
                loss, acc = self.sess.run([self.loss_op, self.accuracy],
                                          feed_dict={
                                              self.X: batch_x,
                                              self.Y: batch_y
                                          })
                # Newer TF optimizers expose `_learning_rate`; older ones use
                # `_lr`.  Narrowed from a bare `except:` so unrelated errors
                # (e.g. KeyboardInterrupt) are no longer swallowed.
                try:
                    curr_lr = self.sess.run(self.optimizer._learning_rate)
                except (AttributeError, TypeError):
                    curr_lr = self.sess.run(self.optimizer._lr)

                print("Step " + str(step) + ": Acc= " + "{:.6f}".format(acc) + \
                   ", LR= " + "{:.6f}".format(curr_lr))

                if self.training_stop is not None and acc < self.training_stop:
                    print('STOP by training_stop')
                    break

                valid_res = self.do_validation()
                if self.valid_stop != 0 and valid_res < self.valid_stop:
                    print('STOP by valid_stop')
                    break

        return acc, valid_res  # training_error, validation_error
def main():
    """Interpolate jointly over the expression label y and the latent code h2
    between two validation samples, and save the resulting DxD image grid.

    NOTE(review): Python 2 / TF1-era code (``print`` statements, ``xrange``).
    """
    # Constants
    OUTPUT_DIR = './100-NET12-VIS/'
    FOLD_FOR_VAL = 0

    # Load the dataset (cross-validation fold FOLD_FOR_VAL held out)
    [TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y,
     VAL_Z] = DATA.load_cv(FOLD_FOR_VAL)

    # Compute MEAN / STD over the training images (used for normalization)
    MEAN = np.mean(TRAIN_X, dtype=np.float32)
    STD = np.std(TRAIN_X, dtype=np.float32)
    print 'MEAN: ', MEAN
    print 'STD: ', STD

    # Set up the four interpolation endpoints: two z images (subjects 12 and
    # 10, sample index 4) and two one-hot labels (classes 3 and 1).
    SUB_1 = 12
    SUB_2 = 10
    VAL_Z_1 = VAL_Z[7 * SUB_1 + 4:7 * SUB_1 + 4 + 1, :, :, :]
    VAL_Z_2 = VAL_Z[7 * SUB_2 + 4:7 * SUB_2 + 4 + 1, :, :, :]
    VAL_Y_1 = np.zeros([1, 7])
    VAL_Y_1[:, 3] = 1.0
    VAL_Y_2 = np.zeros([1, 7])
    VAL_Y_2[:, 1] = 1.0
    del TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y, VAL_Z

    # Build the computation graph
    with tf.Graph().as_default():
        # Fixed random seed for reproducibility.
        # Results differ across TF versions; within one version cpu/gpu agree.
        SEED = 1
        np.random.seed(SEED)
        tf.set_random_seed(SEED)
        random.seed(SEED)

        # Build the network (split into the encoder part a and decoder part b)
        with tf.variable_scope('GRAPH', reuse=None):
            [val_z, val_h2_a] = build_graph_a('val')
            [val_x, val_y, val_h2_b, val_l, val_x_hat,
             _] = build_graph_b('val')

        # Create the session
        with tf.Session() as sess:
            # Load the snapshot
            print 'load snapshot'
            saver = tf.train.Saver(max_to_keep=1000000)
            saver.restore(sess, './100-NET2/snapshot-2000')

            # Compute the h2 codes of the two interpolation endpoints
            [VAL_H2_1] = sess.run([val_h2_a],
                                  feed_dict={val_z: (VAL_Z_1 - MEAN) / STD})
            [VAL_H2_2] = sess.run([val_h2_a],
                                  feed_dict={val_z: (VAL_Z_2 - MEAN) / STD})

            # DxD grid: rows interpolate the label y, columns the code h2
            D = 5
            plt.figure(num=1, figsize=(D * 2, D * 2))
            plt.subplots_adjust(left=0,
                                right=1,
                                bottom=0,
                                top=1,
                                wspace=0,
                                hspace=0)
            for i in xrange(D):
                for j in xrange(D):
                    y_ratio = i / (D - 1.0)
                    h2_ratio = j / (D - 1.0)
                    val_y_val = VAL_Y_1 * (1.0 - y_ratio) + VAL_Y_2 * y_ratio
                    val_h2_b_val = VAL_H2_1 * (1.0 -
                                               h2_ratio) + VAL_H2_2 * h2_ratio

                    # run
                    [val_x_hat_val] = sess.run([val_x_hat],
                                               feed_dict={
                                                   val_y: val_y_val,
                                                   val_h2_b: val_h2_b_val
                                               })

                    # Visualization (de-normalize back to pixel range)
                    plt.subplot(D, D, i * D + j + 1)
                    plt.imshow(
                        (val_x_hat_val[0, :, :, :].squeeze().T * STD) + MEAN,
                        cmap='gray',
                        vmin=0.0,
                        vmax=255.0)
                    plt.axis('off')
                    # plt.title("%.1f %.1f" % (y_ratio, h2_ratio), y=0.1)

            plt.savefig(OUTPUT_DIR + 'fig-interp-h2+y.pdf')
            plt.close()
Beispiel #30
0
def main():
    """Train the NET2 network on one cross-validation fold.

    Loads the fold via DATA.load_cv, normalizes inputs with the
    training-set mean/std, builds variable-sharing train/val graphs,
    then runs the epoch loop: shuffle, train in fixed-size minibatches,
    evaluate on the full validation set, and every SNAPSHOT_INTERVAL
    epochs write a comparison figure and a checkpoint to OUTPUT_DIR.

    Python 2 / TensorFlow 1.x code (print statements, xrange,
    tf.Session). Relies on module-level DATA, build_graph, np, tf,
    plt, random, time.
    """
    # Constants
    OUTPUT_DIR = './100-NET2/'
    MB = 25  # minibatch size
    FOLD_FOR_VAL = 0  # which CV fold is held out for validation
    SNAPSHOT_RESUME_FROM = 0  # 0 = fresh init; N = restore 'snapshot-N'
    EPOCH_MAX = 2000
    SNAPSHOT_INTERVAL = 200  # epochs between figure + checkpoint writes

    # Load the dataset for this fold
    [TRAIN_X, TRAIN_Y, TRAIN_Z, VAL_X, VAL_Y,
     VAL_Z] = DATA.load_cv(FOLD_FOR_VAL)

    # Compute training-set MEAN/STD, used to normalize both x and z
    # inputs (and to denormalize images for visualization below)
    MEAN = np.mean(TRAIN_X, dtype=np.float32)
    STD = np.std(TRAIN_X, dtype=np.float32)
    print 'MEAN: ', MEAN
    print 'STD: ', STD
    '''
    # 缩短数据集 只计算少量数据 用于测试及可视化
    TRAIN_X = TRAIN_X[:MB, :, :, :]
    TRAIN_Y = TRAIN_Y[:MB, :]
    TRAIN_Z = TRAIN_Z[:MB, :, :, :]
    VAL_X = VAL_X[:MB, :, :, :]
    VAL_Y = VAL_Y[:MB, :]
    VAL_Z = VAL_Z[:MB, :, :, :]
    '''

    # Build the computation graph
    with tf.Graph().as_default():
        # Fix all random seeds for reproducibility.
        # NOTE: results differ between TF versions; within one version,
        # CPU and GPU runs produce identical results.
        SEED = 1
        np.random.seed(SEED)
        tf.set_random_seed(SEED)
        random.seed(SEED)

        # Build the network: the 'train' graph creates the variables,
        # the 'val' graph reuses them (same 'GRAPH' variable scope)
        with tf.variable_scope('GRAPH', reuse=None):
            [train_x, train_y, train_z, train_l, train_x_hat,
             train_op] = build_graph('train')
        with tf.variable_scope('GRAPH', reuse=True):
            [val_x, val_y, val_z, val_l, val_x_hat, _] = build_graph('val')

        # Create the session
        with tf.Session() as sess:
            saver = tf.train.Saver(max_to_keep=1000000)

            # Initialize variables or restore from a snapshot
            if SNAPSHOT_RESUME_FROM == 0:
                print 'init vars'
                tf.global_variables_initializer().run()
            else:
                print 'load snapshot'
                saver.restore(
                    sess, OUTPUT_DIR + 'snapshot-' + str(SNAPSHOT_RESUME_FROM))

            # Training loop:
            # epochs 1..EPOCH_MAX, or SNAPSHOT_RESUME_FROM+1..EPOCH_MAX
            # when resuming from a checkpoint
            for epoch in xrange(SNAPSHOT_RESUME_FROM + 1, EPOCH_MAX + 1):
                print '---------- epoch %d ----------' % epoch
                t = time.time()

                # Shuffle the training set in place (one shared permutation
                # keeps x/y/z rows aligned).
                # NOTE(review): TRAIN_Z is indexed with 2 subscripts here
                # and 4 elsewhere (e.g. the disabled block above); NumPy
                # treats missing trailing indices as full slices, so both
                # forms work if TRAIN_Z is 4-D — confirm its rank in DATA.
                idx = np.random.permutation(TRAIN_X.shape[0])
                TRAIN_X = TRAIN_X[idx, :, :, :]
                TRAIN_Y = TRAIN_Y[idx, :]
                TRAIN_Z = TRAIN_Z[idx, :]

                # Train.
                # The training-set tail is dropped so every minibatch has
                # exactly MB samples — a varying batch size might affect
                # ADAM / batch-norm statistics.
                mean_train_l = 0.0
                mean_train_count = 0
                # Python 2 '/': integer (floor) division
                ITER_COUNT = TRAIN_X.shape[0] / MB
                for itr in xrange(ITER_COUNT):
                    # Prepare the minibatch (normalize x and z)
                    train_x_val = TRAIN_X[itr * MB:itr * MB + MB, :, :, :]
                    train_y_val = TRAIN_Y[itr * MB:itr * MB + MB, :]
                    train_z_val = TRAIN_Z[itr * MB:itr * MB + MB, :]
                    train_x_val = (train_x_val - MEAN) / STD
                    train_z_val = (train_z_val - MEAN) / STD
                    '''
                    # 可视化MB
                    plt.figure(num=1, figsize=(24, 13.5))
                    for i in xrange(10):
                        plt.subplot(10, 2, i*2+1)
                        plt.imshow((train_x_val[i, :, :, :].squeeze().T * STD) + MEAN, cmap='gray', vmin=0.0, vmax=255.0)
                        plt.axis('off')
                        plt.subplot(10, 2, i*2+2)
                        plt.imshow((train_z_val[i, :, :, :].squeeze().T * STD) + MEAN, cmap='gray', vmin=0.0, vmax=255.0)
                        plt.axis('off')
                        plt.title(train_y_val[i, :])
                    plt.savefig(OUTPUT_DIR + 'mb-' + str(epoch) + '.pdf')
                    plt.close()
                    return
                    '''

                    # Run one optimization step; accumulate the loss
                    # weighted by batch size for the epoch mean
                    [_, train_l_val, train_x_hat_val] = \
                        sess.run([train_op, train_l, train_x_hat], feed_dict={train_x: train_x_val, train_y: train_y_val, train_z: train_z_val})
                    mean_train_l += train_l_val * MB
                    mean_train_count += MB
                    # print train_l_val
                print 'mean_train_l %g' % (mean_train_l / mean_train_count)

                # Validate.
                # The validation tail is kept: the last minibatch may be
                # smaller than MB (mb <= MB below).
                mean_val_l = 0.0
                mean_val_count = 0
                # Ceiling division (Python 2 integer '/') to cover the tail
                ITER_COUNT = ((VAL_X.shape[0] - 1) / MB) + 1
                for itr in xrange(ITER_COUNT):
                    # Prepare the (possibly smaller) minibatch
                    mb = min(itr * MB + MB, VAL_X.shape[0]) - itr * MB
                    val_x_val = VAL_X[itr * MB:itr * MB + mb, :, :, :]
                    val_y_val = VAL_Y[itr * MB:itr * MB + mb, :]
                    val_z_val = VAL_Z[itr * MB:itr * MB + mb, :, :, :]
                    val_x_val = (val_x_val - MEAN) / STD
                    val_z_val = (val_z_val - MEAN) / STD

                    # Forward pass only (no train_op)
                    [val_l_val, val_x_hat_val] = \
                        sess.run([val_l, val_x_hat], feed_dict={val_x: val_x_val, val_y: val_y_val, val_z: val_z_val})
                    mean_val_l += val_l_val * mb
                    mean_val_count += mb
                    # print val_l_val
                print 'mean_val_l %g' % (mean_val_l / mean_val_count)

                # Visualization: every SNAPSHOT_INTERVAL epochs, plot the
                # first 3 train and 3 val examples from the LAST minibatch
                # of this epoch — columns: train x, train z, train x_hat,
                # val x, val z, val x_hat — denormalized back to pixels
                if (epoch % SNAPSHOT_INTERVAL) == 0:
                    plt.figure(num=1, figsize=(24, 13.5))
                    for i in xrange(3):
                        plt.subplot(3, 6, i * 6 + 1)
                        plt.imshow(
                            (train_x_val[i, :, :, :].squeeze().T * STD) + MEAN,
                            cmap='gray',
                            vmin=0.0,
                            vmax=255.0)
                        plt.axis('off')

                        plt.subplot(3, 6, i * 6 + 2)
                        plt.imshow(
                            (train_z_val[i, :, :, :].squeeze().T * STD) + MEAN,
                            cmap='gray',
                            vmin=0.0,
                            vmax=255.0)
                        plt.axis('off')
                        plt.title(train_y_val[i, :])

                        plt.subplot(3, 6, i * 6 + 3)
                        plt.imshow(
                            (train_x_hat_val[i, :, :, :].squeeze().T * STD) +
                            MEAN,
                            cmap='gray',
                            vmin=0.0,
                            vmax=255.0)
                        plt.axis('off')

                        plt.subplot(3, 6, i * 6 + 4)
                        plt.imshow(
                            (val_x_val[i, :, :, :].squeeze().T * STD) + MEAN,
                            cmap='gray',
                            vmin=0.0,
                            vmax=255.0)
                        plt.axis('off')

                        plt.subplot(3, 6, i * 6 + 5)
                        plt.imshow(
                            (val_z_val[i, :, :, :].squeeze().T * STD) + MEAN,
                            cmap='gray',
                            vmin=0.0,
                            vmax=255.0)
                        plt.axis('off')
                        plt.title(val_y_val[i, :])

                        plt.subplot(3, 6, i * 6 + 6)
                        plt.imshow(
                            (val_x_hat_val[i, :, :, :].squeeze().T * STD) +
                            MEAN,
                            cmap='gray',
                            vmin=0.0,
                            vmax=255.0)
                        plt.axis('off')
                    plt.savefig(OUTPUT_DIR + 'fig-' + str(epoch) + '.pdf')
                    plt.close()

                # Scheduled snapshot save
                if (epoch % SNAPSHOT_INTERVAL) == 0:
                    saver.save(sess,
                               OUTPUT_DIR + 'snapshot-' + str(epoch),
                               write_meta_graph=False)
                    print 'save snapshot'

                print 't %g' % (time.time() - t)