Example #1
def old_tests():
	print(backprop.final_layer_error(layer.softmax(np.array([5,2,5,1])), np.array([0,1,0,0]), np.array([5,2,5,1])))
	print(layer.conv_layer(np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]), layer.init_weights(5, (2,2,3)), layer.init_biases(5), zero_pad_dimensions=(2,2)))
	print(layer.conv_layer(
		layer.relu(
			layer.conv_layer(np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]), layer.init_weights(5, (2,2,3)), layer.init_biases(5), zero_pad_dimensions=(2,2))),
		layer.init_weights(32, (2,2,5)),
		layer.init_biases(32),
		zero_pad_dimensions=(1,1)
		).shape)
	import_files.import_batch("/Users/admin/Documents/code/python/tensorflow/projects/CIFAR-10-convnet/Data/test/", 1, 256)
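For reference, a minimal NumPy sketch of what layer.softmax and backprop.final_layer_error presumably compute here, assuming a softmax output layer with cross-entropy loss (where the output-layer error reduces to activations minus targets); the function names mirror the call above, but this is not the project's actual code:

import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))  # shift by the max for numerical stability
    return e / e.sum()

def final_layer_error(activations, targets, weighted_inputs=None):
    # for softmax + cross-entropy the output delta is simply a - y;
    # weighted_inputs is accepted only to mirror the call signature above
    return activations - targets

a = softmax(np.array([5, 2, 5, 1], dtype=float))
print(final_layer_error(a, np.array([0, 1, 0, 0]), np.array([5, 2, 5, 1])))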
Example #2
def test_full_net():
	"""
	test whether an evaluable network can be created
	"""

	#import

	test_data = import_files.import_batch("/Users/admin/Documents/code/python/tensorflow/projects/CIFAR-10-convnet/Data/test/", 1, 10)

	#weights and biases

	layer1_weights = layer.init_weights(32, (4,4,3))
	layer1_biases = layer.init_biases(32)

	layer2_weights = layer.init_weights(128, (8192))
	layer2_biases = layer.init_biases(128)

	layer3_weights = layer.init_weights(10, (128))
	layer3_biases = layer.init_biases(10)

	output = np.empty((10, 10))
	for i in range(10):
		layer1 = layer.relu(layer.conv_layer(test_data[i], layer1_weights, layer1_biases, zero_pad_dimensions=(2,2), stride=(2,2)))
		layer2 = layer.relu(layer.fulcon_layer(layer1, layer2_weights, layer2_biases))
		layer3 = layer.relu(layer.fulcon_layer(layer2, layer3_weights, layer3_biases))
		print(layer3)
		output[i] = layer.softmax(layer3)
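A quick sanity check on the fully connected fan-in above, assuming 32x32x3 CIFAR-10 inputs: a stride-(2,2) convolution with 32 filters halves each spatial dimension, so the flattened layer1 activation has 16 * 16 * 32 = 8192 entries, which matches the (8192) fan-in given to layer2_weights:

# hedged shape arithmetic, assuming 32x32 inputs and stride (2, 2)
height, width, filters = 32 // 2, 32 // 2, 32
assert height * width * filters == 8192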
Example #3
    def _forward_pass(self, x):
        # Decoder
        h = deconv_layer(x,
                         filter_shape=[2, 2, 512, 512],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 5, 26, 512],
                         padding='VALID',
                         name='L1')
        h = conv_layer(h, filter_shape=[3, 3, 512, 512], name='L2')
        h = conv_layer(h, filter_shape=[3, 3, 512, 512], name='L3')
        h = deconv_layer(h,
                         filter_shape=[2, 2, 512, 512],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 11, 52, 512],
                         padding='VALID',
                         name='L4')
        h = conv_layer(h, filter_shape=[3, 3, 512, 512], name='L5')
        h = conv_layer(h, filter_shape=[3, 3, 512, 512], name='L6')
        h = deconv_layer(h,
                         filter_shape=[2, 2, 256, 512],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 22, 105, 256],
                         padding='VALID',
                         name='L7')
        h = conv_layer(h, filter_shape=[3, 3, 256, 256], name='L8')
        h = conv_layer(h, filter_shape=[3, 3, 256, 256], name='L9')
        h = deconv_layer(h,
                         filter_shape=[2, 2, 128, 256],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 45, 210, 128],
                         padding='VALID',
                         name='L10')
        h = conv_layer(h, filter_shape=[3, 3, 128, 128], name='L11')
        h = conv_layer(h, filter_shape=[3, 3, 128, 128], name='L12')
        h = deconv_layer(h,
                         filter_shape=[3, 3, 64, 128],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 90, 420, 64],
                         name='L13')
        h = conv_layer(h, filter_shape=[3, 3, 64, 64], name='L14')
        h = conv_layer(h,
                       filter_shape=[3, 3, 64, self.output_shape[3]],
                       name='L15')
        h = h[:, 0:self.output_shape[1], 0:self.output_shape[2], :]

        return h
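The deconv_layer calls above are consistent with tf.nn.conv2d_transpose, whose filter layout is [height, width, out_channels, in_channels]; note how [2, 2, 256, 512] maps 512 input channels to the 256 channels named in output_shape. A minimal sketch of such a wrapper under that assumption (not the project's actual implementation; the initializer is a guess):

import tensorflow as tf

def deconv_layer(x, filter_shape, strides, output_shape, padding='SAME', name=None):
    with tf.variable_scope(name):
        w = tf.get_variable('w', filter_shape,
                            initializer=tf.glorot_uniform_initializer())
        batch = tf.shape(x)[0]  # resolve the -1 batch placeholder dynamically
        out_shape = tf.stack([batch] + output_shape[1:])
        return tf.nn.relu(tf.nn.conv2d_transpose(x, w, out_shape,
                                                 strides=strides, padding=padding))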
Example #4
def main():
    check = 0

    # main function
    if check == 0:
        # training data
        train_parser = data_loader.parse_cambridge()  # parser class
        # parse name list, label list, path list
        train_name_list, train_label_list, train_path_list = train_parser.label_extract('train')
        # read the data and concatenate
        train_parser.writing_data_to_tfrecord(train_path_list, 6, 'train')

        # test data
        test_parser = data_loader.parse_cambridge()
        # parse name list, label list, path list
        test_name_list, test_label_list, test_path_list = test_parser.label_extract('test')
        # read the data and concatenate
        test_parser.writing_data_to_tfrecord(test_path_list, 6, 'test')

        check = 1

    # re-read and parse the data

    batch_size = 5
    epoch_size = 10

    read_train_parser = data_loader.parse_cambridge()
    train_iterator = read_train_parser.read_data_from_tfrecode('train', batch_size, epoch_size, True) #, train_name
    train_next_element = train_iterator.get_next()


    read_test_parser = data_loader.parse_cambridge()
    test_iterator = read_test_parser.read_data_from_tfrecode('test', batch_size, epoch_size, False)  # , test_name
    test_next_element = test_iterator.get_next()

    #read_train_parser.preprocessing(train_image, train_label, train_channel, train_sample)

    #input_image = train_image
    #output_image = train_label

    #STEPS = 1000
    #MINIBATCH_SIZE = 10

    '''x = tf.placeholder(dtype=tf.float32,
                                      shape=[None, 60000],
                                      name='input')
    y_ = tf.placeholder(dtype=tf.int64,
                                      shape=[None, 1, 6],
                                      name='output')'''

    '''x_image = tf.reshape(x,[-1,1000,10,6],name='image_end')
   # batch_ind = tf.placeholder(dtype=tf.int64, shape =[1])'''
    x_image = train_next_element[0]
    y_ = train_next_element[1]

    conv1 = conv_layer(x_image, shape=[5, 5, 3, 32])
    conv1_pool = max_pool_2x2(conv1)

    conv2 = conv_layer(conv1_pool, shape=[3, 3, 32, 64])
    conv2_pool = max_pool_2x2(conv2)

    conv2_flat = tf.reshape(conv2_pool, [-1, 5 * 500 * 64])
    full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))

    keep_prob = tf.placeholder(tf.float32)
    full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)

    y_conv = full_layer(full1_drop, 6)

    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Example #5
File: rpn.py  Project: HAMMERMASH/myssd
def inference(feature, k):
    """
        rpn part net work

        Args:
            feature: a 4-D tensor [batch_size,height,width,n_channel] tensor, the output of last conv layer of feature extractor
            k: a scalar indicating number of predictions per cell

        Returns:
            rpn_obj: a 4-D tensor [batch_size,height,width,2*k], the prediction of objectness score
            rpn_box: a 4-D tensor [batch_size,height,width,4*k], the prediction of box coordinates
    """
    shape = feature.get_shape().as_list()

    rpn_conv = layer.conv_layer('rpn_conv', feature, [3, 3, shape[3], 256])
    rpn_obj = layer.conv_layer('rpn_obj_conv', rpn_conv, [1, 1, 256, 2 * k])
    rpn_box = layer.conv_layer('rpn_box_conv', rpn_conv, [1, 1, 256, 4 * k])

    return rpn_obj, rpn_box
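A hedged usage sketch for the RPN head above, assuming layer.conv_layer uses SAME padding so the spatial grid is preserved; with k = 9 anchors per cell, a (1, 38, 50, 512) feature map yields 2*9 = 18 objectness channels and 4*9 = 36 box channels:

feature = tf.placeholder(tf.float32, [1, 38, 50, 512])
rpn_obj, rpn_box = inference(feature, k=9)
print(rpn_obj.get_shape().as_list())  # [1, 38, 50, 18]
print(rpn_box.get_shape().as_list())  # [1, 38, 50, 36]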
Example #6
    def _forward_pass(self, x):
        # Decoder
        h = deconv_layer(x,
                         filter_shape=[3, 3, 1024, 2048],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 6, 28, 1024],
                         name='L1')
        h = conv_layer(h, filter_shape=[3, 3, 1024, 1024], name='L2')
        h = conv_layer(h, filter_shape=[3, 3, 1024, 1024], name='L3')
        h = deconv_layer(h,
                         filter_shape=[3, 3, 512, 1024],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 12, 56, 512],
                         name='L4')
        h = conv_layer(h, filter_shape=[3, 3, 512, 512], name='L5')
        h = conv_layer(h, filter_shape=[3, 3, 512, 512], name='L6')
        h = deconv_layer(h,
                         filter_shape=[3, 3, 256, 512],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 24, 112, 256],
                         name='L7')
        h = conv_layer(h, filter_shape=[3, 3, 256, 256], name='L8')
        h = conv_layer(h, filter_shape=[3, 3, 256, 256], name='L9')
        h = deconv_layer(h,
                         filter_shape=[3, 3, 128, 256],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 48, 224, 128],
                         name='L10')
        h = conv_layer(h, filter_shape=[3, 3, 128, 128], name='L11')
        h = conv_layer(h, filter_shape=[3, 3, 128, 128], name='L12')
        h = deconv_layer(h,
                         filter_shape=[3, 3, 64, 128],
                         strides=[1, 2, 2, 1],
                         output_shape=[-1, 96, 448, 64],
                         name='L13')
        h = conv_layer(h, filter_shape=[3, 3, 64, 64], name='L14')
        h = conv_layer(h,
                       filter_shape=[3, 3, 64, self.output_shape[3]],
                       name='L15')
        h = h[:, 0:self.output_shape[1], 0:self.output_shape[2], :]

        return h
Example #7
def inference(images, keep_prob):
    # images [N, 3, 33, 33]
    conv1 = conv_layer(images, 64, 'conv1')
    conv2 = conv_layer(conv1, 64, 'conv2')
    pool1 = max_pooling_layer(conv2, layer_name='max_pool_1') 

    conv3 = conv_layer(pool1, 128, 'conv3')
    conv4 = conv_layer(conv3, 128, 'conv4')
    pool2 = max_pooling_layer(conv4, layer_name='max_pool_2') 

    conv5 = conv_layer(pool2, 256, 'conv5')
    conv6 = conv_layer(conv5, 256, 'conv6')
    pool3 = max_pooling_layer(conv6, layer_name='max_pool_3')

    conv7 = conv_layer(pool3, 512, 'conv7')
    conv8 = conv_layer(conv7, 512, 'conv8')
    
    pool4 = max_pooling_layer(conv8, layer_name='max_pool_4') 
    '''
    Global average pooling
    gap = gap_layer(conv8, layer_name='global_avg_pool')
    fc = fc_layer(gap, 128, 'fc', use_DW=True)
    with tf.name_scope('dropout'):
        dropped = tf.nn.dropout(fc, keep_prob)
    logits = fc_layer(input_tensor=dropped, output_dim=2, layer_name='logits', act_ftn=tf.identity, use_DW=True)
    return logits
    '''
    flat = flatten(pool4)
    fc1 = fc_layer(input_tensor=flat, output_dim=512, layer_name='fc1')
    fc2 = fc_layer(input_tensor=fc1, output_dim=256, layer_name='fc2')
    fc3 = fc_layer(input_tensor=fc2, output_dim=128, layer_name='fc3')

    with tf.name_scope('dropout'):
        dropped = tf.nn.dropout(fc3, keep_prob)

    logits = fc_layer(input_tensor=dropped, output_dim=2, layer_name='logits', act_ftn=tf.identity)
    return logits
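The commented-out alternative above swaps the flatten plus three fc layers for global average pooling. A minimal sketch of what that gap_layer presumably does (an assumption; the project's real helper is not shown here): average each feature map over its spatial extent, leaving one value per channel.

def gap_layer(x, layer_name):
    with tf.name_scope(layer_name):
        # (N, H, W, C) -> (N, C): mean over the spatial axes
        return tf.reduce_mean(x, axis=[1, 2])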
Example #8
    def _forward_pass(self, x):
        # Encoder
        h1 = conv_layer(x, filter_shape=[3, 3, 3, 64],
                        name='L1')  # (90, 420, 64)
        h2 = conv_layer(h1, filter_shape=[3, 3, 64, 64], name='L2')
        h3 = pooling_layer(h2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           name='L3')  # (45, 210, 64)
        h4 = conv_layer(h3, filter_shape=[3, 3, 64, 256], name='L4')
        h5 = conv_layer(h4, filter_shape=[3, 3, 256, 256], name='L5')
        h6 = pooling_layer(h5,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           name='L6')  # (23, 105, 256)
        h7 = conv_layer(h6, filter_shape=[3, 3, 256, 512], name='L7')
        h8 = conv_layer(h7, filter_shape=[3, 3, 512, 512], name='L8')
        h9 = pooling_layer(h8,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           name='L9')  # (12, 53, 512)
        h10 = conv_layer(h9, filter_shape=[3, 3, 512, 1024], name='L10')
        h11 = conv_layer(h10, filter_shape=[3, 3, 1024, 1024], name='L11')
        # Decoder
        h12 = deconv_layer(h11,
                           filter_shape=[3, 3, 512, 1024],
                           strides=[1, 2, 2, 1],
                           output_shape=[-1, 24, 106, 512],
                           name='L12')
        h12 = tf.concat([h12[:, 0:-1, 0:-1, :], h8], axis=3)
        h13 = conv_layer(h12, filter_shape=[3, 3, 1024, 512], name='L13')
        h14 = conv_layer(h13, filter_shape=[3, 3, 512, 512], name='L14')
        h15 = deconv_layer(h14,
                           filter_shape=[3, 3, 256, 512],
                           strides=[1, 2, 2, 1],
                           output_shape=[-1, 46, 210, 256],
                           name='L15')
        h15 = tf.concat([h15[:, 0:-1, :, :], h5], axis=3)
        h16 = conv_layer(h15, filter_shape=[3, 3, 512, 256], name='L16')
        h17 = conv_layer(h16, filter_shape=[3, 3, 256, 256], name='L17')
        h18 = deconv_layer(h17,
                           filter_shape=[3, 3, 64, 256],
                           strides=[1, 2, 2, 1],
                           output_shape=[-1, 90, 420, 64],
                           name='L18')
        h18 = tf.concat([h18, h2], axis=3)
        h19 = conv_layer(h18, filter_shape=[3, 3, 128, 64], name='L19')
        h20 = conv_layer(h19, filter_shape=[3, 3, 64, 64], name='L20')
        h21 = conv_layer(h20,
                         filter_shape=[1, 1, 64, self.output_shape[3]],
                         name='L21',
                         non_linear=None)
        return h21
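The crops before each tf.concat follow from the shape comments above: a stride-2 transposed convolution produces even spatial sizes, so odd-sized encoder maps must be matched by trimming one row or column.

# skip-connection shape bookkeeping (inferred from the inline comments):
#   L12: (12, 53)  -> deconv -> (24, 106); h8 is (23, 105) -> crop both axes
#   L15: (23, 105) -> deconv -> (46, 210); h5 is (45, 210) -> crop height only
#   L18: (45, 210) -> deconv -> (90, 420); h2 is (90, 420) -> no crop needed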
Example #9
def vgg16(image):

    conv1_1 = layer.conv_layer('conv1_1',image,[3,3,3,64])
    conv1_2 = layer.conv_layer('conv1_2',conv1_1,[3,3,64,64])
    pool1 = layer.pool_layer('pool1',conv1_2)
    conv2_1 = layer.conv_layer('conv2_1',pool1,[3,3,64,128])
    conv2_2 = layer.conv_layer('conv2_2',conv2_1,[3,3,128,128])
    pool2 = layer.pool_layer('pool2',conv2_2)
    conv3_1 = layer.conv_layer('conv3_1',pool2,[3,3,128,256])
    conv3_2 = layer.conv_layer('conv3_2',conv3_1,[3,3,256,256])
    conv3_3 = layer.conv_layer('conv3_3',conv3_2,[3,3,256,256])
    pool3 = layer.pool_layer('pool3',conv3_3)
    conv4_1 = layer.conv_layer('conv4_1',pool3,[3,3,256,512])
    conv4_2 = layer.conv_layer('conv4_2',conv4_1,[3,3,512,512])
    conv4_3 = layer.conv_layer('conv4_3',conv4_2,[3,3,512,512])
    pool4 = layer.pool_layer('pool4',conv4_3)
    conv5_1 = layer.conv_layer('conv5_1',pool4,[3,3,512,512])
    conv5_2 = layer.conv_layer('conv5_2',conv5_1,[3,3,512,512])
    conv5_3 = layer.conv_layer('conv5_3',conv5_2,[3,3,512,512])

    return conv5_3
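A hedged usage sketch: assuming layer.conv_layer uses SAME padding and layer.pool_layer is a stride-2 max pool, a standard 224x224x3 VGG input is halved by each of the four pools, so conv5_3 comes out as a 14x14x512 feature map:

image = tf.placeholder(tf.float32, [None, 224, 224, 3])
features = vgg16(image)
print(features.get_shape().as_list())  # [None, 14, 14, 512]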
Example #10
def build_wide_resnet(x, num_classes, N, k, block, prob=None):
    channels = [3, 16, 16 * k, 32 * k, 64 * k]
    layers = []

    # conv1
    # conv1 = layer.bn_relu_conv(x, "conv1", channels[0], channels[1], 3)
    conv1 = layer.conv_bn_relu(x, "conv1", channels[0], channels[1], 3)
    layers.append(conv1)

    # conv2
    # 1st
    before20 = layers[-1]
    conv20 = layer.conv_layer(before20, "conv20", [1, 1, channels[1], channels[2]])
    # conv20b = block(before20, "conv20b", prob, channels[1], channels[2]) if block is dropout else block(before20, "conv20b", channels[1], channels[2])
    conv20b_ = layer.conv_bn_relu(before20, "conv20b_", channels[1], channels[2], 3)
    conv20b = layer.conv_layer(conv20b_, "conv20b", [3, 3, channels[2], channels[2]])
    output20 = layer.bn_relu(conv20 + conv20b, "output20")
    layers.append(output20)

    # others
    for n in range(1, N):
        before2n = tf.identity(layers[-1])
        # conv2n = layer.conv_layer(before2n, "conv2%d" % n, [3, 3, channels[2], channels[2]])
        conv2nb = block(layers[-1], "conv2%db" % n, prob, channels[2], channels[2]) if block is dropout else block(layers[-1], "conv2%db" % n, channels[2], channels[2])
        output2n = layer.bn_relu(before2n + conv2nb, "output2%d" % n)
        layers.append(output2n)

    # downsampling0
    #downsampling0 = layer.avg_pool_layer(layers[-1], "downsampling0", [1, 2, 2, 1])
    downsampling0 = layer.max_pool_layer(layers[-1], "downsampling0", [1, 2, 2, 1])
    layers.append(downsampling0)

    # conv3
    # 1st
    before30 = layers[-1]
    conv30 = layer.conv_layer(before30, "conv30", [1, 1, channels[2], channels[3]])
    # conv30b = block(before30, "conv30b", prob, channels[2], channels[3]) if block is dropout else block(before30, "conv30b", channels[2], channels[3])
    conv30b_ = layer.conv_bn_relu(before30, "conv30b_", channels[2], channels[3], 3)
    conv30b = layer.conv_layer(conv30b_, "conv30b", [3, 3, channels[3], channels[3]])
    output30 = layer.bn_relu(conv30 + conv30b, "output30")
    layers.append(output30)

    # others
    for n in range(1, N):
        before3n = tf.identity(layers[-1])
        # conv3n = layer.conv_layer(before3n, "conv3%d" % n, [3, 3, channels[3], channels[3]])
        conv3nb = block(layers[-1], "conv3%db" % n, prob, channels[3], channels[3]) if block is dropout else block(layers[-1], "conv3%db" % n, channels[3], channels[3])
        output3n = layer.bn_relu(before3n + conv3nb, "output3%d" % n)
        layers.append(output3n)

    # downsampling1
    #downsampling1 = layer.avg_pool_layer(layers[-1], "downsampling1", [1, 2, 2, 1])
    downsampling1 = layer.max_pool_layer(layers[-1], "downsampling1", [1, 2, 2, 1])
    layers.append(downsampling1)

    # conv4
    # 1st
    before40 = layers[-1]
    conv40 = layer.conv_layer(before40, "conv40", [1, 1, channels[3],channels[4]])
    # conv40b = block(before40, "conv40b", prob, channels[3], channels[4]) if block is dropout else block(before40, "conv40b", channels[3], channels[4])
    conv40b_ = layer.conv_bn_relu(before40, "conv40b_", channels[3], channels[4], 3)
    conv40b = layer.conv_layer(conv40b_, "conv40b", [3, 3, channels[4], channels[4]])
    output40 = layer.bn_relu(conv40 + conv40b, "output40")
    layers.append(output40)

    # others
    for n in range(1, N):
        before4n = tf.identity(layers[-1])
        # conv4n = layer.conv_layer(before4n, "conv4%d" % n, [3, 3, channels[4], channels[4]])
        conv4nb = block(layers[-1], "conv4%db" % n, prob, channels[4], channels[4]) if block is dropout else block(layers[-1], "conv4%db" % n, channels[4], channels[4])
        output4n = layer.bn_relu(before4n + conv4nb, "output4%d" % n)
        layers.append(output4n)

    # avg pooling
    avg_pool = layer.avg_pool_layer(layers[-1], name="avg_pool", pooling_size=[1, 8, 8, 1])
    layers.append(avg_pool)

    # flatten and fully connected
    flatten = layer.flatten_layer(layers[-1])
    fc = layer.fc_layer(flatten, num_classes, "fc")
    layers.append(fc)

    sm = tf.nn.softmax(layers[-1], name="prediction")
    layers.append(sm)

    return layers[-1]
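A hypothetical invocation for CIFAR-sized inputs; basic_block is a stand-in name for one of the project's residual-block builders (the dropout variant would also take prob), and N and k set the blocks per stage and the width multiplier:

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
probs = build_wide_resnet(x, num_classes=10, N=4, k=10, block=basic_block)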
Example #11
def main():

    ## ---------------------------------------------------------------------------------------------------------------------
    ## hyperparameters

    class_num = 6
    batch_size_in = 10
    batch_size = 64
    batch_label = 512
    height_ = 200
    width_ = 9

    ## ---------------------------------------------------------------------------------------------------------------------
    ## Save dir
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    board_logdir = "C:\\Users\\SNUGL\\tensorboard\\"
    ckpt_logdir = "D:\\dev\\jejucamp-seoyeon\\classification\\model_ckpt\\"

    ## ---------------------------------------------------------------------------------------------------------------------
    ## data checking

    check = 1   # 0 if the data has not been saved yet, 1 if it has already been saved
    check2 = 0  # 0 to skip checkpoint loading, 1 to restore from a checkpoint
    ## ---------------------------------------------------------------------------------------------------------------------
    ## data loader

    if check == 0:
        # training data
        train_parser = data_loader2.parse_cambridge()  # parser class
        # parse name list, label list, path list
        train_name_list, train_label_list, train_path_list = train_parser.label_extract('train')
        # read the data and concatenate
        train_data, train_label = train_parser.loading_data(train_path_list, height_, width_, None, 'train')
        np.save('train_data_2', train_data)
        np.save('train_label_2', train_label)

        # test data
        test_parser = data_loader2.parse_cambridge()
        # parse name list, label list, path list
        test_name_list, test_label_list, test_path_list = test_parser.label_extract('test')
        # read the data and concatenate
        test_data, test_label = test_parser.loading_data(test_path_list, height_, width_, None, 'test')
        np.save('test_data_2', test_data)
        np.save('test_label_2', test_label)
        check = 2

    if check == 1:
        train_data = np.load('train_data_2.npy')
        train_label = np.load('train_label_2.npy')
        test_data = np.load('test_data_2.npy')
        test_label = np.load('test_label_2.npy')
        check = 2

    ## ---------------------------------------------------------------------------------------------------------------------
    ## check point loading

    ## ---------------------------------------------------------------------------------------------------------------------
    ## config env


    # training constant config

    epochs = 1000
    iter_ = 1
    lr = .0001
    num_end = []
    for w in range(64):
        num_end.append(height_)
    total_sample = np.shape(train_data)[0]
    total_batch = total_sample // batch_size
    print(epochs)

    ## ---------------------------------------------------------------------------------------------------------------------
    ## model defined

    jeju_graph = tf.Graph()

    with jeju_graph.as_default():

        lstm_model = lstm.LstmModel()
        # input : A 'batch_size' x 'max_frames' x 'num_features'
        # max_frames : time series,
        # num_feature : contributes

        x = tf.placeholder(dtype=tf.float32,
                           shape=[None, height_, width_],
                           name='input')
        # note: x is rebound to its reshaped view; the feeds below supply
        # batches already shaped [1, 64, height_, width_]
        x = tf.reshape(x, [1, 64, height_, width_])
        y = tf.placeholder(dtype=tf.int64,
                           shape=[None, class_num],
                           name='output')

        conv1 = conv_layer(x, shape=[3, 3, 9, 9])
        conv1_pool = max_pool_3x3(conv1)

        conv2 = conv_layer(conv1_pool, shape=[3, 3, 9, 9])
        conv2_pool = max_pool_3x3(conv2)

        conv3 = conv_layer(conv2_pool, shape=[5, 5, 9, 9])
        conv3_pool = max_pool_3x3(conv3)

        conv3_pool = tf.reshape(conv3_pool,[-1, height_, width_])

        with tf.name_scope('output'):
            output = lstm_model.create_model(conv3_pool, class_num, num_end)
            tf.summary.histogram('output',output)

        # create training op
        opt = tf.train.AdamOptimizer(learning_rate=lr)
        with tf.name_scope('loss_l'):
            loss_l = tf.losses.softmax_cross_entropy(onehot_labels=y, logits=output) # cross_entroy : classification , l2_loss : regression
            tf.summary.scalar('loss_l',loss_l)
        training_op = opt.minimize(loss=loss_l)

        # prediction
        pred = tf.argmax(output, 1)
        pred_y = tf.argmax(y, 1)
        diff = tf.equal(pred, pred_y)
        hey = tf.cast(diff, tf.float32)
        predict_accuracy = tf.reduce_mean(hey)
        with tf.name_scope('accuracy'):
            tf.summary.scalar('accuracy',predict_accuracy)

        merged = tf.summary.merge_all()

    ## ---------------------------------------------------------------------------------------------------------------------
    ## saver

        tf_saver = tf.train.Saver() #max_to_keep=7, keep_checkpoint_every_n_hours=1

    ## ---------------------------------------------------------------------------------------------------------------------
    ## Tensorboard

    train_writer = tf.summary.FileWriter(logdir=board_logdir + '/train', graph = jeju_graph)
    test_writer = tf.summary.FileWriter(logdir=board_logdir + '/test', graph=jeju_graph)
    ## ---------------------------------------------------------------------------------------------------------------------
    # session
    with tf.Session(graph=jeju_graph) as sess:
        sess.run(tf.global_variables_initializer())
        if check2 == 1:
            # restore after initialization so the checkpoint weights are not overwritten
            tf_saver.restore(sess, os.path.join("D:\\dev\\jejucamp-seoyeon\\classification\\model_ckpt", "ckpt-216310"))

        # prepare session
        tr_loss_hist = []
        global_step = 0
        save_accuracy = []
        test_len = test_data.shape[0]
        train_len = train_data.shape[0]
        display_num = 0
        sum_accuracy = 0

        for e in range(epochs) :

            for i in range(total_batch) :
                random_index = random.randrange(0, test_len - batch_size)
                batch_validation_data = test_data[random_index:random_index + batch_size, ...]
                batch_validation_label = test_label[random_index:random_index + batch_size, ...]
                data_start_index = i * batch_size
                data_end_index = (i + 1) * batch_size

                batch_train_data = train_data[data_start_index:data_end_index, ...]
                batch_train_label = train_label[data_start_index:data_end_index]
                batch_train_data = batch_train_data.reshape([batch_size, height_, width_])
                avg_tr_loss = 0
                tr_step = 0
                try:
                    for h in range(iter_):  # keep the number of training steps per batch fixed
                        batch_train_data = batch_train_data.reshape([1, 64, 200, 9])
                        _, tr_loss, summary1 = sess.run(fetches=[training_op, loss_l, merged],
                                                        feed_dict={x: batch_train_data,
                                                                   y: batch_train_label})
                        #print('epoch : {:3}, batch_step : {:3}, tr_step : {:3}, tr_loss : {:.5f}'.format(e + 1, i +1, tr_step + 1, tr_loss))
                        avg_tr_loss += tr_loss
                        tr_step += 1
                        global_step += 1

                except tf.errors.OutOfRangeError:
                    pass

                avg_tr_loss /= tr_step
                tr_loss_hist.append(avg_tr_loss)
                batch_validation_data = batch_validation_data.reshape([1, 64, 200, 9])
                acc = predict_accuracy.eval(feed_dict={x: batch_validation_data,
                                                        y: batch_validation_label}, session=sess)
                summary2 = merged.eval(feed_dict={x: batch_validation_data,
                                                        y: batch_validation_label}, session=sess)
                display_num = display_num + 1

                sum_accuracy = sum_accuracy + acc
                if display_num % 10 == 0:
                    avg_save_accuracy = sum_accuracy/display_num
                    print('epoch : {:3}, batch_step : {:3}/{:3}, avg_tr_loss : {:.5f}, prediction : {:.5f}'.format(e + 1,
                                                                                                               i + 1,
                                                                                                               total_batch,
                                                                                                               avg_tr_loss,
                                                                                                               acc))

                    tf_saver.save(sess, os.path.join(ckpt_logdir,"ckpt"), global_step=global_step)
                    display_num = 0
                    sum_accuracy = 0
                    save_accuracy.append(avg_save_accuracy)
                    train_writer.add_summary(summary1, global_step)
                    test_writer.add_summary(summary2, global_step)




    ## ---------------------------------------------------------------------------------------------------------------------
    ## plotting

    # plt.plot(tr_loss_hist, label='train')