Example #1
    def __init__(self, n_cells, n_x, n_h, initializer=None):
        with tf.name_scope("Weight_Matrices"):
            params = utils.read_params()
            # class variables
            self.n_cells = n_cells

            if initializer is None:
                init = tf.contrib.layers.xavier_initializer()
            else:
                init = initializer

            with tf.name_scope("x_list"):
                x_list = []
                for x in range(self.n_cells):
                    with tf.name_scope("y_list"):
                        y_list = []
                        for y in range(self.n_cells):
                            z_list = []
                            with tf.name_scope("z_list"):
                                for z in range(self.n_cells):
                                    name = "W_{}{}{}".format(x, y, z)
                                    W = tf.Variable(init([n_x, n_h]),
                                                    name=name)

                                    if params["VIS"]["HISTOGRAMS"]:
                                        tf.summary.histogram(name, W)
                                    z_list.append(W)
                            y_list.append(z_list)
                    x_list.append(y_list)

            self.weight_matrix_grid = x_list
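The grid built above is a plain nested Python list holding n_cells x n_cells x n_cells weight matrices, each of shape [n_x, n_h]. A minimal NumPy stand-in (not the repo's code) that sketches the same structure and checks its dimensions:

# hypothetical shape-check sketch; NumPy arrays stand in for the tf.Variables above
import numpy as np

n_cells, n_x, n_h = 4, 1024, 128
grid = [[[np.zeros((n_x, n_h)) for _ in range(n_cells)]
         for _ in range(n_cells)]
        for _ in range(n_cells)]
print(len(grid), len(grid[0]), len(grid[0][0]))  # 4 4 4 cells
print(grid[0][0][0].shape)                       # (1024, 128) per-cell weight matrix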
Example #2
    def __init__(self,
                 n_cells=4,
                 n_input=1024,
                 n_hidden_state=128,
                 initializer=None):
        with tf.name_scope("LSTM_Grid"):
            if initializer is None:
                init = tf.contrib.layers.xavier_initializer()
            else:
                init = initializer

            # parameters for the cell state and the input, forget & output gates;
            # build four distinct parameter sets (a `[...] * 4` would alias one object)
            self.W = [
                Weight_Matrices(
                    n_cells, n_input, n_hidden_state, initializer=init)
                for _ in range(4)
            ]
            self.U = [
                tf.Variable(init([3, 3, 3, n_hidden_state, n_hidden_state]),
                            name="U_{}".format(i)) for i in range(4)
            ]
            self.b = [
                tf.Variable(init([n_cells, n_cells, n_cells, n_hidden_state]),
                            name="b_{}".format(i)) for i in range(4)
            ]

            params = utils.read_params()
            if params["VIS"]["HISTOGRAMS"]:
                for i in range(4):
                    tf.summary.histogram("U[{}]".format(i), self.U[i])
                    tf.summary.histogram("b[{}]".format(i), self.b[i])
def fully_connected_sequence(sequence,
                             in_units=1024,
                             out_units=1024,
                             initializer=None):
    with tf.name_scope("fully_connected_sequence"):
        if initializer is None:
            init = tf.contrib.layers.xavier_initializer()
        else:
            init = initializer

        weights = tf.Variable(init([in_units, out_units]), name="weights")
        bias = tf.Variable(init([out_units]), name="bias")

        def forward_pass(a):
            return tf.nn.bias_add(tf.matmul(a, weights), bias)

        ret = tf.map_fn(forward_pass, sequence, name='fully_connected_map')
        #ret = map_images(forward_pass, sequence, name='fully_connected_map')

        params = utils.read_params()
        if params["VIS"]["HISTOGRAMS"]:
            tf.summary.histogram("weights", weights)
            tf.summary.histogram("bias", bias)
        if params["VIS"]["SHAPES"]:
            print(ret.shape)

    return ret
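A hedged usage sketch for fully_connected_sequence, assuming TF 1.x and a sequence laid out as [n_views, n_batch, in_units] (tf.map_fn maps over the leading axis); the 24-view count is purely illustrative:

# hypothetical usage; assumes the repo's utils.read_params() config is importable
import tensorflow as tf

encoded_seq = tf.placeholder(tf.float32, [24, None, 1024], name="encoded_sequence")
fc_seq = fully_connected_sequence(encoded_seq, in_units=1024, out_units=1024)
# fc_seq keeps the leading view axis: shape (24, ?, 1024)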
def voxel(vox, color=None, f_name=None, npimage=False, view=(30, 45)):
    assert (vox.ndim == 3)

    vox = vox.transpose(2, 0, 1)
    if color is None or len(np.unique(color)) <= 2:
        # no per-voxel colors supplied (or a binary grid): fall back to a flat color
        color = 'red'
    else:
        color = color.transpose(2, 0, 1)  # transpose only when a color volume exists
        color_map = plt.get_cmap('coolwarm')
        color = color_map(color)

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    #ax.voxels(vox, facecolors=color, edgecolor='k')
    ax.voxels(vox, edgecolor='k')
    ax.view_init(view[0], view[1])

    if npimage:
        return mplfig_to_npimage(fig)

    if f_name is not None:
        params = utils.read_params()
        f_name = os.path.join(params["DIRS"]["OUTPUT"], f_name)
        utils.make_prev_dirs(f_name)
        fig.savefig(f_name, bbox_inches='tight', dpi=2400)
        fig.clf()
        plt.close()
        return

    return fig.show()
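A minimal usage sketch for voxel, assuming matplotlib with the mplot3d toolkit; the random occupancy grid is purely illustrative:

# hypothetical usage; displays a random 32^3 occupancy grid interactively
import numpy as np

occupancy = np.random.rand(32, 32, 32) > 0.95   # sparse binary voxel grid
voxel(occupancy)                                # color=None falls back to 'red'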
def conv_sequence(sequence,
                  in_featuremap_count,
                  out_featuremap_count,
                  initializer=None,
                  K=3,
                  S=[1, 1, 1, 1],
                  D=[1, 1, 1, 1],
                  P="SAME"):
    with tf.name_scope("conv_sequence"):
        if initializer is None:
            init = tf.contrib.layers.xavier_initializer()
        else:
            init = initializer

        kernel = tf.Variable(init(
            [K, K, in_featuremap_count, out_featuremap_count]),
                             name="kernel")
        bias = tf.Variable(init([out_featuremap_count]), name="bias")

        def conv2d(x):
            return tf.nn.bias_add(
                tf.nn.conv2d(x,
                             kernel,
                             S,
                             padding=P,
                             dilations=D,
                             name="conv2d"), bias)

        ret = tf.map_fn(conv2d, sequence, name="conv2d_map")
        #ret = map_images(conv2d, sequence, name='conv2d_map')

        tf.add_to_collection("feature_maps", ret)

        # visualization code
        params = utils.read_params()
        image_count = params["VIS"]["IMAGE_COUNT"]
        if params["VIS"]["KERNELS"]:
            kern_1 = tf.concat(tf.unstack(kernel, axis=-1), axis=-1)
            kern_2 = tf.transpose(kern_1, [2, 0, 1])
            kern_3 = tf.expand_dims(kern_2, -1)
            tf.summary.image("2d kernel", kern_3, max_outputs=image_count)

        if params["VIS"]["FEATURE_MAPS"]:
            feature_map_1 = tf.concat(tf.unstack(ret, axis=4), axis=2)
            feature_map_2 = tf.concat(tf.unstack(feature_map_1, axis=1),
                                      axis=2)
            feature_map_3 = tf.expand_dims(feature_map_2, -1)
            tf.summary.image("feature_map",
                             feature_map_3,
                             max_outputs=image_count)

        if params["VIS"]["HISTOGRAMS"]:
            tf.summary.histogram("kernel", kernel)
            tf.summary.histogram("bias", bias)

        if params["VIS"]["SHAPES"]:
            print(ret.shape)
    return ret
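A hedged usage sketch for conv_sequence, again assuming the [n_views, n_batch, H, W, C] layout implied by mapping over the leading axis; the concrete sizes are illustrative:

# hypothetical usage; assumes TF 1.x and the repo's utils.read_params() config
import tensorflow as tf

image_seq = tf.placeholder(tf.float32, [24, 2, 127, 127, 3], name="image_sequence")
feature_seq = conv_sequence(image_seq, in_featuremap_count=3, out_featuremap_count=64)
# feature_seq: shape (24, 2, 127, 127, 64) with "SAME" padding and stride 1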
Example #6
def conv_vox(vox,
             in_featurevoxel_count,
             out_featurevoxel_count,
             K=3,
             S=[1, 1, 1, 1, 1],
             D=[1, 1, 1, 1, 1],
             initializer=None,
             P="SAME"):
    # 3D convolution over a feature voxel grid
    with tf.name_scope("conv_vox"):
        if initializer is None:
            init = tf.contrib.layers.xavier_initializer()
        else:
            init = initializer

        kernel = tf.Variable(init(
            [K, K, K, in_featurevoxel_count, out_featurevoxel_count]),
                             name="kernel")
        bias = tf.Variable(init([out_featurevoxel_count]), name="bias")
        ret = tf.nn.bias_add(
            tf.nn.conv3d(vox, kernel, S, padding=P, dilations=D,
                         name="conv3d"), bias)
        tf.add_to_collection("feature_voxels", ret)

        # visualization code
        params = utils.read_params()
        image_count = params["VIS"]["IMAGE_COUNT"]
        if params["VIS"]["KERNELS"]:
            kern_1 = tf.concat(tf.unstack(kernel, axis=-1), axis=-1)
            kern_2 = tf.transpose(kern_1, [3, 0, 1, 2])
            kern_3 = tf.expand_dims(kern_2, -1)
            kern_4 = tf.concat(tf.unstack(kern_3, axis=1), axis=1)
            tf.summary.image("3d kernel", kern_4, max_outputs=image_count)

        if params["VIS"]["VOXEL_SLICES"]:
            vox_slice_1 = tf.unstack(ret, axis=4)[1]
            vox_slice_2 = tf.split(vox_slice_1, 4, axis=3)
            vox_slice_3 = tf.concat(vox_slice_2, axis=1)
            vox_slice_4 = tf.concat(tf.unstack(vox_slice_3, axis=-1), axis=2)
            vox_slice_5 = tf.expand_dims(vox_slice_4, -1)
            tf.summary.image("vox_slices",
                             vox_slice_5,
                             max_outputs=image_count)

        if params["VIS"]["FEATURE_VOXELS"]:
            tf.summary.tensor_summary("feature_voxels", ret[0, :, :, :, 0])

        if params["VIS"]["HISTOGRAMS"]:
            tf.summary.histogram("kernel", kernel)
            tf.summary.histogram("bias", bias)

        if params["VIS"]["SHAPES"]:
            print(ret.shape)

    return ret
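A usage sketch for conv_vox matching the call made in the retrain() snippet further below (decoder.conv_vox(freeze_layer, 2, 2)); the voxel resolution here is illustrative:

# hypothetical usage; a 32^3 two-channel feature voxel mapped to two output channels
import tensorflow as tf

feature_vox = tf.placeholder(tf.float32, [None, 32, 32, 32, 2], name="feature_vox")
logits_vox = conv_vox(feature_vox, in_featurevoxel_count=2, out_featurevoxel_count=2)
# logits_vox: shape (?, 32, 32, 32, 2) with "SAME" padding and unit strides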
def create_video(obj_id="02691156_131db4a650873babad3ab188d086d4db"):

    params = utils.read_params()
    out_dir = params["DIRS"]["OUTPUT"]
    model_dir = params["SESSIONS"]["LONGEST"]
    epoch_count = utils.get_latest_epoch_index(model_dir) + 1

    x, _ = dataset.load_obj_id(obj_id)
    for i in range(epoch_count):
        net = network.Network_restored("{}/epoch_{}".format(model_dir, i))
        yp = net.predict(x)
        voxel_binary(yp[0], f_name="{}/{}/frame_{}".format(out_dir, obj_id, i))
def save_im(im, f_name=None, ndarray=False):
    fig = plt.figure()
    if ndarray:
        # render the image into the figure before grabbing the canvas buffer
        plt.imshow(im)
        fig.set_tight_layout(True)
        fig.canvas.draw()
        ret = np.array(fig.canvas.renderer._renderer)
        fig.clf()
        plt.close()
        return ret

    if f_name is not None:
        params = utils.read_params()
        f_name = os.path.join(params["DIRS"]["OUTPUT"], f_name)
        utils.make_prev_dirs(f_name)
        plt.imsave(f_name, im)
        plt.clf()
        plt.close()

    return plt.imshow(im)
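A minimal usage sketch for save_im, assuming the repo's params["DIRS"]["OUTPUT"] points at a writable directory:

# hypothetical usage; writes a random image under the configured output directory
import numpy as np

img = np.random.rand(127, 127, 3)
save_im(img, f_name="demo/random_image.png")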
Example #9
    def save_loss(loss_arr, loss_type):
        loss_ndarr = np.array(loss_arr)
        save_dir = net.get_cur_epoch_dir()
        np.save("{}/{}_loss.npy".format(save_dir, loss_type), loss_ndarr)

    def plot_loss(loss_arr, loss_type):
        loss_ndarr = np.array(loss_arr)
        save_dir = net.get_cur_epoch_dir()
        plt.plot(loss_ndarr.flatten())
        plt.savefig("{}/{}_loss.png".format(save_dir, loss_type),
                    bbox_inches="tight")
        plt.close()

    # params on disk
    print(utils.read_params()["TRAIN"])

    # get preprocessed data
    data, label = dataset.load_preprocessed_dataset()

    # init network
    net = network.Network()
    # net.init_parameters()

    params = net.get_params()
    train_params = params["TRAIN"]

    # split dataset
    X_train, y_train, X_val, y_val, X_test, y_test = dataset.train_val_test_split(
        data, label)
    save_dataset_split()
    def retrain(self, params=None):
        # read params
        if params is None:
            self.params = utils.read_params()
        else:
            self.params = params

        self.CREATE_TIME = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        self.MODEL_DIR = "{}/model_{}".format(
            self.params["DIRS"]["MODELS_LOCAL"], self.CREATE_TIME)
        utils.make_dir(self.MODEL_DIR)

        with open(self.MODEL_DIR + '/params.json', 'w') as f:
            json.dump(self.params, f)

        #with tf.get_default_graph() as graph:
        #with self.graph.as_default() as graph:
        #graph = self.sess.graph
        graph = tf.get_default_graph()
        #with tf.Graph().as_default():
        if True:  # placeholder block: keeps the indentation of the original graph context
            self.X = graph.get_tensor_by_name('Data/Placeholder:0')
            self.Y_onehot = graph.get_tensor_by_name('Labels/Placeholder:0')
            self.LR = graph.get_tensor_by_name('LearningRate/Placeholder:0')

            # TODO: read the network name instead of hard-coding the tensor name
            #self.logits = graph.get_tensor_by_name('SENet_Decoder/conv_vox/BiasAdd:0')
            freeze_layer = graph.get_tensor_by_name(
                'SENet_Decoder/conv_vox/BiasAdd:0')
            #freeze_layer = graph.get_tensor_by_name('SENet_Decoder/block_seresnet_decoder_4/relu_vox_1/relu:0')
            # stop the gradient and treat the frozen layer as an identity
            freeze_layer = tf.stop_gradient(freeze_layer)
            self.logits = decoder.conv_vox(freeze_layer, 2, 2)

            #hidden_state = graph.get_tensor_by_name('Recurrent_module/while/Exit:0')
            ## decoder
            #print("decoder")
            #if isinstance(hidden_state, tuple):
            #    hidden_state = hidden_state[0]
            #if self.params["TRAIN"]["DECODER_MODE"] == "DILATED":
            #    de = decoder.Dilated_Decoder(hidden_state)
            #elif self.params["TRAIN"]["DECODER_MODE"] == "RESIDUAL":
            #    de = decoder.Residual_Decoder(hidden_state)
            #elif self.params["TRAIN"]["DECODER_MODE"] == "SERESNET":
            #    de = decoder.SENet_Decoder(hidden_state)
            #else:
            #    de = decoder.Simple_Decoder(hidden_state)
            #self.logits = de.out_tensor

            print("loss")
            #self.softmax = graph.get_tensor_by_name('Loss_Voxel_Softmax/clip_by_value:0')
            #self.loss = graph.get_tensor_by_name('Loss_Voxel_Softmax/Mean:0')
            if self.params["TRAIN"]["LOSS_FCN"] == "FOCAL_LOSS":
                voxel_loss = loss.Focal_Loss(self.Y_onehot, self.logits)
                self.softmax = voxel_loss.pred
            else:
                voxel_loss = loss.Voxel_Softmax(self.Y_onehot, self.logits)
                self.softmax = voxel_loss.softmax
            self.loss = voxel_loss.loss
            tf.summary.scalar("loss", self.loss)

            # misc
            print("misc")
            t = tf.constant(self.params["TRAIN"]["TIME_STEP_COUNT"])
            #self.step_count = graph.get_tensor_by_name('misc_2/step_count:0')
            #self.print = graph.get_tensor_by_name('misc_2/Print:0')
            with tf.name_scope("misc"):
                self.step_count = tf.Variable(0,
                                              trainable=False,
                                              name="step_count")
                self.print = tf.Print(self.loss,
                                      [self.step_count, self.loss, t])

            # optimizer
            print("optimizer")
            #optimizer = graph.get_tensor_by_name('Adam:0')
            if self.params["TRAIN"]["OPTIMIZER"] == "ADAM":
                optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.LR,
                    epsilon=self.params["TRAIN"]["ADAM_EPSILON"],
                    name='NewAdam')
                tf.summary.scalar("adam_learning_rate", optimizer._lr)
            else:
                optimizer = tf.train.GradientDescentOptimizer(
                    learning_rate=self.LR)
                tf.summary.scalar("learning_rate", optimizer._learning_rate)
            grads_and_vars = optimizer.compute_gradients(self.loss)
            self.apply_grad = optimizer.apply_gradients(
                grads_and_vars, global_step=self.step_count)

            ## metric
            print("metrics")
            with tf.name_scope("newmetrics"):
                Y = tf.argmax(self.Y_onehot, -1)
                predictions = tf.argmax(self.softmax, -1)
                acc, acc_op = tf.metrics.accuracy(Y, predictions)
                rms, rms_op = tf.metrics.root_mean_squared_error(
                    self.Y_onehot, self.softmax)
                iou, iou_op = tf.metrics.mean_iou(Y, predictions, 2)
                self.metrics_op = tf.group(acc_op, rms_op, iou_op)

            tf.summary.scalar("accuracy", acc)
            tf.summary.scalar("rmse", rms)
            tf.summary.scalar("iou", iou)

            # initialize
            # config=tf.ConfigProto(log_device_placement=True)
            print("setup")
            self.summary_op = tf.summary.merge_all()

            #self.sess = tf.InteractiveSession()
            def initialize_uninitialized(sess):
                global_vars = tf.global_variables()
                is_not_initialized = sess.run(
                    [tf.is_variable_initialized(var) for var in global_vars])
                not_initialized_vars = [
                    v for (v, f) in zip(global_vars, is_not_initialized)
                    if not f
                ]
                print("-->", len(global_vars), len(is_not_initialized),
                      len(not_initialized_vars))

                if len(not_initialized_vars):
                    sess.run(tf.variables_initializer(not_initialized_vars))

            initialize_uninitialized(self.sess)
            print('trainable vars:', len(tf.trainable_variables()))

            self.sess.run(tf.local_variables_initializer())
            # summaries
            print("summaries")
            if self.params["MODE"] == "TEST":
                self.test_writer = tf.summary.FileWriter(
                    "{}/test".format(self.MODEL_DIR), self.sess.graph)
            else:
                self.train_writer = tf.summary.FileWriter(
                    "{}/train".format(self.MODEL_DIR), self.sess.graph)
                self.val_writer = tf.summary.FileWriter(
                    "{}/val".format(self.MODEL_DIR), self.sess.graph)
    def __init__(self, params=None):
        # read params
        if params is None:
            self.params = utils.read_params()
        else:
            self.params = params

        if self.params["TRAIN"]["INITIALIZER"] == "XAVIER":
            init = tf.contrib.layers.xavier_initializer()
        else:
            init = tf.random_normal_initializer()

        self.CREATE_TIME = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        self.MODEL_DIR = "{}/model_{}".format(
            self.params["DIRS"]["MODELS_LOCAL"], self.CREATE_TIME)
        utils.make_dir(self.MODEL_DIR)

        with open(self.MODEL_DIR + '/params.json', 'w') as f:
            json.dump(self.params, f)

        # placeholders
        with tf.name_scope("Data"):
            self.X = tf.placeholder(tf.float32, [None, None, None, None, None])
        with tf.name_scope("Labels"):
            if "64" in self.params["TRAIN"]["DECODER_MODE"]:
                self.Y_onehot = tf.placeholder(tf.float32,
                                               [None, 64, 64, 64, 2])
            else:
                self.Y_onehot = tf.placeholder(tf.float32,
                                               [None, 32, 32, 32, 2])
        with tf.name_scope("LearningRate"):
            self.LR = tf.placeholder(tf.float32, [])

        print("Initializing Network")
        #pp = preprocessor.Preprocessor(self.X) # here
        #X_preprocessed = pp.out_tensor # here
        X_preprocessed = self.X  # (n_batch, n_views, 127, 127, 3)
        n_batchsize = tf.shape(X_preprocessed)[0]

        # switch batch <-> nviews
        X_preprocessed = tf.transpose(X_preprocessed, [1, 0, 2, 3, 4])
        # encoder
        print("encoder")
        if self.params["TRAIN"]["ENCODER_MODE"] == "DILATED":
            en = encoder.Dilated_Encoder(X_preprocessed)
        elif self.params["TRAIN"]["ENCODER_MODE"] == "RESIDUAL":
            en = encoder.Residual_Encoder(X_preprocessed)
        elif self.params["TRAIN"]["ENCODER_MODE"] == "SERESNET":
            en = encoder.SENet_Encoder(X_preprocessed)
        else:
            en = encoder.Simple_Encoder(X_preprocessed)
        encoded_input = en.out_tensor
        # switch batch <-> nviews
        encoded_input = tf.transpose(encoded_input, [1, 0, 2])
        X_preprocessed = tf.transpose(X_preprocessed, [1, 0, 2, 3, 4])

        # visualize transformation of input state to voxel
        if self.params["VIS"]["ENCODER_PROCESS"]:
            with tf.name_scope("misc"):
                feature_maps = tf.get_collection("feature_maps")
                fm_list = []
                for fm in feature_maps:
                    fm_slice = fm[0, 0, :, :, 0]
                    #fm_shape = fm_slice.get_shape().as_list()
                    fm_shape = tf.shape(fm_slice)
                    fm_slice = tf.pad(fm_slice,
                                      [[0, 0], [127 - fm_shape[0], 0]])
                    fm_list.append(fm_slice)
                fm_img = tf.concat(fm_list, axis=0)
                tf.summary.image("feature_map_list",
                                 tf.expand_dims(tf.expand_dims(fm_img, -1), 0))

        # recurrent_module
        print("recurrent_module")
        with tf.name_scope("Recurrent_module"):
            rnn_mode = self.params["TRAIN"]["RNN_MODE"]
            n_cell = self.params["TRAIN"]["RNN_CELL_NUM"]
            n_hidden = self.params["TRAIN"]["RNN_HIDDEN_SIZE"]

            if rnn_mode == "LSTM":
                rnn = recurrent_module.LSTM_Grid(initializer=init)
                hidden_state = (
                    tf.zeros([n_batchsize, n_cell, n_cell, n_cell, n_hidden],
                             name="zero_hidden_state"),
                    tf.zeros([n_batchsize, n_cell, n_cell, n_cell, n_hidden],
                             name="zero_cell_state"))
            else:
                rnn = recurrent_module.GRU_Grid(initializer=init)
                hidden_state = tf.zeros(
                    [n_batchsize, n_cell, n_cell, n_cell, n_hidden],
                    name="zero_hidden_state")

            #n_timesteps = self.params["TRAIN"]["TIME_STEP_COUNT"]
            n_timesteps = np.shape(X_preprocessed)[1]
            # feed a fixed-length sequence of images
            if isinstance(n_timesteps, int) and n_timesteps > 0:
                for t in range(n_timesteps):
                    hidden_state = rnn.call(encoded_input[:, t, :],
                                            hidden_state)
            else:  # feed an arbitrary-length sequence of images
                n_timesteps = tf.shape(X_preprocessed)[1]

                t = tf.constant(0)

                def condition(h, t):
                    return tf.less(t, n_timesteps)

                def body(h, t):
                    h = rnn.call(encoded_input[:, t, :], h)
                    t = tf.add(t, 1)
                    return h, t

                hidden_state, t = tf.while_loop(condition, body,
                                                (hidden_state, t))

        # decoder
        print("decoder")
        if isinstance(hidden_state, tuple):
            hidden_state = hidden_state[0]
        if self.params["TRAIN"]["DECODER_MODE"] == "DILATED":
            de = decoder.Dilated_Decoder(hidden_state)
        elif self.params["TRAIN"]["DECODER_MODE"] == "RESIDUAL":
            de = decoder.Residual_Decoder(hidden_state)
        elif self.params["TRAIN"]["DECODER_MODE"] == "RESIDUAL64":
            de = decoder.Residual_Decoder64(hidden_state)
        elif self.params["TRAIN"]["DECODER_MODE"] == "SERESNET":
            de = decoder.SENet_Decoder(hidden_state)
        elif self.params["TRAIN"]["DECODER_MODE"] == "SERESNET64":
            de = decoder.SENet_Decoder64(hidden_state)
        else:
            de = decoder.Simple_Decoder(hidden_state)
        self.logits = de.out_tensor

        # visualize transformation of hidden state to voxel
        if self.params["VIS"]["DECODER_PROCESS"]:
            with tf.name_scope("misc"):
                feature_voxels = tf.get_collection("feature_voxels")
                fv_list = []
                for fv in feature_voxels:
                    fv_slice = fv[0, :, :, 0, 0]
                    fv_shape = fv_slice.get_shape().as_list()
                    if "64" in self.params["TRAIN"]["DECODER_MODE"]:
                        fv_slice = tf.pad(fv_slice,
                                          [[0, 0], [64 - fv_shape[0], 0]])
                    else:
                        fv_slice = tf.pad(fv_slice,
                                          [[0, 0], [32 - fv_shape[0], 0]])
                    fv_list.append(fv_slice)
                fv_img = tf.concat(fv_list, axis=0)
                tf.summary.image("feature_voxel_list",
                                 tf.expand_dims(tf.expand_dims(fv_img, -1), 0))

        # loss
        print("loss")
        if self.params["TRAIN"]["LOSS_FCN"] == "FOCAL_LOSS":
            voxel_loss = loss.Focal_Loss(self.Y_onehot, self.logits)
            self.softmax = voxel_loss.pred
        elif self.params["TRAIN"]["LOSS_FCN"] == "WEIGHTED_SOFTMAX":
            voxel_loss = loss.Weighted_Voxel_Softmax(self.Y_onehot,
                                                     self.logits)
            self.softmax = voxel_loss.softmax
        elif self.params["TRAIN"]["LOSS_FCN"] == "SOFTMAX":
            voxel_loss = loss.Voxel_Softmax(self.Y_onehot, self.logits)
            self.softmax = voxel_loss.softmax
        else:
            print("WRONG LOSS FUNCTION. CHECK LOSS")
            os.abort()
        self.loss = voxel_loss.loss
        tf.summary.scalar("loss", self.loss)

        # misc
        print("misc")
        with tf.name_scope("misc"):
            self.step_count = tf.Variable(0,
                                          trainable=False,
                                          name="step_count")
            self.print = tf.Print(self.loss, [self.step_count, self.loss, t])

        # optimizer
        print("optimizer")
        if self.params["TRAIN"]["OPTIMIZER"] == "ADAM":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.LR,
                epsilon=self.params["TRAIN"]["ADAM_EPSILON"])
            #learning_rate=self.params["TRAIN"]["ADAM_LEARN_RATE"], epsilon=self.params["TRAIN"]["ADAM_EPSILON"])
            tf.summary.scalar("adam_learning_rate", optimizer._lr)
        else:
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.LR)
            #learning_rate=self.params["TRAIN"]["GD_LEARN_RATE"])
            tf.summary.scalar("learning_rate", optimizer._learning_rate)

        grads_and_vars = optimizer.compute_gradients(self.loss)
        self.apply_grad = optimizer.apply_gradients(
            grads_and_vars, global_step=self.step_count)

        # metric
        print("metrics")
        with tf.name_scope("metrics"):
            Y = tf.argmax(self.Y_onehot, -1)
            predictions = tf.argmax(self.softmax, -1)
            acc, acc_op = tf.metrics.accuracy(Y, predictions)
            rms, rms_op = tf.metrics.root_mean_squared_error(
                self.Y_onehot, self.softmax)
            iou, iou_op = tf.metrics.mean_iou(Y, predictions, 2)
            self.metrics_op = tf.group(acc_op, rms_op, iou_op)

        tf.summary.scalar("accuracy", acc)
        tf.summary.scalar("rmse", rms)
        tf.summary.scalar("iou", iou)

        # initialize
        # config=tf.ConfigProto(log_device_placement=True)
        print("setup")
        self.summary_op = tf.summary.merge_all()
        self.sess = tf.InteractiveSession()
        if self.params["MODE"] == "DEBUG":
            self.sess = tf_debug.TensorBoardDebugWrapperSession(
                self.sess,
                "nat-oitwireless-inside-vapornet100-c-15126.Princeton.EDU:6064"
            )

        # summaries
        print("summaries")
        if self.params["MODE"] == "TEST":
            self.test_writer = tf.summary.FileWriter(
                "{}/test".format(self.MODEL_DIR), self.sess.graph)
        else:
            self.train_writer = tf.summary.FileWriter(
                "{}/train".format(self.MODEL_DIR), self.sess.graph)
            self.val_writer = tf.summary.FileWriter(
                "{}/val".format(self.MODEL_DIR), self.sess.graph)

        # initialize
        print("initialize")
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        print('trainable vars:', len(tf.trainable_variables()))
        print("...done!")