Example #1
    def __init__(self, image_w=cf.w, image_h=cf.h, channels=cf.channels, num_classes=100):
        self._width  = image_w # define the width of the image.
        self._height = image_h # define the height of the image.
        self._batch_size = cf.batch_size # define the batch size of mini-batch training.
        self._channels = channels # define the number of channels, e.g. RGB = 3, grayscale = 1, feature map = 50
        self._num_classes = num_classes # define the number of classes for the final classification

        # basic options for the TensorFlow session: allow_growth keeps TensorFlow from allocating all GPU memory up front.
        gpu_options = tf.GPUOptions(allow_growth = True)
        self._session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

        # placeholders: None will become the batch size of each batch; the last batch of an epoch may be smaller than the rest.
        self._images = tf.placeholder(tf.float32, shape=[None, self._width, self._height, self._channels])
        self._labels = tf.placeholder(tf.int64, shape=[None])
        self._keep_prob = tf.placeholder(tf.float32)
        self._is_train = tf.placeholder(tf.bool)
        self._global_step = tf.Variable(0, trainable=False, dtype=tf.int64, name="global_step") # tracks the global step of training.
        self.UPDATE_OPS_COLLECTION = ops.GraphKeys.UPDATE_OPS # collection key for ops (e.g. batch-norm updates) that must run alongside training.

        # loss calculation & update
        self._logits = self._inference(self._images, self._keep_prob, self._is_train) # prediction
        self._avg_loss = self._loss(self._labels, self._logits) # difference between prediction & actual label.
        self._train_op = self._train(self._avg_loss) # back propagate the loss.
        self._accuracy = F.accuracy_score(self._labels, self._logits) # accuracy of the predictions for this batch.

        # basic tensorflow run operations
        self._saver = tf.train.Saver(tf.all_variables())
        self._session.run(tf.initialize_all_variables())
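The helper methods wired together above are not shown in this example. A minimal sketch of what _loss and _train could look like, assuming the labels placeholder holds int64 class indices; the optimizer and learning rate are placeholders, not taken from the snippet:

    def _loss(self, labels, logits):
        # Sparse cross-entropy: labels are class indices, logits are the raw scores from _inference.
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits)
        return tf.reduce_mean(cross_entropy)  # average loss over the mini-batch

    def _train(self, avg_loss):
        # Placeholder optimizer; the real code may use any optimizer and schedule.
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        # Passing global_step makes minimize() increment the counter defined in __init__.
        return optimizer.minimize(avg_loss, global_step=self._global_step)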
Example #2
    def __init__(self,
                 embedding_dim=cf.embedding_dim,
                 time_steps=56,
                 AMR_steps=40,
                 num_classes=5,
                 batch_size=cf.batch_size):
        self._num_classes = num_classes
        self._batch_size = batch_size
        self._hidden_size = cf.hidden_size

        self._embedding_dim = embedding_dim
        self._time_steps = time_steps
        self._amr_steps = AMR_steps

        gpu_options = tf.GPUOptions(allow_growth=True)
        self._session = tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options))

        self._input1 = tf.placeholder(
            tf.float32, shape=[None, self._time_steps, self._embedding_dim])
        self._input2 = tf.placeholder(
            tf.float32, shape=[None, self._amr_steps, self._embedding_dim])
        self._labels = tf.placeholder(tf.int64, shape=[None])

        self._keep_prob = tf.placeholder(tf.float32)
        self._global_step = tf.Variable(0, trainable=False, dtype=tf.int64, name="global_step")
        self._logits = self._inference(self._input1, self._input2,
                                       self._keep_prob)
        self._avg_loss = self._loss(self._labels, self._logits)
        self._train_op = self._train(self._avg_loss)

        self._accuracy = F.accuracy_score(self._labels, self._logits)
        self._saver = tf.train.Saver(tf.all_variables())
        self._session.run(tf.initialize_all_variables())
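F is an external helper module, so accuracy_score is not defined in this snippet. Assuming it compares the argmax of the logits against the int64 label vector, a sketch could be:

def accuracy_score(labels, logits):
    # Predicted class = index of the largest logit for each row.
    correct = tf.equal(tf.argmax(logits, 1), labels)
    # Fraction of correct predictions in the batch, as a float32 scalar.
    return tf.reduce_mean(tf.cast(correct, tf.float32))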
Example #3
 def __init__(self):
     self._image_size = 24
     self._num_classes = 10
     self._batch_size = 50
     gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gentleman.request_mem(3*1024, i_am_nice=False))
     self._session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
     self._images = tf.placeholder("float", shape=[None, self._image_size, self._image_size, 3])
     self._labels = tf.placeholder("float", shape=[None, self._num_classes])
     self._keep_prob = tf.placeholder("float")
     self._logits = self._inference(self._images, self._keep_prob)
     self._avg_loss = self._loss(self._labels, self._logits)
     self._train_op = self._train(self._avg_loss)
     self._accuracy = F.accuracy_score(self._labels, self._logits)
     self._session.run(tf.initialize_all_variables())
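Unlike the other examples, this constructor declares the labels placeholder as a one-hot float matrix of shape [None, num_classes], so a matching _loss would use the dense cross-entropy op. A hedged sketch; the real helper is not shown here:

 def _loss(self, labels, logits):
     # Dense cross-entropy: labels are one-hot rows of shape [batch, num_classes].
     cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
     return tf.reduce_mean(cross_entropy)  # average over the mini-batch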
Example #4
 def __init__(self,
              image_size=24,
              num_classes=10,
              batch_size=50,
              channels=3):
     self._image_size = image_size
     self._num_classes = num_classes
     self._batch_size = batch_size
     self._channels = channels
     gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
     self._session = tf.Session(config=tf.ConfigProto(
         gpu_options=gpu_options))
     self._images = tf.placeholder(
         tf.float32,
         shape=[None, self._image_size, self._image_size, self._channels])
     self._labels = tf.placeholder(tf.int64, shape=[None])
     self._keep_prob = tf.placeholder(tf.float32)
     self._global_step = tf.Variable(0, trainable=False, dtype=tf.int64, name="global_step")
     self._logits = self._inference(self._images, self._keep_prob)
     self._avg_loss = self._loss(self._labels, self._logits)
     self._train_op = self._train(self._avg_loss)
     self._accuracy = F.accuracy_score(self._labels, self._logits)
     self._saver = tf.train.Saver(tf.all_variables())
     self._session.run(tf.initialize_all_variables())
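A hedged usage sketch for a constructor of this shape; the class name Model, the stand-in data, and the checkpoint path are illustrative assumptions, not taken from the snippet:

import numpy as np

model = Model(image_size=24, num_classes=10, batch_size=50, channels=3)  # hypothetical class name

# one training step: feed a mini-batch into the placeholders defined in __init__
batch_images = np.zeros((50, 24, 24, 3), dtype=np.float32)  # stand-in image batch
batch_labels = np.zeros((50,), dtype=np.int64)               # stand-in class indices

_, loss = model._session.run(
    [model._train_op, model._avg_loss],
    feed_dict={model._images: batch_images,
               model._labels: batch_labels,
               model._keep_prob: 0.5})

model._saver.save(model._session, "checkpoints/model.ckpt",  # hypothetical path
                  global_step=model._global_step)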
Example #5
    def __init__(self,
                 image_w=cf.w,
                 image_h=cf.h,
                 channels=cf.channels,
                 num_classes=10):
        self._width = image_w  # define the width of the image.
        self._height = image_h  # define the height of the image.
        self._batch_size = cf.batch_size  # define the batch size of mini-batch training.
        self._channels = channels  # define the number of channels, e.g. RGB = 3, grayscale = 1, feature map = 50
        self._num_classes = num_classes  # define the number of classes for the final classification

        # basic options for the TensorFlow session: allow_growth keeps TensorFlow from allocating all GPU memory up front.
        gpu_options = tf.GPUOptions(allow_growth=True)
        self._session = tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, allow_soft_placement=True))

        # placeholders: None will become the batch size of each batch; the last batch of an epoch may be smaller than the rest.
        # GPU 1 input
        self._images1 = tf.placeholder(
            tf.float32,
            shape=[None, self._width, self._height, self._channels])
        # GPU 2 input
        self._images2 = tf.placeholder(
            tf.float32,
            shape=[None, self._width, self._height, self._channels])

        self._labels = tf.placeholder(tf.int64, shape=[None])

        # general input
        self._keep_prob = tf.placeholder(tf.float32)
        self._is_train = tf.placeholder(tf.bool)
        self._global_step = tf.Variable(
            0, trainable=False, dtype=tf.int64,
            name="global_step")  # tracks the global step of training.

        # loss calculation & update (GPU1)
        with tf.device('/gpu:0'):
            self._logits1 = self._inference(self._images1, self._keep_prob,
                                            self._is_train)  # prediction

        # loss calculation & update (GPU2)
        with tf.device('/gpu:1'):
            tf.get_variable_scope().reuse_variables()
            self._logits2 = self._inference(self._images2, self._keep_prob,
                                            self._is_train)  # prediction

        self._logits = tf.concat(0, [self._logits1, self._logits2])
        self._avg_loss = self._loss(
            self._labels,
            self._logits)  # difference between prediction & actual label.
        self._accuracy = F.accuracy_score(
            self._labels,
            self._logits)  # accuracy of the predictions for this batch.

        # train operation
        self._train_op = self._train(
            self._avg_loss)  # back propagate the loss.

        # basic tensorflow run operations
        self._saver = tf.train.Saver(tf.all_variables(),
                                     write_version=tf.train.SaverDef.V2)
        self._session.run(tf.initialize_all_variables())
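Because the forward pass is split into two towers, a training step would feed one half of the mini-batch to each image placeholder so that the concatenated logits line up with the label vector. A sketch, assuming model is an instance of this class and batch_images/batch_labels form a NumPy mini-batch of size cf.batch_size:

half = cf.batch_size // 2  # each GPU tower sees half of the mini-batch

_, loss = model._session.run(
    [model._train_op, model._avg_loss],
    feed_dict={model._images1: batch_images[:half],  # tower on /gpu:0
               model._images2: batch_images[half:],  # tower on /gpu:1
               model._labels: batch_labels,          # matches the concat order: logits1 then logits2
               model._keep_prob: 0.5,
               model._is_train: True})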
print("Maximum accuracy " + str(np.amax(acc_storage)))
index = np.where(acc_storage == np.amax(acc_storage))
print("C_value for max accuracy : " + str(C_values[index[0][0]]))

#%% Get Results on train test Data for linear kernel
X_sub_train, y_sub_train, X_sub_test, y_sub_test = train_test_split(X_sub,
                                                                    y_sub,
                                                                    split=0.75)

c_val = C_values[index[0][0]]

clf = SVC(C=c_val, kernel='linear')
clf.fit(X_sub_train, y_sub_train)

y_pred = clf.predict(X_sub_test)
print(accuracy_score(y_sub_test, y_pred))
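For reference, the same C sweep and hold-out check can be written with scikit-learn's own utilities; this is only a comparison sketch and assumes X_sub and y_sub are plain NumPy arrays rather than the repository's custom helpers:

import numpy as np
from sklearn.model_selection import cross_val_score, train_test_split as sk_split
from sklearn.svm import SVC

# mean 10-fold cross-validation accuracy for each candidate C
scores = [cross_val_score(SVC(C=c, kernel='linear'), X_sub, y_sub, cv=10).mean()
          for c in C_values]
best_c = C_values[int(np.argmax(scores))]

X_tr, X_te, y_tr, y_te = sk_split(X_sub, y_sub, train_size=0.75)
clf = SVC(C=best_c, kernel='linear').fit(X_tr, y_tr)
print(clf.score(X_te, y_te))  # hold-out accuracy for the best C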

#%% Linear Kernel
C_values = np.linspace(1e-3, 0.08, 100)
k_fold = 10
X_split, y_split = cross_validation_split(X_sub, y_sub, folds=k_fold)
cross_val_scores = []

for c_val in C_values:
    total_acc = 0
    for i in range(k_fold):
        X_test_k_fold = X_split[i]
        y_test_k_fold = y_split[i]
        X_train_k_fold = np.empty(shape=(0, X_test_k_fold.shape[1]))
        y_train_k_fold = np.empty(shape=(0, 1))
        for j in range(k_fold):
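The example is cut off at this point. A plausible completion of the inner loop, assuming it stacks the remaining folds into the training arrays, fits a linear SVC, and averages the fold accuracies (this continuation is a guess, not the original code):

            if j == i:
                continue  # fold i is held out for testing
            X_train_k_fold = np.vstack((X_train_k_fold, X_split[j]))
            y_train_k_fold = np.vstack((y_train_k_fold, y_split[j].reshape(-1, 1)))

        clf = SVC(C=c_val, kernel='linear')
        clf.fit(X_train_k_fold, y_train_k_fold.ravel())
        total_acc += accuracy_score(y_test_k_fold, clf.predict(X_test_k_fold))
    cross_val_scores.append(total_acc / k_fold)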
Example #7
X_cvx_train, y_cvx_train, X_cvx_test, y_cvx_test = train_test_split(X_cvx,
                                                                    y_cvx,
                                                                    split=0.75)

w, intercept, alphas = perform_cvx_opt(X_cvx_train,
                                       y_cvx_train,
                                       kernel='linear',
                                       C=c_val)

#%% Do predictions
y_pred = np.matmul(X_cvx_test, w) + intercept  # decision values: Xw + b
y_pred = np.array([int(i[0] / abs(i[0])) for i in y_pred])  # take the sign, mapping to labels in {-1, +1}

print("Accuracy using cvx for binary classification : " +
      str(accuracy_score(y_cvx_test, y_pred)))

#%% Polynomial Kernel
w, intercept, a = perform_cvx_opt(X_cvx_train,
                                  y_cvx_train,
                                  kernel='poly',
                                  C=0.005157,
                                  gamma=0.07878,
                                  p=5)

#%% Do predictions
y_pred = np.matmul(X_cvx_test, w) + intercept
y_pred = np.array([int(i[0] / abs(i[0])) for i in y_pred])

print("Accuracy using cvx for binary classification : " +
      str(accuracy_score(y_cvx_test, y_pred)))