def __init__(self, sess):
    """Set up training: data pipeline, model graph, saver and checkpoint dir.

    Args:
        sess: an open tf.Session used later for init/restore ops.
    """
    self.sess = sess
    # NOTE(review): hard-coded Windows dataset root — assumes a local layout;
    # confirm before running elsewhere.
    self.dataset_root = 'F:\\Learning\\tensorflow\\detect\\Dataset\\'

    if Detection_or_Classifier == 'Classifier':

        self.train_data = DataLoader(root=os.path.join(
            self.dataset_root, 'data', 'train'),
                                     batch=batch_size)

        print("Building the model...")
        self.model = Model(num_classes=1000)
        print("Model is built successfully\n\n")

    elif Detection_or_Classifier == 'Detection':
        pass

    num_params = get_num_params()
    print('all params:{}'.format(num_params))

    # Choose which variables the Saver manages.  When fine-tuning a detector
    # from classifier weights, only trainable variables are considered and
    # the detection head ('zsc_detection') is excluded from restore.
    self.use_classifier_pretrain = False
    if self.use_classifier_pretrain:
        var = tf.trainable_variables()
    else:
        var = tf.global_variables()
    var_list = list(var)
    if Detection_or_Classifier == 'Detection' and self.use_classifier_pretrain:
        var_list = [val for val in var if 'zsc_detection' not in val.name]

    if Detection_or_Classifier == 'Classifier':
        # Classifier checkpoints always cover every global variable.
        var_list = list(tf.global_variables())
    self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep)

    self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                              Detection_or_Classifier)
    if not os.path.exists(self.save_checkpoints_path):
        os.makedirs(self.save_checkpoints_path)

    # Initializing the model
    self.init = None
    self.__init_model()

    # Loading the model checkpoint if exists
    self.__load_model()
        '''
Beispiel #2
0
    def __init__(self, sess):
        """Wire up the trainer: data loader, model graph, saver, checkpoints.

        Args:
            sess: the tf.Session used to run initialization and restore ops.
        """
        self.sess = sess
        self.dataset_root = '/data1/ZhangShiChang/TensorflowWork/Classifier/dataset'

        if Detection_or_Classifier == 'classifier':
            # 18-way classification; labels are simply the class indices.
            self.labels = list(range(18))

            train_root = os.path.join(self.dataset_root, 'train')
            self.train_data = DataLoader(root=train_root,
                                         classes=len(self.labels),
                                         batch=batch_size)

            print("Building the model...")
            self.model = Model(num_classes=len(self.labels))
            print("Model is built successfully\n\n")

        elif Detection_or_Classifier == 'detection':
            pass

        print('all params:{}'.format(get_num_params()))

        # The saver tracks every global variable.
        saved_vars = list(tf.global_variables())
        self.saver = tf.train.Saver(var_list=saved_vars,
                                    max_to_keep=max_to_keep)

        self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                                  Detection_or_Classifier)
        if not os.path.exists(self.save_checkpoints_path):
            os.makedirs(self.save_checkpoints_path)

        # Run the variable initializer, then restore the newest checkpoint
        # if one exists.
        self.init = None
        self.__init_model()
        self.__load_model()
Beispiel #3
0
class Train:
    """Trainer class for the CNN.
    It's also responsible for loading/saving the model checkpoints from/to experiments/experiment_name/checkpoint_dir"""
    def __init__(self, sess):
        """Build the data pipeline, model, saver and checkpoint directory.

        Args:
            sess: an open tf.Session used for initialization and restoring.
        """
        self.sess = sess
        self.dataset_root = '/data1/ZhangShiChang/TensorflowWork/Classifier/dataset'

        # Must exist before __load_model() runs: that method reads this flag,
        # and previously it was never set, raising AttributeError whenever
        # Detection_or_Classifier == 'detection'.
        self.use_classifier_pretrain = False

        if Detection_or_Classifier == 'classifier':
            # 18-way classification; labels are the class indices.
            self.labels = list(range(18))

            self.train_data = DataLoader(root=os.path.join(
                self.dataset_root, 'train'),
                                         classes=len(self.labels),
                                         batch=batch_size)

            print("Building the model...")
            self.model = Model(num_classes=len(self.labels))
            print("Model is built successfully\n\n")

        elif Detection_or_Classifier == 'detection':
            pass

        num_params = get_num_params()
        print('all params:{}'.format(num_params))

        # Save/restore every global variable.
        var_list = list(tf.global_variables())
        self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep)

        self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                                  Detection_or_Classifier)
        if not os.path.exists(self.save_checkpoints_path):
            os.makedirs(self.save_checkpoints_path)

        # Initializing the model
        self.init = None
        self.__init_model()

        # Loading the model checkpoint if exists
        self.__load_model()

    ############################################################################################################
    # Model related methods
    def __init_model(self):
        """Run the global-variable initializer on the session."""
        print("Initializing the model...")
        self.init = tf.group(tf.global_variables_initializer())
        self.sess.run(self.init)
        print("Model initialized\n\n")

    def save_model(self):
        """Write a checkpoint and a frozen .pb graph into the checkpoint dir."""
        # Rebuild the saver so any variables created after __init__ are
        # included in the checkpoint.
        var_list = list(tf.global_variables())
        self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep)

        print("Saving a checkpoint")
        self.saver.save(
            self.sess,
            self.save_checkpoints_path + '/' + Detection_or_Classifier,
            self.model.global_step_tensor)
        print("Checkpoint Saved\n\n")

        print('Saving a pb')
        # Freeze the graph; the output node name differs between the tasks.
        if Detection_or_Classifier == 'classifier':
            output_graph_def = graph_util.convert_variables_to_constants(
                self.sess, self.sess.graph.as_graph_def(),
                ['output/zsc_output'])
        elif Detection_or_Classifier == 'detection':
            output_graph_def = graph_util.convert_variables_to_constants(
                self.sess, self.sess.graph.as_graph_def(), ['zsc_output'])
        tf.train.write_graph(output_graph_def,
                             self.save_checkpoints_path,
                             Detection_or_Classifier + '.pb',
                             as_text=False)
        print('pb saved\n\n')

    def __load_model(self):
        """Restore the newest checkpoint.

        When fine-tuning a detector from a classifier pretrain, restore the
        classifier's checkpoint instead of this task's own.
        """
        if Detection_or_Classifier == 'detection' and self.use_classifier_pretrain:
            latest_checkpoint = tf.train.latest_checkpoint(
                os.path.join(os.getcwd(), 'checkpoints', 'classifier'))
            if latest_checkpoint:
                print("loading classifier checkpoint {} ...\n".format(
                    latest_checkpoint))
                self.saver.restore(self.sess, latest_checkpoint)
                print("classifier model success loaded\n\n")
            else:
                print('loading classifier model failure!!')
        else:
            latest_checkpoint = tf.train.latest_checkpoint(
                self.save_checkpoints_path)
            if latest_checkpoint:
                print("Loading model checkpoint {} ...\n".format(
                    latest_checkpoint))
                self.saver.restore(self.sess, latest_checkpoint)
                print("Checkpoint loaded\n\n")
            else:
                print("First time to train!\n\n")

    ############################################################################################################
    # Train and Test methods
    def train(self):
        """Main training loop.

        Resumes from the epoch counter persisted inside the graph and runs
        until num_epochs, saving checkpoints and testing periodically.
        """
        for cur_epoch in range(
                self.model.global_epoch_tensor.eval(self.sess) + 1,
                num_epochs + 1, 1):

            batch = 0

            loss_list = []

            if Detection_or_Classifier == 'classifier':
                acc_list = []
                acc_5_list = []
                for X_batch, y_batch in self.train_data.next():
                    print('Training epoch:{},batch:{}\n'.format(
                        cur_epoch, batch))

                    cur_step = self.model.global_step_tensor.eval(self.sess)

                    feed_dict = {
                        self.model.input_image: X_batch,
                        self.model.y: y_batch
                    }

                    _, loss, acc, acc_5 = self.sess.run([
                        self.model.train_op, self.model.all_loss,
                        self.model.accuracy, self.model.accuracy_top_5
                    ],
                                                        feed_dict=feed_dict)

                    print('loss:' + str(loss) + '|' + 'accuracy:' + str(acc) +
                          '|' + 'top_5:' + str(acc_5))

                    loss_list += [loss]
                    acc_list += [acc]
                    acc_5_list += [acc_5]

                    # Persist the step counter inside the graph.
                    self.model.global_step_assign_op.eval(
                        session=self.sess,
                        feed_dict={self.model.global_step_input: cur_step + 1})

                    # One epoch's worth of batches consumed: log the averages,
                    # bump the persisted epoch counter and break to the next
                    # epoch of the outer loop.
                    if batch > len(self.train_data):
                        batch = 0

                        avg_loss = np.mean(loss_list).astype(np.float32)
                        avg_accuracy = np.mean(acc_list).astype(np.float32)
                        avg_top5 = np.mean(acc_5_list).astype(np.float32)

                        self.model.global_epoch_assign_op.eval(
                            session=self.sess,
                            feed_dict={
                                self.model.global_epoch_input: cur_epoch + 1
                            })

                        print("\nEpoch-" + str(cur_epoch) + '|' + 'avg loss:' +
                              str(avg_loss) + '|' + 'avg accuracy:' +
                              str(avg_accuracy) + '|' + 'avg top_5:' +
                              str(avg_top5) + '\n')
                        break

                    # Mid-epoch checkpoint.
                    if batch == 400:
                        self.save_model()

                    batch += 1

                if cur_epoch % save_model_every == 0 and cur_epoch != 0:
                    self.save_model()

                if cur_epoch % test_every == 0:
                    print('start test')
                    self.test()
                    print('end test')
            elif Detection_or_Classifier == 'detection':
                pass

    def test(self):
        """Evaluate on the image list in test_images/<task>/imagelist.txt.

        Each line of the file is '<relative path> <int label>'.  Images are
        resized to 224x224 and evaluated in batches of batch_size; a
        checkpoint is saved afterwards.
        """
        if Detection_or_Classifier == 'classifier':

            ImageList = []
            ImageLabelList = []
            with open(
                    os.path.join(os.getcwd(), 'test_images',
                                 Detection_or_Classifier, 'imagelist.txt'),
                    'r') as f:
                for i, line in enumerate(f.readlines()):
                    ImageList.append(
                        cv2.resize(
                            cv2.imread(
                                os.path.join(os.getcwd(), 'test_images',
                                             Detection_or_Classifier,
                                             line.split(' ')[0])), (224, 224)))
                    ImageLabelList.append(int(line.split(' ')[1]))

            loss_list = []
            acc_list = []
            top5_list = []
            # Trailing images that do not fill a whole batch are skipped.
            for i in range(0, len(ImageList) // batch_size):
                print('process batch:{}'.format(i))

                x = np.stack(ImageList[i * batch_size:(i + 1) * batch_size],
                             axis=0).astype(np.float32)
                y = np.array(ImageLabelList[i * batch_size:(i + 1) *
                                            batch_size],
                             dtype=np.int32)

                feed_dict = {self.model.input_image: x, self.model.y: y}

                loss, acc, acc_5 = self.sess.run([
                    self.model.all_loss, self.model.accuracy,
                    self.model.accuracy_top_5
                ],
                                                 feed_dict=feed_dict)

                loss_list.append(loss)
                acc_list.append(acc)
                top5_list.append(acc_5)

            print('test avg loss:' + str(np.mean(loss_list)) + '|' +
                  'avg accuracy:' + str(np.mean(acc_list)) + '|' +
                  'avg top_5:' + str(np.mean(top5_list)))
            self.save_model()

        elif Detection_or_Classifier == 'detection':
            pass
Beispiel #4
0
    def __init__(self, sess):
        """Prepare data pipelines, build IGCV3FPN, set up saver, checkpoint
        directory and TensorBoard summary writers."""
        self.sess = sess
        self.dataset_root = 'F:\\Learning\\tensorflow\\detect\\Dataset\\'

        if Detection_or_Classifier == 'classifier':
            self.train_data = DataLoader(
                root=self.dataset_root + 'SmallNORB\\trainImages',
                batch=batch_size)
            self.test_data = DataLoader(
                root=self.dataset_root + 'SmallNORB\\testImages',
                batch=batch_size)

            self.labels = [str(i) for i in range(1, 1001)]

            print("Building the model...")
            self.model = IGCV3FPN(num_classes=len(self.labels),
                                  num_anchors=5,
                                  batch_size=batch_size,
                                  max_box_per_image=max_box_per_image,
                                  max_grid=[max_input_size, max_input_size])
            print("Model is built successfully\n\n")

        elif Detection_or_Classifier == 'detection':
            voc_classes = [
                'person', 'head', 'hand', 'foot', 'aeroplane', 'tvmonitor',
                'train', 'boat', 'dog', 'chair', 'bird', 'bicycle', 'bottle',
                'sheep', 'diningtable', 'horse', 'motorbike', 'sofa', 'cow',
                'car', 'cat', 'bus', 'pottedplant'
            ]
            train_ints, valid_ints, self.labels = create_training_instances(
                self.dataset_root + 'VOC2012\\Annotations\\',
                self.dataset_root + 'VOC2012\\JPEGImages\\',
                'data.pkl', '', '', '', voc_classes)
            # downsample=32: ratio between network input size and output
            # size (YOLOv3-style head).
            self.train_data = BatchGenerator(
                instances=train_ints,
                anchors=anchors,
                labels=self.labels,
                downsample=32,
                max_box_per_image=max_box_per_image,
                batch_size=batch_size,
                min_net_size=min_input_size,
                max_net_size=max_input_size,
                shuffle=True,
                jitter=0.3,
                norm=normalize)
            self.test_data = BatchGenerator(
                instances=valid_ints,
                anchors=anchors,
                labels=self.labels,
                downsample=32,
                max_box_per_image=max_box_per_image,
                batch_size=batch_size,
                min_net_size=min_input_size,
                max_net_size=max_input_size,
                shuffle=True,
                jitter=0.0,
                norm=normalize)

            print("Building the model...")
            self.model = IGCV3FPN(num_classes=len(self.labels),
                                  num_anchors=5,
                                  batch_size=batch_size,
                                  max_box_per_image=max_box_per_image,
                                  max_grid=[max_input_size, max_input_size])
            print("Model is built successfully\n\n")

        print('all params:{}'.format(get_num_params()))

        all_vars = tf.global_variables()
        var_list = list(all_vars)
        # Optional fine-tune filter (excludes SE branches); currently
        # disabled by the `and False` toggle, so the full list is kept.
        if Detection_or_Classifier == 'detection' and False:
            var_list = [v for v in all_vars if ('SE' not in v.name)]

        self.saver = tf.train.Saver(var_list=var_list,
                                    max_to_keep=max_to_keep,
                                    keep_checkpoint_every_n_hours=10)

        self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                                  Detection_or_Classifier)
        if not os.path.exists(self.save_checkpoints_path):
            os.makedirs(self.save_checkpoints_path)

        # Initializing the model
        self.init = None
        self.__init_model()

        # Loading the model checkpoint if exists
        self.__load_model()

        # Per-task TensorBoard log directories: logs/<task>/{train,test}.
        summary_dir = os.path.join(os.getcwd(), 'logs', Detection_or_Classifier)
        summary_dir_train = os.path.join(summary_dir, 'train')
        summary_dir_test = os.path.join(summary_dir, 'test')
        for d in (summary_dir, summary_dir_train, summary_dir_test):
            if not os.path.exists(d):
                os.makedirs(d)
        self.train_writer = tf.summary.FileWriter(summary_dir_train, sess.graph)
        self.test_writer = tf.summary.FileWriter(summary_dir_test)
Beispiel #5
0
    def __init__(self, sess):
        """Prepare data pipelines, build CliqueFPN, set up saver and
        checkpoint directory, then initialize/restore the model."""
        self.sess = sess
        self.dataset_root = '/home/b101/anaconda2/ZSCWork/Dataset/'

        if Detection_or_Classifier == 'classifier':
            self.train_data = DataLoader(
                root=self.dataset_root + 'SmallNORB/trainImages',
                batch=batch_size)
            self.test_data = DataLoader(
                root=self.dataset_root + 'SmallNORB/testImages',
                batch=batch_size)

            self.labels = ['1', '2', '3', '4', '5']

            print("Building the model...")
            self.model = CliqueFPN(
                num_classes=len(self.labels),
                num_anchors=3,
                batch_size=batch_size,
                max_box_per_image=max_box_per_image,
                max_grid=[max_input_size, max_input_size],
            )
            print("Model is built successfully\n\n")

        elif Detection_or_Classifier == 'detection':
            fish_classes = [
                'heidiao', 'niyu', 'lvqimamiantun', 'hualu', 'heijun',
                'dalongliuxian', 'tiaoshiban'
            ]
            train_ints, valid_ints, self.labels = create_training_instances(
                self.dataset_root + 'Fish/Annotations/',
                self.dataset_root + 'Fish/JPEGImages/', 'data.pkl', '', '', '',
                fish_classes)
            # downsample=32: ratio between network input size and output
            # size (YOLOv3-style head).
            self.train_data = BatchGenerator(
                instances=train_ints,
                anchors=anchors,
                labels=self.labels,
                downsample=32,
                max_box_per_image=max_box_per_image,
                batch_size=batch_size,
                min_net_size=min_input_size,
                max_net_size=max_input_size,
                shuffle=True,
                jitter=0.3,
                norm=normalize)
            self.test_data = BatchGenerator(
                instances=valid_ints,
                anchors=anchors,
                labels=self.labels,
                downsample=32,
                max_box_per_image=max_box_per_image,
                batch_size=batch_size,
                min_net_size=min_input_size,
                max_net_size=max_input_size,
                shuffle=True,
                jitter=0.0,
                norm=normalize)

            print("Building the model...")
            self.model = CliqueFPN(
                num_classes=len(self.labels),
                num_anchors=3,
                batch_size=batch_size,
                max_box_per_image=max_box_per_image,
                max_grid=[max_input_size, max_input_size],
            )
            print("Model is built successfully\n\n")

        print('all params:{}'.format(get_num_params()))

        all_vars = tf.global_variables()
        var_list = list(all_vars)
        # Optional fine-tune filter excluding PrimaryConv / SelfAttention
        # norm, decoupled-operator and prelu variables plus the prediction
        # head; disabled via the `and False` toggle, so the full list is kept.
        if Detection_or_Classifier == 'detection' and False:
            excluded = (
                'PrimaryConv/conv_0/batchnorm',
                'PrimaryConv/conv_1/batchnorm',
                'PrimaryConv/conv_2/batchnorm',
                'PrimaryConv/conv_0/DecoupledOperator',
                'PrimaryConv/conv_1/DecoupledOperator',
                'PrimaryConv/conv_2/DecoupledOperator',
                'PrimaryConv/conv_0/prelu',
                'PrimaryConv/conv_1/prelu',
                'PrimaryConv/conv_2/prelu',
                'SelfAttention/g/batch_norm',
                'SelfAttention/f/batch_norm',
                'SelfAttention/h/batch_norm',
                'SelfAttention/g/prelu',
                'SelfAttention/f/prelu',
                'SelfAttention/h/prelu',
                'SelfAttention/DecoupledOperator',
                'zsc_pred',
            )
            var_list = [
                v for v in all_vars
                if not any(s in v.name for s in excluded)
            ]

        self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep)

        self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                                  Detection_or_Classifier)
        if not os.path.exists(self.save_checkpoints_path):
            os.makedirs(self.save_checkpoints_path)

        # Initializing the model
        self.init = None
        self.__init_model()

        # Loading the model checkpoint if exists
        self.__load_model()
Beispiel #6
0
    def __init__(self, sess):
        """Build data pipeline and model for the selected task, then set up
        the saver, checkpoint directory and init/restore."""
        self.sess = sess
        self.dataset_root = 'F:\\Learning\\tensorflow\\detect\\Dataset\\'

        if Detection_or_Classifier == 'classifier':
            # 18-way classification; labels are the class indices.
            self.labels = list(range(18))

            train_root = os.path.join(self.dataset_root, 'data', 'train')
            self.train_data = DataLoader(root=train_root,
                                         classes=len(self.labels),
                                         batch=batch_size)

            print("Building the model...")
            self.model = Model(num_classes=len(self.labels))
            print("Model is built successfully\n\n")

        elif Detection_or_Classifier == 'detection':
            fish_classes = [
                'heidiao', 'niyu', 'lvqimamiantun', 'hualu', 'heijun',
                'dalongliuxian', 'tiaoshiban'
            ]
            train_ints, valid_ints, self.labels = create_training_instances(
                os.path.join(self.dataset_root, 'Fish', 'Annotations'),
                os.path.join(self.dataset_root, 'Fish', 'JPEGImages'),
                'data.pkl', '', '', '', False, fish_classes)

            self.train_data = BatchGenerator(train_ints, self.labels,
                                             batch_size)

            print("Building the model...")
            self.model = Model(num_classes=23)
            print("Model is built successfully\n\n")

        print('all params:{}'.format(get_num_params()))

        # Fine-tune the detector from classifier weights: restore only the
        # trainable variables and drop the detection head from the saver.
        self.use_classifier_pretrain = True
        base_vars = (tf.trainable_variables()
                     if self.use_classifier_pretrain else
                     tf.global_variables())
        var_list = list(base_vars)
        if Detection_or_Classifier == 'detection' and self.use_classifier_pretrain:
            var_list = [v for v in base_vars if 'zsc_detection' not in v.name]

        if Detection_or_Classifier == 'classifier':
            # Classifier checkpoints cover all globals except the logits bias.
            var_list = [
                v for v in tf.global_variables()
                if 'Logits/bias' not in v.name
            ]
        self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep)

        self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                                  Detection_or_Classifier)
        if not os.path.exists(self.save_checkpoints_path):
            os.makedirs(self.save_checkpoints_path)

        # Initializing the model
        self.init = None
        self.__init_model()

        # Loading the model checkpoint if exists
        self.__load_model()
Beispiel #7
0
    def __init__(self, sess):
        """Prepare data pipelines, build CliqueFPN, set up saver, checkpoint
        directory and TensorBoard summary writers."""
        self.sess = sess
        self.dataset_root = '/home/b101/anaconda2/ZSCWork/Dataset/'

        if Detection_or_Classifier == 'classifier':
            self.train_data = DataLoader(
                root=self.dataset_root + 'SmallNORB/trainImages',
                batch=batch_size)
            self.test_data = DataLoader(
                root=self.dataset_root + 'SmallNORB/testImages',
                batch=batch_size)

            self.labels = ['1', '2', '3', '4', '5']

            print("Building the model...")
            self.model = CliqueFPN(num_classes=len(self.labels),
                                   num_anchors=5,
                                   batch_size=batch_size,
                                   max_box_per_image=max_box_per_image,
                                   max_grid=[max_input_size, max_input_size])
            print("Model is built successfully\n\n")

        elif Detection_or_Classifier == 'detection':
            voc_classes = [
                'person', 'head', 'hand', 'foot', 'aeroplane', 'tvmonitor',
                'train', 'boat', 'dog', 'chair', 'bird', 'bicycle', 'bottle',
                'sheep', 'diningtable', 'horse', 'motorbike', 'sofa', 'cow',
                'car', 'cat', 'bus', 'pottedplant'
            ]
            train_ints, valid_ints, self.labels = create_training_instances(
                self.dataset_root + 'VOC2012/Annotations/',
                self.dataset_root + 'VOC2012/JPEGImages/',
                'data.pkl', '', '', '', voc_classes)
            # downsample=32: ratio between network input size and output
            # size (YOLOv3-style head).
            self.train_data = BatchGenerator(
                instances=train_ints,
                anchors=anchors,
                labels=self.labels,
                downsample=32,
                max_box_per_image=max_box_per_image,
                batch_size=batch_size,
                min_net_size=min_input_size,
                max_net_size=max_input_size,
                shuffle=True,
                jitter=0.3,
                norm=normalize)
            self.test_data = BatchGenerator(
                instances=valid_ints,
                anchors=anchors,
                labels=self.labels,
                downsample=32,
                max_box_per_image=max_box_per_image,
                batch_size=batch_size,
                min_net_size=min_input_size,
                max_net_size=max_input_size,
                shuffle=True,
                jitter=0.0,
                norm=normalize)

            print("Building the model...")
            self.model = CliqueFPN(num_classes=len(self.labels),
                                   num_anchors=5,
                                   batch_size=batch_size,
                                   max_box_per_image=max_box_per_image,
                                   max_grid=[max_input_size, max_input_size])
            print("Model is built successfully\n\n")

        print('all params:{}'.format(get_num_params()))

        all_vars = tf.global_variables()
        var_list = list(all_vars)
        # Optional fine-tune filter excluding the deformable conv of
        # CliqueBlock_2; disabled via the `and False` toggle, so the full
        # list is kept.
        if Detection_or_Classifier == 'detection' and False:
            var_list = [
                v for v in all_vars
                if 'CliqueBlock_2/loop/deform_conv' not in v.name
            ]

        self.saver = tf.train.Saver(var_list=var_list, max_to_keep=max_to_keep)

        self.save_checkpoints_path = os.path.join(os.getcwd(), 'checkpoints',
                                                  Detection_or_Classifier)
        if not os.path.exists(self.save_checkpoints_path):
            os.makedirs(self.save_checkpoints_path)

        # Initializing the model
        self.init = None
        self.__init_model()

        # Loading the model checkpoint if exists
        self.__load_model()

        # Per-task TensorBoard log directories: logs/<task>/{train,test}.
        summary_dir = os.path.join(os.getcwd(), 'logs', Detection_or_Classifier)
        summary_dir_train = os.path.join(summary_dir, 'train')
        summary_dir_test = os.path.join(summary_dir, 'test')
        for d in (summary_dir, summary_dir_train, summary_dir_test):
            if not os.path.exists(d):
                os.makedirs(d)
        self.train_writer = tf.summary.FileWriter(summary_dir_train, sess.graph)
        self.test_writer = tf.summary.FileWriter(summary_dir_test)