    def fit(self, data_dir_path, model_dir_path, vgg16_include_top=True, data_set_name='UCF-101', test_size=0.2,
            random_state=42, from_picture=False):
        self.vgg16_include_top = vgg16_include_top
        self.from_picture = from_picture
        config_file_path = self.get_config_file_path(model_dir_path, vgg16_include_top)
        weight_file_path = self.get_weight_file_path(model_dir_path, vgg16_include_top)
        architecture_file_path = self.get_architecture_file_path(model_dir_path, vgg16_include_top)


        # Load an ImageNet-pretrained VGG16 to use as a fixed per-frame feature extractor.
        vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
        ### Uncomment if you want to use an intermediate layer as the feature output ###
        # vgg16_model.summary()
        # block3_conv3 = vgg16_model.get_layer("block3_conv3").output
        # f0 = block3_conv3
        # vgg16_model = Model(inputs=vgg16_model.input, outputs=f0)

        vgg16_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
        self.vgg16_model = vgg16_model


        feature_dir_name = data_set_name + '-VGG16-Features'
        if not vgg16_include_top:
            feature_dir_name = data_set_name + '-VGG16-HiDimFeatures'
        print('Reading features from:', feature_dir_name)
        max_frames = 0
        self.labels = dict()
        x_samples, y_samples = scan_and_extract_vgg16_features(data_dir_path,
                                                               output_dir_path=feature_dir_name,
                                                               model=self.vgg16_model,
                                                               data_set_name=data_set_name,
                                                               is_picture=self.from_picture)
        self.num_input_tokens = x_samples[0].shape[1]
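        # First pass over the samples: record each sequence's frame count;
        # expected_frames (the mean length) is used below to normalise sequence length.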
        frames_list = []
        for x in x_samples:
            frames = x.shape[0]
            frames_list.append(frames)
            max_frames = max(frames, max_frames)
        self.expected_frames = int(np.mean(frames_list))
        print('max frames: ', max_frames)
        print('expected frames: ', self.expected_frames)
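        # Second pass: truncate sequences longer than expected_frames and zero-pad
        # shorter ones so every sample has exactly expected_frames rows.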
        for i in range(len(x_samples)):
            x = x_samples[i]
            frames = x.shape[0]
            print(x.shape)
            if frames > self.expected_frames:
                x = x[0:self.expected_frames, :]
                x_samples[i] = x
            elif frames < self.expected_frames:
                temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
                temp[0:frames, :] = x
                x_samples[i] = temp
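        # Map each label to an integer index in order of first appearance.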
        for y in y_samples:
            if y not in self.labels:
                self.labels[y] = len(self.labels)
        print(self.labels)
        for i in range(len(y_samples)):
            y_samples[i] = self.labels[y_samples[i]]

        self.nb_classes = len(self.labels)
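        # nb_classes distinct labels; the integer labels are one-hot encoded below.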

        y_samples = np_utils.to_categorical(y_samples, self.nb_classes)

        config = dict()
        config['labels'] = self.labels
        config['nb_classes'] = self.nb_classes
        config['num_input_tokens'] = self.num_input_tokens
        config['expected_frames'] = self.expected_frames
        config['vgg16_include_top'] = self.vgg16_include_top
        self.config = config

        np.save(config_file_path, config)

        model = self.create_model()
        with open(architecture_file_path, 'w') as architecture_file:
            architecture_file.write(model.to_json())

        Xtrain, Xtest, Ytrain, Ytest = train_test_split(x_samples, y_samples, test_size=test_size,
                                                        random_state=random_state)
        print(len(x_samples))
        print(len(Xtrain))
        print(len(Xtest))

        train_gen = generate_batch(Xtrain, Ytrain)
        test_gen = generate_batch(Xtest, Ytest)
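        # generate_batch (defined elsewhere) is assumed to yield
        # (feature-sequence batch, label batch) pairs of size BATCH_SIZE indefinitely,
        # as fit_generator requires.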

        train_num_batches = len(Xtrain) // BATCH_SIZE
        test_num_batches = len(Xtest) // BATCH_SIZE
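        # save_best_only=True keeps only the weights from the epoch with the best
        # monitored value (val_loss by default).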
        checkpoint = ModelCheckpoint(filepath=weight_file_path, save_best_only=True)
        history = model.fit_generator(generator=train_gen, steps_per_epoch=train_num_batches,
                                      epochs=NUM_EPOCHS,
                                      verbose=1, validation_data=test_gen, validation_steps=test_num_batches,
                                      callbacks=[checkpoint])
        print(weight_file_path)
        model.save_weights(weight_file_path)

        return history

    def fit(self,
            data_dir_path,
            model_dir_path,
            vgg16_include_top=True,
            data_set_name='UCF-101',
            test_size=0.3,
            random_state=42):

        self.vgg16_include_top = vgg16_include_top

        config_file_path = self.get_config_file_path(model_dir_path,
                                                     vgg16_include_top)
        weight_file_path = self.get_weight_file_path(model_dir_path,
                                                     vgg16_include_top)
        architecture_file_path = self.get_architecture_file_path(
            model_dir_path, vgg16_include_top)

        self.vgg16_model = VGG16(include_top=self.vgg16_include_top,
                                 weights='imagenet')
        self.vgg16_model.compile(optimizer=SGD(),
                                 loss='categorical_crossentropy',
                                 metrics=['accuracy'])

        feature_dir_name = data_set_name + '-VGG16-Features'
        if not vgg16_include_top:
            feature_dir_name = data_set_name + '-VGG16-HiDimFeatures'
        max_frames = 0
        self.labels = dict()
        x_samples, y_samples = scan_and_extract_vgg16_features(
            data_dir_path,
            output_dir_path=feature_dir_name,
            model=self.vgg16_model,
            data_set_name=data_set_name)
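        # x_samples: list of (num_frames, feature_dim) feature arrays;
        # y_samples: the matching labels, used as keys for the label index below.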
        print(x_samples[0])
        self.num_input_tokens = x_samples[0].shape[1]
        frames_list = []
        for x in x_samples:
            print(x)
            frames = x.shape[0]
            frames_list.append(frames)
            max_frames = max(frames, max_frames)
        self.expected_frames = int(np.mean(frames_list))
        print('max frames: ', max_frames)
        print('expected frames: ', self.expected_frames)
        for i in range(len(x_samples)):
            x = x_samples[i]
            frames = x.shape[0]
            if frames > self.expected_frames:
                x = x[0:self.expected_frames, :]
                x_samples[i] = x
            elif frames < self.expected_frames:
                print('padding sample of shape', x.shape,
                      'up to expected_frames =', self.expected_frames)
                temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
                temp[0:frames, :] = x
                x_samples[i] = temp
        for y in y_samples:
            if y not in self.labels:
                self.labels[y] = len(self.labels)
        print(self.labels)
        for i in range(len(y_samples)):
            y_samples[i] = self.labels[y_samples[i]]

        self.nb_classes = len(self.labels)

        y_samples = np_utils.to_categorical(y_samples, self.nb_classes)

        config = dict()
        config['labels'] = self.labels
        config['nb_classes'] = self.nb_classes
        config['num_input_tokens'] = self.num_input_tokens
        config['expected_frames'] = self.expected_frames
        config['vgg16_include_top'] = self.vgg16_include_top

        self.config = config

        np.save(config_file_path, config)

        model = self.create_model()
        with open(architecture_file_path, 'w') as architecture_file:
            architecture_file.write(model.to_json())

        Xtrain, Xtest, Ytrain, Ytest = train_test_split(
            x_samples,
            y_samples,
            test_size=test_size,
            random_state=random_state)
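        # The held-out test split doubles as the validation set for checkpoint selection.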

        train_gen = generate_batch(Xtrain, Ytrain)
        test_gen = generate_batch(Xtest, Ytest)
        print('Xtrain', len(Xtrain), 'Xtest', len(Xtest))
        train_num_batches = len(Xtrain) // BATCH_SIZE
        test_num_batches = len(Xtest) // BATCH_SIZE
        print(test_num_batches, len(Xtest))
        checkpoint = ModelCheckpoint(filepath=weight_file_path,
                                     save_best_only=True)
        history = model.fit_generator(generator=train_gen,
                                      steps_per_epoch=train_num_batches,
                                      epochs=NUM_EPOCHS,
                                      verbose=1,
                                      validation_data=test_gen,
                                      validation_steps=test_num_batches,
                                      callbacks=[checkpoint])
        model.save_weights(weight_file_path)

        return history

    def fit(self,
            data_dir_path,
            model_dir_path,
            vgg16_include_top=True,
            data_set_name='UCF-101',
            test_size=0.2,
            random_state=42):
        self.vgg16_include_top = vgg16_include_top

        config_file_path = self.get_config_file_path(model_dir_path,
                                                     vgg16_include_top)
        weight_file_path = self.get_weight_file_path(model_dir_path,
                                                     vgg16_include_top)
        architecture_file_path = self.get_architecture_file_path(
            model_dir_path, vgg16_include_top)

        vgg16_model = VGG16(include_top=self.vgg16_include_top,
                            weights='imagenet')
        vgg16_model.compile(optimizer=SGD(),
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])
        self.vgg16_model = vgg16_model

        feature_dir_name = data_set_name + '-VGG16-Features'
        if not vgg16_include_top:
            feature_dir_name = data_set_name + '-VGG16-HiDimFeatures'
        max_frames = 0
        self.labels = dict()
        x_samples, y_samples = scan_and_extract_vgg16_features(
            data_dir_path,
            output_dir_path=feature_dir_name,
            model=self.vgg16_model,
            data_set_name=data_set_name)
        self.num_input_tokens = x_samples[0].shape[1]
        print("num_input_tokens= :", self.num_input_tokens)  # = 7x7x512
        frames_list = []
        for x in x_samples:
            frames = x.shape[0]
            frames_list.append(frames)
            max_frames = max(frames, max_frames)
        self.expected_frames = int(np.mean(frames_list))

        print("frames_list: = ", frames_list)
        print('max frames: ', max_frames)
        print('expected frames: ', self.expected_frames)
        print("len(x_samples): = ", len(x_samples))
        for i in range(len(x_samples)):
            x = x_samples[i]
            frames = x.shape[0]
            #print("x.shape",x.shape) #(frames,7x7x512)
            if frames > self.expected_frames:
                x = x[0:self.expected_frames, :]
                #print("x.shape if frame > expected frame ",x.shape)
                x_samples[i] = x
            elif frames < self.expected_frames:
                temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
                temp[0:frames, :] = x
                x_samples[i] = temp
                #print("x.shape if frame < expected frame (temp) ",temp.shape)
        print("WERAWERAWER")
        for y in y_samples:
            print("for y in y_samples: passed ")
            if y not in self.labels:
                self.labels[y] = len(self.labels)
        print("self.labels:", self.labels)
        for i in range(len(y_samples)):
            y_samples[i] = self.labels[y_samples[i]]

        self.nb_classes = len(self.labels)

        y_samples = np_utils.to_categorical(y_samples, self.nb_classes)

        config = dict()
        config['labels'] = self.labels
        config['nb_classes'] = self.nb_classes
        config['num_input_tokens'] = self.num_input_tokens
        config['expected_frames'] = self.expected_frames
        config['vgg16_include_top'] = self.vgg16_include_top
        self.config = config

        np.save(config_file_path, config)

        print("PASS NP SAVE in RCNN")

        model = self.create_model()
        with open(architecture_file_path, 'w') as architecture_file:
            architecture_file.write(model.to_json())
        print('RCNN: created model and saved architecture to', architecture_file_path)

        Xtrain, Xtest, Ytrain, Ytest = train_test_split(
            x_samples,
            y_samples,
            test_size=test_size,
            random_state=random_state)
        print("Xtrain", np.shape(Xtrain))
        print("Xtest", np.shape(Xtest))
        print("Ytrain", np.shape(Ytrain))
        print("Ytest", np.shape(Ytest))

        train_gen = generate_batch(Xtrain, Ytrain)
        test_gen = generate_batch(Xtest, Ytest)

        train_num_batches = len(Xtrain) // BATCH_SIZE
        test_num_batches = len(Xtest) // BATCH_SIZE
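        # steps_per_epoch / validation_steps cover only the full BATCH_SIZE batches
        # (floor division).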

        checkpoint = ModelCheckpoint(filepath=weight_file_path,
                                     save_best_only=True)
        history = model.fit_generator(generator=train_gen,
                                      steps_per_epoch=train_num_batches,
                                      epochs=NUM_EPOCHS,
                                      verbose=VERBOSE,
                                      validation_data=test_gen,
                                      validation_steps=test_num_batches,
                                      callbacks=[checkpoint])
        model.save_weights(weight_file_path)

        return history
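
    # A minimal usage sketch (hypothetical: the enclosing class name and the constants
    # BATCH_SIZE / NUM_EPOCHS / VERBOSE are assumed to be defined elsewhere in this module):
    #
    #   classifier = VGG16LSTMVideoClassifier()
    #   history = classifier.fit(data_dir_path='./very_large_data',
    #                            model_dir_path='./models/UCF-101',
    #                            vgg16_include_top=True,
    #                            data_set_name='UCF-101')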