match = re.search(r'([A-Za-z_\d\.]+)-epoch(\d+)-.*\.hdf5',
                  args.weights)
# The resume epoch is parsed from checkpoint filenames of the form
# '<name>-epoch<NN>-...hdf5'; re.search returns None for other names,
# in which case group() would raise here.
last_epoch = int(match.group(2))


def print_distribution(ids, classes=None):
    if classes is None:
        classes = [get_class(idx.split('/')[-2]) for idx in ids]
    classes_count = np.bincount(classes)
    for class_name, class_count in zip(CLASSES, classes_count):
        print('{:>22}: {:5d} ({:04.1f}%)'.format(
            class_name, class_count, 100. * class_count / len(classes)))
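
# Hypothetical usage, assuming CLASSES, get_class and the ids list from the
# training section below are in scope:
#     print_distribution(ids_train)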


model.summary()
model = multi_gpu_model(model, gpus=args.gpus)

if not (args.test or args.test_train):

    # TRAINING
    ids = glob.glob(join(TRAIN_FOLDER, '*/*.jpg'))
    ids.sort()

    if not args.extra_dataset:
        ids_train, ids_val = train_test_split(ids,
                                              test_size=0.1,
                                              random_state=SEED)
    else:
        ids_train = ids
        ids_val = []
                yield _x, np.copy(y)
            else:
                yield _x

            batch = 0


if a.model:
    model = load_model(a.model, compile=False)
else:
    model = get_model()
    if a.weights:
        print("Loading weights from %s" % a.weights)
        model.load_weights(a.weights, by_name=True, skip_mismatch=True)
model.summary()
if gpus > 1: model = multi_gpu_model(model, gpus=gpus)

checkpoint = ModelCheckpoint('models/{val_rmse:.6f}.hdf5',
                             monitor='val_rmse',
                             verbose=1,
                             save_best_only=True)
early = EarlyStopping(patience=10, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.5,
                              patience=2,
                              min_lr=1e-7,
                              verbose=1,
                              mode='min')

callbacks = [checkpoint, early, reduce_lr]
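
# 'val_rmse' (monitored above) is not a built-in Keras metric, so the script
# must define one and pass it to model.compile(); a minimal sketch of such a
# metric (an assumption -- the actual definition is not shown in this excerpt):
import keras.backend as K

def rmse(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
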
Example #3
    def generate_cnn_codes(self):
        """
        Use trained CNN to generate CNN codes/features for each image or (image, metadata) pair
        which can be used to train an LSTM.
        :param: 
        :return: 
        """

        metadataStats = json.load(open(self.params.files['dataset_stats']))
        trainData = json.load(open(self.params.files['training_struct']))
        testData = json.load(open(self.params.files['test_struct']))

        if self.params.args.load_model:
            model = load_model(self.params.args.load_model)
        else:
            model = get_cnn_model(self.params)

        if self.params.args.load_weights:
            model.load_weights(self.params.args.load_weights, by_name=True)

        features_layer_index = -3
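        # Index -3 selects the layer whose activations serve as the CNN codes;
        # the assumption (cf. the commented-out assert below) is that the last
        # two layers form the classifier head.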

        featuresModel = Model(model.inputs,
                              model.layers[features_layer_index].output)

        #assert model.layers[features_layer_index].output.shape[1] == self.params.cnn_multi_layer_length

        if self.params.print_model_summary:
            featuresModel.summary()

        featuresModel = multi_gpu_model(featuresModel, gpus=self.params.gpus)

        allTrainCodes = []

        featureDirs = ['train', 'test']

        for featureDir in featureDirs:

            codesData = {}

            isTrain = (featureDir == 'train')
            index = 0

            if isTrain:
                data = trainData
            else:
                data = testData

            outDir = os.path.join(self.params.directories['cnn_codes'],
                                  featureDir)
            if not os.path.isdir(outDir):
                os.mkdir(outDir)

            N = len(data)
            initBatch = True
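            # Batch pattern: allocate a fresh buffer per batch; when fewer
            # than batch_size items remain, fall back to a batch size of 1 so
            # no image is dropped.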
            for i, currData in enumerate(tqdm(data)):
                if initBatch:
                    if N - i < self.params.batch_size:
                        batchSize = 1
                    else:
                        batchSize = self.params.batch_size
                    imgdata = np.zeros((batchSize, self.params.target_img_size,
                                        self.params.target_img_size,
                                        self.params.num_channels))
                    metadataFeatures = np.zeros(
                        (batchSize, self.params.metadata_length))
                    batchIndex = 0
                    tmpBasePaths = []
                    tmpFeaturePaths = []
                    tmpCategories = []
                    initBatch = False

                path, _ = os.path.split(currData['img_path'])
                if isTrain:
                    basePath = path[len(self.params.directories['train_data']) + 1:]
                else:
                    basePath = path[len(self.params.directories['test_data']) + 1:]

                tmpBasePaths.append(basePath)
                if isTrain:
                    tmpCategories.append(currData['category'])

                origFeatures = np.array(
                    json.load(open(currData['features_path'])))
                tmpFeaturePaths.append(currData['features_path'])

                # Normalise the metadata the same way the dataset stats were
                # computed: (x - metadata_mean) / metadata_max.
                metadataFeatures[batchIndex, :] = np.divide(
                    origFeatures - np.array(metadataStats['metadata_mean']),
                    metadataStats['metadata_max'])

                img = scipy.misc.imread(currData['img_path'])
                crop_size = self.params.target_img_size
                x0 = int(img.shape[1] / 2 - crop_size / 2)
                x1 = x0 + crop_size
                y0 = int(img.shape[0] / 2 - crop_size / 2)
                y1 = y0 + crop_size

                img = img[y0:y1, x0:x1, ...].astype(np.float32)

                imgdata[batchIndex, ...] = img

                batchIndex += 1

                if batchIndex == batchSize:
                    imgdata = imagenet_utils.preprocess_input(imgdata) / 255.

                    if self.params.use_metadata:
                        cnnCodes = featuresModel.predict(
                            [imgdata, metadataFeatures], batch_size=batchSize)
                    else:
                        cnnCodes = featuresModel.predict(imgdata,
                                                         batch_size=batchSize)

                    for codeIndex, currCodes in enumerate(cnnCodes):
                        currBasePath = tmpBasePaths[codeIndex]
                        #outFile = os.path.join(outDir, '%07d.npy' % index)
                        outFile = os.path.join(outDir, '%07d' % index)
                        index += 1
                        np.save(outFile, currCodes)
                        #json.dump(currCodes.tolist(), open(outFile, 'w'))
                        if currBasePath not in codesData.keys():
                            codesData[currBasePath] = {}
                            codesData[currBasePath]['cnn_codes_paths'] = []
                            codesData[currBasePath]['metadata_paths'] = []
                            if isTrain:
                                codesData[currBasePath][
                                    'category'] = tmpCategories[codeIndex]
                        codesData[currBasePath]['cnn_codes_paths'].append(
                            outFile)
                        codesData[currBasePath]['metadata_paths'].append(
                            tmpFeaturePaths[codeIndex])
                        if isTrain:
                            allTrainCodes.append(currCodes)
                        initBatch = True

            if isTrain:
                codesTrainData = codesData
            else:
                codesTestData = codesData

        N = len(allTrainCodes[0])
        sumCodes = np.zeros(N)
        for currCodes in allTrainCodes:
            sumCodes += currCodes
        avgCodes = sumCodes / len(allTrainCodes)
        maxCodes = np.zeros(N)
        for currCodes in allTrainCodes:
            maxCodes = np.maximum(maxCodes, np.abs(currCodes - avgCodes))
        maxCodes[maxCodes == 0] = 1
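        # codes_mean / codes_max define the downstream normalisation
        # (codes - mean) / max; zero deviations are replaced by 1 to avoid
        # division by zero.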

        maxTemporal = 0
        for key in codesTrainData.keys():
            currTemporal = len(codesTrainData[key]['cnn_codes_paths'])
            if currTemporal > maxTemporal:
                maxTemporal = currTemporal

        codesStats = {}
        codesStats['codes_mean'] = avgCodes.tolist()
        codesStats['codes_max'] = maxCodes.tolist()
        codesStats['max_temporal'] = maxTemporal

        json.dump(codesTrainData,
                  open(self.params.files['multi_training_struct'], 'w'))
        json.dump(codesStats, open(self.params.files['cnn_codes_stats'], 'w'))
        json.dump(codesTestData,
                  open(self.params.files['multi_test_struct'], 'w'))
Example #4
    def test(self):

        if self.params.multi:
            codesTestData = json.load(
                open(self.params.files['multi_test_struct']))
            codesStats = json.load(open(self.params.files['cnn_codes_stats']))
            if self.params.max_temporal != 0:
                codesStats['max_temporal'] = self.params.max_temporal

        metadataStats = json.load(open(self.params.files['dataset_stats']))

        metadataMean = np.array(metadataStats['metadata_mean'])
        metadataMax = np.array(metadataStats['metadata_max'])

        loaded_filename = None
        if self.params.args.load_model:
            from keras.utils.generic_utils import CustomObjectScope
            with CustomObjectScope({
                    'relu6': keras.applications.mobilenet.relu6,
                    'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D,
            }):
                model = load_model(self.params.args.load_model)
            loaded_filename = os.path.basename(self.params.args.load_model)
        else:
            model = (get_multi_model(self.params, codesStats)
                     if self.params.multi
                     else get_cnn_model(self.params))

        if self.params.args.load_weights:
            model.load_weights(self.params.args.load_weights, by_name=True)
            loaded_filename = os.path.basename(self.params.args.load_weights)

        model = multi_gpu_model(model, gpus=self.params.gpus)

        index = 0

        timestr = time.strftime("%Y%m%d-%H%M%S")

        # [:-5] strips the '.hdf5' extension; this assumes a model or weights
        # file was loaded above, otherwise loaded_filename is still None.
        prediction_name_preffix = os.path.join(
            self.params.directories['predictions'],
            'predictions-%s-%s' % (loaded_filename[:-5], timestr))
        fid = open(prediction_name_preffix + '.txt', 'w')

        predictions_map = {}

        def walkdir(folder):
            for root, dirs, files in os.walk(folder):
                if len(files) > 0:
                    yield (root, dirs, files)

        num_sequences = 0
        for _ in walkdir(self.params.directories['test_data']):
            num_sequences += 1

        for root, dirs, files in tqdm(
                walkdir(self.params.directories['test_data']),
                total=num_sequences):
            if len(files) > 0:
                imgPaths = []
                metadataPaths = []
                slashes = [i for i, ltr in enumerate(root) if ltr == '/']
                bbID = int(root[slashes[-1] + 1:])
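                # The bounding-box ID is the last path component, i.e.
                # equivalent to int(os.path.basename(root)).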

            for file in files:
                if file.endswith(self.params.image_format_processed):
                    imgPaths.append(os.path.join(root, file))
                    metadataPaths.append(
                        os.path.join(root, file[:-4] + '_features.json'))

            if len(files) > 0:
                inds = []
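                # Recover each image's temporal index from its filename (the
                # token between the third- and second-to-last underscores) and
                # sort by it.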
                for metadataPath in metadataPaths:
                    underscores = [
                        ind for ind, ltr in enumerate(metadataPath)
                        if ltr == '_'
                    ]
                    inds.append(
                        int(metadataPath[underscores[-3] + 1:underscores[-2]]))
                inds = np.argsort(np.array(inds)).tolist()

                if not self.params.multi:
                    # single-image

                    tta_flip_v = self.params.flip_north_south
                    tta_flip_h = self.params.flip_east_west

                    currBatchSize = (len(inds) * (2 if tta_flip_v else 1)
                                     * (2 if tta_flip_h else 1))
                    imgdata = np.zeros(
                        (currBatchSize, self.params.target_img_size,
                         self.params.target_img_size,
                         self.params.num_channels))
                    metadataFeatures = np.zeros(
                        (currBatchSize, self.params.metadata_length))

                    for ind in inds:
                        features = np.array(json.load(open(
                            metadataPaths[ind])))

                        img = scipy.misc.imread(
                            imgPaths[ind])  #image.load_img(imgPaths[ind])
                        crop_size = self.params.target_img_size
                        x0 = int(img.shape[1] / 2 - crop_size / 2)
                        x1 = x0 + crop_size
                        y0 = int(img.shape[0] / 2 - crop_size / 2)
                        y1 = y0 + crop_size

                        img = img[y0:y1, x0:x1, ...].astype(np.float32)

                        #show_image(img)
                        #raw_input("press enter")

                        metadataFeatures[ind, :] = features

                        img = imagenet_utils.preprocess_input(img) / 255.
                        imgdata[ind, ...] = img

                        tta_idx = len(inds) + ind
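                        # TTA slot layout: the original image sits at ind,
                        # vertical flips at len(inds) + ind, horizontal flips
                        # one block later, both-axis flips in the final block.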
                        if tta_flip_v:
                            imgdata[tta_idx, ...] = flip_axis(img, 0)
                            metadataFeatures[tta_idx, :] = transform_metadata(
                                features, flip_h=False, flip_v=True)
                            tta_idx += len(inds)

                        if tta_flip_h:
                            imgdata[tta_idx, ...] = flip_axis(img, 1)
                            metadataFeatures[tta_idx, :] = transform_metadata(
                                features, flip_h=True, flip_v=False)
                            tta_idx += len(inds)

                            if tta_flip_v:
                                imgdata[tta_idx, ...] = flip_axis(
                                    flip_axis(img, 1), 0)
                                metadataFeatures[tta_idx, :] = transform_metadata(
                                    features, flip_h=True, flip_v=True)
                                tta_idx += len(inds)

                    if self.params.use_metadata:
                        metadataFeatures = np.divide(
                            metadataFeatures -
                            np.array(metadataStats['metadata_mean']),
                            metadataStats['metadata_max'])
                        if self.params.mask_metadata:
                            for ind in inds:
                                metadataFeatures[ind] = mask_metadata(
                                    metadataFeatures[ind])

                        _predictions = model.predict(
                            [imgdata, metadataFeatures],
                            batch_size=currBatchSize)
                    else:
                        _predictions = model.predict(imgdata,
                                                     batch_size=currBatchSize)

                    predictions_map[str(bbID)] = _predictions
                    predictions = np.sum(_predictions, axis=0)

                else:
                    # multi-image

                    currBatchSize = len(inds)
                    metadataFeatures = np.zeros(
                        (currBatchSize, self.params.metadata_length))

                    codesIndex = 0
                    code_index = '/'.join(root.split('/')[-3:])
                    codesPaths = codesTestData[code_index]
                    codesFeatures = []
                    for ind in inds:

                        features = np.array(json.load(open(
                            metadataPaths[ind])))
                        metadataFeatures[ind, :] = features

                        codesFeatures.append(json.load(
                            open(codesPaths['cnn_codes_paths'][codesIndex])))
                        codesIndex += 1

                    if self.params.use_metadata:
                        codesMetadata = np.zeros(
                            (1, codesStats['max_temporal'],
                             self.params.cnn_multi_layer_length +
                             self.params.metadata_length))
                    else:
                        codesMetadata = np.zeros(
                            (1, codesStats['max_temporal'],
                             self.params.cnn_multi_layer_length))

                    timestamps = []
                    for codesIndex in range(currBatchSize):
                        cnnCodes = codesFeatures[codesIndex]
                        metadata = metadataFeatures[codesIndex]
                        #print(metadata)
                        # Rough "minutes since 1970" key, used only to order
                        # the sequence chronologically below.
                        timestamp = ((metadata[4] - 1970) * 525600 +
                                     metadata[5] * 12 * 43800 +
                                     metadata[6] * 31 * 1440 +
                                     metadata[7] * 60)
                        timestamps.append(timestamp)
                        cnnCodes = np.divide(
                            cnnCodes - np.array(codesStats['codes_mean']),
                            np.array(codesStats['codes_max']))
                        metadata = np.divide(metadata - metadataMean,
                                             metadataMax)
                        #print(metadata)

                        if self.params.use_metadata:
                            if self.params.mask_metadata:
                                metadata = mask_metadata(metadata)
                            codesMetadata[0, codesIndex, :] = np.concatenate(
                                (cnnCodes, metadata), axis=0)
                        else:
                            codesMetadata[0, codesIndex, :] = cnnCodes

                    sortedInds = sorted(range(len(timestamps)),
                                        key=lambda k: timestamps[k])
                    codesMetadata[0, :len(sortedInds), :] = codesMetadata[
                        0, sortedInds, :]
                    predictions = model.predict(codesMetadata, batch_size=1)

            if len(files) > 0:
                prediction = np.argmax(predictions)
                prediction_category = self.params.category_names[prediction]
                fid.write('%d,%s\n' % (bbID, prediction_category))
                index += 1

        hickle.dump(predictions_map, prediction_name_preffix + ".hkl")
        fid.close()
        print(prediction_name_preffix + '.txt')
        print(prediction_name_preffix + '.hkl')
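
        # The .hkl dump keeps the raw per-sequence prediction arrays for
        # offline ensembling; a hypothetical way to reload them:
        #     import hickle
        #     predictions_map = hickle.load(prediction_name_preffix + '.hkl')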
Example #5
    def train_multi(self):
        """
        Train LSTM pipeline using pre-generated CNN codes.
        :param: 
        :return: 
        """

        allCodesTrainingData = json.load(
            open(self.params.files['multi_training_struct']))

        # Add 50% of the /val/ data (which contains false_detection instances)
        # to the training set; leave the rest for validation.
        codesTrainData = {}
        codesValidData = {}
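
        # Note: np.random.randint(2) below gives an unseeded 50/50 split, so
        # the train/validation partition differs between runs.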

        for k, v in allCodesTrainingData.items():  # .iteritems() in Python 2
            if k.startswith('val/'):
                if np.random.randint(2):
                    codesTrainData[k] = v
                else:
                    codesValidData[k] = v
            else:
                codesTrainData[k] = v

        assert len(allCodesTrainingData) == len(codesTrainData) + len(codesValidData)

        codesStats = json.load(open(self.params.files['cnn_codes_stats']))
        if self.params.max_temporal != 0:
            codesStats['max_temporal'] = self.params.max_temporal

        metadataStats = json.load(open(self.params.files['dataset_stats']))

        loaded_filename = None
        if self.params.args.load_model:
            model = load_model(self.params.args.load_model)
            loaded_filename = os.path.basename(self.params.args.load_model)
        else:
            model = get_multi_model(self.params, codesStats)

        if self.params.args.load_weights:
            model.load_weights(self.params.args.load_weights, by_name=True)
            loaded_filename = os.path.basename(self.params.args.load_weights)

        initial_epoch = self.get_initial_epoch(loaded_filename)

        if self.params.print_model_summary:
            model.summary()

        model = multi_gpu_model(model, gpus=self.params.gpus)

        model.compile(optimizer=RMSprop(lr=self.params.learning_rate),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        preffix = self.get_preffix()

        print("training multi-image model: " + preffix)

        filePath = os.path.join(
            self.params.directories['multi_checkpoint_weights'],
            preffix + '-epoch_{epoch:02d}-acc_{acc:.4f}-val_acc_{val_acc:.4f}.hdf5')

        checkpoint = ModelCheckpoint(filepath=filePath,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=False,
                                     mode='auto',
                                     period=1)
        reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                      factor=0.1,
                                      patience=1,
                                      min_lr=1e-7,
                                      epsilon=0.01,
                                      verbose=1)

        model.fit_generator(
            generator=codes_metadata_generator(
                self.params, codesTrainData, metadataStats, codesStats,
                class_aware_sampling=not self.params.leave_unbalanced,
                temporal_dropout=self.params.temporal_dropout),
            steps_per_epoch=int(
                math.ceil(len(codesTrainData) / self.params.batch_size)),
            epochs=self.params.epochs,
            callbacks=[checkpoint, FMOW_Callback(), reduce_lr],
            initial_epoch=initial_epoch,
            validation_data=codes_metadata_generator(
                self.params, codesValidData, metadataStats, codesStats,
                class_aware_sampling=False,
                temporal_dropout=0.),
            validation_steps=int(
                math.ceil(len(codesValidData) / self.params.batch_size)))

        model.save(self.params.files['multi_model'])
Example #6
    def train(self):
        """
        Train CNN with or without metadata depending on setting of 'use_metadata' in params.py.
        :param: 
        :return: 
        """

        allTrainingData = json.load(open(self.params.files['training_struct']))

        # Add 50% of the /val/ data (which contains false_detection instances)
        # to the training set; leave the rest for validation.
        trainData = [
            _t for _t in allTrainingData
            if _t['features_path'].find('/val/') == -1
        ]
        allValidData = [
            _t for _t in allTrainingData
            if _t['features_path'].find('/val/') != -1
        ]

        addToTrainData, validData = train_test_split(allValidData,
                                                     test_size=0.5)

        trainData.extend(addToTrainData)

        assert len(allTrainingData) == len(trainData) + len(validData)

        if self.params.views != 0:
            allTrainingViews = []
            for k, g in groupby(sorted(allTrainingData),
                                lambda x: x['features_path'].split('/')[-2]):
                group = list(g)
                if len(group) >= self.params.views:
                    allTrainingViews.append(group)
            trainViews = [
                _t for _t in allTrainingViews
                if _t[0]['features_path'].find('/val/') == -1
            ]
            allValidViews = [
                _t for _t in allTrainingViews
                if _t[0]['features_path'].find('/val/') != -1
            ]

            addToTrainViews, validViews = train_test_split(allValidViews,
                                                           test_size=0.5)
            trainViews.extend(addToTrainViews)

            assert len(allTrainingViews) == len(trainViews) + len(validViews)

            # for validation, keep only groups with exactly `views` images
            validViews = [
                _t for _t in validViews if len(_t) == self.params.views
            ]

        metadataStats = json.load(open(self.params.files['dataset_stats']))

        loaded_filename = None
        if self.params.args.load_model:
            from keras.utils.generic_utils import CustomObjectScope
            with CustomObjectScope({
                    'relu6': keras.applications.mobilenet.relu6,
                    'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D,
            }):
                model = load_model(self.params.args.load_model)
            loaded_filename = os.path.basename(self.params.args.load_model)
        else:
            model = get_cnn_model(self.params)

        if self.params.args.load_weights:
            model.load_weights(self.params.args.load_weights,
                               by_name=True)  # Keras >= 2.1.2 also accepts skip_mismatch=True
            loaded_filename = os.path.basename(self.params.args.load_weights)

        initial_epoch = self.get_initial_epoch(loaded_filename)

        if self.params.print_model_summary:
            model.summary()

        model = multi_gpu_model(model, gpus=self.params.gpus)

        _loss = self.params.loss

        if _loss == 'focal':
            loss = focal_loss
        elif _loss == 'softF1':
            loss = softF1_loss
        elif _loss == 'surrogateF1':
            loss = surrogateF1_loss
        else:
            loss = _loss
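
        # focal_loss, softF1_loss and surrogateF1_loss are custom objectives
        # defined elsewhere in this codebase; for orientation, a generic
        # focal-loss sketch (an assumption, not necessarily the definition
        # used here):
        #     def focal_loss(y_true, y_pred, gamma=2.0):
        #         p = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())
        #         return -K.sum(y_true * K.pow(1. - p, gamma) * K.log(p), axis=-1)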

        model.compile(optimizer=Adam(lr=self.params.learning_rate),
                      # amsgrad=self.params.amsgrad (newer Keras only)
                      loss=loss,
                      metrics=['accuracy'])

        preffix = self.get_preffix()

        print("training single-image model: " + preffix)

        filePath = os.path.join(
            self.params.directories['cnn_checkpoint_weights'],
            preffix + '-epoch_{epoch:02d}-acc_{acc:.4f}-val_acc_{val_acc:.4f}.hdf5')

        checkpoint = ModelCheckpoint(filepath=filePath,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=False,
                                     mode='auto')
        reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                      factor=0.1,
                                      patience=1,
                                      min_lr=1e-7,
                                      epsilon=0.01,
                                      verbose=1)

        if self.params.views != 0:
            trainData = trainViews
            validData = validViews

        print("Train samples: %d, validation samples: %d" %
              ((len(trainData), len(validData))))

        model.fit_generator(
            generator=img_metadata_generator(
                self.params,
                trainData,
                metadataStats,
                class_aware_sampling=not self.params.leave_unbalanced),
            steps_per_epoch=int(
                math.ceil((len(trainData) / self.params.batch_size))),
            class_weight=self.get_class_weights()
            if self.params.weigthed else None,
            epochs=self.params.epochs,
            initial_epoch=initial_epoch,
            callbacks=[checkpoint, FMOW_Callback(), reduce_lr],
            validation_data=img_metadata_generator(self.params,
                                                   validData,
                                                   metadataStats,
                                                   class_aware_sampling=False,
                                                   augmentation=False),
            validation_steps=int(
                math.ceil((len(validData) / self.params.batch_size))),
            shuffle=False,
        )

        model.save(self.params.files['cnn_model'])
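
# Pipeline order implied by these examples: train() fits the single-image CNN,
# generate_cnn_codes() exports per-image features, train_multi() fits the LSTM
# on the code sequences, and test() writes the final predictions.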