Code example #1
0
    def _download_image_dataset(self, image_class_name):
        """Download every URL in ``self.imageUrls`` into the dataset directory
        for *image_class_name*.

        Each file is stored under a random SHA-256 based name so repeated
        downloads of the same class never collide.

        :param image_class_name: class name; also used as the sub-directory
                                 and file-name prefix
        :return: True when the URL list was non-empty (individual download
                 failures are only logged), False for an empty list or on
                 any unexpected error.
        """
        try:
            dir_path = os.path.dirname(os.path.realpath(__file__)) + main_image_data_dir + image_class_name + '/'
            Constants.directory_validation(dir_path)

            # Guard clause: nothing to do for an empty URL list.
            if not self.imageUrls:
                cph.failure_print('No images in the list')
                return False

            for imagedata in self.imageUrls:
                # Two large random ints hashed together make collisions
                # practically impossible for the generated file name.
                random_hash = hashlib.sha256(str(random.randint(0, 1000000000000000000)).encode('utf-8') +
                                             str(random.randint(0, 1000000000000000000)).encode('utf-8'))

                path = dir_path + image_class_name + random_hash.hexdigest() + '.' + imagedata.format
                if self._request_downloader(imagedata.content, path):
                    cph.info_print(self, ': Downloaded an image for ', image_class_name)
                else:
                    # NOTE(review): a failed single download still leads to a
                    # True return below — kept for caller compatibility.
                    cph.failure_print(self, ': Error downloading image to ' + path)
            return True
        except Exception as e:
            cph.failure_print("_download_image_dataset", e)
            return False
Code example #2
0
    def download_main_image_dataset(self, image_list):
        """Download every image dataset in *image_list* that is not already
        registered in the database, then record each new dataset in Mongo.

        :param image_list: mapping of dataset name -> iterable of image dicts
                           with 'thumbnailUrl', 'contentUrl' and 'format' keys
        """
        successfully_downloaded = False

        for dataset_name, images in image_list.items():
            dataset = DatabaseFunctions().find_item(imageDatasetConstants.database, imageDatasetConstants.collection,
                                                    imageDatasetConstants.id_dataset_name, dataset_name)
            # Guard clause: skip datasets that were downloaded previously.
            if dataset is not None:
                PrintHelper.warning_print("Image dataset already present")
                continue

            imageurls = []
            for image in images:
                imagemodel = ImageModel()
                imagemodel.thumbnail = image['thumbnailUrl']
                imagemodel.content = image['contentUrl']
                imagemodel.format = image['format']
                imageurls.append(imagemodel)

            # _download_image_dataset reads the URLs from self.imageUrls.
            self.imageUrls = imageurls
            successfully_downloaded = self._download_image_dataset(dataset_name)
            if successfully_downloaded:
                # Register the freshly downloaded dataset and its image count.
                number_of_images = Constants.get_number_of_files(main_image_data_dir, dataset_name)
                mongo_entry = {
                    'name': dataset_name,
                    'count': number_of_images
                }
                DatabaseFunctions().insert_item(imageDatasetConstants.database, imageDatasetConstants.collection,
                                                items_to_insert=mongo_entry)
Code example #3
0
    def transfer_datasets_and_start_training(self, dataset_list, model_hash_key):
        """Copy each dataset into the model's training directory and move
        roughly 30% of every dataset's images into the matching testing
        directory.

        The per-class directory layout is required by Keras'
        ImageDataGenerator utility.

        :param dataset_list: iterable of dataset (class) names to prepare
        :param model_hash_key: hash identifying this model's data directories
        :return: (success, number_of_samples, number_of_classes) where
                 number_of_samples is the smallest per-class training count.
        """
        try:
            base_dir = os.path.dirname(os.path.realpath(__file__))
            model_dir_path = base_dir + train_data_dir + model_hash_key + '/'
            Constants.directory_validation(model_dir_path)

            number_of_samples = 0
            for dataset_name in dataset_list:
                # Copy the class folder from the main image store into the
                # training directory (ImageDataGenerator needs one folder
                # per class under a single root).
                source_dir_path = base_dir + main_image_data_dir + dataset_name + '/'
                destination_dir_path = model_dir_path + dataset_name + '/'
                shutil.copytree(source_dir_path, destination_dir_path)

                training_directory_path = model_dir_path + dataset_name
                src_files = os.listdir(training_directory_path)

                # Move ~30% of the images to the TESTING folder: stop once
                # only 70% (the training share) would remain.
                testing_directory = base_dir + test_data_dir + model_hash_key + '/' + dataset_name + '/'
                for count, file_name in enumerate(src_files):
                    percentage = 100 * (len(src_files) - count) / len(src_files)
                    if percentage <= 70:
                        break

                    Constants.directory_validation(testing_directory)
                    full_file_name = os.path.join(training_directory_path, file_name)
                    if os.path.isfile(full_file_name):
                        shutil.copy(full_file_name, testing_directory + file_name)
                        os.remove(full_file_name)

                # Track the SMALLEST per-class sample count so the training
                # step uses a sample size every class can satisfy.
                temp_samples = Constants.get_number_of_files(train_data_dir + model_hash_key + '/', dataset_name)
                if number_of_samples == 0 or temp_samples < number_of_samples:
                    number_of_samples = temp_samples

            number_of_classes = len(os.listdir(model_dir_path))
            return True, number_of_samples, number_of_classes
        except Exception as e:
            cph.failure_print("transfer_datasets_and_start_training  ", e)
            return False, 0, 0
Code example #4
0
    def recieve_image_dataset_from_user(self, fileList, class_name):
        """Save user-uploaded image files into the main image directory for
        *class_name* and register the dataset (with its size) in Mongo.

        NOTE: the misspelled method name ('recieve') is kept for caller
        compatibility.

        :param fileList: iterable of uploaded file objects (werkzeug-style,
                         with ``.filename`` and ``.save()``)
        :param class_name: image class the uploads belong to
        """
        destination = os.path.dirname(os.path.realpath(__file__)) + main_image_data_dir + class_name + "/"
        # Hoisted out of the loop: the destination is the same for all files.
        Constants.directory_validation(destination)

        for upload in fileList:
            # BUG FIX: was rsplit("/")[0], which kept the FIRST path segment
            # of the uploaded name (dropping the real file name/extension).
            # basename also stops path components from escaping destination.
            filename = os.path.basename(upload.filename)
            PrintHelper.info_print("Uploaded: ", filename)
            upload.save(destination + filename)

        number_of_images = Constants.get_number_of_files(main_image_data_dir, class_name)
        mongo_entry = {
            'name' : class_name,
            'count': number_of_images
        }
        DatabaseFunctions().insert_item(imageDatasetConstants.database, imageDatasetConstants.collection,
                                        items_to_insert=mongo_entry)
Code example #5
0
	def start_validation(model_name, class_hash):
		"""Evaluate a previously trained model against its testing dataset.

		Loads the saved model file and weights for *class_hash*, runs the
		Keras evaluate generator over the testing images, stores the
		accuracy/loss in Mongo and deletes the testing data afterwards.

		:param model_name: base model name (used to look up image dimensions)
		:param class_hash: hash identifying the model's files and test data
		"""
		# Models are trained on square images, so height equals width.
		img_width = Constants.get_image_dimensions(model_name)
		img_height = img_width

		cph = PrintHelper()
		# (removed unused local: base_validation_dir was never read)
		validation_data_dir = os.path.dirname(os.path.realpath(__file__)) + test_data_dir + class_hash
		save_weights = os.path.dirname(
			os.path.realpath(__file__)) + Constants.weights_directory_path + 'Weights#' + class_hash + '.h5'
		save_model_path = os.path.dirname(
				os.path.realpath(__file__)) + Constants.model_file_directory_path + 'Model_file#' + class_hash + '.h5'

		batch_size = 5
		number_of_files = ModelValidation.number_of_images(validation_data_dir)

		cph.info_print('Loading Model')
		model = load_model(save_model_path)
		model.load_weights(save_weights)

		# Mark the validation as running so the database reflects progress.
		models_result = SaveModelToMongo.validation_started(class_hash)

		PrintHelper.info_print(' Model Loaded')
		model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

		PrintHelper.info_print(' Model Compiled')
		# prepare data augmentation configuration

		PrintHelper.info_print('creating image data generator')

		validation_generator = DataGeneratorUtility.validation_data_generator(img_height, img_width,
		                                                                      batch_size=batch_size,
		                                                                      validation_data_dir=validation_data_dir)

		PrintHelper.info_print('Starting Evaluate Generator')
		loss, accuracy = model.evaluate_generator(validation_generator,number_of_files)

		PrintHelper.info_print('Loss: ', loss, ' Accuracy: ', accuracy)

		# Testing images are one-shot: clean them up after the evaluation.
		shutil.rmtree(validation_data_dir)
		SaveModelToMongo.validation_completed(class_hash=class_hash,
		                                      stats=cph.return_string('Accuracy: ', round(accuracy, 4), ' Loss: ',
		                                                              round(loss, 4)),
		                                      models_result=models_result)
Code example #6
0
    def __init__(self,
                 model_name,
                 images,
                 image_count,
                 model_hash=None,
                 base_model=None):
        """Prepare a prediction / feature-extraction run for *model_name*.

        :param model_name: name/hash of the model to predict/extract features from
        :param images: path of the downloaded images
        :param image_count: number of images under *images*
        :param model_hash: hash saved in the database; required for loading
                           weights and model files
        :param base_model: base model name; required only when a custom
                           generated model is being evaluated
        """
        self._images_path = images
        self._model_name = model_name
        size = Constants.get_image_dimensions(model_name)
        self._target_size = (size, size)
        self._image_count = image_count

        # Model file/weight paths are only resolved for custom models, i.e.
        # when both a base model and a model hash were supplied.
        self._model_path = None
        self._model_weights = None
        if base_model is None or model_hash is None:
            return

        try:
            here = os.path.dirname(os.path.realpath(__file__))
            self._model_path = (here
                                + Constants.saving_model_specific_file(base_model)
                                + base_model + '----model_file' + model_hash + '.h5')
            self._model_weights = (here
                                   + Constants.saving_model_weights(base_model)
                                   + base_model + '----weights' + model_hash + '.h5')
        except Exception as e:
            PrintHelper.failure_print("model file creation", e)
Code example #7
0
def start_predictions():
    """Flask endpoint: save uploaded images and run model predictions on them.

    Expects multipart form data with one or more 'file' uploads plus the form
    fields 'model_name' and — for custom models — 'model_hash' and
    'model_base_name'.

    :return: success response containing, per uploaded file, a list of
             {class, description, probability} entries; failure response on
             any error.
    """
    try:
        form = request.form

        model_name = ""

        # Needed only with custom model training.
        # NOTE(review): base_model/model_hash are parsed but never passed to
        # FeatureExtraction below — confirm whether that is intentional.
        base_model = None
        model_hash = None

        for key, value in form.items():
            if key == 'model_hash':
                model_hash = value
            elif key == 'model_name':
                model_name = value
            elif key == 'model_base_name':
                base_model = value

        # The destination only depends on model_name, so compute it once.
        destination_main = os.path.dirname(
            os.path.realpath(__file__)) + prediction_images_dir + '/'
        destination = destination_main + model_name + '/'

        image_count = 0
        image_paths = ''
        save_files = request.files.getlist("file")
        for upload in save_files:
            # BUG FIX: was rsplit("/")[0], which kept the FIRST path segment
            # of the uploaded name instead of the actual file name.
            filename = os.path.basename(upload.filename)
            PrintHelper.info_print("Uploaded: ", filename)
            Constants.directory_validation(destination)
            upload.save(destination + filename)
            image_paths = destination_main
            # BUG FIX: was 'image_count = +1' (unary plus), which left the
            # count stuck at 1 no matter how many files were uploaded.
            image_count += 1

        feature_extraction = FeatureExtraction(model_name, image_paths,
                                               image_count)
        predictions = feature_extraction.get_predictions()

        # Convert the (class, description, probability) tuples into a
        # JavaScript-friendly JSON structure, paired with the source file.
        prediction_array = []
        for count, item in enumerate(predictions):
            result_outer = [{
                "class": prediction[0],
                "description": prediction[1],
                "probability": str(prediction[2])
            } for prediction in item]

            image_file = save_files[count]
            final_prediction_result = {
                "filename": os.path.basename(image_file.filename),
                "predictions": result_outer
            }
            prediction_array.append(final_prediction_result)

        return Response.create_success_response(prediction_array, "success")

    except Exception as e:
        # (typo fix: 'occurered' -> 'occurred')
        PrintHelper.failure_print("An error occurred:  " + str(e))
        return Response.create_failure_response("An error occurred: " + str(e))