Example #1
0
def classify_image(path_to_image: str,
                   model: Optional[Db_vae] = None,
                   path_to_model: Optional[str] = None,
                   z_dim: Optional[int] = None,
                   device: Optional[str] = None,
                   batch_size: int = 10):
    """Classify an image from a particular path, given either an intitialized model or path to model."""
    if not model and not path_to_model:
        logger.error(
            "No model or path_to_model given",
            next_step="Classification will not be done",
            tip="Instantiate with a trained model, or set `path_to_model`.")
        raise ValueError("No model or path_to_model given")

    model = Db_vae.init(path_to_model, device,
                        z_dim) if model is None else model

    # Make sub-images
    img = utils.read_image(path_to_image)
    sub_images: torch.Tensor = slide_windows_over_img(img, 30, 64, 10, 0.2)
    sub_images = torch.split(sub_images, batch_size)

    if utils.find_face_in_subimages(model, sub_images, device):
        logger.success("This is a face!")
        return True
    else:
        logger.error("This is NOT a face!")
        return False
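A minimal usage sketch for the function above; the checkpoint path, `z_dim`, and device choice below are illustrative assumptions, not values from the original project:

# Hypothetical call; the checkpoint path and z_dim are placeholders.
is_face = classify_image(
    "images/sample.jpg",
    path_to_model="checkpoints/db_vae.pt",
    z_dim=200,
    device="cuda" if torch.cuda.is_available() else "cpu",
)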
Example #2
0
    def calculate_checksum(self):
        """Compute the file's SHA-256 checksum and record its length in bytes."""
        with open(self.filepath, 'rb') as file:
            file_content = file.read()
            checksum = hashlib.sha256(file_content).hexdigest()
            self.file_length = len(file_content)
        self.checksum = checksum
        logger.success("Checksum calculated for file {}!".format(self.filepath))
        return checksum
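The single `file.read()` above holds the entire file in memory. A sketch of a chunked variant over the same attributes (`filepath`, `file_length`, `checksum`); the method name and chunk size are our assumptions, not part of the original:

    def calculate_checksum_chunked(self, chunk_size: int = 1 << 20):
        """Stream the file through SHA-256 in 1 MiB chunks instead of one read()."""
        sha = hashlib.sha256()
        length = 0
        with open(self.filepath, 'rb') as file:
            # iter() keeps calling file.read(chunk_size) until it returns b'' at EOF
            for chunk in iter(lambda: file.read(chunk_size), b''):
                sha.update(chunk)
                length += len(chunk)
        self.file_length = length
        self.checksum = sha.hexdigest()
        return self.checksum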
Example #3
0
def change_avatar(request):
    """Change the user's avatar to the transmitted image"""
    user = request.user

    user.avatar = request.FILES.get("avatar_file")
    user.save()

    logger.success(f"User '{user.username}' changed avatar successfully")
    return JsonResponse({"new_avatar_url": user.avatar.url})
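`request.FILES.get("avatar_file")` returns `None` when no file was sent, which the view above would silently save as an empty avatar. A hedged variant with an explicit guard (the JSON error shape and 400 status are our choice, not from the original):

def change_avatar(request):
    """Change the user's avatar to the uploaded image, rejecting empty requests."""
    avatar_file = request.FILES.get("avatar_file")
    if avatar_file is None:
        logger.error(f"User '{request.user.username}' sent no avatar file")
        return JsonResponse({"error": "no avatar_file in request"}, status=400)

    request.user.avatar = avatar_file
    request.user.save()

    logger.success(f"User '{request.user.username}' changed avatar successfully")
    return JsonResponse({"new_avatar_url": request.user.avatar.url})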
Example #4
0
    def form_valid(self, form):
        super().form_valid(form)  # first, save the new user object
        logger.success(
            f"New user '{form.cleaned_data['username']}' registered successfully"
        )
        messages.success(self.request,
                         'Congratulations! You registered successfully!')
        new_user = authenticate(username=form.cleaned_data['username'],
                                password=form.cleaned_data['password1'])
        login(self.request,
              new_user,
              backend='django.contrib.auth.backends.ModelBackend')
        logger.info(
            f"New user '{form.cleaned_data['username']}' logged in after registering"
        )
        return redirect(
            self.request.session.get(
                'next',
                reverse('profile', kwargs={"username": new_user.username})))
Example #5
0
    def train(self, epochs: Optional[int] = None):
        # Optionally use passed epochs
        epochs = self.epochs if epochs is None else epochs

        # Start training and validation cycle
        for epoch in range(epochs):
            epoch_start_t = datetime.now()
            logger.info(f"Starting epoch: {epoch+1}/{epochs}")

            self._update_sampling_histogram(epoch)

            # Training
            train_loss, train_acc = self._train_epoch()
            epoch_train_t = datetime.now() - epoch_start_t
            logger.info(f"epoch {epoch+1}/{epochs}::Training done in {epoch_train_t}")
            logger.info(
                f"epoch {epoch+1}/{epochs} => train_loss={train_loss:.2f}, train_acc={train_acc:.2f}"
            )

            # Validation
            logger.info("Starting validation")
            val_loss, val_acc = self._eval_epoch(epoch)
            epoch_val_t = datetime.now() - epoch_start_t
            logger.info(f"epoch {epoch+1}/{epochs}::Validation done, {epoch_val_t} elapsed this epoch")
            logger.info(
                f"epoch {epoch+1}/{epochs} => val_loss={val_loss:.2f}, val_acc={val_acc:.2f}"
            )

            # Print reconstruction
            valid_data = concat_datasets(self.valid_loaders.faces.dataset,
                                         self.valid_loaders.nonfaces.dataset,
                                         proportion_a=0.5)
            self.print_reconstruction(self.model, valid_data, epoch,
                                      self.device)

            # Save model and scores
            self._save_epoch(epoch, train_loss, val_loss, train_acc, val_acc)

        logger.success(f"Finished training on {epochs} epochs.")
Example #6
0
    def get(self, request, *args, **kwargs):
        # pop() avoids a KeyError when the session key is already gone
        email = request.session.pop("email_who_resets_password", "*email*")
        logger.success(f"User '{email}' reset password successfully")
        return super().get(request, *args, **kwargs)
Example #7
0
    def get_success_url(self):
        logger.success(
            f"User '{self.request.user.username}' changed password successfully"
        )
        messages.success(self.request, "You changed your password successfully!")
        return reverse('home')
Example #8
0
    def eval_on_setups(self, eval_name: Optional[str] = None):
        """Evaluates a model and writes the results to a given file name."""
        eval_name = self.config.eval_name if eval_name is None else eval_name

        # Define the predefined setups
        gender_list = [["Female"], ["Male"], ["Female"], ["Male"]]
        skin_list = [["lighter"], ["lighter"], ["darker"], ["darker"]]
        name_list = ["dark male", "dark female", "light male", "light female"]

        # Init the metrics
        recalls = []
        correct_pos = 0
        total_count = 0

        # Go through the predefined setup
        for i in range(4):
            logger.info(f"Running setup for {name_list[i]}")

            # Calculate on the current setup
            correct_count, count = self.eval(
                filter_exclude_gender=gender_list[i],
                filter_exclude_skin_color=skin_list[i]
            )

            # Calculate the metrics
            recall = correct_count / count * 100
            correct_pos += correct_count
            total_count += count

            # Log the recall
            logger.info(f"Recall for {name_list[i]} is {recall:.3f}")
            recalls.append(recall)

        # Calculate the average recall
        avg_recall = correct_pos/total_count*100
        variance = (torch.tensor(recalls)).var().item()

        # Calculate the amount of negative performance
        logger.info("Evaluating on negative samples")
        incorrect_neg, neg_count = self.eval(dataset_type='h5_imagenet', max_images=1270)
        correct_neg: int = neg_count - incorrect_neg

        # Calculate the precision and accuracy; false positives are the
        # negative (non-face) samples classified as faces
        precision = correct_pos/(correct_pos + incorrect_neg)*100
        accuracy = (correct_pos + correct_neg)/(2*1270)*100

        # Logger info
        logger.info(f"Recall => all: {avg_recall:.3f}")
        logger.info(f"Recall => dark male: {recalls[0]:.3f}")
        logger.info(f"Recall => dark female: {recalls[1]:.3f}")
        logger.info(f"Recall => white male: {recalls[2]:.3f}")
        logger.info(f"Recall => white female: {recalls[3]:.3f}")
        logger.info(f"Variance => {variance:.3f}")
        logger.info(f"Precision => {precision:.3f}")
        logger.info(f"Accuracy => {accuracy:.3f}")

        # Write final results
        path_to_eval_results = f"results/{self.path_to_model}/{eval_name}"
        with open(path_to_eval_results, 'a+') as write_file:

            # Write the header if the file is empty ('a+' creates it if missing)
            if os.path.getsize(path_to_eval_results) == 0:
                write_file.write("name,dark male,dark female,light male,light female,var,precision,recall,accuracy\n")

            write_file.write(f"{self.path_to_model}_{self.model_name}")
            write_file.write(f",{recalls[0]:.3f},{recalls[1]:.3f},{recalls[2]:.3f},{recalls[3]:.3f},{variance:.3f},{precision:.3f},{avg_recall:.3f},{accuracy:.3f}\n")

        logger.success("Finished evaluation!")
Example #9
0
def main():
    user = User()
    datasets = []
    experiments = []
    invalid_sensors_error = False
    file_paths = user.choose_files()

    ### Dataset creation
    for i, path in enumerate(file_paths):
        dataset = Dataset(path)

        if not dataset.invalid_sensors():
            datasets.append(dataset)
            logger.info("All sensors OK in file {}".format(i))
        else:
            dataset.report_invalid_sensors()
            invalid_sensors_error = True
    if invalid_sensors_error:
        print(
            "Please check if the column names of the CSV files are spelled correctly! If they are correct, please add the new sensors in the database!"
        )
        return

    logger.print_chosen_files(file_paths)
    logger.print_available_rocks(api_handler.rocks)

    for i, dataset in enumerate(datasets):
        experiment = Experiment(dataset)
        experiment.rock_id = user.choose_rock(dataset.filepath)
        experiment.description = user.write_description()
        experiment.start_time = user.set_date()

        dataset.calculate_checksum()
        experiments.append(experiment)

    chunk_size = 500000
    while True:
        continue_with_upload = user.continue_with_upload()
        if continue_with_upload:
            try:
                for experiment in experiments:
                    logger.success("\n\nUploading file {}.".format(
                        experiment.dataset.filepath))

                    # Number of chunks, rounded up (ceil division)
                    nr_of_chunks = (experiment.dataset.file_length + chunk_size - 1) // chunk_size
                    with open(experiment.dataset.filepath) as f:
                        for i in tqdm(range(nr_of_chunks)):
                            chunk_response = api_handler.send_file_chunk(
                                experiment, f, chunk_size=chunk_size)
                            if chunk_response.text == 'DATASET_ALREADY_IN_DB':
                                logger.error(
                                    "\n\nThis dataset is already stored in database! Stopping upload!"
                                )
                                break
                    if chunk_response.text == 'DATASET_ALREADY_IN_DB':
                        continue
                    metadata_response = api_handler.send_metadata(experiment)

                    if metadata_response.text == "METADATA_RECEIVED":
                        logger.success("File {} uploaded!".format(
                            experiment.dataset.filepath))
                        add_experiment_response = api_handler.add_experiment(
                            experiment)

                        if add_experiment_response.text == 'EXPERIMENT_BEING_ADDED_TO_THE_DB':
                            logger.success(
                                "The uploaded dataset is being written in the database. It may take some time. You can check its progress in the web application!"
                            )
                        else:
                            print(
                                "An error occred while inserting the dataset in the database!"
                            )

            except Exception as exc:
                logger.error("ERROR:  " + str(exc))
            logger.input("Press Enter to continue!")

            break
        else:
            exit()
Example #10
0
    def serve_forever(self):
        logger.success('Serving HTTP on port %s ...' % PORT)
        while True:
            self.client_connection, client_address = listen_socket.accept()
            self.handle_request()
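`serve_forever` assumes a module-level `listen_socket` that is already bound and listening, plus a `PORT` constant. A minimal sketch of that setup (the host, port value, and backlog are assumptions for illustration):

import socket

PORT = 8888  # assumed; the original defines PORT elsewhere

# TCP socket bound to all interfaces, as serve_forever expects.
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind(('', PORT))
listen_socket.listen(1)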