def test_save(self):
    # Arrange
    input_data = get_input_data()
    input_data_file = InputDataFile()

    # Mocks
    input_data.to_csv = MagicMock()

    # Act
    input_data_file.save(input_data, batch_id, epoch_id)

    # Assert
    input_data.to_csv.assert_called_with(input_data_file.file_name(batch_id, epoch_id))
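The assertion above relies on `unittest.mock`. Below is a minimal, self-contained sketch of the same mock-and-verify pattern; the `Writer` class and the file name string are hypothetical stand-ins, not part of the project code.

from unittest.mock import MagicMock

# Hypothetical stand-in for the object whose to_csv call we want to verify.
class Writer:
    def to_csv(self, path):
        raise NotImplementedError  # never reached once the method is mocked

writer = Writer()
writer.to_csv = MagicMock()                         # replace the real method with a mock
writer.to_csv("input.batch.0.epoch.0.csv")          # the code under test would make this call
writer.to_csv.assert_called_with("input.batch.0.epoch.0.csv")  # passes; raises AssertionError on mismatch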
update_params(image_generation_params, **image_generation_params_update)
logger.info('Updated image generation parameters: %s', image_generation_params)

# Compute predictions
num_prediction_steps = num_prediction_steps or ceil(len(input_data) / image_generation_params.batch_size)
predictor = Prediction(model, input_params, image_generation_params)
predicted_data = predictor.predict(input_data, num_prediction_steps)

# Compute accuracy
num_matches = (predicted_data[constants.PANDAS_MATCH_COLUMN].to_numpy().nonzero())[0].shape[0]
num_mismatches = len(predicted_data[constants.PANDAS_MATCH_COLUMN]) - num_matches
accuracy = (num_matches / len(predicted_data[constants.PANDAS_MATCH_COLUMN])) * 100.

# Write out the predicted output
prediction_result_file = InputDataFile(constants.PREDICTION_RESULT_FILE_NAME_GUIDANCE)
prediction_result_file.save(predicted_data, 0, 0)
input_files_client.put_all([prediction_result_file.file_name(0, 0)])

print_summary = """
                Result Dataframe: {}
                Total predictions: {}
                Correct predictions: {}
                Wrong predictions: {}
                Accuracy: {}
                """.format(
                    predicted_data,
                    len(predicted_data),
                    num_matches,
                    num_mismatches,
                    accuracy)
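The accuracy arithmetic can be exercised in isolation. Here is a minimal sketch on a toy DataFrame, assuming the match column holds 0/1 (or boolean) values; the column name 'match' is illustrative, standing in for constants.PANDAS_MATCH_COLUMN.

import pandas as pd

# Toy stand-in for predicted_data; 'match' is a placeholder column name.
predicted_data = pd.DataFrame({'match': [1, 0, 1, 1, 0]})

# nonzero() returns the indices of non-zero entries, i.e. the correct predictions.
matches = predicted_data['match'].to_numpy().nonzero()[0].shape[0]   # 3 correct
mismatches = len(predicted_data['match']) - matches                  # 2 wrong
accuracy = (matches / len(predicted_data['match'])) * 100.           # 60.0

print(matches, mismatches, accuracy)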