import pytest

import mikrolab as mk  # module under test, named as in the accompanying AWS script


def test_read_raw_file_FileNotFoundError():
    path = 'nonsense'
    with pytest.raises(FileNotFoundError):
        mk.read_raw_file(path, minimal_length=4)


def test_read_raw_file_number_of_lines():
    number_of_records = 69934
    path = './datasource/influx-export.csv'
    data = mk.read_raw_file(path, minimal_length=4)
    assert len(data) == number_of_records


def test_read_raw_file_FileTooShort():
    path = './datasource/influx-export_testfile_header_only.csv'
    with pytest.raises(mk.FileTooShort):
        mk.read_raw_file(path, minimal_length=4)


def test_read_raw_file_type():
    path = './datasource/influx-export.csv'
    data = mk.read_raw_file(path, minimal_length=4)
    assert type(data) is list
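
# For context, a minimal sketch of the mikrolab interface these tests exercise.
# Only read_raw_file, minimal_length, and FileTooShort appear in the tests above;
# the implementation below is an assumption, not the actual mikrolab code.
class FileTooShort(Exception):
    """Raised when a source file contains fewer lines than minimal_length."""


def read_raw_file(path, minimal_length=4):
    """Read a raw influx export and return its lines as a list of strings."""
    with open(path) as source:  # a missing file propagates FileNotFoundError
        lines = source.read().splitlines()
    if len(lines) < minimal_length:
        raise FileTooShort("expected at least {} lines, got {}".format(
            minimal_length, len(lines)))
    return lines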
target_file_prefix = None
data_directory = './aws-datasource'
charts_directory = './aws-diagrams'
# source_file = 'influx-export-short.csv'
source_file = 'influx-export.csv'
attributes = [
    'co2_hum', 'co2_ppm', 'co2_tmp', 'ec', 'ph', 'rpi_t', 'rtd_t', 'tsl'
]

print("Downloading data from AWS S3")
logging.info("Downloading data from AWS S3")
get_data_from_s3(source_bucket, source_file_name,
                 os.path.join(data_directory, source_file))

source_file_path = os.path.join(data_directory, source_file)
source_data = mikrolab.read_raw_file(source_file_path)

for index in range(10):
    print("{}".format(source_data[index]))

sd_location, sd_feature_names, sd_column_lengths, sd_dataset = mikrolab.process_data(
    source_data)

print("Location: {}".format(sd_location))
print("Feature names: {}".format(",".join(sd_feature_names)))
print("Column lengths: {}".format(",".join(
    str(length) for length in sd_column_lengths)))
print("number of data points: {}".format(len(sd_dataset)))
print("last datapoints: {}".format(sd_dataset[-20:-1]))

mikrolab.write_csv_file(target_directory, target_file_prefix, sd_location,
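
# The get_data_from_s3 helper is not part of this excerpt; a minimal sketch using
# boto3 follows. The function name and argument order match the call above, but
# everything else (client construction, directory handling) is an assumption.
import boto3


def get_data_from_s3(bucket_name, object_key, destination_path):
    """Download one object from S3 to a local file path (hypothetical helper)."""
    os.makedirs(os.path.dirname(destination_path), exist_ok=True)
    s3_client = boto3.client('s3')
    s3_client.download_file(bucket_name, object_key, destination_path)
    logging.info("Downloaded s3://%s/%s to %s",
                 bucket_name, object_key, destination_path)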