async def main():
    # Crawler entry point: for each configured language, walk letter
    # combinations of increasing length, query the remote endpoint for each
    # combination, and persist any products found in the JSON response.
    combinations: list = await Combination.objects.all()
    # Name of the most recently stored combination — used to resume a
    # previous run ('' means start from scratch).
    last_combo: str = combinations[-1].name if combinations else ''
    for lang_name in LANGUAGES:
        language = await get_or_create(Language, {'name': lang_name})
        alphabet: list = get_alphabet(lang_name)
        # Resume at the length of the last stored combination; shorter
        # lengths were already processed.
        # NOTE(review): last_combo is cleared after the first batch below, so
        # for every language after the first this range starts at 1 —
        # confirm that is intended.
        for number_of_letters in range(
                len(last_combo) or 1, LETTERS_QUANTITY + 1):
            sequence: list = generate_sequence(alphabet, number_of_letters, last_combo)
            # The resume marker only applies to the very first generated
            # batch; clear it so later iterations start from the beginning.
            if last_combo:
                last_combo = ''
            for seq in sequence:
                data_raw: str = await make_request(seq)
                if not data_raw:
                    # Empty response (failed/timed-out request): log and skip.
                    data_to_write = f"Timestamp {int(time())}. " \
                                    f"Request unsuccessful for combination \"{seq}\""
                    await logger(ERROR_LOG_NAME, data_to_write)
                    continue
                try:
                    data = json.loads(data_raw)
                    # Valid JSON but not an object — log and skip.
                    if not isinstance(data, dict):
                        data_to_write = f"Timestamp {int(time())}. " \
                                        f"Expect dict, received {data}\n"
                        await logger(ERROR_LOG_NAME, data_to_write)
                        continue
                except JSONDecodeError:
                    # Response body was not JSON at all.
                    data_to_write = f"Timestamp {int(time())}. " \
                                    f"Can't decode. Value - {data_raw}\n"
                    await logger(ERROR_LOG_NAME, data_to_write)
                except Exception as e:
                    # Catch-all so a single bad payload cannot abort the crawl.
                    data_to_write = f"Timestamp {int(time())}. " \
                                    f"Huston, we have a problem here. " \
                                    f"Value - {data_raw}. Error is {e}\n"
                    await logger(ERROR_LOG_NAME, data_to_write)
                else:
                    # Successful parse: record the combination, then each product.
                    combination = await get_or_create(Combination, {
                        'language': language,
                        'name': seq
                    })
                    for product_data in data.get('products', []):
                        price_raw = product_data['price'].replace(' ', '')
                        # Price appears to arrive wrapped in markup like ">123<";
                        # pull out the digits, falling back to the raw string.
                        price = search(r'[>]\d+[<]', price_raw)
                        await get_or_create(
                            Good, {
                                'name': product_data['name'],
                                'product_url': product_data['url'],
                                'image_url': product_data['image'],
                                'price': price[0][1:-1] if price else price_raw,
                                'combination': combination
                            })
def test_integer_20percent(self):
    """Assert that 20% of the comma-separated sequence items parse as ints.

    Fix: the original used a bare ``except:``, which silently swallows every
    exception (including ``KeyboardInterrupt``/``SystemExit``); ``int()`` on a
    string only raises ``ValueError`` for non-numeric text, so catch exactly
    that.
    """
    sequence, report = generate_sequence()
    integers = 0
    total = 0
    for item in sequence.split(','):
        total += 1
        try:
            int(item)
            integers += 1
        except ValueError:
            # Not an integer token — expected for the non-integer 80%.
            pass
    self.assertEqual("{:.2f}".format(integers / total), "0.20")
output_file=args.output_file, frequency=iter_interval, train_computation=train_computation, total_iterations=num_iterations, eval_set=test_set, loss_computation=loss_computation, use_progress_bar=args.progress_bar) # Train the network loop_train(train_set, cbs) # Get predictions for the test set predictions = utils.eval_loop(test_set, eval_function, inputs) if (do_plots is True): # Plot the predictions time_points = 8 * no_points utils.plot_inference(predictions, predict_seq, data, time_points) # Generate a sequence # uses the first seq_len samples of the input sequence as seed time_points = 8 * no_points gen_series, gt_series = utils.generate_sequence(data, time_points, eval_function, predict_seq, batch_size, seq_len, feature_dim, inputs) if (do_plots is True): # Plot the generated series vs ground truth series utils.plot_generated(gen_series, gt_series, predict_seq)
def test_integer_20percent_report(self):
    """Check via the report that integers account for 20% of all items."""
    _, report = generate_sequence()
    grand_total = sum(report.values())
    ratio = report['integer'] / grand_total
    self.assertEqual("{:.2f}".format(ratio), "0.20")
def _str2bool(value):
    """Parse a textual boolean CLI value ('true'/'1'/'yes', case-insensitive)."""
    # Fix for the classic argparse pitfall: ``type=bool`` treats ANY
    # non-empty string as True (bool("False") is True), so the original
    # ``--log_mode False`` actually ENABLED log mode.
    return str(value).lower() in ('1', 'true', 'yes', 'y', 't')


parser.add_argument("--importance_weight", action="store_true")
# Still accepts an explicit value (``--log_mode True``/``--log_mode False``)
# for backward compatibility, but now parses it correctly.
parser.add_argument("--log_mode", type=_str2bool, default=False)
config = parser.parse_args()

path = 'data/'
# NOTE(review): statement nesting below was inferred from a collapsed source
# line — the loader construction is assumed to belong inside this branch.
if config.data == 'exponential_hawkes':
    # Load the three CSV splits of the Hawkes-process dataset.
    train_data = read_timeseries(path + config.data + '_training.csv')
    val_data = read_timeseries(path + config.data + '_validation.csv')
    test_data = read_timeseries(path + config.data + '_testing.csv')

    # Build fixed-length (time, event) sequences and wrap them in loaders.
    train_timeseq, train_eventseq = generate_sequence(
        train_data, config.seq_len, log_mode=config.log_mode)
    train_loader = DataLoader(
        torch.utils.data.TensorDataset(train_timeseq, train_eventseq),
        shuffle=True, batch_size=config.batch_size)

    val_timeseq, val_eventseq = generate_sequence(
        val_data, config.seq_len, log_mode=config.log_mode)
    # Validation uses one batch spanning the entire split.
    val_loader = DataLoader(
        torch.utils.data.TensorDataset(val_timeseq, val_eventseq),
        shuffle=False, batch_size=len(val_data))

model = GTPP(config)
def generate(timestamp):
    """Generate a fresh sequence, persist it with its report, return it as JSON."""
    new_sequence, new_report = generate_sequence()
    save_sequence(new_sequence, timestamp=timestamp)
    save_report(new_report, timestamp=timestamp)
    return jsonify({'status': 'Success.', 'data': new_sequence})