def get_loader(mode, task_id, data_id, train_size, val=False, ques_id=0):
    """Build a bAbI graph dataset split and a DataLoader over it.

    Args:
        mode: 'train' or 'test'.
        task_id: bAbI task number (4, 15, 16, 18, 19).
        data_id: which of the 10 randomly generated datasets to use (1-10).
        train_size: size of the training split; the remainder is validation.
        val: if True, build the validation split instead of the train split.
        ques_id: question index for tasks with multiple questions (default 0).

    Returns:
        A ``(split, loader)`` tuple: the ``BABI`` dataset object and a
        ``torch.utils.data.DataLoader`` wrapping it.
    """
    # All metadata files share mode/task_id/data_id; only info_type varies,
    # so hoist the common arguments into one local helper.
    def locate(info_type):
        return utils.get_file_location(
            mode=mode, task_id=task_id, info_type=info_type, data_id=data_id
        )

    edge_type = get_data_types(locate('edge_types'))
    node_type = get_data_types(locate('node_ids'))
    question_type = get_data_types(locate('question_types'))
    label_type = get_data_types(locate('labels'))
    # Some tasks ship no dedicated label vocabulary: fall back to node ids.
    if not label_type:
        label_type = node_type

    split = BABI(
        train_size=train_size,
        datapath=locate('graphs'),
        edge_type=edge_type,
        node_type=node_type,
        question_type=question_type,
        label_type=label_type,
        is_validation=val,
        task_id=task_id,
        question_id=ques_id,
    )
    # Shuffle only during training; batch size / worker count come from
    # the project-wide config module.
    loader = torch.utils.data.DataLoader(
        split,
        batch_size=config.BATCH_SIZE,
        shuffle=(mode == 'train'),
        pin_memory=True,
        num_workers=config.NUM_WORKER,
    )
    return split, loader
import numpy as np from utils import get_file_location with open(get_file_location(day=11)) as f: data = np.array([[seat for seat in row] for row in f.read().split('\n')]) moves_dict = { 'up': { 'x': 0, 'y': -1 }, 'up_right': { 'x': 1, 'y': -1 }, 'right': { 'x': 1, 'y': 0 }, 'down_right': { 'x': 1, 'y': 1 }, 'down': { 'x': 0, 'y': 1 }, 'down_left': { 'x': -1, 'y': 1
import random from utils import get_file_location with open(get_file_location(day=8)) as f: data = [[x.split(' ')[0], int(x.split(' ')[1])] for x in f.read().split('\n')] # data = list(zip(random.sample(range(0,len(data)), len(data)), data)) with open(get_file_location(day=8)) as f: data = [[x.split(' ')[0], int(x.split(' ')[1])] for x in f.read().split('\n')] class CorruptnessFinder: def __init__(self, data): self.data = data self.iterator = 0 self.accumulator = 0 self.used_list, self.corrupt_moves = [], [] self.corrupt_flag = True def _parse_move(self, move): return move[0], move[1] def _execute_action(self, move): move_action, move_quantity = self._parse_move(move) if move_action == 'jmp': self.iterator += move_quantity elif move_action == 'acc': self.accumulator += move_quantity