Example #1
def multitrainer(trainers, keep_on_gpu=True, parallel=False, cuda_pool=(0, )):
    """Train multiple models in parallel.

    Parameters
    ----------
    trainers : sequence[ModelTrainer]
    keep_on_gpu : bool, default=True
        Keep all models on GPU (risk of out-of-memory)
    parallel : bool or int, default=False
        Train models in parallel.
        If an int, only this number of models will be trained in parallel.
    cuda_pool : sequence[int], default=(0,)
        IDs of GPUs that can be used to dispatch the models.

    """
    n = len(trainers)
    initial_epoch = max(min(trainer.initial_epoch for trainer in trainers), 1)
    nb_epoch = max(trainer.nb_epoch for trainer in trainers)
    trainers = tuple(trainers)
    if parallel:
        parallel = len(trainers) if parallel is True else parallel
        chunksize = max(len(trainers) // (2 * parallel), 1)
        pool = Pool(parallel)
        cuda_pool = py.make_list(cuda_pool, parallel)  # one GPU id per worker

    if not torch.cuda.is_available():
        cuda_pool = []

    if not keep_on_gpu:
        for trainer in trainers:
            trainer.to(device='cpu')

    # --- init ---
    if parallel:
        args = zip(trainers, [keep_on_gpu] * n, [cuda_pool] * n)
        trainers = list(pool.map(_init1, args, chunksize=chunksize))
    else:
        args = zip(trainers, [keep_on_gpu] * n, [cuda_pool] * n)
        trainers = list(map(_init1, args))

    # --- train ---
    for epoch in range(initial_epoch + 1, nb_epoch + 1):
        if parallel:
            args = zip(trainers, [epoch] * n, [keep_on_gpu] * n,
                       [cuda_pool] * n)
            trainers = list(pool.map(_train1, args, chunksize=chunksize))
        else:
            args = zip(trainers, [epoch] * n, [keep_on_gpu] * n,
                       [cuda_pool] * n)
            trainers = list(map(_train1, args))
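The workers _init1 and _train1 are defined elsewhere; a minimal sketch of what such a worker might look like, assuming each trainer exposes to() and a hypothetical train1(epoch) method and that cuda_pool holds GPU indices:

def _train1(args):
    # Hypothetical worker matching the tuples built above: (trainer, epoch, keep_on_gpu, cuda_pool)
    trainer, epoch, keep_on_gpu, cuda_pool = args
    device = 'cuda:{}'.format(cuda_pool[0]) if cuda_pool else 'cpu'  # assumed dispatch policy
    trainer.to(device=device)
    trainer.train1(epoch)              # assumed: train for a single epoch
    if not keep_on_gpu:
        trainer.to(device='cpu')       # release GPU memory between epochs
    return trainer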
Example #2
def runInParallel(args):
    cur_args = []

    for i in range(args.r):
        cur_args.append({'seed': base_seed + i})

    print('total to run', cur_args, 'nProc', args.nproc)

    if args.nproc > 1:
        pool = Pool(processes=int(args.nproc))
        pool.map(single_run_algorithm, cur_args)
        pool.close()
        pool.join()
    else:
        for cur_arg in cur_args:
            single_run_algorithm(cur_arg)
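base_seed, single_run_algorithm, and the parsed arguments come from elsewhere in the script; a minimal sketch of the surrounding wiring (names and defaults assumed), mainly to show that the worker must be a top-level function so Pool.map can pickle it:

import argparse
from torch.multiprocessing import Pool

base_seed = 0  # assumed default

def single_run_algorithm(cur_arg):
    # assumed worker: defined at module level so it is picklable
    print('running with seed', cur_arg['seed'])

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', type=int, default=4)       # number of repetitions
    parser.add_argument('--nproc', type=int, default=2)  # number of worker processes
    runInParallel(parser.parse_args())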
Example #3
def dsrg_layer(targets, labels, probs_ori, num_classes, thre_fg, thre_bg,
               NUM_WORKERS):

    targets = torch.reshape(targets, (-1, 1, 1, num_classes))
    labels = torch.transpose(labels, 1, 3)  # bx41x41x21
    probs = torch.transpose(probs_ori, 1, 3)  # bx41x41x21
    targets[:, :, :, 0] = 1
    # probs = probs.clone().detach()

    batch_size = targets.shape[0]
    complete_para_list = []
    for i in range(batch_size):
        params_list = []
        params_list.append([targets[i], labels[i], probs[i]])
        params_list.append([thre_fg, thre_bg])
        complete_para_list.append(params_list)

    ret = pool.map(single_generate_seed_step, complete_para_list)
    # serial fallback:
    # ret = []
    # for i in range(batch_size):
    #     ret.append(single_generate_seed_step(complete_para_list[i]))

    new_labels = ret[0]
    for i in range(1, batch_size):
        new_labels = torch.cat([new_labels, ret[i]], axis=0)
    new_labels = torch.transpose(new_labels, 1, 3)  # bx21x41x41
    return new_labels
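pool is never created in this function and NUM_WORKERS is otherwise unused; presumably a shared process pool sized by NUM_WORKERS is intended. A sketch of that assumption:

from torch.multiprocessing import Pool

_pool = None

def _get_pool(num_workers):
    # Assumed helper: lazily create one shared pool and reuse it across calls to dsrg_layer
    global _pool
    if _pool is None:
        _pool = Pool(num_workers)
    return _pool

# inside dsrg_layer:  pool = _get_pool(NUM_WORKERS)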
Example #4
    def step(self, items_seq, items_ori, items_batch=None, boxes_batch=None):
        if (items_batch is not None) and (boxes_batch is not None):
            self.items_batch = items_batch
            self.boxes_batch = boxes_batch
            self.batch_indx = list(range(self.BATCH_SIZE))
            self.expected_items_n = [self.ITEMS_SEQ_LN] * self.BATCH_SIZE
            self.all_outs = {i: [] for i in range(self.BATCH_SIZE)}
            self.current_level = 0
            self.items_batch_alligned = None
            self.boxes_batch_alligned = None

        items_seq_ = torch.LongTensor(items_seq).transpose(1, 0).expand(
            self.INPUT_SIZE, self.ITEMS_SEQ_LN,
            self.BATCH_SIZE).transpose(2, 0)
        items_ori_ = items_ori[torch.arange(self.BATCH_SIZE).expand(
            self.ITEMS_SEQ_LN, self.BATCH_SIZE).transpose(1, 0),
                               torch.LongTensor(items_seq).
                               expand(self.BATCH_SIZE, self.ITEMS_SEQ_LN)]
        self.items_batch_alligned = self.items_batch[[
            self.base_indx_items, items_seq_, items_ori_
        ]]
        lookup_sm = self.boxes_batch.expand(
            self.ITEMS_SEQ_LN,
            self.BATCH_SIZE, self.ITEMS_SEQ_LN, self.INPUT_SIZE).transpose(
                1, 0) - self.items_batch_alligned.unsqueeze(2)
        validities = (lookup_sm >= 0).all(3).any(2).tolist()
        new_seq = []
        for i, j in zip(items_seq, validities):
            new_seq.append([i[k] for k in range(len(i)) if j[k]])
        self.batch_indx = [i for i in self.batch_indx if len(new_seq[i]) > 0]
        items_seq = [i for i in new_seq if len(i) > 0]

        zp = list(
            zip(self.batch_indx, self.items_batch[self.batch_indx],
                self.boxes_batch[self.batch_indx], items_seq,
                items_ori[self.batch_indx]))
        p = Pool(10)
        out = p.map(self.target_func, zp)
        p.close()
        p.join()
        out = [pickle.loads(i) for i in out]

        out_series = pd.Series(out)
        _ = out_series.apply(lambda x: self.dict_update(x))
        #         out = [i for i in out if i[1] < i[2]]

        self.batch_indx = [i[0] for i in out]

        self.current_level += 1

        items_seq = [i[5] for i in out]
        all_rewards = [i[-1] * i[-2] for i in out]

        #         filled_items_indx = {i:[i[2] for i in j] for i,j in self.all_outs.items() if len(j) > 0}
        #         filled_items_HUs = {i:[i[7] for i in j if len(i[7]) > 0] for i,j in self.all_outs.items()}
        #         all_rewards = [self.calc_reward(self.items_batch[i],i,filled_items_indx,filled_items_HUs) for i in range(self.BATCH_SIZE)]
        return all_rewards
Example #5
def run_main():
    deck_lists = list(map(int,args.decklists.split(","))) if args.decklists is not None else None
    if deck_lists is None:
        deck_lists = list(deck_id_2_name.keys())
    else:
        assert all(key in deck_id_2_name for key in deck_lists)
    if deck_lists == [0, 1, 4, 5, 10, 12]:
        deck_lists = [0, 1, 4, 12, 5, 10]
    mylogger.info("deck_lists:{}".format(deck_lists))
    D = [Deck() for i in range(len(deck_lists))]
    deck_index = 0
    # sorted_keys = sorted(list(deck_id_2_name.keys()))
    # for i in sorted_keys:
    #    if i not in deck_lists:
    #        continue
    for i in deck_lists:
        mylogger.info("{}(deck_id:{}):{}".format(deck_index, i, key_2_tsv_name[i]))
        D[deck_index] = tsv_to_deck(key_2_tsv_name[i][0])
        D[deck_index].set_leader_class(key_2_tsv_name[i][1])
        deck_index += 1
    Results = {}
    list_range = range(len(deck_lists))
    #print(list(itertools.product(list_range,list_range)))

    Player1 = Player(9, True, policy=New_Dual_NN_Non_Rollout_OM_ISMCTSPolicy(model_name=args.model_name),
                     mulligan=Min_cost_mulligan_policy())

    if args.opponent is not None:
        if args.model_name is not None:
            if args.opponent == "Greedy":
                Player2 = Player(9, True, policy=NN_GreedyPolicy(model_name=args.model_name),
                                 mulligan=Min_cost_mulligan_policy())
            elif args.opponent == "MCTS":
                Player2 = Player(9, True, policy=New_Dual_NN_Non_Rollout_OM_ISMCTSPolicy(model_name=args.model_name),
                                 mulligan=Min_cost_mulligan_policy())
        else:
            Player2 = Player(9, True, policy=Opponent_Modeling_MCTSPolicy(),
                             mulligan=Min_cost_mulligan_policy())
    else:
        Player2 = Player(9, True, policy=AggroPolicy(),
             mulligan=Min_cost_mulligan_policy())

    Player1.name = "Alice"
    Player2.name = "Bob"
    iteration = int(args.iteration) if args.iteration is not None else 10
    deck_list_len = len(deck_lists)
    iter_data = [(deck_list_len*i+j,Player1,
                   Player2,(i,j),(deck_lists[i],deck_lists[j]),iteration) for i,j in itertools.product(list_range,list_range)]
    pool = Pool(3)  # maximum number of processes: 8
    # memory = pool.map(preparation, iter_data)
    result = pool.map(multi_battle, iter_data)
    #result = list(tqdm(result, total=len(list_range)**2))
    pool.close()
    pool.join()
    for data in result:
        Results[data[0]] = data[1]
    print(Results)
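The commented-out tqdm line cannot show progress because pool.map only returns once every task has finished; the lazy pool.imap variant yields results as they complete. A sketch using the iter_data and multi_battle from above:

from tqdm import tqdm

with Pool(3) as pool:
    result = list(tqdm(pool.imap(multi_battle, iter_data), total=len(iter_data)))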
Example #6
    def _invoke_pool(self, pool: TorchPool, fn: Callable,
                     data: Iterable) -> List[int]:
        """Invoke on a torch pool (rather than a :class:`multiprocessing.Pool`).

        """
        if pool is None:
            return tuple(map(fn, data))
        else:
            return pool.map(fn, data)
Example #7
    def sample(self, observations, num_samples, thetas=None):
        assert (thetas is None or len(thetas) == self.chains)
        self.sampler.reset()
        if thetas is None:
            inputs = self._prepare_inputs()
        else:
            inputs = thetas  # assumed: caller-provided thetas seed the chains
        pool = Pool(processes=self.workers)
        arguments = self._prepare_arguments(observations, inputs, num_samples)
        chains = pool.map(self.sample_chain, arguments)
        pool.close()
        pool.join()

        return chains
Example #8
def final_experiments():
    for i in range(2, 5):
        x_train, train, x_valid, valid, x_test, test, name = get_dataset(i)
        print("Running for", name, "dataset")

        x_train, e_train, t_train, x_valid, e_valid, t_valid, x_test, e_test, t_test = scale_data_to_torch(
            x_train, train, x_valid, valid, x_test, test)
        print("Dataset loaded and scaled")

        risk_set, risk_set_valid, risk_set_test = compute_risk_set(
            t_train, t_valid, t_test)
        print("Risk set computed")

        data_dict = {
            "x_train": x_train,
            "e_train": e_train,
            "t_train": t_train,
            "x_valid": x_valid,
            "e_valid": e_valid,
            "t_valid": t_valid,
            "x_test": x_test,
            "e_test": e_test,
            "t_test": t_test,
            "risk_set": risk_set,
            "risk_set_valid": risk_set_valid,
            "risk_set_test": risk_set_test
        }

        n_in = x_train.shape[1]
        linear_models = [2, 5, 10, 12]
        learning_rates = [1e-4, 1e-3]
        layer_sizes = [[n_in], [n_in, n_in], [n_in, n_in, n_in],
                       [n_in, 20, 15]]
        data = [data_dict]
        hyperparams = [(linear_model, learning_rate, layer_size, seed, d)
                       for layer_size in layer_sizes
                       for learning_rate in learning_rates
                       for linear_model in linear_models for seed in range(3)
                       for d in data]
        print("Hyperparams initialized")

        p = Pool(50)
        print("Pool created")
        output = p.map(run_experiment, hyperparams)
        p.close()
        p.join()
        print("Models trained. Writing to file")
        filename = name + "_results.pkl"
        with open(filename, "wb") as f:
            pkl.dump(output, f)
        print(name, "done")
        print("")
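The nested comprehension enumerates the full Cartesian product of the hyperparameters; the same grid can be written with itertools.product, which some may find easier to scan (equivalent to the code above):

import itertools

hyperparams = [
    (linear_model, learning_rate, layer_size, seed, d)
    for layer_size, learning_rate, linear_model, seed, d in itertools.product(
        layer_sizes, learning_rates, linear_models, range(3), data)
]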
Example #9
def make_dataset_XNAT(project, opt, phase):
    session = xnat.connect('http://rufus.stanford.edu', user='******', password='******') #make XNAT connection
    #xnat version of make_dataset_dicom()
    images = []
    #parsing xnat hierarchy: project -> subject -> experiment -> scan -> dicom img
    subjs = session.projects[project].subjects

    try:
        set_start_method('spawn')
    except RuntimeError:
        pass
    
    #multiprocessing code (not working) ------------
    pool = Pool(os.cpu_count())
    images = pool.map(process_subj, [subjs[s] for s in subjs])

    images = [i for im in images for i in im]
    #-----------------------------------------------

    #original code (before multiproc, works fine) -------------------
    '''
    for s in subjs:
        exps = subjs[s].experiments

        for e in exps:
            scans = exps[e].scans
            
            for sc in scans:
                my_file = scans[sc].resources['DICOM'].files[0]
                fname = my_file.data['Name']
                path = my_file.uri

                with my_file.open() as f:
                    dicom = pydicom.read_file(f)

                #save images whose filenames are in phase
                if fname.endswith('.dcm'):
                    try:
                        opt.names_phase[fname]
                    except:
                        print('Error: we do not find the phase for file name', fname, 'ignore this file')
                        continue
                        # sys.exit(3)
                    if opt.names_phase[fname] == phase:
                        Img = process_dicom(dicom)
                        if Img is not None:
                            print("x" + fname)
                            images.append([path, dicom]) #each image list item has filename + FileData obj
    '''
    #-------------------------------------------------

    session.disconnect()
    return images
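One likely reason the pool version does not work is that the XNAT subject objects (which hold a live session) cannot be pickled into the 'spawn'-ed workers. A common workaround, sketched here with assumed details, is to pass plain subject IDs and let each worker open its own connection:

def process_subj_by_id(subj_id):
    # Assumed worker: opens its own connection so nothing unpicklable crosses the process boundary.
    with xnat.connect('http://rufus.stanford.edu', user='******', password='******') as sess:
        return process_subj(sess.subjects[subj_id])  # reuse the existing per-subject logic

# in make_dataset_XNAT, map over plain IDs instead of subject objects:
#     images = pool.map(process_subj_by_id, [s for s in subjs])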
Example #10
    def forward(
        self,
        images,
    ):
        parameters = []
        num_workers = self.num_workers
        pool = Pool(num_workers)
        for bx in range(len(images)):
            bx_params = [bx, images[bx], True]
            parameters.append(bx_params)

        predictions = pool.map(worker_distance_transform, parameters)
        predictions = torch.stack(predictions)
        pool.close()
        pool.join()
        return predictions
Example #11
class ParallelApproximateBayesianComputation:
    def __init__(self, abc, workers=2):
        super(ParallelApproximateBayesianComputation, self).__init__()
        self.abc = abc
        self.pool = Pool(processes=workers)
        self.workers = workers

    def _prepare_arguments(self, observation, num_samples):
        arguments = []

        inputs = torch.arange(num_samples)
        num_chunks = num_samples // self.workers
        if num_chunks == 0:
            num_chunks = 1
        chunks = inputs.split(num_chunks, dim=0)
        for chunk in chunks:
            a = (self.abc, observation, len(chunk))
            arguments.append(a)

        return arguments

    def sample(self, observation, num_samples=1):
        arguments = self._prepare_arguments(observation, num_samples)
        outputs = self.pool.map(self._sample, arguments)
        outputs = torch.cat(outputs, dim=0)

        return outputs

    def __del__(self):
        self.pool.close()
        del self.pool
        self.pool = None

    @staticmethod
    def _sample(arguments):
        abc, observation, n = arguments

        return abc.sample(observation, num_samples=n)
Example #12
class ParallelSimulator(Simulator):
    def __init__(self, simulator, workers=2):
        super(ParallelSimulator, self).__init__()
        self.pool = Pool(processes=workers)
        self.simulator = simulator
        self.workers = workers

    def _prepare_arguments(self, inputs):
        arguments = []

        chunks = inputs.shape[0] // self.workers
        if chunks == 0:
            chunks = 1
        chunks = inputs.split(chunks, dim=0)
        for chunk in chunks:
            a = (self.simulator, chunk)
            arguments.append(a)

        return arguments

    def forward(self, inputs):
        arguments = self._prepare_arguments(inputs)
        outputs = self.pool.map(self._simulate, arguments)
        outputs = torch.cat(outputs, dim=0)

        return outputs

    def terminate(self):
        self.pool.close()
        del self.pool
        self.pool = None
        self.simulator.terminate()

    @staticmethod
    def _simulate(arguments):
        simulator, inputs = arguments

        return simulator(inputs)
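A short usage sketch for ParallelSimulator, assuming that Simulator subclasses are callables whose __call__ dispatches to forward (as the code above relies on) and that the wrapped simulator is defined at module level so it can be pickled:

class NoisySimulator(Simulator):
    # Hypothetical toy simulator: adds Gaussian noise to its inputs.
    def forward(self, inputs):
        return inputs + torch.randn_like(inputs)

    def terminate(self):
        pass

parallel_simulator = ParallelSimulator(NoisySimulator(), workers=2)
outputs = parallel_simulator(torch.randn(8, 3))  # split into chunks of 4, one per worker
parallel_simulator.terminate()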
Example #13
    def fitness_grads(
            self,
            n_samples: int,
            pool: Pool = None,
            fitness_shaping_fn: Callable[[Iterable[float]], Iterable[float]] = lambda x: x
    ):
        """
        Computes the (approximate) gradients of the expected fitness of the population.

        Uses torch autodiff to compute the gradients. The Individual.fitness does NOT need to be differentiable,
        but the log probability computations in Population.sample MUST be.

        :param n_samples: How many individuals to sample to approximate the gradient
        :param pool: Optional process pool to use when computing the fitness of the sampled individuals.
        :param fitness_shaping_fn: Optional function to modify the fitness, e.g. normalization, etc. Input is a list of n raw fitness floats. Output must also be n floats.
        :return: A (n,) tensor containing the raw fitness (before fitness_shaping_fn) for the n individuals.
        """

        samples = self.sample(n_samples)  # Generator
        individuals = []
        grads = []
        for individual, log_prob in samples:  # Compute gradients one at a time so only one log prob computational graph needs to be kept in memory at a time.
            assert log_prob.ndim == 0 and log_prob.isfinite() and log_prob.grad_fn is not None, "log_probs must be differentiable finite scalars"
            individuals.append(individual)
            grads.append(t.autograd.grad(log_prob, self.parameters()))

        if pool is not None:
            raw_fitness = pool.map(_fitness_fn_no_grad, individuals)
        else:
            raw_fitness = list(map(_fitness_fn_no_grad, individuals))

        fitness = fitness_shaping_fn(raw_fitness)

        for i, p in enumerate(self.parameters()):
            p.grad = -t.mean(t.stack([ind_fitness * grad[i] for grad, ind_fitness in zip(grads, fitness)]), dim=0)

        return t.tensor(raw_fitness)
Example #14
    def fitness_grads(
        self,
        n_samples: int,
        pool: Pool = None,
        fitness_shaping_fn: Callable[[Iterable[float]],
                                     Iterable[float]] = lambda x: x):
        """
        Computes the (approximate) gradients of the expected fitness of the population.

        Uses torch autodiff to compute the gradients. The Individual.fitness does NOT need to be differentiable,
        but the log probability computations in Population.sample MUST be.

        :param n_samples: How many individuals to sample to approximate the gradient
        :param pool: Optional process pool to use when computing the fitness of the sampled individuals.
        :param fitness_shaping_fn: Optional function to modify the fitness, e.g. normalization, etc. Input is a list of n raw fitness floats. Output must also be n floats.
        :return: A (n,) tensor containing the raw fitness (before fitness_shaping_fn) for the n individuals.
        """

        individuals, log_probs = zip(*self.sample(n_samples))
        assert all(
            lp.ndim == 0 and lp.isfinite() and lp.grad_fn is not None for lp in
            log_probs), "log_probs must be differentiable finite scalars"

        if pool is not None:
            raw_fitness = pool.map(_fitness_fn_no_grad, individuals)
        else:
            raw_fitness = list(map(_fitness_fn_no_grad, individuals))

        fitness = fitness_shaping_fn(raw_fitness)

        t.mean(
            t.stack([(-ind_fitness * log_prob)
                     for log_prob, ind_fitness in zip(log_probs, fitness)
                     ])).backward()

        return t.tensor(raw_fitness)
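_fitness_fn_no_grad must be a module-level function for pool.map to pickle it; a plausible definition, assuming each Individual exposes a fitness() method returning a float:

def _fitness_fn_no_grad(individual):
    # Assumed helper: evaluate fitness without building an autograd graph in the worker.
    with t.no_grad():
        return individual.fitness()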
Example #15
def main(input_fname, output_dir):
    # open input file, read one filename per line
    img_fnames = []
    with open(input_fname, 'r') as ifd:
        for line in ifd:
            img_fname = line.strip()
            # handle CSVs where the first column is the filename.
            # This will obviously break if the filenames contain commas.
            img_fname = img_fname.split(',')[0]
            img_fnames.append(img_fname)
    print('Read %d image filenames' % len(img_fnames))

    chunk_size = 128
    chunks = [
        img_fnames[i:i + chunk_size]
        for i in range(0, len(img_fnames), chunk_size)
    ]
    if num_threads > 1:
        pool = Pool(num_threads)
        _ = pool.map(process_chunk, chunks)
    else:
        for chunk in chunks:
            process_chunk(chunk)
    print('done.')
Example #16
class CTC(nn.Module):
    def __init__(self, blank=0):
        super(CTC, self).__init__()
        self.blank = blank
        self.pool = Pool(8)

    def create_target_p(self, target):
        target_p = [self.blank]
        for c in target:
            target_p.append(c)
            target_p.append(self.blank)

        return target_p

    def empty_dp(self, T, target_p):
        return [[torch.tensor(-1.0).float() for _ in range(len(target_p))]
                for _ in range(T)]

    def compute_probs_dp(self, log_prob, dp, target_p):
        ninf = torch.log(torch.from_numpy(np.array(0.0)))
        for t in range(len(dp)):
            for s in range(len(dp[0])):
                if t == 0:
                    if s == 0:
                        res = log_prob[0, self.blank]
                    elif s == 1:
                        res = log_prob[0, target_p[1]]
                    else:
                        res = ninf

                    dp[t][s] = res
                    continue

                a = dp[t - 1][s].float()
                b = dp[t - 1][s - 1].float() if s - 1 >= 0 else ninf

                # in case of a blank or a repeated label, we only consider s and s-1 at t-1, so we're done
                if target_p[s] == self.blank or (s >= 2 and target_p[s - 2]
                                                 == target_p[s]):
                    if a == ninf and b == ninf: res = ninf
                    else:
                        res = max(a, b) + torch.log(1 + torch.exp(
                            -torch.abs(a - b))) + log_prob[t, target_p[s]]

                    dp[t][s] = res
                    continue

                # otherwise, in case of a non-blank and non-repeated label, we additionally add s-2 at t-1
                c = dp[t - 1][s - 2] if s - 2 >= 0 else ninf

                m = max([a, b, c])
                if a == ninf and b == ninf and c == ninf: res = ninf
                else:
                    res = m + torch.log(
                        torch.exp(-torch.abs(a - m)) +
                        torch.exp(-torch.abs(b - m)) +
                        torch.exp(-torch.abs(c - m))) + log_prob[t,
                                                                 target_p[s]]

                dp[t][s] = res

        return dp

    def compute_instance_loss(self, inps):
        log_prob, target = inps

        T = log_prob.shape[0]
        target_p = self.create_target_p(target)
        dp = self.empty_dp(T, target_p)

        log_prob.requires_grad = True
        dp = self.compute_probs_dp(log_prob, dp, target_p)
        a, b = dp[T - 1][len(target_p) - 1], dp[T - 1][len(target_p) - 2]

        loss = max(a, b) + torch.log(1 + torch.exp(-torch.abs(a - b)))

        loss.backward()
        return log_prob.grad

    def compute_loss(self, log_probs, targets, input_lengths, target_lengths):
        grads = self.pool.map(self.compute_instance_loss,
                              [(log_probs[:input_lengths[i], i, :].detach(),
                                targets[i][:target_lengths[i]])
                               for i in range(log_probs.shape[1])])
        dlog_probs = torch.stack(grads).permute((1, 0, 2))
        loss = (dlog_probs * log_probs).sum() / len(grads) / -4

        return loss

    def __call__(self,
                 log_probs,
                 targets,
                 input_lengths,
                 target_lengths,
                 forget_rate,
                 w=None):
        return self.compute_loss(log_probs, targets, input_lengths,
                                 target_lengths)

    def __getstate__(self):
        self_dict = self.__dict__.copy()
        del self_dict['pool']
        return self_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
Example #17
class HyperOptArgumentParser(ArgumentParser):
    """
    Subclass of argparse ArgumentParser which adds optional calls to sample from lists or ranges
    Also enables running optimizations across parallel processes
    """

    # these are commands injected by test tube from cluster operations
    TRIGGER_CMD = 'test_tube_from_cluster_hopt'
    SLURM_CMD_PATH = 'test_tube_slurm_cmd_path'
    SLURM_EXP_CMD = 'hpc_exp_number'
    SLURM_LOAD_CMD = 'test_tube_do_checkpoint_load'
    CMD_MAP = {
        TRIGGER_CMD: bool,
        SLURM_CMD_PATH: str,
        SLURM_EXP_CMD: int,
        SLURM_LOAD_CMD: bool
    }

    def __init__(self, strategy='grid_search', **kwargs):
        """

        :param strategy: 'grid_search', 'random_search'
        :param enabled:
        :param experiment:
        :param kwargs:
        """
        ArgumentParser.__init__(self, **kwargs)

        self.strategy = strategy
        self.trials = []
        self.parsed_args = None
        self.opt_args = {}
        self.json_config_arg_name = None
        self.pool = None

    def __getstate__(self):
        # capture what is normally pickled
        state = self.__dict__.copy()

        # remove all functions from the namespace
        clean_state = {}
        for k, v in state.items():
            if not hasattr(v, '__call__'):
                clean_state[k] = v

        # what we return here will be stored in the pickle
        return clean_state

    def __setstate__(self, newstate):
        # re-instate our __dict__ state from the pickled state
        self.__dict__.update(newstate)

    def add_argument(self, *args, **kwargs):
        super(HyperOptArgumentParser, self).add_argument(*args, **kwargs)

    def opt_list(self, *args, **kwargs):
        options = kwargs.pop("options", None)
        tunable = kwargs.pop("tunable", False)
        self.add_argument(*args, **kwargs)
        for i in range(len(args)):
            arg_name = args[i]
            self.opt_args[arg_name] = OptArg(obj_id=arg_name, opt_values=options, tunable=tunable)

    def opt_range(
            self,
            *args,
            **kwargs
    ):
        low = kwargs.pop("low", None)
        high = kwargs.pop("high", None)
        arg_type = kwargs["type"]
        nb_samples = kwargs.pop("nb_samples", 10)
        tunable = kwargs.pop("tunable", False)
        log_base = kwargs.pop("log_base", None)

        self.add_argument(*args, **kwargs)
        arg_name = args[-1]
        self.opt_args[arg_name] = OptArg(
            obj_id=arg_name,
            opt_values=[low, high],
            arg_type=arg_type,
            nb_samples=nb_samples,
            tunable=tunable,
            log_base=log_base,
        )

    def json_config(self, *args, **kwargs):
        self.add_argument(*args, **kwargs)
        self.json_config_arg_name = re.sub('-', '', args[-1])

    def __parse_args(self, args=None, namespace=None):
        # allow bypassing certain missing params which other parts of test tube may introduce
        args, argv = self.parse_known_args(args, namespace)
        args, argv = self.__whitelist_cluster_commands(args, argv)
        if argv:
            msg = _('unrecognized arguments: %s')
            self.error(msg % ' '.join(argv))
        return args

    def __whitelist_cluster_commands(self, args, argv):
        parsed = {}

        # build a dict where key = arg, value = value of the arg or None if just a flag
        for i, arg_candidate in enumerate(argv):
            arg = None
            value = None

            # only look at --keys
            if '--' not in arg_candidate:
                continue

            # skip items not on the white list
            if arg_candidate[2:] not in HyperOptArgumentParser.CMD_MAP:
                continue

            arg = arg_candidate[2:]
            # pull out the value of the argument if given
            if i + 1 <= len(argv) - 1:
                if '--' not in argv[i + 1]:
                    value = argv[i + 1]

                if arg is not None:
                    parsed[arg] = value
            else:
                if arg is not None:
                    parsed[arg] = value

        # add the whitelist cmds to the args
        all_values = set()
        for k, v in args.__dict__.items():
            all_values.add(k)
            all_values.add(v)

        for arg, v in parsed.items():
            v_parsed = self.__parse_primitive_arg_val(v)
            all_values.add(v)
            all_values.add(arg)
            args.__setattr__(arg, v_parsed)

        # make list with only the unknown args
        unk_args = []
        for arg in argv:
            arg_candidate = re.sub('--', '', arg)
            is_bool = arg_candidate == 'True' or arg_candidate == 'False'
            if is_bool: continue

            if arg_candidate not in all_values:
                unk_args.append(arg)

        # when no bad args are left, return none to be consistent with super api
        if len(unk_args) == 0:
            unk_args = None

        # add hpc_exp_number if not passed in so we can never get None
        if HyperOptArgumentParser.SLURM_EXP_CMD not in args:
            args.__setattr__(HyperOptArgumentParser.SLURM_EXP_CMD, None)

        return args, unk_args

    def __parse_primitive_arg_val(self, val):
        if val is None:
            return True
        try:
            return int(val)
        except ValueError:
            try:
                return float(val)
            except ValueError:
                return val

    def parse_args(self, args=None, namespace=None):
        # call superclass arg first
        results = self.__parse_args(args, namespace)

        # extract vals
        old_args = vars(results)

        # override with json args if given
        if self.json_config_arg_name and old_args[self.json_config_arg_name]:
            for arg, v in self.__read_json_config(old_args[self.json_config_arg_name]).items():
                old_args[arg] = v

        # track args
        self.parsed_args = deepcopy(old_args)
        # attach optimization fx
        old_args['trials'] = self.opt_trials
        old_args['optimize_parallel'] = self.optimize_parallel
        old_args['optimize_parallel_gpu'] = self.optimize_parallel_gpu
        old_args['optimize_parallel_cpu'] = self.optimize_parallel_cpu
        old_args['generate_trials'] = self.generate_trials
        old_args['optimize_trials_parallel_gpu'] = self.optimize_trials_parallel_gpu

        return TTNamespace(**old_args)

    def __read_json_config(self, file_path):
        with open(file_path) as json_data:
            json_args = json.load(json_data)
            return json_args

    def opt_trials(self, num):
        self.trials = strategies.generate_trials(
            strategy=self.strategy,
            flat_params=self.__flatten_params(self.opt_args),
            nb_trials=num,
        )

        for trial in self.trials:
            ns = self.__namespace_from_trial(trial)
            yield ns

    def generate_trials(self, nb_trials):
        trials = strategies.generate_trials(
            strategy=self.strategy,
            flat_params=self.__flatten_params(self.opt_args),
            nb_trials=nb_trials,
        )

        trials = [self.__namespace_from_trial(x) for x in trials]
        return trials

    def optimize_parallel_gpu(
            self,
            train_function,
            gpu_ids,
            max_nb_trials=None,
    ):
        """
        Runs optimization across gpus with cuda drivers
        :param train_function:
        :param max_nb_trials:
        :param gpu_ids: List of strings like: ['0', '1, 3']
        :return:
        """
        self.trials = strategies.generate_trials(
            strategy=self.strategy,
            flat_params=self.__flatten_params(self.opt_args),
            nb_trials=max_nb_trials,
        )

        self.trials = [(self.__namespace_from_trial(x), train_function) for x in self.trials]

        # build q of gpu ids so we can use them in each process
        # this is thread safe so each process can pull out a gpu id, run its task and put it back when done
        if self.pool is None:
            gpu_q = Queue()
            for gpu_id in gpu_ids:
                gpu_q.put(gpu_id)

            # init a pool with the nb of worker threads we want
            nb_workers = len(gpu_ids)
            self.pool = Pool(processes=nb_workers, initializer=init, initargs=(gpu_q,))

        # apply parallelization
        results = self.pool.map(optimize_parallel_gpu_private, self.trials)
        return results

    def optimize_trials_parallel_gpu(
            self,
            train_function,
            nb_trials,
            trials,
            gpu_ids,
            nb_workers=4,
    ):
        """
        Runs optimization across gpus with cuda drivers
        :param train_function:
        :param nb_trials:
        :param gpu_ids: List of strings like: ['0', '1, 3']
        :param nb_workers:
        :return:
        """
        self.trials = trials
        self.trials = [(x, train_function) for x in self.trials]

        # build q of gpu ids so we can use them in each process
        # this is thread safe so each process can pull out a gpu id, run its task and put it back when done
        if self.pool is None:
            gpu_q = Queue()
            for gpu_id in gpu_ids:
                gpu_q.put(gpu_id)

            # init a pool with the nb of worker threads we want
            self.pool = Pool(processes=nb_workers, initializer=init, initargs=(gpu_q,))

        # apply parallelization
        results = self.pool.map(optimize_parallel_gpu_private, self.trials)
        return results

    def optimize_parallel_cpu(
            self,
            train_function,
            nb_trials,
            nb_workers=4,
    ):
        """
        Runs optimization across n cpus
        :param train_function:
        :param nb_trials:
        :param nb_workers:
        :return:
        """
        self.trials = strategies.generate_trials(
            strategy=self.strategy,
            flat_params=self.__flatten_params(self.opt_args),
            nb_trials=nb_trials
        )

        self.trials = [(self.__namespace_from_trial(x), train_function) for x in self.trials]

        # init a pool with the nb of worker threads we want
        if self.pool is None:
            self.pool = Pool(processes=nb_workers)

        # apply parallelization
        results = self.pool.map(optimize_parallel_cpu_private, self.trials)
        return results

    def optimize_parallel(
            self,
            train_function,
            nb_trials,
            nb_parallel=4,
    ):
        self.trials = strategies.generate_trials(
            strategy=self.strategy,
            flat_params=self.__flatten_params(self.opt_args),
            nb_trials=nb_trials
        )

        # nb of runs through all parallel systems
        fork_batches = [
            self.trials[i:i + nb_parallel] for i in range(0, len(self.trials), nb_parallel)
        ]

        for fork_batch in fork_batches:
            children = []

            # run n parallel forks
            for parallel_nb, trial in enumerate(fork_batch):

                # q up the trial and convert to a namespace
                ns = self.__namespace_from_trial(trial)

                # split new fork
                pid = os.fork()

                # when the process is a parent
                if pid:
                    children.append(pid)

                # when process is a child
                else:
                    # slight delay to make sure we don't overwrite over test tube log versions
                    sleep(parallel_nb * 0.5)
                    train_function(ns, parallel_nb)
                    os._exit(0)

            for i, child in enumerate(children):
                os.waitpid(child, 0)

    def __namespace_from_trial(self, trial):
        trial_dict = {d['name']: d['val'] for d in trial}
        for k, v in self.parsed_args.items():
            if k not in trial_dict:
                trial_dict[k] = v

        return TTNamespace(**trial_dict)

    def __flatten_params(self, params):
        """
        Turns a list of parameters with values into a flat tuple list of lists
        so we can permute
        :param params:
        :return:
        """
        flat_params = []
        for i, (opt_name, opt_arg) in enumerate(params.items()):
            if opt_arg.tunable:
                clean_name = opt_name.strip('-')
                clean_name = re.sub('-', '_', clean_name)
                param_groups = []
                for val in opt_arg.opt_values:
                    param_groups.append({'idx': i, 'val': val, 'name': clean_name})
                flat_params.append(param_groups)
        return flat_params
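A compact usage sketch for the parser above, based only on the methods shown (opt_list, parse_args, optimize_parallel_cpu); the train function and its hyperparameter values are assumed:

def train(hparams, *args):
    # Assumed user code: runs one trial with the sampled hyperparameters.
    print('learning_rate =', hparams.learning_rate, 'epochs =', hparams.epochs)

if __name__ == '__main__':
    parser = HyperOptArgumentParser(strategy='random_search')
    parser.add_argument('--epochs', default=10, type=int)
    parser.opt_list('--learning_rate', default=0.001, type=float,
                    options=[0.0001, 0.001, 0.01], tunable=True)
    hparams = parser.parse_args()
    hparams.optimize_parallel_cpu(train, nb_trials=6, nb_workers=2)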
Example #18
        str) + ".jpg"
    return df


path = untar_data(URLs.FOOD)
train_path = path / 'train.txt'
test_path = path / 'test.txt'


def load_data(index):
    train_df = filelist2df(train_path)
    test_df = filelist2df(test_path)
    food = DataBlock(blocks=(ImageBlock, CategoryBlock),
                     get_x=ColReader(1, pref=path / 'images'),
                     splitter=RandomSplitter(),
                     get_y=ColReader(cols=0),
                     item_tfms=Resize(224))
    dls = food.dataloaders(train_df.values, bs=64)


if __name__ == '__main__':
    set_start_method('spawn', force=True)
    try:
        pool = Pool(8)
        pool.map(load_data, [1, 2, 3, 4, 5, 6, 7, 8])
    except KeyboardInterrupt:
        exit()
    finally:
        pool.terminate()
        pool.join()
Example #19
 torch.set_default_tensor_type(torch.DoubleTensor)
 experiment = experiments.cart_pole
 pool = Pool(experiment.thread_count)
 #torch.multiprocessing.set_start_method('spawn')
 thread_count = experiment.thread_count
 pop_size = experiment.population
 generation_count = experiment.generations
 mutate_range = experiment.mutate_range
 population = Population()
 sys.stdout.write("Evaluating Initial Fitness:")
 sys.stdout.flush()
 thread_list = []
 new_nets = []
 for i in range(pop_size):
     new_net = Genome(experiment)
     new_net.evalFitness()
     new_nets.append(new_net)
 iters_required = math.ceil(pop_size / thread_count)
 for _ in range(iters_required):
     threads = min(thread_count, len(new_nets))
     unevaled_nets = []
     for i in range(threads):
         unevaled_nets.append(new_nets[i])
     for _ in range(threads):
         del new_nets[0]  #Check for bug/change line? inefficient at best
     fitnesses = pool.map(multiEvalFitness, unevaled_nets)
     #for i in range(threads):
     #    unevaled_nets[i].fitness = fitnesses[i]
     #for net in unevaled_nets:
     #    population.add(net)
 print("Done!")
Example #20
def ars(env_name, n_epochs, env_config, step_size, n_delta, n_top, exp_noise,
        n_workers, policy, seed):
    torch.autograd.set_grad_enabled(False)  # Gradient free baby!
    pool = Pool(processes=n_workers)

    W = torch.nn.utils.parameters_to_vector(policy.parameters())
    n_param = W.shape[0]

    if env_config is None:
        env_config = {}

    env = gym.make(env_name, **env_config)

    env.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)

    total_steps = 0
    r_hist = []

    exp_dist = torch.distributions.Normal(torch.zeros(n_delta, n_param),
                                          torch.ones(n_delta, n_param))
    do_rollout_partial = partial(do_rollout_train, env_name, policy)

    for _ in range(n_epochs):

        deltas = exp_dist.sample()

        ###
        pm_W = torch.cat((W + (deltas * exp_noise), W - (deltas * exp_noise)))

        results = pool.map(do_rollout_partial, pm_W)

        states = torch.empty(0)
        p_returns = []
        m_returns = []
        l_returns = []
        top_returns = []

        for p_result, m_result in zip(results[:n_delta], results[n_delta:]):
            ps, pr, plr = p_result
            ms, mr, mlr = m_result

            states = torch.cat((states, ms, ps), dim=0)
            p_returns.append(pr)
            m_returns.append(mr)
            l_returns.append(plr)
            l_returns.append(mlr)
            top_returns.append(max(pr, mr))

        top_idx = sorted(range(len(top_returns)),
                         key=lambda k: top_returns[k],
                         reverse=True)[:n_top]
        p_returns = torch.stack(p_returns)[top_idx]
        m_returns = torch.stack(m_returns)[top_idx]
        l_returns = torch.stack(l_returns)[top_idx]

        r_hist.append(l_returns.mean())
        ###

        W = W + (step_size / (n_delta * torch.cat(
            (p_returns, m_returns)).std() + 1e-6)) * torch.sum(
                (p_returns - m_returns) * deltas[top_idx].T, dim=1)

        ep_steps = states.shape[0]
        policy.state_means = update_mean(states, policy.state_means,
                                         total_steps)
        policy.state_std = update_std(states, policy.state_std, total_steps)
        do_rollout_partial = partial(do_rollout_train, env_name, policy)

        total_steps += ep_steps

        torch.nn.utils.vector_to_parameters(W, policy.parameters())

    return policy, r_hist
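In the notation of the code, the weight update between the ### markers is (alpha = step_size, N = n_delta, sigma_R = standard deviation of the 2*n_top retained returns, delta_k = the k-th retained perturbation):

    W \leftarrow W + \frac{\alpha}{N \sigma_R + 10^{-6}} \sum_{k \in \text{top}} \left( r_k^{+} - r_k^{-} \right) \delta_k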
Example #21
            json_decoded[county] = [[
                county_dict[county][0], county_dict[county][1], num_cases,
                date.today().strftime("%m/%d/%y")
            ]]

        with lock:
            with open(path, 'w') as json_file:
                json.dump(json_decoded, json_file)
        print(county, num_cases)
        return max(scraped_cases)
    else:
        print("Blocked")
        return 0


if __name__ == '__main__':
    counties = list(county_dict.keys())

    print(len(counties))
    freeze_support()

    try:
        set_start_method('spawn')
    except RuntimeError:
        pass

    p = Pool(4)
    records = p.map(get_cases, counties)
    p.close()
    p.join()
Example #22
import torch
from torch.multiprocessing import Pool, Process, set_start_method
from torch.autograd import Variable
import numpy as np
from scipy.ndimage import zoom

def get_pred(args):

  img = args[0]
  scale = args[1]
  # feed input data
  input_img = Variable(torch.from_numpy(img),
                     volatile=True).cuda()
  return input_img

if __name__ == '__main__':
    try:
        set_start_method('spawn')
    except RuntimeError:
        pass

    img = np.float32(np.random.randint(0, 2, (300, 300, 3)))
    scales = [1,2,3,4,5]
    scale_list = []
    for scale in scales: 
        scale_list.append([img,scale])
    multi_pool = Pool(processes=5)
    predictions = multi_pool.map(get_pred,scale_list)
    multi_pool.close() 
    multi_pool.join()
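Variable and volatile=True are remnants of pre-0.4 PyTorch; on current versions the same intent (inference without autograd) is written with torch.no_grad(). A sketch of the worker under that assumption:

def get_pred(args):
    img, scale = args
    with torch.no_grad():                      # no autograd graph needed for inference
        input_img = torch.from_numpy(img).cuda()
    return input_img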
Example #23
class LibriSelectionDataset(Dataset):
    """LibriSpeech Selection data from sincnet paper."""
    def __init__(self,
                 sizeWindow=20480,
                 db_wav_root=DB_WAV_ROOT,
                 fps_list=str(),
                 label_path=str(),
                 nSpeakers=-1,
                 n_process_loader=50,
                 MAX_SIZE_LOADED=4000000000):
        """Init.
        
        Args:
            - sizeWindow (int): size of the sliding window
            - db_wav_path (str):
            - fps_list_path (str): 
            - label_path (str):
            - n_process_loader (int):
            - MAX_SIZE_LOADED (int): target maximal size of the floating array
                                    containing all loaded data.
                                    
        """
        self.MAX_SIZE_LOADED = MAX_SIZE_LOADED
        self.n_process_loader = n_process_loader
        self.db_wav_root = Path(db_wav_root)
        self.sizeWindow = sizeWindow
        """Parsing customized to Libri-selection dataset."""
        fps_name_only = get_fps_from_txt(fps_list)
        label_dict = np.load(label_path, allow_pickle=True)[()]
        self.all_labels_fps = [(label_dict[x], Path(db_wav_root) / Path(x))
                               for x in fps_name_only]

        self.reload_pool = Pool(n_process_loader)
        self.prepare(
        )  # Split large number of files into packages, and set {self.currentPack=-1, self.nextPack=0}

        if nSpeakers == -1:
            nSpeakers = len(set(label_dict.values()))
        self.speakers = list(range(nSpeakers))
        self.data = []

        self.loadNextPack(first=True)
        self.loadNextPack()

    def __len__(self):
        """Get length."""
        return self.totSize // self.sizeWindow

    def prepare(self):
        """Prepare."""
        random.shuffle(self.all_labels_fps)
        start_time = time.time()

        print("Checking length...")
        allLength = self.reload_pool.map(extractLength, self.all_labels_fps)

        self.packageIndex, self.totSize = [], 0
        start, packageSize = 0, 0
        for index, length in tqdm.tqdm(enumerate(allLength)):
            packageSize += length
            if packageSize > self.MAX_SIZE_LOADED:
                self.packageIndex.append([start, index])
                self.totSize += packageSize
                start, packageSize = index, 0

        if packageSize > 0:
            self.packageIndex.append([start, len(self.all_labels_fps)])
            self.totSize += packageSize

        print(f"Done, elapsed: {time.time() - start_time:.3f} seconds")
        print(f'Scanned {len(self.all_labels_fps)} sequences '
              f'in {time.time() - start_time:.2f} seconds')
        print(f"{len(self.packageIndex)} chunks computed")
        self.currentPack = -1
        self.nextPack = 0

    def clear(self):
        """Clear."""
        if 'data' in self.__dict__:
            del self.data
        if 'speakerLabel' in self.__dict__:
            del self.speakerLabel
        if 'seqLabel' in self.__dict__:
            del self.seqLabel

    def getNPacks(self):
        """Get N packs."""
        return len(self.packageIndex)

    def getNSeqs(self):
        """Get N seqs."""
        return len(self.seqLabel) - 1

    def getNLoadsPerEpoch(self):
        """Get N loads per epoch."""
        return len(self.packageIndex)

    def getSpeakerLabel(self, idx):
        idSpeaker = next(
            x[0] for x in enumerate(self.speakerLabel) if x[1] > idx) - 1
        return idSpeaker

    def loadNextPack(self, first=False):
        """Load next pack."""
        self.clear()
        if not first:
            self.currentPack = self.nextPack
            start_time = time.time()
            print('Joining pool')
            self.r.wait()
            print(f'Joined process, elapsed={time.time()-start_time:.3f} secs')
            self.nextData = self.r.get()
            self.parseNextDataBlock()
            del self.nextData
        self.nextPack = (self.currentPack + 1) % len(self.packageIndex)
        seqStart, seqEnd = self.packageIndex[self.nextPack]
        if self.nextPack == 0 and len(self.packageIndex) > 1:
            self.prepare()
        """map() blocks until complete, map_async() returns immediately and 
        schedules a callback to be run on the result."""
        self.r = self.reload_pool.map_async(
            loadFile, self.all_labels_fps[seqStart:seqEnd])
        """loadFile: return speaker, seqName, seq"""

    def parseNextDataBlock(self):
        """Parse next data block."""
        # Labels
        self.speakerLabel = [0]
        self.seqLabel = [0]
        speakerSize = 0
        indexSpeaker = 0

        # To accelerate the process a bit
        self.nextData.sort(key=lambda x: (x[0], x[1]))
        """
        nextData[0] = (1243, '4910-14124-0001-1',
                       tensor([-0.0089, -0.0084, -0.0079,  ..., -0.0015, -0.0056,  0.0047]))
        """
        tmpData = []

        for speaker, seqName, seq in self.nextData:
            while self.speakers[indexSpeaker] < speaker:
                indexSpeaker += 1
                self.speakerLabel.append(speakerSize)
            if self.speakers[indexSpeaker] != speaker:
                raise ValueError(f'{speaker} invalid speaker')

            sizeSeq = seq.size(0)
            tmpData.append(seq)
            self.seqLabel.append(self.seqLabel[-1] + sizeSeq)
            speakerSize += sizeSeq
            del seq

        self.speakerLabel.append(speakerSize)
        self.data = torch.cat(tmpData, dim=0)

    def __getitem__(self, idx):
        """Get item."""
        if idx < 0 or idx >= len(self.data) - self.sizeWindow - 1:
            print(idx)

        outData = self.data[idx:(self.sizeWindow + idx)].view(1, -1)
        label = torch.tensor(self.getSpeakerLabel(idx), dtype=torch.long)

        return outData, label

    def getBaseSampler(self, type, batchSize, offset):
        """Get base sampler."""
        if type == "samespeaker":
            return SameSpeakerSampler(batchSize, self.speakerLabel,
                                      self.sizeWindow, offset)
        if type == "samesequence":
            return SameSpeakerSampler(batchSize, self.seqLabel,
                                      self.sizeWindow, offset)
        if type == "sequential":
            return SequentialSampler(len(self.data), self.sizeWindow, offset,
                                     batchSize)
        sampler = UniformAudioSampler(len(self.data), self.sizeWindow, offset)
        return BatchSampler(sampler, batchSize, True)

    def getDataLoader(self,
                      batchSize,
                      type,
                      randomOffset,
                      numWorkers=0,
                      onLoop=-1):
        """Get a batch sampler for the current dataset.
        
        Args:
            - batchSize (int): batch size
            - groupSize (int): in the case of type in ["samespeaker", "samesequence"]
            number of items sharing a same label in the group
            (see AudioBatchSampler)
            - type (string):
                type == "samespeaker": grouped sampler speaker-wise
                type == "samesequence": grouped sampler sequence-wise
                type == "sequential": sequential sampling
                else: uniform random sampling of the full audio
                vector
            - randomOffset (bool): if True add a random offset to the sampler
                                   at the beginning of each iteration
                                   
        """
        nLoops = len(self.packageIndex)
        totSize = self.totSize // (self.sizeWindow * batchSize)
        if onLoop >= 0:
            self.currentPack = onLoop - 1
            self.loadNextPack()
            nLoops = 1

        def samplerCall():
            offset = random.randint(0, self.sizeWindow // 2) \
                if randomOffset else 0
            return self.getBaseSampler(type, batchSize, offset)

        return AudioLoader(self, samplerCall, nLoops, self.loadNextPack,
                           totSize, numWorkers)
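The loadNextPack/map_async combination above prefetches the next package of files in background workers while the current package is being consumed; stripped of the dataset bookkeeping, the pattern looks roughly like this (file names assumed):

from torch.multiprocessing import Pool

def load_file(path):
    # assumed stand-in for loadFile: read one file into memory
    return path

pool = Pool(4)
pending = pool.map_async(load_file, ['a.wav', 'b.wav'])  # returns immediately
# ... work on the package that is already in memory ...
pending.wait()               # block only when the prefetched package is needed
next_package = pending.get()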
Example #24
        if loss.requires_grad:
            lbfgs_g.zero_grad()
            loss.backward()
        print("total train loss", loss.item()**(1/train_exp))
        return loss


    if True:
        for t in range(25):
            lbfgs_g.step(closure)    
            if t % 1 == 0:
                val_loss, test_loss = val_test_loss()
                print("Iteration", t, best_loss, val_loss.item(), test_loss.item())
                if val_loss.item() < best_loss:
                    best_loss = val_loss.item()
                    torch.save(model.state_dict(), "trained_models/%s_game_model_vdelta%d_spacing%d_scaleaccel%d_vel_predsize%d_fold%d.torch" % (dataset_name,use_deltav,spacing,scale_accel,pred_size,fold,))

    # load best model
    model.load_state_dict(torch.load("trained_models/%s_game_model_vdelta%d_spacing%d_scaleaccel%d_vel_predsize%d_fold%d.torch" % (dataset_name,use_deltav,spacing,scale_accel,pred_size,fold,)))

    val_loss, test_loss = val_test_loss()
    print("Final  Result spacing%d pred_size%d dataset %s fold%d" % (spacing,pred_size, dataset_name, fold,), val_loss.item(), test_loss.item())


    
multi_pool = Pool(processes=5)
print("Running training on dataset %s ..." % (dataset_name,))
predictions = multi_pool.map(train_fold,list(range(4)))
multi_pool.close() 
multi_pool.join()
Example #25
def evolve(experiment):
    total_frames = 0
    #If CUDA is breaking try adding this and removing it from main/run_experiment
    #try:
    #    set_start_method('spawn')
    #except RuntimeError:
    #    pass
    pool = Pool(experiment.thread_count)
    thread_count = experiment.thread_count
    time_start = time.perf_counter()
    #Set params based on the current experiment
    pop_size = experiment.population
    generation_count = experiment.generations

    #Create new random population, sorted by starting fitness
    population = Population(experiment)
    new_nets = []
    saved = []  #Saving fittest from each gen to pickle file
    #sys.stdout.write("Evaluating Intial Fitness:")
    #sys.stdout.flush()
    for i in range(pop_size):
        # maybe write a helper that makes a new net of the type specified by the experiment
        if experiment.genome == 'NEAT':
            new_net = NEATGenome(experiment)
        elif experiment.genome == 'TensorNEAT':
            new_net = TensorNEATGenome(experiment)
        else:
            new_net = Genome(experiment)
        new_nets.append(new_net)

    #Multithreaded fitness evaluation
    """
    net_copies = []
    for _ in range(thread_count):
        net_copies.append([])
    for i in range(pop_size):
        net_copies[i%thread_count].append(copy.deepcopy(new_nets[i]))
    multiReturn = pool.map(multiEvalFitness, net_copies)
    fitnesses = []
    for thread in multiReturn:
        fitnesses.append(thread[0])
        total_frames += thread[1]
    for i in range(pop_size):
        new_nets[i].fitness = fitnesses[i%thread_count][i//thread_count]
    for net in new_nets:
        population.add(net)
    """
    #net_copies = []
    #for i in range(pop_size):
    #    net_copies.append(copy.deepcopy(new_nets[i]))
    multiReturn = pool.map(multiEvalFitness, new_nets)
    for i in range(pop_size):
        #print(new_nets[i].fitness)
        new_nets[i].fitness = multiReturn[i][0]
        #sys.stdout.write(str(multiReturn[i][0]) + "\n")
        #sys.stdout.flush()
        total_frames += multiReturn[i][1]
    for net in new_nets:
        population.add(net)

    #Run the main algorithm over many generations
    #for g in range(generation_count):
    generation = 0
    while total_frames < experiment.max_frames:
        #First print reports on generation:
        #Debugging report I hope to remove soon
        """
        if generation%20 == 0:
            print("Top genomes:")
            population[0].printToTerminal()
            population[1].printToTerminal()
            population[2].printToTerminal()
            print("Species Report: size, gens_since_improvement, record fitness, current fitness")
            for s in population.species:
                if s.size() > 0:
                    print(s.size(), s.gens_since_improvement, s.last_fittest, s.genomes[0].fitness)
            #print(torch.cuda.memory_summary())
        gen_report_string = "\nGeneration " + str(generation) + "\nTotal frames used: " + str(total_frames) + "\nHighest Fitness: "+ str(population.fittest().fitness) + "\nTotal elapsed time:" + str(time.perf_counter() - time_start) + " seconds\n"
        sys.stdout.write(gen_report_string)
        sys.stdout.flush()
        """

        #Next do the speciation work
        #Check all species and remove those that haven't improved in so many generations
        population.checkSpeciesForImprovement(experiment.gens_to_improve)
        #Adjust fitness of each individual with fitness sharing
        #Done here so it is skipped for the final population, where plain maximum fitness is desired
        if experiment.fitness_sharing:
            for species in population.species:
                for genome in species:
                    genome.fitness = genome.fitness / species.size()
        #Population is re-ordered afterwards based on new fitness
        population.reorder()  #make sure this is only called when necessary
        #Assign how many offspring each species gets based on fitness of the species
        population.assignOffspringProportions()
        #Now we set the crossover/mutate counts for NEAT; since speciated evolution has a varying count of elite genomes retained
        elite_count = 0
        for species in population.species:
            if species.size(
            ) >= experiment.elite_threshold and species.gens_since_improvement < experiment.gens_to_improve:
                elite_count += experiment.elite_per_species
        experiment.elite_count = elite_count
        experiment.mutate_count = math.floor(
            experiment.mutate_ratio *
            (experiment.population - experiment.elite_count))
        experiment.crossover_count = experiment.population - experiment.mutate_count - experiment.elite_count
        #Make the new population to fill this generation
        new_pop = Population(experiment)
        #Now we select species reps for the new pop based on the old one
        for s in population.species:
            rep = population.randOfSpecies(s)
            new_species = Species(
                experiment, rep, False, s.gens_since_improvement,
                s.last_fittest, s.can_reproduce
            )  #The genome is copied over as a rep but not added
            new_pop.species.append(new_species)

        new_nets = []

        #Crossover is done first
        #Roll the dice for interspecies; limit to one per gen to make this calculation simpler
        if random.random() < experiment.interspecies_crossover * experiment.crossover_count:
            parent1 = population.select()
            parent2 = population.select()
            while parent1 == parent2:
                parent2 = population.select()
            new_net = parent1.crossover(parent2)
            new_nets.append(new_net)
            experiment.crossover_count -= 1
        #Create and add them to the pop, subtract from crossover count
        #Since we round up to the nearest integer to find how many to take from each species, the total number at the end will likely exceed the desired number
        #We fix this at the end through random pruning
        offspring = []
        for species in population.species:
            count = math.ceil(experiment.crossover_count *
                              species.offspring_proportion)
            for _ in range(count):
                if species.size() == 1:
                    #Just do mutation
                    parent = species.select()
                    new_net = parent.mutate()
                    offspring.append(new_net)
                else:  #It's possible that this has to repeat a lot if there's only a few with a large fit diff, but unlikely
                    parent1 = species.select()
                    parent2 = species.select()
                    while parent1 == parent2:
                        parent2 = species.select()
                    new_net = parent1.crossover(parent2)
                    offspring.append(new_net)
                    #Do the crossover here
        #Now remove from offspring at random until we have the right number
        to_remove = len(offspring) - experiment.crossover_count
        if to_remove < 0:
            print(to_remove)
            assert False
        for _ in range(to_remove):
            del offspring[random.randint(0, len(offspring) - 1)]
        for n in offspring:
            new_nets.append(n)

        #Mutation to create offspring is done next; the same pruning method is used
        mutated = []
        for species in population.species:
            count = math.ceil(experiment.mutate_count *
                              species.offspring_proportion)
            for _ in range(count):
                parent = species.select()
                new_net = parent.mutate()
                mutated.append(new_net)
        #Now remove from mutated at random until we have the right number
        to_remove = len(mutated) - experiment.mutate_count
        assert (to_remove >= 0)
        for _ in range(to_remove):
            del mutated[random.randint(0, len(mutated) - 1)]
        for n in mutated:
            new_nets.append(n)

        #net_copies = []
        #for i in range(pop_size-elite_count):
        #    net_copies.append(copy.deepcopy(new_nets[i]))
        multiReturn = pool.map(multiEvalFitness, new_nets)
        for i in range(pop_size - elite_count):
            new_nets[i].fitness = multiReturn[i][0]
            total_frames += multiReturn[i][1]
        for net in new_nets:
            new_pop.add(net)

        #Elite Carry-over; re-evaluates fitness first before selection
        #Currently not built to carry best of each species over; this should be handled by fitness sharing
        #And since this is typically only 1, we just want the fittest genome regardless of species

        #Eval all the elite nets many times
        elite_nets = []
        for species in population.species:
            if (species.size() >= experiment.elite_threshold
                    and species.gens_since_improvement < experiment.gens_to_improve):
                for i in range(experiment.elite_range):
                    elite_nets.append(species[i])
        #net_copies = []
        #for i in range(len(elite_nets)):
        #    net_copies.append(copy.deepcopy(elite_nets[i]))
        multiReturn = pool.map(multiEvalFitnessElite, elite_nets)
        #print(fitnesses)
        for i in range(len(elite_nets)):
            elite_nets[i].fitness = multiReturn[i][0]
            total_frames += multiReturn[i][1]

        elite_max = float("-inf")
        top_elite = None
        for species in population.species:
            if (species.size() >= experiment.elite_threshold
                    and species.gens_since_improvement < experiment.gens_to_improve):
                #This needs to be redone for elite_per_species > 1; currently it would just take the best genome twice
                for _ in range(experiment.elite_per_species):
                    best_fitness = float('-inf')
                    fittest = None
                    for i in range(experiment.elite_range):
                        if species[i].fitness > best_fitness:
                            best_fitness = species[i].fitness
                            fittest = species[i]
                    if best_fitness > elite_max:
                        elite_max = best_fitness
                        top_elite = fittest
                    new_pop.add(fittest)
        #If the experiment ran enough trials, we can just use the top elite. Generally this should be reserved for single-species algorithms
        if not experiment.save_elite:  #If not, run our own trials, not counting frames, to find the actual best genome from this generation to save for final evaluation
            elite_nets = []
            for i in range(10):
                elite_nets.append(population[i])
            fitnesses = pool.map(multiEvalFitnessThirty, elite_nets)
            for i in range(10):
                elite_nets[i].fitness = fitnesses[i]

            elite_max = float("-inf")
            top_elite = None
            for net in elite_nets:
                if net.fitness > elite_max:
                    elite_max = net.fitness
                    top_elite = net

        #Still need to modify/add this for regular/tensor genomes
        save_copy = top_elite.newCopy()
        saved.append([save_copy, elite_max])
        """
        if top_elite is None:
            sys.stdout.write("No elite could be found, using pop.fittest instead\n")
            for s in population.species:
                sys.stdout.write(str(species.size()) + str(species.gens_since_improvement) + str(species.can_reproduce))
            top_elite = population.fittest()
            top_elite.evalFitness(iters=top_elite.experiment.elite_evals)#These frames are not counted since they are only for reporting purposes and do not affect the actual algorithm
            elite_max = top_elite.fitness
        if elite_max < 5.0:
            for s in population.species:
                print(s.can_reproduce)
            sys.stdout.write("Strange elite behavior. Fitness is " + str(top_elite.fitness) + " Trial is " + str(top_elite.evalFitness()) + " Top fitness is: " + str(population.fittest().fitness) + "\n")
            sys.stdout.flush()
        #Save top elite carryover to pickle file
        save_copy = copy.deepcopy(top_elite)
        saved.append([save_copy, elite_max])
        """
        #total_layers = 0
        #for genome in new_pop:
        #    total_layers += genome.layer_count
        #avg_layers = total_layers/new_pop.size()
        elapsed = int(time.perf_counter() - time_start)
        time_string = f"{elapsed // 3600}:{(elapsed % 3600) // 60}:{elapsed % 60}"
        #sys.stdout.write(str(100*total_frames/experiment.max_frames) + "% complete | " + time_string + " elapsed | " + str(elite_max) + " recent score | " + str(save_copy.layer_size) + " " + str(save_copy.layer_count) + " layer size/count | " + str(len(new_pop.species)) + " total species | " + str(avg_layers) + " average layers\n")
        sys.stdout.write(
            str(100 * total_frames / experiment.max_frames) + "% complete | " +
            time_string + " elapsed | " + str(elite_max) + " recent score | " +
            str(len(new_pop.species)) + " total species \n")
        sys.stdout.flush()
        population.species = []
        population.genomes = []
        population = new_pop
        generation += 1
    print("Final frame count:", str(total_frames))
    print("Total generations:", generation)
    print("Time Elapsed:", time.perf_counter() - time_start)
    return population, saved
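
The loop above hands every new genome to pool.map and then copies the returned (fitness, frames) pairs back onto the originals, since the workers only ever mutate pickled copies. A minimal, self-contained sketch of that pattern, using hypothetical Genome and eval_genome stand-ins rather than this module's own classes:

from multiprocessing import Pool
import random


class Genome:
    """Hypothetical stand-in for the genomes evolved above."""
    def __init__(self):
        self.fitness = None


def eval_genome(genome):
    #Placeholder evaluation: return (fitness, frames used by the rollout)
    frames = random.randint(100, 200)
    return random.random(), frames


if __name__ == "__main__":
    population = [Genome() for _ in range(8)]
    total_frames = 0
    with Pool(4) as pool:
        results = pool.map(eval_genome, population)
    #Workers operate on copies, so fitness is written back onto the parent objects
    for genome, (fitness, frames) in zip(population, results):
        genome.fitness = fitness
        total_frames += frames
    print(total_frames)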
Example #26
class AudioBatchData(Dataset):
    def __init__(self,
                 path,
                 sizeWindow,
                 seqNames,
                 phoneLabelsDict,
                 nSpeakers,
                 nProcessLoader=50,
                 MAX_SIZE_LOADED=4000000000):
        """
        Args:
            - path (string): path to the training dataset
            - sizeWindow (int): size of the sliding window
            - seqNames (list): sequences to load
            - phoneLabelsDict (dictionary): if not None, a dictionary with the
                                            following entries:

                                            "step": size of a labelled window
                                            "$SEQ_NAME": list of phoneme labels for
                                            the sequence $SEQ_NAME
            - nSpeakers (int): number of speakers to expect.
            - nProcessLoader (int): number of processes to call when loading the
                                    data from the disk
            - MAX_SIZE_LOADED (int): target maximal size of the floating array
                                     containing all loaded data.
        """
        self.MAX_SIZE_LOADED = MAX_SIZE_LOADED
        self.nProcessLoader = nProcessLoader
        self.dbPath = Path(path)
        self.sizeWindow = sizeWindow
        self.seqNames = [(s, self.dbPath / x) for s, x in seqNames]
        self.reload_pool = Pool(nProcessLoader)

        self.prepare()
        self.speakers = list(range(nSpeakers))
        self.data = []

        self.phoneSize = 0 if phoneLabelsDict is None else \
            phoneLabelsDict["step"]
        self.phoneStep = 0 if phoneLabelsDict is None else \
            self.sizeWindow // self.phoneSize

        self.phoneLabelsDict = deepcopy(phoneLabelsDict)
        self.loadNextPack(first=True)
        self.loadNextPack()
        self.doubleLabels = False

    def resetPhoneLabels(self, newPhoneLabels, step):
        self.phoneSize = step
        self.phoneStep = self.sizeWindow // self.phoneSize
        self.phoneLabelsDict = deepcopy(newPhoneLabels)
        self.loadNextPack()

    @staticmethod
    def splitSeqTags(seqName):
        path = os.path.normpath(seqName)
        return path.split(os.sep)

    def getSeqNames(self):
        return [str(x[1]) for x in self.seqNames]

    def clear(self):
        if 'data' in self.__dict__:
            del self.data
        if 'speakerLabel' in self.__dict__:
            del self.speakerLabel
        if 'phoneLabels' in self.__dict__:
            del self.phoneLabels
        if 'seqLabel' in self.__dict__:
            del self.seqLabel

    def prepare(self):
        random.shuffle(self.seqNames)
        start_time = time.time()

        print("Checking length...")
        allLength = self.reload_pool.map(extractLength, self.seqNames)

        self.packageIndex, self.totSize = [], 0
        start, packageSize = 0, 0
        for index, length in tqdm.tqdm(enumerate(allLength)):
            packageSize += length
            if packageSize > self.MAX_SIZE_LOADED:
                self.packageIndex.append([start, index])
                self.totSize += packageSize
                start, packageSize = index, 0

        if packageSize > 0:
            self.packageIndex.append([start, len(self.seqNames)])
            self.totSize += packageSize

        print(f"Done, elapsed: {time.time() - start_time:.3f} seconds")
        print(f'Scanned {len(self.seqNames)} sequences '
              f'in {time.time() - start_time:.2f} seconds')
        print(f"{len(self.packageIndex)} chunks computed")
        self.currentPack = -1
        self.nextPack = 0

    def getNPacks(self):
        return len(self.packageIndex)

    def loadNextPack(self, first=False):
        self.clear()
        if not first:
            self.currentPack = self.nextPack
            start_time = time.time()
            print('Joining pool')
            self.r.wait()
            print(f'Joined process, elapsed={time.time()-start_time:.3f} secs')
            self.nextData = self.r.get()
            self.parseNextDataBlock()
            del self.nextData
        self.nextPack = (self.currentPack + 1) % len(self.packageIndex)
        seqStart, seqEnd = self.packageIndex[self.nextPack]
        if self.nextPack == 0 and len(self.packageIndex) > 1:
            self.prepare()
        self.r = self.reload_pool.map_async(loadFile,
                                            self.seqNames[seqStart:seqEnd])

    def parseNextDataBlock(self):

        # Labels
        self.speakerLabel = [0]
        self.seqLabel = [0]
        self.phoneLabels = []
        speakerSize = 0
        indexSpeaker = 0

        # To accelerate the process a bit
        self.nextData.sort(key=lambda x: (x[0], x[1]))
        tmpData = []

        for speaker, seqName, seq in self.nextData:
            while self.speakers[indexSpeaker] < speaker:
                indexSpeaker += 1
                self.speakerLabel.append(speakerSize)
            if self.speakers[indexSpeaker] != speaker:
                raise ValueError(f'{speaker} invalid speaker')

            if self.phoneLabelsDict is not None:
                self.phoneLabels += self.phoneLabelsDict[seqName]
                newSize = len(self.phoneLabelsDict[seqName]) * self.phoneSize
                seq = seq[:newSize]

            sizeSeq = seq.size(0)
            tmpData.append(seq)
            self.seqLabel.append(self.seqLabel[-1] + sizeSeq)
            speakerSize += sizeSeq
            del seq

        self.speakerLabel.append(speakerSize)
        self.data = torch.cat(tmpData, dim=0)

    def getPhonem(self, idx):
        idPhone = idx // self.phoneSize
        return self.phoneLabels[idPhone:(idPhone + self.phoneStep)]

    def getSpeakerLabel(self, idx):
        idSpeaker = next(
            x[0] for x in enumerate(self.speakerLabel) if x[1] > idx) - 1
        return idSpeaker

    def __len__(self):
        return self.totSize // self.sizeWindow

    def __getitem__(self, idx):

        if idx < 0 or idx >= len(self.data) - self.sizeWindow - 1:
            print(idx)

        outData = self.data[idx:(self.sizeWindow + idx)].view(1, -1)
        label = torch.tensor(self.getSpeakerLabel(idx), dtype=torch.long)
        if self.phoneSize > 0:
            label_phone = torch.tensor(self.getPhonem(idx), dtype=torch.long)
            if not self.doubleLabels:
                label = label_phone
        else:
            label_phone = torch.zeros(1)

        if self.doubleLabels:
            return outData, label, label_phone

        return outData, label

    def getNSpeakers(self):
        return len(self.speakers)

    def getNSeqs(self):
        return len(self.seqLabel) - 1

    def getNLoadsPerEpoch(self):
        return len(self.packageIndex)

    def getBaseSampler(self, type, batchSize, offset):
        if type == "samespeaker":
            return SameSpeakerSampler(batchSize, self.speakerLabel,
                                      self.sizeWindow, offset)
        if type == "samesequence":
            return SameSpeakerSampler(batchSize, self.seqLabel,
                                      self.sizeWindow, offset)
        if type == "sequential":
            return SequentialSampler(len(self.data), self.sizeWindow, offset,
                                     batchSize)
        sampler = UniformAudioSampler(len(self.data), self.sizeWindow, offset)
        return BatchSampler(sampler, batchSize, True)

    def getDataLoader(self,
                      batchSize,
                      type,
                      randomOffset,
                      numWorkers=0,
                      onLoop=-1):
        r"""
        Get a data loader for the current dataset.
        Args:
            - batchSize (int): batch size
            - type (string):
                type == "samespeaker": grouped sampler, speaker-wise
                type == "samesequence": grouped sampler, sequence-wise
                type == "sequential": sequential sampling
                else: uniform random sampling of the full audio
                vector
            - randomOffset (bool): if True, add a random offset to the sampler
                                   at the beginning of each iteration
        """
        nLoops = len(self.packageIndex)
        totSize = self.totSize // (self.sizeWindow * batchSize)
        if onLoop >= 0:
            self.currentPack = onLoop - 1
            self.loadNextPack()
            nLoops = 1

        def samplerCall():
            offset = random.randint(0, self.sizeWindow // 2) \
                if randomOffset else 0
            return self.getBaseSampler(type, batchSize, offset)

        return AudioLoader(self, samplerCall, nLoops, self.loadNextPack,
                           totSize, numWorkers)
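
prepare() groups the sequence list into packs bounded by MAX_SIZE_LOADED, and loadNextPack() consumes one pack while map_async prefetches the next in the background. A stripped-down sketch of that double-buffering idea, with a placeholder load_file in place of the real audio reader:

from multiprocessing import Pool


def load_file(name):
    #Placeholder for the real disk read; returns fake "samples"
    return [name] * 3


class PrefetchingLoader:
    """Keeps one pack in memory while the pool prefetches the next one."""
    def __init__(self, names, pack_size=2, workers=2):
        self.packs = [names[i:i + pack_size]
                      for i in range(0, len(names), pack_size)]
        self.pool = Pool(workers)
        self.next_pack = 0
        #Kick off the first asynchronous load
        self.pending = self.pool.map_async(load_file, self.packs[self.next_pack])

    def load_next(self):
        data = self.pending.get()  #join the previous asynchronous load
        self.next_pack = (self.next_pack + 1) % len(self.packs)
        #Immediately start prefetching the following pack
        self.pending = self.pool.map_async(load_file, self.packs[self.next_pack])
        return data


if __name__ == "__main__":
    loader = PrefetchingLoader(["a.wav", "b.wav", "c.wav", "d.wav"])
    print(loader.load_next())
    print(loader.load_next())
    loader.pool.terminate()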
Example #27
def trawl(dset,dtype,epochs,parallel=False,batch_size=1,max_n=30,max_rankings=1000,opt='Adam',num_dsets=10,seed=True,RE=False,K=5):
    """
    trawls over a directory and fits models to all data files

    Args:
    dset- name of dataset(s) considered
    dtype- 'soi' for partial rankings, 'soc' for complete rankings
    epochs- number of times to loop over the data
    parallel- whether to train models in parallel over the datasets in the directory
    batch_size- number of choices to train on at a time
    max_n- largest number of alternatives allowed to train on a dataset
    max_rankings- maximum number of rankings to fit a dataset
    opt- which optimizer to use
    num_dsets- number of datasets to fit
    seed- whether to seed PCMC
    RE- whether to compute repeated elimination (RS if False)
    K- number of CV folds for each dataset
    """
    #we will loop over the datasets stored in this directory
    path = os.getcwd()+os.sep+'data'+os.sep+dset
    files = os.listdir(path)
    #shuffle(files)

    #this is where we'll save the output models
    save_path = os.getcwd()+os.sep+'cache'+os.sep+'learned_models'+os.sep+dset+os.sep

    job_list = []
    batch = (batch_size>1)
    for filename in files:  #loop over the directory
        print(filename)
        if filename.endswith(dtype):  #only process files of the requested ranking type
            filepath = path+os.sep+filename
            if dtype=='soi':
                L,n = scrape_soi(filepath)
            else:
                L,n = scrape_soc(filepath)
            if len(L)<=10 or len(L)>max_rankings or n>max_n:
                if len(L)<=10:
                    reason = 'too few rankings- '+str(len(L))+', min is 10'
                elif len(L)>max_rankings:
                    reason = 'too many rankings- '+str(len(L))+', max is '+str(max_rankings)
                else:
                    reason = 'too many alternatives- '+str(n)+', max is '+str(max_n)
                print(filename+' skipped, '+reason)
                continue
            else:
                print(filename+' added')

            #collect models
            models = []
            for d in [1,4,8]:
                if d>n:
                    continue
                models.append((f'CRS, r = {d}', cp.CDM, {'embedding_dim': d}))
            models.append((f'PL', cp.MNL, {}))
            #models.append(PCMC(n,batch=batch))
            #models.append((f'RS-PCMC', cp.PCMC, {'batch': batch}))
            

            #append tuple containing all the objects needed to train the model on the dataset
            job_list.append((L,n,models,save_path+filename[:-4]+'-'+dtype,epochs,batch_size,opt,seed,False,K))
            if RE:
                job_list.append((list(map(lambda x:x[::-1],L)),n,models,save_path+filename[:-4]+'-'+dtype,epochs,batch_size,opt,seed,True,K))
            if len(job_list)>=num_dsets:
                print('maximum number of datasets reached')
                break

    print(str(len(job_list))+' datasets total')
    print(str(sum(map(lambda x: len(x[0]),job_list)))+ ' total rankings')
    #sorts the jobs by number of alternatives*number of (partial) rankings
    #will roughly be the number of choices, up to partial ranking length
    job_list.sort(key=lambda x: x[1]*len(x[0]))
    #training for each dataset can be done in parallel with this
    if parallel:
        p = Pool(4)
        p.map(parallel_helper,job_list)
        p.close()
        p.join()
    else:
        for job in job_list:
            parallel_helper(job)
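
trawl builds a list of per-dataset jobs, sorts them by a rough cost estimate, and then maps a helper over the list either serially or with a Pool. A minimal, self-contained sketch of that dispatch pattern, with a made-up fit_job in place of parallel_helper:

from multiprocessing import Pool


def fit_job(job):
    #Placeholder for fitting one dataset: job is (rankings, n_alternatives)
    rankings, n = job
    return n * len(rankings)


if __name__ == "__main__":
    job_list = [([(0, 1, 2)] * 30, 3), ([(0, 1)] * 5, 2), ([(0, 1, 2, 3)] * 100, 4)]
    #Cheapest jobs first: roughly n_alternatives * number of rankings
    job_list.sort(key=lambda x: x[1] * len(x[0]))
    with Pool(2) as p:
        results = p.map(fit_job, job_list)
    print(results)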
Example #28
def ars(env_name,
        policy,
        n_epochs,
        n_workers=8,
        step_size=.02,
        n_delta=32,
        n_top=16,
        exp_noise=0.03,
        zero_policy=True,
        postprocess=postprocess_default):
    """
    Augmented Random Search
    https://arxiv.org/pdf/1803.07055

    Args:

    Returns:

    Example:
    """
    torch.autograd.set_grad_enabled(False)

    pool = Pool(processes=n_workers)
    env = gym.make(env_name)
    W = torch.nn.utils.parameters_to_vector(policy.parameters())
    n_param = W.shape[0]

    if zero_policy:
        W = torch.zeros_like(W)

    r_hist = []
    s_mean = torch.zeros(env.observation_space.shape[0])
    s_stdv = torch.ones(env.observation_space.shape[0])

    total_steps = 0
    exp_dist = torch.distributions.Normal(torch.zeros(n_delta, n_param),
                                          torch.ones(n_delta, n_param))
    do_rollout_partial = partial(do_rollout_train, env_name, policy,
                                 postprocess)

    for _ in range(n_epochs):

        deltas = exp_dist.sample()
        pm_W = torch.cat((W + (deltas * exp_noise), W - (deltas * exp_noise)))

        results = pool.map(do_rollout_partial, pm_W)

        states = torch.empty(0)
        p_returns = []
        m_returns = []
        l_returns = []
        top_returns = []

        for p_result, m_result in zip(results[:n_delta], results[n_delta:]):
            ps, pr, plr = p_result
            ms, mr, mlr = m_result

            states = torch.cat((states, ms, ps), dim=0)
            p_returns.append(pr)
            m_returns.append(mr)
            l_returns.append(plr)
            l_returns.append(mlr)
            top_returns.append(max(pr, mr))

        top_idx = sorted(range(len(top_returns)),
                         key=lambda k: top_returns[k],
                         reverse=True)[:n_top]
        p_returns = torch.stack(p_returns)[top_idx]
        m_returns = torch.stack(m_returns)[top_idx]
        l_returns = torch.stack(l_returns)[top_idx]

        r_hist.append(l_returns.mean())

        ep_steps = states.shape[0]
        s_mean = update_mean(states, s_mean, total_steps)
        s_stdv = update_std(states, s_stdv, total_steps)
        total_steps += ep_steps

        policy.state_means = s_mean
        policy.state_std = s_stdv
        do_rollout_partial = partial(do_rollout_train, env_name, policy,
                                     postprocess)

        W = W + (step_size / (n_delta * torch.cat(
            (p_returns, m_returns)).std() + 1e-6)) * torch.sum(
                (p_returns - m_returns) * deltas[top_idx].T, dim=1)

    pool.terminate()
    torch.nn.utils.vector_to_parameters(W, policy.parameters())
    return policy, r_hist
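
The last statement of each epoch is the ARS weight update: weight the reward differences of the best-scoring perturbation directions by their deltas and normalize by the standard deviation of the retained returns. A minimal numeric sketch of just that step, mirroring the expression above with made-up returns:

import torch

n_param, n_delta, n_top, step_size = 4, 6, 3, 0.02
W = torch.zeros(n_param)
deltas = torch.randn(n_delta, n_param)
p_returns = torch.randn(n_delta)  #returns of rollouts with W + noise*delta
m_returns = torch.randn(n_delta)  #returns of rollouts with W - noise*delta

#Keep only the directions whose better side scored highest
top_idx = torch.topk(torch.max(p_returns, m_returns), n_top).indices
p_top, m_top = p_returns[top_idx], m_returns[top_idx]

#Same normalization as above: std of the retained +/- returns
sigma_R = torch.cat((p_top, m_top)).std() + 1e-6
W = W + (step_size / (n_delta * sigma_R)) * ((p_top - m_top) @ deltas[top_idx])
print(W)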
Example #29
def check_score():
    print(args)
    p_size = cpu_num
    print("use cpu num:{}".format(p_size))

    loss_history = []
    check_deck_id = int(args.check_deck_id) if args.check_deck_id is not None else None
    cuda_flg = args.cuda == "True"
    #node_num = int(args.node_num)
    #net = New_Dual_Net(node_num)
    model_name = args.model_name
    existed_output_list = os.listdir(path="./Battle_Result")
    existed_output_list = [
        f for f in existed_output_list
        if os.path.isfile(os.path.join("./Battle_Result", f))
    ]
    result_name = "{}:{}".format(model_name.split(".")[0], args.deck_list)
    same_name_count = len(
        [1 for cell in existed_output_list if result_name in cell])
    print("same_name_count:", same_name_count)
    result_name += "_{:0>3}".format(same_name_count + 1)
    PATH = 'model/' + model_name
    model_dict = torch.load(PATH)
    n_size = model_dict["final_layer.weight"].size()[1]
    net = New_Dual_Net(n_size, hidden_num=args.hidden_num[0])
    net.load_state_dict(model_dict)
    opponent_net = None
    if args.opponent_model_name is not None:
        # opponent_net = New_Dual_Net(node_num)
        o_model_name = args.opponent_model_name
        PATH = 'model/' + o_model_name
        model_dict = torch.load(PATH)
        n_size = model_dict["final_layer.weight"].size()[1]
        opponent_net = New_Dual_Net(n_size, hidden_num=args.hidden_num[1])
        opponent_net.load_state_dict(model_dict)

    if torch.cuda.is_available() and cuda_flg:
        net = net.cuda()
        opponent_net = opponent_net.cuda() if opponent_net is not None else None
        print("cuda is available.")
    #net.zero_grad()
    deck_sampling_type = False
    if args.deck is not None:
        deck_sampling_type = True
    G = Game()
    net.cpu()
    t3 = datetime.datetime.now()
    if args.greedy_mode is not None:
        p1 = Player(9, True, policy=Dual_NN_GreedyPolicy(origin_model=net))
    else:
        p1 = Player(9,
                    True,
                    policy=New_Dual_NN_Non_Rollout_OM_ISMCTSPolicy(
                        origin_model=net,
                        cuda=cuda_flg,
                        iteration=args.step_iter),
                    mulligan=Min_cost_mulligan_policy())
    #p1 = Player(9, True, policy=AggroPolicy())
    p1.name = "Alice"
    if fixed_opponent is not None:
        if fixed_opponent == "Aggro":
            p2 = Player(9,
                        False,
                        policy=AggroPolicy(),
                        mulligan=Min_cost_mulligan_policy())
        elif fixed_opponent == "OM":
            p2 = Player(9, False, policy=Opponent_Modeling_ISMCTSPolicy())
        elif fixed_opponent == "NR_OM":
            p2 = Player(9,
                        False,
                        policy=Non_Rollout_OM_ISMCTSPolicy(iteration=200),
                        mulligan=Min_cost_mulligan_policy())
        elif fixed_opponent == "ExItGreedy":
            tmp = opponent_net if opponent_net is not None else net
            p2 = Player(9,
                        False,
                        policy=Dual_NN_GreedyPolicy(origin_model=tmp))
        elif fixed_opponent == "Greedy":
            p2 = Player(9,
                        False,
                        policy=New_GreedyPolicy(),
                        mulligan=Simple_mulligan_policy())
        elif fixed_opponent == "Random":
            p2 = Player(9,
                        False,
                        policy=RandomPolicy(),
                        mulligan=Simple_mulligan_policy())
    else:
        assert opponent_net is not None
        p2 = Player(9,
                    False,
                    policy=New_Dual_NN_Non_Rollout_OM_ISMCTSPolicy(
                        origin_model=opponent_net, cuda=cuda_flg),
                    mulligan=Min_cost_mulligan_policy())
    # p2 = Player(9, False, policy=RandomPolicy(), mulligan=Min_cost_mulligan_policy())
    p2.name = "Bob"
    Battle_Result = {}
    deck_list = tuple(map(int, args.deck_list.split(",")))
    print(deck_list)
    test_deck_list = deck_list  # (0,1,4,10,13)
    test_deck_list = tuple(itertools.product(test_deck_list, test_deck_list))
    test_episode_len = evaluate_num  #100
    episode_num = evaluate_num
    match_num = len(test_deck_list)
    manager = Manager()
    shared_array = manager.Array("i",
                                 [0 for _ in range(3 * len(test_deck_list))])
    iter_data = [(p1, p2, shared_array, episode_num, p_id, test_deck_list)
                 for p_id in range(p_size)]
    freeze_support()
    p1_name = p1.policy.name.replace("origin", args.model_name)
    if args.opponent_model_name is not None:
        p2_name = p2.policy.name.replace("origin", args.opponent_model_name)
    else:
        p2_name = p2.policy.name.replace("origin", args.model_name)
    print(p1_name)
    print(p2_name)
    pool = Pool(p_size, initializer=tqdm.set_lock,
                initargs=(RLock(), ))  # max number of processes: 8
    memory = pool.map(multi_battle, iter_data)
    pool.close()  # add this.
    pool.terminate()  # add this.
    print("\n" * (match_num + 1))
    memory = list(memory)
    min_WR = 1.0
    Battle_Result = {(deck_id[0], deck_id[1]): \
                         tuple(shared_array[3*index+1:3*index+3]) for index, deck_id in enumerate(test_deck_list)}
    print(shared_array)
    txt_dict = {}
    for key in sorted(list((Battle_Result.keys()))):
        cell = "{}:WR:{:.2%},first_WR:{:.2%}"\
              .format(key,Battle_Result[key][0]/test_episode_len,2*Battle_Result[key][1]/test_episode_len)
        print(cell)
        txt_dict[key] = cell
    print(Battle_Result)
    #     result_name = "{}:{}_{}".format(model_name.split(".")[0],args.deck_list,)
    #     result_name = model_name.split(".")[0] + ":" + args.deck_list + ""
    deck_num = len(deck_list)
    # os.makedirs("Battle_Result", exist_ok=True)
    with open("Battle_Result/" + result_name, "w") as f:
        writer = csv.writer(f, delimiter='\t', lineterminator='\n')
        row = ["{} vs {}".format(p1_name, p2_name)]
        deck_names = [deck_id_2_name[deck_list[i]] for i in range(deck_num)]
        row = row + deck_names
        writer.writerow(row)
        for i in deck_list:
            row = [deck_id_2_name[i]]
            for j in deck_list:
                row.append(Battle_Result[(i, j)])
            writer.writerow(row)
        for key in list(txt_dict.keys()):
            writer.writerow([txt_dict[key]])
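
The evaluation fans the deck matchups out over a Pool whose workers accumulate win counts in a Manager-backed shared array, which the parent then reads back per matchup. A minimal sketch of that bookkeeping pattern, with a coin flip standing in for an actual game:

from multiprocessing import Pool, Manager
import random


def run_match(args):
    shared, slot, n_games = args
    for _ in range(n_games):
        if random.random() < 0.5:  #placeholder for playing out a real game
            shared[slot] += 1      #win counter for this matchup


if __name__ == "__main__":
    manager = Manager()
    shared = manager.Array("i", [0] * 4)  #one win counter per matchup
    jobs = [(shared, slot, 10) for slot in range(4)]
    with Pool(4) as pool:
        pool.map(run_match, jobs)
    print([shared[i] for i in range(4)])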
Example #30
                               table_prep_params['LENGTH_PER_CELL'])
            for i in range(0, table_prep_params['MAX_ROW_LEN'] - rows):
                table.append(
                    [['<PAD>'] * table_prep_params['LENGTH_PER_CELL']] *
                    table_prep_params['MAX_COL_LEN'])
            return table

        def table_words2index(tables):
            w2i = {w: i for i, w in enumerate(vocab)}
            for i, t in enumerate(tables):
                tables[i] = np.vectorize(lambda y: w2i[y])(
                    np.array(t)).tolist()
            return tables

        p = Pool(processes=40)
        X = p.map(pad_table, X)
        p.close()
        p.join()
        X = table_words2index(X)
        X = np.array(X)
        print(X.shape)
        savepkl('./data/xp_2D_10-50_pad.pkl', X)
    else:
        device = torch.device(
            f"cuda:{1}" if torch.cuda.is_available() else 'cpu')
        dataset = T2VDataset(X, y, vocab, device, config)
        dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
        X_, y_ = next(iter(dataloader))
        print(X_.shape, y_.shape)

    print(time.time() - start)
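
The truncated snippet above pads every table to fixed row and column sizes and maps that padding over a process pool. A self-contained sketch of the same per-item padding pattern, with tiny made-up dimensions in place of table_prep_params and single tokens instead of per-cell token lists:

from multiprocessing import Pool

MAX_ROWS, MAX_COLS = 4, 3  #stand-ins for MAX_ROW_LEN / MAX_COL_LEN


def pad_table(table):
    #Pad each row to MAX_COLS cells, then add all-<PAD> rows up to MAX_ROWS
    padded = [row + ['<PAD>'] * (MAX_COLS - len(row)) for row in table]
    padded += [['<PAD>'] * MAX_COLS] * (MAX_ROWS - len(padded))
    return padded


if __name__ == "__main__":
    tables = [[['a', 'b'], ['c']], [['d', 'e', 'f']]]
    with Pool(2) as pool:
        padded = pool.map(pad_table, tables)
    print(padded[0])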