Example no. 1
def run(args):
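    # Compare the two coefficient schemes on BinaryNet, sweeping the IDP
    # ratio from 10% to 100% and recording classification accuracy.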
    train, test = util.get_dataset(args.dataset)
    names = ['all-one (standard)', 'linear']
    colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
    models = [
        BinaryNet.BinaryConvNet(10, cg.uniform, 'all'),
        BinaryNet.BinaryConvNet(10, cg.linear, 'slow_exp'),
    ]
    comp_ratios = np.linspace(0.1, 1.0, 20)
    acc_dict = {}
    ratios_dict = {}
    for name, model in zip(names, models):
        util.load_or_train_model(model, train, test, args)
        acc_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
        ratios_dict[name] = [100. * cr for cr in comp_ratios]

    filename = "BinaryNet_{}".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            title='BinaryNet (MNIST)',
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            ylim=(90, 100))
Example no. 2
def GeneticAlgorithm(popSize, maksgenerasi):
    allBestFitness = []
    for p in fpopulasi(popSize):
        populasi = p
    generasi = 0
    while generasi < maksgenerasi:
        generasi += 1

        # Select parents, cross them over and append the offspring
        for _ in range(int(popSize / 2)):
            populasi.append(next(Reproduction(populasi)))

        # Remove genomes whose fitness exceeds the weakness threshold
        populasi = [gen for gen in populasi if gen.fitness <= weaknes]

        averageFitness = np.round(
            np.sum([gen.fitness for gen in populasi]) / len(populasi), 2)
        genfix = next(genterbaik(populasi))
        print("\n")
        print(
            f"Generation: {generasi}\nPopulation size: {len(populasi)}\n"
            f"Average fitness: {averageFitness}\nBest fitness: {genfix.fitness}"
        )

        allBestFitness.append(genfix.fitness)

    # Visualize
    plot(generasi, allBestFitness, genfix, kordinat)
Example no. 3
def main(jobid, slurm_start, slurm_end, user):
    # Collect the configured measurements for the given SLURM job window,
    # then plot them and write them out as CSV.

    dataset = []
    for db in config.measurements_databases:
        # dataset.append({db: []})
        database = []
        for group in config.measurements:
            collection = {
                'unit': group['unit'],
                'name': group['name'],
                'database': db['name'],
                'measurements': []
            }
            #            measurements = []
            for measurement in group['measurements']:
                table = db['entry'] + '."' + measurement['value'] + '"'
                data = getDataFromDb(slurm_start, slurm_end, user, table)
                data = filterDataByJobId(jobid, slurm_start, data,
                                         measurement['value'])
                collection['measurements'].append({
                    'name': measurement['name'],
                    'value': data
                })
            database.append(collection)
        dataset.append(database)
    visualize.plot(jobid, dataset)
    file_writer.write_csv(jobid, dataset)
Example no. 4
def run(args):
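    # Train a two-profile VGG-16 and sweep the IDP ratio separately for
    # each profile, recording accuracy per profile.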
    train, test = util.get_dataset(args.dataset)
    names = ['linear']
    colors = [vz.colors.linear_sm, vz.colors.linear_lg]
    models = [VGG.VGGMulti(10, cg.linear, profiles=[(0, 2), (2, 10)])]
    comp_ratios = np.linspace(0.1, 1, 20)
    acc_dict = {}
    ratios_dict = {}
    key_names = []
    for name, model in zip(names, models):
        util.train_model_profiles(model, train, test, args)
        for profile in range(len(model.profiles)):
            key = name + '-' + str(profile + 1)
            key_names.append(key)
            acc_dict[key] = util.sweep_idp(model,
                                           test,
                                           comp_ratios,
                                           args,
                                           profile=profile)
            ratios_dict[key] = [100. * cr for cr in comp_ratios]

    filename = "VGG_{}_multi".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            key_names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            title='VGG-16 (CIFAR-10)',
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)')
Example no. 5
def run(args):
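    # Compare four coefficient schemes on the MLP, sweeping the IDP ratio
    # for each and plotting the resulting accuracy curves.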
    train, test = util.get_dataset(args.dataset)
    names = ['all-one', 'harmonic', 'linear', 'half_one']
    colors = [
        vz.colors.all_one_lg, vz.colors.harmonic_lg, vz.colors.linear_lg,
        vz.colors.half_one_lg
    ]
    models = [
        MLP.MLP(10, cg.uniform),
        MLP.MLP(10, cg.harmonic),
        MLP.MLP(10, cg.linear),
        MLP.MLP(10, cg.uniform_exp),
    ]
    comp_ratios = np.linspace(0.1, 1.0, 20)
    acc_dict = {}
    ratios_dict = {}
    for name, model in zip(names, models):
        util.load_or_train_model(model, train, test, args)
        acc_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
        ratios_dict[name] = [100. * cr for cr in comp_ratios]

    filename = "MLP_coef_comparison_{}".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            title='MLP (MNIST)',
            legend_loc='lower right',
            ylim=(85, 100))
Example no. 6
def GeneticAlgorithm(popSize, maxGeneration, points):
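    # Evolve the population for maxGeneration generations, tracking the best
    # genome seen so far and its fitness in each generation.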
    global obstacles
    obstacles = points
    start = time.perf_counter()
    generation = 0
    population = CreateNewPopulation(popSize)
    startingGenome = prim(cityCoordinates)
    leadingFitnesses = []
    leadingGenome = Genome()
    bestGenome = Genome()

    while generation < maxGeneration:
        generation += 1
        population.sort(key=lambda x: x.fitness, reverse=False)
        naturalSelection(population, popSize)
        mutatePopulation(population, maxGeneration - generation,
                         MUTATION_RATE_POINT, MUTATION_RATE_DELETE,
                         MUTATION_RATE_MOVE_POINT)
        leadingGenome = copy.deepcopy(findBestGenome(population))
        leadingFitnesses.append(leadingGenome.fitness)
        average_fitness = round(
            np.sum([genom.fitness for genom in population]) / len(population),
            2)
        if (leadingGenome.fitness < bestGenome.fitness):
            bestGenome = copy.deepcopy(leadingGenome)
        print(
            "Generation: {0}\t\t Max Generation: {1}\nPopulation Size: {2}\t Average Fitness: {3}\nBest Fitness: {4}\n\n\n"
            .format(generation, maxGeneration, len(population),
                    average_fitness, bestGenome.fitness))
    plot(int(time.perf_counter() - start), generation, leadingFitnesses,
         bestGenome, startingGenome, obstacles)
Example no. 7
def GeneticAlgorithm(popSize, maxGeneration):
    allBestFitness = []
    population = CreateNewPopulation(popSize)
    generation = 0
    while generation < maxGeneration:
        generation += 1

        for i in range(int(popSize / 2)):
            # Select parent, make crossover and
            # after, append in population a new child
            population.append(Reproduction(population))

        # Remove genomes above the weakness threshold
        population = [genom for genom in population
                      if genom.fitness <= WEAKNESS_THRESHOLD]

        averageFitness = round(
            np.sum([genom.fitness for genom in population]) / len(population),
            2)
        bestGenome = findBestGenome(population)
        print("\n" * 5)
        print(
            "Generation: {0}\nPopulation Size: {1}\t Average Fitness: {2}\nBest Fitness: {3}"
            .format(generation, len(population), averageFitness,
                    bestGenome.fitness))

        allBestFitness.append(bestGenome.fitness)

    # Visualize
    plot(generation, allBestFitness, bestGenome, cityCoordinates)
Example no. 8
def main():
    if (len(argv) < 2):
        print("Provide input tmd5")
        exit(1)

    inf = argv[1]
    outf = "test.png"

    pts = extract(inf)
    grid, error = search(pts)
    print("Use a {} grid. Cost: {}".format(grid, error))

    plot(grid, pts, outf)
Example no. 9
def train(epochs=20):
    '''
    Train the RNN and save the model

    Inputs
    ------
    epochs: the number of rounds we train on the whole dataset
    '''
    iters = 0
    for epoch in range(epochs):

        batch_num = 0
        losses = []
        h = net.blank_hidden(batch_size)
        for x, y in batches(X, Y, batch_size, seq_size):

            # use network predictions to compute loss
            h = tuple([state.detach() for state in h])
            out, h = net(x, h)
            loss = F.cross_entropy(out.transpose(1, 2), y)

            # optimization step
            opt.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(net.parameters(), grad_norm)
            opt.step()

            # print training progress
            progress(batch_num, num_batches, iters, epochs * num_batches,
                     epoch + 1)

            # bookkeeping
            losses.append(loss.detach())  # detach so the graph isn't retained across the epoch
            batch_num += 1
            iters += 1

        # plot loss after every epoch
        plot(epoch + 1,
             torch.stack(losses),
             'Loss',
             'Training',
             '#5DE58D',
             refresh=False)
        plot(epoch + 1, validation_loss(), 'Loss', 'Validation', '#4AD2FF')

        # save the model occasionally
        if epoch % save_iter == save_iter - 1:
            save_model(net, filename, epoch + 1)

    # save model at the end of training
    save_model(net, filename, 'final')
Example no. 10
def firstExperiment(st):
    from visualize import getWave_pivot_jump as gpj, getMax

    for i in range(2):
        print('recording...')
        fr = record(st)
        print(fr)

        mx, idx = getMax(fr)
        trim, L = gpj(fr, pivot=idx, outPick=True)
        sd.play(trim)
        vz.plot(fr)
        vz.plot(trim, L)
        vz.show()
Example no. 11
def main(args):

    env = gym.make("FourierSeries-v0", config_path="config.py")

    data = {
        "train_input": [],
        "train_target": [],
        "test_input": [],
        "test_target": []
    }

    # Create a new model
    model = Model(device=cfg.device)

    # Start training
    for i in range(args.steps):
        print("STEP:", i)

        # 1) Get data
        data["train_input"], data["train_target"] = getDataBatch(
            env)  # Use different data for \
        data["test_input"], data["test_target"] = getDataBatch(
            env)  # training and testing...
        unfiltered_test_input = data["test_input"]  # for visualization

        # 2) Preprocess data: filter it
        for batch_name, batch_data in data.items():
            data[batch_name] = preprocessBatch(batch_data, n=5)

        # 3) Train the model with collected data
        model.train(data["train_input"], data["train_target"])

        # 4) Check how the model is performing
        y = model.predict(data["test_input"], data["test_target"])

        # 5) Visualize performance
        if args.make_plots:
            plot(unfiltered_test_input, data["test_input"], y[:, 1, :], i,
                 args.invert)

    # Save outcome
    torch.save(model.seq.state_dict(), f"{cfg.data_dir}/weights.mdl")
Example no. 12
def post_temperature(tweet):

    now = dt.now()

    media_ids = []
    for key in 'cpu', 'disk':
        d = now.strftime(dt_fmt_date)

        title = "Temperature(%s): %s" % (key, d)
        pngfile = os.path.join('/tmp/', "temperature-%s-%s.png" % (key, d))
        plot(key, now, title, pngfile, inc24h=True)

        data = post_tweet_media(pngfile)
        media_ids.append(data["media_id_string"])

    s = get_current_temperature()
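    # Tweet text (Japanese): "As of HH:MM:SS, the current temperatures are
    # cpu: X degrees, disk: Y degrees!"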
    msg = "%s現在の温度は、cpu: %s度, disk: %s度です!" % \
           (now.strftime('%H時%M分%S秒'), s.data['cpu'], s.data['disk'])

    post_reply(tweet, msg, ','.join(media_ids))
Example no. 13
def GeneticAlgorithm(popSize, maxGeneration):
    allBestFitness = []
    population = CreateNewPopulation(popSize)
    generation = 0
    netSize = int(len(population))
    firstGen = findBestGenome(population)
    lowestGenome = Genome()
    while generation < maxGeneration:
        generation += 1

        for i in range(netSize):
            if random.randrange(0, 100) < REPRODUCTION_RATE:
                population.append(Reproduction(population))

        for genom in population:
            if random.randrange(0, 100) < MUTATION_RATE:
                genom.chromosomes = SwapMutation(genom.chromosomes)
                genom.fitness = Evaluate(genom.chromosomes)

        # Trim the population back to its allowed size by repeatedly
        # removing the worst (highest-fitness) genome
        while (len(population) >
               popSize + generation * POPULATION_GROWTH_RATE):
            population.remove(max(population, key=lambda y: y.fitness))
        averageFitness = round(
            np.sum([genom.fitness for genom in population]) / len(population),
            2)
        bestGenome = findBestGenome(population)
        if (lowestGenome.fitness > bestGenome.fitness):
            lowestGenome = bestGenome
        print("\n" * 5)
        print(
            "Generation: {0}\t\t Max Generation: {1}\nPopulation Size: {2}\t Average Fitness: {3}\nBest Fitness: {4}"
            .format(generation, maxGeneration, len(population), averageFitness,
                    lowestGenome.fitness))
        allBestFitness.append(bestGenome.fitness)

    plot(generation, allBestFitness, lowestGenome, cityCoordinates, firstGen)
Example no. 14
def run(args):
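    # Train a two-profile MLP and sweep the IDP ratio separately for each profile.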
    train, test = util.get_dataset(args.dataset)
    # names = ['all-ones,exp', 'all-ones,all', 'linear,exp', 'linear,all']
    names = ['linear']
    colors = [vz.colors.linear_sm, vz.colors.linear_lg]
    models = [
        MLP.MLP(10, cg.linear, [(0, 3), (3, 10)], n_units=100),
    ]
    comp_ratios = np.linspace(0.1, 1, 20)
    acc_dict = {}
    ratios_dict = {}
    key_names = []
    for name, model in zip(names, models):
        util.train_model_profiles(model, train, test, args)
        for profile in range(len(model.profiles)):
            key = name + '-' + str(profile + 1)
            key_names.append(key)
            acc_dict[key] = util.sweep_idp(model,
                                           test,
                                           comp_ratios,
                                           args,
                                           profile=profile)
            ratios_dict[key] = [100. * cr for cr in comp_ratios]

    filename = "MLP_{}_multi".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            key_names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            title='MLP (MNIST)',
            ylim=(85, 100))
Example no. 15
def main():
    print("######")
    print("HELLO! Returns start with infinity values")
    print("######")

    os.environ['OMP_NUM_THREADS'] = '1'

    if args.random_task:
        env_params = {
            'wt': np.round(np.random.uniform(0.5, 1.0), 2),
            'x': np.round(np.random.uniform(-0.1, 0.1), 2),
            'y': np.round(np.random.uniform(-0.1, 0.1), 2),
            'z': np.round(np.random.uniform(0.15, 0.2), 2),
        }
    else:
        env_params = {
            'wt': args.euclidean_weight,
            'x': args.goal_x,
            'y': args.goal_y,
            'z': args.goal_z,
        }
    envs = [make_env(args.env_name, args.seed, i, args.log_dir, **env_params)
            for i in range(args.num_processes)]

    if args.num_processes > 1:
        envs = SubprocVecEnv(envs)
    else:
        envs = DummyVecEnv(envs)

    envs = VecNormalize(envs, ob=False)

    obs_shape = envs.observation_space.shape
    obs_shape = (obs_shape[0] * args.num_stack, *obs_shape[1:])

    if len(envs.observation_space.shape) == 3:
        actor_critic = CNNPolicy(obs_shape[0], envs.action_space, args.recurrent_policy)
    else:
        assert not args.recurrent_policy, \
            "Recurrent policy is not implemented for the MLP controller"
        actor_critic = MLPPolicy(obs_shape[0], envs.action_space)

    if envs.action_space.__class__.__name__ == "Discrete":
        action_shape = 1
    else:
        action_shape = envs.action_space.shape[0]

    if args.cuda:
        actor_critic.cuda()

    if args.algo == 'a2c':
        optimizer = optim.RMSprop(actor_critic.parameters(), args.lr, eps=args.eps, alpha=args.alpha)
    elif args.algo == 'ppo':
        optimizer = optim.Adam(actor_critic.parameters(), args.lr, eps=args.eps)
    elif args.algo == 'acktr':
        optimizer = KFACOptimizer(actor_critic)

    rollouts = RolloutStorage(args.num_steps, args.num_processes, obs_shape, envs.action_space, actor_critic.state_size)
    current_obs = torch.zeros(args.num_processes, *obs_shape)

    def update_current_obs(obs):
        shape_dim0 = envs.observation_space.shape[0]
        obs = torch.from_numpy(obs).float()
        if args.num_stack > 1:
            current_obs[:, :-shape_dim0] = current_obs[:, shape_dim0:]
        current_obs[:, -shape_dim0:] = obs

    obs = envs.reset()
    update_current_obs(obs)

    rollouts.observations[0].copy_(current_obs)

    # These variables are used to compute average rewards for all processes.
    episode_rewards = torch.zeros([args.num_processes, 1])
    final_rewards = torch.zeros([args.num_processes, 1])

    if args.cuda:
        current_obs = current_obs.cuda()
        rollouts.cuda()

    actor_critic.input_norm.update(rollouts.observations[0])

    last_return = -np.inf
    best_return = -np.inf
    best_models = None

    start = time.time()
    for j in range(num_updates):
        for step in range(args.num_steps):
            # Sample actions
            value, action, action_log_prob, states = actor_critic.act(Variable(rollouts.observations[step], volatile=True),
                                                                      Variable(rollouts.states[step], volatile=True),
                                                                      Variable(rollouts.masks[step], volatile=True))
            cpu_actions = action.data.squeeze(1).cpu().numpy()

            # Observe reward and next obs
            obs, reward, done, info = envs.step(cpu_actions)
            reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()
            episode_rewards += reward

            # If done then clean the history of observations.
            masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
            final_rewards *= masks
            final_rewards += (1 - masks) * episode_rewards
            episode_rewards *= masks

            if args.cuda:
                masks = masks.cuda()

            if current_obs.dim() == 4:
                current_obs *= masks.unsqueeze(2).unsqueeze(2)
            else:
                current_obs *= masks

            update_current_obs(obs)
            rollouts.insert(step, current_obs, states.data, action.data, action_log_prob.data, value.data, reward, masks)
            actor_critic.input_norm.update(rollouts.observations[step + 1])

        next_value = actor_critic(Variable(rollouts.observations[-1], volatile=True),
                                  Variable(rollouts.states[-1], volatile=True),
                                  Variable(rollouts.masks[-1], volatile=True))[0].data

        rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.tau)

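        # Update phase: A2C/ACKTR take a single full-batch gradient step,
        # while PPO runs several epochs of clipped-surrogate mini-batch updates.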
        if args.algo in ['a2c', 'acktr']:
            values, action_log_probs, dist_entropy, states = actor_critic.evaluate_actions(Variable(rollouts.observations[:-1].view(-1, *obs_shape)),
                                                                                           Variable(rollouts.states[0].view(-1, actor_critic.state_size)),
                                                                                           Variable(rollouts.masks[:-1].view(-1, 1)),
                                                                                           Variable(rollouts.actions.view(-1, action_shape)))

            values = values.view(args.num_steps, args.num_processes, 1)
            action_log_probs = action_log_probs.view(args.num_steps, args.num_processes, 1)

            advantages = Variable(rollouts.returns[:-1]) - values
            value_loss = advantages.pow(2).mean()

            action_loss = -(Variable(advantages.data) * action_log_probs).mean()

            if args.algo == 'acktr' and optimizer.steps % optimizer.Ts == 0:
                # Sampled fisher, see Martens 2014
                actor_critic.zero_grad()
                pg_fisher_loss = -action_log_probs.mean()

                value_noise = Variable(torch.randn(values.size()))
                if args.cuda:
                    value_noise = value_noise.cuda()

                sample_values = values + value_noise
                vf_fisher_loss = -(values - Variable(sample_values.data)).pow(2).mean()

                fisher_loss = pg_fisher_loss + vf_fisher_loss
                optimizer.acc_stats = True
                fisher_loss.backward(retain_graph=True)
                optimizer.acc_stats = False

            optimizer.zero_grad()
            (value_loss * args.value_loss_coef + action_loss - dist_entropy * args.entropy_coef).backward()

            if args.algo == 'a2c':
                nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)

            optimizer.step()
        elif args.algo == 'ppo':
            advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)

            for e in range(args.ppo_epoch):
                if args.recurrent_policy:
                    data_generator = rollouts.recurrent_generator(advantages,
                                                            args.num_mini_batch)
                else:
                    data_generator = rollouts.feed_forward_generator(advantages,
                                                            args.num_mini_batch)

                for sample in data_generator:
                    observations_batch, states_batch, actions_batch, \
                       return_batch, masks_batch, old_action_log_probs_batch, \
                            adv_targ = sample

                    # Reshape to do in a single forward pass for all steps
                    values, action_log_probs, dist_entropy, states = actor_critic.evaluate_actions(Variable(observations_batch),
                                                                                                   Variable(states_batch),
                                                                                                   Variable(masks_batch),
                                                                                                   Variable(actions_batch))

                    adv_targ = Variable(adv_targ)
                    ratio = torch.exp(action_log_probs - Variable(old_action_log_probs_batch))
                    surr1 = ratio * adv_targ
                    surr2 = torch.clamp(ratio, 1.0 - args.clip_param, 1.0 + args.clip_param) * adv_targ
                    action_loss = -torch.min(surr1, surr2).mean() # PPO's pessimistic surrogate (L^CLIP)

                    value_loss = (Variable(return_batch) - values).pow(2).mean()

                    optimizer.zero_grad()
                    (value_loss + action_loss - dist_entropy * args.entropy_coef).backward()
                    nn.utils.clip_grad_norm(actor_critic.parameters(), args.max_grad_norm)
                    optimizer.step()

        rollouts.after_update()

        if args.vis and j % args.vis_interval == 0:
            last_return = plot(logger, args.log_dir)

        if last_return > best_return:
            best_return = last_return
            try:
                os.makedirs(os.path.dirname(args.save_path))
            except OSError:
                pass

            info = {
                'return': best_return,
                'reward_norm': np.sqrt(envs.ret_rms.var + envs.epsilon)
            }

            # A really ugly way to save a model to CPU
            save_model = actor_critic
            if args.cuda:
                save_model = copy.deepcopy(actor_critic).cpu()

            torch.save((save_model, env_params, info), args.save_path)

        if j % args.log_interval == 0:
            end = time.time()
            total_num_steps = (j + 1) * args.num_processes * args.num_steps
            print("Updates {}, num timesteps {}, FPS {}, average return {:.5f}, best_return {:.5f}, value loss {:.5f}, policy loss {:.5f}".
                  format(j, total_num_steps,
                         int(total_num_steps / (end - start)),
                         last_return, best_return,
                         value_loss.data[0], action_loss.data[0]))
Example no. 16
File: test.py Project: tjliao/vault
import build
from analyze import tool
import visualize

class test(tool):

	def test(self, protein_obj):
		return self.__init__

pdb_filename = 'sarah.pdb'
protein_obj = build.protein()
protein_obj.read_residue_info(pdb_filename)
protein_obj.move_to_com()

aa = visualize.plot(protein_obj)
aa.plot()
Example no. 17
def main():
    if hvd.rank() == 0:
        logger.info("Logger is set - training start")

    # set default gpu device id
    # torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    # torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = False

    # get data with meta info
    (train_X, train_y), (valid_X, valid_y) = load_data()
    in_dim = np.shape(train_X)[1]
    out_dim = np.shape(train_y)[1]

    train_X, train_y = (torch.tensor(train_X,
                                     dtype=torch.float), torch.tensor(train_y))
    train_data = torch.utils.data.TensorDataset(train_X, train_y)

    valid_X, valid_y = (torch.tensor(valid_X,
                                     dtype=torch.float), torch.tensor(valid_y))
    valid_data = torch.utils.data.TensorDataset(valid_X, valid_y)
    print("in_dim: ", in_dim)
    print("out_dim: ", out_dim)

    net_crit = nn.MSELoss().to(device)
    layers = 1
    n_nodes = 4
    model = SearchFCNNController(in_dim,
                                 out_dim,
                                 layers,
                                 net_crit,
                                 n_nodes=n_nodes,
                                 device_ids=config.gpus)
    model = model.to(device)

    # weights optimizer
    # Scale the learning rate by the number of Horovod workers
    # (Adasum would not need this scaling).
    lr_scaler = hvd.size()
    # w_optim = torch.optim.SGD(
    #     model.weights(),
    #     config.w_lr * lr_scaler,
    #     momentum=config.w_momentum,
    #     weight_decay=config.w_weight_decay,
    # )
    w_optim = torch.optim.Adagrad(model.weights(),
                                  config.w_lr * lr_scaler,
                                  weight_decay=config.w_weight_decay)
    # w_optim = torch.optim.RMSprop(model.weights())

    # alphas optimizer
    alpha_lr = config.alpha_lr
    alpha_optim = torch.optim.Adam(
        model.alphas(),
        alpha_lr,
        betas=(0.5, 0.999),
        weight_decay=config.alpha_weight_decay,
    )

    # split data to train/validation
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_data, num_replicas=hvd.size(), rank=hvd.rank())
    # valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices_valid)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(
        valid_data, num_replicas=hvd.size(), rank=hvd.rank())
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=config.batch_size,
        sampler=train_sampler,
        num_workers=config.workers,
        pin_memory=True,
    )
    # vis.
    # dataiter = iter(train_loader)
    # images, labels = dataiter.next()
    # writer.add_graph(model, [images[0]])
    # writer.close()

    valid_loader = torch.utils.data.DataLoader(
        valid_data,
        batch_size=config.batch_size,
        sampler=valid_sampler,
        num_workers=config.workers,
        pin_memory=True,
    )
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_lr_min)
    architect = Architect(model,
                          config.w_momentum,
                          config.w_weight_decay,
                          allow_unused=False)

    # Horovod: broadcast parameters & optimizer state.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)
    hvd.broadcast_optimizer_state(w_optim, root_rank=0)

    # Horovod: (optional) compression algorithm.
    # compression = hvd.Compression.fp16

    # Horovod: wrap optimizer with DistributedOptimizer.
    w_optim = hvd.DistributedOptimizer(
        w_optim,
        named_parameters=model.named_parameters(),
        #  compression=compression,
        # op=hvd.Adasum,
        op=hvd.Average,
    )

    # training loop
    best_top1 = None
    epochs = config.epochs
    for epoch in range(epochs):
        lr = lr_scheduler.get_lr()[0]

        if hvd.rank() == 0:
            model.print_alphas(logger)

        # training
        train(
            train_loader,
            valid_loader,
            model,
            architect,
            w_optim,
            alpha_optim,
            lr,
            epoch,
            train_sampler,
        )
        lr_scheduler.step()

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step)

        top1 = metric_average(top1, name="avg_val_top1")

        if hvd.rank() == 0:
            # log
            # genotype
            genotype = model.genotype()
            logger.info("genotype = {}".format(genotype))

            # genotype as a image
            plot_path = "." + os.path.join(config.plot_path,
                                           "EP{:02d}".format(epoch + 1))
            caption = "Epoch {}".format(epoch + 1)
            plot(genotype.normal, plot_path + "-normal", caption)

            # save
            if best_top1 is None or best_top1 < top1:
                best_top1 = top1
                best_genotype = genotype
                is_best = True
            else:
                is_best = False
            # utils.save_checkpoint(model, "." + config.path, is_best)
            print("")

    if hvd.rank() == 0:
        best_genotype = model.genotype()

        with open("." + config.path + "/best_genotype.txt", "w") as f:
            f.write(str(best_genotype))

        logger.info("Final best TopR2@1 = {:.3f}".format(best_top1))
        logger.info("Best Genotype = {}".format(best_genotype))
Example no. 18
	weather_data = frost.get_as_aggregated(1, [2013, 2014, 2015, 2016, 2017, 2018, 2019])
	data = data.merge(weather_data, on=['year', 'orgnr'])

	normalize_cols = ['areal_tilskudd', 'levert_per_tilskudd', 'levert', 'lat', 'elevation', 'growth_start_day']
	data = data\
		.pipe(ku.merge_with_elevation_data)\
		.pipe(get_levert_per_tilskudd)\
		.pipe(ku.filter_extremes, 'levert_per_tilskudd')\
		.pipe(ku.normalize_by_keys, normalize_cols)\
		.pipe(ku.one_hot_column, 'komnr')\
		.pipe(ku.one_hot_column, 'year')\
		.pipe(lambda df_: df_.drop(['year', 'komnr'], axis=1))

	# Split into training and validation data
	y_column = ['levert_per_tilskudd']
	remove_from_training = ['orgnr', 'levert'] + y_column

	train, val = train_test_split(shuffle(data), test_size=0.2)
	val, test = train_test_split(val, test_size=0.2)
	train_x = train.drop(remove_from_training, axis=1).to_numpy()
	train_y = train[y_column].to_numpy()

	val_x = val.drop(remove_from_training, axis=1).to_numpy()
	val_y = val[y_column].to_numpy()

	model = train_simple_dense(train_x, train_y, val_x, val_y)

	area_type = "areal grant"
	plot(model, val_x, val_y)
	generate_alternative_outcomes(test, model, y_column, remove_from_training, area_type, 20)
Example no. 19
def main():
    parser = argparse.ArgumentParser(
        description='Support Vector Classification Model')
    parser.add_argument('--embedding',
                        default='embedding/glove.6B.50d.subset.oov.vec',
                        help='Path to the embedding')
    parser.add_argument('--train',
                        default='data/train_labeled.tsv',
                        help='Path to training data')
    parser.add_argument('--unlabeled',
                        default='data/train_unlabeled.tsv',
                        help='data to sample from')
    parser.add_argument('--test',
                        default='data/test.tsv',
                        help='Path to test data')
    parser.add_argument(
        '--sampling',
        default='random',
        help='active learning heuristic for uncertainty sampling')
    parser.add_argument('--predict',
                        default='results/svm-al-random.result',
                        help='Path to the prediction file')
    parser.add_argument('--al_history',
                        default='results/svm-al-random.history',
                        help='Our active learning history')
    parser.add_argument('--max_iter',
                        type=int,
                        default=500,
                        help='Maximal number of active learning iterations')
    parser.add_argument('--c',
                        type=float,
                        default=2,
                        help='C parameter for SVM')
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help='Random seed for random sampling')

    args = parser.parse_args()
    embedding, embed_dim = dp.load_word2vec_embedding(args.embedding)
    predict_file = args.predict
    c = args.c

    # Data format is:
    # Pre-word \t Gap-word \t Suc-word \t Error
    X_train, y_train = dp.load_data(args.train, textindex=1, labelindex=0)
    X_test, y_test = dp.load_data(args.test, textindex=1, labelindex=0)

    # Active learning data
    X_active, y_active = dp.load_data(args.unlabeled,
                                      textindex=1,
                                      labelindex=0)

    # Get index-word/label dicts for lookup:
    vocab_dict = dp.get_index_dict(X_train + X_test + X_active)

    # Replace words / labels in the data by the according index
    vocab_dict_flipped = dict((v, k) for k, v in vocab_dict.items())

    # Get indexed data and labels
    X_train_index = [[vocab_dict_flipped[word] for word in chunk]
                     for chunk in X_train]
    X_test_index = [[vocab_dict_flipped[word] for word in chunk]
                    for chunk in X_test]

    # Active learning data
    X_active_index = [[vocab_dict_flipped[word] for word in chunk]
                      for chunk in X_active]

    print("Number of initial training documents: ", len(X_train))

    # Get embedding matrix:
    embed_matrix = dp.get_embedding_matrix(embedding, vocab_dict)

    # Use the simple count over all features in a single example:
    # Do average over word vectors:
    X_train_embedded = [
        np.mean([embed_matrix[element] for element in example], axis=0)
        for example in X_train_index
    ]
    X_test_embedded = [
        np.mean([embed_matrix[element] for element in example], axis=0)
        for example in X_test_index
    ]

    # Active learning
    X_active_embedded = [
        np.mean([embed_matrix[element] for element in example], axis=0)
        for example in X_active_index
    ]

    # Do active learning as long as there is data:
    pool_data = X_active_embedded[:]
    pool_labels = y_active[:]

    # Active learning results for visualization
    step, acc = [], []

    print("Loaded data.")

    iteration = 0
    outlog = open(args.predict, 'w')
    outlog.write('Iteration\tC\tAcc\n')

    activelog = open(args.al_history, 'w')
    activelog.write('Iteration\tLabel\tText\n')

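    # Active-learning loop: retrain the SVM, log test accuracy, then move one
    # sample from the unlabeled pool into the training set each iteration.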
    while len(pool_data) > 1 and iteration < args.max_iter:
        if len(X_train_embedded) % 50 == 0:
            print("Training on: ", len(X_train_embedded), " instances.")

        model_svm = SVC(C=c, kernel='linear', probability=True)
        model_svm.fit(X_train_embedded, y_train)

        pred = model_svm.predict(X_test_embedded)

        test_acc = accuracy_score(y_test, pred)

        outlog.write('{}\t{}\t{}\n'.format(iteration, c, test_acc))
        step.append(iteration)
        acc.append(test_acc)

        # Add data from the pool to the training set based on our active learning:
        al = Active_Learning(pool_data, model_svm, args.seed)
        if args.sampling == 'random':
            add_sample_data = al.get_random()
        else:
            add_sample_data = al.get_most_uncertain(args.sampling)

        # Get the data index from pool
        sample_index = dp.get_array_index(pool_data, add_sample_data)

        # Get the according label
        add_sample_label = pool_labels[sample_index]

        # Add the results to our learning history
        activelog.write('{}\t{}\t{}\n'.format(
            iteration, add_sample_label, ' '.join(X_active[sample_index])))

        # Add it to the training pool
        X_train_embedded.append(add_sample_data)
        y_train.append(add_sample_label)

        # Remove labeled data from pool
        del pool_labels[sample_index]
        del pool_data[sample_index]

        iteration += 1

    outlog.close()

    vz.plot(step, acc)

    print("Done")
Example no. 20
def main():
    start = time.time()
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    torch.cuda.set_device(config.local_rank % len(config.gpus))
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    config.world_size = torch.distributed.get_world_size()
    config.total_batch = config.world_size * config.batch_size

    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)
    torch.backends.cudnn.benchmark = True

    CLASSES = 1000
    channels = [32, 16, 24, 40, 80, 96, 192, 320, 1280]
    steps = [1, 1, 2, 3, 4, 3, 3, 1, 1]
    strides = [2, 1, 2, 2, 1, 2, 1, 1, 1]

    criterion = nn.CrossEntropyLoss()
    criterion_latency = LatencyLoss(channels[2:9], steps[2:8], strides[2:8])
    criterion = criterion.cuda(config.gpus)
    criterion_latency = criterion_latency.cuda(config.gpus)
    model = Network(channels, steps, strides, CLASSES, criterion)
    model = model.to(device)
    #model = DDP(model, delay_allreduce=True)
    # A custom loss cannot use model.parameters() on an apex-wrapped model;
    # see https://github.com/NVIDIA/apex/issues/457
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[config.local_rank], output_device=config.local_rank)
    logger.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(model.parameters(),
                                config.w_lr,
                                momentum=config.w_momentum,
                                weight_decay=config.w_weight_decay)

    train_data = get_imagenet_iter_torch(
        type='train',
        # image_dir="/googol/atlas/public/cv/ILSVRC/Data/"
        # use soft link `mkdir ./data/imagenet && ln -s /googol/atlas/public/cv/ILSVRC/Data/CLS-LOC/* ./data/imagenet/`
        image_dir=config.data_path + config.dataset.lower(),
        batch_size=config.batch_size,
        num_threads=config.workers,
        world_size=config.world_size,
        local_rank=config.local_rank,
        crop=224,
        device_id=config.local_rank,
        num_gpus=config.gpus,
        portion=config.train_portion)
    valid_data = get_imagenet_iter_torch(
        type='train',
        # image_dir="/googol/atlas/public/cv/ILSVRC/Data/"
        # use soft link `mkdir ./data/imagenet && ln -s /googol/atlas/public/cv/ILSVRC/Data/CLS-LOC/* ./data/imagenet/`
        image_dir=config.data_path + "/" + config.dataset.lower(),
        batch_size=config.batch_size,
        num_threads=config.workers,
        world_size=config.world_size,
        local_rank=config.local_rank,
        crop=224,
        device_id=config.local_rank,
        num_gpus=config.gpus,
        portion=config.val_portion)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(config.epochs), eta_min=config.w_lr_min)

    if len(config.gpus) > 1:
        architect = Architect(model.module, config)
    else:
        architect = Architect(model, config)

    best_top1 = 0.
    for epoch in range(config.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]
        logger.info('epoch %d lr %e', epoch, lr)

        #print(F.softmax(model.alphas_normal, dim=-1))
        #print(F.softmax(model.alphas_reduce, dim=-1))

        # training
        train_top1, train_loss = train(train_data, valid_data, model,
                                       architect, criterion, criterion_latency,
                                       optimizer, lr, epoch, writer)
        logger.info('Train top1 %f', train_top1)

        # validation
        top1 = 0
        if config.epochs - epoch <= 1:
            top1, loss = infer(valid_data, model, epoch, criterion, writer)
            logger.info('valid top1 %f', top1)

        if len(config.gpus) > 1:
            genotype = model.module.genotype()
        else:
            genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as a image
        plot_path = os.path.join(config.plot_path,
                                 "EP{:02d}".format(epoch + 1))
        caption = "Epoch {}".format(epoch + 1)
        plot(genotype.normal, plot_path + "-normal")
        plot(genotype.reduce, plot_path + "-reduce")
        # save
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)
        print("")

    utils.time(time.time() - start)
    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
Example no. 21
    def plot_genotype(self, plot_path, caption):
        plot(self.genotype().normal, plot_path + '-normal',
             caption + '-normal')
        plot(self.genotype().reduce, plot_path + '-reduce',
             caption + '-reduce')
Example no. 22
def main():
    logger.info("Logger is set - training start")

    # set gpu device id
    torch.cuda.set_device(config.gpu)

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data = utils.get_data(
        config.train_data,
        config.train_label,
        config.data_path,
        logger,
        cutout_length=0,
        validation=False)

    net_crit = nn.BCEWithLogitsLoss().to(device)
    model = SearchCNN(input_channels, config.init_channels, n_classes,
                      config.layers, net_crit)

    try:
        logger.info("all gpus: {}".format(torch.cuda.device_count()))
        # model = nn.DataParallel(model, device_ids=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] )
        model = nn.DataParallel(model,
                                device_ids=range(torch.cuda.device_count()))
        # model = nn.DataParallel(model)
        # torch.distributed.init_process_group(backend="nccl")
        # model = torch.nn.DistributedDataParallel(model)
        # logger.info('1')

        model = model.to(device)
        # logger.info('2')

        # weights optimizer
        w_optim = torch.optim.SGD(model.module.weights(),
                                  config.w_lr,
                                  momentum=config.w_momentum,
                                  weight_decay=config.w_weight_decay)
        # logger.info('3')

        # alphas optimizer
        alpha_optim = torch.optim.Adam(model.module.alphas(),
                                       config.alpha_lr,
                                       betas=(0.5, 0.999),
                                       weight_decay=config.alpha_weight_decay)
        # logger.info('4')

        # split data to train/validation
        n_train = len(train_data)
        split = int(0.8 * n_train)
        indices = list(range(n_train))
        shuffle(indices)
        trainIndices = indices[:split]
        testIndices = indices[split:]
        with open(config.data_path + "trainTestIndices.pickle",
                  "wb") as indicesFile:
            pickle.dump(trainIndices, indicesFile)
            pickle.dump(testIndices, indicesFile)

        with open(config.data_path + "trainTestIndices.pickle",
                  "rb") as indicesFile:
            trainIndices = pickle.load(indicesFile)
        n_train = len(trainIndices)
        split = n_train // 2

        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
            trainIndices[:split])
        valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
            trainIndices[split:])
        train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config.batch_size,
            sampler=train_sampler,
            num_workers=config.workers,
            pin_memory=True)
        valid_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=config.batch_size,
            sampler=valid_sampler,
            num_workers=config.workers,
            pin_memory=True)
        # logger.info('5')

        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            w_optim, config.epochs, eta_min=config.w_lr_min)
        # logger.info('6')

        architect = Architect(model, config.w_momentum, config.w_weight_decay)
        # logger.info('7')

        # training loop
        best_genotype = None
        best_top1 = 0.
        for epoch in range(config.epochs):
            lr_scheduler.step()
            lr = lr_scheduler.get_lr()[0]

            model.module.print_alphas()
            # logger.info('8')

            # training
            train(train_loader, valid_loader, model, architect, w_optim,
                  alpha_optim, lr, epoch)
            logger.info('9')

            # validation
            cur_step = (epoch + 1) * len(train_loader)
            top1 = validate(valid_loader, model, epoch, cur_step)
            logger.info('10')

            # log
            # genotype
            genotype = model.module.genotype()
            logger.info("genotype = {}".format(genotype))

            # genotype as a image
            plot_path = os.path.join(config.plot_path,
                                     "EP{:02d}".format(epoch + 1))
            caption = "Epoch {}".format(epoch + 1)
            plot(genotype.normal, plot_path + "-normal", caption)
            plot(genotype.reduce, plot_path + "-reduce", caption)

            # save
            if best_top1 < top1:
                best_top1 = top1
                best_genotype = genotype
                is_best = True
            else:
                is_best = False
            utils.save_checkpoint(model, config.path, is_best)
            print("")

        logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
        logger.info("Best Genotype = {}".format(best_genotype))
    except Exception as e:
        logger.info("error: {}".format(e))
Example no. 23
    0.336926, 0.44017, 0.504055, 0.547369, 0.574367, 0.456685, 0.549842,
    0.640625, 0.667227, 0.696598, 0.669699, 0.696796, 0.720827, 0.734672,
    0.728046, 0.752967, 0.700059, 0.747824, 0.74199, 0.755835, 0.744264,
    0.762362, 0.687994, 0.751483, 0.785305, 0.824763, 0.816653, 0.801721,
    0.813192, 0.80983, 0.798952, 0.802314, 0.800138, 0.81517, 0.797073,
    0.776998, 0.773339, 0.794798, 0.801523, 0.816851, 0.778778, 0.801226,
    0.805874, 0.784513, 0.781448, 0.794403, 0.794007, 0.794106, 0.740012,
    0.799644, 0.843453, 0.849684, 0.856903, 0.853145, 0.852848, 0.854826,
    0.852156, 0.856804, 0.855518, 0.854826, 0.850178, 0.853738, 0.854331,
    0.852749, 0.847805, 0.83485, 0.8125, 0.792326, 0.815071, 0.805578,
    0.795787, 0.805676, 0.823477, 0.814379, 0.823774, 0.851266, 0.856606,
    0.855419, 0.858089, 0.858386, 0.858089, 0.858089, 0.8571, 0.858485,
    0.848695, 0.859276, 0.854727, 0.854925, 0.859672, 0.850574, 0.855716,
    0.85621, 0.852354, 0.854233, 0.858287, 0.86165, 0.853837, 0.859869,
    0.86165, 0.859177
]

xs_dict = {'all-one': range(100), 'linear': range(100)}
epoch_dict = {'all-one': all_one, 'linear': linear}
colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
vz.plot(xs_dict,
        epoch_dict, ['all-one', 'linear'],
        'loss',
        colors,
        folder='_figures/',
        linewidth=1.5,
        marker='',
        xlabel='Epoch',
        ylabel='Classification Accuracy',
        ext='pdf')
Example no. 24
def main():
    logger.info("Logger is set - training start")

    # set default gpu device id
    # torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    # torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data = utils.get_data(
        config.dataset, config.data_path, cutout_length=0, validation=False
    )

    net_crit = nn.CrossEntropyLoss().to(device)
    model = SearchCNNController(
        input_channels,
        config.init_channels,
        n_classes,
        config.layers,
        net_crit,
        device_ids=config.gpus,
        imagenet_mode=config.dataset.lower() in utils.LARGE_DATASETS,
    )
    model = model.to(device)

    # weights optimizer
    w_optim = torch.optim.SGD(
        model.weights(),
        config.w_lr,
        momentum=config.w_momentum,
        weight_decay=config.w_weight_decay,
    )
    # alphas optimizer
    alpha_optim = torch.optim.Adam(
        model.alphas(),
        config.alpha_lr,
        betas=(0.5, 0.999),
        weight_decay=config.alpha_weight_decay,
    )

    # split data to train/validation
    n_train = len(train_data)
    split = n_train // 2
    indices = list(range(n_train))
    random.shuffle(indices)
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:])
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=config.batch_size,
        sampler=train_sampler,
        num_workers=config.workers,
        pin_memory=True,
    )
    valid_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=config.batch_size,
        sampler=valid_sampler,
        num_workers=config.workers,
        pin_memory=True,
    )
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_lr_min
    )
    architect = Architect(model, config.w_momentum, config.w_weight_decay)

    # training loop
    best_top1 = 0.0
    for epoch in range(config.epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas(logger)

        # training
        train(
            train_loader, valid_loader, model, architect, w_optim, alpha_optim, lr, epoch
        )

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step)

        # log
        # genotype
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as a image
        plot_path = os.path.join(config.plot_path, "EP{:02d}".format(epoch + 1))
        caption = "Epoch {}".format(epoch + 1)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)
        print("")

    # restrict the number of skip-connections in the normal cell to at most two
    count = 0
    indices = []
    for i in range(4):
        _, primitive_indices = torch.topk(model.alpha_normal[i][:, :], 1)
        for j in range(2 + i):
            if primitive_indices[j].item() == 2:
                count = count + 1
                indices.append((i, j))

    while count > 2:
        alpha_min, indice_min = model.alpha_normal[indices[0][0]][indices[0][1], 2], 0
        for i in range(1, count):
            alpha_c = model.alpha_normal[indices[i][0]][indices[i][1], 2]
            if alpha_c < alpha_min:
                alpha_min, indice_min = alpha_c, i
        model.alpha_normal[indices[indice_min][0]][indices[indice_min][1], 2] = 0
        indices.pop(indice_min)
        print(indices)
        count = count - 1

    best_genotype = model.genotype()

    with open(config.path + "/best_genotype.txt", "w") as f:
        f.write(str(best_genotype))
    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
Example no. 25
    def test_plot(self):
        genotype = eval('geno_types.{}'.format(self.cell_name))
        plot(genotype.normal, 'normal')
        plot(genotype.reduce, 'reduction')
Example no. 26
def main():
    logger.info("Logger is set - training start")

    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data = utils.get_data(
        config.dataset, config.data_path, cutout_length=0, validation=False)

    net_crit = nn.CrossEntropyLoss().to(device)
    model = SearchCNNController(input_channels,
                                config.init_channels,
                                n_classes,
                                config.layers,
                                net_crit,
                                device_ids=config.gpus)
    model = model.to(device)

    # weights optimizer
    w_optim = torch.optim.SGD(model.weights(),
                              config.w_lr,
                              momentum=config.w_momentum,
                              weight_decay=config.w_weight_decay)
    # alphas optimizer
    alpha_optim = torch.optim.Adam(model.alphas(),
                                   config.alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=config.alpha_weight_decay)

    # split data to train/validation
    n_train = len(train_data)
    split = n_train // 2
    indices = list(range(n_train))
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[:split])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[split:])
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=train_sampler,
                                               num_workers=config.workers,
                                               pin_memory=False)
    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=valid_sampler,
                                               num_workers=config.workers,
                                               pin_memory=False)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_lr_min)
    architect = Architect(model, config.w_momentum, config.w_weight_decay)

    # training loop
    best_top1 = -1.0
    best_epoch = 0
    ################################ restore from last time #############################################
    epoch_restore = config.epoch_restore
    if config.restore:
        utils.load_state_dict(model,
                              config.path,
                              extra='model',
                              parallel=(len(config.gpus) > 1))
        if not config.model_only:
            utils.load_state_dict(w_optim,
                                  config.path,
                                  extra='w_optim',
                                  parallel=False)
            utils.load_state_dict(alpha_optim,
                                  config.path,
                                  extra='alpha_optim',
                                  parallel=False)
            utils.load_state_dict(lr_scheduler,
                                  config.path,
                                  extra='lr_scheduler',
                                  parallel=False)
            utils.load_state_dict(epoch_restore,
                                  config.path,
                                  extra='epoch_restore',
                                  parallel=False)
    #####################################################################################################
    for epoch in range(epoch_restore, config.epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas(logger)

        # training
        train(train_loader, valid_loader, model, architect, w_optim,
              alpha_optim, lr, epoch)

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step)
        # top1 = 0.0

        # log
        # genotype
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as an image
        plot_path = os.path.join(config.plot_path,
                                 "EP{:02d}".format(epoch + 1))
        caption = "Epoch {}".format(epoch + 1)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            is_best = True
            best_epoch = epoch + 1
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)

        ######################################## save all state ###################################################
        utils.save_state_dict(model,
                              config.path,
                              extra='model',
                              is_best=is_best,
                              parallel=(len(config.gpus) > 1),
                              epoch=epoch + 1,
                              acc=top1,
                              last_state=((epoch + 1) >= config.epochs))
        utils.save_state_dict(lr_scheduler,
                              config.path,
                              extra='lr_scheduler',
                              is_best=is_best,
                              parallel=False,
                              epoch=epoch + 1,
                              acc=top1,
                              last_state=((epoch + 1) >= config.epochs))
        utils.save_state_dict(alpha_optim,
                              config.path,
                              extra='alpha_optim',
                              is_best=is_best,
                              parallel=False,
                              epoch=epoch + 1,
                              acc=top1,
                              last_state=((epoch + 1) >= config.epochs))
        utils.save_state_dict(w_optim,
                              config.path,
                              extra='w_optim',
                              is_best=is_best,
                              parallel=False,
                              epoch=epoch + 1,
                              acc=top1,
                              last_state=((epoch + 1) >= config.epochs))
        ############################################################################################################
        print("")
    logger.info("Best Genotype at {} epch.".format(best_epoch))
    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
Esempio n. 27
0
def main(config, writer, logger):
    logger.info("Logger is set - training search start")

    input_size, input_channels, n_classes, train_data = utils.get_data(
        config.dataset, config.data_path, cutout_length=0, validation=False)

    net_crit = nn.CrossEntropyLoss().to(config.device)
    model = SearchCNNController(input_channels, config.init_channels, n_classes, config.layers, net_crit).to(config.device)

    # weights optimizer
    w_optim = torch.optim.SGD(model.weights(), config.w_lr, momentum=config.w_momentum,
                                    weight_decay=config.w_weight_decay)
    # alphas optimizer
    alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(config.alpha_beta1, config.alpha_beta2),
                                    weight_decay=config.alpha_weight_decay)

    # split data to train/validation
    n_train = len(train_data)
    split = n_train // 2
    indices = list(range(n_train))
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:])
    train_loader = torch.utils.data.DataLoader(train_data,
                                                batch_size=config.batch_size,
                                                sampler=train_sampler,
                                                num_workers=config.workers,
                                                pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(train_data,
                                                batch_size=config.batch_size,
                                                sampler=valid_sampler,
                                                num_workers=config.workers,
                                                pin_memory=True)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=config.w_lr_min)
    architect = Architect(model, config.w_momentum, config.w_weight_decay)

    # training loop
    best_top1 = 0.
    for epoch in range(config.epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas(logger)

        # training
        train(train_loader, valid_loader, model, architect, w_optim, alpha_optim, lr, epoch, config, writer, logger)

        # validation
        cur_step = (epoch+1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step, config, writer, logger)

        # log
        # genotype
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as an image
        plot_path = os.path.join(config.plot_path, "EP{:02d}".format(epoch+1))
        caption = "Epoch {}".format(epoch+1)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)

    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
Esempio n. 28
0
def main():
    logger.info("Logger is set - training start")

    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    dahai_train_dataset = utils.MyDataset(data_dir=TRAIN_DATA_PATH, )
    dahai_dev_dataset = utils.MyDataset(data_dir=DEV_DAHAI_DATA_PATH, )
    # zhikang_test_dataset = utils.MyDataset(window_size=WINDOW_SIZE,
    #                                     window_step=WINDOW_STEP_DEV,
    #                                     data_path=TEST_ZHIKANG_DATA_PATH,
    #                                     voice_embed_path=TEST_ZHIKANG_VOICE_EMBEDDING_PATH,
    #                                     w2i=w2i,
    #                                     sent_max_len=SENT_MAX_LEN,
    #                                     )

    train_data = utils.DataProvider(batch_size=config.batch_size,
                                    dataset=dahai_train_dataset,
                                    is_cuda=config.is_cuda)
    dev_data = utils.DataProvider(batch_size=config.batch_size,
                                  dataset=dahai_dev_dataset,
                                  is_cuda=config.is_cuda)
    # test_data = utils.DataProvider(batch_size=config.batch_size, dataset=zhikang_test_dataset, is_cuda=config.is_cuda)

    print("train data nums:", len(train_data.dataset), "dev data nums:",
          len(dev_data.dataset))

    net_crit = nn.CrossEntropyLoss(reduction="none").to(device)
    model = SearchCNNController(config.embedding_dim,
                                config.init_channels,
                                config.n_classes,
                                config.layers,
                                net_crit,
                                config=config,
                                n_nodes=config.n_nodes,
                                device_ids=config.gpus)
    model = model.to(device).float()

    # weights optimizer
    w_optim = torch.optim.SGD(model.weights(),
                              config.w_lr,
                              momentum=config.w_momentum,
                              weight_decay=config.w_weight_decay)
    # alphas optimizer
    alpha_optim = torch.optim.Adam(model.alphas(),
                                   config.alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=config.alpha_weight_decay)

    ###### Cosine annealing: adjusts the learning rate
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_lr_min)
    architect = Architect(model, config.w_momentum, config.w_weight_decay)

    # training loop
    best_acc = 0.
    best_genotype = model.genotype()
    while True:
        epoch = train_data.epoch
        if epoch > config.epochs - 1:
            break
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas(logger)

        # training
        train(train_data, dev_data, epoch, model, architect, w_optim,
              alpha_optim, lr)

        # validation
        cur_step = train_data.iteration
        valid_acc = validate(dev_data, model, epoch, cur_step)

        # log
        # genotype
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as an image
        plot_path = os.path.join(config.plot_path,
                                 "EP{:02d}".format(epoch + 1))
        caption = "Epoch {}".format(epoch + 1)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save
        if best_acc < valid_acc:
            best_acc = valid_acc
            best_genotype = genotype
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)
        print("")

    logger.info("Final best Prec@1 = {:.4%}".format(best_acc))
    logger.info("Best Genotype = {}".format(best_genotype))
Esempio n. 29
0
def run_worker():
    rpc.init_rpc(name=f"trainer_{config.rank}",
                 rank=config.rank,
                 world_size=config.world_size)
    logger.info("Logger is set - training start")

    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data = utils.get_data(
        config.dataset, config.data_path, cutout_length=0, validation=False)
    net_crit = nn.CrossEntropyLoss().to(device)
    # model = SearchCNNController(input_channels, config.init_channels, n_classes, config.layers,
    #                             net_crit, device_ids=config.gpus)
    # model = model.to(device)
    model = TrainerNet(net_crit)

    # weights optimizer
    # w_optim = torch.optim.SGD(model.weights(), config.w_lr, momentum=config.w_momentum,
    #                           weight_decay=config.w_weight_decay)
    w_optim = DistributedOptimizer(torch.optim.SGD,
                                   model.weights(),
                                   lr=config.w_lr,
                                   momentum=config.w_momentum,
                                   weight_decay=config.w_weight_decay)
    # alphas optimizer
    # alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.5, 0.999),
    #                                weight_decay=config.alpha_weight_decay)
    alpha_optim = DistributedOptimizer(torch.optim.Adam,
                                       model.alphas(),
                                       lr=config.alpha_lr,
                                       betas=(0.5, 0.999),
                                       weight_decay=config.alpha_weight_decay)

    # split data to train/validation
    n_train = len(train_data)
    split = n_train // 2
    world = config.world_size
    rank = config.rank
    indices = list(range(n_train))
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[int(rank * split / world):int((rank + 1) * split / world)])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[split + int(rank * (n_train - split) / world):split +
                int(int((rank + 1) * (n_train - split) / world))])
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=train_sampler,
                                               num_workers=config.workers,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=valid_sampler,
                                               num_workers=config.workers,
                                               pin_memory=True)

    # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    #     w_optim, config.epochs, eta_min=config.w_lr_min)
    lrs_rrefs = []
    for opt_rref in w_optim.remote_optimizers:
        lrs_rrefs.append(
            rpc.remote(opt_rref.owner(),
                       create_lr_scheduler,
                       args=(opt_rref, )))

    v_model = SearchCNNController(input_channels,
                                  config.init_channels,
                                  n_classes,
                                  config.layers,
                                  nn.CrossEntropyLoss().to(device),
                                  device_ids=config.gpus).to(device)
    architect = Architect(model, v_model, config.w_momentum,
                          config.w_weight_decay, noise_add)

    if noise_add:
        logger.info("Adding noise")
        for param in model.parameters():
            shape_gaussian[param.data.shape] = gaussian.MultivariateNormal(
                torch.zeros(param.data.shape), torch.eye(param.data.shape[-1]))
    else:
        logger.info("Not adding noise")

    # training loop
    best_top1 = 0.
    for epoch in range(config.epochs):

        with dist_autograd.context() as cid:
            futs = []
            for lrs_rref in lrs_rrefs:
                futs.append(
                    rpc.rpc_async(lrs_rref.owner(),
                                  lrs_step,
                                  args=(lrs_rref, )))
            [fut.wait() for fut in futs]
            lr = remote_method(get_lrs_value,
                               lrs_rrefs[0].owner(),
                               args=(lrs_rrefs[0], ))
        # lr_scheduler.step()
        # lr = lr_scheduler.get_lr()[0]

        # model.print_alphas(logger)

        # training
        train(train_loader, valid_loader, model, architect, w_optim,
              alpha_optim, lr, epoch)

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step)

        # log
        # genotype
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as an image
        plot_path = os.path.join(config.plot_path,
                                 "EP{:02d}".format(epoch + 1))
        caption = "Epoch {}".format(epoch + 1)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)
        print("")

    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
    rpc.shutdown()
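The distinctive part of this RPC variant is that each worker takes only its own slice of the half/half split. A minimal sketch of that index arithmetic (n_train, world, and rank are illustrative stand-ins for the dataset size and the config values):

n_train, world, rank = 100, 4, 1      # illustrative values
split = n_train // 2
indices = list(range(n_train))

# this rank's share of the first (weight-training) half
train_part = indices[int(rank * split / world):int((rank + 1) * split / world)]
# this rank's share of the second (alpha-training) half
valid_part = indices[split + int(rank * (n_train - split) / world):
                     split + int((rank + 1) * (n_train - split) / world)]

print(len(train_part), len(valid_part))   # 13 13 for these values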
Esempio n. 30
0
def main():
    logger.info("Logger is set - training start")

    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data, val_dat, test_dat = utils.get_data(
        config.dataset,
        config.data_path,
        cutout_length=0,
        validation=True,
        validation2=True,
        img_resize=config.img_resize)

    net_crit = nn.CrossEntropyLoss().to(device)
    model = SearchCNNController(input_channels,
                                config.init_channels,
                                n_classes,
                                config.layers,
                                net_crit,
                                device_ids=config.gpus)
    # comment out the next line when generating an ONNX graph
    model = model.to(device)

    # weights optimizer
    w_optim = torch.optim.SGD(model.weights(),
                              config.w_lr,
                              momentum=config.w_momentum,
                              weight_decay=config.w_weight_decay)
    # alphas optimizer
    alpha_optim = torch.optim.Adam(model.alphas(),
                                   config.alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=config.alpha_weight_decay)

    #balanced split to train/validation
    print(train_data)

    # split data to train/validation
    n_train = len(train_data) // int(config.data_train_proportion)
    n_val = len(val_dat)
    n_test = len(test_dat)
    split = n_train // 2
    indices1 = list(range(n_train))
    indices2 = list(range(n_val))
    indices3 = list(range(n_test))
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices1)
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices2)
    test_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices3)

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               sampler=train_sampler,
                                               num_workers=config.workers,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(val_dat,
                                               batch_size=config.batch_size,
                                               sampler=valid_sampler,
                                               num_workers=config.workers,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_dat,
                                              batch_size=config.batch_size,
                                              sampler=test_sampler,
                                              num_workers=config.workers,
                                              pin_memory=True)

    #load
    if (config.load):
        model, config.epochs, w_optim, alpha_optim, net_crit = utils.load_checkpoint(
            model, config.epochs, w_optim, alpha_optim, net_crit,
            '/content/MyDarts/searchs/custom/checkpoint.pth.tar')
    # uncomment the block below when saving an ONNX graph
    """
    dummy_input = Variable(torch.randn(1, 3, 64, 64))
    torch.onnx.export(model, dummy_input, "rsdarts.onnx", verbose=True)
    input_np = np.random.uniform(0, 1, (1, 3, 64, 64))
    input_var = Variable(torch.FloatTensor(input_np))
    from pytorch2keras.converter import pytorch_to_keras
    # we should specify shape of the input tensor
    output = model(input_var)
    k_model = pytorch_to_keras(model, input_var, (3, 64, 64,), verbose=True)

    error = check_error(output, k_model, input_np)
    if max_error < error:
        max_error = error

    print('Max error: {0}'.format(max_error))
    a=2/0
    """
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optim, config.epochs, eta_min=config.w_lr_min)
    architect = Architect(model, config.w_momentum, config.w_weight_decay)

    #model  = torch.load('/content/pt.darts/searchs/custom/checkpoint.pth.tar')

    #print("Loaded!")
    # training loop
    best_top1 = 0.
    best_top_overall = -999
    config.epochs = 300  # BUG: config.epochs has some error, hard-coded override
    for epoch in range(config.epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas(logger)

        print("###################TRAINING#########################")
        # training
        #sample rs arch
        arch = sample_arch(model)
        #import pickle
        #arch = pickle.load( open( "best_arch.p", "rb" ) )
        train(train_loader, valid_loader, model, arch, w_optim, alpha_optim,
              lr, epoch)
        print("###################END TRAINING#########################")

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        print("###################VALID#########################")
        top1, top_overall, _, _ = validate(valid_loader,
                                           model,
                                           arch,
                                           epoch,
                                           cur_step,
                                           overall=True)
        print("###################END VALID#########################")

        # test
        print("###################TEST#########################")
        _, _, preds, targets = validate(test_loader,
                                        model,
                                        arch,
                                        epoch,
                                        cur_step,
                                        overall=True,
                                        debug=True)
        s = [preds, targets]
        import pickle
        pickle.dump(s, open("predictions_" + str(epoch + 1) + ".p", "wb"))
        #print("predictions: ",preds)
        #print("targets:",targets)
        print("###################END TEST#########################")

        # log
        # genotype
        #print("Model Alpha:",model.alpha_normal)
        genotype = model.genotype()
        logger.info("genotype = {}".format(genotype))

        # genotype as an image
        plot_path = os.path.join(config.plot_path,
                                 "EP{:02d}".format(epoch + 1))
        caption = "Epoch {}".format(epoch + 1)
        print("Genotype normal:", genotype.normal)
        plot(genotype.normal, plot_path + "-normal", caption)
        plot(genotype.reduce, plot_path + "-reduce", caption)

        # save
        if best_top1 < top1:
            best_top1 = top1
            best_genotype = genotype
            best_arch = arch
            is_best = True
            import pickle
            pickle.dump(best_arch, open("best_arch.p", "wb"))
            print('best_arch:', best_arch)
            print("saved!")
        else:
            is_best = False
        #save best overall(macro avg of f1 prec and recall)
        if (best_top_overall < top_overall):
            best_top_overall = top_overall
            best_genotype_overall = genotype
            is_best_overall = True
        else:
            is_best_overall = False

        utils.save_checkpoint(model, epoch, w_optim, alpha_optim, net_crit,
                              config.path, is_best, is_best_overall)

    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
    logger.info("Best Genotype = {}".format(best_genotype))
    logger.info("Best Genotype Overall = {}".format(best_genotype_overall))
Esempio n. 31
0
def run(choice,
        create_data=False,
        add_data=False,
        show_plot=False,
        create_pdf=False,
        show_pdf=False):
    global n
    global d
    global rep_SameGraph
    global FILENAMEZ
    global csv_filename
    global initial_h0
    global exponent
    global length
    global variant

    global alpha_vec
    global beta_vec
    global gamma_vec
    global s_vec
    global clip_on_vec
    global numMaxIt_vec

    # Plotting Parameters
    global xtick_lab
    global xtick_labels
    global ytick_lab
    global xmax
    global xmin
    global ymin
    global ymax
    global labels
    global facecolor_vec
    global draw_std_vec
    global linestyle_vec
    global linewidth_vec
    global marker_vec
    global markersize_vec
    global legend_location

    global option_vec
    global learning_method_vec

    global Macro_Accuracy
    global EC
    global constraints
    global weight_vec
    global randomize_vec
    global k
    global err
    global avoidNeighbors
    global convergencePercentage_W
    global stratified
    global gradient
    global doubly_stochastic
    global num_restarts
    global numberOfSplits
    global H_heuristic

    global select_lambda_vec
    global lambda_vec
    global f_vec
    global H0c

    # -- Setup
    CHOICE = choice
    #300 Prop37, 400 MovieLens, 500 Yelp, 600 Flickr, 700 DBLP, 800 Enron
    experiments = [CHOICE]
    CREATE_DATA = create_data
    ADD_DATA = add_data
    SHOW_PDF = show_pdf
    SHOW_PLOT = show_plot
    CREATE_PDF = create_pdf

    SHOW_FIG = SHOW_PLOT or SHOW_PDF or CREATE_PDF
    STD_FILL = True
    TIMING = False
    CALCULATE_DATA_STATISTICS = False

    # -- Default Graph parameters
    rep_SameGraph = 10  # iterations on same graph

    initial_h0 = None  # initial vector to start finding optimal H
    exponent = -0.3
    length = 5
    variant = 1

    alpha_vec = [0] * 10
    beta_vec = [0] * 10
    gamma_vec = [0] * 10
    s_vec = [0.5] * 10
    clip_on_vec = [True] * 10
    numMaxIt_vec = [10] * 10

    # Plotting Parameters
    xtick_lab = [0.001, 0.01, 0.1, 1]
    xtick_labels = ['0.1\%', '1\%', '10\%', '100\%']
    ytick_lab = np.arange(0, 1.1, 0.1)
    xmax = 1
    xmin = 0.0001
    ymin = 0.3
    ymax = 0.7
    labels = ['GS', 'LCE', 'MCE', 'DCE', 'DCEr']
    facecolor_vec = [
        'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
        "#64B5CD"
    ]
    draw_std_vec = [False] * 4 + [True]
    linestyle_vec = ['dashed'] + ['solid'] * 10
    linewidth_vec = [4, 4, 2, 1, 2, 2]
    marker_vec = [None, 'o', 'x', '^', 'v', '+']
    markersize_vec = [0, 8, 8, 8, 8, 8, 8]

    option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
    learning_method_vec = ['GT', 'LHE', 'MHE', 'DHE', 'DHE']

    Macro_Accuracy = False
    EC = True  # Non-backtracking for learning
    constraints = True  # True
    weight_vec = [None] * 3 + [10, 10] * 2
    randomize_vec = [False] * 4 + [True] * 2
    k = 3
    err = 0
    avoidNeighbors = False
    convergencePercentage_W = None
    stratified = True
    gradient = True
    doubly_stochastic = True
    num_restarts = None

    raw_std_vec = range(10)
    numberOfSplits = 1

    select_lambda_vec = [False] * 20
    lambda_vec = None

    f_vec = [0.9 * pow(0.1, 1 / 5)**x for x in range(21)]
    FILENAMEZ = ""
    legend_location = ""
    fig_label = ""
    H_heuristic = ""

    def choose(choice):
        global n
        global d
        global rep_SameGraph
        global FILENAMEZ
        global initial_h0
        global exponent
        global length
        global variant

        global alpha_vec
        global beta_vec
        global gamma_vec
        global s_vec
        global clip_on_vec
        global numMaxIt_vec

        # Plotting Parameters
        global xtick_lab
        global xtick_labels
        global ytick_lab
        global xmax
        global xmin
        global ymin
        global ymax
        global labels
        global facecolor_vec
        global draw_std_vec
        global linestyle_vec
        global linewidth_vec
        global marker_vec
        global markersize_vec
        global legend_location

        global option_vec
        global learning_method_vec

        global Macro_Accuracy
        global EC
        global constraints
        global weight_vec
        global randomize_vec
        global k
        global err
        global avoidNeighbors
        global convergencePercentage_W
        global stratified
        global gradient
        global doubly_stochastic
        global num_restarts
        global numberOfSplits
        global H_heuristic

        global select_lambda_vec
        global lambda_vec
        global f_vec

        # -- Default Graph parameters

        if choice == 0:
            pass

        elif choice == 304:  ## with varying weights
            FILENAMEZ = 'prop37'
            Macro_Accuracy = True
            gradient = True
            fig_label = 'Prop37'
            legend_location = 'lower right'
            n = 62000
            d = 34.8
            select_lambda_vec = [False] * 5
            f_vec = [0.9 * pow(0.1, 1 / 5)**x for x in range(21)]

        elif choice == 305:  # DCEr Only experiment
            choose(605)
            choose(304)

            select_lambda_vec = [False] * 6

        elif choice == 306:
            choose(304)
            select_lambda_vec = [False] * 3 + [True] * 3
            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec

            learning_method_vec.append('Holdout')
            labels.append('Holdout')

        elif choice == 307:  # heuristic comparison
            choose(304)
            select_lambda_vec = [False] * 3 + [True] * 3
            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec
            learning_method_vec.append('Heuristic')
            labels.append('Heuristic')
            H_heuristic = np.array([[.476, .0476, .476], [.476, .0476, .476],
                                    [.476, .476, .0476]])

        # -- MovieLens dataset
        elif choice == 401:
            FILENAMEZ = 'movielens'
            Macro_Accuracy = True
            gradient = True
            fig_label = 'MovieLens'
            legend_location = 'upper left'

            n = 26850
            d = 25.0832029795

        elif choice == 402:
            choose(401)
            select_lambda_vec = [False] * 3 + [
                True
            ] * 3  # allow choosing lambda for different f in f_vec

            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec

        elif choice == 403:
            choose(402)
            ymin = 0.3
            ymax = 1.0
            learning_method_vec.append('Holdout')
            labels.append('Holdout')

        elif choice == 404:
            choose(401)

            select_lambda_vec = [
                True
            ] * 3  # allow choosing lambda for different f in f_vec
            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec

            labels = ['GS', 'DCEr', 'Homophily']
            facecolor_vec = ['black', "#C44E52", "#64B5CD"]
            draw_std_vec = [False, True, False]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [4, 2, 2, 2, 2]
            marker_vec = [None, '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8]

            weight_vec = [None, 10, None]
            option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6']
            randomize_vec = [False, True, False]
            learning_method_vec = ['GT', 'DHE']  #TODO

        elif choice == 405:  # DCEr ONLY experiment
            choose(605)
            choose(401)
            learning_method_vec += ['Holdout']
            labels += ['Holdout']

        elif choice == 406:  # comparison with a static heuristic matrix
            choose(402)
            learning_method_vec += ['Heuristic']
            labels += ['Heuristic']
            H_heuristic = np.array([[.0476, .476, .476], [.476, .0476, .476],
                                    [.476, .476, .0476]])

        elif choice == 407:
            choose(402)
            ymin = 0.3
            ymax = 1.0
            lambda_vec = [1] * 21  # same length as f_vec

        elif choice == 408:
            choose(402)
            ymin = 0.3
            ymax = 1.0
            lambda_vec = [10] * 21  # same length as f_vec

        # DO NOT RUN WITH CREATE_DATA=True, if you do please restore the data from
        # data/sigmod-movielens-fig.csv
        elif choice == 409:
            choose(402)
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#8172B2", "#C44E52",
                "#C44E52", "#CCB974", "#64B5CD"
            ]
            labels = [
                'GS', 'LCE', 'MCE', 'DCE1', 'DCE10', 'DCEr1', 'DCEr10',
                'Holdout'
            ]
            draw_std_vec = [False] * 5 + [True] * 2 + [False]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [2, 2, 2, 2, 2, 2, 2, 2]
            marker_vec = [None, 'o', 'x', 's', 'p', '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8, 8]
            option_vec = [
                'opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6', 'opt7', 'opt8'
            ]
            legend_location = 'upper left'
            ymin = 0.3
            ymax = 1.0
            lambda_vec = [10] * 21  # same length as f_vec

        # -- Yelp dataset
        elif choice == 501:
            FILENAMEZ = 'yelp'
            Macro_Accuracy = True
            weight_vec = [None] * 3 + [10, 10]
            gradient = True
            ymin = 0.1
            ymax = 0.75
            fig_label = 'Yelp'
            legend_location = 'upper left'

            n = 4301900  # for figure
            d = 6.56  # for figure

        # -- Flickr dataset
        #elif choice == 601:
        #    FILENAMEZ = 'flickr'
        #    Macro_Accuracy = True
        #    fig_label = 'Flickr'
        #    legend_location = 'lower right'
        #    ymin = 0.3
        #    ymax = 0.7
        #    n = 2007369
        #    d = 18.1

        #elif choice == 602: ## with varying weights
        #    choose(601)

        #    select_lambda_vec = [False] * 4 + [True]*2  # allow to choose lambda for different f in f_vec
        #    f_vec = [0.9 * pow(0.1, 1 / 5) ** x for x in range(21)]
        #    lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec

        #elif choice == 603:     ## with varying weights
        #    choose(602)

        #    select_lambda_vec = [False] * 3 + [True] * 2  # allow to choose lambda for different f in f_vec
        #    # lambda_vec = [1] * 5 + [5] * 5 + [10] * 5 + [1] * 6  # same length as f_vec

        #elif choice == 604:     ## with weight = 1
        #    choose(603)

        #    lambda_vec = [0.5] * 21  # same length as f_vec

        # -- Flickr dataset
        elif choice == 601:
            FILENAMEZ = 'flickr'
            Macro_Accuracy = True
            fig_label = 'Flickr'
            legend_location = 'lower right'
            n = 2007369
            d = 18.1

        elif choice == 602:  ## with varying weights
            choose(601)

            select_lambda_vec = [False] * 4 + [
                True
            ]  # allow choosing lambda for different f in f_vec
            f_vec = [0.9 * pow(0.1, 1 / 5)**x for x in range(21)]
            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec

        elif choice == 603:  ## with varying weights
            choose(602)

            select_lambda_vec = [False] * 3 + [
                True
            ] * 2  # allow choosing lambda for different f in f_vec
            # lambda_vec = [1] * 5 + [5] * 5 + [10] * 5 + [1] * 6  # same length as f_vec

        elif choice == 604:  ## with weight = 1
            draw_std_vec = [4]
            choose(603)

            lambda_vec = [0.5] * 21  # same length as f_vec

        elif choice == 605:
            choose(601)
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
                "#64B5CD", 'orange'
            ]
            draw_std_vec = [False] + [True] * 10
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [3] * 10
            marker_vec = [None, 'o', 'x', '^', 'v', '+', 'o', 'x']
            markersize_vec = [0] + [8] * 10

            randomize_vec = [True] * 8
            option_vec = [
                'opt1', 'opt2', 'opt3', 'opt4', 'opt5', 'opt6', 'opt7', 'opt8'
            ]

            learning_method_vec = [
                'GT', 'DHE', 'DHE', 'DHE', 'DHE', 'DHE', 'DHE'
            ]
            select_lambda_vec = [False] * 8
            f_vec = [0.9 * pow(0.1, 1 / 5)**x for x in range(21)]
            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec
            weight_vec = [0, 0, 1, 2, 5, 10, 15]

            labels = ['GT'] + [
                i + ' {}'.format(weight_vec[ix])
                for ix, i in enumerate(['DCEr'] * 6)
            ]

        elif choice == 606:  # heuristic experiment
            choose(602)
            labels.append('Heuristic')
            learning_method_vec.append('Heuristic')
            H_heuristic = np.array([[.0476, .476, .476], [.476, .0476, .476],
                                    [.476, .476, .0476]])

        # -- DBLP dataset
        elif choice == 701:
            FILENAMEZ = 'dblp'
            Macro_Accuracy = True
            ymin = 0.2
            ymax = 0.5
            fig_label = 'DBLP'
            legend_location = 'lower right'
            n = 2241258  # for figure
            d = 26.11  # for figure

        # -- ENRON dataset
        elif choice == 801:
            FILENAMEZ = 'enron'
            Macro_Accuracy = True
            ymin = 0.3
            ymax = 0.75
            fig_label = 'Enron'
            f_vec = [0.9 * pow(0.1, 1 / 5)**x for x in range(21)]
            legend_location = 'upper left'
            n = 46463  # for figures
            d = 23.4  # for figures

        elif choice == 802:  ### WITH ADAPTIVE WEIGHTS
            choose(801)

            select_lambda_vec = [False] * 4 + [
                True
            ] * 2  # allow choosing lambda for different f in f_vec
            f_vec = [0.9 * pow(0.1, 1 / 5)**x for x in range(21)]
            lambda_vec = [1] * 11 + [10] * 10  # same length as f_vec

        elif choice == 803:  ### WITH ADAPTIVE WEIGHTS
            choose(802)

            lambda_vec = [1] * 5 + [5] * 5 + [10] * 5 + [
                1
            ] * 6  # same length as f_vec

        elif choice == 804:
            choose(803)

        elif choice == 805:
            choose(605)
            choose(801)
            #learning_method_vec += ['Holdout']
            #labels += ['Holdout']
        elif choice == 806:  # Heuristic experiment
            choose(802)
            learning_method_vec += ['Heuristic']
            labels += ['Heuristic']
            H_heuristic = np.array([[0.76, 0.08, 0.08, 0.08],
                                    [0.08, 0.08, 0.76, 0.08],
                                    [0.08, 0.76, 0.08, 0.76],
                                    [0.08, 0.08, 0.76, 0.08]])

        elif choice == 821:
            FILENAMEZ = 'enron'
            Macro_Accuracy = True
            constraints = True  # True
            gradient = True
            option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
            learning_method_vec = ['GT', 'LHE', 'MHE', 'DHE', 'DHE']
            weight_vec = [None] * 3 + [0.2, 0.2]

            randomize_vec = [False] * 4 + [True]
            xmin = 0.0001
            ymin = 0.0
            ymax = 0.7
            labels = ['GS', 'LCE', 'MCE', 'DCE', 'DCE r']
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
                "#64B5CD"
            ]
            draw_std_vec = [4]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [4, 4, 2, 1, 2]
            marker_vec = [None, 'o', 'x', '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8]
            fig_label = 'Enron'
            legend_location = 'lower right'
            n = 46463  # for figures
            d = 23.4  # for figures

            alpha = 0.0
            beta = 0.0
            gamma = 0.0
            s = 0.5
            numMaxIt = 10

            select_lambda_vec = [False] * 3 + [True] * 2
            lambda_vec = [0.2] * 13 + [10] * 8  # same length as f_vec

        # -- Cora dataset
        elif choice == 901:
            FILENAMEZ = 'cora'
            Macro_Accuracy = True
            constraints = True  # True
            option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
            learning_method_vec = ['GT', 'LHE', 'MHE', 'DHE', 'DHE']
            weight_vec = [None] * 3 + [10, 10]

            numMaxIt_vec = [10] * 10
            randomize_vec = [False] * 4 + [True]
            gradient = True
            xmin = 0.001
            ymin = 0.0
            ymax = 0.9
            labels = ['GT', 'LCE', 'MCE', 'DCE', 'DCE r']
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
                "#64B5CD"
            ]
            draw_std_vec = [4]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [4, 4, 2, 1, 2]
            marker_vec = [None, 'o', 'x', '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8]
            fig_label = 'Cora'
            legend_location = 'lower right'
            n = 2708
            d = 7.8

        # -- Citeseer dataset
        elif CHOICE == 1001:
            FILENAMEZ = 'citeseer'
            Macro_Accuracy = True
            constraints = True  # True
            option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
            learning_method_vec = ['GT', 'LHE', 'MHE', 'DHE', 'DHE']
            weight_vec = [None] * 3 + [10, 10]

            numMaxIt_vec = [10] * 10
            randomize_vec = [False] * 4 + [True]
            gradient = True
            xmin = 0.001
            ymin = 0.0
            ymax = 0.75
            labels = ['GT', 'LCE', 'MCE', 'DCE', 'DCE r']
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
                "#64B5CD"
            ]
            draw_std_vec = [4]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [4, 4, 2, 1, 2]
            marker_vec = [None, 'o', 'x', '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8]
            fig_label = 'Citeseer'
            legend_location = 'lower right'
            n = 3312
            d = 5.6

        elif CHOICE == 1101:
            FILENAMEZ = 'hep-th'
            Macro_Accuracy = True
            constraints = True  # True
            option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
            learning_method_vec = ['GT', 'LHE', 'MHE', 'DHE', 'DHE']
            weight_vec = [None] * 3 + [10, 10]

            numMaxIt_vec = [10] * 10
            randomize_vec = [False] * 4 + [True]
            gradient = True
            xmin = 0.0001
            ymin = 0.0
            ymax = 0.1
            labels = ['GT', 'LCE', 'MCE', 'DCE', 'DCE r']
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
                "#64B5CD"
            ]
            draw_std_vec = [4]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [4, 4, 2, 1, 2]
            marker_vec = [None, 'o', 'x', '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8]
            fig_label = 'Hep-th'
            legend_location = 'lower right'
            n = 27770
            d = 5.6

        elif choice == 1102:
            choose(1101)
            Macro_Accuracy = True

        elif CHOICE == 1204:
            FILENAMEZ = 'pokec-gender'
            Macro_Accuracy = True
            constraints = True  # True
            option_vec = ['opt1', 'opt2', 'opt3', 'opt4', 'opt5']
            learning_method_vec = ['GT', 'LHE', 'MHE', 'DHE', 'DHE']
            weight_vec = [None] * 3 + [10, 10]

            numMaxIt_vec = [10] * 10
            randomize_vec = [False] * 4 + [True]
            gradient = True
            xmin = 0.000015
            ymin = 0.0
            ymax = 0.75
            labels = ['GT', 'LCE', 'MCE', 'DCE', 'DCE r']
            facecolor_vec = [
                'black', "#55A868", "#4C72B0", "#8172B2", "#C44E52", "#CCB974",
                "#64B5CD"
            ]
            draw_std_vec = [0, 3, 4, 4, 4, 4]
            linestyle_vec = ['dashed'] + ['solid'] * 10
            linewidth_vec = [4, 4, 2, 1, 2]
            marker_vec = [None, 'o', 'x', '^', 'v', '+']
            markersize_vec = [0, 8, 8, 8, 8, 8, 8]
            fig_label = 'Pokec-Gender'
            legend_location = 'lower right'
            n = 1632803
            d = 54.6

        else:
            raise ValueError("Incorrect choice!")

    for choice in experiments:

        choose(choice)
        filename = 'Fig_End-to-End_accuracy_realData_{}_{}'.format(
            choice, FILENAMEZ)
        csv_filename = '{}.csv'.format(filename)

        header = [
            'currenttime', 'method', 'f', 'accuracy', 'precision', 'recall',
            'learntime', 'proptime'
        ]
        if CREATE_DATA:
            save_csv_record(join(data_directory, csv_filename),
                            header,
                            append=False)

        # print("choice: {}".format(choice))

        # --- print data statistics
        if CALCULATE_DATA_STATISTICS:

            Xd, W = load_Xd_W_from_csv(
                join(realDataDir, FILENAMEZ) + '-classes.csv',
                join(realDataDir, FILENAMEZ) + '-neighbors.csv')

            X0 = from_dictionary_beliefs(Xd)
            n = len(Xd.keys())
            d = (len(W.nonzero()[0]) * 2) / n

            k = len(X0[0])

            print("FILENAMEZ:", FILENAMEZ)
            print("k:", k)
            print("n:", n)
            print("d:", d)

            # -- Graph statistics
            n_vec = calculate_nVec_from_Xd(Xd)
            print("n_vec:\n", n_vec)
            d_vec = calculate_average_outdegree_from_graph(W, Xd=Xd)
            print("d_vec:\n", d_vec)
            P = calculate_Ptot_from_graph(W, Xd)
            print("P:\n", P)
            for i in range(k):
                Phi = calculate_degree_correlation(W, X0, i, NB=True)
                print("Degree Correlation, Class {}:\n{}".format(i, Phi))

            # -- Various compatibilities
            H0 = estimateH(X0,
                           W,
                           method='MHE',
                           variant=1,
                           distance=1,
                           EC=EC,
                           weights=1,
                           randomize=False,
                           constraints=True,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)
            print("H0 w/  constraints:\n", np.round(H0, 2))
            #raw_input() # Why?

            H2 = estimateH(X0,
                           W,
                           method='MHE',
                           variant=1,
                           distance=1,
                           EC=EC,
                           weights=1,
                           randomize=False,
                           constraints=True,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)
            H4 = estimateH(X0,
                           W,
                           method='DHE',
                           variant=1,
                           distance=1,
                           EC=EC,
                           weights=2,
                           randomize=False,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)
            H5 = estimateH(X0,
                           W,
                           method='DHE',
                           variant=1,
                           distance=1,
                           EC=EC,
                           weights=2,
                           randomize=False,
                           constraints=True,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)
            H6 = estimateH(X0,
                           W,
                           method='DHE',
                           variant=1,
                           distance=2,
                           EC=EC,
                           weights=10,
                           randomize=False,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)
            H7 = estimateH(X0,
                           W,
                           method='DHE',
                           variant=1,
                           distance=2,
                           EC=EC,
                           weights=10,
                           randomize=False,
                           constraints=True,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)

            print()
            # print("H MCE w/o constraints:\n", np.round(H0, 3))
            print("H MCE w/  constraints:\n", np.round(H2, 3))
            # print("H DCE 2 w/o constraints:\n", np.round(H4, 3))
            print("H DCE 2 w/  constraints:\n", np.round(H5, 3))
            # print("H DCE 10 w/o constraints:\n", np.round(H6, 3))
            print("H DCE 20 w/  constraints:\n", np.round(H7, 3))

            print()
            H_row_vec = H_observed(W, X0, 3, NB=True, variant=1)
            print("H_est_1:\n", np.round(H_row_vec[0], 3))
            print("H_est_2:\n", np.round(H_row_vec[1], 3))
            print("H_est_3:\n", np.round(H_row_vec[2], 3))

        # --- Create data
        if CREATE_DATA or ADD_DATA:

            Xd, W = load_Xd_W_from_csv(
                join(realDataDir, FILENAMEZ) + '-classes.csv',
                join(realDataDir, FILENAMEZ) + '-neighbors.csv')

            X0 = from_dictionary_beliefs(Xd)
            n = len(Xd.keys())  ## number of nodes in graph
            k = len(X0[0])
            d = (len(W.nonzero()[0]) * 2) / n
            #print(n)
            #print(d)
            #print("contraint = {}".format(constraints))
            #print('select lambda: {}'.format(len(select_lambda_vec)))
            #print('learning method: {}'.format(len(learning_method_vec)))
            #print('alpha: {}'.format(len(alpha_vec)))
            #print('beta: {}'.format(len(beta_vec)))
            #print('gamma: {}'.format(len(gamma_vec)))
            #print('s: {}'.format(len(s_vec)))
            #print('maxit: {}'.format(len(numMaxIt_vec)))
            #print('weight: {}'.format(len(weight_vec)))
            #print('randomize: {}'.format(len(randomize_vec)))
            # ---  Calculating True Compatibility matrix
            H0 = estimateH(X0,
                           W,
                           method='MHE',
                           variant=1,
                           distance=1,
                           EC=EC,
                           weights=1,
                           randomize=False,
                           constraints=constraints,
                           gradient=gradient,
                           doubly_stochastic=doubly_stochastic)
            # print(H0)
            H0c = to_centering_beliefs(H0)

            num_results = len(f_vec) * len(learning_method_vec) * rep_SameGraph

            # Starts a thread pool with at least 2 threads, and a lot more if you happen to be on a supercomputer
            pool = multiprocessing.Pool(max(2,
                                            multiprocessing.cpu_count() - 4))

            f_processes = f_vec * rep_SameGraph
            workers = []
            results = [(X0, W, f, ix)
                       for ix, f in enumerate(f_vec)] * rep_SameGraph
            # print('Expected results: {}'.format(num_results))
            try:  # hacky fix due to a bug in 2.7 multiprocessing
                # Distribute work for evaluating accuracy over the thread pool using
                # a hacky method due to python 2.7 multiprocessing not being fully
                # featured
                pool.map_async(multi_run_wrapper, results).get(num_results * 2)
            except multiprocessing.TimeoutError as e:
                continue
            finally:
                pool.close()
                pool.join()

        # -- Read data for all options and plot
        df1 = pd.read_csv(join(data_directory, csv_filename))
        acc_filename = '{}_accuracy_plot.pdf'.format(filename)
        pr_filename = '{}_PR_plot.pdf'.format(filename)
        if TIMING:
            print('=== {} Timing Results ==='.format(FILENAMEZ))
            print('Prop Time:\navg: {}\nstddev: {}'.format(
                np.average(df1['proptime'].values),
                np.std(df1['proptime'].values)))
            for learning_method in labels:
                rs = df1.loc[df1["method"] == learning_method]
                avg = np.average(rs['learntime'])
                std = np.std(rs['learntime'])
                print('{} Learn Time:\navg: {}\nstd: {}'.format(
                    learning_method, avg, std))

        sslhv.plot(df1,
                   join(figure_directory, acc_filename),
                   n=n,
                   d=d,
                   k=k,
                   labels=labels,
                   dataset=FILENAMEZ,
                   line_styles=linestyle_vec,
                   xmin=xmin,
                   ymin=ymin,
                   xmax=xmax,
                   ymax=ymax,
                   marker_sizes=markersize_vec,
                   draw_stds=draw_std_vec,
                   markers=marker_vec,
                   line_colors=facecolor_vec,
                   line_widths=linewidth_vec,
                   legend_location=legend_location,
                   show=SHOW_PDF,
                   save=CREATE_PDF,
                   show_plot=SHOW_PLOT)
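For reference, a minimal, self-contained sketch of the map_async-plus-timeout pattern this example uses to keep the worker pool from hanging (the square worker and the timeout are illustrative; the real code maps multi_run_wrapper over (X0, W, f, ix) tuples):

import multiprocessing

def square(x):
    return x * x

if __name__ == "__main__":
    pool = multiprocessing.Pool(max(2, multiprocessing.cpu_count() - 4))
    try:
        results = pool.map_async(square, range(8)).get(timeout=60)
    except multiprocessing.TimeoutError:
        results = None            # a stuck batch is simply abandoned
    finally:
        pool.close()
        pool.join()
    print(results)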