    def manipulate_homophily(self, strategy_func, strategy_name, pick_strategy,
                             manipulation_clas, network_name):
        self.global_homophilies = []
        class_partitions = []
        nodes_with_manipulation_clas = [
            node for node in self.G.nodes()
            if self.get_node_class(node) == manipulation_clas
        ]
        class_partitions.append(len(nodes_with_manipulation_clas) / self.size)
        homo_list_before = self.local_homophily()
        nodes_to_remove = [
            node for node in self.G.nodes()
            if self.get_node_class(node) != manipulation_clas
        ]
        utils.save_to_file(homo_list_before, network_name,
                           '{0}_homo_list_before'.format(strategy_name))
        # add, remove, or change nodes according to the chosen strategy
        strategy_func(nodes_to_remove, nodes_with_manipulation_clas,
                      class_partitions, pick_strategy, manipulation_clas)

        homo_list_after = self.local_homophily()
        utils.save_to_file(homo_list_after, network_name,
                           '{0}_homo_list_after'.format(strategy_name))
        utils.save_to_file(self.global_homophilies, network_name,
                           '{0}_global_homophilies'.format(strategy_name))
        utils.plot_local_homophily(homo_list_before, homo_list_after,
                                   network_name, strategy_name)
        utils.plot_global_homophily(self.global_homophilies, network_name,
                                    strategy_name)
        utils.plot_all(class_partitions, self.global_homophilies,
                       self.homophily_per_clas, manipulation_clas,
                       network_name, strategy_name)
    def train(self):
        print("Beginning training")
        if self.opts['optimizer'] == 'adam':
            learning_rates = [
                i[0] for i in self.opts['learning_rate_schedule']
            ]
            iterations_list = [
                i[1] for i in self.opts['learning_rate_schedule']
            ]
            total_num_iterations = iterations_list[-1]
            lr_counter = 0
            lr = learning_rates[lr_counter]
            lr_iterations = iterations_list[lr_counter]
            while it < total_num_iterations:
                if it % 1000 == 0:
                    print("\nIteration %i" % it, flush=True)
                if it % 100 == 0:
                    print('.', end='', flush=True)
                it += 1
                if it > lr_iterations:
                    lr_counter += 1
                    lr = learning_rates[lr_counter]
                    lr_iterations = iterations_list[lr_counter]

                self.sess.run(self.train_step,
                              feed_dict={
                                  self.learning_rate:
                                  lr,
                                  self.input:
                                  self.sample_minibatch(self.batch_size)
                              })
                if self.opts[
                        'loss_reconstruction'] == 'L2_squared+adversarial':
                    self.sess.run(self.adv_cost_train_step,
                                  feed_dict={
                                      self.learning_rate:
                                      lr,
                                      self.input:
                                      self.sample_minibatch(self.batch_size)
                                  })

                if (self.opts['print_log_information'] is True) and (it % 100
                                                                     == 0):
                    utils.print_log_information(self, it)

                if self.opts['make_pictures_every'] is not None:
                    if it % self.opts['make_pictures_every'] == 0:
                        utils.plot_all(self, it)

                if it % self.opts['save_every'] == 0:
                    self.save(it)
        # once training is complete, calculate disentanglement metric
        if 'disentanglement_metric' in self.opts:
            if self.opts['disentanglement_metric'] is True:
                self.disentanglement = disentanglement_metric.Disentanglement(
                    self)
                self.disentanglement.do_all(it)
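
# The 'learning_rate_schedule' option consumed by train() above is a list of
# (learning_rate, end_iteration) pairs: each rate is used until the iteration
# counter passes its end_iteration, and the last entry's end_iteration is the
# total number of training iterations. A minimal standalone sketch of that
# lookup (the function name here is illustrative, not part of the original):
def lr_for_iteration(schedule, it):
    """Return the learning rate the loop above would pick at iteration `it`."""
    for lr, end_iteration in schedule:
        if it <= end_iteration:
            return lr
    return schedule[-1][0]  # past the schedule: keep the final rate

# e.g. lr_for_iteration([(1e-3, 10000), (1e-4, 20000)], 12000) -> 1e-4
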
def process_in_question_one(datalist,
                            idx,
                            city_name,
                            ma_mode=0,
                            saved_folder=None,
                            saved=True):
    # First, compute the moving averages
    simple_average_data = utils.SampleMovingAverage(datalist[idx])
    culmative_average_data = utils.CumulativeMovingAverage(datalist[idx])
    expontial_average_data = utils.ExponentialMovingAverage(datalist[idx])
    draw_data_list = [
        datalist[idx], simple_average_data, culmative_average_data,
        expontial_average_data
    ]
    data_lengend = [
        "Original {} Data".format(city_name), "Simple Moving Average Data",
        "Cumulative Moving Average Data", "Exponential Moving Average Data"
    ]
    ma_data_list = [
        simple_average_data, culmative_average_data, expontial_average_data
    ]
    # Draw the data
    utils.plot_all(
        draw_data_list,
        data_lengend,
        "Date(only show the point of 0:00 in the X axe)",
        "Tide value ",
        "Tide value per day of {} with Moving Average".format(city_name),
        xticks=xticks_para,
        figsize=(16, 10),
        saved="{}/Moving_Average_{}.png".format(saved_folder, city_name))

    # Select which kind of moving-average data to use; the default is the simple one
    simple_average_data = ma_data_list[ma_mode]

    # Remove the Bias and Trend of the Data for further processing
    simple_average_data = utils.remove_bias_and_trend(simple_average_data)

    utils.plot_image(simple_average_data,
                     "Time(hours)",
                     "Tide Value",
                     "{} Tide reduced Data".format(city_name),
                     "{} Data".format(city_name),
                     saved="{}/{}_processed".format(saved_folder, city_name))

    # Draw the ACF plot of the data
    utils.draw_acf(simple_average_data,
                   saved="{}/{}_acf.png".format(saved_folder, city_name))

    # Draw the PACF plot of the data
    utils.draw_pacf(simple_average_data,
                    saved='{}/{}_pacf.png'.format(saved_folder, city_name))

    # Draw the amplitude spectrum and power spectrum
    utils.draw_spectrum(simple_average_data, city_name, saved_folder)

    return simple_average_data
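
# A hypothetical call to the function above; the data is synthetic and the
# folder name is a placeholder, while utils and the module-level xticks_para
# are assumed to exist as in the original script.
# import numpy as np
# demo_tides = [np.sin(np.linspace(0, 60 * np.pi, 24 * 30)) + 0.01 * np.arange(24 * 30)]
# reduced = process_in_question_one(demo_tides, idx=0, city_name='Demo',
#                                   ma_mode=0, saved_folder='figures')
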
def benchmark_mc(player, nb_games, N, reverse=False):
    """
    """
    legend = ['vs random player', 'vs Monte Carlo peer', 'vs UCT player']

    print('\nSimulating games against random player...')
    rand = benchmark_mc_single(player, nb_games, ttt.RandomPlayer(), reverse)

    print('\nSimulating games against Monte Carlo peer...')
    mc = benchmark_mc_single(player, nb_games, ttt.MonteCarloPlayer(N),
                             reverse)

    print('\nSimulating games against upper confidence tree player...')
    uct = benchmark_mc_single(player, nb_games, ttt.UCTPlayer(N), reverse)

    ut.plot_all('Games', 'Average victory rate', legend,
                np.vstack((rand, mc, uct)))
def benchmark_uct(player, nb_games, expl_params, N, reverse=False):
    """
    """
    legend = [f'c = {c}' for c in expl_params]

    print('\nSimulating games against random player...')
    rand = benchmark_uct_single(player, nb_games, expl_params,
                                ttt.RandomPlayer(), reverse)

    print('\nSimulating games against Monte Carlo player...')
    mc = benchmark_uct_single(player, nb_games, expl_params,
                              ttt.MonteCarloPlayer(N), reverse)

    print(
        '\nPlotting the results of the games played against random player...')
    ut.plot_all('Games', 'Average victory rate', legend, rand)

    print(
        '\nPlotting the results of the games played against Monte Carlo player...'
    )
    ut.plot_all('Games', 'Average victory rate', legend, mc)
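
# A hedged usage sketch for the two benchmarks above; it assumes the ttt
# module exposes RandomPlayer, MonteCarloPlayer and UCTPlayer exactly as they
# are used inside the functions, and the numbers are placeholders.
# N = 50                                        # rollouts per simulated move
# player = ttt.UCTPlayer(N)
# benchmark_mc(player, nb_games=100, N=N)
# benchmark_uct(player, nb_games=100, expl_params=[0.5, 1.0, 2.0], N=N)
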
Example #6
def main():
    args = get_train_args()
    utils.set_seed(args.seed)
    device = utils.get_device(args)

    # Initialize Dataset for each tasks
    assert args.dataset in ["splitMNIST", "permutedMNIST", 'fashionMNIST']
    if args.dataset == "splitMNIST" or args.dataset == 'fashionMNIST':
        labels_list = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
    elif args.dataset == 'permutedMNIST':
        labels_list = [list(range(10))] * 5
    
    # Run VCL
    task_final_accs, all_accs = vcl.run_vcl(args, device, labels_list)
    
    # Plots
    config_str = '_{}_coreset_{}'.format(args.dataset, args.coreset_size)
    utils.plot_small(task_final_accs, config_str)
    utils.plot_all(all_accs, config_str)
    
    avg_acc = np.mean(all_accs[-1])
    print ("Final Average Accuracy: {}".format(avg_acc))
    def train(self, it=0):
        # Only augment when the option is present and explicitly True
        augment = self.opts.get('data_augmentation') is True
        print("Beginning training")
        if self.opts['optimizer'] == 'adam':
            learning_rates = [
                i[0] for i in self.opts['learning_rate_schedule']
            ]
            iterations_list = [
                i[1] for i in self.opts['learning_rate_schedule']
            ]
            total_num_iterations = iterations_list[-1]
            lr_counter = 0
            lr = learning_rates[lr_counter]
            lr_iterations = iterations_list[lr_counter]
            while it < total_num_iterations:
                if it % 1000 == 0:
                    print("\nIteration %i" % it, flush=True)
                if it % 100 == 0:
                    print('.', end='', flush=True)
                it += 1
                while it > lr_iterations:
                    lr_counter += 1
                    lr = learning_rates[lr_counter]
                    lr_iterations = iterations_list[lr_counter]

                self.sess.run(self.train_step,
                              feed_dict={
                                  self.learning_rate:
                                  lr,
                                  self.input:
                                  self.sample_minibatch(
                                      batch_size=self.batch_size,
                                      augment=augment)
                              })
                if self.opts['loss_reconstruction'] in [
                        'L2_squared+adversarial',
                        'L2_squared+adversarial+l2_filter',
                        'L2_squared+multilayer_conv_adv',
                        'L2_squared+adversarial+l2_norm', 'normalised_conv_adv'
                ]:
                    self.sess.run(self.adv_cost_train_step,
                                  feed_dict={
                                      self.learning_rate:
                                      lr,
                                      self.input:
                                      self.sample_minibatch(
                                          batch_size=self.batch_size,
                                          augment=augment)
                                  })

                if (self.opts['print_log_information'] is True) and (it % 100
                                                                     == 0):
                    utils.print_log_information(self, it)

                if self.opts['make_pictures_every'] is not None:
                    if it % self.opts['make_pictures_every'] == 0:
                        utils.plot_all(self, it)

                if it % self.opts['save_every'] == 0:
                    self.save(it)
        # once training is complete, calculate disentanglement metric
        if 'disentanglement_metric' in self.opts:
            if self.opts['disentanglement_metric'] is True:
                self.disentanglement = disentanglement_metric.Disentanglement(
                    self)
                self.disentanglement.do_all(it)

        # save random samples and test reconstructions for FID scores:
        if 'FID_score_samples' in self.opts:
            if self.opts['FID_score_samples'] is True:
                self.save_FID_samples()
Example #8
    for e in range(args.episode):
        state = env.reset()
        state = scaler.transform([state])
        for time in range(env.n_step):
            action = agent.act(state)
            next_state, reward, done, info = env.step(action)
            next_state = scaler.transform([next_state])
            if args.mode == 'train':
                agent.remember(state, action, reward, next_state, done)
            if args.mode == "test":
                daily_portfolio_value.append(info['cur_val'])
            state = next_state
            if done:

                if args.mode == "test" and e % 100 == 0:
                    plot_all(stock_name, daily_portfolio_value, env, test + 1)

                daily_portfolio_value = []
                # print("new_stock_owned is: ", info['new_stock_owned'])
                # print("cash_in_hand is: ", env.cash_in_hand)
                # print("new_stock_price is: ", info['new_stock_price'])
                final_stock_hold = env.stock_owned
                print("episode: {}/{}, episode end value: {}".format(
                    e + 1, args.episode, info['cur_val']))
                portfolio_value.append(
                    info['cur_val'])  # append episode end portfolio value

                break
            if args.mode == 'train' and len(agent.memory) > args.batch_size:
                agent.replay(args.batch_size)
        if args.mode == 'train' and (e + 1) % 10 == 0:  # checkpoint weights
Example #9
def train_and_output(content,
                     style,
                     n_fft=N_FFT,
                     n_filters=N_FILTERS,
                     filter_width=FILTER_WIDTH,
                     reduce_factor=1):
    content_filename = "inputs/" + content
    content_no_extention = content.split(".")[0]
    style_no_extention = style.split(".")[0]
    style_filename = "inputs/" + style

    x_c, fs_c = librosa.load(content_filename)
    x_s, fs_s = librosa.load(style_filename)
    a_content = read_audio_spectrum(x_c,
                                    fs_c,
                                    n_fft=n_fft,
                                    reduce_factor=reduce_factor)
    a_style = read_audio_spectrum(x_s,
                                  fs_s,
                                  n_fft=n_fft,
                                  reduce_factor=reduce_factor)

    n_samples = min(a_content.shape[1], a_style.shape[1])
    logging.info("content samples %s" % a_content.shape[1])
    logging.info("style samples %s" % a_style.shape[1])
    n_channels = min(a_content.shape[0], a_style.shape[0])

    # Truncate style to content frequency and time window (debatable)
    a_style = a_style[:n_channels, :n_samples]
    a_content = a_content[:n_channels, :n_samples]

    g = tf.Graph()
    with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:
        # data shape is "[batch, in_height, in_width, in_channels]",
        # model = random_models.DoubleLayerConv(filter_width, n_channels, n_samples, n_filters)
        model = random_models.SingleLayerConv(filter_width, n_channels,
                                              n_samples, n_filters)
        # model = random_models.SingleLayer2DConv(7, 7, n_channels, n_samples, 32)
        x = model.generate_input(placeholder=True)

        content_layer, feature_layer = model.get_feature(x)
        a_content_tf, a_style_tf = model.transform(a_content, a_style)

        content_features = content_layer.eval(feed_dict={x: a_content_tf})
        style_features = feature_layer.eval(feed_dict={x: a_style_tf})
        n_filters = style_features.shape[-1]
        features = np.reshape(style_features, (-1, n_filters))
        style_gram = np.matmul(features.T, features) / n_samples

    result, initial = train(n_samples, model, content_features, style_gram)

    initial_spectrogram = np.zeros_like(a_content)
    initial_spectrogram[:n_channels, :] = np.exp(
        model.to_spectrogram(initial)) - 1
    final_spectrogram = np.zeros_like(a_content)
    final_spectrogram[:n_channels, :] = np.exp(
        model.to_spectrogram(result)) - 1

    # Reconstruct audio from the final spectrogram (phase reconstruction
    # happens inside fft_to_audio)
    out_name = '%s_to_%s_%s_fft_%s_width_%s_n.wav' % (
        content_no_extention, style_no_extention, n_fft, filter_width,
        n_filters)
    output_filename = fft_to_audio(out_name, final_spectrogram, fs_c)
    display(Audio(output_filename))
    plot_all(a_content, a_style, final_spectrogram, initial_spectrogram)
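
# Hypothetical call to the function above; the file names are placeholders and
# are expected to live under the hard-coded "inputs/" directory.
# train_and_output('cello_suite.wav', 'choir.wav', n_fft=2048, reduce_factor=2)
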
def style_transfer(num_classes, n_samples, n_channels, content_tensor,
                   style_tensor, a_content, a_style):
    alpha = 1e-2
    with tf.Session() as session:
        model = SoundCNN(num_classes)
        saver = tf.train.Saver()
        saver.restore(session, 'trained_model/model.ckpt')
        x_np = np.random.randn(1, 1, n_samples, n_channels).astype(
            np.float32) * 1e-3
        x = tf.Variable(x_np, name="x")
        content_features = model.h_conv1.eval(feed_dict={
            model.x: content_tensor,
            model.keep_prob: 1.0,
            model.is_train: False
        })
        style_features = model.h_conv1.eval(feed_dict={
            model.x: style_tensor,
            model.keep_prob: 1.0,
            model.is_train: False
        })
        n_filters = style_features.shape[-1]
        features = np.reshape(style_features, (-1, n_filters))
        style_gram = np.matmul(features.T, features) / n_samples
        logging.log(logging.INFO, "Style gram")
        logging.log(logging.INFO, style_gram)

        conv1 = conv2d(x, model.W_conv1)
        batch_norm1 = tf.contrib.layers.batch_norm(
            conv1,
            center=True,
            scale=True,
            is_training=False,
        )
        h_conv1 = tf.nn.relu(batch_norm1)

        end = h_conv1
        style = h_conv1

        content_loss = alpha * 2 * tf.nn.l2_loss(end - content_features)

        _, height, width, number = map(lambda i: i.value, style.get_shape())
        print(style.get_shape())
        feats = tf.reshape(style, (-1, number))
        gram = tf.matmul(tf.transpose(feats), feats) / n_samples
        style_loss = 2 * tf.nn.l2_loss(gram - style_gram)

        loss = content_loss + style_loss

        opt = tf.contrib.opt.ScipyOptimizerInterface(loss,
                                                     method='L-BFGS-B',
                                                     options={'maxiter': 300},
                                                     var_list=[x])

        tf.global_variables_initializer().run()

        initial_gram = gram.eval()
        initial_content = end.eval()
        initial_vector = x.eval()
        start_loss = loss.eval()

        logging.info('Start loss: %s', start_loss)
        logging.info('Started optimization.')
        opt.minimize(session)
        logging.info('Final loss: %s', loss.eval())

        end_gram = gram.eval()
        end_content = end.eval()

        result = x.eval()

        logging.info('style_difference')
        logging.info(end_gram - initial_gram)

        logging.info('content_difference')
        logging.info(end_content - initial_content)

        final_result = np.zeros_like(a_content)
        final_result[:n_channels, :] = np.exp(result[0, 0].T) - 1

        initial_spectrogram = np.zeros_like(a_content)
        initial_spectrogram[:n_channels, :] = np.exp(initial_vector[0,
                                                                    0].T) - 1
        plot_all(a_content, a_style, final_result, initial_spectrogram)

        return final_result
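
# Both audio snippets above build their style target the same way: flatten the
# style feature maps to (positions, n_filters) and take the filter-by-filter
# Gram matrix, normalised by the number of time samples. A minimal NumPy
# restatement of that step (the helper name is illustrative):
import numpy as np

def style_gram(style_features, n_samples):
    """Gram matrix of shape (n_filters, n_filters), as computed above."""
    n_filters = style_features.shape[-1]
    feats = np.reshape(style_features, (-1, n_filters))
    return feats.T @ feats / n_samples
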
Example #11
from utils import plot_all, calculate_ber_by_mer
import sys
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mer', help='MER value in dB', type=float)
    parser.add_argument('-o', '--qam-order',
                        help='QAM order (m-value)',
                        type=int,
                        dest='qam_order')
    parser.add_argument('action',
                        help='Action to execute: calc_ber or plot_all')

    args = parser.parse_args()

    if args.action == 'calc_ber':
        if args.mer is None or args.qam_order is None:
            raise Exception('Both --mer and --qam-order are required for calc_ber')

        if args.qam_order not in [4, 16, 64, 128, 256]:
            raise Exception('qam-order must be one of 4, 16, 64, 128 or 256')

        ber, error = calculate_ber_by_mer(args.mer, args.qam_order)
        output = {'mer': args.mer, 'ber': ber, 'error': error}
        print(output)

    if args.action == 'plot_all':
        plot_all()
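
# Example invocations of the script above (the file name is a placeholder; the
# flags match the argparse definitions):
#   python ber_tool.py calc_ber --mer 32.5 --qam-order 64
#   python ber_tool.py plot_all
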
# utils.plot_all(class_partition, global_homophilies, homophily_per_clas, '80', 'CSphd', 'remove_with_probability')

# class_partition = utils.read_from_file('Yeast', 'remove_with_probability_class_partitions')
# homophily_per_clas = utils.read_json('Yeast', 'remove_with_probability_homophily_per_clas.json')
# global_homophilies = utils.read_from_file('Yeast', 'remove_with_probability_global_homophilies')
# utils.plot_all(class_partition, global_homophilies, homophily_per_clas, 'U', 'Yeast', 'remove_with_probability')

### add_big_random

class_partition = utils.read_from_file('blogs',
                                       'add_big_random_class_partitions')
homophily_per_clas = utils.read_json('blogs',
                                     'add_big_random_homophily_per_clas.json')
global_homophilies = utils.read_from_file('blogs',
                                          'add_big_random_global_homophilies')
utils.plot_all(class_partition, global_homophilies, homophily_per_clas, '1',
               'blogs', 'add_big_random')

class_partition = utils.read_from_file('AMD',
                                       'add_big_random_class_partitions')
homophily_per_clas = utils.read_json('AMD',
                                     'add_big_random_homophily_per_clas.json')
global_homophilies = utils.read_from_file('AMD',
                                          'add_big_random_global_homophilies')
utils.plot_all(class_partition, global_homophilies, homophily_per_clas, 'E',
               'AMD', 'add_big_random')

class_partition = utils.read_from_file('CSphd',
                                       'add_big_random_class_partitions')
homophily_per_clas = utils.read_json('CSphd',
                                     'add_big_random_homophily_per_clas.json')
global_homophilies = utils.read_from_file('CSphd',