Example #1
def lolpolpolpo():

    # Generate a random flame
    f = GenRandom(*(randopt for i in range(random.randint(2, 5))))
    # Normalize weights of xforms in this flame
    # to a sum of 0.5
    utils.normalize_weights(f, norm=0.5)

    # Choose a random xform. Delete it and create a new one
    # with the same weight, using 3 variations chosen from special_vars
    delx = random.choice(f.xform)
    del_weight = delx.weight
    del_color = delx.color

    delx.delete()

    Xform.random(f, xv=special_vars, n=3, xw=del_weight, col=del_color)

    # Add one more all-linear xform with weight 0.5
    # with the magic rotation & offset
    lastx = f.add_xform(weight=0.5, color_speed=0)

    lastx.rotate(random.uniform(rand_angle_min, rand_angle_max))
    lastx.c = random.uniform(-0.5, 0.5)
    lastx.f = random.uniform(-0.5, 0.5)

    f.reframe()

    return f
Example #2
def lolpolpolpo():

    # Generate a random flame
    f = GenRandomFlame(randopt, numbasic=5)
    # Normalize weights of xforms in this flame
    # to a sum of 0.5
    utils.normalize_weights(f, norm=0.5)

    # Choose a random xform. Delete it and create a new one
    # with the same weight, using 3 variations chosen from special_vars
    delx = random.randint(0, len(f.xform) - 1)
    delxw = f.xform[delx].weight
    delcol = f.xform[delx].color
    
    f.xform[delx].delete()

    Xform.random(f, xv=special_vars, n=3, xw=delxw, col=delcol)

    # Add one more all-linear xform with weight 0.5
    # with the magic rotation & offset
    lastx = f.add_xform(weight=0.5, color_speed=0)

    lastx.rotate(random.uniform(rand_angle_min, rand_angle_max))
    lastx.c = random.uniform(-0.5, 0.5)
    lastx.f = random.uniform(-0.5, 0.5)

    # reframe the flame
    f.reframe()
    
    return f
Example #3
    def fit(self,
            successes,
            trials,
            n_samples=1000,
            baseline=0.0,
            values=None,
            smoothing=1.0):
        '''
        Generate the weights for each arm based on bandit history.

        Parameters:
            successes (array): A 1 x n array with total successes for each arm
               trials (array): A 1 x n array with total trials for each arm
              n_samples (int): The number of samples to pull from each arm's distribution
                               for Thompson Sampling.
             baseline (float): The minimum weight to give each arm
               values (array): A 1 x n array with the reward value for each arm, or None
            smoothing (float): The constant factor by which to divide all trials and successes

        Updates:
            self.weights (array): A 1 x n array with normalized weights for each arm
        '''

        self.values = utils.set_values(values, len(trials))
        self.samples = utils.get_samples(trials, successes, n_samples,
                                         smoothing, self.values)
        self._raw_weights = utils.get_weights(self.samples)
        self.weights = utils.normalize_weights(self._raw_weights, baseline)
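The helpers used above (utils.get_samples, utils.get_weights, utils.normalize_weights) are not shown in this example. As a rough illustration of the baseline-floored normalization step, here is a minimal self-contained sketch; the function name, signature, and baseline semantics are assumptions for illustration, not the library's actual implementation.

import numpy as np

def normalize_weights_sketch(raw_weights, baseline=0.0):
    """Hypothetical helper: rescale raw weights to sum to 1, floor each
    arm at the baseline, then renormalize. Illustration only."""
    w = np.asarray(raw_weights, dtype=float)
    w = w / w.sum()              # proportional weights
    w = np.maximum(w, baseline)  # enforce a minimum weight per arm
    return w / w.sum()           # renormalize so the weights sum to 1

# Example: raw Thompson-sampling win counts for three arms
print(normalize_weights_sketch([800, 150, 50], baseline=0.1))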
Example #4
    def generate_portfolio(self, **kwargs):
        kwargs = dotdict(kwargs)
        symbols = list(kwargs.cov_matrix.columns)
        self.gene_length = len(symbols)

        # Create initial genes
        initial_genes = self.generate_initial_genes(symbols)

        for i in range(self.iterations):
            # Select
            top_genes = self.select(kwargs.sample_returns, initial_genes)
            # print("Iteration %d Best Sharpe Ratio: %.3f" % (i, top_genes[0][0]))
            top_genes = [item[1] for item in top_genes]

            # Mutate
            mutated_genes = self.mutate(top_genes)
            initial_genes = mutated_genes

        top_genes = self.select(kwargs.sample_returns, initial_genes)
        best_gene = top_genes[0][1]
        # Gene is a distribution of weights for different stocks
        # transposed_gene = np.array(best_gene).transpose()
        # returns = np.dot(return_matrix, transposed_gene)
        # returns_cumsum = np.cumsum(returns)
        n_best = normalize_weights(best_gene)
        weights = {symbols[x]: n_best[x] for x in range(0, len(best_gene))}
        return weights
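The select step above is expected to score each gene (a raw weight vector) and return (score, gene) pairs sorted best-first; given the commented-out Sharpe-ratio print, a Sharpe-based fitness is the natural choice. The sketch below is a hypothetical, self-contained version of such a scorer; the names and signatures are assumptions, not the class's actual methods.

import numpy as np

def sharpe_score(sample_returns, gene, risk_free=0.0):
    """Hypothetical fitness: Sharpe ratio of the portfolio defined by
    'gene' over a returns matrix of shape (n_periods, n_assets)."""
    weights = np.asarray(gene, dtype=float)
    weights = weights / weights.sum()
    portfolio_returns = np.asarray(sample_returns) @ weights
    excess = portfolio_returns - risk_free
    return excess.mean() / (excess.std() + 1e-12)

def select_sketch(sample_returns, genes, top_k=10):
    """Score every gene and keep the best top_k (score, gene) pairs."""
    scored = [(sharpe_score(sample_returns, g), g) for g in genes]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return scored[:top_k]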
Example #5
def AdaBoost_Algo(train_set, how_many_times_to_run, hypo_function):
    weights = []
    for i in range(0, len(train_set)):  # give equal weight to every data point
        weights.append(1 / len(train_set))
    H_set_of_hypos = []
    for i in range(0, how_many_times_to_run):  # run r rounds
        # get the best hypothesis for this round (lowest weighted error)
        lowest_err_hypo = hypo_function(train_set, weights)
        alpha_for_hypo = (1 / 2) * ln(
            (1 - lowest_err_hypo[3]) / lowest_err_hypo[3])
        # collect the hypothesis and its alpha for this round
        H_set_of_hypos.append((lowest_err_hypo, alpha_for_hypo))
        # if eps (the hypothesis's weighted error) is at least 0.5,
        # stop the boosting rounds - alpha would be zero or negative
        if lowest_err_hypo[3] >= 0.5:
            break
        # update and normalize the point weights
        utils.update_weights(alpha_for_hypo, lowest_err_hypo, train_set,
                             weights, len(train_set), hypo_function)
        utils.normalize_weights(weights, len(weights))
    return H_set_of_hypos
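utils.update_weights and utils.normalize_weights are not shown above. In standard AdaBoost the point weights are multiplied by exp(-alpha * y * h(x)) and then rescaled to sum to 1; the sketch below shows that update under the assumption of labels in {-1, +1} and a predict(point) -> {-1, +1} hypothesis. The names here are illustrative, not the actual utils API.

import math

def update_and_normalize(weights, points, labels, predict, alpha):
    """AdaBoost reweighting sketch: up-weight misclassified points,
    down-weight correctly classified ones, then normalize to sum to 1."""
    for i, (x, y) in enumerate(zip(points, labels)):
        weights[i] *= math.exp(-alpha * y * predict(x))
    total = sum(weights)
    for i in range(len(weights)):
        weights[i] /= total
    return weights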
Example #6
def calculate_style_loss(original_style, generated_style, style_layer_weights):
    normalized_weights = normalize_weights(style_layer_weights)
    gram_original = [gram_matrix(layer) for layer in original_style]
    gram_generated = [gram_matrix(layer) for layer in generated_style]

    style_loss = 0
    for i in range(len(original_style)):
        layer = original_style[i]
        # Layers have shape of n_batch * n_activation_height * n_activation_width * n_channel
        num_channel = layer.shape[-1]
        activation_size = layer.shape[1] * layer.shape[2]
        style_loss = style_loss + (normalized_weights[i] * tf.reduce_sum(
            (gram_generated[i] - gram_original[i]) ** 2) / (4 * num_channel**2 * activation_size**2))

    return style_loss
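gram_matrix is not shown in this example; for activations of shape (n_batch, height, width, n_channel) it is the matrix of channel-by-channel inner products of the flattened feature maps. A minimal TensorFlow sketch consistent with the shapes used above (an assumed helper, not necessarily the one used by this project):

import tensorflow as tf

def gram_matrix(layer):
    """Gram matrix of shape (n_batch, n_channel, n_channel) from a
    (n_batch, height, width, n_channel) activation tensor."""
    n_channel = layer.shape[-1]
    features = tf.reshape(layer, (-1, layer.shape[1] * layer.shape[2], n_channel))
    return tf.matmul(features, features, transpose_a=True)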
Example #7
    def generate_portfolio(self, **kwargs):
        """
        Inspired by: https://srome.github.io/Eigenvesting-II-Optimize-Your-Portfolio-With-Optimization/
        """
        kwargs = dotdict(kwargs)

        inverse_cov_matrix = np.linalg.pinv(kwargs.cov_matrix)
        ones = np.ones(len(inverse_cov_matrix))
        inverse_dot_ones = np.dot(inverse_cov_matrix, ones)
        min_var_weights = inverse_dot_ones / np.dot(inverse_dot_ones, ones)
        min_var_weights = normalize_weights(min_var_weights)
        weights = {
            kwargs.cov_matrix.columns[i]: min_var_weights[i]
            for i in range(min_var_weights.shape[0])
        }
        return weights
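The code above implements the closed-form unconstrained minimum-variance solution w = inv(Sigma) @ 1 / (1' @ inv(Sigma) @ 1), with normalize_weights applied on top. A small self-contained numerical check (the covariance matrix is a toy example chosen for illustration):

import numpy as np

cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])            # toy 2-asset covariance matrix
inv = np.linalg.pinv(cov)
ones = np.ones(len(inv))
w = inv @ ones / (ones @ inv @ ones)      # minimum-variance weights
print(w, w.sum())                         # lower-variance asset gets more weight; sums to 1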
Example #8
    def generate_portfolio(self, **kwargs):
        """
        Inspired by: https://srome.github.io/Eigenvesting-I-Linear-Algebra-Can-Help-You-Choose-Your-Stock-Portfolio/
        """
        kwargs = dotdict(kwargs)
        eigh_values, eigh_vectors = np.linalg.eigh(kwargs.cov_matrix)
        # We don't need this, but keep it in case someone wants to analyze it:
        # market_eigen_portfolio = eig_vectors[:, -1] / np.sum(eig_vectors[:, -1])
        # This is a portfolio that is uncorrelated to the market and still yields good returns
        eigen_portfolio = eigh_vectors[:, -kwargs.p_number] / \
            np.sum(eigh_vectors[:, -kwargs.p_number])
        # if kwargs.long_only:
        #     weights = {kwargs.cov_matrix.columns[i]: max(0, eigen_portfolio[i])
        #                for i in range(eigen_portfolio.shape[0])}
        # else:
        eigen_portfolio = normalize_weights(eigen_portfolio)
        weights = {
            kwargs.cov_matrix.columns[i]: eigen_portfolio[i]
            for i in range(eigen_portfolio.shape[0])
        }
        return weights
Example #9
    def fit(self, successes, trials, n_samples=1000, baseline=0.0, values=None, smoothing=1.0):
        '''
        Generate the weights for each arm based on bandit history.

        Parameters:
            successes (array): A 1 x n array with total successes for each arm
               trials (array): A 1 x n array with total trials for each arm
              n_samples (int): The number of samples to pull from each arm's distribution
                               for Thompson Sampling.
             baseline (float): The minimum weight to give each arm
               values (array): A 1 x n array with the reward value for each arm, or None
            smoothing (float): The constant factor by which to divide all trials and successes

        Updates:
            self.weights (array): A 1 x n array with normalized weights for each arm
        '''

        self.values = utils.set_values(values, len(trials))
        self.samples = utils.get_samples(trials, successes, n_samples, smoothing, self.values)
        self._raw_weights = utils.get_weights(self.samples)
        self.weights = utils.normalize_weights(self._raw_weights, baseline)
Example #10
    def generate_portfolio(self, **kwargs):
        """
        Inspired by: Eigen Portfolio Selection:
        A Robust Approach to Sharpe Ratio Maximization,
        https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3070416
        """
        kwargs = dotdict(kwargs)
        inverse_cov_matrix = np.linalg.pinv(kwargs.cov_matrix)
        ones = np.ones(len(inverse_cov_matrix))

        numerator = np.dot(inverse_cov_matrix, kwargs.pred_returns)
        denominator = np.dot(np.dot(ones.transpose(), inverse_cov_matrix),
                             kwargs.pred_returns)
        msr_portfolio_weights = numerator / denominator

        msr_portfolio_weights = normalize_weights(msr_portfolio_weights)

        weights = {
            kwargs.cov_matrix.columns[i]: msr_portfolio_weights[i]
            for i in range(len(msr_portfolio_weights))
        }

        return weights
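Here the numerator is the tangency-portfolio direction inv(Sigma) @ mu and the denominator 1' @ inv(Sigma) @ mu rescales it, before normalize_weights produces the final allocation. A toy check, parallel to the minimum-variance example above (illustrative numbers only):

import numpy as np

cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])            # toy covariance matrix
mu = np.array([0.08, 0.12])               # toy predicted returns
inv = np.linalg.pinv(cov)
ones = np.ones(len(inv))
w = inv @ mu / (ones @ inv @ mu)          # maximum-Sharpe-ratio weights
print(w, w.sum())                         # sums to 1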
Example #11
files = glob(file_path)
models, res = [], []
for f in files:
    try:
        if pull_from_db:
            data = np.load('{}.npz'.format(f['file_path']))
        else:
            data = np.load(f)
        model_name = data['model_name']
        siamese = data['siamese']
        dataset = data['dataset']
        inner_steps = data['inner_steps']
        # Using weight norm?
        wn = data['wn']
        if wn:
            num_objects_center = normalize_weights(data, 'num_objects',
                                                   'center')  # noqa
            num_objects_scale = normalize_weights(data, 'num_objects',
                                                  'scale')  # noqa
            object_size_center = normalize_weights(data, 'object_size',
                                                   'center')  # noqa
            object_size_scale = normalize_weights(data, 'object_size',
                                                  'scale')  # noqa
            object_location_center = normalize_weights(data, 'object_location',
                                                       'center')  # noqa
            object_location_scale = normalize_weights(data, 'object_location',
                                                      'scale')  # noqa
        else:
            num_objects_center = data['num_objects_center']
            num_objects_scale = data['num_objects_scale']
            object_size_center = data['object_size_center']
            object_size_scale = data['object_size_scale']
Example #12
def main():
    from vgg16 import model

    dirname = os.path.dirname(os.path.abspath(__file__))
    layer_idx = 18

    img_disp = cv2.imread('{}/images/woh.png'.format(dirname))
    img_disp = cv2.resize(img_disp, (224, 224))
    img = img_disp[np.newaxis, :, :, :]
    img = img.astype(np.float32)
    img = img - np.array([103.939, 116.779, 123.68])  # bgr

    for i, layer in enumerate(model.layers):
        print(i, layer)

    compute_weight = True
    is_changed = True

    while True:
        if is_changed:
            is_changed = False
            out = utils.activation(img, model, layer_idx)
            if len(out.shape) == 4:
                is_conv = True
                is_fc = False
                out = np.transpose(out, (3, 1, 2, 0))
            else:
                is_conv = False
                is_fc = True
                out = np.transpose(out, (1, 0))

            out = utils.normalize(out)
            disp = utils.combine_and_fit(out,
                                         is_conv=is_conv,
                                         is_fc=is_fc,
                                         disp_w=800)

            cv2.imshow('input', img_disp)
            cv2.imshow('disp', disp)

        if compute_weight:
            compute_weight = False
            weight = model.get_weights()[0]  # only the first layer is interpretable for *me*
            weight = utils.normalize_weights(weight, 'conv')
            weight = np.transpose(weight, (3, 0, 1, 2))
            weight_disp = utils.combine_and_fit(weight,
                                                is_weights=True,
                                                disp_w=400)
            cv2.imshow('weight_disp', weight_disp)

        val = cv2.waitKey(1) & 0xFF

        if val == ord('q'):
            break
        elif val == ord('w'):
            if layer_idx < 22:
                layer_idx += 1
                is_changed = True
                print(model.layers[layer_idx].name)
        elif val == ord('s'):
            if layer_idx > 1:
                layer_idx -= 1
                is_changed = True
                print(model.layers[layer_idx].name)
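utils.normalize_weights(weight, 'conv') above presumably rescales the first-layer convolution kernels into a displayable range before combine_and_fit tiles them. A minimal min-max rescaling that would serve that purpose is sketched below; this is an assumption about the helper's behavior, not its actual code.

import numpy as np

def normalize_weights_for_display(weight, mode='conv'):
    """Min-max rescale conv kernels to [0, 1] so they can be shown as
    images. Illustrative only; the real utils helper may differ."""
    w = weight.astype(np.float32)
    return (w - w.min()) / (w.max() - w.min() + 1e-8)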
Example #13
def experiment(param_path, category, dataset_dir, steps, n, used_wn=True):
    """Run a dataset-adv experiment. Pull from DB or use defaults."""
    # Set default training parameters
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(('Using device: {}'.format(device)))

    # Load dataset params and get name of dataset
    old_cat = np.copy(category)
    category = category.replace('_', ' ')
    assert os.path.exists(param_path), 'Could not find {}'.format(param_path)
    dataset = param_path.split(os.path.sep)[-1].split('_')[0]
    params = np.load(param_path)
    if used_wn:
        # Dataset optimized using weight norm
        if dataset == 'biggan':
            params = {
                'noise_vector_center_g': params.f.noise_vector_center_g,
                'noise_vector_center_v': params.f.noise_vector_center_v,
                'noise_vector_scale_g': params.f.noise_vector_scale_g,
                'noise_vector_scale_v': params.f.noise_vector_scale_v,
                'noise_vector_factor_g': params.f.noise_vector_factor_g,
                'noise_vector_factor_v': params.f.noise_vector_factor_v,
            }
        elif dataset == 'psvrt':
            raise NotImplementedError
    else:
        if dataset == 'biggan':
            params = {
                'noise_vector_center': params.f.noise_vector_center,
                'noise_vector_scale': params.f.noise_vector_scale,
                'noise_vector_factor': params.f.noise_vector_factor
            }
        elif dataset == 'psvrt':
            raise NotImplementedError
    if dataset == 'biggan':
        num_classes = 1000
    elif dataset == 'psvrt':
        num_classes = 2
    else:
        raise NotImplementedError(dataset)

    # Add hyperparams and model info to DB
    dt = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d-%H_%M_%S')
    ds_name = os.path.join(dataset_dir, '{}_{}'.format(dataset, dt))

    # Create results directory
    utils.make_dir(dataset_dir)
    utils.make_dir(ds_name)

    # Initialize dataset object
    if dataset == 'biggan':
        img_size = 256
    elif dataset == 'psvrt':
        img_size = 80  # 160
    ds = import_module('data_generators.{}'.format(dataset))
    P = ds.Generator(dataset=dataset,
                     img_size=img_size,
                     device=device,
                     siamese=False,
                     task='sd',
                     wn=False,
                     num_classes=num_classes)
    P = P.to(device)
    if used_wn:
        lambda_0_r = utils.normalize_weights(P=params,
                                             name='noise_vector',
                                             prop='center')
        lambda_0_scale_r = utils.normalize_weights(P=params,
                                                   name='noise_vector',
                                                   prop='scale')
        lambda_0_factor_r = utils.normalize_weights(P=params,
                                                    name='noise_vector',
                                                    prop='factor')
    else:
        raise NotImplementedError

    # Pull out original params
    class_vector = one_hot_from_names([category]).repeat(n, 0)
    lambda_0 = P.dists[0]['lambda_0'].cpu().numpy()
    lambda_0_scale = P.dists[0]['lambda_0_scale'].cpu().numpy()
    lambda_0_factor = P.dists[0]['lambda_0_factor'].cpu().numpy()
    interp = np.linspace(0, 1, steps)
    f = plt.figure()
    for b, itp in tqdm(enumerate(interp),
                       desc='Interpolation steps',
                       total=steps):
        # Do linear combinations of original and final
        i_lambda_0 = (1 - itp) * lambda_0 + itp * lambda_0_r
        i_lambda_scale_0 = (1 - itp) * lambda_0_scale + itp * lambda_0_scale_r
        i_lambda_factor_0 = (1 - itp) * lambda_0_factor + itp * lambda_0_factor_r
        it_params = {
            'noise_vector_center': i_lambda_0,
            'noise_vector_scale': i_lambda_scale_0,
            'noise_vector_factor': i_lambda_factor_0
        }
        with torch.no_grad():
            images, labels = P.sample_batch(n,
                                            force_params=it_params,
                                            class_vector=class_vector,
                                            force_mean=True)
        images = images.detach().cpu().numpy().transpose(0, 2, 3, 1)
        norm_mean = P.norm_mean.cpu().numpy().reshape(1, 1, 3)
        norm_std = P.norm_std.cpu().numpy().reshape(1, 1, 3)
        images = images * norm_std + norm_mean
        row_ids = np.arange(b, steps * n, steps) + 1
        for idx, row in enumerate(row_ids):
            plt.subplot(n, steps, row)
            plt.axis('off')
            plt.imshow(images[idx])
    f.text(0.05,
           0.5,
           'Mean image -2 to +2 SDs',
           va='center',
           rotation='vertical')
    f.text(0.5,
           0.04,
           'Interpolation from start to optimized parameters',
           ha='center')
    plt.gcf().subplots_adjust(left=0.15)
    # plt.tight_layout()
    # plt.ylabel('Mean image -2 to +2 SDs')
    # plt.xlabel('Interpolation from start to optimized parameters')
    plt.savefig(os.path.join(ds_name, '{}.pdf'.format(old_cat)), dpi=300)
    plt.show()