Example #1
 def __init__(self):
     self.sampling_mpu_0x68 = Sampling(AK8963_ADDRESS, MPU9050_ADDRESS_68,
                                       MPU9050_ADDRESS_68, 1, GFS_1000,
                                       AFS_8G, AK8963_BIT_16,
                                       AK8963_MODE_C100HZ)
     self.sampling_mpu_0x69 = Sampling(AK8963_ADDRESS, MPU9050_ADDRESS_69,
                                       None, 1, GFS_1000, AFS_8G,
                                       AK8963_BIT_16, AK8963_MODE_C100HZ)
Example #2
 def fisher_sample(self):
     if self.sampler is None:
         print("preprocessing")
         if self.dico_fisher is None:
             print('you need to compute the fisher information first')
             return
         self.sampler = Sampling(self.build_mean(), self.dico_fisher)
         print('sampling ok')
     config = self.network.get_config()
     if self.network.__class__.__name__ == 'Sequential':
         new_model = Sequential.from_config(config)
     else:
         new_model = Model.from_config(config)
     new_params = self.sampler.sample()
     """
     means = self.sampler.mean
     
     for key in means:
         if np.max(np.abs(means[key] - new_params[key]))==0:
             print key
     print('kikou')
     import pdb; pdb.set_trace()
     """
     #tmp_prob = self.sampler.prob(new_params)
     new_model.compile(loss=self.network.loss,
                       optimizer=self.network.optimizer.__class__.__name__.lower(),
                       metrics=self.network.metrics)
     new_model.set_weights(self.network.get_weights())
     self.copy_weights(new_model, new_params)
     return new_model
Example #3
 def test_sampling(self):
     init = initilaze_topic_model()
     init.initilize()
     sampleman = Sampling(init.xcorpus, init.ycorpus)
     sampleman.sampling(init.TOPICS, init.xcounts, init.ycounts, init.docid,
                        init.different_word)
     print(sampleman.xcorpus)
     print(sampleman.ycorpus)
Example #4
    def __init__(self, data_infile, fasttext_model_path, triplet_margin=0.1):

        self.sampling = Sampling(data_infile, fasttext_model_path)

        self.amount_negative_names = 1
        self.triplet_margin = triplet_margin
        self.anchor_margin = 0

        self.loss_weights = {'synonym': 1, 'proto': 1}

        torch.autograd.set_detect_anomaly(True)
Example #5
    def __init__(self, data_infile, fasttext_model_path, triplet_margin=0.1):

        self.sampling = Sampling(data_infile, fasttext_model_path)

        self.amount_negative_names = 1
        self.triplet_margin = triplet_margin
        self.anchor_margin = 0

        self.loss_weights = {
            'semantic_similarity': 1,
            'contextual': 1,
            'grounding': 1
        }

        torch.autograd.set_detect_anomaly(True)
Example #6
 def __init__(self, latent_dim, seed):
     super(encoder, self).__init__()
     np.random.seed(seed)
     self.layer_1 = Conv2D(
         filters=32,
         kernel_size=(4, 4),
         activation="relu",
         strides=2,
         padding="same",
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.layer_2 = Conv2D(
         filters=32,
         kernel_size=(4, 4),
         activation="relu",
         strides=2,
         padding="same",
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.layer_3 = Conv2D(
         filters=64,
         kernel_size=(4, 4),
         activation="relu",
         strides=2,
         padding="same",
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.layer_4 = Conv2D(
         filters=64,
         kernel_size=(4, 4),
         activation="relu",
         strides=2,
         padding="same",
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.layer_5 = Dense(
         units=128,
         activation='relu',
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.dense_log_var = Dense(
         units=latent_dim,
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.dense_mean = Dense(
         units=latent_dim,
         kernel_initializer=tf.keras.initializers.HeNormal(seed))
     self.sampling = Sampling()
     self.batch_norm_1 = BatchNormalization()
     self.batch_norm_2 = BatchNormalization()
     self.batch_norm_3 = BatchNormalization()
     self.batch_norm_4 = BatchNormalization()
     self.flatten = Flatten()
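
The Sampling() layer instantiated near the end is not defined in this excerpt. In Keras VAE encoders it is usually the reparameterization-trick layer that turns (mean, log-variance) into a differentiable draw from N(mean, exp(log_var)); a minimal sketch under that assumption:

import tensorflow as tf
from tensorflow.keras import layers

class Sampling(layers.Layer):
    # Assumed implementation (the original class is not shown):
    # reparameterization trick. Draws z = mean + exp(0.5 * log_var) * eps
    # with eps ~ N(0, I), so gradients can flow through the random draw.
    def call(self, inputs):
        z_mean, z_log_var = inputs
        epsilon = tf.random.normal(shape=tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon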
Example #7
    def __create_sequences(self, X, labels, sampling=True):
        sequences = self.tokenizer.texts_to_sequences(X)

        data = pad_sequences(sequences, padding='post',
                             maxlen=self.params['max_length'])

        indices = np.arange(data.shape[0])
        np.random.shuffle(indices)
        data = data[indices]
        labels = labels[indices]

        if sampling:
            sample = Sampling(2., .5)
            x_train, y_train = sample.perform_sampling(data, labels, [0, 1])
        else:
            x_train, y_train = data, labels

        return x_train, y_train
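
Sampling(2., .5) and perform_sampling are not shown in this excerpt. Judging by the arguments, the constructor plausibly takes an over-sampling ratio for the minority class and a keep ratio for the majority class, with perform_sampling rebalancing a binary dataset. A purely hypothetical sketch of that interface, assuming integer class labels:

import numpy as np

class Sampling:
    # Hypothetical rebalancing sampler (assumed semantics; the original
    # class is not shown): oversample the minority class by over_ratio,
    # keep under_ratio of the majority class.
    def __init__(self, over_ratio, under_ratio):
        self.over_ratio = over_ratio
        self.under_ratio = under_ratio

    def perform_sampling(self, data, labels, classes):
        a_idx = np.where(labels == classes[0])[0]
        b_idx = np.where(labels == classes[1])[0]
        min_idx, maj_idx = (a_idx, b_idx) if len(a_idx) < len(b_idx) else (b_idx, a_idx)
        # repeat minority rows, drop a share of the majority rows
        min_idx = np.random.choice(min_idx, int(len(min_idx) * self.over_ratio))
        maj_idx = np.random.choice(maj_idx, int(len(maj_idx) * self.under_ratio),
                                   replace=False)
        idx = np.concatenate([min_idx, maj_idx])
        np.random.shuffle(idx)
        return data[idx], labels[idx]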
Example #8
        with open(path, "r", encoding=encoding) as f:
            for line in f:
                words = tokenize(line.strip())
                if len(words) < window_size + 1:
                    continue
                for i in range(len(words)):
                    # context: up to window_size words on each side of
                    # position i (slicing clamps at the sentence boundaries)
                    example = (
                        words[max(0, i - window_size):i] +
                        words[i + 1:i + window_size + 1],
                        words[i])
                    examples.append(Example.fromlist(example, fields))
        super(CBOWDataset, self).__init__(examples, fields, **kwargs)


if __name__ == '__main__':
    test_path = '/home/lightsmile/NLP/corpus/novel/test.txt'
    dataset = CBOWDataset(test_path, Fields)
    print(len(dataset))
    print(dataset[0])
    print(dataset[0].context)
    print(dataset[0].target)

    TARGET.build_vocab(dataset)

    from sampling import Sampling

    samp = Sampling(TARGET.vocab)

    print(samp.sampling(3))
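
The sampling module imported above is not included here. Given that Sampling is built from TARGET.vocab and sampling(3) draws three items, a word2vec-style negative sampler over the vocabulary's unigram distribution (raised to the conventional 0.75 power) is a plausible reading; a sketch under that assumption:

import numpy as np

class Sampling:
    # Hypothetical negative sampler over a torchtext vocab (assumed; the
    # original module is not shown). Uses the word2vec unigram^0.75 trick.
    def __init__(self, vocab, power=0.75):
        self.itos = vocab.itos
        weights = np.array([vocab.freqs.get(w, 1) for w in self.itos],
                           dtype=float) ** power
        self.probs = weights / weights.sum()

    def sampling(self, n):
        # draw n tokens according to the smoothed unigram distribution
        idx = np.random.choice(len(self.itos), size=n, p=self.probs)
        return [self.itos[i] for i in idx]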
Example #9
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(X)
sequences = tokenizer.texts_to_sequences(X)
word_index = tokenizer.word_index

data = pad_sequences(sequences, padding='post', maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]

num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
sample = Sampling(2., .5)
x_train, y_train = sample.perform_sampling(data[:-num_validation_samples],
                                           labels[:-num_validation_samples],
                                           [0, 1])
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
print('Number of entries in each category:')
print('training: ', y_train.sum(axis=0))
print('validation: ', y_val.sum(axis=0))

model = Word2Vec.load('1ft.modelFile')

embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    # copy pretrained vectors where available; rows for out-of-vocabulary
    # words keep their random initialization
    if word in model.wv:
        embedding_matrix[i] = model.wv[word]
Example #10
#!/usr/bin/env python

import numpy as np
import fitsio
import scipy.optimize as optimize
from sampling import Sampling

sampling = Sampling(nsamples=1000)
sampling.set_flux(total_flux=1000., noise=0.001)


def mem_function(u, A, f, llambda):
    # maximum-entropy objective: residual chi^2 term plus entropy penalty,
    # val = ||A u - f||^2 + llambda * sum(u * log(u)),
    # with the analytic gradient 2 A^T (A u - f) + llambda * (1 + log(u))
    Ar = (A.dot(u) - f)
    As = (Ar**2).sum()
    Bs = (u * np.log(u)).sum()
    val = As + llambda * Bs
    grad = 2. * A.T.dot(Ar) + llambda * (1. + np.log(u))
    return (val, grad)


def mem_fit(sampling, llambda=1.e-2):
    # flat positive starting image; the lower bound keeps u * log(u) defined
    S_M0 = np.ones(sampling.nx * sampling.ny)
    bounds = list(zip([1.e-5] * len(S_M0), [None] * len(S_M0)))
    flux = sampling.flux
    results = optimize.minimize(mem_function,
                                S_M0,
                                args=(sampling.A, flux, llambda),
                                method='L-BFGS-B',
                                jac=True,
                                bounds=bounds)
    return results
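
Because mem_function returns the objective and its gradient together, minimize is called with jac=True. A usage sketch, assuming results.x holds the flattened solution in row-major (ny, nx) order:

# fit the maximum-entropy image for the sampling configured above
results = mem_fit(sampling, llambda=1.e-2)
image = results.x.reshape(sampling.ny, sampling.nx)
print('converged:', results.success)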
Example #11
def comp_sketch(matrix, objective, load_N=False, save_N=False, N_dir='../N_file/', **kwargs):
    """
    Given matrix A, compute a sketch PA of A and perform further operations on PA.
    Returns the desired quantity together with the total running time.

    Parameters:
        matrix: a RowMatrix object storing the matrix [A b]
        objective: either 'x' or 'N'
            'x': return the solution to the problem min_x || PA[:,:-1]x - PA[:,-1] ||_2
            'N': return a square matrix N such that PA[:,:-1]*inv(N) has orthonormal columns
        load_N: load precomputed N matrices when available (reduces the running time of sampling sketches)
        save_N: save the computed N matrices for future use
        sketch_type: either 'projection' or 'sampling'
        projection_type: cw, gaussian, rademacher or srdht
        c: projection size
        s: sampling size (for sampling sketches only)
        k: number of independent trials to run
    """

    sketch_type = kwargs.get('sketch_type')

    if not os.path.exists(N_dir):
        os.makedirs(N_dir)

    if objective == 'x':
        
        if sketch_type == 'projection':
            projection = Projections(**kwargs)
            t = time.time()
            x = projection.execute(matrix, 'x', save_N)
            t = time.time() - t

            if save_N:
                logger.info('Saving N matrices from projections!')
                N = [a[0] for a in x]
                x = [a[1] for a in x]
                # saving N
                filename = N_dir + 'N_' + matrix.name + '_projection_' + kwargs.get('projection_type') + '_c' + str(int(kwargs.get('c'))) + '_k' + str(int(kwargs.get('k'))) + '.dat'
                data = {'N': N, 'time': t}
                pickle_write(filename, data)
 
        elif sketch_type == 'sampling':
            s = kwargs.get('s')
            new_N_proj = 0
            N_proj_filename = N_dir + 'N_' + matrix.name + '_projection_' + kwargs.get('projection_type') + '_c' + str(int(kwargs.get('c'))) + '_k' + str(int(kwargs.get('k'))) + '.dat'

            if load_N and os.path.isfile(N_proj_filename):
                logger.info('Found N matrices from projections, loading them!')
                result = pickle_load(N_proj_filename)
                N_proj = result['N']
                t_proj = result['time']
            else: # otherwise, compute it
                t = time.time()
                projection = Projections(**kwargs)
                N_proj = projection.execute(matrix, 'N')
                t_proj = time.time() - t
                new_N_proj = 1

            sampling = Sampling(N=N_proj)
            t = time.time()
            x = sampling.execute(matrix, 'x', s, save_N)
            t = time.time() - t + t_proj

            if save_N and new_N_proj:
                logger.info('Saving N matrices from projections!')
                data = {'N': N_proj, 'time': t_proj}
                pickle_write(N_proj_filename, data)

            if save_N:
                logger.info('Saving N matrices from sampling!')
                N = [a[0] for a in x]
                x = [a[1] for a in x]
                filename = N_dir + 'N_' + matrix.name + '_sampling_s' + str(int(kwargs.get('s'))) + '_' + kwargs.get('projection_type') + '_c' + str(int(kwargs.get('c'))) + '_k' + str(int(kwargs.get('k'))) + '.dat'
                data = {'N': N, 'time': t}
                pickle_write(filename, data)

        else:
            raise ValueError('Please enter a valid sketch type!')
        return x, t

    elif objective == 'N':
        if sketch_type == 'projection':
            N_proj_filename = N_dir + 'N_' + matrix.name + '_projection_' + kwargs.get('projection_type') + '_c' + str(int(kwargs.get('c'))) + '_k' + str(int(kwargs.get('k'))) + '.dat'

            if load_N and os.path.isfile(N_proj_filename):
                logger.info('Found N matrices from projections, loading them!')
                result = pickle_load(N_proj_filename)
                N = result['N']
                t = result['time']
            else:
                t = time.time()
                projection = Projections(**kwargs)
                N = projection.execute(matrix, 'N')
                t = time.time() - t

                if save_N:
                    logger.info('Saving N matrices from projections!')
                    data = {'N': N, 'time': t}
                    pickle_write(N_proj_filename, data)

        elif sketch_type == 'sampling':
            s = kwargs.get('s')
            new_N_proj = 0
            new_N_samp = 0

            N_samp_filename = N_dir + 'N_' + matrix.name + '_sampling_s' + str(int(kwargs.get('s'))) + '_' + kwargs.get('projection_type') + '_c' + str(int(kwargs.get('c'))) + '_k' + str(int(kwargs.get('k'))) + '.dat'
            N_proj_filename = N_dir + 'N_' + matrix.name + '_projection_' + kwargs.get('projection_type') + '_c' + str(int(kwargs.get('c'))) + '_k' + str(int(kwargs.get('k'))) + '.dat'

            if load_N and os.path.isfile(N_samp_filename):
                logger.info('Found N matrices from sampling, loading them!')
                result = pickle_load(N_samp_filename)
                N = result['N']
                t = result['time']

            elif load_N and os.path.isfile(N_proj_filename):
                logger.info('Found N matrices from projections, loading them!')
                result = pickle_load(N_proj_filename)
                N_proj = result['N']
                t_proj = result['time']

                sampling = Sampling(N=N_proj)
                t = time.time()
                N = sampling.execute(matrix, 'N', s)
                t = time.time() - t + t_proj
                new_N_samp = 1

            else:
                t = time.time()
                projection = Projections(**kwargs)
                N_proj = projection.execute(matrix, 'N')
                t_proj = time.time() - t
                new_N_proj = 1

                t = time.time()
                sampling = Sampling(N=N_proj)
                N = sampling.execute(matrix, 'N', s)
                t = time.time() - t + t_proj
                new_N_samp = 1

            if save_N and new_N_proj:
                logger.info('Saving N matrices from projections!')
                data = {'N': N_proj, 'time': t_proj}
                pickle_write(N_proj_filename, data)

            if save_N and new_N_samp:
                logger.info('Saving N matrices from sampling!')
                data = {'N': N, 'time': t}
                pickle_write(N_samp_filename, data)

        else:
            raise ValueError('Please enter a valid sketch type!')
        return N, t
    else:
        raise ValueError('Please enter a valid objective!')
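
A usage sketch for comp_sketch. The RowMatrix object and the argument values below are placeholders; the keyword names follow the docstring above:

# hypothetical call: a sampling sketch of size s = 100 built on top of a
# Clarkson-Woodruff ('cw') projection of size c = 200, 5 independent trials
x, t = comp_sketch(matrix, 'x',
                   load_N=True, save_N=True,
                   sketch_type='sampling', projection_type='cw',
                   c=200, s=100, k=5)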
Example #12
path1 = "/Users/liumeiyu/Downloads/IMG_7575.JPG"
path2 = "/Users/liumeiyu/Downloads/test1.jpg"
path3 = "/Users/liumeiyu/Downloads/test2.jpg"

A = Histogram(path1)
B = Smooth(path1)
C = Change(path1)
D = Base(path1)
E = Binary(path1)
F = D_E(path1)
G = Warp(path1)
H = Cvt(path1)
K = Edge_detection(path1)
L = Segmentation(path1)
M = Mosaic(path3)
N = Sampling(path1)
P = Fusion(path1)

# A.img_histogram_trans()
# A.img_histogram()

# B.linear_smooth_np()
# B.linear_smooth()
# B.box_smooth()
# B.gaussian_smooth()
# B.median_smooth()
# B.median_smooth_x(5)

# C.fft_high_change(60)
# C.change_cv()