Example #1
    def _init_params(self, X):
        init = self.init
        n_samples, n_features = X.shape
        n_components = self.n_components

        if init == 'kmeans':
            # K-means initialization: mixing coefficients from cluster sizes,
            # Gaussian components from cluster means and covariances.
            km = Kmeans(n_components)
            clusters, mean, cov = km.cluster(X)
            coef = sp.array([c.shape[0] / n_samples for c in clusters])
            comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                     for i in range(n_components)]
        elif init == 'rand':
            # Random initialization: normalized random mixing coefficients,
            # means drawn from the data, covariances from nearest-mean clusters.
            coef = sp.absolute(sprand.randn(n_components))
            coef = coef / coef.sum()
            means = X[sprand.permutation(n_samples)[0:n_components]]
            clusters = [[] for i in range(n_components)]
            for x in X:
                idx = sp.argmin([spla.norm(x - mean) for mean in means])
                clusters[idx].append(x)

            comps = []
            for k in range(n_components):
                mean = means[k]
                cov = sp.cov(clusters[k], rowvar=0, ddof=0)
                comps.append(multivariate_normal(mean, cov, allow_singular=True))
        else:
            raise ValueError("unknown init method: {}".format(init))

        self.coef = coef
        self.comps = comps
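
Once `_init_params` has run, `self.coef` holds the mixing weights and `self.comps` the frozen Gaussian components. A minimal sketch (not part of the original class) of evaluating the resulting mixture density, assuming `multivariate_normal` above is `scipy.stats.multivariate_normal`:

import numpy as np

def mixture_density(model, X):
    # p(x) = sum_k coef_k * N(x | mean_k, cov_k)
    return sum(c * comp.pdf(X) for c, comp in zip(model.coef, model.comps))
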
Example #2
    def train(self, dataset, targets, lamda=0):
        """ dataset: matrix of dimensions n x input
            targets: column vector of dimension n x output """

        # Choose random center vectors from training set
        self.centers = random.permutation(dataset)[:self.hidden_length]

        # Calculate data variance
        self.variance = np.var(dataset)

        # Calculate activations of RBFs
        green_matrix = self.calc_activation(dataset)

        # Calculate output weights
        if lamda == 0:
            self.W = dot(pinv(green_matrix), targets)  # least squares via pseudoinverse
        else:
            green_matrix_transpose = np.transpose(green_matrix)
            # ridge (Tikhonov) regularization with parameter lamda
            self.W = dot(inv(dot(green_matrix_transpose, green_matrix) + lamda * np.identity(self.hidden_length)),
                         dot(green_matrix_transpose, targets))

        # Get error
        result = self.test(dataset)
        error = self.cost_function(targets, result)
        return error
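
The two branches agree in the limit: for a full-column-rank G, the ridge solution W = (G^T G + lamda*I)^-1 G^T Y reduces to the pseudoinverse solution W = pinv(G) Y as lamda goes to 0. A quick self-contained check with NumPy (the names below are illustrative, not taken from the class above):

import numpy as np

G = np.random.randn(50, 10)   # activations, n x hidden
Y = np.random.randn(50, 1)    # targets
lam = 1e-10                   # near-zero regularization

W_pinv = np.linalg.pinv(G) @ Y
W_ridge = np.linalg.solve(G.T @ G + lam * np.eye(10), G.T @ Y)
print(np.allclose(W_pinv, W_ridge, atol=1e-6))   # expected: True
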
Example #3
    def fit(self, X):
        n_samples, n_features = X.shape
        n_classes = self.n_classes
        max_iter = self.max_iter
        tol = self.tol

        rand_center_idx = sprand.permutation(n_samples)[0:n_classes]
        center = X[rand_center_idx].T
        responsibility = sp.zeros((n_samples, n_classes))

        for it in range(max_iter):
            # E step: assign each sample to its nearest center
            dist = sp.expand_dims(X, axis=2) - sp.expand_dims(center, axis=0)
            dist = spla.norm(dist, axis=1)**2
            min_idx = sp.argmin(dist, axis=1)
            responsibility.fill(0)
            responsibility[sp.arange(n_samples), min_idx] = 1

            # M step: move each center to the mean of its assigned samples
            center_new = sp.dot(X.T, responsibility) / sp.sum(responsibility, axis=0)
            diff = center_new - center
            print('K-Means: {0:5d} {1:.4e}'.format(it, spla.norm(diff) / spla.norm(center)))
            if spla.norm(diff) < tol * spla.norm(center):
                break

            center = center_new

        self.center = center.T
        self.responsibility = responsibility

        return self
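
A minimal usage sketch for the fit above, assuming the surrounding class is called KMeans and takes n_classes, max_iter, and tol in its constructor (the constructor is not shown in the example):

import numpy as np

X = np.vstack([np.random.randn(100, 2) + [5, 5],
               np.random.randn(100, 2) - [5, 5]])
km = KMeans(n_classes=2, max_iter=100, tol=1e-6)   # hypothetical constructor
km.fit(X)
print(km.center)   # two rows, near (5, 5) and (-5, -5)
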
Example #4
def generate_data(N=100, true_params=secret_true_params, seed=42):
    random.seed(seed)  # make the noise and the shuffle reproducible
    x = np.linspace(-2.5, 2.5, N)
    y1 = my_model(x, *true_params)
    y2 = 1.0 * random.normal(size=N)
    # Create the data
    data = np.array([x, y1 + y2]).T
    # Shuffle the data
    permuted_data = random.permutation(data)
    # Save the shuffled data
    np.savetxt("dataN%d.txt" % N, permuted_data)
    return permuted_data
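
For a 2-D array, random.permutation returns a copy with the rows shuffled, so each x stays paired with its y; that is why the (x, y) rows above survive the shuffle intact. A quick demonstration with NumPy's equivalent:

import numpy as np

a = np.array([[1, 10], [2, 20], [3, 30]])
print(np.random.permutation(a))
# rows come back in random order, e.g. [[3 30] [1 10] [2 20]],
# but each row is kept whole
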
Example #6
def setUpNextState(initial_state, server_prob, n_process=n_process):
    sum_initial_state = sum(initial_state)
    arrival_times = getRandomArrivalServiceTimes(n_process, arrival_rate, None)[0]
    time_start = arrival_times[sum_initial_state]
    # The next two lines avoid processing the whole queue: only the arrivals
    # that fall inside the simulated time window are kept.
    arrival_times = arrival_times[arrival_times <= (time_start + time_interval)]
    n_process = arrival_times.size

    initial_states = zip(initial_state, [arrival_times[sum_initial_state]] * len(initial_state))
    # Force the first sum_initial_state processes onto servers so that each
    # server starts with its prescribed initial queue length; route the rest
    # randomly according to server_prob.
    server_address_table_forced = random.permutation(concatenate([ones(state) * i for i, state in enumerate(initial_state)]))
    server_address_table = concatenate([server_address_table_forced, digitize(uniform.rvs(size=n_process - sum_initial_state), cumsum(server_prob))])
    server_arrival_times = [arrival_times[server_address_table == i] for i in range(n_server)]
    server_service_times = [getRandomArrivalServiceTimes((server_address_table == i).sum(), None, service_rate[i])[1] for i in range(n_server)]
    results = map(mm1, server_arrival_times, server_service_times, initial_states)
    final_state = [r['queue_size_by_time'](time_start + time_interval, max_no_people) if r else 0 for r in results]
    return tuple(final_state)
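
The digitize(uniform.rvs(...), cumsum(server_prob)) idiom above draws a server index for each unforced arrival according to the probabilities in server_prob: a uniform sample is bucketed against the cumulative probabilities. A self-contained illustration:

import numpy as np
from scipy.stats import uniform

server_prob = np.array([0.25, 0.25, 0.5])
draws = np.digitize(uniform.rvs(size=10000), np.cumsum(server_prob))
# draws are server indices in {0, 1, 2}; their empirical frequencies
# should be close to server_prob
print(np.bincount(draws) / 10000.0)
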
Example #7
    def fit(self, X, Y):
        """Fit the network to the given X, Y.

        :param X: matrix of dimensions n x indim
        :type X: numpy.ndarray
        :param Y: column vector of dimension n x 1
        :type Y: numpy.ndarray
        """

        # choose random center vectors from training set
        rnd_idx = random.permutation(X.shape[0])[:self.numCenters]
        self.centers = [X[i, :] for i in rnd_idx]

        # calculate activations of RBFs
        G = self._calcAct(X)

        # calculate output weights (pseudoinverse)
        self.W = dot(pinv(G), Y)
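
_calcAct is not shown in this example. A minimal sketch of what it might compute, written as a standalone function and assuming Gaussian basis functions with a width parameter beta (both the Gaussian form and beta are assumptions, not part of the snippet):

import numpy as np

def calc_act(X, centers, beta):
    # G[i, j] = exp(-beta * ||x_i - c_j||^2), one column per RBF center
    G = np.zeros((X.shape[0], len(centers)))
    for j, c in enumerate(centers):
        G[:, j] = np.exp(-beta * np.linalg.norm(X - c, axis=1) ** 2)
    return G
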
Example #8
    def set_random_centers(self, X):
        # choose random center vectors from training set
        rnd_idx = random.permutation(X.shape[0])[:self.n_centers]
        self.centers = [X[i, :] for i in rnd_idx]
        self.beta = np.full(self.n_centers, 8.0)
        print('center: {}'.format(self.centers))
Example #9
arrival_rate = 1
service_rate = 1/6.0 * ones(3) #array([1.09, 2.000005, 1])
server_prob = array([0.25, 0.25, 0.5])
n_server = server_prob.size
n_process = 100
time_interval = 10

initial_state = (7,2,3)
arrival_times = getRandomArrivalServiceTimes(n_process, arrival_rate, None)[0]
sum_initial_state = sum(initial_state)
# preparing initial state for each mm1 simulation

initial_states = zip(initial_state, [arrival_times[sum_initial_state]] * len(initial_state))

# maps kth process to ith server
server_address_table_forced = random.permutation(concatenate([ones(state) * i for i,state in enumerate(initial_state)]))
print "forced server address table", server_address_table_forced
server_address_table = concatenate([server_address_table_forced, digitize(uniform.rvs(size = n_process-sum_initial_state), cumsum(server_prob))])
server_arrival_times = [arrival_times[server_address_table == i] for i in range(n_server)]
server_service_times = [
    getRandomArrivalServiceTimes((server_address_table == i).sum(), None, service_rate[i])[1]
    for i in range(n_server)
]

results = list(map(mm1, server_arrival_times, server_service_times, initial_states))
print("Mean QueueSize(1)", array([mean(result['queue_size']) for result in results]))
print("Results[0]['queue_size']", results[0]['queue_size'])
print("Results[1]['queue_size']", results[1]['queue_size'])
print("Results[2]['queue_size']", results[2]['queue_size'])
time_start = arrival_times[sum_initial_state]  # unclear whether this should be sum_initial_state + 1
print("queue_size_by_time", time_start, [r['queue_size_by_time'](time_start) for r in results])
Example #10
filenames = []

testset = []

indir = 'train'
for file in os.listdir(indir):
    if file.endswith('.jpg'):
        filenames.append(file)

testdir = 'test'
for file in os.listdir(testdir):
    if file.endswith('.jpg'):
        testset.append(file)

for i in range(len(whales)):
    if labelsDict[whales[i]] == -1:
        labelsDict[whales[i]] = []
    labelsDict[whales[i]].append(images[i])

testset = [int(re.search(r"w_(\d+)\.jpg", x).group(1)) for x in testset]

for w in set(whales):
    allExamplesForW = labelsDict[w]
    allExamplesForW = [x for x in allExamplesForW if x not in testset]
    allExamplesForW = random.permutation(allExamplesForW)
    # move roughly half of the examples into the validation set, randomly
    # rounding up or down when the count is odd
    half = len(allExamplesForW) // 2 + random.randint(0, len(allExamplesForW) % 2 + 1)
    for i in allExamplesForW[0:half]:
        print("copying %d\n" % i)
        os.rename("%s/w_%d.jpg" % (indir, i), "%s/w_%d.jpg" % (validationDir, i))
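
The permute-then-slice pattern above is the usual way to move a random half of a set without replacement. A stripped-down version of the same idea:

import numpy as np

items = np.arange(10)
shuffled = np.random.permutation(items)
train, validation = shuffled[:5], shuffled[5:]
print(train, validation)   # two disjoint random halves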


Example #11
    def get_random_batch(self, batchSize):
        randInd = random.permutation(len(self.images))[:batchSize]
        return self.read_images([os.path.join(self.inputDir, x) for x in self.images[randInd]]), self.yTrain[randInd]
Example #12
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from scipy import random as sprd

from lstm import LSTM

n = 10
x = sprd.permutation(n)   # a random ordering of the integers 0..n-1
t = list(reversed(x))     # target sequence: the input reversed
hidden_size = 4
vocab_size = n

lstm = LSTM(vocab_size, hidden_size)
lstm.check_gradient(x, t)
Example #13
import numpy as np
import matplotlib.pyplot as plt
from numpy import random   # assumed imports; the snippet did not include its own

N = 100   # number of samples; N was used below but not defined in the snippet
A = 5
E = 1.0
L = 2.0
myseed = 42

# Initialize the random generator with the seed
random.seed(myseed)

# Hidden Model
x = np.linspace(-L, L, N)
y1 = A * np.cos(0.5*np.pi*x/L)
y2 = E * random.normal(size=N)

# Show
plt.plot(x, y1+y2, 'bs', alpha=0.5, label="Measurement")
plt.plot(x, y1,    'k-', lw=2.0, label="Deterministic relation")
plt.xlim([-2.5,2.5])
plt.ylim([-5,10])
plt.xlabel("x []")
plt.ylabel("y []")
plt.legend(numpoints=1, loc="lower center")
#plt.savefig("images/dataN%d.png"%N)
plt.show()

# Shuffle the data
data = np.array([x,y1+y2]).T
data = random.permutation(data)

# Save
np.savetxt("../data/data.txt", data)
Example #14
    def get_batch(self, size):
        randInd = random.permutation(len(self.train))[:size]
        return self.read_images([os.path.join(self.inputDir, x) for x in self.train[randInd]]), self.trainLabels[randInd]
Example #15
def siPermutation(X, times=1):
    for i in range(times):
        X = random.permutation(X)
    return X
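
Since one call to permutation already produces a uniformly random shuffle, repeating it does not make the result any more random; times > 1 only adds work. Usage, assuming random here is numpy.random (or SciPy's old re-export of it):

import numpy as np

print(siPermutation(np.arange(5)))            # e.g. [3 0 4 1 2]
print(siPermutation(np.arange(5), times=3))   # same distribution as times=1
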
Example #16
def main():
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    headerhelp = \
'''
'''
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=headerhelp)
    parser.add_argument('inpath', help='The input MTZ file.')
    parser.add_argument('-n',
                        '--nobs-per-shell',
                        type=int,
                        default=1000,
                        help='Target number of observations per resolution shell.')

    args = parser.parse_args()

    from pymtz import read_mtz_file
    from scipy import random, corrcoef, array, nonzero, zeros, ones
    from scipy.stats import spearmanr
    from matplotlib.pyplot import figure, show

    print("Reading ", args.inpath, end='...', flush=True)
    dataset = read_mtz_file(args.inpath)
    print("done")
    Nmeas = dataset.GetReflectionNumber()
    print("Found %d measurements" % Nmeas)
    ashes = dataset.GetHKLhash()
    uniqhkl = set(ashes)
    Nunq = len(uniqhkl)
    Nshells = int(Nunq / args.nobs_per_shell)
    Ihl, shl = array(dataset.reflections).T[nonzero([
        (t in ['I', 'SIGI']) for t in dataset.GetLabels()
    ])[0]]
    whl = 1 / shl**2
    hkldict = dict.fromkeys(uniqhkl)
    for (i, ash) in enumerate(ashes):
        if hkldict[ash]:
            hkldict[ash].append(i)
        else:
            hkldict[ash] = [i]
    print("Found %d unique reflections" % (Nunq))
    print("Will subdivide data into %d resolution shells" % Nshells)
    edges = dataset.GetResolutionShells(Nshells)
    shell_column = dataset.GetShellColumn(edges)
    d12, cc12, sp12, p12, Ish = [], [], [], [], []
    for i in range(Nshells):
        Ish1, Ish2, shkl = [], [], []
        shind = (shell_column == i)
        for ash in set(ashes[shind]):
            mcity = len(hkldict[ash])
            if mcity > 1:
                Nhalf = int(mcity / 2)
                wr = list(ones(Nhalf)) + list(zeros(Nhalf))
                if len(wr) < mcity:
                    wr.append(random.randint(0, 2))  # coin flip: 0 or 1 (upper bound exclusive)
                wr = random.permutation(wr)
                Ish1.append(
                    sum(wr * Ihl[hkldict[ash]] * whl[hkldict[ash]]) /
                    sum(wr * whl[hkldict[ash]]))
                Ish2.append(
                    sum((1 - wr) * Ihl[hkldict[ash]] * whl[hkldict[ash]]) /
                    sum((1 - wr) * whl[hkldict[ash]]))
                shkl.append(dataset.hash2hkl(ash))
        cc12.append(corrcoef(Ish1, Ish2)[0][1])
        sp12t, p12t = spearmanr(Ish1, Ish2)
        sp12.append(sp12t)
        p12.append(p12t)
        Ish.append([Ish1, Ish2, shkl])
        d12.append(dataset.GetResolutionColumn()[shind].mean())
        print("%4d %7.2f %7.2f %7.2f %8d %5.3f %5.3f %6.3g" %
              (i + 1, edges[i], edges[i + 1], d12[-1], sum(shind), cc12[-1],
               sp12t, p12t))
    Ish_fig = figure(FigureClass=KDWindow)
    Ish_fig.set_data([Ish, array(d12), cc12, sp12])
    Ish_fig.plot()
    Ish_fig.canvas.mpl_connect('key_press_event', Ish_fig.onkeypress)
    show()
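
The wr vector built inside the loop randomly assigns each repeated observation of a reflection to one of two half-datasets: half ones, half zeros, shuffled, plus one coin-flipped extra when the multiplicity is odd; CC1/2 is then the correlation between the two weighted half-dataset means. A self-contained illustration of the split for a multiplicity-5 reflection:

import numpy as np

mcity = 5                               # multiplicity of one reflection
half = mcity // 2
wr = list(np.ones(half)) + list(np.zeros(half))
if len(wr) < mcity:
    wr.append(np.random.randint(0, 2))  # odd multiplicity: coin-flip the extra
wr = np.random.permutation(wr)
print(wr, 1 - wr)                       # complementary half-dataset weights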