Example #1
def slice_sample(init_x, logprob, sigma=1.0, step_out=True, max_steps_out=1000, 
                 compwise=False, verbose=False):
    def direction_slice(direction, init_x):
        def dir_logprob(z):
            return logprob(direction*z + init_x)
    
        upper = sigma*npr.rand()
        lower = upper - sigma
        llh_s = np.log(npr.rand()) + dir_logprob(0.0)
    
        l_steps_out = 0
        u_steps_out = 0
        if step_out:
            while dir_logprob(lower) > llh_s and l_steps_out < max_steps_out:
                l_steps_out += 1
                lower       -= sigma
            while dir_logprob(upper) > llh_s and u_steps_out < max_steps_out:
                u_steps_out += 1
                upper       += sigma
            
        steps_in = 0
        while True:
            steps_in += 1
            new_z     = (upper - lower)*npr.rand() + lower
            new_llh   = dir_logprob(new_z)
            if np.isnan(new_llh):
                print(new_z, direction*new_z + init_x, new_llh, llh_s, init_x, logprob(init_x))
                raise Exception("Slice sampler got a NaN")
            if new_llh > llh_s:
                break
            elif new_z < 0:
                lower = new_z
            elif new_z > 0:
                upper = new_z
            else:
                raise Exception("Slice sampler shrank to zero!")

        if verbose:
            print "Steps Out:", l_steps_out, u_steps_out, " Steps In:", steps_in

        return new_z*direction + init_x
    
    if not init_x.shape:
        init_x = np.array([init_x])

    dims = init_x.shape[0]
    if compwise:
        ordering = list(range(dims))  # materialize so it can be shuffled in place
        npr.shuffle(ordering)
        cur_x = init_x.copy()
        for d in ordering:
            direction    = np.zeros((dims))
            direction[d] = 1.0
            cur_x = direction_slice(direction, cur_x)
        return cur_x
            
    else:
        direction = npr.randn(dims)
        direction = direction / np.sqrt(np.sum(direction**2))
        return direction_slice(direction, init_x)
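
A minimal usage sketch for slice_sample above, assuming numpy and numpy.random are bound to np and npr as the function body expects; the standard-normal log-density is illustrative only:

import numpy as np
import numpy.random as npr

def gauss_logprob(x):
    # log-density of a standard normal, up to an additive constant
    return -0.5 * np.sum(x ** 2)

x = np.zeros(2)
draws = []
for _ in range(2000):
    x = slice_sample(x, gauss_logprob, sigma=1.0)  # one slice-sampling update
    draws.append(x)
draws = np.array(draws)
print(draws.mean(axis=0), draws.std(axis=0))  # should be close to 0 and 1
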
Example #2
    def __init__(self, size=None, n=None):

        n = n if n else  256        
        self.size = size if size else (256, 256)

        self.order = len(self.size)
        
        # Generate WAY more numbers than we need
        # because we are throwing out all the numbers not inside a unit
        # sphere.  Something of a hack but statistically speaking
        # it should work fine... or crash.
        G = (random.uniform(size=2*self.order*n)*2 - 1).reshape(-1, self.order)

        # GAH! How do I generalize this?!
        #length = hypot(G[:,i] for i in range(self.order))

        if self.order == 1:
            length = G[:,0]
        elif self.order == 2:
            length = hypot(G[:,0], G[:,1])
        elif self.order == 3:
            length = hypot(G[:,0], G[:,1], G[:,2])
        
        self.G = (G[length < 1] / (length[length < 1])[:,newaxis])[:n,]
        self.P = arange(n, dtype=int32)
        
        random.shuffle(self.P)
        
        self.idx_ar = indices(2*ones(self.order), dtype=int8).reshape(self.order, -1).T
        self.drop = poly1d((-6, 15, -10, 0, 0, 1.0))
Example #3
def random_inds(n, k):
    """Return k indices randomly from arange(n) in ascending order."""
    tmp = arange(n)
    shuffle(tmp)
    inds = tmp[:k]
    inds.sort()
    return inds
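
A tiny usage sketch, assuming arange and shuffle are imported from numpy and numpy.random as the snippet implies:

from numpy import arange
from numpy.random import shuffle

picks = random_inds(10, 3)
print(picks)  # e.g. [2 5 9]: three distinct indices below 10, in ascending order
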
Example #4
def random_opt(varlist, init_list, dframe, print_out=False):
    '''Optimize the variable list by randomly adding variables,
       accepting an addition only if the score increases (local search for a maximum).'''

    vlist = list(init_list)
#    score, result = fit_train_score(vlist, dframe)
    result, scores = do_kfold_cv(dframe, vlist, n_folds=10)
    score = np.mean(scores)
    if print_out:
        print("  >>> iter init len %d, iter_score %.4f" % (len(vlist), score))
    offset = len(vlist)  # offset by length of initial vlist
    indices = list(range(len(varlist) - offset))
    rnd.shuffle(indices)
    for ix in indices:
        ilist = list(vlist)
        ilist.append(varlist[ix + offset])
#        iscore, iresult = fit_train_score(ilist, dframe)
        iresult, iscores = do_kfold_cv(dframe, ilist, n_folds=10)
        iscore = np.mean(iscores)
        if print_out:
            print("  >>> iter len %d, iter_score %.4f" % (len(ilist), iscore))
        if iscore > score:
            vlist = list(ilist)
            result = iresult
            scores = iscores
            score = iscore

    print(">>> try len %d, score %.4f" % (len(vlist), score))
    print("vlist %s" % (vlist))
    return score, vlist, result, scores
Example #5
def main():
    parser = argparse.ArgumentParser(description = 'Generate HITs for Amazon Mechanical Turk workers.')
    parser.add_argument('-f', help = 'The mtk data source file.')
    parser.add_argument('-o', help = 'The output file of used data.')

    args = parser.parse_args()

    data_sources = []
    if (args.f != None):
        data_sources = utils.load_file(args.f)
        random.shuffle(data_sources)

    db_collections = hit.setup_mongodb()
    data_metainfo = hit.regex_datasource(data_sources)
    images_metainfo = hit.query_imagedata_from_db(db_collections, data_metainfo)

    # data_labels: flickr high interesting 1, flickr low interesting 2, pinterest [3, 4, 5]
    data_labels = data_metainfo[0]
    # data_ids: (flickr, pinterest) image id
    data_ids = data_metainfo[1]

    data_count_limit = 50

    for begin_index in range(0, len(data_sources), data_count_limit):
        print("index: " + str(begin_index))
        generate_hits(data_sources[begin_index:begin_index + data_count_limit], begin_index, args, data_ids[begin_index:begin_index + data_count_limit], images_metainfo)

    sys.exit(0)
Example #6
def cv(model, mode, k_fold, X, Y):
    """
    cross validation

    parameters:
    model: model object used for fitting
    mode: mode flag passed through to the model
    k_fold: number of folds to divide the data into
    X: input data, numpy array
    Y: class labels, numpy array
    """
    fold_size = len(X) // k_fold  # floor division: size of each fold
    indx = list(range(len(X)))
    shuffle(indx)
    test_indx = [indx[i: i + fold_size] for i in range(0, len(indx), fold_size)]

    test_error = []
    train_error = []

    for fold in test_indx:
        train_indx = list(set(indx) - set(fold))
        X_train = X[train_indx]
        Y_train = Y[train_indx]
        X_test = X[fold]
        Y_test = Y[fold]

        fit = model(X_train, Y_train, mode)
        train_error.append(fit.predict(X_train, Y_train)[1])
        test_error.append(fit.predict(X_test, Y_test)[1])
    return train_error, test_error
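
A hedged usage sketch for cv() above. MajorityModel is a hypothetical stand-in for whatever model the snippet expects: callable as model(X, Y, mode) and returning an object whose predict(X, Y) yields a (predictions, error) pair, which is how cv() indexes the result.

import numpy as np
from numpy.random import shuffle  # cv() above uses the bare name shuffle

class MajorityModel:
    # hypothetical model: always predicts the most frequent training label
    def __init__(self, X, Y, mode):
        vals, counts = np.unique(Y, return_counts=True)
        self.label = vals[np.argmax(counts)]

    def predict(self, X, Y):
        preds = np.full(len(Y), self.label)
        return preds, np.mean(preds != Y)

X = np.random.rand(100, 3)
Y = (X[:, 0] > 0.5).astype(int)
train_err, test_err = cv(MajorityModel, mode=None, k_fold=5, X=X, Y=Y)
print(np.mean(train_err), np.mean(test_err))
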
Example #7
def subsample_otu_zero(otu, ss_fraction, zero_fraction):
    """Evenly subsample an OTU and randomly set zero_fraction entries = 0."""
    ss_otu = subsample_otu_evenly(otu, ss_fraction)
    inds = arange(len(ss_otu))
    shuffle(inds)
    ss_otu[inds[: int(len(inds) * zero_fraction)]] = 0
    return ss_otu
Example #8
    def _reset(self, shuffle=True):
        """Resets the read pointer back to the beginning of the data set. If
        ``shuffle`` is set to True, also creates a new random order for
        iterating the input lines.

        :type shuffle: bool
        :param shuffle: also shuffles the input sentences, unless set to False
        """

        self._next_line = 0
        if shuffle:
            logging.debug("Generating a random order of input lines.")

            samples = []
            for (start, stop), sample_size in \
                zip(self._sentence_pointers.pointer_ranges, self._sample_sizes):

                population = numpy.arange(start, stop, dtype='int64')
                # No duplicates, unless we need more sentences than there are
                # in the file.
                replace = sample_size > len(population)
                sample = random.choice(population, sample_size, replace=replace)
                samples.append(sample)
            self._order = numpy.concatenate(samples)
            for _ in range(10):
                random.shuffle(self._order)
Example #9
def random_opt(clf, varlist, init_list, loans_df, loans_y, score_fn=get_cv_score, rescale=True, print_out=False):
    '''Optimize the variable list by randomly adding variables,
       accepting an addition only if the score increases (local search for a maximum).'''

    vlist = list(init_list)
    score, vstd, vscores = score_fn(clf, vlist, loans_df, loans_y, rescale)
    if print_out:
        print("  >>> iter init len %d, iter_score %.4f" % (len(vlist), score))
    offset = len(vlist)  # offset by length of initial vlist
    indices = list(range(len(varlist) - offset))
    rnd.shuffle(indices)
    for ix in indices:
        ilist = list(vlist)
        ilist.append(varlist[ix + offset])
        iscore, istd, iscores = score_fn(clf, ilist, loans_df, loans_y, rescale)
        if print_out:
            print("  >>> iter len %d, iter_score %.4f" % (len(ilist), iscore))
        if iscore > score:
            vlist = list(ilist)
            score, vstd, vscores = iscore, istd, iscores

    print(">>> try len %d, score %.4f +- %.4f" % (len(vlist), score, 2 * vstd))
    print("vlist %s" % (vlist))
    # return dict ?
    return score, vlist, vscores
Example #10
def make_file_list(gtzan_path, n_folds=5,):
    """
    Generates the audio file list, ground truth files and cross-validation folds for GTZAN.
    """
    audio_path = os.path.join(gtzan_path,'audio')
    out_path = os.path.join(gtzan_path,'lists')
    files_list = []
    for ext in ['.au', '.mp3', '.wav']:
        files = U.getFiles(audio_path, ext)
        files_list.extend(files)
    random.shuffle(files_list)
    
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    
    audio_list_path = os.path.join(out_path, 'audio_files.txt')
    open(audio_list_path,'w').writelines(['%s\n' % f for f in files_list])
    
    annotations = get_annotations(files_list)

    ground_truth_path = os.path.join(out_path, 'ground_truth.txt')
    open(ground_truth_path,'w').writelines(generate_mirex_list(files_list, annotations))
    generate_ground_truth_pickle(ground_truth_path)

    folds = get_folds(files_list, n_folds=n_folds)
    
    ### Single fold for quick experiments
    create_fold(0, 1, folds, annotations, out_path)
    
    for n in range(n_folds):
        create_fold(n, n_folds, folds, annotations, out_path)
Example #11
def simple_num_driver():
    print("Started at: " + str(datetime.datetime.now()))
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logger = logging.getLogger()

    xs = []
    maxlen = 100
    max_features=maxlen + 1
    from numpy.random import shuffle
    r = list(range(1, maxlen + 1))  # list so it can be shuffled in place
    for i in range(1000):
        shuffle(r)
        new_x = r[::]
        xs.append(new_x)
    ys = xs
    xs = np.asarray(xs)
    def to_one_hot(id):
        zeros = [0] * max_features
        zeros[id] = 1
        return zeros
    ys = [[to_one_hot(v) for v in x] for x in ys]  # nested lists so np.asarray builds a 3-D array
    ys = np.asarray(ys)
    print("XS Shape: ", xs.shape)
    print("YS Shape: ", ys.shape)
    seq2seq(xs, ys, max_features, maxlen)
Example #12
def pick_suitable_URL(outlinks):
    '''
    Given a list of URLs, find one (at random) and find all images, text
     associated with that URL. If the text is suitably long, and there
     are images, then output the URL, a list of images, and the text
    '''
    li = 0
    lt = 0
    random.shuffle(outlinks)
    while (li < 1) | (lt < 400):
        try:
            thenewURL = outlinks.pop()
            #print("PSU {}".format(thenewURL))
        except:
            # return empty variables if no suitable links are found
            return '', [], []
        else:
            #print("Trying {}".format(thenewURL))
            theTxt = get_text(thenewURL)
            images = get_imgURLs(thenewURL)
            images = ban_urls(images)
            random.shuffle(images)
            li = len(images)
            lt = len(theTxt)
    return thenewURL, images, theTxt
Example #13
    def batch_creating_template(self,
                                input_vals,
                                preprocess,
                                get_prev_elems,
                                get_elems,
                                add_input_to_batch):
        batches = []
        batch = [[] for i in range(self.n_inputs)]
        input_vals = preprocess(input_vals)
        prev_elems = get_prev_elems(input_vals)

        for input_val in input_vals:
            elems = get_elems(input_val)
            n_samples = len(batch[-1])

            if self._is_batch_boundary(elems, prev_elems, n_samples):
                prev_elems = elems
                batches.append(batch)
                batch = [[] for i in range(self.n_inputs)]
            batch = add_input_to_batch(batch, input_val)

        if len(batch[0]) > 0:
            batches.append(batch)

        shuffle(batches)
        return batches
Example #14
File: RF.py Project: zht200/KNN-RF
 def train(self, x, y):
     # combine x,y into one dataset
     data = np.zeros([len(x), len(x[0])+1])
     data[:, 0:len(x[0])] = x
     data[:, len(x[0])] = y
     self.forest =[]
     B = self.B
     for b in range(0, B):
         if self.Bagging:
             self.count = 0
             shuffle(data)
             # bootstrapping: select 60% data points randomly
             randomdata = data[:int(len(x) * 0.6)]  # slice bound must be an int
             # node of tree, [0]: split value, [1] split attribute, [2] left child ID, [3] right child ID
             self.tree = np.zeros((randomdata.size, 4))
             self.buildtree(randomdata)
             # add the new tree to the forest
             self.forest.append(self.tree[0:self.count+1])
             self.tree = None
             randomdata = None
         else:
             self.count = 0
             self.tree = np.zeros((data.size, 4))
             self.buildtree(data)
             self.forest.append(self.tree[0:self.count+1])
             self.tree = None
Example #15
 def __getVectorC(self, length):
     # ensure at least 1/3 positive values
     data = npr.randint(-10, 10, [length, 1])
     posCount = int(np.ceil(length / 3))  # integer count for slicing and for the randint size
     data[:posCount, 0] = npr.randint(0, 10, posCount)
     npr.shuffle(data)
     return np.matrix(data)
Example #16
def evolve(inital_gene_pop, ref_gene, generations):
    """Evolve a gene population to maximized graphic dissimilarity.
    Convinience function with reduced number of params."""
    # initial, generation 0 children 
    gene_children = [coerce_gene(gene, ref_gene) for gene in inital_gene_pop]
    # make mutation variance generator
    vg = var_gen(generations)
    fitness_means = []
    fitness_maxs = []
    # run through generation number of selection cycles
    for gen in range(generations):
        # DEFINE ME
        df_and_params = [gaussian, 0, next(vg)]
        ec, cc, mc = selection(gene_children, ref_gene, df_and_params,
            elite_children=.02, crossover_children=.8, mutation_children=.18, 
            fitness_function='graphic_dissimilarity')
        # coerce cc and mc since elite children haven't been mutated or crossed
        # so their summary stats are still the same.
        cc = [coerce_gene(i, ref_gene) for i in cc]
        mc = [coerce_gene(i, ref_gene) for i in mc]
        gene_children = ec+cc+mc
        # shuffle may be unnecessary
        shuffle(gene_children)
        tmp =[fitness(i,ref_gene) for i in gene_children]
        fitness_means.append(mean(tmp))
        fitness_maxs.append(max(tmp))
    # final coerce step because gene_children have different summary stats
    #res = [coerce_gene(i,ref_gene) for i in gene_children]
    return gene_children, fitness_means, fitness_maxs
Example #17
def main():
    parser = argparse.ArgumentParser(description = 'Generate HITs for Amazon Mechanical Turk workers.')
    parser.add_argument('-f', help = 'The mtk data source file.')
    parser.add_argument('-o', help = 'The output file of used data.')
    parser.add_argument('-m', default = 'normal', help = 'The running mode in {normal, qua_init, qua}.')
    parser.add_argument('-q', help = 'The qualification type id.')
    parser.add_argument('-t', default = 'sandbox', help = 'The type of Mechanical Turk.')


    args = parser.parse_args()

    if (args.m == 'qua' and args.q == None):
        print('Please give qualification type id if running in qualification mode.')
        sys.exit(0)

    data_sources = []
    if (args.f != None):
        data_sources = utils.load_file(args.f)
        if (args.m != 'qua'):
            random.shuffle(data_sources)

    data_count_limit = 100

    for begin_index in range(0, len(data_sources), data_count_limit):
        print("index: " + str(begin_index))
        generate_hits(args.t, data_sources[begin_index:begin_index + data_count_limit], begin_index, args)

    sys.exit(0)
Example #18
 def push_back_students(self):
     more_pushing = True
     students_to_kick = {}
     while more_pushing:
         more_pushing = False
         self.sorted_section_indices.sort(key = lambda sec_ind: -self.section_capacities[sec_ind])
         for sec_ind in self.sorted_section_indices:
             students_to_push = numpy.transpose(numpy.nonzero((self.enroll_orig[:, sec_ind, self.section_schedules[sec_ind,:]].any(axis=1) * (~self.enroll_final[:,:,self.section_schedules[sec_ind,:]].any(axis=(1,2))))))
             more_pushing |= bool(len(students_to_push))
             for student_ind in students_to_push:
                 self.enroll_final[student_ind, sec_ind, self.section_schedules[sec_ind,:]] = True
                 if self.section_capacities[sec_ind] > 0:
                     self.section_capacities[sec_ind] -= 1
                 else:
                     if not sec_ind in students_to_kick.keys():
                         students_to_kick[sec_ind] = numpy.transpose(numpy.nonzero((self.enroll_final*self.request)[:, sec_ind, self.section_schedules[sec_ind,:]].any(axis=1)))
                         pq = Queue.PriorityQueue()
                         random.shuffle(students_to_kick[sec_ind])
                         for [student] in students_to_kick[sec_ind]:
                             old_sections = numpy.transpose(numpy.nonzero(self.enroll_orig[student, :, self.section_schedules[sec_ind,:]].any(axis=1)))
                             if not len(old_sections):
                                 pq.put((0, random.random(), student), False)
                             for [old_section] in old_sections:
                                 pq.put((self.section_scores[old_section], random.random(), student), False)
                         students_to_kick[sec_ind] = pq
                     try:
                         self.enroll_final[students_to_kick[sec_ind].get(False)[2], sec_ind, self.section_schedules[sec_ind,:]] = False
                     except Queue.Empty:
                         pass
Example #19
def take_random_ids(all_ids, num_to_take):
    """takes num_to_take ids from shuffled list of all_ids"""
    if len(all_ids) < num_to_take:
        raise ValueError('trying to take too many ids from all ids')
    l = deepcopy(all_ids)
    shuffle(l)
    return l[:num_to_take]
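
A small usage sketch, assuming deepcopy and shuffle come from copy and numpy.random as the snippet's bare names imply:

from copy import deepcopy
from numpy.random import shuffle

all_ids = list(range(100))
subset = take_random_ids(all_ids, 5)
print(subset)        # five ids drawn without replacement, in shuffled order
print(all_ids[:5])   # the caller's list is left untouched thanks to deepcopy
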
Example #20
	def _mapCols(e, params):
		from numpy import random
		from disco.core import Params
		m, n = params.m, params.n
		output = []
		if n > 0:
			elems = e.split(",")
			l = list(range(0, m))
			random.shuffle(l)
			for elem in elems:
				retVal = []
				j = int(elem)
				nnz = m * (1.0 - params.sparsity)
				stepSize = int(m / nnz)
				k = int(random.random() * (m % nnz))
				while k<m:
					i = l[k]
					k += stepSize
					val = params.lb + (params.ub-params.lb) * random.random()
					retVal.append("%d,%d,%.14f" % (i,j,val))
					# break output into tuples so reduce can distribute the load
					if len(retVal) > 1000:
						output += [(";".join(retVal), "")]
						retVal = []
				if len(retVal) > 0:
					output += [(";".join(retVal), "")]
		return output
Example #21
def circle_stats(circ):
    npr.shuffle(circ)
    npr.shuffle(circ.T)
    sums = circ.sum(axis=0)
    sums = np.sort(sums)
    plt.plot(sums)
    plt.savefig("./pics/circle_stats")
Example #22
def time_varying_coefficients(d, timelines, constant=False, independent=0, randgen= random.exponential):
    """
    Time-varying coefficients

    d: the dimension of the dataset
    timelines: the observational times
    constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is independent of survival), or
      a list of covariates to make independent.
    randgen: how scalar coefficients (betas) are sampled.

    returns a (t,d) matrix of coefficients
    """
    t = timelines.shape[0]
    try:
      a = np.arange(d)
      random.shuffle(a)
      independent = a[:independent]
    except IndexError:
      pass

    n_funcs = len(FUNCS)
    coefficients = np.zeros((t,d))
    data_generators = []
    for i in range(d):
       f = FUNCS[random.randint(0,n_funcs)] if not constant else constant_
       if i in independent:
          beta = 0
       else:
          beta = randgen((1-constant)*0.5/d)
       coefficients[:,i] = f(timelines,alpha=randgen(2000.0/t), beta=beta)
       data_generators.append(f.__doc__)

    df_coefficients = pd.DataFrame( coefficients, columns = data_generators, index = timelines)
    return df_coefficients
Example #23
def balanced_sample_maker(X, y, random_seed=None):
    """ return a balanced data set by oversampling minority class
        current version is developed on assumption that the positive
        class is the minority.

    Parameters:
    ===========
    X: {numpy.ndarray}
    y: {numpy.ndarray}
    """
    uniq_levels = unique(y)
    uniq_counts = {level: sum(y == level) for level in uniq_levels}

    if random_seed is not None:
        random.seed(random_seed)

    # find observation index of each class levels
    groupby_levels = {}
    for ii, level in enumerate(uniq_levels):
        obs_idx = [idx for idx, val in enumerate(y) if val == level]
        groupby_levels[level] = obs_idx

    # oversampling on observations of positive label
    sample_size = uniq_counts[0]
    over_sample_idx = random.choice(groupby_levels[1], size=sample_size, replace=True).tolist()
    balanced_copy_idx = groupby_levels[0] + over_sample_idx
    random.shuffle(balanced_copy_idx)

    return X[balanced_copy_idx, :], y[balanced_copy_idx]
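
A toy usage sketch for balanced_sample_maker above, assuming unique and random refer to numpy.unique and numpy.random as the snippet's bare names imply; the labels follow the function's assumption that class 1 is the minority:

import numpy as np
from numpy import unique, random

X = np.arange(20).reshape(10, 2)
y = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])

X_bal, y_bal = balanced_sample_maker(X, y, random_seed=42)
print(len(y_bal), int(y_bal.sum()))  # 14 rows, 7 of them positive
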
Example #24
def read_file(filename, n_fold):
 
    data_sources = []
    parts = []
    
    part_of_data = utils.load_file(filename)
    part_of_data = part_of_data[1:len(part_of_data)]
    part_of_data = filter_content(part_of_data)
        
    parts.append(part_of_data)
    
    parts = [item for sublist in parts for item in sublist]
    data_sources = array(parts)

    random.shuffle(data_sources)

    data_count_limit = len(data_sources) // n_fold  # integer fold size for range() below
    folds = []
    count = 0
    for begin_index in range(0, len(data_sources), data_count_limit):
        end_index = begin_index + data_count_limit

        if (count == n_fold - 1):
            end_index = len(data_sources)

        print("begin: " + str(begin_index))
        print("end: " + str(end_index))

        folds.append(data_sources[begin_index:end_index])

        count += 1

    return folds
Example #25
 def _get_random_field(self):
     if self.seed == True:
         np.random.seed(101)
     '''simulates the Gaussian random field'''
     # evaluate the eigenvalues and eigenvectors of the autocorrelation
     # matrix
     _lambda, phi = self.eigenvalues
     # simulation points from 0 to 1 with an equidistant step for the LHS
     randsim = linspace(0, 1, len(self.xgrid) + 1) - 0.5 / (len(self.xgrid))
     randsim = randsim[1:]
     # shuffling points for the simulation
     shuffle(randsim)
     # matrix containing standard Gauss distributed random numbers
     xi = transpose(
         ones((self.nsim, len(self.xgrid))) * array([norm().ppf(randsim)]))
     # eigenvalue matrix
     LAMBDA = eye(len(self.xgrid)) * _lambda
     # cutting out the real part
     ydata = dot(dot(phi, (LAMBDA) ** 0.5), xi).real
     if self.distr_type == 'Gauss':
         # scaling the std. distribution
         scaled_ydata = ydata * self.stdev + self.mean
     elif self.distr_type == 'Weibull':
         # setting Weibull params
         Pf = norm().cdf(ydata)
         scaled_ydata = weibull_min(
             self.shape, scale=self.scale, loc=self.loc).ppf(Pf)
     self.reevaluate = False
     rf = reshape(scaled_ydata, len(self.xgrid))
     if self.non_negative_check == True:
         if (rf < 0).any():
             raise ValueError('negative value(s) in random field')
     return rf
Example #26
	def examineExample(self, i):
		E = self.computeError(i);
		r = E*self.targetData[i];
		if (r < -self.error and self.alpha[i] < self.C) or (r > self.error and self.alpha[i] > 0):
			# Heuristic for choosing second example

			# First heuristic: find the maximum |E1 - E2|
			j = -1
			dE = 0
			for k in range(self.numOfData):
				if fabs(self.computeError(k) - self.computeError(i)) > dE and i != k:
					dE = fabs(self.computeError(k) - self.computeError(i))
					j = k
			if j != -1:
				if self.takeStep(i, j):
					return True

			# Second heuristic: amongst unbounded, try taking step
			# randomize
			index = [k for k in range(self.numOfData)]
			random.shuffle(index)
			for k in index:
				if self.isUnbounded(self.alpha[k]):
					if self.takeStep(i, k):
						return True

			random.shuffle(index)
			for k in index:
				if self.takeStep(i, k):
					return True
		return False
Example #27
File: ex2.py Project: tanay-bits/ml
def load_data():
    from numpy import random

    data = matrix(genfromtxt('wavy_data.csv', delimiter=','))
    random.shuffle(data)
    data = asarray(data)
    
    K = 3

    data_train = (k_fold_cross_validation(data, K))[0]
    data_test = (k_fold_cross_validation(data, K))[1]

    xfull = data[:,0]
    xfull.shape = (size(xfull),1)
    yfull = data[:,1]
    yfull.shape = (size(yfull),1)

    xtrain = data_train[:,0]
    xtrain.shape = (size(xtrain),1)
    ytrain = data_train[:,1]
    ytrain.shape = (size(ytrain),1)

    xtest = data_test[:,0]
    xtest.shape = (size(xtest),1)
    ytest = data_test[:,1]
    ytest.shape = (size(ytest),1)
    
    return xfull, yfull, xtrain, ytrain, xtest, ytest
Example #28
def bootstrap_compare(data1, data2, n_runs = 10000):

	# difference between 1 and 2:
	diff_1_2 = np.mean(data1) - np.mean(data2);
	print('The difference between the means is %f' % diff_1_2)
	sz1 = len(data1);
	data = np.array(list(data1) + list(data2));
	diffs = np.array([0.0] * n_runs);
	for r in range(n_runs):
		# shuffle the data to assume that they are from the same distribution:
		npr.shuffle(data);
		# split in two and determine the difference:
		d1 = data[:sz1];
		d2 = data[sz1:];
		diffs[r] = np.mean(d1) - np.mean(d2);
	
	# sort the array of differences and find the index of the first value bigger than diff_1_2:
	sorted_diffs = np.sort(diffs);
	index = n_runs;
	for r in range(n_runs):
		if(sorted_diffs[r] > diff_1_2):
			print('found!')
			index = r;
			break;
	alpha = float(index) / n_runs;
	print('The difference between the mean of data1 and data2 has an alpha of %f' % alpha)
	
	pl.figure();
	pl.hist(sorted_diffs, 30, facecolor=(0.7, 0.7,0.7));
	pl.text(diff_1_2, 100, '*', color=(0,0,0), weight='bold')
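
A short usage sketch for bootstrap_compare above, assuming numpy, numpy.random and pylab are bound to np, npr and pl as in the function body; the two samples are synthetic:

import numpy as np
import numpy.random as npr
import pylab as pl

group_a = npr.normal(0.5, 1.0, 40)   # mean shifted up by 0.5
group_b = npr.normal(0.0, 1.0, 40)
bootstrap_compare(group_a, group_b, n_runs=2000)
pl.show()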
	
	
	
Example #29
def compare(imagesetlist,
            ax=None, rows=5, cols=20, vlims=None, grid=True, random=False):
    d = len(imagesetlist)

    n_images = imagesetlist[0].shape[0]
    imshape = imagesetlist[0].shape[1:]
    m, n = imshape[:2]
    n_channels = imshape[2] if len(imshape) > 2 else 1

    inds = np.arange(n_images)
    if random:
        npr.shuffle(inds)

    img_shape = (d*m*rows, n*cols)
    if n_channels > 1:
        img_shape = img_shape + (n_channels,)
    img = np.zeros(img_shape, dtype=imagesetlist[0].dtype)

    for ind in range(min(rows*cols, n_images)):
        i, j = (ind // cols, ind % cols)  # integer row/column indices
        for k in range(d):
            img[(d*i+k)*m:(d*i+k+1)*m, j*n:(j+1)*n] = \
                imagesetlist[k][inds[ind],:].reshape(imshape)

    ax = show(img, ax=ax, vlims=vlims)

    if grid:
        for i in range(1,rows):
            ax.plot( [-0.5, img.shape[1]-0.5], (d*i*m-0.5)*np.ones(2), 'r-' )
        for j in range(1,cols):
            ax.plot( [j*n-0.5, j*n-0.5], [-0.5, img.shape[0]-0.5], 'r-')

        ax.set_xlim([-0.5, img.shape[1]-0.5])
        ax.set_ylim([-0.5, img.shape[0]-0.5])
        ax.invert_yaxis()
Example #30
def main():
    inp = 'DO NOT USE PC'
    div = 5
    deck = ['CA', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'CJ', 'CQ', 'CK',
            'DA', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'D10', 'DJ', 'DQ', 'DK',
            'HA', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'H10', 'HJ', 'HQ', 'HK',
            'SA', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'SJ', 'SQ', 'SK',
            'JA', 'JB']

    nprnd.shuffle(deck)
    print(deck)

    # encryption
    print('encoding')
    split_list = divider(inp, div)
    keystream = gen_keystream(len(''.join(split_list)), copy.deepcopy(deck), div)
    print(split_list, keystream)
    inp_num = letter2num(split_list)
    key_num = letter2num(keystream)
    encode_num = [(x+y)%26 for x,y in zip(inp_num, key_num)]
    encode_num = [26 if x==0 else x for x in encode_num]
    encoded = divider(''.join([chr(x+64) for x in encode_num]), div)
    print(encoded)

    print('')
    # decryption
    print('decoding')
    keystream = gen_keystream(len(''.join(encoded)), copy.deepcopy(deck), div)
    print(encoded, keystream)
    inp_num = letter2num(encoded)
    key_num = letter2num(keystream)
    decode_num = [(x-y)%26 for x,y in zip(inp_num, key_num)]
    decode_num = [26 if x==0 else x for x in decode_num]
    decoded = divider(''.join([chr(x+64) for x in decode_num]), div)
    print(decoded)
Example #31
from pickle import load, dump  # load is needed by load_clean_sentences below
from numpy.random import rand
from numpy.random import shuffle


# load a clean dataset
def load_clean_sentences(filename):
    return load(open(filename, 'rb'))


# save a list of clean sentences to file
def save_clean_data(sentences, filename):
    dump(sentences, open(filename, 'wb'))
    print('Saved: %s' % filename)


# load dataset
raw_dataset = load_clean_sentences('english-german.pkl')

# reduce dataset size
n_sentences = 10000
dataset = raw_dataset[:n_sentences, :]
# random shuffle
shuffle(dataset)
# split into train/test
train, test = dataset[:9000], dataset[9000:]
# save
save_clean_data(dataset, 'english-german-both.pkl')
save_clean_data(train, 'english-german-train.pkl')
save_clean_data(test, 'english-german-test.pkl')
Example #32
                category = -1.0

            elif (x % 2) == (y % 2):
                category = 1.0

            else:
                category = -1.0

            xvals = random.normal(xNorm, 0.05, numPoints)
            yvals = random.normal(yNorm, 0.05, numPoints)

            for i in range(numPoints):  # xrange in Python 2
                catData.append((xvals[i], yvals[i], category))

    catData = array(catData)
    random.shuffle(catData)

    knowns = catData[:, -1]
    data = catData[:, :-1]

    params = (0.1, )
    supports, steps, kernelArray = svmTrain(knowns, data, kernelGauss, params)

    score = svmSeparation(knowns, supports, kernelArray)
    print('Known data: %5.2f%% correct' % (score))

    print("\nSupport vector machine prediction boundaries\n")

    ds1x = []
    ds1y = []
    ds2x = []
Example #33
# Program file Pex17_2.py
import numpy as np
from numpy.random import randint, rand, shuffle
from matplotlib.pyplot import plot, show, rc
a=np.loadtxt("Pdata17_2.txt")
xy,d=a[:,:2],a[:,2:]; N=len(xy)
w=50; g=10  # w is the population size, g is the number of generations
J=[]; 
for i in np.arange(w):
    c=np.arange(1,N-1); shuffle(c)
    c1=np.r_[0,c,101]; flag=1
    while flag>0:
        flag=0
        for m in np.arange(1,N-3):
            for n in np.arange(m+1,N-2):
                if d[c1[m],c1[n]]+d[c1[m+1],c1[n+1]]<\
                   d[c1[m],c1[m+1]]+d[c1[n],c1[n+1]]:
                    c1[m+1:n+1]=c1[n:m:-1]; flag=1
    c1[c1]=np.arange(N); J.append(c1)
J=np.array(J)/(N-1)
for k in np.arange(g):
    A=J.copy()
    c1=np.arange(w); shuffle(c1)  # chromosome pairing for the crossover operation
    c2=randint(2,100,w)  # crossover point positions
    for i in np.arange(0,w,2):
        temp=A[c1[i],c2[i]:N-1]  # save an intermediate copy
        A[c1[i],c2[i]:N-1]=A[c1[i+1],c2[i]:N-1]
        A[c1[i+1],c2[i]:N-1]=temp
    B=A.copy()
    by=[]  # initialize the indices of chromosomes to mutate
    while len(by)<1: by=np.where(rand(w)<0.1)
Example #34
 def shuffle(self):
     self.__init__()
     shuffle(self.__set)
Example #35
 # fake_pred = torch.rand(1, GRID_NUM, GRID_NUM, 30)
 # decoder(fake_pred)
 CONTINUE = False  # continue from breakpoint
 model = Yolov1(backbone_name='resnet50')
 model.load_model()
 # predict_one_img('../test_img/000001.jpg', model)
 # test_img_dir = '../test_img'
 test_img_dir = '/Users/chenlinwei/Dataset/VOC0712/VOC2012test/JPEGImages'
 for root, dirs, files in os.walk(test_img_dir, topdown=True):
     if test_img_dir == root:
         print(root, dirs, files)
         files = [
             i for i in files
             if any([j in i for j in ['jpg', 'png', 'jpeg', 'gif', 'tiff']])
         ]
         shuffle(files)
         if CONTINUE:
             with open(osp.join(test_img_dir, 'tested.txt'), 'a') as _:
                 pass
             with open(osp.join(test_img_dir, 'tested.txt'), 'r') as txt:
                 txt = txt.readlines()
                 txt = [i.strip() for i in txt]
                 print(txt)
                 files = [i for i in files if i not in txt]
             for file in files:
                 file_path = os.path.join(root, file)
                 print(f'*** testing:{file_path}')
                 predict_one_img(file_path, model)
                 with open(osp.join(test_img_dir, 'tested.txt'),
                           'a') as txt:
                     txt.write(file + '\n')
Example #36
    monitor='Real_Monitor', color=[0,0,0], colorSpace='rgb',
    blendMode='avg', useFBO=True)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess

# Initialize components for Routine "Task_Start"
Task_StartClock = core.Clock()
changetime = 12
nochangetime = 3.14

imagenumarray =['0001','0002','0004','0005','0006','0008','0009','0011','0014','0016','0018','0019','0022','0023','0024','0028','0029','0031','0038','0039','0040','0041','0044','0045','0048','0049','0050','0051','0052','0054','0055','0056','0058','0060','0066','0067','0069','0070','0071','0074','0075','0076','0078','0079','0080','0084','0087','0089','0093','0097','0098','0099','0100','0102','0106','0107','0109','0110','0111','0112','0114','0116','0117','0118','0119','0122','0123','0124','0125','0126','0127','0128']
shuffle(imagenumarray)
Task_Instructions = visual.TextStim(win=win, name='Task_Instructions',
    text="In this task you will be presented with photographs of various scenes. In some of these scenes, a specific object in the scene may periodically appear or disappear several times in a row. Your task is to press the 'spacebar' key AS SOON AS you see such a change occuring. After making that response, you will be asked to left-click on where the change occured with the mouse. Do remember to press the spacebar AS SOON as you actually see the change though as that response is what will determine your score! It is not important to click on the location quickly but it is important to press the spacebar as soon as you see a change. Press the spacebar to see a few examples of these changes.",
    font='Arial',
    pos=(0, 0), height=0.075, wrapWidth=None, ori=0, 
    color=[-1.000,-1.000,-1.000], colorSpace='rgb', opacity=1,
    depth=-1.0);

# Initialize components for Routine "Examples"
ExamplesClock = core.Clock()
ExampleImage1 = visual.ImageStim(
    win=win, name='ExampleImage1',units='pix', 
    image='sin', mask=None,
    ori=0, pos=(0, 0), size=(1024, 768),
    color=[1,1,1], colorSpace='rgb', opacity=1,
    flipHoriz=False, flipVert=False,
Example #37
dim = 64
k = 4 # downscale size
gene_l1_factor = 0.9
beta1 = 0.5
learning_rate_start = 0.00020
learning_rate_half_life = 5000
dataset = '/home/data/houruibing/CelebA/img_align_celeba'
test_vector = 16 # num of test images
checkpoint_period = 10000
summary_period = 200
train_time = 60 * 5 # time in minutes to train the model

#prepare data
filenames = tf.gfile.ListDirectory(dataset)
filenames = sorted(filenames)
random.shuffle(filenames)
filenames = [os.path.join(dataset, f) for f in filenames]
train_filenames = filenames[: -test_vector]
test_filenames = filenames[-test_vector: ]

#checkpoint_dir = 'checkpoint/CelebA'
#if not os.path.exists(checkpoint_dir):
#    os.makedirs(checkpoint_dir)

#train_dir = 'generate_images/CelebA'
#if not os.path.exists(train_dir):
#    os.makedirs(train_dir)
    
checkpoint_dir = 'checkpoint/CelebA'
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
Example #38
def sokoban_layout(limit = 1000, egocentric = False, objects = True, stage=0, test=False):
    assert objects
    list = ["sokoban_layout",limit,
            ("egocentric" if egocentric else "global"),
            ("object"     if objects    else "global"),
            stage,
            ("test" if test else "train"),]
    path = os.path.join(latplan.__path__[0],"puzzles","-".join(map(str,list))+".npz")
    import gym
    import pddlgym
    import imageio
    pre_layouts     = []
    suc_layouts     = []
    if egocentric:
        layout_mode = "egocentric_layout"
    else:
        layout_mode = "layout"

    env = gym.make("PDDLEnvSokoban-v0" if not test else "PDDLEnvSokobanTest-v0")
    env.fix_problem_index(stage)
    init, _ = env.reset()
    init_layout = env.render(mode=layout_mode)

    # reachability analysis
    player = (init_layout == pddlgym.rendering.sokoban.PLAYER)
    wall   = (init_layout == pddlgym.rendering.sokoban.WALL)
    reachable = compute_reachability_sokoban(wall,player)
    relevant = np.maximum(reachable, wall)
    print(f"{wall.sum()} wall objects:")
    print(wall)
    print(f"{reachable.sum()} reachable objects:")
    print(reachable)
    print(f"{relevant.sum()} relevant objects:")
    print(relevant)
    relevant = relevant.reshape(-1)

    def successor(obs):
        env.set_state(obs)
        for action in env.action_space.all_ground_literals(obs, valid_only=True):
            env.set_state(obs)
            obs2, _, _, _ = env.step(action)
            yield obs2

    max_g = 0
    for obs, close_list in dijkstra(init, float("inf"), successor, include_nonleaf=True, limit=limit):
        max_g = max(max_g,close_list[obs]["g"])
        pobs = close_list[obs]["parent"]
        if pobs is None:
            continue
        env.set_state(pobs)
        pre_layouts.append(env.render(mode=layout_mode))

        env.set_state(obs)
        suc_layouts.append(env.render(mode=layout_mode))

    pre_layouts = np.array(pre_layouts)
    suc_layouts = np.array(suc_layouts)
    print(pre_layouts.shape)
    print("max",pre_layouts.max(),"min",pre_layouts.min())
    B, H, W = pre_layouts.shape
    pre_layouts = pre_layouts.reshape((B,H*W))
    suc_layouts = suc_layouts.reshape((B,H*W))

    # shuffling
    random_indices = np.arange(len(pre_layouts))
    nr.shuffle(random_indices)
    pre_layouts = pre_layouts[random_indices]
    suc_layouts = suc_layouts[random_indices]

    tile = 16
    bboxes = tiled_bboxes(B, H, W, tile)

    if not egocentric:
        pre_layouts = pre_layouts[:,relevant]
        suc_layouts = suc_layouts[:,relevant]
        bboxes = bboxes[:,relevant]

    # make it into a one-hot repr
    eye = np.eye(pddlgym.rendering.sokoban.NUM_OBJECTS)
    # B, H, W, C
    pre_classes = eye[pre_layouts]
    suc_classes = eye[suc_layouts]
    print(pre_classes.shape)

    np.savez_compressed(path,pres=pre_classes,sucs=suc_classes,bbox=bboxes,picsize=[H*tile,W*tile,3],max_g=max_g)
Example #39
 def shuffle(self):
     shuffle(self.data)
Example #40
N = len(z_avg)
indices = np.array(range(N))
quota = 0.60
tests = 2000
q_max = int(0.21 * quota * N)

err_avg = np.zeros(q_max - 1)
q_vec = np.array(range(1, q_max))

for k in range(tests):

    if k % 100 == 0:
        print("k = " + str(k))

    rng.shuffle(indices)
    ind_train = indices[:int(quota * N)]
    ind_valid = indices[int(quota * N):]
    """
    ind_train = indices[int(0.5*(1-quota)*N):N-int(0.5*(1-quota)*N)]
    ind_valid = np.concatenate((indices[:int(0.5*(1-quota)*N)],indices[N-int(0.5*(1-quota)*N):]))
    """

    err = []
    for q in range(1, q_max):
        p = np.polyfit(z_avg[ind_train], x_avg[ind_train], q)
        x_hat = np.polyval(p, z_avg)
        err.append(np.sqrt(np.sum((x_hat[ind_valid] - x_avg[ind_valid])**2)))
    err_avg += np.array(err)

err_avg /= tests
Example #41
 def on_epoch_end(self):
     self.indexes = arange(len(self.imagePaths))
     if self.shuffle:
         shuffle(self.indexes)
Example #42
def sokoban_image(limit = 1000, egocentric = False, objects = True, stage=0, test=False):
    list = ["sokoban_image",limit,
            ("egocentric" if egocentric else "global"),
            ("object"     if objects    else "global"),
            stage,
            ("test" if test else "train"),]
    path = os.path.join(latplan.__path__[0],"puzzles","-".join(map(str,list))+".npz")
    import gym
    import pddlgym
    import imageio
    pre_images     = []
    suc_images     = []
    if egocentric:
        image_mode  = "egocentric_crisp"
    else:
        image_mode  = "human_crisp"

    env = gym.make("PDDLEnvSokoban-v0" if not test else "PDDLEnvSokobanTest-v0")
    env.fix_problem_index(stage)
    init, _ = env.reset()
    init_layout = env.render(mode="layout")

    # reachability analysis
    player = (init_layout == pddlgym.rendering.sokoban.PLAYER)
    wall   = (init_layout == pddlgym.rendering.sokoban.WALL)
    reachable = compute_reachability_sokoban(wall,player)
    relevant = np.maximum(reachable, wall)
    print(f"{wall.sum()} wall objects:")
    print(wall)
    print(f"{reachable.sum()} reachable objects:")
    print(reachable)
    print(f"{relevant.sum()} relevant objects:")
    print(relevant)
    relevant = relevant.reshape(-1)

    def successor(obs):
        env.set_state(obs)
        for action in env.action_space.all_ground_literals(obs, valid_only=True):
            env.set_state(obs)
            obs2, _, _, _ = env.step(action)
            yield obs2

    pairs = []
    max_g = 0
    for obs, close_list in dijkstra(init, float("inf"), successor, include_nonleaf=True, limit=limit):
        max_g = max(max_g,close_list[obs]["g"])
        pobs = close_list[obs]["parent"]
        if pobs is None:
            continue
        pairs.append((pobs,obs))

    threads = 16
    pairss = []
    len_per_thread = 1+(len(pairs) // threads)
    for i in range(threads):
        pairss.append(pairs[i*len_per_thread:(i+1)*len_per_thread])

    from multiprocessing import Pool
    with Pool(threads) as p:
        for sub in tqdm.tqdm(p.imap(render_sokoban,
                                    zip(pairss,
                                        [image_mode]*threads))):
            pre_images_sub  = sub[0]
            suc_images_sub  = sub[1]
            pre_images.extend(pre_images_sub)
            suc_images.extend(suc_images_sub)

    pre_images = np.array(pre_images)
    suc_images = np.array(suc_images)
    print(pre_images.shape)
    print("max",pre_images.max(),"min",pre_images.min())

    # shuffling
    random_indices = np.arange(len(pre_images))
    nr.shuffle(random_indices)
    pre_images = pre_images[random_indices]
    suc_images = suc_images[random_indices]

    if not objects:
        # whole image
        np.savez_compressed(path,pres=pre_images,sucs=suc_images)
        return

    # image
    tile = 16
    B, H, W, C = pre_images.shape

    pre_images = image_to_tiled_objects(pre_images, tile)
    suc_images = image_to_tiled_objects(suc_images, tile)
    bboxes = tiled_bboxes(B, H//tile, W//tile, tile)
    print(pre_images.shape,bboxes.shape)

    # prune the unreachable regions
    if not egocentric:
        pre_images = pre_images[:,relevant]
        suc_images = suc_images[:,relevant]
        bboxes = bboxes[:,relevant]
        print(pre_images.shape,bboxes.shape)

    # note: bbox can be reused for pres and sucs
    picsize = [H,W,C]
    np.savez_compressed(path,pres=pre_images,sucs=suc_images,bboxes=bboxes,picsize=picsize,max_g=max_g)
Example #43
def alpha_beta_move(board, turn, depth = 0, alpha = (-inf,-inf), beta = (inf,inf), evaluation = lambda x: 0):
    dummy_board = np.copy(board) # we don't want to change the board state

    swap_player = {1:-1,-1:1} # So we can change whose turn
    options = cccc.available_moves(board) # get legal moves
    random.shuffle(options) # should inherit move order instead of randomizing


#     if len(options) == 1:
#         update_move(board,options[0])
#         if cccc.winner(dummy_board):
#             return (inf,options[0])
#         else:
#             return (0,options[0])   
    
    best_value = (-inf,-inf)
    
    if not options:
        print(board, cccc.game_over(board))
        print('oops, no available moves')
    cand_move = options[0]
    if depth == 0: 
        for x in options:
            update_move(dummy_board,x,turn)
            op_value = (evaluation(dummy_board*swap_player[turn]) , depth)

            if tuple(-1 * el for el in op_value) > best_value:
                cand_move = x
                best_value = tuple(-1 * el for el in op_value)
                alpha = max(alpha, best_value)
    #        print depth,-op_value, best_value, cand_move,alpha,beta
            if alpha >= beta:
    #                print 'pruned'
                break   #alpha-beta cutoff
            unupdate_move(dummy_board,x)
    else:
    
    
    
        for x in options:

    #        dummy_board = np.copy(board)
    #        height= np.where(board[:,x]==0)[0][-1] #connect four only
    #        dummy_board[height, x] = turn
            update_move(dummy_board,x,turn)
        
            if cccc.winner(dummy_board): #should check over and tied too
                return((inf,depth), x)
            
            if cccc.is_full(dummy_board): #This assumes you can't lose on your turn
                return((0,depth) , x)
            
            op_value,_ = alpha_beta_move( dummy_board,
                                            swap_player[turn],
                                            depth-1,
                                            alpha = tuple(-1 * el for el in beta),
                                            beta = tuple(-1 * el for el in alpha),
                                            evaluation = evaluation)

            if tuple(-1 * el for el in op_value) > best_value:
                cand_move = x
                best_value = tuple(-1 * el for el in op_value)
                alpha = max(alpha, best_value)
    #        print depth,-op_value, best_value, cand_move,alpha,beta
            if alpha >= beta:
    #                print 'pruned'
                break   #alpha-beta cutoff
            unupdate_move(dummy_board,x)
    #        dummy_board[height, x] = 0
    return (best_value, cand_move)
Example #44
def load(relativePath, labels, n=None, validationPercentage=None):

    filenameImagesTrain = list(glob(join(relativePath, 'Train', 'Images',
                                         '*')))
    pathListLabelsTrain = []
    for l in labels:
        pathListLabelsTrain.append(join(relativePath, 'Train', 'Labels', l))

    filenameImagesValidation = list(
        glob(join(relativePath, 'Validation', 'Images', '*')))
    pathListLabelsValidation = []
    for l in labels:
        pathListLabelsValidation.append(
            join(relativePath, 'Validation', 'Labels', l))

    if n is None:

        dataTrain = []
        for i in range(len(filenameImagesTrain)):
            data = []
            data.append(filenameImagesTrain[i])
            for l in pathListLabelsTrain:
                data.append(
                    list(
                        glob(
                            join(
                                l, ''.join(
                                    (filenameImagesTrain[i].split(sep)[-1]
                                     ).split('.')[:-1]) + '.*')))[0])
            dataTrain.append(data)

        dataValidation = []
        if validationPercentage is None:
            for i in range(len(filenameImagesValidation)):
                data = []
                data.append(filenameImagesValidation[i])
                for l in pathListLabelsValidation:
                    data.append(
                        list(
                            glob(
                                join(
                                    l, ''.join(
                                        (filenameImagesValidation[i].split(sep)
                                         [-1]).split('.')[:-1]) + '.*')))[0])
                dataValidation.append(data)
        else:
            for i in range(int(
                    len(filenameImagesTrain) * validationPercentage)):
                data = []
                data.append(filenameImagesValidation[i])
                for l in pathListLabelsValidation:
                    data.append(
                        list(
                            glob(
                                join(
                                    l, ''.join(
                                        (filenameImagesValidation[i].split(sep)
                                         [-1]).split('.')[:-1]) + '.*')))[0])
                dataValidation.append(data)
    else:
        if validationPercentage is None:
            validationPercentage = 0.5

        nTrain = int(n * (1.0 - validationPercentage) /
                     len(filenameImagesTrain))
        nValidation = int(n * validationPercentage /
                          len(filenameImagesValidation))

        dataTrain = []
        for i in range(nTrain):
            data = []
            data.append(filenameImagesTrain[i])
            for l in pathListLabelsTrain:
                data.append(
                    list(
                        glob(
                            join(
                                l, ''.join(
                                    (filenameImagesTrain[i].split(sep)[-1]
                                     ).split('.')[:-1]) + '.*')))[0])
            dataTrain.append(data)

        dataValidation = []
        for i in range(nValidation):
            data = []
            data.append(filenameImagesValidation[i])
            for l in pathListLabelsValidation:
                data.append(
                    list(
                        glob(
                            join(
                                l, ''.join(
                                    (filenameImagesValidation[i].split(sep)[-1]
                                     ).split('.')[:-1]) + '.*')))[0])
            dataValidation.append(data)

    shuffle(dataTrain)
    shuffle(dataValidation)

    def splitLabelsImages(data, labels):
        dataSplit = hsplit(asarray(data), array(range(1, len(labels) + 1)))
        images = dataSplit[0]
        labels = dataSplit[1:]
        return images, labels

    trainImages, trainLabels = splitLabelsImages(dataTrain, labels)

    validationImages, validationLabels = splitLabelsImages(
        dataValidation, labels)

    return trainImages, trainLabels, validationImages, validationLabels
Example #45
def train(g_model,
          d_model,
          gan_model,
          dataset,
          latent_dim,
          n_epochs=100,
          n_batch=128):
    itr = 0

    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    n_steps = bat_per_epo * n_epochs
    # manually enumerate epochs
    for i in range(n_steps):
        # enumerate batches over the training set
        for j in range(n_critic):
            itr += 1
            # get randomly selected 'real' samples
            X_real, y_real = generate_real_samples(dataset, half_batch)
            # update discriminator model weights
            d_loss1 = d_model.train_on_batch(X_real, y_real)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim,
                                                   half_batch)
            # update discriminator model weights
            d_loss2 = d_model.train_on_batch(X_fake, y_fake)
            if (itr % 1000 == 0):
                (_, _), (images1, _) = cifar10.load_data()
                shuffle(images1)
                images1 = images1[:1000]
                x_fake, y_fake = generate_fake_samples(g_model,
                                                       latent_dim,
                                                       n_samples=1000)
                images2 = x_fake
                images2 = (images2 + 1) / 2
                images2 = 255 * images2
                print('Loaded', images1.shape, images2.shape)
                # convert integer to floating point values
                images1 = images1.astype('float32')
                images2 = images2.astype('float32')
                # resize images
                images1 = scale_images(images1, (299, 299, 3))
                images2 = scale_images(images2, (299, 299, 3))
                print('Scaled', images1.shape, images2.shape)
                # pre-process images
                images1 = preprocess_input(images1)
                images2 = preprocess_input(images2)
                # calculate fid
                fid = calculate_fid(model_in, images1, images2)
                print('FID: %.3f' % fid)
                fid_score.append(fid)
                epoch_arr.append(i)

        # prepare points in latent space as input for the generator
        X_gan = generate_latent_points(latent_dim, n_batch)
        # create inverted labels for the fake samples
        y_gan = ones((n_batch, 1))
        # update the generator via the discriminator's error
        g_loss = gan_model.train_on_batch(X_gan, y_gan)
        # summarize loss on this batch
        print('>%d, %d/%d, d1=%.3f, d2=%.3f g=%.3f' %
              (i + 1, j + 1, bat_per_epo, d_loss1, d_loss2, g_loss))
        # evaluate the model performance, sometimes
        if (i + 1) % 500 == 0:
            summarize_performance(i, g_model, d_model, dataset, latent_dim)
Example #46
if expInfo['frameRate']!=None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess

# Initialize components for Routine "trial"
trialClock = core.Clock()
ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')
# replace these list items with whatever your filename conventions are:
colors = ['yellow', 'white', 'orange', 'magenta', 'green', 'gray', 'cyan', 'blue']
shapes = ['triangle', 'square', 'line', 'invertedTriangle', 'hexagon', 'diamond', 'cross', 'circle']
rewards = [0.5, 1, 2, 4] * 2
conditions = ['go', 'go', 'go', 'go', 'stop', 'stop', 'stop', 'stop']
trialDetailsList = []

shuffle(colors)
shuffle(shapes)

for i, color in enumerate(colors): # cycle through each color and keep track of an index number
    trialDetails = {} # a dictionary of key-value pairs
    trialDetails['fileName'] = shapes[i] + color + '.gif'
    trialDetails['reward'] = rewards[i]
    trialDetails['condition'] = conditions[i]
    trialDetailsList.append(trialDetails)

shuffle(trialDetailsList) # do this now to ensure that order of presentation of rewards and conditions is also shuffled

ConditionOne = trialDetailsList[0]
ConditionTwo = trialDetailsList[1]
ConditionThree = trialDetailsList[2]
ConditionFour = trialDetailsList[3]
Example #47
seed(213)  # DON'T CHANGE

### YOUR CODE HERE ###
mean_winnings = 0

num_trials = 1000000
# This sets the feedback interval so we know the program hasn't crashed.
feedback = int(np.round(num_trials / 10))
earns_money = 0

# red : 1, green : 2; blue : 3
red = [1] * 60
green = [2] * 30
blue = [3] * 10
balls = red + green + blue
shuffle(balls)

for t in range(1, num_trials + 1):
    # To see the progress.
    if t % feedback == 0:
        print(np.round(100 * t / num_trials, 1),
              '%  complete:   earned money expectation =', earns_money / t)
    indices = np.random.choice(100, 3, replace=False)
    chosen = [balls[indices[0]], balls[indices[1]], balls[indices[2]]]
    red_num = chosen.count(1)
    toll = randint(1, 7) + randint(1, 7)
    if red_num > 1:
        earns_money += toll
    else:
        earns_money -= toll
Example #48
    def init_event(self, event_file, first_init = False):
        if self.verbose > 0:
            print('\n\nLoading event:', event_file, '\n\n')

        self.hits, self.cells, self.particles, self.truth = load_event(event_file)

        self.hits_xyz = self.hits.values[:,1:4]
        self.hits_r = np.sqrt(np.power(self.hits_xyz[:,0],2)+np.power(self.hits_xyz[:,1],2))

        self.hits_phi_x = np.sign(self.hits_xyz[:,1]) * np.arccos(self.hits_xyz[:,0] / self.hits_r)
        self.hits_phi_y = np.sign(-self.hits_xyz[:,0]) * np.arccos(self.hits_xyz[:,1] / self.hits_r)
        self.hits_theta = np.arctan2(self.hits_xyz[:,2], self.hits_r)
        
        self.hits_xyzrphiphitheta = np.concatenate((
            self.hits_xyz,
            np.reshape(self.hits_r,(-1,1)),
            np.reshape(self.hits_phi_x,(-1,1)),
            np.reshape(self.hits_phi_y,(-1,1)),
            np.reshape(self.hits_theta,(-1,1))),
            axis=1)

        if self.std_scale:
            if first_init:
                self.coord_rescaler = preprocessing.StandardScaler().fit(self.hits_xyzrphiphitheta)
            self.hits_xyzrphiphitheta = self.coord_rescaler.transform(self.hits_xyzrphiphitheta)

        hits_module_array = self.hits.values[:,4:]
        self.hits_module = []
        for module in hits_module_array:
            self.hits_module.append(tuple(module))

        # Collect all particle ids except the noise id 0: prepending 0 guarantees it
        # is the first unique value, so dropping the first entry removes it.
        self.track_unique_ids = np.unique(np.append([0],self.truth.values[:,1]))
        self.track_unique_ids = self.track_unique_ids[1:]
        rand.shuffle(self.track_unique_ids)
        self.ntracks = len(self.track_unique_ids)
        self.ntrack_iter = 0
        
        # Preprocess tracks
        self.tracks = {}
        self.track_hits = {}
        self.track_coords = {}
        self.track_hits_vols = {}
        self.track_mod_onehot = {}
        
        for track_unique_id in self.track_unique_ids:
            hit_rows = self.truth['particle_id'] == track_unique_id
            self.tracks[track_unique_id] = self.truth[hit_rows].drop('particle_id',axis=1)
            self.track_hits[track_unique_id] = self.tracks[track_unique_id]['hit_id'].values[:]
            self.track_coords[track_unique_id] = []
            self.track_hits_vols[track_unique_id] = []
            self.track_mod_onehot[track_unique_id] = []

            for hit_id in np.nditer(self.track_hits[track_unique_id]):
                # Append to track_coords array
                self.track_coords[track_unique_id].append(self.hits_xyzrphiphitheta[hit_id-1])
        
                # Find volumes of hits
                self.track_mod_onehot[track_unique_id].append(self.onehot_module[self.hits_module[hit_id-1]])
            
            # Pad track_vol_onehot: shift the module one-hots left by one so each hit
            # is paired with the next hit's module, ending the track with the (0,0,0) sentinel.
            if len(self.track_hits[track_unique_id]) > 1:
                self.track_mod_onehot[track_unique_id] = self.track_mod_onehot[track_unique_id][1:]
                self.track_mod_onehot[track_unique_id].append(self.onehot_module[(0,0,0)])
            else:
                self.track_mod_onehot[track_unique_id] = [self.onehot_module[(0,0,0)]]
            self.track_coords[track_unique_id] = np.array(self.track_coords[track_unique_id])
Example #49
0
from Tkinter import *
from numpy import array
from numpy.random import shuffle

ANCHO = 8
ALTO = 8
MINAS = 10
N = ANCHO * ALTO

# These are the colors used for the numbers 0 through 8.
colores = 'white blue green red purple black maroon turquoise gray'.split()

# Create a random game.
campo = array([-1] * MINAS + [0] * (N - MINAS))
shuffle(campo)
campo = campo.reshape((ANCHO, ALTO))

# Count the neighboring mines for each cell.
for i in range(ANCHO):
    for j in range(ALTO):
        if campo[i, j] != -1:
            vecindad = campo[max(i - 1, 0):i + 2, max(j - 1, 0):j + 2]
            cuenta = (vecindad == -1).sum()
            campo[i, j] = cuenta

w = Tk()

juego_terminado = BooleanVar()
juego_terminado.set(False)

faltantes = IntVar()
    def prepItems(self, items):
        seed(self.seed)
        rtn = [o for o in items]
        shuffle(rtn)
        return rtn
Example #51
0
def generate_samples_from_adni2(adni_root,
                                max_augm_params,
                                augm_factor,
                                prefix_name='alz',
                                test_prc=0.25,
                                shuffle_data=True,
                                debug=True):

    stage_dirs = {'AD': '/AD/', 'MCI': '/MCI/', 'NC': '/NC/'}

    stage_dirs_root = {k: adni_root + v for k, v in stage_dirs.items()}

    class_size = {
        k: len(os.listdir(stage_dirs_root[k]))
        for k in stage_dirs_root
    }
    print('source patients:', class_size)

    ts = int(min(class_size.values()) * test_prc)
    test_size = {k: ts for k in stage_dirs_root}
    train_size = {k: class_size[k] - test_size[k] for k in stage_dirs_root}

    print('source patients used for train & validation:', train_size)
    print('source patients used for test', test_size)

    train_size_balanced = int(max(train_size.values()) * augm_factor)
    test_size_balanced = int(max(test_size.values()) * augm_factor)
    print(
        'train & validation data will be augmented to %d samples by each class'
        % train_size_balanced)
    print('test data will be augmented to %d samples by each class' %
          test_size_balanced)

    sample_sets = {
        'train': XSet(name=prefix_name + '_train'),
        'test_0': XSet(name=prefix_name + '_test_0'),
        'test_1': XSet(name=prefix_name + '_test_1'),
        'test_2': XSet(name=prefix_name + '_test_2'),
    }

    for k in stage_dirs_root:
        stage_dir = stage_dirs[k]
        patient_dirs = os.listdir(stage_dirs_root[k])
        rnd.shuffle(patient_dirs)

        test_dirs = patient_dirs[:test_size[k]]
        train_dirs = patient_dirs[test_size[k]:]

        train_lists = [(k, stage_dir + d + '/SMRI/', stage_dir + d + '/MD/')
                       for d in train_dirs]
        test_lists = [(k, stage_dir + d + '/SMRI/', stage_dir + d + '/MD/')
                      for d in test_dirs]

        sample_sets['train'].add_all(
            generate_augm_set(train_lists, train_size_balanced,
                              max_augm_params))
        sample_sets['test_0'].add_all(generate_augm_set(
            test_lists, None, None))
        sample_sets['test_1'].add_all(
            generate_augm_set(test_lists, test_size_balanced,
                              AugmParams(max_augm_params.shift, sigma=0.0)))
        sample_sets['test_2'].add_all(
            generate_augm_set(test_lists, test_size_balanced, max_augm_params))

    if shuffle_data:
        for s in sample_sets:
            sample_sets[s].shuffle()

    if debug:
        for s in sample_sets:
            sample_sets[s].print()

    return sample_sets
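
XSet and generate_augm_set are not part of this excerpt; a minimal, hypothetical stand-in for XSet covering only the methods the snippet calls (add_all, shuffle, print) might be:

import random as rnd

class XSet:
    """Hypothetical minimal container matching the calls made above."""
    def __init__(self, name):
        self.name = name
        self.samples = []

    def add_all(self, samples):
        # Append a batch of (label, smri_dir, md_dir) style entries.
        self.samples.extend(samples)

    def shuffle(self):
        rnd.shuffle(self.samples)

    def print(self):
        print(self.name, len(self.samples), 'samples')
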
    thisInfo = copy.copy(info)
    #now add any specific info for this staircase
    thisInfo['thisStart'] = thisStart  #we might want to keep track of this
    thisStair = data.StairHandler(startVal=thisStart,
                                  extraInfo=thisInfo,
                                  nTrials=50,
                                  nUp=1,
                                  nDown=3,
                                  minVal=0.5,
                                  maxVal=8,
                                  stepSizes=[4, 4, 2, 2, 1, 1])
    stairs.append(thisStair)

for trialN in range(info['nTrials']):
    shuffle(stairs)  # this shuffles 'in place' (i.e. stairs itself is changed, nothing returned)
    #then loop through our randomised order of staircases for this repeat
    for thisStair in stairs:
        thisIntensity = thisStair.next()
        print 'start=%.2f, current=%.4f' % (thisStair.extraInfo['thisStart'],
                                            thisIntensity)

        #---------------------
        #run your trial and get an input
        #---------------------
        keys = event.waitKeys()  # (we can simulate by pushing left for 'correct')
        if 'left' in keys: wasCorrect = True
        else: wasCorrect = False
Example #53
0
import pickle
from numpy.random import choice, shuffle

candidate_names = ["P + G", "PA + G", "PA + W"]
candidates = [0, 1, 2]

# probabilities = [0.7, 0.2, 0.1]

slide = [0, 0, 0, 0, 1, 1, 2, 2]

choices = []

for i in range(0, 10):
    shuffle(slide)
    draw = slide[:]  # copy the current shuffle; appending slide itself would store the same list every iteration
    # draw = choice(candidates, 8,
    #               p=probabilities)
    choices.append(draw)
    print(i, end="\t")
    for j in draw:
        print(candidate_names[j].ljust(10) + ' | ', end="\t")
    print("")
    print("-" * 10 * 13)

pickle.dump(choices, open("choices.pkl", "wb"))
Example #54
0
    def route_move(state):
        """Swaps two cities in the route."""
        a = random.randint(0, len(state) - 1)
        b = random.randint(0, len(state) - 1)
        state[a], state[b] = state[b], state[a]

    def route_energy(state):
        """Calculates the length of the route."""
        e = 0
        for i in range(len(state)):
            e += distance(cities[state[i - 1]], cities[state[i]])
        return e

    # Start with the cities listed in random order
    randState = cities.keys()
    random.shuffle(randState)

    # Minimize the distance to be traveled by simulated annealing with a
    # manually chosen temperature schedule
    annealer = Annealer(route_energy, route_move)
    state, e = annealer.anneal(randState, 10000000, 0.01,
                               18000 * len(randState), 9)
    while state[0] != 'New York City':
        state = state[1:] + state[:1]  # rotate NYC to start
    print("%i mile route:" % route_energy(state))
    for city in state:
        print("\t", city)

    # Minimize the distance to be traveled by simulated annealing with an
    # automatically chosen temperature schedule
    autoSchedule = annealer.auto(randState, 4)
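
The annealing snippet above relies on a cities mapping and a distance function that are not shown in the excerpt; a hypothetical sketch (city names and coordinates are illustrative only) could be:

import math

# Hypothetical inputs assumed by the snippet: city -> (latitude, longitude) in degrees.
cities = {'New York City': (40.72, 74.00),
          'Boston': (42.36, 71.06),
          'Chicago': (41.88, 87.63)}

def distance(a, b):
    """Great-circle distance in miles between two (lat, lon) points."""
    lat1, lon1 = math.radians(a[0]), math.radians(a[1])
    lat2, lon2 = math.radians(b[0]), math.radians(b[1])
    cos_angle = (math.sin(lat1) * math.sin(lat2) +
                 math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2))
    return 3959.0 * math.acos(min(1.0, max(-1.0, cos_angle)))
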
Example #55
0
print("Started at: " + str(datetime.datetime.now()))

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)
logger = logging.getLogger()

xs = []

maxlen = 100
max_features = maxlen + 1
from numpy.random import shuffle

r = range(1, maxlen + 1, 1)

for i in range(1000):
    shuffle(r)
    new_x = r[::]
    xs.append(new_x)


def to_one_hot(id):
    zeros = [0] * max_features
    zeros[id] = 1
    return zeros


xs = np.asarray(xs)

ys = map(lambda x: map(to_one_hot, x), xs)
ys = np.asarray(ys)
Example #56
0
def shuffle(l):
    R.shuffle(l)
    return l
from numpy.random import seed
from numpy.random import shuffle

seed(1)

sequence = [i for i in range(20)]
print(sequence)

shuffle(sequence)
print(sequence)
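
Because the demo above fixes the seed, re-seeding and shuffling a fresh copy reproduces the identical permutation; a short continuation illustrating that:

from numpy.random import seed, shuffle  # same imports as above

seed(1)
sequence2 = [i for i in range(20)]
shuffle(sequence2)
print(sequence == sequence2)  # True: same seed, same permutation
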
Example #58
0
    def prepare_epoch(self):
        """
        Read random videos of the database
        """
        subdirnames = [
            '/01/', '/02/', '/03/', '/04/', '/05/', '/06/', '/07/', '/08/',
            '/09/'
        ]
        filenames = [
            '001.png', '002.png', '003.png', '004.png', '005.png', '006.png',
            '007.png', '008.png', '009.png', '010.png', '011.png', '012.png',
            '013.png', '014.png', '015.png'
        ]
        burst_nums = np.random.randint(len(subdirnames),
                                       size=len(self.categories * 10))
        frame_nums = np.random.randint(self.past_frames,
                                       high=len(filenames) -
                                       self.future_frames,
                                       size=len(self.categories * 10))
        i = 0
        self.videos = []
        self.keys = []
        for c in self.categories:
            paths = self.video_paths_dict[c]
            paths = np.random.permutation(paths)
            for p in paths[:3]:
                dir_path = p + subdirnames[burst_nums[i]]
                if not (os.path.exists(dir_path)):
                    continue
                video = []
                for f in range(frame_nums[i] - self.past_frames,
                               frame_nums[i] + self.future_frames + 1):
                    img = cv2.imread(dir_path + filenames[f])
                    img = np.asarray(img, dtype=np.float32)
                    if self.color:
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    else:
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                        img = np.expand_dims(img, 2)
                    img = np.asarray(img, dtype=np.float32)
                    img = img / 255.
                    video.append(img)
                ref_image = video[self.past_frames]
                video = np.asarray(video, dtype=np.float32)

                video_noised = video + self.sigma * np.random.randn(
                    video.shape[0], video.shape[1], video.shape[2],
                    video.shape[3])
                video_noised = np.asarray(video_noised, dtype=np.float32)

                video_search_gray = rgb_to_gray(video if self.oracle_mode ==
                                                1 else video_noised)

                nn = self.ps.compute(video_search_gray, self.past_frames)

                self.videos.append((ref_image, video_noised, nn))
                ys = range(2 * self.patch_width,
                           ref_image.shape[0] - 2 * self.patch_width,
                           self.patch_stride)
                xs = range(2 * self.patch_width,
                           ref_image.shape[1] - 2 * self.patch_width,
                           self.patch_stride)
                xx, yy = np.meshgrid(xs, ys)
                xx = np.asarray(xx.flatten(), dtype=np.uint32)
                yy = np.asarray(yy.flatten(), dtype=np.uint32)
                self.keys.append(
                    np.stack([i * np.ones([len(xx)], dtype=np.uint32), xx,
                              yy]).T)

                i = i + 1
        self.keys = np.concatenate(self.keys, axis=0)
        self.num_keys = self.keys.shape[0]
        self.indices = [i for i in range(self.num_keys)]

        random.shuffle(self.indices)
Example #59
0
# Generate a randomized list of angle and direction pairs. Each pair is
# represented as a single integer. The index of the angle (in the angles array)
# is multiplied by 10, and the index of the direction (in the directions array)
# is added to it. Positive values represent horizontal pairs, and negative
# represent vertical.
pairs = []
for i in range(trials):
    for j in range(len(anglesH)):  # Loop through horizontal angles
        for k in range(len(directionsH)):  # Loop through horizontal directions
            pairs.append((j * 10) + k)
    if not horizontalOnly:
        for l in range(len(anglesV)):
            for m in range(len(directionsV)):
                pairs.append(-((l * 10) + m))
shuffle(pairs)  # Randomize the pairs list

run = 0  # Store the number of trials completed
for pair in pairs:  # Loop through the list of pairs
    if (pair >= 0):  # Horizontal pairs
        angle = anglesH[int(pair / 10)]  # Angle index = pair/10
        dir = directionsH[(pair % 10)]  # Direction index = pair%10
    else:  # Vertical pairs
        angle = anglesV[abs(pair) // 10]  # Angle index = |pair| / 10
        dir = directionsV[abs(pair) % 10]  # Direction index = |pair| % 10 (abs first: Python floors negative modulo)

    size = angle / 10  # Set initial letter height
    if (size == 0):  # Ensure initial letter height is not 0
        size = 1

    # Initialize trial variables related to staircase algorithm
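
A compact sketch of the pair encoding described in the comment at the top of this example (helper names are illustrative; the scheme assumes fewer than 10 directions per orientation, and taking abs() before the divide and modulo avoids Python's floored behaviour on negative numbers):

def encode_pair(angle_idx, dir_idx, horizontal):
    # angle index * 10 + direction index; vertical pairs are stored as negatives.
    value = angle_idx * 10 + dir_idx
    return value if horizontal else -value

def decode_pair(pair):
    horizontal = pair >= 0
    value = abs(pair)  # take the absolute value first, then split
    return value // 10, value % 10, horizontal
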
Example #60
0
def main(wordvecfile="sentiment-data/word-vectors-refine.txt",
         trainfile="sentiment-data/train.csv",
         testfile="sentiment-data/test.csv"):

    word_to_index, index_to_word, word_vectors = read_word_vectors(wordvecfile)
    word_vectors = np.array(word_vectors, dtype=np.float32)

    train_data, train_labels, maxsenlen, minsenlen = read_data(trainfile)
    train_labels = np.array(train_labels)
    no_train_sentences = len(train_data)
    train_data_ints = np.zeros((no_train_sentences, maxsenlen), dtype=np.int32)
    print("Maximum sentence length in training data: ", maxsenlen)
    print("Minimum sentence length in training data: ", minsenlen)
    print("Total no. of sentences in training data : ", no_train_sentences)

    # convert each sentence into integer sequence
    for i, sentence in enumerate(train_data):
        train_data_ints[i, :] = get_sentence_in_word_indices(
            train_data[i], word_to_index, maxsenlen)

    test_data, test_labels, maxsenlen_test, minsenlen_test = read_data(
        testfile)
    test_labels = np.array(test_labels)
    no_test_sentences = len(test_data)
    test_data_ints = np.zeros((no_test_sentences, maxsenlen), dtype=np.int32)

    assert (maxsenlen_test <= maxsenlen)

    print("Maximum sentence length in testing data: ", maxsenlen_test)
    print("Minimum sentence length in testing data: ", minsenlen_test)
    print("Total no. of sentences in testing data : ", no_test_sentences)

    # convert each test sentence into integer sequence
    for i, sentence in enumerate(test_data):
        test_data_ints[i, :] = get_sentence_in_word_indices(
            test_data[i], word_to_index, maxsenlen)

    # RNN Parameters
    batch_size = 100
    n_tr_batches = np.int(np.ceil(no_train_sentences / batch_size))

    # Split the training data into different batches
    train_data_indices = np.arange(no_train_sentences)
    nr.shuffle(train_data_indices)
    train_data_indices = np.array_split(train_data_indices, n_tr_batches)
    batched_train_data = [
        train_data_ints[indices] for indices in train_data_indices
    ]
    batched_train_labels = [
        train_labels[indices] for indices in train_data_indices
    ]

    n_vrnn_cell = 64
    n_classes = 2
    maxiter = 10
    wordvecdim = 50

    # reset the default graph
    tf.reset_default_graph()

    # Create placeholder for labels
    t_labels = tf.placeholder(tf.float32, [None, n_classes])  # labels
    t_data = tf.placeholder(tf.int32,
                            [None, maxsenlen])  # training or test data

    # Create variables to hold the 3D tensor data of examples, words in sentences, word vectors
    indata = tf.nn.embedding_lookup(word_vectors, t_data)

    # Setup VRNN
    vrnn_cell = tf.nn.rnn_cell.BasicRNNCell(n_vrnn_cell)
    outputs, state = tf.nn.dynamic_rnn(vrnn_cell, indata, dtype=tf.float32)

    # weights for last softmax
    W = tf.Variable(tf.random_uniform([n_vrnn_cell, n_classes]))
    b = tf.Variable(tf.constant(0.1, shape=[n_classes]))

    # Take the output at the last time step: transpose to [time, batch, units], then gather the final slice.
    H = tf.transpose(outputs, [1, 0, 2])
    h_final = tf.gather(H, int(H.get_shape()[0]) - 1)
    prediction = tf.matmul(h_final, W) + b

    correct_prediction = tf.equal(tf.argmax(prediction, 1),
                                  tf.argmax(t_labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction,
                                                labels=t_labels))
    optimizer = tf.train.AdamOptimizer().minimize(loss)

    sess = tf.Session()
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    for epoch in xrange(maxiter):
        for i in xrange(n_tr_batches):
            sess.run(optimizer, {
                t_data: batched_train_data[i],
                t_labels: batched_train_labels[i]
            })

        if ((epoch + 1) % 2 == 0):
            save_path = saver.save(sess,
                                   "models/pretrained_vrnn.ckpt",
                                   global_step=epoch)
            print("Saved checkpoint to %s" % save_path)

    print(
        "Accuracy: ",
        sess.run(accuracy,
                 feed_dict={
                     t_data: test_data_ints,
                     t_labels: test_labels
                 }))