def run_experiment():
    pattern = re.compile(r"lda_([0-9]+)\.pb")
    data_dir = "data"
    files = [
        (re.search(pattern, f).group(1), join(data_dir, f))
        for f in listdir(data_dir)
        if isfile(join(data_dir, f)) and re.match(pattern, f)
    ]
    cmd_str = "peircebayes {} -n 100 -m lda -t -s {}"
    cmd_str2 = "peircebayes {} -n 100 -m lda -t -s {} -a cgs"
    np.random.seed(1234)
    start = time.time()
    for i, f in files:
        print(i)
        # sample 10 times
        for j, seed in enumerate(np.random.choice(5000, 10, replace=False) + 1):
            call_cmd(cmd_str.format(f, seed))
            phi = np.load("/tmp/peircebayes/avg_samples.npz")["arr_1"]
            np.savez(join(data_dir, "phi_{}_{}".format(i, j)), **{"phi": phi})
            call_cmd("cp /tmp/peircebayes/lls.npz data/lls_{}_{}.npz".format(i, j))
            call_cmd(cmd_str2.format(f, seed))
            call_cmd("cp /tmp/peircebayes/lls.npz data/lls_cgs_{}_{}.npz".format(i, j))
    end = time.time()
    with open("data/time_pb", "w") as f:
        f.write(str(end - start))
    cmd_str_r = "Rscript run_lda.R"
    start = time.time()
    call_cmd(cmd_str_r)
    end = time.time()
    with open("data/time_r", "w") as f:
        f.write(str(end - start))
Example #2
def main(options1):

    # Write parameters to a dataframe

    dice1 = np.zeros((len(options1["patient"]), 5))
    cc = 0

    # Get a dictionary of patient genders

    roc_data = []
    roc_s_data = []

    # Define the patient for testing

    p_testi = 6
    p_train = []
    p_test = 'RIT' + str(p_testi).zfill(3)
    print("Test case: ", p_test)

    for pp in options1["pat_loocv"]:
        candidate1 = 'RIT' + str(pp).zfill(3)
        p_train.append(candidate1)

    # remove test case from training
    p_train.remove(p_test)

    # ~~~~~~~~~~~~~~~ Main script ~~~~~~~~~~~~~~~~ #

    # find and save part variance for a dataset
    part_names = {"lumen": 1, "tumour": 2, "wall": 3, "bladder": 4}
    mean1, var1 = find_part_variance(p_train, options1, part_names)
    np.savez('learned_part_variance.npz', mean1=mean1['tumourb'], var1=var1['tumourb'])
Example #3
 def __init__(self, fndark, nblocksize):
     if os.path.isfile(fndark + '-dark.npz'):
         npzfile = np.load(fndark + '-dark.npz')
         self.dmean = npzfile['dmean']
         self.dstd = npzfile['dstd']
         self.dbpm = npzfile['dbpm']
     else:
         dark = Binary(fndark)
         nframes = dark.nframes
         my = dark.my
         mx = dark.mx
         nblocks = nframes // nblocksize

         bmed = np.zeros((nblocks, my, mx))
         bstd = np.zeros((nblocks, my, mx))
         for iblock in range(nblocks):
             t0 = time.perf_counter()
             a = dark.data[iblock*nblocksize:(iblock+1)*nblocksize]
             a, idx = dropbadframes(a)
             print('- read block, dropped bad, subtracted dark in ' + str(time.perf_counter() - t0) + 's')
             nfb = a.shape[0]
             bmed[iblock, :, :] = np.median(a, axis=0)
             bstd[iblock, :, :] = np.std(a, axis=0)
         self.dmean = np.mean(bmed, axis=0)
         self.dstd = np.sqrt(np.sum(bstd**2, axis=0))
         self.dbpm = self.dstd < (np.median(self.dstd) + 5*np.std(self.dstd))
         self.dbpm = self.dstd < (np.median(self.dstd*self.dbpm) + 5*np.std(self.dstd*self.dbpm))

         np.savez(fndark + '-dark', dmean=self.dmean, dstd=self.dstd, dbpm=self.dbpm)
         del dark
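The branch structure above is a compute-or-load cache: reuse the `.npz` if it exists, otherwise compute the statistics and save them. A minimal generic sketch of that pattern (all names here are illustrative, not from the code above):

import os
import numpy as np

def cached_stats(path, compute):
    # Reuse cached statistics when the .npz archive exists.
    if os.path.isfile(path + '.npz'):
        npz = np.load(path + '.npz')
        return npz['mean'], npz['std']
    # Otherwise compute once and cache for the next run.
    mean, std = compute()
    np.savez(path, mean=mean, std=std)  # np.savez appends '.npz' to a bare name
    return mean, std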
Example #4
def save(model, timings, post_fix=""):
    print "Saving the model..."

    # ignore keyboard interrupt while saving
    start = time.time()
    s = signal.signal(signal.SIGINT, signal.SIG_IGN)

    model.save(
        model.state["save_dir"] + "/" + model.state["run_id"] + "_" + model.state["prefix"] + post_fix + "model.npz"
    )
    pickle.dump(
        model.state,
        open(
            model.state["save_dir"]
            + "/"
            + model.state["run_id"]
            + "_"
            + model.state["prefix"]
            + post_fix
            + "state.pkl",
            "w",
        ),
    )
    numpy.savez(
        model.state["save_dir"] + "/" + model.state["run_id"] + "_" + model.state["prefix"] + post_fix + "timing.npz",
        **timings
    )
    signal.signal(signal.SIGINT, s)

    print "Model saved, took {}".format(time.time() - start)
Example #5
 def save(self, npz_file):
     '''Serialize to object to an npz file.'''
     np.savez(npz_file,
              chrom=self.chrom,
              sample_call_rate_full=self.sample.call_rate_full,
              sample_concordance=self.sample.concordance,
              sample_call_rate_partial=self.sample.call_rate_partial,
              sample_samples=self.sample.samples,
              
              snp_call_rate_imputed_full=self.snp.call_rate_imputed_full,
              snp_concordance_imputed_het=self.snp.concordance_imputed_het,
              snp_call_rate_imputed_partial=self.snp.call_rate_imputed_partial,
              snp_info=self.snp.info,
              snp_call_rate_training=self.snp.call_rate_training,
              snp_snps=self.snp.snps,
              snp_concordance_imputed=self.snp.concordance_imputed,
              snp_x=self.snp.x,
              snp_maf=self.snp.maf,
               
              maf_call_rate_imputed=self.maf.call_rate_imputed,
              maf_concordance_imputed_het=self.maf.concordance_imputed_het,
              maf_call_rate_training=self.maf.call_rate_training,
              maf_maf=self.maf.maf,
              maf_concordance_imputed=self.maf.concordance_imputed,
              # sample_index=self.sample_index,
              # pedigree=np.array([self.pedigree])
              )
Example #6
 def save(self):
     numpy.savez('timing.npz',
                 train=self.train_timing,
                 valid=self.valid_timing,
                 test=self.test_timing,
                 k=self.k)
     self.model.save()
Example #7
def get_flatcache(subject, xfmname, pixelwise=True, thick=32, sampler='nearest',
                  recache=False, height=1024, depth=0.5):
    cachedir = db.get_cache(subject)
    cachefile = os.path.join(cachedir, "flatverts_{height}.npz").format(height=height)
    if pixelwise and xfmname is not None:
        cachefile = os.path.join(cachedir, "flatpixel_{xfmname}_{height}_{sampler}_{extra}.npz")
        extra = "l%d"%thick if thick > 1 else "d%g"%depth
        cachefile = cachefile.format(height=height, xfmname=xfmname, sampler=sampler, extra=extra)

    if not os.path.exists(cachefile) or recache:
        print("Generating a flatmap cache")
        if pixelwise and xfmname is not None:
            pixmap = _make_pixel_cache(subject, xfmname, height=height, sampler=sampler, thick=thick, depth=depth)
        else:
            pixmap = _make_vertex_cache(subject, height=height)
        np.savez(cachefile, data=pixmap.data, indices=pixmap.indices, indptr=pixmap.indptr, shape=pixmap.shape)
    else:
        from scipy import sparse
        npz = np.load(cachefile)
        pixmap = sparse.csr_matrix((npz['data'], npz['indices'], npz['indptr']), shape=npz['shape'])
        npz.close()

    if not pixelwise and xfmname is not None:
        from scipy import sparse
        mapper = utils.get_mapper(subject, xfmname, sampler)
        pixmap = pixmap * sparse.vstack(mapper.masks)

    return pixmap
Example #8
def recordDVM(filename='voltdata.npz',sun=False,moon=False,recordLength=np.inf,verbose=True):
    ra = 0
    dec = 0
    raArr = np.ndarray(0)
    decArr = np.ndarray(0)
    lstArr = np.ndarray(0)
    jdArr = np.ndarray(0)
    voltArr = np.ndarray(0)
    
    startTime = time.time()

    while np.less(time.time()-startTime,recordLength):
        if sun:
            raDec = sunPos()
            ra = raDec[0]
            dec = raDec[1]
        startSamp = time.time()
        currVolt = getDVMData()
        currLST = getLST()
        currJulDay = getJulDay()
        raArr = np.append(raArr,ra)
        decArr = np.append(decArr,dec)
        voltArr = np.append(voltArr,currVolt)
        lstArr = np.append(lstArr,currLST)
        jdArr = np.append(jdArr,currJulDay)

        if verbose:
            print('Measuring voltage: ' + str(currVolt) + ' (LST: ' + str(currLST) + '  ' + time.asctime() + ')')
        
        np.savez(filename,ra=raArr,dec=decArr,jd=jdArr,lst=lstArr,volts=voltArr)
        sys.stdout.flush()
        time.sleep(np.max([0,1.0-(time.time()-startSamp)]))
Example #9
        def get_npz(name):
            fname = 'npz_data/%s.npz' % name
            if self.use_saved_npz and path.isfile(fname):
                all_data = np.load(fname)
                # Each work contains many parts. Loop through each one.
                return [all_data[i] for i in all_data.files]

            music_file = self._get_path('data/', name + '.krn')
            if not path.isfile(music_file):
                music_file = music_file[:-3] + 'xml'
            if not path.isfile(music_file):
                raise Exception("Cannot find score for %s" % music_file[:-4])
            score = music21.converter.parse(music_file)
            all_arr = []
            for part in score.parts:
                arr = []
                for note in part.flat:
                    if isinstance(note, music21.note.Note):
                        elem = (note.ps, note.quarterLength)
                    elif isinstance(note, music21.note.Rest):
                        elem = (0.0, note.quarterLength)
                    else:
                        continue
                    arr.append(elem)
                all_arr.append(np.array(arr))
            if self.save_data:
                np.savez(fname, *all_arr)
            return all_arr
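The comment above notes that each work holds many parts: `np.savez(fname, *all_arr)` stores positional arrays under the keys 'arr_0', 'arr_1', ..., and `all_data.files` lists those keys, which is how the list comprehension recovers them. A small round-trip sketch:

import numpy as np

parts = [np.zeros((4, 2)), np.ones((3, 2))]   # illustrative part arrays
np.savez('work.npz', *parts)                  # stored as arr_0, arr_1
loaded = np.load('work.npz')
restored = [loaded[k] for k in loaded.files]  # parts come back in saved order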
Example #10
def saveEnsemble(ensemble, filename=None, **kwargs):
    """Save *ensemble* model data as :file:`filename.ens.npz`.  If *filename*
    is ``None``, title of the *ensemble* will be used as the filename, after
    white spaces in the title are replaced with underscores.  Extension is
    :file:`.ens.npz`. Upon successful completion of saving, filename is
    returned. This function makes use of :func:`numpy.savez` function."""

    if not isinstance(ensemble, Ensemble):
        raise TypeError('invalid type for ensemble, {0}'
                        .format(type(ensemble)))
    if len(ensemble) == 0:
        raise ValueError('ensemble instance does not contain data')

    dict_ = ensemble.__dict__
    attr_list = ['_title', '_confs', '_weights', '_coords']
    if isinstance(ensemble, PDBEnsemble):
        attr_list.append('_labels')
        attr_list.append('_trans')
    if filename is None:
        filename = ensemble.getTitle().replace(' ', '_')
    attr_dict = {}
    for attr in attr_list:
        value = dict_[attr]
        if value is not None:
            attr_dict[attr] = value
            
    attr_dict['_atoms'] = np.array([dict_['_atoms'], 0])
    filename += '.ens.npz'
    ostream = openFile(filename, 'wb', **kwargs)
    np.savez(ostream, **attr_dict)
    ostream.close()
    return filename
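A hedged sketch of reading such a `.ens.npz` back with plain numpy (ProDy's own loader is the proper counterpart; `allow_pickle=True` is needed because `_atoms` is stored as an object array):

import numpy as np

npz = np.load('my_ensemble.ens.npz', allow_pickle=True)  # hypothetical filename
print(npz.files)     # e.g. ['_title', '_coords', '_weights', '_confs', '_atoms']
coords = npz['_coords']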
Example #11
    def write(self, detector):

        # detector.data array is a 3-D numpy array
        # some of its dimensions may be as well ones and the array reduced to 0,1 or 2-D
        all_items = detector.data.size
        logger.info("Number of all items: {:d}".format(all_items))

        # prepare a cut to select values which norm is greater than threshold
        # default value of threshold is zero, in this case non-zero values will be selected
        # cut will be 3-D arrays of booleans
        # note that numpy allocates here same amount of memory as for original data
        thres_cut = np.abs(detector.data) > self.threshold
        passed_items = np.sum(thres_cut)
        logger.info("Number of items passing threshold: {:d}".format(passed_items))
        logger.info("Sparse matrix compression rate: {:g}".format(passed_items / all_items))

        # select indices which pass threshold
        # we get here a plain python tuple of 3-elements
        # first element is numpy array of indices along X-axis, second for Y axis and third for Z
        # note that such table cannot be used directly to index numpy arrays
        indices = np.argwhere(thres_cut)

        # select data which pass threshold and save it as plain 1-D numpy array
        filtered_data = detector.data[thres_cut]

        # save file to NPZ file format
        np.savez(file=self.filename,
                 data=filtered_data,
                 indices=indices,
                 shape=detector.data.shape)

        return 0
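Because only values passing the threshold are written, the dense array can be rebuilt from the saved data/indices/shape triplet. A minimal reconstruction sketch (the filename stands in for whatever `self.filename` was):

import numpy as np

npz = np.load('detector.npz')                  # hypothetical filename
dense = np.zeros(tuple(npz['shape']))
dense[tuple(npz['indices'].T)] = npz['data']   # argwhere rows -> fancy-index tuple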
Example #12
def test_npzfile_dict():
    s = io.BytesIO()
    x = np.zeros((3, 3))
    y = np.zeros((3, 3))

    np.savez(s, x=x, y=y)
    s.seek(0)

    z = np.load(s)

    assert 'x' in z
    assert 'y' in z
    assert 'x' in z.keys()
    assert 'y' in z.keys()

    for f, a in z.items():
        assert f in ['x', 'y']
        assert_equal(a.shape, (3, 3))

    assert len(z.items()) == 2

    for f in z:
        assert f in ['x', 'y']

    assert 'x' in list(z.keys())
Example #13
    def mutation_histogram(self, records, mut, filename):
        """Records is a pd.Dataframe."""
        if os.path.exists(filename + '.npz'):
            logging.critical(filename + '.npz exists.')
            return filename
        if records.shape[0] < self.min_seqs:
            return ''

        igs = [IgRecord(x.to_dict()) for _, x in records.iterrows()]
        igsimilarity_learn = copy.deepcopy(self.igsimilarity)
        igsimilarity_learn.correct = self.correction
        igsimilarity_learn.rm_duplicates = True
        if not self.correction:
            igsimilarity_learn.tol = 1000
        else:
            igsimilarity_learn.correct_by = self.correction

        sim_func = igsimilarity_learn.pairwise
        logging.info("Computing %s", filename)
        dnearest = parallel_distance.dnearest_intra_padding(
            igs, sim_func, filt=lambda x: x > 0, func=max)

        if not os.path.exists(filename.split('/')[0]):
            os.makedirs(filename.split('/')[0])
        np.savez(filename, X=dnearest, mut=mut)

        # Plot distance distribution
        title = "Similarities for {:.3f}-{:.3f}%" \
                .format(np.min(records['MUT']), np.max(records['MUT']))
        plot_hist(dnearest, self.bins, title, filename)
        return filename
Example #14
def save(destination, train, valid, test, vocab):
    np.savez(destination,
             vocab=np.array(vocab),
             train=train,
             valid=valid,
             test=test,
             vocab_size=len(vocab))
Example #15
    def assertCubeDataAlmostEqual(self, cube, reference_filename, *args, **kwargs):
        reference_path = self.get_result_path(reference_filename)
        if self._check_reference_file(reference_path):
            kwargs.setdefault("err_msg", "Reference file %s" % reference_path)

            result = np.load(reference_path)
            if isinstance(result, np.lib.npyio.NpzFile):
                self.assertIsInstance(cube.data, ma.MaskedArray, "Cube data was not a masked array.")
                # Avoid comparing any non-initialised array data.
                data = cube.data.filled()
                np.testing.assert_array_almost_equal(data, result["data"], *args, **kwargs)
                np.testing.assert_array_equal(cube.data.mask, result["mask"])
            else:
                np.testing.assert_array_almost_equal(cube.data, result, *args, **kwargs)
        else:
            self._ensure_folder(reference_path)
            logger.warning("Creating result file: %s", reference_path)
            if isinstance(cube.data, ma.MaskedArray):
                # Avoid recording any non-initialised array data.
                data = cube.data.filled()
                with open(reference_path, "wb") as reference_file:
                    np.savez(reference_file, data=data, mask=cube.data.mask)
            else:
                with open(reference_path, "wb") as reference_file:
                    np.save(reference_file, cube.data)
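The masked-array branch above persists `data` and `mask` as separate arrays in one archive. A self-contained sketch of that round trip:

import numpy as np
import numpy.ma as ma

arr = ma.masked_invalid(np.array([1.0, np.nan, 3.0]))
np.savez('ref.npz', data=arr.filled(), mask=arr.mask)
npz = np.load('ref.npz')
restored = ma.MaskedArray(npz['data'], mask=npz['mask'])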
Example #16
def get_flat_distribution(y):
    start = time.time()
    logging.debug(y.shape)
    rows,cols = y.shape
    x_min = 0 
    x_max = 2500
    bin_width = 100
    n_bins = int((x_max-x_min)/bin_width)
    lst = [bin(i,i+bin_width) for i in np.arange(x_min,x_max,bin_width)]
    new_array = np.zeros((1,2))
    new_array_list = []

    for row in range(rows):
        if row == 0:
            new_array_list.append(y[row])
            [lst[i].in_bin(y[0][0][0]) for i in range(n_bins)]
        else:
            for i in range(n_bins):
                if(lst[i].in_bin(y[row][0][0]) and not lst[i].full):
                    new_array_list.append(y[row])
            if(row%1000000 == 0):
                logging.debug(row)
    stop = time.time()
    logging.debug("time elapsed running through rows"+str(stop - start))
    new_array = np.vstack(new_array_list)
    stop = time.time()
    logging.debug("time elapsed stacking"+str(stop - start))
    logging.debug("new array shape:")
    logging.debug(new_array.shape)
    
    rows,cols = new_array.shape
    [lst[i].print_object() for i in range(n_bins)]
    np.savez(file_name+"flat", new_array)
    return new_array
Example #17
def check_lambda(dirnm, datanm_train, datanm_valid, datanm_orig_train, datanm_orig_valid, samples_per_class, Cs, num_classes):
    spct = 10*70
    tdata, tlabels = load_full(dirnm+datanm_train, spct)
    print(tdata.shape, tlabels.shape)

    spct = 10
    otdata, otlabels = load_full(dirnm+datanm_orig_train, spct)

    spct = 10*30
    vdata, vlabels = load_full(dirnm+datanm_valid, spct)

    spct = 10
    ovdata, ovlabels = load_full(dirnm+datanm_orig_valid, spct)

    # artif
    ans = np.zeros((len(Cs), 4))

    for i, C in enumerate(Cs):
        clf = LogisticRegression(C=C, penalty='l2', multi_class='ovr',
                                 tol=0.001, n_jobs=-1, verbose=0, solver='newton-cg')
        clf.fit(tdata, tlabels)

        out_train = clf.predict_proba(tdata)
        out_valid = clf.predict_proba(vdata)
        out_train_real = clf.predict_proba(otdata)
        out_valid_real = clf.predict_proba(ovdata)

        ans[i, 0] += log_loss(tlabels, out_train)
        ans[i, 1] += log_loss(vlabels, out_valid)
        ans[i, 2] += log_loss(otlabels, out_train_real)
        ans[i, 3] += log_loss(ovlabels, out_valid_real)

    np.savez("logreg_lambda", ans= ans, Cs = Cs, num_classes = num_classes, samples_per_class = samples_per_class)
    return ans
Example #18
	def save(self, directory, suffix=""):
		fname = "mlp_" + ("_".join([str(l) for l in self.__layer_sizes])) + suffix + ".npz"
		path = os.path.join(directory, fname)
		with open(path, "w") as f:
			mats_to_save = [np.array(self.__layer_sizes)] + self.__W + self.__b
			np.savez(f, *mats_to_save)
		return path
Example #19
def save_npz(file, obj, compression=True):
    """Saves an object to the file in NPZ format.

    This is a short-cut function to save only one object into an NPZ file.

    Args:
        file (str or file-like): Target file to write to.
        obj: Object to be serialized. It must support serialization protocol.
        compression (bool): If ``True``, compression in the resulting zip file
            is enabled.

    .. seealso::
        :func:`chainer.serializers.load_npz`

    """
    if isinstance(file, six.string_types):
        with open(file, 'wb') as f:
            save_npz(f, obj, compression)
        return

    s = DictionarySerializer()
    s.save(obj)
    if compression:
        numpy.savez_compressed(file, **s.target)
    else:
        numpy.savez(file, **s.target)
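A hedged usage sketch for this helper as shipped in Chainer (assumes `chainer` is installed; any link supporting the serialization protocol works):

import chainer
from chainer import serializers

model = chainer.links.Linear(3, 2)                       # illustrative model
serializers.save_npz('model.npz', model)                 # compressed (default)
serializers.save_npz('model_raw.npz', model, compression=False)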
Example #20
def check_vb(dirnm, datanm_train, datanm_valid, C, num_classes):
    spct = 10*70
    tdata, tlabels = load_full(dirnm+datanm_train, spct)
    #print tdata.shape, tlabels.shape

    spct = 10*30
    vdata, vlabels = load_full(dirnm+datanm_valid, spct)

    h = np.arange(0, 310, 10)
    h[0] +=1
    # artif
    ans = np.zeros((h.size, 2))

    tind = kget(tlabels, num_classes, h[-1])
    vind = kget(vlabels, num_classes, h[-1])

    for l in range(h.size):

        clf = LogisticRegression(C=C, penalty='l2', multi_class='ovr',
                                 tol=0.001, n_jobs=-1, verbose=0, solver='newton-cg')
        clf.fit(tdata[tind[:h[l]*num_classes]], tlabels[tind[:h[l]*num_classes]])

        out_train = clf.predict_proba(tdata[tind[:h[l]*num_classes]])
        out_valid = clf.predict_proba(vdata[vind[:h[l]*num_classes]])

        ans[l, 0] += log_loss(tlabels[tind[:h[l]*num_classes]], out_train)
        ans[l, 1] += log_loss(vlabels[vind[:h[l]*num_classes]], out_valid)

    np.savez("logreg_bv", ans= ans, C = C, num_classes = num_classes)
    return ans
Example #21
def tissots_indicatrix(outfile, sub, radius=10, spacing=50, maxfails=100): 
    tissots = []
    allcenters = []
    for hem in ["lh", "rh"]:
        fidpts, fidpolys = db.get_surf(sub, "fiducial", hem)
        #G = make_surface_graph(fidtri)
        surf = polyutils.Surface(fidpts, fidpolys)
        nvert = fidpts.shape[0]
        tissot_array = np.zeros((nvert,))

        centers = [np.random.randint(nvert)]
        cdists = [surf.geodesic_distance(centers)]
        while True:
            ## Find possible vertices
            mcdist = np.vstack(cdists).min(0)
            possverts = np.nonzero(mcdist > spacing)[0]
            #possverts = np.nonzero(surf.geodesic_distance(centers) > spacing)[0]
            if not len(possverts):
                break
            ## Pick random vertex
            centervert = possverts[np.random.randint(len(possverts))]
            centers.append(centervert)
            print("Adding vertex %d.." % centervert)
            dists = surf.geodesic_distance([centervert])
            cdists.append(dists)

            ## Find appropriate set of vertices
            selverts = dists < radius
            tissot_array[selverts] = 1

        tissots.append(tissot_array)
        allcenters.append(np.array(centers))

    np.savez(outfile, left=tissots[0], right=tissots[1], centers=allcenters)
Example #22
def save_errors(filename, running_error, err_type='error'):
    running_error = np.asarray(running_error)
    savename = filename.split('.')
    savename = savename[0] + err_type + '.npz'
    if err_type in ('error', 'acc', 'val_acc'):
        if os.path.isfile(savename):
            arr = np.load(savename)['running_error']
            running_error = np.hstack((arr, running_error))
    np.savez(savename, running_error=running_error)
    fig = plt.figure()
    plt.plot(running_error)
    plt.xlabel('Iterations')
    if err_type == 'error':
        plt.ylabel('Error')
    elif err_type == 'acc':
        plt.ylabel('Accuracy')
    elif err_type == 'val_acc':
        plt.ylabel('Validation Accuracy')
    plt.savefig(savename.replace('.npz','.png'))
    plt.close()
Example #23
def getData(gps, dt, target_name, channel_names, chunk_size):
    print('Acquiring Data...\n')

    target = getChannel('trend', target_name, gps, dt)
    bad = []
    chunks = [np.asarray(range(len(channel_names))[i:i + chunk_size])
              for i in range(0, len(channel_names), chunk_size)]

    for i in range(len(chunks)):
        gc.collect()
        good = []
        aux = np.empty((dt, len(chunks[i])), object)
        for j in range(len(chunks[i])):
            aux[:, j] = getChannel('trend', channel_names[chunks[i][j]], gps,
                                   dt).data
            if aux[:, j][0] != np.mean(aux[:, j]):
                good.append(j)
            else:
                print('Constant Channel: ' + channel_names[chunks[i][j]])
                bad.append(i * chunk_size + j)
            if (i * len(chunks[i]) + j) % 10 == 0:
                print('\nLoaded '
                      + str(int(((i * chunk_size + j)
                                 / float(len(channel_names))) * 100))
                      + '% : ' + str(i * chunk_size + j)
                      + ' of ' + str(len(channel_names))
                      + '\n')

        aux = aux[:, good]
        channel_names[chunks[i]] = channel_names[chunks[i][good]]
        np.savez('nonna_storage/data_chunk_' + str(i), aux=aux,
                 channel_names=channel_names[chunks[i]])
Example #24
def packageMergedSpec():
    dataDir = getPackageDir('SIMS_SKYBRIGHTNESS_DATA')
    outDir = os.path.join(dataDir, 'ESO_Spectra/MergedSpec')

    # A large number of the background components only depend on Airmass, so we can merge those together
    npzs = ['LowerAtm/Spectra.npz',
            'ScatteredStarLight/scatteredStarLight.npz',
            'UpperAtm/Spectra.npz']
    files = [os.path.join(dataDir, 'ESO_Spectra', npz) for npz in npzs]


    temp = np.load(files[0])

    wave = temp['wave'].copy()
    spec = temp['spec'].copy()
    spec['spectra'] = spec['spectra']*0.
    spec['mags'] = spec['mags']*0.

    for filename in files:
        restored = np.load(filename)
        spec['spectra'] += restored['spec']['spectra']
        try:
            flux = 10.**(-0.4*(restored['spec']['mags']-np.log10(3631.)))
        except:
            import pdb ; pdb.set_trace()
        flux[np.where(restored['spec']['mags'] == 0.)] = 0.
        spec['mags'] += flux

    spec['mags'] = -2.5*np.log10(spec['mags'])+np.log10(3631.)

    np.savez(os.path.join(outDir,'mergedSpec.npz'), spec=spec, wave=wave, filterWave=temp['filterWave'])
Example #25
    def save(self, path):
        savedir = smartutils.create_folder(pjoin(path, type(self).__name__))
        smartutils.save_dict_to_json_file(pjoin(savedir, "hyperparams.json"), self.hyperparameters)

        params = {param.name: param.get_value() for param in self.parameters}
        assert len(self.parameters) == len(params)  # Implies names are all unique.
        np.savez(pjoin(savedir, "params.npz"), **params)
Example #26
    def __call__(self, u, x, t, n):
        # Save solution u to a file using numpy.savez
        if self.filename is not None:
            name = 'u%04d' % n  # array name
            kwargs = {name: u}
            fname = '.' + self.filename + '_' + name + '.dat'
            np.savez(fname, **kwargs)
            self.t.append(t[n])  # store corresponding time value
            if n == 0:           # save x once
                np.savez('.' + self.filename + '_x.dat', x=x)

        # Animate
        if n % self.skip_frame != 0:
            return
        # Plot u and mark medium x=x_L and x=x_R
        x_L, x_R = self.medium
        umin, umax = self.yaxis
        title = 'Nx=%d' % (x.size-1)
        if self.title:
            title = self.title + ' ' + title
        if self.backend is None:
            # native matplotlib animation
            if n == 0:
                self.plt.ion()
                self.lines = self.plt.plot(
                    x, u, 'r-',
                    [x_L, x_L], [umin, umax], 'k--',
                    [x_R, x_R], [umin, umax], 'k--')
                self.plt.axis([x[0], x[-1],
                               self.yaxis[0], self.yaxis[1]])
                self.plt.xlabel('x')
                self.plt.ylabel('u')
                self.plt.title(title)
                self.plt.text(0.75, 1.0, 'C=0.25')
                self.plt.text(0.32, 1.0, 'C=1')
                self.plt.legend(['t=%.3f' % t[n]])
            else:
                # Update new solution
                self.lines[0].set_ydata(u)
                self.plt.legend(['t=%.3f' % t[n]])
                self.plt.draw()
        else:
            # scitools.easyviz animation
            self.plt.plot(x, u, 'r-',
                          [x_L, x_L], [umin, umax], 'k--',
                          [x_R, x_R], [umin, umax], 'k--',
                          xlabel='x', ylabel='u',
                          axis=[x[0], x[-1],
                                self.yaxis[0], self.yaxis[1]],
                          title=title,
                          show=self.screen_movie)
        # pause
        if t[n] == 0:
            time.sleep(2)  # let initial condition stay 2 s
        else:
            pause = self.pause if self.pause is not None else (0.2 if u.size < 100 else 0)
            time.sleep(pause)

        self.plt.savefig('frame_%04d.png' % (n))
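The `kwargs = {name: u}` trick above lets `np.savez` store the array under a key chosen at run time. A stripped-down sketch of the pattern:

import numpy as np

u = np.linspace(0, 1, 5)                    # illustrative solution array
name = 'u%04d' % 3
np.savez('.solution_' + name + '.dat', **{name: u})   # '.npz' gets appended
arr = np.load('.solution_' + name + '.dat.npz')[name]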
Example #27
    def save(self, file):
        """
        Saves data from a CorpusSent object as an `npz` file.
        
        :param file: Designates the file to which to save data. See
            `numpy.savez` for further details.
        :type file: str-like or file-like object
            
        :returns: None

        :See Also: :class: Corpus, :meth: Corpus.save, :meth: numpy.savez
        """
	
	print 'Saving corpus as', file
        arrays_out = dict()
        arrays_out['corpus'] = self.corpus
        arrays_out['words'] = self.words
        arrays_out['sentences'] = self.sentences
        arrays_out['context_types'] = np.asarray(self.context_types)

        for i,t in enumerate(self.context_data):
            key = 'context_data_' + self.context_types[i]
            arrays_out[key] = t

        np.savez(file, **arrays_out)
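A hedged round-trip sketch for the arrays written above (the filename is illustrative; the per-context keys follow the `'context_data_' + type` convention from the loop):

import numpy as np

npz = np.load('corpus_sent.npz')
context_types = [str(t) for t in npz['context_types']]
context_data = {t: npz['context_data_' + t] for t in context_types}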
Example #28
def convert_npys_to_npzs(npy_files, arr_key, output_dir):
    """Create a number of NPY files.

    Parameters
    ----------
    npy_files : list of str
        Paths to the source NPY files.

    arr_key : str
        Name to write the array under in the npz archive.

    output_dir : str
        Path under which to write data.

    Returns
    -------
    npz_files : list of str
        Newly created NPZ files.
    """
    npz_files = []
    for fpath in npy_files:
        data = {arr_key: np.load(fpath)}
        npz_path = os.path.join(output_dir, "{}.npz".format(filebase(fpath)))
        np.savez(npz_path, **data)
        npz_files.append(npz_path)

    return npz_files
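A hedged usage sketch (the `filebase` helper is an assumption, presumably the basename without extension, and `output_dir` must already exist):

import os
import numpy as np

def filebase(fpath):
    # assumed helper: basename without extension
    return os.path.splitext(os.path.basename(fpath))[0]

npy_files = ['features/a.npy', 'features/b.npy']        # hypothetical inputs
npz_files = convert_npys_to_npzs(npy_files, 'embedding', 'features_npz')
first = np.load(npz_files[0])['embedding']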
Example #29
def curvature(outfile, subject, smooth=20, **kwargs):
    curvs = []
    for pts, polys in db.get_surf(subject, "fiducial"):
        surf = polyutils.Surface(pts, polys)
        curv = surf.smooth(surf.mean_curvature(), smooth)
        curvs.append(curv)
    np.savez(outfile, left=curvs[0], right=curvs[1])
Example #30
def write_potential(N=2.5, pphw=20, amplitude=1.0, sigmax=1e-1, sigmay=1e-1,
                    L=100., W=1.0, x_R0=0.05, y_R0=0.4, loop_type='Bell',
                    init_phase=0.0, shape='RAP', plot=True,
                    plot_dimensions=False, direction='right',
                    boundary_only=False, with_boundary=False, boundary_phase=0.0,
                    theta=0.0, smearing=False, verbose=True, linearized=False):

    p = Potential(N=N, pphw=pphw, amplitude=amplitude, sigmax=sigmax,
                  sigmay=sigmay, x_R0=x_R0, y_R0=y_R0, init_phase=init_phase,
                  shape=shape, L=L, W=W, loop_type=loop_type,
                  direction=direction, boundary_only=boundary_only,
                  with_boundary=with_boundary, theta=theta,
                  verbose=verbose, linearized=linearized)

    if not boundary_only:
        imag, imag_vector = p.imag, p.imag_vector
        real, real_vector = p.real, p.real_vector
    X, Y = p.X, p.Y

    if not boundary_only:
        if plot:
            import matplotlib.pyplot as plt
            if plot_dimensions:
                plt.figure(figsize=(L, W))
            plt.pcolormesh(X, Y, imag, cmap='RdBu_r')
            plt.savefig("imag.png")
            plt.pcolormesh(X, Y, real, cmap='RdBu_r')
            plt.savefig("real.png")

        np.savetxt("potential_imag.dat", zip(xrange(len(imag_vector)), imag_vector),
                   fmt=["%i", "%.12f"])
        np.savetxt("potential_real.dat", zip(xrange(len(real_vector)), real_vector),
                   fmt=["%i", "%.12f"])
        if shape != 'science':
            np.savez("potential_imag_xy.npz", X=X, Y=Y, P=imag_vector,
                     X_nodes=p.xnodes, Y_nodes=p.ynodes,
                     sigmax=sigmax, sigmay=sigmay)

    if shape == 'RAP':
        xi_lower, xi_upper = p.WG.get_boundary(theta=theta, smearing=smearing,
                                               boundary_phase=boundary_phase)
        # set last element to 0 (xi_lower) or W (xi_upper)
        print "WARNING: end of boundary not set zero!"
        # xi_lower[-1] = 0.0
        # xi_upper[-1] = W
        np.savetxt("upper.boundary", zip(xrange(p.nx), xi_upper))
        np.savetxt("lower.boundary", zip(xrange(p.nx), xi_lower))
        eps, delta = p.WG.get_cycle_parameters()
        np.savetxt("boundary.eps_delta", zip(eps, delta))
    if shape == 'RAP_TQD':
        eps_prime, delta_prime, theta_prime = p.WG.get_quantum_driving_parameters()
        xi_lower, xi_upper = p.WG.get_boundary(eps=eps_prime, delta=delta_prime,
                                               theta=theta_prime,
                                               smearing=smearing)
        # set last element to 0 (xi_lower) or W (xi_upper)
        xi_lower[-1] = 0.0
        xi_upper[-1] = W
        np.savetxt("upper.boundary", zip(xrange(p.nx), xi_upper))
        np.savetxt("lower.boundary", zip(xrange(p.nx), xi_lower))
        np.savetxt("boundary.eps_delta_theta", zip(eps_prime, delta_prime, theta_prime))
Example #31
    else:
        seq[:, 1] *= ratio
        seq[:, 1] += 0.1 + 0.4 * (1 - ratio)
        seq[:, 0] += 0.1

    seq = seq * (img_width - 1)
    seq = seq.astype(int)

    img = np.zeros((img_width, img_width), dtype=np.float32)

    for i in range(len(seq)):
        x, y = seq[i, 0], seq[i, 1]
        img[y, x] = 255.0

    return img


data_path = "data/CharacterTrajectories.npz"

series_data = np.load(data_path)

img_data = {}
img_data['x_train'] = [
    series_to_img(seq, 28) for seq in series_data['x_train']
]
img_data['x_test'] = [series_to_img(seq, 28) for seq in series_data['x_test']]
img_data['y_train'] = series_data['y_train']
img_data['y_test'] = series_data['y_test']

np.savez("data/CharacterTrajectories_Image.npz", **img_data)
Example #32
 def _save_output(self):
     np.savez(self.outfnm + '_ID',
              bin_ctrs_fnm=self.bin_ctrs_fnm,
              bin_ctrs_CG_fnm=self.bin_ctrs_CG_fnm,
              dtraj_AA_fnm=self.dtraj_AA_fnm,
              dtraj_CG_fnm=self.dtraj_CG_fnm,
              tau=self.tau,
              prior=self.prior,
              nts=self.nts,
              nmfpt=self.nmfpt,
              nmfptb=self.nmfptb,
              EQ_ref=self.EQ_ref,
              NSweep_p_Samp=self.NSweep_p_Samp,
              NSweep_eq=self.NSweep_eq,
              lamb0=self.lamb0,
              dlamb0=self.dlamb0,
              beta0C=self.beta0C)
Example #33
def _main():
    annotation_path = 'train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/coco_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    model, bottleneck_model, last_layer_model = create_model(
        input_shape,
        anchors,
        num_classes,
        freeze_body=2,
        weights_path='model_data/yolo_weights.h5'
    )  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a reasonable model.
    if True:
        # perform bottleneck training
        if not os.path.isfile("bottlenecks.npz"):
            print("calculating bottlenecks")
            batch_size = 8
            bottlenecks = bottleneck_model.predict_generator(
                data_generator_wrapper(lines,
                                       batch_size,
                                       input_shape,
                                       anchors,
                                       num_classes,
                                       random=False,
                                       verbose=True),
                steps=(len(lines) // batch_size) + 1,
                max_queue_size=1)
            np.savez("bottlenecks.npz",
                     bot0=bottlenecks[0],
                     bot1=bottlenecks[1],
                     bot2=bottlenecks[2])

        # load bottleneck features from file
        dict_bot = np.load("bottlenecks.npz")
        bottlenecks_train = [
            dict_bot["bot0"][:num_train], dict_bot["bot1"][:num_train],
            dict_bot["bot2"][:num_train]
        ]
        bottlenecks_val = [
            dict_bot["bot0"][num_train:], dict_bot["bot1"][num_train:],
            dict_bot["bot2"][num_train:]
        ]

        # train last layers with fixed bottleneck features
        batch_size = 8
        print("Training last layers with bottleneck features")
        print('with {} samples, val on {} samples and batch size {}.'.format(
            num_train, num_val, batch_size))
        last_layer_model.compile(optimizer='adam',
                                 loss={
                                     'yolo_loss': lambda y_true, y_pred: y_pred
                                 })
        last_layer_model.fit_generator(
            bottleneck_generator(lines[:num_train], batch_size, input_shape,
                                 anchors, num_classes, bottlenecks_train),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=bottleneck_generator(lines[num_train:], batch_size,
                                                 input_shape, anchors,
                                                 num_classes, bottlenecks_val),
            validation_steps=max(1, num_val // batch_size),
            epochs=30,
            initial_epoch=0,
            max_queue_size=1)
        model.save_weights(log_dir + 'trained_weights_stage_0.h5')

        # train last layers with random augmented data
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })
        batch_size = 16
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 4  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
Example #34
def main(out_dir,
         confs,
         plot_fname='metrics',
         metrics_fname='metrics.csv',
         logger=None):

    logger = logging.getLogger('plot_results_ksp')

    out_dirs = [c.dataOutDir for c in confs]
    logger.info('--------')
    logger.info('Self-learning on: ')
    logger.info(out_dirs)
    logger.info('out_dir: ')
    logger.info(out_dir)
    logger.info('--------')

    l_dataset = learning_dataset.LearningDataset(confs[0], pos_thr=0.5)

    plot_curves(out_dir, confs, plot_fname, metrics_fname, logger)

    l_ksp_scores = list()
    l_ksp_ss_scores = list()
    l_ksp_ss_thr_scores = list()

    for i in range(len(confs)):

        file_ = os.path.join(confs[i].dataOutDir, 'metrics.npz')
        logger.info('Loading ' + file_)
        npzfile = np.load(file_)

        l_ksp_scores.append(npzfile['ksp_scores'])
        l_ksp_ss_scores.append(npzfile['ksp_ss_scores'])
        l_ksp_ss_thr_scores.append(npzfile['ksp_ss_thr_scores'])

    # Make plots
    mean_ksp_scores = np.mean(np.asarray(l_ksp_scores), axis=0)
    mean_ksp_ss_scores = np.mean(np.asarray(l_ksp_ss_scores), axis=0)
    mean_ksp_ss_thr_scores = np.mean(np.asarray(l_ksp_ss_thr_scores), axis=0)

    std_ksp_scores = np.std(np.asarray(l_ksp_scores), axis=0)
    std_ksp_ss_scores = np.std(np.asarray(l_ksp_ss_scores), axis=0)
    std_ksp_ss_thr_scores = np.std(np.asarray(l_ksp_ss_thr_scores), axis=0)

    path_ = os.path.join(out_dir, 'dataset.npz')
    data = dict()
    data['mean_ksp_scores'] = mean_ksp_scores
    data['mean_ksp_ss_scores'] = mean_ksp_ss_scores
    data['mean_ksp_ss_thr_scores'] = mean_ksp_ss_thr_scores
    data['std_ksp_scores'] = std_ksp_scores
    data['std_ksp_ss_scores'] = std_ksp_ss_scores
    data['std_ksp_ss_thr_scores'] = std_ksp_ss_thr_scores

    np.savez(path_, **data)

    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(out_dir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        os.mkdir(frame_path)
        c0 = confs[0]
        with progressbar.ProgressBar(maxval=len(c0.frameFileNames)) as bar:
            for f in range(len(c0.frameFileNames)):
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(c0.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                for c in confs:
                    im = gaze.drawGazePoint(c.myGaze_fg, f, im, radius=7)

                bar.update(f)
                plt.subplot(241)
                plt.imshow(mean_ksp_scores[..., f])
                plt.title('mean KSP')
                plt.subplot(242)
                plt.imshow(std_ksp_scores[..., f])
                plt.title('std KSP')
                plt.subplot(243)
                plt.imshow(mean_ksp_ss_scores[..., f])
                plt.title('mean KSP+SS')
                plt.subplot(244)
                plt.imshow(std_ksp_ss_scores[..., f])
                plt.title('std KSP+SS')
                plt.subplot(245)
                plt.imshow(mean_ksp_ss_thr_scores[..., f])
                plt.title('mean KSP+SS -> PM -> (thr = %0.2f)' % (c.pm_thr))
                plt.subplot(246)
                plt.imshow(std_ksp_ss_thr_scores[..., f])
                plt.title('std KSP+SS -> PM -> (thr = %0.2f)' % (c.pm_thr))
                plt.subplot(247)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)
Example #35
def calibrate_scale(video, out_dir, frame_range, args):
    # COLMAP reconstruction.
    print_banner("COLMAP reconstruction")

    colmap_dir = pjoin(video.path, 'colmap_dense')
    src_meta_file = pjoin(colmap_dir, "metadata.npz")

    colmap = COLMAPProcessor(args.colmap_bin_path)
    dense_dir = colmap.dense_dir(colmap_dir, 0)

    if os.path.isfile(src_meta_file):
        print("Checked metadata file exists.")
    else:
        color_dir = prepare_colmap_color(video)

        if not colmap.check_dense(
            dense_dir, color_dir, valid_ratio=args.dense_frame_ratio
        ):
            path_args = [color_dir, colmap_dir]
            mask_path = pjoin(video.path, 'colmap_mask')
            if os.path.isdir(mask_path):
                path_args.extend(['--mask_path', mask_path])
            colmap_args = COLMAPParams().parse_args(
                args=path_args + ['--dense_max_size', str(args.size)],
                namespace=args
            )

            colmap.process(colmap_args)

        intrinsics, extrinsics = make_camera_params_from_colmap(
            video.path, colmap.sparse_dir(colmap_dir, 0)
        )
        np.savez(src_meta_file, intrinsics=intrinsics, extrinsics=extrinsics)

    # Convert COLMAP dense depth maps to .raw file format.
    print_banner("Convert COLMAP depth maps")

    converted_depth_fmt = pjoin(
        video.path, "depth_colmap_dense", "depth", "frame_{:06d}.raw"
    )

    # convert colmap dense depths to .raw
    converted_depth_dir = os.path.dirname(converted_depth_fmt)
    dense_depth_dir = pjoin(dense_dir, "stereo", "depth_maps")
    frames = frame_range.frames()
    if not check_frames(
        dense_depth_dir, colmap.dense_depth_suffix(), converted_depth_dir, "",
        frame_names={f"frame_{i:06d}.png" for i in frames},
    ):
        os.makedirs(converted_depth_dir, exist_ok=True)
        colmap_depth_fmt = pjoin(
            dense_depth_dir, "frame_{:06d}.png" + colmap.dense_depth_suffix()
        )
        for i in frames:
            colmap_depth_fn = colmap_depth_fmt.format(i)
            if not os.path.isfile(colmap_depth_fn):
                logging.warning(
                    "[SCALE CALIBRATION] %s does not exist.",
                    colmap_depth_fn
                )
                continue
            cmp_depth = load_colmap.read_array(colmap_depth_fn)
            inv_cmp_depth = 1.0 / cmp_depth
            ix = np.isinf(inv_cmp_depth) | (inv_cmp_depth < 0)
            inv_cmp_depth[ix] = float("nan")
            image_io.save_raw_float32_image(
                converted_depth_fmt.format(i), inv_cmp_depth
            )
        with SuppressedStdout():
            visualization.visualize_depth_dir(
                converted_depth_dir, converted_depth_dir,
                force=True, min_percentile=0, max_percentile=99,
            )

    # Compute scaled depth maps
    print_banner("Compute per-frame scales")

    scaled_depth_dir = pjoin(out_dir, "depth_scaled_by_colmap_dense", "depth")
    scaled_depth_fmt = pjoin(scaled_depth_dir, "frame_{:06d}.raw")
    scales_file = pjoin(out_dir, "scales.csv")
    src_depth_fmt = pjoin(
        video.path, f"depth_{args.model_type}", "depth", "frame_{:06d}.raw"
    )
    frames = frame_range.frames()

    if (
        check_frames(
            converted_depth_dir, ".png",
            os.path.dirname(scaled_depth_fmt), ".raw"
        )
        and os.path.isfile(scales_file)
    ):
        src_to_colmap_scales = np.loadtxt(scales_file, delimiter=',')
        assert src_to_colmap_scales.shape[0] >= len(frames) * args.dense_frame_ratio \
            and src_to_colmap_scales.shape[1] == 2, \
            (f"scales shape is {src_to_colmap_scales.shape} does not match "
             + f"({len(frames)}, 2) with threshold {args.dense_frame_ratio}")
        print("Existing scales file loaded.")
    else:
        # Scale depth maps
        os.makedirs(scaled_depth_dir, exist_ok=True)
        src_to_colmap_scales_map = {}

        for i in frames:
            converted_depth_fn = converted_depth_fmt.format(i)
            if not os.path.isfile(converted_depth_fn):
                logging.warning("[SCALE CALIBRATION] %s does not exist",
                    converted_depth_fn)
                continue
            # convert colmap_depth to raw
            inv_cmp_depth = image_io.load_raw_float32_image(converted_depth_fn)
            # compute scale for init depths
            inv_src_depth = image_io.load_raw_float32_image(src_depth_fmt.format(i))
            # src_depth * scale = (1/inv_src_depth) * scale == cmp_depth
            inv_cmp_depth = cv2.resize(
                inv_cmp_depth, inv_src_depth.shape[:2][::-1],
                interpolation=cv2.INTER_NEAREST
            )
            ix = np.isfinite(inv_cmp_depth)

            if np.sum(ix) / ix.size < args.dense_pixel_ratio:
                # not enough pixels are valid and hence the frame is invalid.
                continue

            scales = (inv_src_depth / inv_cmp_depth)[ix]
            scale = np.median(scales)
            print(f"Scale[{i}]: median={scale}, std={np.std(scales)}")
            # scale = np.median(inv_depth) * np.median(cmp_depth)
            src_to_colmap_scales_map[i] = float(scale)
            scaled_inv_src_depth = inv_src_depth / scale
            image_io.save_raw_float32_image(
                scaled_depth_fmt.format(i), scaled_inv_src_depth
            )
        with SuppressedStdout():
            visualization.visualize_depth_dir(
                scaled_depth_dir, scaled_depth_dir, force=True
            )

        # Write scales.csv
        xs = sorted(src_to_colmap_scales_map.keys())
        ys = [src_to_colmap_scales_map[x] for x in xs]
        src_to_colmap_scales = np.stack((np.array(xs), np.array(ys)), axis=-1)
        np.savetxt(scales_file, src_to_colmap_scales, delimiter=",")

    valid_frames = {int(s) for s in src_to_colmap_scales[:, 0]}

    # Scale the extrinsics' translations
    scaled_meta_file = pjoin(out_dir, "metadata_scaled.npz")
    if os.path.isfile(scaled_meta_file):
        print("Scaled metadata file exists.")
    else:
        scales = src_to_colmap_scales[:, 1]
        mean_scale = scales.mean()
        print(f"[scales] mean={mean_scale}, std={np.std(scales)}")

        with np.load(src_meta_file) as meta_colmap:
            intrinsics = meta_colmap["intrinsics"]
            extrinsics = meta_colmap["extrinsics"]

        extrinsics[..., -1] /= mean_scale
        np.savez(
            scaled_meta_file,
            intrinsics=intrinsics,
            extrinsics=extrinsics,
            scales=src_to_colmap_scales,
        )

        color_fmt = pjoin(video.path, "color_down", "frame_{:06d}.raw")
        vis_dir = pjoin(out_dir, "vis_calibration_dense")
        visualize_all_calibration(
            extrinsics, intrinsics, scaled_depth_fmt,
            color_fmt, frame_range, vis_dir,
        )

    return valid_frames
Example #36
def main(csv_file, image_base_path, output_path):
    image_paths = []
    with open(csv_file, 'r') as f:
        f.readline()
        for line in f:
            id_ = line.split(',')[0]
            image_paths.append(image_base_path + '/' + id_[0] + '/' + id_[1] +
                               '/' + id_[2] + '/' + id_ + '.jpg')
    num_images = len(image_paths)

    tf.logging.set_verbosity(tf.logging.INFO)
    tf.logging.info('done! Found %d images', num_images)

    # Parse DelfConfig proto.
    config = delf_config_pb2.DelfConfig()
    with tf.gfile.FastGFile(CONFIG_PATH, 'r') as f:
        text_format.Merge(f.read(), config)

    # Create output directory if necessary.
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Reading list of images.
        filename_queue = tf.train.string_input_producer(image_paths,
                                                        shuffle=False)
        reader = tf.WholeFileReader()
        _, value = reader.read(filename_queue)
        image_tf = tf.image.decode_jpeg(value, channels=3)

        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)

            extractor_fn = MakeExtractor(sess, config)

            # Start input enqueue threads.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            start = time.time()
            for i in tqdm.tqdm(range(num_images)):
                # Write to log-info once in a while.
                if i == 0:
                    tf.logging.info(
                        'Starting to extract DELF features from images...')
                elif i % _STATUS_CHECK_ITERATIONS == 0:
                    elapsed = (time.time() - start)
                    tf.logging.info(
                        'Processing image %d out of %d, last %d '
                        'images took %f seconds', i, num_images,
                        _STATUS_CHECK_ITERATIONS, elapsed)
                    start = time.time()

                # # Get next image.
                im = sess.run(image_tf)

                # If descriptor already exists, skip its computation.
                out_desc_filename = os.path.splitext(
                    os.path.basename(image_paths[i]))[0] + '.npz'
                out_desc_fullpath = os.path.join(output_path,
                                                 out_desc_filename)
                if tf.gfile.Exists(out_desc_fullpath):
                    tf.logging.info('Skipping %s', image_paths[i])
                    continue

                # Extract and save features.
                (locations_out, descriptors_out, feature_scales_out,
                 attention_out) = extractor_fn(im)

                np.savez(out_desc_fullpath,
                         loc=locations_out,
                         desc=descriptors_out.astype(np.float32),
                         feat=feature_scales_out)

            # Finalize enqueue threads.
            coord.request_stop()
            coord.join(threads)
Example #37
 def save(self, filename):
     print('saving to {}'.format(filename))
     np.savez(filename,
              tss=np.asarray(self.tss),
              vals=np.asarray(self.vals))
Example #38
################################################################################
# Save training samples to disk
################################################################################

text_seq_batch = np.zeros((T, N), dtype=np.int32)
imcrop_batch = np.zeros((N, input_H, input_W, 3), dtype=np.uint8)
label_coarse_batch = np.zeros((N, featmap_H, featmap_W, 1), dtype=bool)
label_fine_batch = np.zeros((N, input_H, input_W, 1), dtype=bool)

if not os.path.isdir(data_folder):
    os.mkdir(data_folder)
for n_batch in range(num_batch):
    print('saving batch %d / %d' % (n_batch + 1, num_batch))
    batch_begin = n_batch * N
    batch_end = (n_batch + 1) * N
    for n_sample in range(batch_begin, batch_end):
        processed_im, text_seq, labels_coarse, labels_fine = shuffled_training_samples[
            n_sample]
        text_seq_batch[:, n_sample - batch_begin] = text_seq
        imcrop_batch[n_sample - batch_begin, ...] = processed_im
        label_coarse_batch[n_sample - batch_begin,
                           ...] = labels_coarse[:, :, np.newaxis]
        label_fine_batch[n_sample - batch_begin, ...] = labels_fine[:, :,
                                                                    np.newaxis]

    np.savez(file=data_folder + data_prefix + '_' + str(n_batch) + '.npz',
             text_seq_batch=text_seq_batch,
             imcrop_batch=imcrop_batch,
             label_coarse_batch=label_coarse_batch,
             label_fine_batch=label_fine_batch)
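A minimal round-trip check, assuming the same variables (data_folder, data_prefix, T, N, ...) are still in scope; the filename pattern and keys mirror the np.savez call above.

# Sketch: verify the first saved batch loads back with the expected shapes.
batch = np.load(data_folder + data_prefix + '_0.npz')
assert batch['text_seq_batch'].shape == (T, N)
assert batch['imcrop_batch'].shape == (N, input_H, input_W, 3)
assert batch['label_coarse_batch'].shape == (N, featmap_H, featmap_W, 1)
assert batch['label_fine_batch'].shape == (N, input_H, input_W, 1)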
Example #39
0
                    i * len(ms) + j, res * len(batch), recovery_metric,
                    str(datetime.timedelta(seconds=remaining)).split('.')[0]))

        print("{} elapsed.".format(
            str(datetime.timedelta(seconds=time.time() -
                                   start)).split('.')[0]))
        xs = []
        cs = []
        rank = []
        for r in data.keys():
            data[r] = np.mean(data[r], axis=0)
            for j in range(len(ms)):
                xs += [ms[j]]
                rank += [r]
                cs += [data[r][j]]
        np.savez(dataname, xs=xs, rank=rank, cs=cs)

    print("loading dataset from '{}'".format(dataname))
    f = np.load(dataname)
    rank = f['rank']
    xs = f['xs']
    cs = f['cs']

    fig, axs = plt.subplots(1, 1, figsize=(6, 4))
    axs.set_title('Recovery of MNIST Images')
    new_cs = []
    for c in cs:
        if c < 0.10:
            new_cs.append((0, 1 - c, 1 - c, 1))
        else:
            new_cs.append((1, 1 - c, 1 - c, 1))
Example #40
0
def To_npz(pkl):
    classify = pkl.split("[")[1].split("]")[0]
    classify = classify.split(",")
    classify_phase = []

    for i in range(len(classify)):
        classify_phase.append(int(classify[i]))

    if (dimension == 1):
        classify_phase_change = copy.copy(classify_phase)
        classify_phase = [5, 1, 3, 7]  # Don't forget to change !!!

    particle_data[2] = pkl.split("N=")[1][0]
    number_of_particle = int(particle_data[2]) * int(particle_data[2])

    if (abs(classify_phase[0] - classify_phase[1]) == 1):
        if (classify_phase[0] == 1):
            line = "delta=[0.5, -0.5]"  #"mu=1"
        elif (classify_phase[0] == 3):
            line = "mu=3"
        elif (classify_phase[0] == 7):
            line = "mu=5"
        elif (classify_phase[0] == 5):
            line = "mu=-1"
    elif (classify_phase[0] % 2 == 1):
        line = "delta=0.5"
    elif (classify_phase[0] % 2 == 0):
        line = "delta=-0.5"
    else:
        print("please input the correct phase !!!")

    def get_test_data(phase):
        data = []
        file = np.load((path + '/test/{},{}_test,N={},{}.npz').format(
            particle_data[0], particle_data[1], particle_data[2], line))
        for i in range(len(phase)):
            cut = [len(file[kind_of_data[1]]), 0]
            for j in range(len(file[kind_of_data[1]])):
                if (cut[0] > j and int(file[kind_of_data[1]][j]) == phase[i]):
                    cut[0] = j
                if (cut[1] < j and int(file[kind_of_data[1]][j]) == phase[i]):
                    cut[1] = j
            data += file[kind_of_data[0]][cut[0]:cut[1] + 1].tolist()

        return np.array(data)

    # torch.load unpickles the whole model object, so a matching class
    # definition must exist in scope; if the first load fails, define a
    # skeleton class of the right dimensionality and retry below.
    try:
        model = torch.load(pkl)
    except Exception:
        if (dimension == 1):

            class DNN(nn.Module):
                def forward(self, x):
                    x = x.view(x.size(0), -1)
                    x = self.layer(x)
                    x = self.out(x)
                    output = nn.functional.softmax(x, dim=1)
                    return output
        elif (dimension == 2 or dimension == 4):

            class CNN(nn.Module):
                def forward(self, x):
                    x = self.conv1(x)
                    x = self.conv2(x)
                    x = x.view(x.size(0), -1)
                    x = self.layer(x)
                    x = self.out(x)
                    output = nn.functional.softmax(x, dim=1)
                    return output

        model = torch.load(pkl)

    def Probability(data, target):
        p = []
        for i in range(len(data)):
            if (dimension == 1):
                output = model(
                    torch.tensor(data[i]).reshape(-1, number_of_particle *
                                                  2).float().cuda())
            elif (dimension == 2):
                output = model(
                    torch.tensor(
                        data[i]).reshape(1, 1, number_of_particle,
                                         number_of_particle).float().cuda())
            elif (dimension == 4):
                output = model(
                    torch.tensor(
                        data[i]).reshape(-1, int(particle_data[2]),
                                         int(particle_data[2]),
                                         int(particle_data[2]),
                                         int(particle_data[2])).float().cuda())
            p.append(output[0][target].cpu().data.numpy())
        return p

    target = []
    if (dimension != 1):
        for i in range(len(classify_phase)):
            target.append(Probability(get_test_data(classify_phase), i))
            ##plt.plot(range(len(target[i])),target[i],"o")
        #print(pkl.split(".pkl")[0])
        #plt.savefig(pkl.split(".pkl")[0])
        #plt.show()
        #plt.clf()

        if (len(classify_phase) == 2):
            np.savez("./BA_npzfile/" + pkl.split(".pkl")[0],
                     phase1=target[0],
                     phase2=target[1])
        elif (len(classify_phase) == 4):
            np.savez("./BA_npzfile/" + pkl.split(".pkl")[0],
                     phase1=target[0],
                     phase2=target[1],
                     phase3=target[2],
                     phase4=target[3])
        else:
            print("please input the correct phase !!!")

    else:
        for i in range(
                len(classify_phase_change) -
                int(classify_phase_change[2] == 9)):
            target.append(Probability(get_test_data(classify_phase), i))
            #plt.plot(range(len(target[i])),target[i],"o")
        #plt.savefig(pkl.split(".pkl")[0])
        #plt.clf()
        #plt.show()
        #plt.clf()

        if (classify_phase_change[2] == 9):
            np.savez("./G_npzfile/" + pkl.split(".pkl")[0],
                     phase1=target[0],
                     phase2=target[1])
        elif (classify_phase_change[2] == 11):
            np.savez("./G_npzfile/" + pkl.split(".pkl")[0],
                     phase1=target[0],
                     phase2=target[1],
                     phase3=target[2])
        else:
            print("please input the correct phase !!!")
Example #41
0
def ImageProcessing(n_boards, board_w, board_h, board_dim):
    #Initializing variables
    board_n = board_w * board_h
    opts = []
    ipts = []
    npts = np.zeros((n_boards, 1), np.int32)
    intrinsic_matrix = np.zeros((3, 3), np.float32)
    distCoeffs = np.zeros((5, 1), np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)

    # prepare object points based on the actual dimensions of the calibration board
    # like (0,0,0), (25,0,0), (50,0,0) ....,(200,125,0)
    objp = np.zeros((board_h * board_w, 3), np.float32)
    objp[:, :2] = np.mgrid[0:(board_w * board_dim):board_dim,
                           0:(board_h * board_dim):board_dim].T.reshape(-1, 2)

    #Loop through the images.  Find checkerboard corners and save the data to ipts.
    for i in range(1, n_boards + 1):

        #Loading images
        print('Loading... Calibration_Image' + str(i) + '.png')
        image = cv2.imread('Calibration_Image' + str(i) + '.png')
        if image is not None:
            #Converting to grayscale
            grey_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

            #Find chessboard corners
            found, corners = cv2.findChessboardCorners(
                grey_image, (board_w, board_h),
                cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_NORMALIZE_IMAGE)
            print(found)

            if found == True:

                #Add the "true" checkerboard corners
                opts.append(objp)

                #Improve the accuracy of the checkerboard corners found in the image and save them to the ipts variable.
                cv2.cornerSubPix(grey_image, corners, (20, 20), (-1, -1),
                                 criteria)
                ipts.append(corners)

                #Draw chessboard corners
                cv2.drawChessboardCorners(image, (board_w, board_h), corners,
                                          found)

                #Show the image with the chessboard corners overlaid.
                cv2.imshow("Corners", image)

                char = cv2.waitKey(10)

    cv2.destroyWindow("Corners")

    print('')
    print('Finished processing images.')

    #Calibrate the camera
    print('Running Calibrations...')
    print(' ')
    ret, intrinsic_matrix, distCoeff, rvecs, tvecs = cv2.calibrateCamera(
        opts, ipts, grey_image.shape[::-1], None, None)

    #Save matrices
    print('Intrinsic Matrix: ')
    print(str(intrinsic_matrix))
    print(' ')
    print('Distortion Coefficients: ')
    print(str(distCoeff))
    print(' ')

    #Save data
    print('Saving data file...')
    np.savez('calibration_data',
             distCoeff=distCoeff,
             intrinsic_matrix=intrinsic_matrix)
    print('Calibration complete')

    #Calculate the total reprojection error.  The closer to zero the better.
    tot_error = 0
    for i in range(len(opts)):
        imgpoints2, _ = cv2.projectPoints(opts[i], rvecs[i], tvecs[i],
                                          intrinsic_matrix, distCoeff)
        error = cv2.norm(ipts[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        tot_error += error

    print("total reprojection error: ", tot_error / len(opts))

    #Undistort Images

    #Scale the images and create a rectification map.
    newMat, ROI = cv2.getOptimalNewCameraMatrix(intrinsic_matrix,
                                                distCoeff,
                                                image_size,
                                                alpha=crop,
                                                centerPrincipalPoint=1)
    mapx, mapy = cv2.initUndistortRectifyMap(intrinsic_matrix,
                                             distCoeff,
                                             None,
                                             newMat,
                                             image_size,
                                             m1type=cv2.CV_32FC1)

    for i in range(1, n_boards + 1):

        #Loading images
        print('Loading... Calibration_Image' + str(i) + '.png')
        image = cv2.imread('Calibration_Image' + str(i) + '.png')
        if image is not None:

            # undistort
            dst = cv2.remap(image, mapx, mapy, cv2.INTER_LINEAR)

            cv2.imshow('Undistorted Image', dst)

            char = cv2.waitKey(0)

    cv2.destroyAllWindows()
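Once calibration_data.npz exists, the saved matrices can be reused without redoing the checkerboard detection; a minimal sketch, with the input image name as an assumption:

import cv2
import numpy as np

# Sketch: undistort a new image with the saved calibration.
calib = np.load('calibration_data.npz')
image = cv2.imread('new_image.png')
undistorted = cv2.undistort(image, calib['intrinsic_matrix'],
                            calib['distCoeff'])
cv2.imwrite('new_image_undistorted.png', undistorted)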
Example #42
0
def train(params):

    """
    parameters set
    """
    NUM_NODES = params['number of nodes in the cluster']
    env = LraClusterEnv(num_nodes=NUM_NODES)
    batch_size = params['batch_size']
    ckpt_path_1 = "./checkpoint/" + params['path'] + "1/model.ckpt"
    ckpt_path_2 = "./checkpoint/" + params['path'] + "2/model.ckpt"
    ckpt_path_3 = "./checkpoint/" + params['path'] + "3/model.ckpt"
    ckpt_path_rec_1 = ckpt_path_1
    ckpt_path_rec_2 = ckpt_path_2
    ckpt_path_rec_3 = ckpt_path_3


    np_path = "./checkpoint/" + params['path'] + "/optimal_file_name.npz"
    Recover = params['recover']
    nodes_per_group = int(params['nodes per group'])
    replay_size = params['replay size']
    training_times_per_episode = 1  # TODO: if layers changes, training_times_per_episode should be modified
    safety_requirement = 0.0
    """
    Build Network
    """
    n_actions = nodes_per_group  #: 3 nodes per group
    n_features = int(n_actions * (env.NUM_APPS + 1) + 1 + env.NUM_APPS + 3*NUM_ATTRIBUTES)  # per-node app counts and totals, the chosen app id, remaining containers, and node attributes
    RL_1 = PolicyGradient(
        n_actions=n_actions,
        n_features=n_features,
        learning_rate=params['learning rate'],
        suffix=str(100) + '1a',
        safety_requirement=safety_requirement)

    RL_2 = PolicyGradient(
        n_actions=n_actions,
        n_features=n_features,
        learning_rate=params['learning rate'],
        suffix=str(100) + '2a',
        safety_requirement=safety_requirement)

    RL_3 = PolicyGradient(
        n_actions=n_actions,
        n_features=n_features,
        learning_rate=params['learning rate'],
        suffix=str(100) + '3a',
        safety_requirement=safety_requirement)
    app_node_set = app_node_map()
    node_att = node_attribute()
    useExternal = False

    """
    Training
    """
    start_time = time.time()
    global_start_time = start_time
    number_optimal = []
    observation_episode_1, action_episode_1, reward_episode_1, safety_episode_1 = [], [], [], []
    observation_optimal_1, action_optimal_1, reward_optimal_1, safety_optimal_1 = [], [], [], []

    observation_episode_2, action_episode_2, reward_episode_2, safety_episode_2 = [], [], [], []
    observation_optimal_2, action_optimal_2, reward_optimal_2, safety_optimal_2 = [], [], [], []

    observation_episode_3, action_episode_3, reward_episode_3, safety_episode_3 = [], [], [], []
    observation_optimal_3, action_optimal_3, reward_optimal_3, safety_optimal_3 = [], [], [], []

    epoch_i = 0

    thre_entropy = 0.1
    names = locals()
    for i in range(0, 10):
        names['highest_tput_' + str(i)] = 0
        names['observation_optimal_1_' + str(i)] = []
        names['action_optimal_1_' + str(i)] = []
        names['observation_optimal_2_' + str(i)] = []
        names['action_optimal_2_' + str(i)] = []
        names['observation_optimal_3_' + str(i)] = []
        names['action_optimal_3_' + str(i)] = []
        names['reward_optimal_1_' + str(i)] = []
        names['reward_optimal_2_' + str(i)] = []
        names['reward_optimal_3_' + str(i)] = []
        names['safety_optimal_1_' + str(i)] = []
        names['safety_optimal_2_' + str(i)] = []
        names['safety_optimal_3_' + str(i)] = []
        names['number_optimal_' + str(i)] = []
        names['optimal_range_' + str(i)] = 1.05
        names['lowest_vio_' + str(i)] = 500
        names['observation_optimal_1_vio_' + str(i)] = []
        names['action_optimal_1_vio_' + str(i)] = []
        names['observation_optimal_2_vio_' + str(i)] = []
        names['action_optimal_2_vio_' + str(i)] = []
        names['observation_optimal_3_vio_' + str(i)] = []
        names['action_optimal_3_vio_' + str(i)] = []
        names['reward_optimal_vio_' + str(i)] = []
        names['reward_optimal_vio_1_' + str(i)] = []
        names['reward_optimal_vio_2_' + str(i)] = []
        names['reward_optimal_vio_3_' + str(i)] = []
        names['safety_optimal_vio_1_' + str(i)] = []
        names['safety_optimal_vio_2_' + str(i)] = []
        names['safety_optimal_vio_3_' + str(i)] = []
        names['number_optimal_vio_' + str(i)] = []
        names['optimal_range_vio_' + str(i)] = 1.1

    def store_episode_1(observations, actions):
        observation_episode_1.append(observations)
        action_episode_1.append(actions)

    def store_episode_2(observations, actions):
        observation_episode_2.append(observations)
        action_episode_2.append(actions)

    def store_episode_3(observations, actions):
        observation_episode_3.append(observations)
        action_episode_3.append(actions)

    tput_origimal_class = 0
    source_batch_, index_data_ = batch_data(NUM_CONTAINERS, env.NUM_APPS)  # index_data = [0,1,2,0,1,2]

    while epoch_i < params['epochs']:
        if Recover:
            RL_1.restore_session(ckpt_path_rec_1)
            RL_2.restore_session(ckpt_path_rec_2)
            RL_3.restore_session(ckpt_path_rec_3)
            Recover = False

        observation = env.reset().copy()  # (9,9)
        source_batch = source_batch_.copy()
        index_data = index_data_.copy()

        """
        Episode
        """
        """
        first layer
        """
        source_batch_first = source_batch_.copy()
        observation_first_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
        interval = 9
        atttribute_first = np.array([sum(node_att[current: current+interval]) for current in range(0, len(node_att), interval)]).astype(int)
        observation_first_layer = np.concatenate((observation_first_layer, atttribute_first), axis=1)
        for inter_episode_index in range(NUM_CONTAINERS):

            appid = index_data[inter_episode_index]
            source_batch_first[appid] -= 1
            observation_first_layer_copy = observation_first_layer.copy()
            observation_first_layer_copy[:, appid] += 1
            observation_first_layer_copy = np.append(observation_first_layer_copy, observation_first_layer_copy[:,0:20].sum(axis=1).reshape(nodes_per_group, 1), axis=1)
            observation_first_layer_copy = np.array(observation_first_layer_copy).reshape(1, -1)
            observation_first_layer_copy = np.append(observation_first_layer_copy, appid).reshape(1, -1)
            observation_first_layer_copy = np.append(observation_first_layer_copy, np.array(source_batch_first)).reshape(1, -1)
            if useExternal:
                action_1 = inter_episode_index%3
                prob_weights = []
            else:
                action_1, prob_weights = RL_1.choose_action(observation_first_layer_copy.copy())

            observation_first_layer[action_1, appid] += 1

            store_episode_1(observation_first_layer_copy, action_1)

        """
        second layer
        """
        observation_second_layer_aggregation = np.empty([0, env.NUM_APPS], int)  # 9*20
        number_cont_second_layer = []
        interval = 3
        atttribute_second = np.array([sum(node_att[current: current + interval]) for current in range(0, len(node_att), interval)]).astype(int)
        for second_layer_index in range(nodes_per_group):
            rnd_array = observation_first_layer[second_layer_index,0:20].copy()
            source_batch_second, index_data = batch_data_sub(rnd_array)
            observation_second_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
            NUM_CONTAINERS_second = sum(source_batch_second)
            number_cont_second_layer.append(NUM_CONTAINERS_second)
            observation_second_layer = np.concatenate((observation_second_layer, atttribute_second[second_layer_index*3 : (second_layer_index+1)*3, :]), axis=1)
            for inter_episode_index in range(NUM_CONTAINERS_second):
                appid = index_data[inter_episode_index]
                source_batch_second[appid] -= 1
                observation_second_layer_copy = observation_second_layer.copy()
                observation_second_layer_copy[:, appid] += 1
                observation_second_layer_copy = np.append(observation_second_layer_copy, observation_second_layer_copy[:,0:20].sum(axis=1).reshape(nodes_per_group, 1), axis=1)
                observation_second_layer_copy = np.array(observation_second_layer_copy).reshape(1, -1)
                observation_second_layer_copy = np.append(observation_second_layer_copy, appid).reshape(1, -1)
                observation_second_layer_copy = np.append(observation_second_layer_copy, np.array(source_batch_second)).reshape(1, -1)
                if useExternal:
                    action_2 = inter_episode_index % 3
                    prob_weights = []
                else:
                    action_2, prob_weights = RL_2.choose_action(observation_second_layer_copy.copy())

                observation_second_layer[action_2, appid] += 1

                store_episode_2(observation_second_layer_copy, action_2)

            observation_second_layer_aggregation = np.append(observation_second_layer_aggregation, observation_second_layer[:,0:20], 0)

        """
        third layer
        """
        observation_third_layer_aggregation = np.empty([0, env.NUM_APPS], int)  # 9*20
        number_cont_third_layer = []
        interval = 1
        atttribute_third = np.array([sum(node_att[current: current + interval]) for current in range(0, len(node_att), interval)]).astype(int)

        for third_layer_index in range(nodes_per_group * nodes_per_group):
            rnd_array = observation_second_layer_aggregation[third_layer_index,0:20].copy()
            source_batch_third, index_data = batch_data_sub(rnd_array)
            observation_third_layer = np.zeros([nodes_per_group, env.NUM_APPS], int)
            NUM_CONTAINERS_third = sum(source_batch_third)
            number_cont_third_layer.append(NUM_CONTAINERS_third)
            observation_third_layer = np.concatenate((observation_third_layer, atttribute_third[third_layer_index*3 : (third_layer_index+1)*3, :]), axis=1)
            for inter_episode_index in range(NUM_CONTAINERS_third):
                appid = index_data[inter_episode_index]
                source_batch_third[appid] -= 1
                observation_third_layer_copy = observation_third_layer.copy()
                observation_third_layer_copy[:, appid] += 1
                observation_third_layer_copy = np.append(observation_third_layer_copy, observation_third_layer_copy[:,0:20].sum(axis=1).reshape(nodes_per_group, 1), axis=1)
                observation_third_layer_copy = np.array(observation_third_layer_copy).reshape(1, -1)
                observation_third_layer_copy = np.append(observation_third_layer_copy, appid).reshape(1, -1)
                observation_third_layer_copy = np.append(observation_third_layer_copy, np.array(source_batch_third)).reshape(1, -1)
                if useExternal:
                    action_3 = inter_episode_index % 3
                    prob_weights = []
                else:
                    action_3, prob_weights = RL_3.choose_action(observation_third_layer_copy.copy())
                observation_third_layer[action_3, appid] += 1
                store_episode_3(observation_third_layer_copy, action_3)
            observation_third_layer_aggregation = np.append(observation_third_layer_aggregation, observation_third_layer[:,0:20], 0)

        """
        After an entire allocation, calculate total throughput, reward
        """
        env.state = observation_third_layer_aggregation.copy()
        tput_state = env.state
        tput = env.get_tput_total_env() / NUM_CONTAINERS
        assert sum(sum(env.state)) == NUM_CONTAINERS
        assert (env.state.sum(0) == source_batch_).all()

        list_check_sum = sum(env.state.sum(1) > params['container_limitation per node'])
        list_check_node = 0
        for app in range(20):
            node_now = np.where(env.state[:,app]>0)[0]
            for node_ in node_now:
                if node_ not in app_node_set[app]:
                    list_check_node += env.state[node_,app]
        list_check = list_check_sum + list_check_node
        reward_ratio = (tput - 0)
        list_check_ratio = list_check + max(max(env.state.sum(1)- params['container_limitation per node']), 0)

        safety_episode_1 = [list_check_ratio * 1.0] * len(observation_episode_1)
        reward_episode_1 = [reward_ratio * 1.0] * len(observation_episode_1)
        safety_episode_2 = [list_check_ratio * 1.0] * len(observation_episode_2)
        reward_episode_2 = [reward_ratio * 1.0] * len(observation_episode_2)
        safety_episode_3 = [list_check_ratio * 1.0] * len(observation_episode_3)
        reward_episode_3 = [reward_ratio * 1.0] * len(observation_episode_3)

        RL_1.store_tput_per_episode(tput, epoch_i, list_check, [], [], list_check)
        RL_2.store_tput_per_episode(tput, epoch_i, list_check, [],[],[])
        RL_3.store_tput_per_episode(tput, epoch_i, list_check, [],[],[])

        RL_1.store_training_samples_per_episode(observation_episode_1, action_episode_1, reward_episode_1, safety_episode_1)
        RL_2.store_training_samples_per_episode(observation_episode_2, action_episode_2, reward_episode_2, safety_episode_2)
        RL_3.store_training_samples_per_episode(observation_episode_3, action_episode_3, reward_episode_3, safety_episode_3)

        """
        check_tput_quality(tput)
        """
        if names['lowest_vio_' + str(tput_origimal_class)] > list_check_ratio:
            names['lowest_vio_' + str(tput_origimal_class)] = list_check_ratio
            names['observation_optimal_1_vio_' + str(tput_origimal_class)], names['action_optimal_1_vio_' + str(tput_origimal_class)], \
            names['observation_optimal_2_vio_' + str(tput_origimal_class)], names['action_optimal_2_vio_' + str(tput_origimal_class)], \
            names['number_optimal_vio_' + str(tput_origimal_class)], \
            names['safety_optimal_vio_1_' + str(tput_origimal_class)], names['safety_optimal_vio_2_' + str(tput_origimal_class)], names['safety_optimal_vio_3_' + str(tput_origimal_class)] \
                = [], [], [], [], [], [], [], []
            names['observation_optimal_3_vio_' + str(tput_origimal_class)], names['action_optimal_3_vio_' + str(tput_origimal_class)] = [], []
            names['reward_optimal_vio_' + str(tput_origimal_class)] = []
            names['observation_optimal_1_vio_' + str(tput_origimal_class)].extend(observation_episode_1)
            names['action_optimal_1_vio_' + str(tput_origimal_class)].extend(action_episode_1)
            names['observation_optimal_2_vio_' + str(tput_origimal_class)].extend(observation_episode_2)
            names['action_optimal_2_vio_' + str(tput_origimal_class)].extend(action_episode_2)
            names['observation_optimal_3_vio_' + str(tput_origimal_class)].extend(observation_episode_3)
            names['action_optimal_3_vio_' + str(tput_origimal_class)].extend(action_episode_3)
            names['number_optimal_vio_' + str(tput_origimal_class)].append(NUM_CONTAINERS)
            names['safety_optimal_vio_1_' + str(tput_origimal_class)].extend(safety_episode_1)
            names['safety_optimal_vio_2_' + str(tput_origimal_class)].extend(safety_episode_2)
            names['safety_optimal_vio_3_' + str(tput_origimal_class)].extend(safety_episode_3)
            names['reward_optimal_vio_' + str(tput_origimal_class)].extend(reward_episode_1)

            names['optimal_range_vio_' + str(tput_origimal_class)] = 1.1
        elif names['lowest_vio_' + str(tput_origimal_class)] >= list_check_ratio / names['optimal_range_vio_' + str(tput_origimal_class)]:
            names['observation_optimal_1_vio_' + str(tput_origimal_class)].extend(observation_episode_1)
            names['action_optimal_1_vio_' + str(tput_origimal_class)].extend(action_episode_1)
            names['observation_optimal_2_vio_' + str(tput_origimal_class)].extend(observation_episode_2)
            names['action_optimal_2_vio_' + str(tput_origimal_class)].extend(action_episode_2)
            names['observation_optimal_3_vio_' + str(tput_origimal_class)].extend(observation_episode_3)
            names['action_optimal_3_vio_' + str(tput_origimal_class)].extend(action_episode_3)
            names['number_optimal_vio_' + str(tput_origimal_class)].append(NUM_CONTAINERS)
            names['safety_optimal_vio_1_' + str(tput_origimal_class)].extend(safety_episode_1)
            names['safety_optimal_vio_2_' + str(tput_origimal_class)].extend(safety_episode_2)
            names['safety_optimal_vio_3_' + str(tput_origimal_class)].extend(safety_episode_3)
            names['reward_optimal_vio_' + str(tput_origimal_class)].extend(reward_episode_1)

        if list_check <= 0.0:
            if names['highest_tput_' + str(tput_origimal_class)] < tput:
                names['highest_tput_' + str(tput_origimal_class)] = tput

                names['observation_optimal_1_' + str(tput_origimal_class)], names['action_optimal_1_' + str(tput_origimal_class)], names['observation_optimal_2_' + str(tput_origimal_class)], names['action_optimal_2_' + str(tput_origimal_class)],\
                names['reward_optimal_1_' + str(tput_origimal_class)],names['reward_optimal_2_' + str(tput_origimal_class)],names['reward_optimal_3_' + str(tput_origimal_class)], \
                names['number_optimal_' + str(tput_origimal_class)],\
                names['safety_optimal_1_' + str(tput_origimal_class)],names['safety_optimal_2_' + str(tput_origimal_class)],names['safety_optimal_3_' + str(tput_origimal_class)]\
                    = [], [], [], [], [], [], [], [], [], [], []
                names['observation_optimal_3_' + str(tput_origimal_class)], names['action_optimal_3_' + str(tput_origimal_class)] = [], []
                names['observation_optimal_1_' + str(tput_origimal_class)].extend(observation_episode_1)
                names['action_optimal_1_' + str(tput_origimal_class)].extend(action_episode_1)
                names['observation_optimal_2_' + str(tput_origimal_class)].extend(observation_episode_2)
                names['action_optimal_2_' + str(tput_origimal_class)].extend(action_episode_2)
                names['observation_optimal_3_' + str(tput_origimal_class)].extend(observation_episode_3)
                names['action_optimal_3_' + str(tput_origimal_class)].extend(action_episode_3)
                names['number_optimal_' + str(tput_origimal_class)].append(NUM_CONTAINERS)
                names['safety_optimal_1_' + str(tput_origimal_class)].extend(safety_episode_1)
                names['safety_optimal_2_' + str(tput_origimal_class)].extend(safety_episode_2)
                names['safety_optimal_3_' + str(tput_origimal_class)].extend(safety_episode_3)
                names['reward_optimal_1_' + str(tput_origimal_class)].extend(reward_episode_1)
                names['reward_optimal_2_' + str(tput_origimal_class)].extend(reward_episode_2)
                names['reward_optimal_3_' + str(tput_origimal_class)].extend(reward_episode_3)
                names['optimal_range_' + str(tput_origimal_class)] = 1.05

            elif names['highest_tput_' + str(tput_origimal_class)] < tput * names['optimal_range_' + str(tput_origimal_class)]:
                names['observation_optimal_1_' + str(tput_origimal_class)].extend(observation_episode_1)
                names['action_optimal_1_' + str(tput_origimal_class)].extend(action_episode_1)
                names['observation_optimal_2_' + str(tput_origimal_class)].extend(observation_episode_2)
                names['action_optimal_2_' + str(tput_origimal_class)].extend(action_episode_2)
                names['observation_optimal_3_' + str(tput_origimal_class)].extend(observation_episode_3)
                names['action_optimal_3_' + str(tput_origimal_class)].extend(action_episode_3)
                names['number_optimal_' + str(tput_origimal_class)].append(NUM_CONTAINERS)
                names['safety_optimal_1_' + str(tput_origimal_class)].extend(safety_episode_1)
                names['safety_optimal_2_' + str(tput_origimal_class)].extend(safety_episode_2)
                names['safety_optimal_3_' + str(tput_origimal_class)].extend(safety_episode_3)
                names['reward_optimal_1_' + str(tput_origimal_class)].extend(reward_episode_1)
                names['reward_optimal_2_' + str(tput_origimal_class)].extend(reward_episode_2)
                names['reward_optimal_3_' + str(tput_origimal_class)].extend(reward_episode_3)

        observation_episode_1, action_episode_1, reward_episode_1, safety_episode_1 = [], [], [], []
        observation_episode_2, action_episode_2, reward_episode_2, safety_episode_2 = [], [], [], []
        observation_episode_3, action_episode_3, reward_episode_3, safety_episode_3 = [], [], [], []

        """
        Each batch, RL.learn()
        """
        if (epoch_i % batch_size == 0) & (epoch_i > 1):
            for replay_class in range(0,1):

                number_optimal = names['number_optimal_' + str(replay_class)]

                reward_optimal_1 = names['reward_optimal_1_' + str(replay_class)]
                reward_optimal_2 = names['reward_optimal_2_' + str(replay_class)]
                reward_optimal_3 = names['reward_optimal_3_' + str(replay_class)]
                safety_optimal_1 = names['safety_optimal_1_' + str(replay_class)]
                safety_optimal_2 = names['safety_optimal_2_' + str(replay_class)]
                safety_optimal_3 = names['safety_optimal_3_' + str(replay_class)]

                observation_optimal_1 = names['observation_optimal_1_' + str(replay_class)]
                action_optimal_1 = names['action_optimal_1_' + str(replay_class)]
                observation_optimal_2 = names['observation_optimal_2_' + str(replay_class)]
                action_optimal_2 = names['action_optimal_2_' + str(replay_class)]
                observation_optimal_3 = names['observation_optimal_3_' + str(replay_class)]
                action_optimal_3 = names['action_optimal_3_' + str(replay_class)]
                buffer_size = int(len(number_optimal))

                if buffer_size < replay_size:
                    # TODO: if layers changes, training_times_per_episode should be modified
                    RL_1.ep_obs.extend(observation_optimal_1)
                    RL_1.ep_as.extend(action_optimal_1)
                    RL_1.ep_rs.extend(reward_optimal_1)
                    RL_1.ep_ss.extend(safety_optimal_1)

                    RL_2.ep_obs.extend(observation_optimal_2)
                    RL_2.ep_as.extend(action_optimal_2)
                    RL_2.ep_rs.extend(reward_optimal_2)
                    RL_2.ep_ss.extend(safety_optimal_2)

                    RL_3.ep_obs.extend(observation_optimal_3)
                    RL_3.ep_as.extend(action_optimal_3)
                    RL_3.ep_rs.extend(reward_optimal_3)
                    RL_3.ep_ss.extend(safety_optimal_3)

                else:
                    replay_index = np.random.choice(range(buffer_size), size=replay_size, replace=False)
                    for replay_id in range(replay_size):
                        replace_start = replay_index[replay_id]
                        start_location = sum(number_optimal[:replace_start])
                        stop_location = sum(number_optimal[:replace_start+1])
                        RL_1.ep_obs.extend(observation_optimal_1[start_location: stop_location])
                        RL_1.ep_as.extend(action_optimal_1[start_location: stop_location])
                        RL_1.ep_rs.extend(reward_optimal_1[start_location: stop_location])
                        RL_1.ep_ss.extend(safety_optimal_1[start_location: stop_location])

                        RL_2.ep_obs.extend(observation_optimal_2[start_location: stop_location])
                        RL_2.ep_as.extend(action_optimal_2[start_location: stop_location])
                        RL_2.ep_rs.extend(reward_optimal_2[start_location: stop_location])
                        RL_2.ep_ss.extend(safety_optimal_2[start_location: stop_location])

                        RL_3.ep_obs.extend(observation_optimal_3[start_location: stop_location])
                        RL_3.ep_as.extend(action_optimal_3[start_location: stop_location])
                        RL_3.ep_rs.extend(reward_optimal_3[start_location: stop_location])
                        RL_3.ep_ss.extend(safety_optimal_3[start_location: stop_location])

            if not RL_1.start_cpo:
                for replay_class in range(0,1):
                    number_optimal = names['number_optimal_vio_' + str(replay_class)]
                    safety_optimal_1 = names['safety_optimal_vio_1_' + str(replay_class)]
                    safety_optimal_2 = names['safety_optimal_vio_2_' + str(replay_class)]
                    safety_optimal_3 = names['safety_optimal_vio_3_' + str(replay_class)]
                    reward_optimal = names['reward_optimal_vio_' + str(replay_class)]

                    observation_optimal_1 = names['observation_optimal_1_vio_' + str(replay_class)]
                    action_optimal_1 = names['action_optimal_1_vio_' + str(replay_class)]
                    observation_optimal_2 = names['observation_optimal_2_vio_' + str(replay_class)]
                    action_optimal_2 = names['action_optimal_2_vio_' + str(replay_class)]
                    observation_optimal_3 = names['observation_optimal_3_vio_' + str(replay_class)]
                    action_optimal_3 = names['action_optimal_3_vio_' + str(replay_class)]

                    buffer_size = int(len(number_optimal))

                    if buffer_size < replay_size:
                        RL_1.ep_obs.extend(observation_optimal_1)
                        RL_1.ep_as.extend(action_optimal_1)
                        RL_1.ep_ss.extend(safety_optimal_1)
                        RL_1.ep_rs.extend(reward_optimal)
                        RL_2.ep_obs.extend(observation_optimal_2)
                        RL_2.ep_as.extend(action_optimal_2)
                        RL_2.ep_rs.extend(reward_optimal)
                        RL_2.ep_ss.extend(safety_optimal_2)
                        RL_3.ep_obs.extend(observation_optimal_3)
                        RL_3.ep_as.extend(action_optimal_3)
                        RL_3.ep_rs.extend(reward_optimal)
                        RL_3.ep_ss.extend(safety_optimal_3)

                    else:
                        replay_index = np.random.choice(range(buffer_size), size=replay_size, replace=False)
                        for replay_id in range(replay_size):
                            replace_start = replay_index[replay_id]
                            start_location = sum(number_optimal[:replace_start])
                            stop_location = sum(number_optimal[:replace_start+1])
                            RL_1.ep_obs.extend(observation_optimal_1[start_location: stop_location])
                            RL_1.ep_as.extend(action_optimal_1[start_location: stop_location])
                            RL_1.ep_rs.extend(reward_optimal[start_location: stop_location])
                            RL_1.ep_ss.extend(safety_optimal_1[start_location: stop_location])
                            RL_2.ep_obs.extend(observation_optimal_2[start_location: stop_location])
                            RL_2.ep_as.extend(action_optimal_2[start_location: stop_location])
                            RL_2.ep_rs.extend(reward_optimal[start_location: stop_location])
                            RL_2.ep_ss.extend(safety_optimal_2[start_location: stop_location])
                            RL_3.ep_obs.extend(observation_optimal_3[start_location: stop_location])
                            RL_3.ep_as.extend(action_optimal_3[start_location: stop_location])
                            RL_3.ep_rs.extend(reward_optimal[start_location: stop_location])
                            RL_3.ep_ss.extend(safety_optimal_3[start_location: stop_location])
            RL_1.learn(epoch_i, thre_entropy, Ifprint=True)
            RL_2.learn(epoch_i, thre_entropy)
            optim_case = RL_3.learn(epoch_i, thre_entropy)

        """
        checkpoint, per 5000 episodes
        """
        if (epoch_i % 5000 == 0) & (epoch_i > 1):
            for class_replay in range(0,1):
                highest_value = names['highest_tput_' + str(class_replay)]
                print("\n epoch: %d, highest tput: %f" % (epoch_i, highest_value))
            RL_1.save_session(ckpt_path_1)
            RL_2.save_session(ckpt_path_2)
            RL_3.save_session(ckpt_path_3)
            np.savez(np_path, tputs=np.array(RL_1.tput_persisit), candidate=np.array(RL_1.episode), vi_perapp=np.array(RL_1.ss_perapp_persisit), vi_coex=np.array(RL_1.ss_coex_persisit), vi_sum=np.array(RL_1.ss_sum_persisit))
            """
            optimal range adaptively change
            """
            for class_replay in range(0, 1):
                number_optimal = names['number_optimal_' + str(class_replay)]
                count_size = int(len(number_optimal))
                if (count_size > 300):
                    names['optimal_range_' + str(class_replay)] *= 0.99
                    names['optimal_range_' + str(class_replay)] = max(names['optimal_range_' + str(class_replay)], 1.01)
                    start_location = sum(names['number_optimal_' + str(class_replay)][:-50]) * training_times_per_episode
                    names['observation_optimal_1_' + str(class_replay)] = names['observation_optimal_1_' + str(class_replay)][start_location:]
                    names['action_optimal_1_' + str(class_replay)] = names['action_optimal_1_' + str(class_replay)][start_location:]
                    names['observation_optimal_2_' + str(class_replay)] = names['observation_optimal_2_' + str(class_replay)][start_location:]
                    names['action_optimal_2_' + str(class_replay)] = names['action_optimal_2_' + str(class_replay)][start_location:]
                    names['observation_optimal_3_' + str(class_replay)] = names['observation_optimal_3_' + str(class_replay)][start_location:]
                    names['action_optimal_3_' + str(class_replay)] = names['action_optimal_3_' + str(class_replay)][start_location:]
                    names['number_optimal_' + str(class_replay)] = names['number_optimal_' + str(class_replay)][-50:]
                    names['safety_optimal_1_' + str(class_replay)] = names['safety_optimal_1_' + str(class_replay)][start_location:]
                    names['safety_optimal_2_' + str(class_replay)] = names['safety_optimal_2_' + str(class_replay)][start_location:]
                    names['safety_optimal_3_' + str(class_replay)] = names['safety_optimal_3_' + str(class_replay)][start_location:]
                    names['reward_optimal_1_' + str(class_replay)] = names['reward_optimal_1_' + str(class_replay)][start_location:]
                    names['reward_optimal_2_' + str(class_replay)] = names['reward_optimal_2_' + str(class_replay)][start_location:]
                    names['reward_optimal_3_' + str(class_replay)] = names['reward_optimal_3_' + str(class_replay)][start_location:]

                print("optimal_range:", names['optimal_range_' + str(class_replay)])
            thre_entropy *= 0.5
            thre_entropy = max(thre_entropy, 0.01)

        epoch_i += 1
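A hedged sketch of inspecting the progress file written at each checkpoint; the keys match the np.savez call in train(), and the placeholder path must be replaced with the actual params['path'].

import numpy as np
import matplotlib.pyplot as plt

# Sketch: plot throughput per episode from the checkpoint log.
log = np.load('./checkpoint/<params-path>/optimal_file_name.npz')
plt.plot(log['candidate'], log['tputs'])
plt.xlabel('episode')
plt.ylabel('tput per container')
plt.show()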
Example #43
0
def main():
    #sys.argv[0] is name of script.

    epoch_time_list = [2, 3, 4, 6, 10, 30]
    K_list = [30, 50, 100, 150]

    K_num = len(K_list)
    t_num = len(epoch_time_list)
    rep_num = 5

    epoch_num = 300

    epoch_timescale_vec = np.zeros((t_num))  #units of sqrt(K) M
    K_vec = np.zeros((K_num))  #units of M
    seed_vec = np.zeros((rep_num))

    num_alive_array = np.zeros((K_num, t_num, rep_num, epoch_num + 2))
    num_alive_array[:] = np.nan

    for K_ind in range(K_num):
        for t_ind in range(t_num):
            for rep_ind in range(rep_num):

                file_name = 'epoch_time_scaling0_K{}_t{}_rep{}.npz'.format(
                    K_ind, t_ind, rep_ind)

                with np.load(file_name) as data:
                    exp_data = data['class_obj'].item()

                num_alive = np.sum(exp_data['n_alive'], axis=0)
                epochs = len(num_alive)
                num_alive_array[K_ind, t_ind, rep_ind, :epochs] = num_alive

                seed_vec[rep_ind] = exp_data['seed']

            epoch_timescale_vec[t_ind] = exp_data['epoch_timescale']
        K_vec[K_ind] = -exp_data['K']

    D = exp_data['D']
    m = exp_data['m']
    M = exp_data['M']
    mu = exp_data['mu']
    gamma = exp_data['gamma']
    logN = -exp_data['thresh']

    data = {
        't_num': t_num,
        'K_num': K_num,
        'rep_num': rep_num,
        'epoch_num': epoch_num,
        'epoch_timescale_vec': epoch_timescale_vec,
        'K_vec': K_vec,
        'seed_vec': seed_vec,
        'num_alive_array': num_alive_array,
        'D': D,
        'm': m,
        'M': M,
        'mu': mu,
        'gamma': gamma,
        'logN': logN
    }

    file_name = 'K_init_scaling0_summary'
    np.savez(file_name, data=data)
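Because the saved payload is a plain Python dict wrapped in an object array, reading it back on recent NumPy requires allow_pickle=True, mirroring the data['class_obj'].item() pattern used when the per-run files are loaded above.

import numpy as np

# Sketch: recover the summary dict (np.savez appended the .npz extension).
with np.load('K_init_scaling0_summary.npz', allow_pickle=True) as f:
    summary = f['data'].item()
print(summary['epoch_num'], summary['num_alive_array'].shape)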
Example #44
0
Lxs = [0] * train_iters
Lzs = [0] * train_iters

sess = tf.InteractiveSession()

saver = tf.train.Saver()  # saves variables learned during training
tf.global_variables_initializer().run()
#saver.restore(sess, "/tmp/draw/drawmodel.ckpt") # to restore from model, uncomment this line

for i in range(train_iters):
    xtrain = next_batch(train_data,
                        batch_size)  # xtrain is (batch_size x img_size)
    feed_dict = {x: xtrain}
    results = sess.run(fetches, feed_dict)
    Lxs[i], Lzs[i], _ = results
    if i % 50 == 0:
        print("iter=%d : Lx: %f Lz: %f" % (i, Lxs[i], Lzs[i]))

## TRAINING FINISHED ##

canvases = sess.run(cs, feed_dict)  # generate some examples
canvases = np.array(canvases)  # T x batch x img_size

out_file = os.path.join(FLAGS.data_dir, "draw_data.npz")
np.savez(out_file, canvases=canvases, loss=[Lxs, Lzs])
print("Outputs saved in file: %s" % out_file)

ckpt_file = "model/drawmodel.ckpt"
print("Model saved in file: %s" % saver.save(sess, ckpt_file))

sess.close()
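A short sketch of reading the outputs back; the array layout follows the comments above, and the directory stands in for whatever FLAGS.data_dir was set to at training time.

import numpy as np

# Sketch: load the generated canvases and the two loss curves.
out = np.load('/tmp/draw/draw_data.npz')  # assumed FLAGS.data_dir
canvases = out['canvases']  # T x batch x img_size
Lxs, Lzs = out['loss']      # reconstruction and latent losses per iteration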
Example #45
0
    pred_roidb = []
    sub_roidb = []
    obj_roidb = []
    for roidb_id in range(N_test):
        if (roidb_id + 1) % 10 == 0:
            print(roidb_id + 1)
        roidb_use = test_roidb[roidb_id]
        if len(roidb_use['rela_gt']) == 0:
            pred_roidb.append({})
            continue
        pred_rela, pred_rela_score, pred_sub, pred_obj = vnet.test_predicate(
            sess, roidb_use)
        pred_roidb_temp = {
            'pred_rela': pred_rela,
            'pred_rela_score': pred_rela_score,
            'sub_box_dete': roidb_use['sub_box_gt'],
            'obj_box_dete': roidb_use['obj_box_gt'],
            'sub_dete': roidb_use['sub_gt'],
            'obj_dete': roidb_use['obj_gt'],
            'original_pred': roidb_use['rela_gt']
        }
        pred_roidb.append(pred_roidb_temp)
        sub_roidb.append(pred_sub)
        obj_roidb.append(pred_obj)
    roidb = {}
    roidb['pred_roidb'] = pred_roidb
    roidb['subVec'] = sub_roidb
    roidb['objVec'] = obj_roidb

    np.savez(save_path, roidb=roidb)
Example #46
0
    def streaming(self):

        # collect images for training
        print("Host: ", self.host_name + ' ' + self.host_ip)
        print("Connection from: ", self.client_address)
        print("Start collecting images...")
        print("Press 'q' or 'x' to finish...")
        start = cv2.getTickCount()

        # stream video frames one by one
        try:

            self.frame = 1

            while self.send_inst:
                self.stream_bytes += self.connection.read(9102)

                first = self.stream_bytes.find(b'\xff\xd8')
                last = self.stream_bytes.find(b'\xff\xd9')

                if first != -1 and last != -1:
                    self.jpg = self.stream_bytes[first:last + 2]
                    self.stream_bytes = self.stream_bytes[last + 2:]
                    self.image = cv2.imdecode(
                        np.frombuffer(self.jpg, dtype=np.uint8),
                        cv2.IMREAD_GRAYSCALE)

                    self.clicks_total = self.clicks_forward + self.clicks_left + self.clicks_right
                    cv2.putText(
                        self.image, "FW: {}, LT: {}, RT: {}, TOTAL: {}".format(
                            self.clicks_forward, self.clicks_left,
                            self.clicks_right, self.clicks_total), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, .45, (255, 255, 0), 1)
                    # select lower half of the image
                    #height, width = image.shape            # images are no longer processed during collection; preprocess before training instead
                    #roi = image[int(height/2):height, :]
                    cv2.imshow('image', self.image)

                    if self.frame % 300 == 0:
                        self.current_time = time.time()

                        print('Live frame rate: {}'.format(
                            300 / (self.current_time - self.last_time)))
                        self.last_time = self.current_time

                    # reshape the roi image into a vector
                    #img_array = roi.reshape(1, int(height/2) * width).astype(np.float32)
                    self.frame += 1
                    self.total_frame += 1
                    #print(np.shape(img_array),np.shape(X))
                    #exit()
                key = cv2.waitKeyEx(1) >> 16
                if key == 46:
                    break
                elif key == -1:
                    pass
                else:
                    #key_left = 37  key_up = 38
                    #key_right = 39   key_down = 40
                    SendCmd.send(key)
                    print(key)

        finally:
            # save data as a numpy file
            end = cv2.getTickCount()
            # calculate streaming duration
            print("Streaming duration: , %.2fs" %
                  ((end - start) / cv2.getTickFrequency()))
            with open('./logs/log_img_collect.txt', 'a') as f:
                f.write('Date: ' + time.strftime('%x') + '\n')
                f.write('Time: ' + time.strftime('%X') + '\n')
                f.write('Total images: ' + str(self.img_num) + '\n')
                f.write('Total frames: ' + str(self.total_frame) + '\n')
                f.write('Saved frames: ' + str(self.saved_frame) + '\n')
                f.write('Dropped frames: ' +
                        str(self.total_frame - self.saved_frame) + '\n')
                f.write('Forward clicks: ' + str(self.clicks_forward) + '\n')
                f.write('Forward-left clicks: ' + str(self.clicks_left) + '\n')
                f.write('Forward-right clicks: ' + str(self.clicks_right) +
                        '\n')
                f.write('-----------------------------\n')

            print('Forward clicks: {}'.format(self.clicks_forward))
            print('Forward-left clicks:{}'.format(self.clicks_left))
            print('Forward-right clicks: {}'.format(self.clicks_right))
            print('Total images: {}'.format(self.img_num))
            print('Total frame:{}'.format(self.total_frame))
            print('Saved frame:{}'.format(self.saved_frame))
            print('Dropped frame{}'.format(self.total_frame -
                                           self.saved_frame))
            self.file_name = str(int(time.time()))
            try:
                np.savez('training_images/label_array_ORIGINALS_{}.npz'.format(
                    self.file_name),
                         train_labels=self.y)
            except IOError as e:
                print(e)
            print("销毁串流线程...")
            self.connection.close()
            self.server_socket.close()
Example #47
0
        for i in tqdm(range(hp.num_samples)):
            Dp, Dt, Fp = fit_least_squares(b_values_no0, X[i, :])
            Dp_pred[i, id] = Dp
            Dt_pred[i, id] = Dt
            Fp_pred[i, id] = Fp
            Dp_error[i, id] = np.ravel(Dp - Dp_truth[i])
            Dt_error[i, id] = np.ravel(Dt - Dt_truth[i])
            Fp_error[i, id] = np.ravel(Fp - Fp_truth[i])

    time_end = time()
    elapsed = time_end - time_start  # keep time() callable instead of shadowing it

    data_file = './data/phantom_pred_LS'
    np.savez(data_file,
             Dp_pred=Dp_pred,
             Dt_pred=Dt_pred,
             Fp_pred=Fp_pred,
             time=elapsed)

    data_file = './data/phantom_errors_LS'
    np.savez(data_file,
             Dp_error=Dp_error,
             Dt_error=Dt_error,
             Fp_error=Fp_error,
             time=elapsed)

    print('Time: {}'.format(elapsed))

    plt.boxplot(Dp_error)
    plt.show()
    plt.boxplot(Dt_error)
Example #48
0
parser = argparse.ArgumentParser()
parser.add_argument("out")
parser.add_argument("--cloud_in", default="/drop/points", nargs="?")
parser.add_argument("--dont_transform", action="store_true")
args = parser.parse_args()

from brett2 import ros_utils
from jds_utils import conversions
import roslib

roslib.load_manifest('tf')
import tf
import rospy
import numpy as np
import sensor_msgs.msg as sm
from brett2.ros_utils import transformPointCloud2

rospy.init_node("get_point_cloud")
listener = tf.TransformListener()
rospy.sleep(.3)

pc = rospy.wait_for_message(args.cloud_in, sm.PointCloud2)
if args.dont_transform:
    pc_tf = pc
else:
    pc_tf = transformPointCloud2(pc, listener, "base_footprint",
                                 pc.header.frame_id)

xyz, bgr = ros_utils.pc2xyzrgb(pc_tf)
np.savez(args.out, xyz=xyz, bgr=bgr)
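A usage sketch: run as, e.g., "python get_point_cloud.py cloud.npz" (the script name is an assumption), after which the arrays read straight back out of the output file.

import numpy as np

# Sketch: read the saved point cloud back.
cloud = np.load('cloud.npz')
xyz, bgr = cloud['xyz'], cloud['bgr']
print(xyz.shape, bgr.shape)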
Example #49
0
def run(args):
    print("loading from:", args.params_filepath)
    print("saving to:", args.exp_name)
    exp_dir = utils.set_up_experiment(exp_name=args.exp_name, phase='imitate')
    saver_dir = os.path.join(exp_dir, 'imitate', 'log')
    saver_filepath = os.path.join(saver_dir, 'checkpoint')
    np.savez(os.path.join(saver_dir, 'args'), args=args)
    summary_writer = tf.summary.FileWriter(
        os.path.join(exp_dir, 'imitate', 'summaries'))

    # build components
    env, act_low, act_high = utils.build_ngsim_env(args,
                                                   exp_dir,
                                                   vectorize=args.vectorize)
    data = utils.load_data(args.expert_filepath,
                           act_low=act_low,
                           act_high=act_high,
                           min_length=args.env_H + args.env_primesteps,
                           clip_std_multiple=args.normalize_clip_std_multiple,
                           ngsim_filename=args.ngsim_filename)
    critic = utils.build_critic(args, data, env, summary_writer)
    policy = utils.build_policy(args, env)
    recognition_model = utils.build_recognition_model(args, env,
                                                      summary_writer)
    baseline = utils.build_baseline(args, env)
    reward_handler = utils.build_reward_handler(args, summary_writer)
    validator = auto_validator.AutoValidator(
        summary_writer,
        data['obs_mean'],
        data['obs_std'],
        render=args.validator_render,
        render_every=args.render_every,
        flat_recurrent=args.policy_recurrent)

    # build algo
    saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=.5)
    sampler_args = dict(n_envs=args.n_envs) if args.vectorize else None
    if args.policy_recurrent:
        optimizer = ConjugateGradientOptimizer(
            max_backtracks=50, hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
    else:
        optimizer = None
    algo = GAIL(critic=critic,
                recognition=recognition_model,
                reward_handler=reward_handler,
                env=env,
                policy=policy,
                baseline=baseline,
                validator=validator,
                batch_size=args.batch_size,
                max_path_length=args.max_path_length,
                n_itr=args.n_itr,
                discount=args.discount,
                step_size=args.trpo_step_size,
                saver=saver,
                saver_filepath=saver_filepath,
                force_batch_sampler=False if args.vectorize else True,
                sampler_args=sampler_args,
                snapshot_env=False,
                plot=False,
                optimizer=optimizer,
                optimizer_args=dict(max_backtracks=50, debug_nan=True))

    # run it
    with tf.Session() as session:

        # running the initialization here to allow for later loading
        # NOTE: rllab batchpolopt runs this before training as well
        # this means that any loading subsequent to this is nullified
        # you have to comment out that initialization for any loading to work
        session.run(tf.global_variables_initializer())

        # loading
        if args.params_filepath != '':
            algo.load(args.params_filepath)

        # run training
        algo.train(sess=session)
Example #50
0
        # gather full data to output
        uth_global = comm.gather(u['g'][0], root=0)
        uph_global = comm.gather(u['g'][1], root=0)
        h_global = comm.gather(h['g'][0], root=0) 
        c_global = comm.gather(c['g'][0], root=0)
#        om_global = comm.gather(om['g'][0], root=0)

        if rank == 0:
            # Save data
            uph_global = np.hstack(uph_global)
            uth_global = np.hstack(uth_global)
            h_global = np.hstack(h_global)
            c_global = np.hstack(c_global)
#            om_global = np.hstack(om_global)
            np.savez(os.path.join(output_folder, 'output_%i.npz' %file_num),
#                     p=p_global, om=om_global, vph=vph_global, vth=vth_global,
                     h=h_global, c=c_global, uph=uph_global, uth=uth_global,
                     t=np.array([t]), phi=phi[:,0], theta=theta_global)
            file_num += 1

            # Print iteration and maximum vorticity
            print('Iter:', i, 'Time:', t, 'h max:', np.max(np.abs(h_global)))

    nonlinear(state_vector,RHS)
    timestepper.step(dt, state_vector, S, L, M, P, RHS, LU)
    t += dt

#    # imposing that the m=0 mode of u,h,c are purely real
#    if i % 100 == 1:
#        state_vector.unpack(u,h,c)
#        u.require_grid_space()
#        u.require_coeff_space()
Example #51
0
def main(batch_size=100, num_epochs=20):
    # Load the dataset
    X_train, y_train, X_test, y_test = load_dataset('exp1_images.csv',
                                                    'exp1_labels.csv')
    X_train[X_train > 0.4] = 1
    X_train[X_train <= 0.4] = 0
    X_test[X_test > 0.4] = 1
    X_test[X_test <= 0.4] = 0
    X_train = (np.uint8(X_train))
    X_test = (np.uint8(X_test))
    training_samples = X_train.shape[0]
    test_samples = X_test.shape[0]
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')
    train_size = 60000
    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    network = build_cnn(input_var)

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss,
                                                params,
                                                learning_rate=0.03,
                                                momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(
        test_prediction, target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
                       dtype=theano.config.floatX)
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], [loss, train_acc],
                               updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    pred_fn = theano.function([input_var], test_prediction)

    print('train epoch,minibatch,loss,test,elapsed')
    test_err = 0
    test_acc = 0
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_acc = 0
        train_batches = 0
        start_time = time.time()
        start_sample = 0
        if (training_samples > 60000):
            start_sample = random.randrange(0, (training_samples - 60000))
        start = time.time()
        for batch in iterate_minibatches(
                X_train[start_sample:(start_sample + 60000)],
                y_train[start_sample:(start_sample + 60000)],
                batch_size,
                shuffle=True):
            inputs, targets = batch
            err, acc = train_fn(inputs, targets)
            train_err += err
            train_acc += acc
            train_batches += 1
        end = time.time()
        batch_test = iterate_minibatches(X_test, y_test, 100, shuffle=True)
        inputs, targets = next(batch_test)
        test_err, test_acc = val_fn(inputs, targets)
        print('%02d,%02d/%i,%.4f,%06.4f,%.4f' %
              (epoch, train_batches * batch_size, train_size,
               train_err / train_batches, test_acc, (end - start)))

    np.savez('model_mnist.npz', *lasagne.layers.get_all_param_values(network))
    batch_valid = iterate_minibatches(X_test,
                                      y_test,
                                      test_samples,
                                      shuffle=False)
    inputs, targets = next(batch_valid)
    err, acc = val_fn(inputs, targets)
    print('samples:%d,loss %f,acc: %06.4f' % (test_samples, err, acc))
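
Because get_all_param_values is splatted positionally into np.savez, the archive keys are arr_0, arr_1, ...; reloading follows the standard Lasagne pattern (a sketch, assuming the same build_cnn graph and input_var as above):

import numpy as np
import lasagne

network = build_cnn(input_var)  # rebuild the same architecture
with np.load('model_mnist.npz') as f:
    param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)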
valid_dataset_output = np.array(valid_dataset_output)
test_dataset_input = np.array(test_dataset_input)
test_dataset_output = np.array(test_dataset_output)

# print("np.shape(Train)",np.shape(Train))
print("np.shape(Train[0])",np.shape(train_dataset_input))
print("np.shape(Train[1])",np.shape(train_dataset_output))

# print("np.shape(Valid)",np.shape(Valid))
print("np.shape(Valid[0])",np.shape(valid_dataset_input))
print("np.shape(Valid[1])",np.shape(valid_dataset_output))

# print("np.shape(Test)",np.shape(Test))
print("np.shape(Test[0])",np.shape(test_dataset_input))
print("np.shape(Test[1])",np.shape(test_dataset_output))

np.savez(dire_prefix+"train_dataset.npz",input = train_dataset_input,output = train_dataset_output )
np.savez(dire_prefix+"valid_dataset.npz",input = valid_dataset_input,output = valid_dataset_output)
np.savez(dire_prefix+"test_dataset.npz",input = test_dataset_input,output = test_dataset_output)


def export_onnx_with_validation(
        model: torch.nn.Module,  # or JITScriptModule
        inputs: Sequence[Union[torch.Tensor, Sequence[object]]],
        export_basepath: Text,
        input_names: Optional[List[Text]] = None,
        output_names: Optional[List[Text]] = None,
        use_npz: bool = True,
        *args,
        **kwargs) -> Sequence[Union[torch.Tensor, Sequence[object]]]:
    """
    Export a PyTorch model to ONNX and save its sample inputs and outputs to a NumPy file for validation.
    """

    is_tuple_or_list = lambda x: isinstance(x, (tuple, list))

    def tensors_to_arrays(tensors: Union[torch.Tensor, Iterable[
            Union[torch.Tensor, Iterable[Any]]]], ) -> List[np.ndarray]:
        if torch.is_tensor(tensors):
            return tensors.data.cpu().numpy()
        return list(map(tensors_to_arrays, tensors))

    def zip_dict(
            keys: Optional[Iterable[Any]],
            values: Sequence[Union[Any, Sequence[Any]]],
    ) -> MyDict[Text, Union[object, MyDict[Text, object]]]:
        keys = keys or range(len(values))
        ret = my_dict()
        for idx, (key, value) in enumerate(zip(keys, values)):
            is_key_list = is_tuple_or_list(key)
            is_value_list = is_tuple_or_list(value)
            assert is_key_list == is_value_list, 'keys and values mismatch'
            if is_value_list:
                ret[str(idx)] = zip_dict(key, value)
            else:
                ret[key] = value
        return ret

    torch_inputs = ensure_tuple(inputs)  # WORKAROUND: for torch.onnx
    outputs = torch.onnx.export(model,
                                torch_inputs,
                                export_basepath + '.onnx',
                                input_names=(input_names
                                             and flatten_list(input_names)),
                                output_names=(output_names
                                              and flatten_list(output_names)),
                                *args,
                                **kwargs)
    if outputs is None:  # WORKAROUND: for torch.onnx
        training = kwargs.get('training', False)
        with torch.onnx.set_training(model, training):
            outputs = model(*inputs)
    torch_outputs = ensure_tuple(outputs)

    inputs = zip_dict(input_names, tensors_to_arrays(torch_inputs))
    outputs = zip_dict(output_names, tensors_to_arrays(torch_outputs))
    if use_npz:
        np.savez(
            export_basepath + '.npz',
            inputs=inputs,
            outputs=outputs,
        )
    else:
        np.save(export_basepath + '.npy',
                np.asarray(my_dict(inputs=inputs, outputs=outputs)),
                allow_pickle=True)

    return torch_outputs
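
A minimal usage sketch for the helper above (the model, basepath, and names are hypothetical; assumes a PyTorch version whose torch.onnx API matches the helper):

import torch

model = torch.nn.Linear(4, 2)
sample = (torch.randn(1, 4),)
outputs = export_onnx_with_validation(
    model, sample, 'linear_demo',              # writes linear_demo.onnx + linear_demo.npz
    input_names=['x'], output_names=['y'])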
	def run(self, args):
		X_train, X_test, y_train, y_test = self._load_dataset(args)

		k = 0
		estimator, tuned_parameters = self._setup_instantiator(args)

		folds_time = []
		self.folds_predict = []
		self.folds_macro = []
		self.folds_micro = []

		print(estimator.get_params(deep=False))

		output, args.output = args.output, ""

		from xsklearn.ensemble import GaussianOutlierRemover, ThresholdOutlierRemover

		#out_remov = ThresholdOutlierRemover()
		#X_train, y_train = out_remov.fit_transform(X_train, y_train)

		tf_transformer = TfidfTransformer(norm=args.norm, use_idf=False,
											 smooth_idf=False, sublinear_tf=False)
		if self._tfidf(args):
			# Learn the idf vector from training set
			tf_transformer.fit(X_train)
			# Transform test and training frequency matrix
			# based on training idf vector		
			X_train = tf_transformer.transform(X_train)
			X_test = tf_transformer.transform(X_test)

		id_estimators = np.asarray([2, 3, 4, 5, 6, 8])

		n_estimators = id_estimators.shape[0]
		sets = all_sets(np.arange(n_estimators))
		for i in sets:
			print(i)
			i = id_estimators[np.asarray(i)][:, np.newaxis]
			n_classes = len(np.unique(y_train))
			feats = (n_classes*i + np.arange(n_classes)).ravel()

			#out_remov = GaussianOutlierRemover(0.01)
			#X_train, y_train = out_remov.fit_transform(X_train, y_train)

			from sklearn.decomposition import PCA
			#pca = PCA(copy=True, n_components=2, whiten=False)
			#X_train = pca.fit_transform(X_train)
			#X_test = pca.transform(X_test)

			if(args.cv > 1):
				n_jobs = 1 if hasattr(estimator,"n_jobs") else args.n_jobs
				gs = GridSearchCV(estimator, tuned_parameters,
							 n_jobs=n_jobs, refit=False,
							 cv=args.cv, verbose=1, scoring='f1_micro')
				gs.fit(X_train, y_train)
				print(gs.best_score_, gs.best_params_)
				estimator.set_params(**gs.best_params_)
				print(estimator.get_params())

			e = clone(estimator)

			# fit and predict
			start = time.time()
			e.fit(X_train[:, feats], y_train)
			pred = e.predict(X_test[:, feats])
			end = time.time()

			if len(i) == 1:
				pred = np.unique(y_train).take(np.argmax(X_test[:, feats], axis=1), axis=0)

			#fi = e.feature_importances_.reshape((9,7)).T
			#print(fi/(fi.sum(1))[:, np.newaxis])

			import pickle
			from sklearn.externals import joblib

			if not (args.dump == "") :
				#pickle.dump(e, open(args.dump,'wb'))
				joblib.dump(e, args.dump)

			# force to free memory
			del e

			# stores fold results
			folds_time = folds_time + [end - start]
			self._evaluate_dump(k, y_test, pred, args)
			k = k + 1

		Qs = np.ones((2**n_estimators, 2**n_estimators))
		#init_coef(Qs, 1, n_estimators + 1)
		
		Qs[:, 1:(n_estimators+1)] = -1
		for i in range(2**n_estimators - 1):
			Qs[i + 1, np.asarray(sets[i]) + 1] = 1

		for i in range(n_estimators, 2**n_estimators - 1):
			Qs[:, i + 1] = Qs[:, np.asarray(sets[i]) + 1].prod(1) 


		
		w = np.dot(Qs[:, :].T, np.vstack((0.0, np.asarray(self.folds_macro)[:, np.newaxis])))/Qs[:, :].shape[0]
		#w = np.dot(Qs[1:, :].T, np.asarray(self.folds_micro)[:, np.newaxis])/Qs[1:, :].shape[0]
		
		SS = (2**n_estimators * w[1:]**2)

		print((SS / SS.sum()).T)

		SS = SS/SS.sum()
		labels = np.asarray(["broof","lazy","bert","lxt","svm","nb","knn","rf","xt"])


		print("Threshold: %f" % ((1./Qs[1:, :].shape[0]) + np.std(SS)))
		ids, _ = np.where(SS >= 1./Qs[1:, :].shape[0])
		for i in ids:
			print(labels[id_estimators[sets[i]]], round(100*SS[i][0], 2))


		if not output == "":
			try:
				fil = np.load(output)
				mic = np.hstack((fil['micro'], np.asarray(self.folds_micro)[:,np.newaxis]))
				np.savez(output, Q=Qs, labels=labels[id_estimators], sets=sets, micro=mic)
			except(Exception):
				np.savez(output, Q=Qs, labels=labels[id_estimators], sets=sets, micro=np.asarray(self.folds_micro)[:,np.newaxis])

		print("F1-Score")
		print("\tMicro: ", np.average(self.folds_micro), np.std(self.folds_micro))
		print("\tMacro: ", np.average(self.folds_macro), np.std(self.folds_macro))

		print('loading time : ', self.datasetLoadingTime)
		print('times : ', np.average(folds_time), np.std(folds_time))
Example #55
    def generate(self, num, savepath):
        zs = npr.randn(num, self.dimz)
        xs = npr.randn(num) * self.stdx_ztrue + np.prod(zs, axis=1)
        np.savez(savepath, x=xs, z=zs, std=self.stdx_ztrue, size=num)
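
One subtlety of the call above: scalar keywords such as size=num are stored as 0-d arrays, so they need .item() on the way back (a small sketch with a hypothetical filename):

import numpy as np

np.savez('gen_demo.npz', x=np.random.randn(3), size=3)
with np.load('gen_demo.npz') as f:
    print(f['size'])         # array(3) -- a 0-d array, not an int
    print(f['size'].item())  # 3 -- back to a plain Python scalar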
Example #56
import zlib
import zipfile

import numpy as np
np.savez('data.npz', B=B, C=C, p=p)  # NB: np.savez has no allow_pickle kwarg; an extra keyword would be saved as an array member


def compress(file_names):
    print("File Paths:")
    print(file_names)

    # Select the compression mode ZIP_DEFLATED for compression
    # or zipfile.ZIP_STORED to just store the file
    compression = zipfile.ZIP_DEFLATED

    # create the zip file first parameter path/name, second mode
    with zipfile.ZipFile("result.zip", mode="w") as zf:
        for file_name in file_names:
            # Add file to the zip file
            # first parameter file to zip, second filename in zip
            zf.write('./' + file_name, file_name, compress_type=compression)


file_names = ["data.npz", "solution.ipynb"]
compress(file_names)
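
The .npz member can be read back straight out of result.zip without extracting it to disk, since np.load accepts any seekable file-like object (a sketch):

import io
import zipfile
import numpy as np

with zipfile.ZipFile('result.zip') as zf:
    buf = io.BytesIO(zf.read('data.npz'))
    with np.load(buf) as data:
        print(data.files)  # e.g. ['B', 'C', 'p']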
def evolve(central_mass, num_threads, length, length_units, resol, duration,
           duration_units, step_factor, save_number, save_options, save_path,
           npz, npy, hdf5, s_mass_unit, s_position_unit, s_velocity_unit,
           solitons, start_time):
    print('Initialising...')

    ##########################################################################################
    #SET INITIAL CONDITIONS

    if (length_units == ''):
        gridlength = length
    else:
        gridlength = convert(length, length_units, 'l')
    if (duration_units == ''):
        t = duration
    else:
        t = convert(duration, duration_units, 't')
    if (duration_units == ''):
        t0 = start_time
    else:
        t0 = convert(start_time, duration_units, 't')
    if (s_mass_unit == ''):
        cmass = central_mass
    else:
        cmass = convert(central_mass, s_mass_unit, 'm')

    Vcell = (gridlength / float(resol))**3

    ne.set_num_threads(num_threads)

    initsoliton_jit = numba.jit(initsoliton)

    ##########################################################################################
    # CREATE THE TIMESTAMPED SAVE DIRECTORY AND CONFIG.TXT FILE

    save_path = os.path.expanduser(save_path)
    tm = time.localtime()

    talt = ['0', '0', '0']
    for i in range(3, 6):
        if tm[i] in range(0, 10):
            talt[i - 3] = '{}{}'.format('0', tm[i])
        else:
            talt[i - 3] = tm[i]
    timestamp = '{}{}{}{}{}{}{}{}{}{}{}{}{}'.format(tm[0], '.', tm[1], '.',
                                                    tm[2], '_', talt[0], ':',
                                                    talt[1], ':', talt[2], '_',
                                                    resol)
    file = open('{}{}{}'.format('./', save_path, '/timestamp.txt'), "w+")
    file.write(timestamp)
    os.makedirs('{}{}{}{}'.format('./', save_path, '/', timestamp))
    file = open(
        '{}{}{}{}{}'.format('./', save_path, '/', timestamp, '/config.txt'),
        "w+")
    file.write(('{}{}'.format('resol = ', resol)))
    file.write('\n')
    file.write(('{}{}'.format('axion_mass (kg) = ', axion_mass)))
    file.write('\n')
    file.write(('{}{}'.format('length (code units) = ', gridlength)))
    file.write('\n')
    file.write(('{}{}'.format('duration (code units) = ', t)))
    file.write('\n')
    file.write(('{}{}'.format('start_time (code units) = ', t0)))
    file.write('\n')
    file.write(('{}{}'.format('step_factor  = ', step_factor)))
    file.write('\n')
    file.write(('{}{}'.format('central_mass (code units) = ', cmass)))
    file.write('\n\n')
    file.write(
        ('{}'.format('solitons ([mass, [x, y, z], [vx, vy, vz], phase]): \n')))
    for s in range(len(solitons)):
        file.write(('{}{}{}{}{}'.format('soliton', s, ' = ', solitons[s],
                                        '\n')))
    file.write(
        ('{}{}{}{}{}{}'.format('\ns_mass_unit = ', s_mass_unit,
                               ', s_position_unit = ', s_position_unit,
                               ', s_velocity_unit = ', s_velocity_unit)))
    file.write(
        '\n\nNote: If the above units are blank, this means that the soliton parameters were specified in code units'
    )
    file.close()

    loc = save_path + '/' + timestamp

    ##########################################################################################
    # SET UP THE REAL SPACE COORDINATES OF THE GRID

    gridvec = np.linspace(-gridlength / 2.0 + gridlength / float(2 * resol),
                          gridlength / 2.0 - gridlength / float(2 * resol),
                          resol)

    xarray = np.ones((resol, 1, 1))
    yarray = np.ones((1, resol, 1))
    zarray = np.ones((1, 1, resol))

    xarray[:, 0, 0] = gridvec
    yarray[0, :, 0] = gridvec
    zarray[0, 0, :] = gridvec

    distarray = ne.evaluate(
        "(xarray**2+yarray**2+zarray**2)**0.5")  # Radial coordinates

    ##########################################################################################
    # SET UP K-SPACE COORDINATES FOR COMPLEX DFT (NOT RHO DFT)

    kvec = 2 * np.pi * np.fft.fftfreq(resol, gridlength / float(resol))
    kxarray = np.ones((resol, 1, 1))
    kyarray = np.ones((1, resol, 1))
    kzarray = np.ones((1, 1, resol))
    kxarray[:, 0, 0] = kvec
    kyarray[0, :, 0] = kvec
    kzarray[0, 0, :] = kvec
    karray2 = ne.evaluate("kxarray**2+kyarray**2+kzarray**2")

    ##########################################################################################
    # INITIALISE SOLITONS WITH SPECIFIED MASS, POSITION, VELOCITY, PHASE

    f = np.load('./Soliton Profile Files/initial_f.npy')

    delta_x = 0.00001  # Needs to match resolution of soliton profile array file. Default = 0.00001

    warn = 0

    psi = pyfftw.zeros_aligned((resol, resol, resol), dtype='complex128')
    funct = pyfftw.zeros_aligned((resol, resol, resol), dtype='complex128')

    for k in range(len(solitons)):
        if (k != 0):
            if (not overlap_check(solitons[k], solitons[:k])):
                warn = 1
            else:
                warn = 0

    for s in solitons:
        mass = convert(s[0], s_mass_unit, 'm')
        position = convert(np.array(s[1]), s_position_unit, 'l')
        velocity = convert(np.array(s[2]), s_velocity_unit, 'v')
        # Note that alpha and beta parameters are computed when the initial_f.npy soliton profile file is generated.
        alpha = (mass / 3.883)**2
        beta = 2.454
        phase = s[3]
        funct = initsoliton_jit(funct, xarray, yarray, zarray, position, alpha,
                                f, delta_x)
        ####### Impart velocity to solitons in Galilean invariant way
        velx = velocity[0]
        vely = velocity[1]
        velz = velocity[2]
        funct = ne.evaluate(
            "exp(1j*(alpha*beta*t0 + velx*xarray + vely*yarray + velz*zarray -0.5*(velx*velx+vely*vely+velz*velz)*t0  + phase))*funct"
        )
        psi = ne.evaluate("psi + funct")

    rho = ne.evaluate("real(abs(psi)**2)")

    fft_psi = pyfftw.builders.fftn(psi, axes=(0, 1, 2), threads=num_threads)
    ifft_funct = pyfftw.builders.ifftn(funct,
                                       axes=(0, 1, 2),
                                       threads=num_threads)

    ##########################################################################################
    # COMPUTE SIZE OF TIMESTEP (CAN BE INCREASED WITH step_factor)

    delta_t = (gridlength / float(resol))**2 / np.pi

    min_num_steps = t / delta_t
    min_num_steps_int = int(min_num_steps + 1)
    min_num_steps_int = int(min_num_steps_int / step_factor)

    if save_number >= min_num_steps_int:
        actual_num_steps = save_number
        its_per_save = 1
    else:
        rem = min_num_steps_int % save_number
        actual_num_steps = min_num_steps_int + save_number - rem
        its_per_save = actual_num_steps / save_number

    h = t / float(actual_num_steps)

    ##########################################################################################
    # SETUP K-SPACE FOR RHO (REAL)

    rkvec = 2 * np.pi * np.fft.fftfreq(resol, gridlength / float(resol))
    krealvec = 2 * np.pi * np.fft.rfftfreq(resol, gridlength / float(resol))
    rkxarray = np.ones((resol, 1, 1))
    rkyarray = np.ones((1, resol, 1))
    rkzarray = np.ones(
        (1, 1, int(resol / 2) +
         1))  # last dimension smaller because of reality condition
    rkxarray[:, 0, 0] = rkvec
    rkyarray[0, :, 0] = rkvec
    rkzarray[0, 0, :] = krealvec
    rkarray2 = ne.evaluate("rkxarray**2+rkyarray**2+rkzarray**2")

    rfft_rho = pyfftw.builders.rfftn(rho, axes=(0, 1, 2), threads=num_threads)
    phik = rfft_rho(rho)  # not actually phik but phik is defined in next line
    phik = ne.evaluate("-4*3.141593*phik/rkarray2")
    phik[0, 0, 0] = 0
    irfft_phi = pyfftw.builders.irfftn(phik,
                                       axes=(0, 1, 2),
                                       threads=num_threads)

    ##########################################################################################
    # COMPUTE INITIAL VALUE OF POTENTIAL

    phisp = pyfftw.zeros_aligned((resol, resol, resol), dtype='float64')
    phisp = irfft_phi(phik)
    phisp = ne.evaluate("phisp-(cmass)/distarray")

    ##########################################################################################
    # PRE-LOOP ENERGY CALCULATION

    if (save_options[3]):
        egylist = []
        egpcmlist = []
        egpsilist = []
        ekandqlist = []
        mtotlist = []
        egyarr = pyfftw.zeros_aligned((resol, resol, resol), dtype='float64')

        egyarr = ne.evaluate('real((abs(psi))**2)')
        egyarr = ne.evaluate('real((-cmass/distarray)*egyarr)')
        egpcmlist.append(Vcell * np.sum(egyarr))
        tot = Vcell * np.sum(egyarr)

        egyarr = ne.evaluate(
            'real(0.5*(phisp+(cmass)/distarray)*real((abs(psi))**2))')
        egpsilist.append(Vcell * np.sum(egyarr))
        tot = tot + Vcell * np.sum(egyarr)

        funct = fft_psi(psi)
        funct = ne.evaluate('-karray2*funct')
        #ifft_calc = pyfftw.builders.ifftn(calc, axes=(0, 1, 2), threads=num_threads)
        funct = ifft_funct(funct)
        egyarr = ne.evaluate('real(-0.5*conj(psi)*funct)')
        ekandqlist.append(Vcell * np.sum(egyarr))
        tot = tot + Vcell * np.sum(egyarr)

        egylist.append(tot)

        egyarr = ne.evaluate('real((abs(psi))**2)')
        mtotlist.append(Vcell * np.sum(egyarr))

    ##########################################################################################
    # PRE-LOOP SAVE I.E. INITIAL CONFIG

    if (save_options[0]):
        if (npy):
            file_name = "rho_#{0}.npy".format(0)
            np.save(os.path.join(os.path.expanduser(loc), file_name), rho)
        if (npz):
            file_name = "rho_#{0}.npz".format(0)
            np.savez(os.path.join(os.path.expanduser(loc), file_name), rho)
        if (hdf5):
            file_name = "rho_#{0}.hdf5".format(0)
            file_name = os.path.join(os.path.expanduser(loc), file_name)
            f = h5py.File(file_name, 'w')
            dset = f.create_dataset("init", data=rho)
            f.close()
    if (save_options[2]):
        plane = rho[:, :, int(resol / 2)]
        if (npy):
            file_name = "plane_#{0}.npy".format(0)
            np.save(os.path.join(os.path.expanduser(loc), file_name), plane)
        if (npz):
            file_name = "plane_#{0}.npz".format(0)
            np.savez(os.path.join(os.path.expanduser(loc), file_name), plane)
        if (hdf5):
            file_name = "plane_#{0}.hdf5".format(0)
            file_name = os.path.join(os.path.expanduser(loc), file_name)
            f = h5py.File(file_name, 'w')
            dset = f.create_dataset("init", data=plane)
            f.close()
    if (save_options[1]):
        if (npy):
            file_name = "psi_#{0}.npy".format(0)
            np.save(os.path.join(os.path.expanduser(loc), file_name), psi)
        if (npz):
            file_name = "psi_#{0}.npz".format(0)
            np.savez(os.path.join(os.path.expanduser(loc), file_name), psi)
        if (hdf5):
            file_name = "psi_#{0}.hdf5".format(0)
            file_name = os.path.join(os.path.expanduser(loc), file_name)
            f = h5py.File(file_name, 'w')
            dset = f.create_dataset("init", data=psi)
            f.close()
    if (save_options[4]):
        line = rho[:, int(resol / 2), int(resol / 2)]
        file_name2 = "line_#{0}.npy".format(0)
        np.save(os.path.join(os.path.expanduser(loc), file_name2), line)

    ##########################################################################################
    # LOOP NOW BEGINS

    halfstepornot = 1  # 1 for a half step 0 for a full step

    tenth = float(
        save_number / 10
    )  # This parameter is used if energy outputs are saved while the code is running.
    # See the commented section below.

    clear_output()
    print("The total number of steps is %.0f" % actual_num_steps)
    if warn == 1:
        print(
            "WARNING: Significant overlap between solitons in initial conditions"
        )
    print('\n')
    tinit = time.time()

    for ix in range(actual_num_steps):
        if halfstepornot == 1:
            psi = ne.evaluate("exp(-1j*0.5*h*phisp)*psi")
            halfstepornot = 0
        else:
            psi = ne.evaluate("exp(-1j*h*phisp)*psi")
        funct = fft_psi(psi)
        funct = ne.evaluate("funct*exp(-1j*0.5*h*karray2)")
        psi = ifft_funct(funct)
        rho = ne.evaluate("real(abs(psi)**2)")
        phik = rfft_rho(
            rho)  # not actually phik but phik is defined on next line
        phik = ne.evaluate("-4*3.141593*(phik)/rkarray2")
        phik[0, 0, 0] = 0
        phisp = irfft_phi(phik)
        phisp = ne.evaluate("phisp-(cmass)/distarray")

        #Next if statement ensures that an extra half step is performed at each save point
        if (((ix + 1) % its_per_save) == 0):
            psi = ne.evaluate("exp(-1j*0.5*h*phisp)*psi")
            rho = ne.evaluate("real(abs(psi)**2)")
            halfstepornot = 1

            #Next block calculates the energies at each save, not at each timestep.
            if (save_options[3]):

                # Gravitational potential energy density associated with the central potential
                egyarr = ne.evaluate('real((abs(psi))**2)')
                egyarr = ne.evaluate('real((-cmass/distarray)*egyarr)')
                egpcmlist.append(Vcell * np.sum(egyarr))
                tot = Vcell * np.sum(egyarr)

                # Gravitational potential energy density of self-interaction of the condensate
                egyarr = ne.evaluate(
                    'real(0.5*(phisp+(cmass)/distarray)*real((abs(psi))**2))')
                egpsilist.append(Vcell * np.sum(egyarr))
                tot = tot + Vcell * np.sum(egyarr)

                funct = fft_psi(psi)
                funct = ne.evaluate('-karray2*funct')
                funct = ifft_funct(funct)
                egyarr = ne.evaluate('real(-0.5*conj(psi)*funct)')
                ekandqlist.append(Vcell * np.sum(egyarr))
                tot = tot + Vcell * np.sum(egyarr)

                egylist.append(tot)

                egyarr = ne.evaluate('real((abs(psi))**2)')
                mtotlist.append(Vcell * np.sum(egyarr))

        #Uncomment next section if partially complete energy lists desired as simulation runs.
        #In this way, some energy data will be saved even if the simulation is terminated early.

        # if (ix+1) % tenth == 0:
        #     label = (ix+1)/tenth
        #     file_name = "{}{}".format(label,'egy_cumulative.npy')
        #     np.save(os.path.join(os.path.expanduser(loc), file_name), egylist)
        #     file_name = "{}{}".format(label,'egpcm_cumulative.npy')
        #     np.save(os.path.join(os.path.expanduser(loc), file_name), egpcmlist)
        #     file_name = "{}{}".format(label,'egpsi_cumulative.npy')
        #     np.save(os.path.join(os.path.expanduser(loc), file_name), egpsilist)
        #     file_name = "{}{}".format(label,'ekandq_cumulative.npy')
        #     np.save(os.path.join(os.path.expanduser(loc), file_name), ekandqlist)

        ################################################################################
        # SAVE DESIRED OUTPUTS

        if (save_options[0] and ((ix + 1) % its_per_save) == 0):
            if (npy):
                file_name = "rho_#{0}.npy".format(int((ix + 1) / its_per_save))
                np.save(os.path.join(os.path.expanduser(loc), file_name), rho)
            if (npz):
                file_name = "rho_#{0}.npz".format(int((ix + 1) / its_per_save))
                np.savez(os.path.join(os.path.expanduser(loc), file_name), rho)
            if (hdf5):
                file_name = "rho_#{0}.hdf5".format(int(
                    (ix + 1) / its_per_save))
                file_name = os.path.join(os.path.expanduser(loc), file_name)
                f = h5py.File(file_name, 'w')
                dset = f.create_dataset("init", data=rho)
                f.close()
        if (save_options[2] and ((ix + 1) % its_per_save) == 0):
            plane = rho[:, :, int(resol / 2)]
            if (npy):
                file_name = "plane_#{0}.npy".format(
                    int((ix + 1) / its_per_save))
                np.save(os.path.join(os.path.expanduser(loc), file_name),
                        plane)
            if (npz):
                file_name = "plane_#{0}.npz".format(
                    int((ix + 1) / its_per_save))
                np.savez(os.path.join(os.path.expanduser(loc), file_name),
                         plane)
            if (hdf5):
                file_name = "plane_#{0}.hdf5".format(
                    int((ix + 1) / its_per_save))
                file_name = os.path.join(os.path.expanduser(loc), file_name)
                f = h5py.File(file_name, 'w')
                dset = f.create_dataset("init", data=plane)
                f.close()
        if (save_options[1] and ((ix + 1) % its_per_save) == 0):
            if (npy):
                file_name = "psi_#{0}.npy".format(int((ix + 1) / its_per_save))
                np.save(os.path.join(os.path.expanduser(loc), file_name), psi)
            if (npz):
                file_name = "psi_#{0}.npz".format(int((ix + 1) / its_per_save))
                np.savez(os.path.join(os.path.expanduser(loc), file_name), psi)
            if (hdf5):
                file_name = "psi_#{0}.hdf5".format(int(
                    (ix + 1) / its_per_save))
                file_name = os.path.join(os.path.expanduser(loc), file_name)
                f = h5py.File(file_name, 'w')
                dset = f.create_dataset("init", data=psi)
                f.close()
        if (save_options[4] and ((ix + 1) % its_per_save) == 0):
            line = rho[:, int(resol / 2), int(resol / 2)]
            file_name2 = "line_#{0}.npy".format(int((ix + 1) / its_per_save))
            np.save(os.path.join(os.path.expanduser(loc), file_name2), line)

        ################################################################################
        # UPDATE INFORMATION FOR PROGRESS BAR

        tint = time.time() - tinit
        tinit = time.time()
        prog_bar(actual_num_steps, ix + 1, tint)

    ################################################################################
    # LOOP ENDS

    clear_output()
    print('\n')
    print("Complete.")
    if warn == 1:
        print(
            "WARNING: Significant overlap between solitons in initial conditions"
        )

    if (save_options[3]):
        file_name = "egylist.npy"
        np.save(os.path.join(os.path.expanduser(loc), file_name), egylist)
        file_name = "egpcmlist.npy"
        np.save(os.path.join(os.path.expanduser(loc), file_name), egpcmlist)
        file_name = "egpsilist.npy"
        np.save(os.path.join(os.path.expanduser(loc), file_name), egpsilist)
        file_name = "ekandqlist.npy"
        np.save(os.path.join(os.path.expanduser(loc), file_name), ekandqlist)
        file_name = "masslist.npy"
        np.save(os.path.join(os.path.expanduser(loc), file_name), mtotlist)
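
Because rho and psi are passed to np.savez positionally in the save blocks above, each .npz snapshot stores its single array under the default key arr_0 (a load-back sketch):

import numpy as np

with np.load('rho_#0.npz') as snap:   # one of the snapshots written above
    rho = snap['arr_0']               # positional args are keyed arr_0, arr_1, ...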
Example #58
    pvals, cfu, corrmats = cf.evaluate(inputs, torch.tensor(yhat_cff))
else:
    raise RuntimeError(f"Unknown mode for CUF {cfu_config['type']}")

# -------------------------------------------------------------------------
# Output results
# -------------------------------------------------------------------------
logger.info("Write out results...")
datadump = os.path.join(result_dir, "datadump")
np.savez(datadump,
         cfus=cfu,
         cfu_uc=cfu_uc,
         cfu_buc=cfu_buc,
         pvals=pvals,
         corrmats=corrmats,
         yhat_cff=yhat_cff,
         yhat_uc=yhat_uc,
         yhat_buc=yhat_buc,
         wdagger=wdagger,
         theta=theta,
         theta_uc=theta_uc,
         theta_buc=theta_buc,
         data=data)

logger.info("Write out used config file...")
res_config_path = os.path.join(result_dir, 'config.json')
with open(res_config_path, 'w') as f:
    json.dump(config, f, indent=2)

# -------------------------------------------------------------------------
# Plot some results
# -------------------------------------------------------------------------
Example #59
import numpy as np
from scipy.ndimage import uniform_filter1d
from scipy.interpolate import interp1d
from functions.currents import *

filename = 'data/figure4.npz'
data = np.load(filename)
t = data['t']

t_fig4, av_I_cap_sn_fig4, av_I_leak_sn_fig4, av_I_pump_sn_fig4, av_I_Na_sn_fig4, av_I_DR_sn_fig4, av_I_stim_sn_fig4 = membrane_currents_sn(filename, t, stim_i=150e-12, stim_start=1, stim_end=8)
t_fig4, av_I_cap_dn_fig4, av_I_leak_dn_fig4, av_I_pump_dn_fig4, av_I_AHP_dn_fig4, av_I_Ca_dn_fig4, av_I_KC_dn_fig4 = membrane_currents_dn(filename, t)
t_fig4, av_I_cap_sg_fig4, av_I_leak_sg_fig4, av_I_pump_sg_fig4, av_I_Kir_sg_fig4 = membrane_currents_sg(filename, t)
t_fig4, av_I_cap_dg_fig4, av_I_leak_dg_fig4, av_I_pump_dg_fig4, av_I_Kir_dg_fig4 = membrane_currents_dg(filename, t)

np.savez('data/figureS2', t_fig4=t_fig4, av_I_cap_sn_fig4=av_I_cap_sn_fig4, av_I_leak_sn_fig4=av_I_leak_sn_fig4, \
    av_I_pump_sn_fig4=av_I_pump_sn_fig4, av_I_Na_sn_fig4=av_I_Na_sn_fig4, av_I_DR_sn_fig4=av_I_DR_sn_fig4, \
    av_I_stim_sn_fig4=av_I_stim_sn_fig4, \
    av_I_cap_dn_fig4=av_I_cap_dn_fig4, av_I_leak_dn_fig4=av_I_leak_dn_fig4, av_I_pump_dn_fig4=av_I_pump_dn_fig4, \
    av_I_AHP_dn_fig4=av_I_AHP_dn_fig4, av_I_Ca_dn_fig4=av_I_Ca_dn_fig4, av_I_KC_dn_fig4=av_I_KC_dn_fig4, \
    av_I_cap_sg_fig4=av_I_cap_sg_fig4, av_I_leak_sg_fig4=av_I_leak_sg_fig4, \
    av_I_pump_sg_fig4=av_I_pump_sg_fig4, av_I_Kir_sg_fig4=av_I_Kir_sg_fig4, \
    av_I_cap_dg_fig4=av_I_cap_dg_fig4, av_I_leak_dg_fig4=av_I_leak_dg_fig4, \
    av_I_pump_dg_fig4=av_I_pump_dg_fig4, av_I_Kir_dg_fig4=av_I_Kir_dg_fig4)
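
A less error-prone way to write savez calls with this many keywords is to build a dict and unpack it, so names and values cannot drift apart (a sketch with stand-in arrays and a hypothetical filename):

import numpy as np

currents = {
    't_fig4': np.linspace(0, 1, 5),        # stand-ins for the arrays above
    'av_I_cap_sn_fig4': np.zeros(5),
}
np.savez('figureS2_demo.npz', **currents)
with np.load('figureS2_demo.npz') as f:
    print(sorted(f.files))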
Example #60
    def meas_parallel(
            i):  #, def_param=(lock_em, lock_indx, em_par, indx_par)):
        #print("Is running on ",multiprocessing.current_process().name)
        print(str(100 * (i / 412500.)), "% of the way through \r")
        eml, idm = measure_spec(shared_fluxes[i])
        emls = eml["EW"][:,
                         np.logical_or(emlines['name'] ==
                                       'Ha', emlines['restwave'] ==
                                       3727.092)].reshape(1, -1)
        idms = idm[
            "INDX"][:,
                    np.logical_or(
                        np.logical_or(indx_names == 'D4000', indx_names ==
                                      'Fe5270'),
                        np.logical_or(
                            np.logical_or(indx_names == 'Fe5335', indx_names ==
                                          'HDeltaA'),
                            np.logical_or(indx_names == 'Hb', indx_names ==
                                          'Mgb')))].reshape(1, -1)

        emls_err = eml["EWERR"][:,
                                np.logical_or(emlines['name'] ==
                                              'Ha', emlines['restwave'] ==
                                              3727.092)].reshape(1, -1)
        idms_err = idm[
            "INDXERR"][:,
                       np.logical_or(
                           np.logical_or(indx_names == 'D4000', indx_names ==
                                         'Fe5270'),
                           np.logical_or(
                               np.logical_or(indx_names ==
                                             'Fe5335', indx_names ==
                                             'HDeltaA'),
                               np.logical_or(indx_names == 'Hb', indx_names ==
                                             'Mgb')))].reshape(1, -1)

        lu = np.append(emls, idms, axis=1).reshape(
            1, 1, 8
        )  # Halpha 0th,  OII 1st, Hbeta 2nd, MgB 3rd, Fe5270 4th, Fe5335 5th, HDeltaA 6th, D4000 7th
        lu_err = np.append(emls_err, idms_err, axis=1).reshape(
            1, -1
        )  # Halpha 0th,  OII 1st, Hbeta 2nd, MgB 3rd, Fe5270 4th, Fe5335 5th, HDeltaA 6th, D4000 7th
        print("current time, ", time.time(), " for iteration: ", str(i))
        with l:
            if os.path.isfile("../data/em_indx_par_pool_mapped.npz"):
                with np.load("../data/em_indx_par_pool_mapped.npz") as ei_lu:
                    np.savez("../data/em_indx_par_pool_mapped.npz",
                             lookup=np.append(ei_lu["lookup"], lu, axis=0))
                with np.load("../data/em_indx_err_par_pool_mapped.npz"
                             ) as ei_lu_err:
                    np.savez("../data/em_indx_err_par_pool_mapped.npz",
                             lookuperr=np.append(ei_lu_err["lookuperr"],
                                                 lu_err,
                                                 axis=0))
                with np.load(
                        "../data/iteration_number_em_indx_par_pool_mapped.npz"
                ) as iter_num:
                    np.savez(
                        "../data/iteration_number_em_indx_par_pool_mapped.npz",
                        idx=np.append(iter_num["idx"], [i], axis=0))
            else:
                np.savez("../data/em_indx_par_pool_mapped.npz", lookup=lu)
                np.savez("../data/em_indx_err_par_pool_mapped.npz",
                         lookuperr=lu_err)
                np.savez(
                    "../data/iteration_number_em_indx_par_pool_mapped.npz",
                    idx=[i])

        return i
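
The lock-guarded load/append/save above rewrites the whole archive on every iteration, so total I/O grows quadratically with the number of results; a common alternative (a sketch, not the original code) is to collect the rows from the pool and save once:

import numpy as np
from multiprocessing import Pool

def fake_measure(i):
    # stand-in for meas_parallel: returns one (1, 1, 8) row of measurements
    return np.full((1, 1, 8), float(i))

if __name__ == '__main__':
    with Pool(4) as pool:
        rows = pool.map(fake_measure, range(100))
    lookup = np.concatenate(rows, axis=0)          # shape (100, 1, 8)
    np.savez('em_indx_once.npz', lookup=lookup, idx=np.arange(100))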