Example #1
def main():
    data = np.genfromtxt("config/test_data.dat", dtype=float, delimiter='\t')

    x_vals = [x[0] for x in data]
    y_vals = [x[1] for x in data]

    fit.fit(x_vals, y_vals)
Example #2
    def fit(self, X, y):
        fit.fit(X, y, self.estimator, self.cv_results_, self.best_score_,
                self.best_params_, self.param_grid, self.cv_n, self.cv_type)
        best_index = self.cv_results_['mean_test_score'].index(
            max(self.cv_results_['mean_test_score']))
        self.best_score_ = max(self.cv_results_['mean_test_score'])
        self.best_params_ = self.cv_results_['params'][best_index]
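The selection bookkeeping above is just an argmax over the mean test scores. A minimal standalone sketch of the same logic, with a made-up cv_results_ dict:

cv_results_ = {
    'mean_test_score': [0.81, 0.93, 0.88],
    'params': [{'C': 0.1}, {'C': 1.0}, {'C': 10.0}],
}
best_index = cv_results_['mean_test_score'].index(max(cv_results_['mean_test_score']))
best_score_ = max(cv_results_['mean_test_score'])
best_params_ = cv_results_['params'][best_index]
print(best_score_, best_params_)  # 0.93 {'C': 1.0}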
Example #3
def classify(trainX, trainY, testX, testY):
    """ Uses the Bayesian Classifier to classify the test data. """
    trainC = getClasses(trainY)
    P = estimatePosterior(trainX, trainC, testX)
    E = fit(testX, P)
    (e_rate, se, interval) = error.confidenceInterval(testY, E)
    return (P, E, e_rate, se, interval)
Example #4
def retrieve(measured_intensity, measured_intensity1, measured_intensity2,
             measured_intensity3, phase_obj, phase_obj1, phase_obj2,
             phase_obj3, phase_modulate1, phase_modulate2, phase_modulate3,
             defocus_term):
    unet = U_Net(in_ch=1, out_ch=1).cuda()
    # unet = Slim_Net(in_ch=1, out_ch=1).cuda()
    mse_loss, mse_loss2, net_input, unet = fit(
        net=unet,
        net_input=measured_intensity.type(dtype),
        ref_intensity1=measured_intensity1.type(dtype),
        ref_intensity2=measured_intensity2.type(dtype),
        ref_intensity3=measured_intensity3.type(dtype),
        num_iter=10000,
        LR=0.1,
        lr_decay_epoch=0,
        add_noise=False,
        gt=Variable(torch.tensor(phase_obj)),
        cosLR=False,
        reducedLR=False,
        modulate1=Variable(torch.tensor(phase_modulate1)),
        modulate2=Variable(torch.tensor(phase_modulate2)),
        modulate3=Variable(torch.tensor(phase_modulate3)),
        defocus_term=defocus_term)
    # modulate1=None,
    # modulate2=Variable(torch.tensor(phase_modulate2)))

    # output = unet(net_input.type(dtype)).data.cpu().squeeze(0).numpy()[0]
    best_unet = U_Net(in_ch=1, out_ch=1).cuda()
    best_unet.load_state_dict(torch.load('best.pth'))
    output, _, _, _, _, _, _ = best_unet(measured_intensity.type(dtype),
                                         defocus_term.type(dtype))
    output = output.data.cpu().squeeze(0).numpy()[0]
    # output = unet(measured_intensity.type(dtype)).data.cpu().squeeze(0).numpy()[0]

    return mse_loss, mse_loss2, output
Example #5
def fit_withrebin(f,
                  p,
                  fitfuncin=None,
                  p0=None,
                  minfreq=0,
                  binsize=0.1,
                  minpoints=10,
                  sampling=4,
                  **args):
    """Without modification, this fits log-binned to a powerlaw plus
constant. Change fitfunc and supply correct p0 to fit for
arbitrary function. Binning parameters are as in "rebin"; returns
parameters and uncertainties for successful fit, full message otherwise.
Minpoints and sampling really shouldn't matter here.
Extra args are passed through to leastsq (eg, maxfev=).
"""
    if fitfuncin:  # need the function rescaled logarithmically
        fitfunc = lambda p, x: log10(fitfuncin(p, 10**x))
    else:
        fitfunc = lambda p, x: log10(abs(p[0]) * (10**x)**p[1] + abs(p[2]))
    p[p == 0] = min(p[p > 0])
    ind = f > minfreq
    logbinf, logbinp, logbine = rebin(f[ind], p[ind], binsize, minpoints,
                                      sampling)
    if p0 is None:
        p0 = [1000, -1, 1e-5]
    return fit(logbinf, logbinp, logbine, p0, fitfunc, **args)
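The default fitfunc evaluates a power law plus constant in log10 space, which is why the data are log-binned first. A self-contained check of that transform (example parameter values only, no rebinning):

import numpy as np

p = [1000.0, -1.0, 1e-5]      # [normalisation, index, constant]
logf = np.linspace(-3, 2, 6)  # log10 of frequency
model_logp = np.log10(abs(p[0]) * (10**logf)**p[1] + abs(p[2]))
print(model_logp)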
Example #6
def classify(probabilities, testX, testY):
    """ Uses the sum rule to combine two predictions and then classify the data. """
    n = len(testX)
    P = sum_rule(probabilities, n)
    E = fit(testX, P)
    (e_rate, se, interval) = error.confidenceInterval(testY, E)
    return (P, E, e_rate, se, interval)
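sum_rule is not shown in this snippet; under the usual definition it adds the per-classifier posterior matrices, and the decision is an argmax per row. A hedged numpy sketch of that convention:

import numpy as np

def sum_rule_sketch(probabilities):
    # probabilities: list of (n_samples, n_classes) posterior arrays, one per classifier
    P = np.sum(probabilities, axis=0)
    return P, P.argmax(axis=1)  # combined scores and hard decisions

P1 = np.array([[0.9, 0.1], [0.4, 0.6]])
P2 = np.array([[0.7, 0.3], [0.2, 0.8]])
print(sum_rule_sketch([P1, P2])[1])  # [0 1]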
Example #8
def paramsOpt(filtr, params, func):
    qp = quotes[[i for i, x in enumerate(quotes.papel) if x in papeis]]
    #qp=filtr(qp)
    daystoexp = np.array([x.days for x in qp.vencimento - qp.date])
    precoacao = np.array(
        [petr4[petr4.date == xdate]['high'][0] for xdate in qp.date])
    p = params
    p = fit.fit(func, p, [qp.exercicio, precoacao, daystoexp], qp.high)
    return p
Example #9
def main():
    parser = argparse.ArgumentParser(
        description='train unet',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    fit.add_fit_args(parser)
    seed = 2012310818
    np.random.seed(seed)
    parser.set_defaults(batch_size=16,
                        num_epochs=40,
                        lr=0.0001,
                        optimizer='adam'
                        # lr_step_epochs='10',
                        # lr_factor = 0.8
                        )
    args = parser.parse_args()
    kv = mx.kvstore.create(args.kv_store)

    unet = segnet(workspace=workspace)
    train, val, lt, lv = data_iter(args, kv)
    fit.fit(args, unet[2], data_iter)
Example #10
    def test_2(self):
        # set the parameters
        nn_params = create_nn_params()
        nn_params.with_bias = False
        nn_params.with_adap_lr = True
        nn_params.lr = 0.01
        nn_params.act_fu = SIGMOID
        nn_params.alpha_sigmoid = 0.056
        nn_map = (2, 3, 1)

        X = [[1, 1], [1, 0], [0, 1], [0, 0]]
        Y_and = [[1], [1], [1], [0]]

        b_c_new = [0] * bc_bufLen  # buffer for serialising matrix elements and inputs
        initiate_layers(nn_params, nn_map, len(nn_map))
        fit(b_c_new, nn_params, 7, X, Y_and, 100)

        print("in test_1 after learn. matr")
        for i in nn_params.list_:
            print(i.matrix)
        # serialise
        compil_serializ(nn_params, b_c_new, nn_params.list_,
                        len(nn_map) - 1, "weight_file")

        # deserialise
        nn_params_new = create_nn_params()
        deserializ(nn_params_new, nn_params_new.list_, "weight_file")

        print("in test_1 after deserializ. matr")
        for i in nn_params_new.list_:
            print(i.matrix)

        # prediction
        print(answer_nn_direct(nn_params_new, [1, 1], 1))

        # prediction in reverse
        print("*ON CONTRARY*")
        answer_nn_direct_on_contrary(nn_params_new, [0], 1)
        print("-------------")
Example #11
def application(environ, start_response):
    logpath = "%s/winti.log" % os.getenv("OPENSHIFT_DATA_DIR")
    ctype = "text/plain"

    if environ["PATH_INFO"] == "/history":
        lines = open(logpath).readlines()
        response_body = "".join(lines)
    elif environ["PATH_INFO"] == "/current":
        lines = open(logpath).readlines()
        response_body = lines[-1]
    elif environ["PATH_INFO"] == "/filter":
        lines = open("logpath", "r")
        output = []
        for line in lines:
            for line in lines:
                if not line.find(",,") >= 0:
                    if not line.endswith(","):
                        output.append(line)
        lines.close()
        response_body = "".join(output)
    elif environ["PATH_INFO"].startswith("/predict/"):
        s = re.compile(r"/predict/(\d+)")
        m = s.match(environ["PATH_INFO"])
        predpeople = int(m.group(1))
        histstamps = []
        histpeople = []
        lines = open(logpath).readlines()
        for line in lines:
            try:
                stamp, people, date = line.split(",")
            except ValueError:
                stamp, people = line.strip().split(",")
                date = None
            if stamp == "" or people == "":
                continue
            histstamps.append(int(stamp))
            histpeople.append(int(people))
        fitlambda = fit.fit(histpeople, histstamps)
        predstamp = int(fitlambda(predpeople))
        preddate = datetime.datetime.strftime(datetime.datetime.fromtimestamp(predstamp), "%a, %-d. %B %Y, %-H:%M")
        response_body = "Predicted date when the population of Winterthur reaches %s: %s" % (m.group(1), preddate)
    elif environ["PATH_INFO"] == "/source":
        response_body = "https://github.engineering.zhaw.ch/spio/wintipoptracker.git"
    else:
        response_body = "This is a data source/data service informing about the population of Winterthur. Use /filter to filter /current or /history or /predict/{number-of-inhabitants} as invocation methods. Use /source to get a pointer to the service implementation source code."
    response_body = response_body.encode("utf-8")

    status = "200 OK"
    response_headers = [("Content-Type", ctype), ("Content-Length", str(len(response_body)))]

    start_response(status, response_headers)
    return [response_body]
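The external fit.fit used by /predict returns a callable mapping a population count to a timestamp. A plausible stand-in using numpy.polyfit (an assumption about the helper, not the service's actual implementation; data values are illustrative):

import numpy as np

def fit_sketch(histpeople, histstamps):
    # least-squares line: timestamp = a * people + b, returned as a callable
    a, b = np.polyfit(histpeople, histstamps, 1)
    return lambda people: a * people + b

fitlambda = fit_sketch([100000, 105000, 110000], [1.30e9, 1.35e9, 1.40e9])
print(int(fitlambda(115000)))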
Example #12
def fit_quality(time, parameters, noise, repetitions):
    """
    Apply the fitting routine a number of times, as given by
    `repetitions`, and return information about the fit performance.
    """
    results = []
    errors = []

    from numpy.random import seed

    alpha_psp = AlphaPSP()

    for _ in xrange(repetitions):
        seed()

        value = noisy_psp(time=time, noise=noise, **parameters)
        fit_result = fit(alpha_psp, time, value, noise,
                         fail_on_negative_cov=[True, True, True, False, False])
        if fit_result is not None:
            result, error, chi2, success = fit_result
            if chi2 < 1.5 and success:
                print chi2, result
                results.append(result)
                errors.append(error)
        else:
            print "fit failed:",
            print fit_result

    keys = alpha_psp.parameter_names()

    result_dict = dict(((key, []) for key in keys))
    error_dict = dict(((key, []) for key in keys))

    for result in results:
        for r, key in zip(result, keys):
            result_dict[key].append(r)

    for error in errors:
        for r, key in zip(p.diag(error), keys):
            error_dict[key].append(p.sqrt(r))
            if p.isnan(p.sqrt(r)):
                print "+++++++", r

    return ([p.mean(result_dict[key]) for key in keys],
            [p.std(result_dict[key]) for key in keys],
            len(results),
            keys,
            [result_dict[key] for key in keys],
            [error_dict[key] for key in keys])
Example #13
    def fit(self, train_x, train_y, test_x, test_y):
        for i in range(2):
            fittedParams, mse = fit(self.model, self.paramcount, train_x,
                                    train_y, test_x, test_y)
            if i == 0 or mse < error:
                error = mse
                bestparam = fittedParams

        self.params = bestparam
        self.error = error

        return error
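The loop above simply restarts the fit and keeps the parameters with the lowest MSE; the same idea generalises to any number of restarts. A small sketch, where fit_once is a hypothetical callable returning (params, mse):

def best_of_n(fit_once, n):
    best_params, best_error = fit_once()
    for _ in range(n - 1):
        params, mse = fit_once()
        if mse < best_error:
            best_params, best_error = params, mse
    return best_params, best_error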
Example #14
def application(environ, start_response):
    logpath = "%s/winti.log" % os.getenv("OPENSHIFT_DATA_DIR")
    ctype = 'text/plain'
    if environ['PATH_INFO'] == '/history':
        lines = open(logpath).readlines()
        response_body = "".join(lines)
    elif environ['PATH_INFO'] == '/current':
        lines = open(logpath).readlines()
        response_body = lines[-1]
    elif environ['PATH_INFO'] == '/troll':
        top = Tkinter.Tk()
        def helloCallBack():
            tkMessageBox.showinfo( "Hello Python", "Hello World")
        B = Tkinter.Button(top, text ="Hello", command = helloCallBack)
        B.pack()
        top.mainloop()
        response_body = "lalalalalla"
    elif environ['PATH_INFO'].startswith('/predict/'):
        s = re.compile(r"/predict/(\d+)")
        m = s.match(environ['PATH_INFO'])
        predpeople = int(m.group(1))
        histstamps = []
        histpeople = []
        lines = open(logpath).readlines()
        for line in lines:
            try:
                stamp, people, date = line.split(",")
            except ValueError:
                stamp, people = line.strip().split(",")
                date = None
            if stamp == "" or people == "":
                continue
            histstamps.append(int(stamp))
            histpeople.append(int(people))
        fitlambda = fit.fit(histpeople, histstamps)
        predstamp = int(fitlambda(predpeople))
        preddate = datetime.datetime.strftime(datetime.datetime.fromtimestamp(predstamp), "%a, %-d. %B %Y, %-H:%M")
        response_body = "Predicted date when the population of Winterthur reaches %s: %s" % (m.group(1), preddate)
    elif environ['PATH_INFO'] == '/source':
        response_body = "blah blah blah blah bk"
    else:
        response_body = "This is a data source/data service informing about the population of Winterthur. Use /current or /history or /predict/{number-of-inhabitants} as invocation methods. Use /source to get a pointer to the service implementation source code."
    response_body = response_body.encode('utf-8')
    status = '200 OK'
    response_headers = [('Content-Type', ctype), ('Content-Length', str(len(response_body)))]
    start_response(status, response_headers)
    return [response_body]
Example #15
    def to_fit(self, x, y, erry):
        ''' Decompose a single spectrum using current parameters '''

        if ( (not self.par['SG_winlen']) or (not self.par['SG_order']) ):
            print('SG_winlen or SG_order is unset')
            return

        status, results = fit.fit(
            x,
            y,
            erry,
            SG_winlen    = self.par['SG_winlen'],
            SG_order     = self.par['SG_order'],
            y_thresh     = self.par['y_thresh'],
            band_frac    = self.par['band_frac'],
            derv2_thresh = self.par['derv2_thresh']
        )
        return results
Example #17
    def fit(self, funcs, quiet=False):

        f = fit(x=self.plotx, y=self.ploty, funcs=funcs)
        plsq = f.go()

        # Generate new x values (500 points) so the plotted
        # functions look smooth!

        step = (self.plotx.max() - self.plotx.min()) / 500
        x = arange(self.plotx.min(), self.plotx.max(), step)

        self.fitx = x
        self.fity = f.evalfunc(x=x)

        if self.plotted:
            hold(True)
            plot(self.fitx, self.fity, 'b-')

        return plsq
Example #18
    def fit(self, funcs, quiet=False):

        f = fit(x=self.plotx, y=self.ploty, funcs=funcs)
        plsq = f.go()

        # Generate new x values (500 points) so the plotted
        # functions look smooth!

        step = (self.plotx.max() - self.plotx.min()) / 500
        x = arange(self.plotx.min(), self.plotx.max(), step)

        self.fitx = x
        self.fity = f.evalfunc(x=x)

        if self.plotted:
            hold(True)
            plot(self.fitx, self.fity, 'b-')

        return plsq
    def load(self, f):
        for l in f:
            l = l.strip()
            if l.startswith("END DATASET"):
                break
            elif l.startswith("FIT"):
                key = int(l.split()[1])
                self.list_of_keys.append(key)
                self.fits[key] = fit(self.list_datafiles, self.datafile, self.index, key)
                self.fits[key].load(f)
                self.number_fits = max(self.number_fits, key + 1)

            elif l.startswith("Label:"):
                self.label = l[7:]

            else:
                if len(l.split()) > 1:
                    variable = l.split()[0]
                    self.info[variable][0] = l[len(variable) + 1:]
        self.calculate()
        for key in self.list_of_keys:
            self.fits[key].start_fit()
Example #20
	def fit_T1(self, save_file=None):
		from fit import fit
		from fitmodels import func_kohlrausch, guess_kohlrausch	

		p0 = guess_kohlrausch(self.target[:, 0], self.target[:, 1])
		self.pprint(p0)
		fit_result = fit(func_kohlrausch, p0, self.target[:, 0], self.target[:, 1])
		self.pprint(fit_result)
		p0 = fit_result[0]
		self.info("Final t1: %r" % (1 / p0[1]))
		t1_misguess = log10(self.T1guess * p0[1])
		if abs(t1_misguess) > 0.74:
			print "*** WARNING: T1guess is off by 10**%r." % \
				t1_misguess

		if save_file is not None:
			self.target[:, 2] = self.target[:, 1] - \
				func_kohlrausch(p0, self.target[:, 0])
			savetxt(save_file, self.target)

		self.fit_result = fit_result
		return 1 / p0[1]
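func_kohlrausch comes from fitmodels and is not shown here; since the code reports 1 / p0[1] as T1, p[1] acts as a rate. A commonly used stretched-exponential form, given purely as an assumption about what the model may look like:

from numpy import exp

def func_kohlrausch_sketch(p, t):
    # p = [amplitude, rate, stretching exponent]; T1 ~ 1 / p[1]
    # hypothetical form; the actual fitmodels.func_kohlrausch may differ
    return p[0] * exp(-(p[1] * t) ** p[2])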
Example #21
def training_fcn(kwargs):

    j = kwargs['j']
    true_params = np.append(kwargs['hgts'][j],
                            np.append(kwargs['FWHMs'][j], kwargs['cens'][j]))

    # Produce initial guesses
    status, result = fit.fit(kwargs['vel'][j],
                             kwargs['ytb'][j],
                             kwargs['erry'][j],
                             SG_winlen=kwargs['SG_winlen'],
                             SG_order=kwargs['SG_order'],
                             y_thresh=kwargs['y_thresh'],
                             derv2_thresh=kwargs['derv2_thresh'])

    # If nothing was found, skip to next iteration
    if (status == 0):
        print('Nothing found in this spectrum, continuing...')
        return 0, 0, true_params // 3

    guess_params = result['init_pars']

    return compare_pars(guess_params, true_params)
Example #22
def make_plot():
    logfile = "/home/durant/data/ultracam/ScoX1/18_run025_n_trm.red.log"
    ccd=1
    band="low"
    mjd,rel,err,mjd_utc,rate,e = ultraxte.correlate(logfile,ccd,root+logfiles[logfile]+mid+band+'.lc',fclip=2)
    start,lag,output,output2,output3,ave1,ave2,std1,std2=corr(50,30,mjd[rel>0.03]*24*3600,rel[rel>0.03],mjd_utc*24*3600,rate)
    imshow(output,extent=[lag[0]-0.26502320542931557/2,lag[-1]+0.26502320542931557/2,-15,start[-1]-start[0]+15])
    fitfunc = lambda p,x : abs(p[0]) * s.gauss(x,p[1],p[2])
    errfunc = lambda p,x,y : fitfunc(p,x) - y
    from fit import fit
    res = []
    p0 = [1.0, 0.0, 1.0]  # initial fit guess (amplitude, centre, width); assumed values
    for i in range(len(start)):
        temp = fit(lag, output[i], bart_err(output2[i], output3[i]), p0[:], fitfunc, assume=0)
        res.append(temp)
    peak = n.ones(len(start)) * 1.0
    err = n.ones(len(start)) * 1.0
    for i in range(len(start)):
        try:
            peak[i] = res[i][0][1]
            err[i] = res[i][1][1]
        except (IndexError, TypeError):
            pass
    s.errorbar(peak[peak != 1], start[peak != 1] - start[0], xerr=err[peak != 1],
               yerr=0, fmt='ko', capsize=0)
    s.xlabel(r"$\delta t$ (s)")
    s.ylabel(r"$T$ (s)")
Example #23
def main():
    print "Hello world"
    xdata = [1, 2, 3, 4, 5]
    ydata = np.square(xdata)  # np.square returns a new array, xdata is untouched
    a = fit.fit(xdata, ydata)
    yfunc = list(xdata)  # copy, so the commented-out loop below cannot mutate xdata
    print "xdata", xdata
    # for i in range(1, 6):
    #     yfunc[i - 1] = fit.equation(i, a)
    print "xdata", xdata
    print a
    print yfunc
    plt.title("Fitted Plot in red vs Actual points in black")
    plt.grid(True)
    plt.xlabel("X")
    plt.ylabel("Y")
    xx = np.linspace(np.min(xdata), np.max(xdata))
    #plt.xlim([xdata[0]-20, xdata[-1] + 20])
    #plt.ylim([ydata[0]-2000, ydata[-1] + 2000])
    plt.plot(xx, fit.equation(xx, a), 'r-')
    #, label="Fitted Curve")
    plt.plot(xdata, ydata, 'ko')
    plt.show()
Example #24
def auto_fit(cors, options=None):
    logging.info("Finding best fit range")
    logging.debug("Temporarily setting the logger to warnings only")

    individual_fitfun = functions[options.function].individual(options.period)

    print cors
    ranges = []
    for cor in cors:
        ranges.append(fit.best_fit_range(individual_fitfun, cor, options)[0])

    print "best ranges"
    print ranges
    #exit()
    # logger = logging.getLogger()
    # previous_loglevel = logger.level
    # ALWAYSINFO = 26
    # logger.setLevel(ALWAYSINFO)
    # import itertools
    # tend = options.period/2
    # print tend

    # tends = [tend] * len(cors)



    # allowed_tmins = range(5,tend-5)
    # tmins = itertools.product(allowed_tmins, repeat=2)
    # tmins = [(x,y) for x,y in tmins if x>y]
    # best_ranges = []
    # for tmin in tmins:
    #     multicor = mergecors(cors, tmin, tends)
    #     funct = functions[options.function](Nt=options.period, ranges=zip(tmin, tends))
    #     try:
    #         _, _, qual = fit.fit(funct, multicor, min(multicor.times), max(multicor.times),
    #                              bootstraps=1, return_chi=False, return_quality=True, options=options)
    #         metric = qual
    #         best_ranges.append((metric,tmin))
    #     except RuntimeError:
    #         logging.warn("Fitter failed, skipping this tmin,tmax {},{}".format(*tmin))
    #     except fit.InversionError:
    #         logging.warn("Covariance matrix failed, skipping this tmin,tmax {},{}".format(*tmin))
    #     except Exception as e:
    #         logging.warn("Fitter failed error {}".format(e))
    #         logging.warn("Fitter failed, skipping this tmin,tmax {},{}".format(*tmin))

    # logger.setLevel(previous_loglevel)
    # logging.debug("Restored logging state to original")

    # sortedranges = [(metric, tmins) for metric, tmins in sorted(best_ranges, reverse=True)]
    # print sortedranges[:5]
    # exit()
    # for _, tmin in sortedranges[:10]:
    #     try:
    #         multicor = mergecors(cors, tmin, tends)
    #         funct = functions[options.function](Nt=options.period, ranges=zip(tmin, tends))
    #         fit.fit(funct, multicor, min(multicor.times), max(multicor.times),
    #                 filestub=options.output_stub, return_chi=False, return_quality=True, options=options)
    #     except RuntimeError:
    #         logging.warn("Fitter failed, skipping this tmin,tmax {},{}".format(*tmin))
    #     except fit.InversionError:
    #         logging.warn("Covariance matrix failed, skipping this tmin,tmax {},{}".format(*tmin))
    #     else:
    #         print "fit using ranges {}".format(tmin)
    #         break
    # print "done"
    # exit()

    # ranges = [(10,25), (10,25)]
    multicor = mergecors(cors, zip(*ranges)[0], zip(*ranges)[1])
    funct = functions[options.function](Nt=options.period, ranges=ranges)
    fit.fit(funct, multicor, min(multicor.times), max(multicor.times),
            filestub=options.output_stub, return_chi=False, return_quality=True, options=options)
    print "done"


    logging.info("starting fit with mergedcorrelator")
Example #25
    def __init__(self, dl, device):
        self.dl = dl
        self.device = device

    def __iter__(self):
        for b in self.dl:
            yield to_device(b, self.device)

    def __len__(self):
        return len(self.dl)


# move the datasets into GPU memory so subsequent computation runs on the GPU
train_dl = DeviceDataLoader(train_dl, dev)
valid_dl = DeviceDataLoader(valid_dl, dev)

model = model.kiUnet(1, 1)  # train with the CNN model
to_device(model, dev)  # move the model and its parameters into GPU memory too, then run on the GPU

loss_func = loss_functions.loss_func()

# choose the optimizer
opt = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-8)
# set weight decay
scheduler = optim.lr_scheduler.MultiStepLR(opt,
                                           milestones,
                                           gamma=gamma,
                                           last_epoch=-1)

fit.fit(epochs, model, loss_func, opt, train_dl, valid_dl)
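fit.fit here drives a standard PyTorch train/validate loop. A minimal sketch consistent with the call signature fit(epochs, model, loss_func, opt, train_dl, valid_dl) (an assumption about the module's internals):

import torch

def fit_sketch(epochs, model, loss_func, opt, train_dl, valid_dl):
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss = loss_func(model(xb), yb)
            loss.backward()
            opt.step()
            opt.zero_grad()
        model.eval()
        with torch.no_grad():
            val_loss = sum(loss_func(model(xb), yb).item() for xb, yb in valid_dl)
        print(epoch, val_loss / len(valid_dl))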
Example #26
def parse_args():
    parser = argparse.ArgumentParser(
        description="Train classification models on ImageNet",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    models.add_model_args(parser)
    fit.add_fit_args(parser)
    data.add_data_args(parser)
    dali.add_dali_args(parser)
    data.add_data_aug_args(parser)
    return parser.parse_args()


def setup_logging(args):
    head = '{asctime}:{levelname}: {message}'
    logging.basicConfig(level=logging.DEBUG,
                        format=head,
                        style='{',
                        handlers=[
                            logging.StreamHandler(sys.stderr),
                            logging.FileHandler(args.log)
                        ])
    logging.info('Start with arguments {}'.format(args))


if __name__ == '__main__':
    args = parse_args()
    setup_logging(args)

    model = models.get_model(**vars(args))
    data_loader = data.get_data_loader(args)

    fit.fit(args, model, data_loader)
Example #27
    # end def predict_proba

    def predict(self, X):
        return batch_to_anysize(self.batch_size, self.predict_batch, X)

    # end def predict


if __name__ == '__main__':
    from misc import set_quick_logging
    set_quick_logging()

    datasets = load_data(data_name='mnist')
    clf = MLP(n_epochs=10, batch_size=200)
    fit(clf,
        train_set=datasets[0],
        valid_set=datasets[1],
        test_set=datasets[2],
        flag_report_test=True,
        flag_report_valid=True,
        early_stop=True)

    print clf.predict_proba_batch(datasets[1][0][0:200])
    print clf.predict_batch(datasets[1][0][0:200])
    print clf.predict_proba(datasets[1][0])
    print clf.predict(datasets[1][0])
    print clf.predict_cost_batch(datasets[1][0][0:200], datasets[1][1][0:200])
    print clf.predict_cost(datasets[1][0][0:200], datasets[1][1][0:200])
    print clf.predict_cost(datasets[1][0], datasets[1][1])
Example #28
h3 = layers.DenseLayer(h2, 20)#, nonlinearity=nonlinearities.sigmoid)
h4 = layers.DenseLayer(h3, 20)#, nonlinearity=nonlinearities.sigmoid)
h5 = layers.DenseLayer(h4, 10, nonlinearity=nonlinearities.softmax)

_layers = [h1, h2, h3, h4]


shape = lin.get_output_shape()
Xi = np.asarray([t.ravel() for t in X])
for l in _layers:
    if l.name != 'merge':
        inp = layers.InputLayer(shape)
        tlayer = layers.DenseLayer(incoming=inp, num_units=l.num_units, W=l.W, b=l.b, nonlinearity=l.nonlinearity)
        out = layers.DenseLayer(incoming=tlayer, num_units=Xi.shape[1])  # , nonlinearity=nonlinearities.sigmoid
        if l.name == 'afterinput':
            fit(lin=inp, lhog=lhog, output_layer=out, X=X, X_hog=X_hog, y=Xi, eval_size=0.1, num_epochs=100,
                l_rate_start=0.01, l_rate_stop=0.00001, batch_size=100, l2_strength=0, Flip=False)
        else:
            fit(lin=inp, lhog=lhog, output_layer=out, X=Xi, X_hog=X_hog, y=Xi, eval_size=0.1, num_epochs=50,
                l_rate_start=0.001, l_rate_stop=0.0001, batch_size=100, l2_strength=0, Flip=False)



    shape = l.output_shape
    kf = KFold(NTRAIN, 100)
    Xi = np.empty(tuple(np.append([1], shape[1:])), 'float32')
    for indices in iter(kf):
        lin.input_var = theano.shared(X[indices[1]])
        lhog.input_var = theano.shared(X_hog[indices[1]])
        out = theano.function([], layers.get_output(l, deterministic=True), on_unused_input='ignore')
        t=out()
        Xi = np.concatenate((Xi, out()))
Example #29
def BBfit(x, y, x_err=None, y_err=None, ai=None, bi=None, scatteri=None, clip=None):
    return fit.fit(x, y, x_err, y_err, ai, bi, scatteri, clip, fit_type=2)
Example #30
    def add_fit(self):
        self.fits[self.number_fits] = fit(self.list_datafiles, self.datafile, self.index, self.number_fits)
        self.list_of_keys = sorted(self.fits.keys())
        self.number_fits += 1
Example #31
    parser.set_defaults(
        # opening reconstructed; defaults listed earlier were truncated
        lr=0.02,
        lr_factor=0.2,
        lr_step_epochs='5',
        wd=0,
        mom=0,
        optimizer='sgd',
        disp_batches=10,
        model_prefix=MODEL_DIR + "/resnext101",
    )

    # use less augmentations for fine-tune
    data.set_data_aug_level(parser, 1)

    args = parser.parse_args()

    # pre-trained model should be downloaded and renamed
    sym, arg_params, aux_params = mx.model.load_checkpoint(
        MODEL_DIR + "/resnext101", 0)

    # remove the last fullc layer
    (new_sym, new_args) = get_fine_tune_model(sym, arg_params,
                                              args.num_classes,
                                              args.layer_before_fullc)

    # train
    fit.fit(args=args,
            network=new_sym,
            data_loader=data.get_image_iter,
            arg_params=new_args,
            aux_params=aux_params)
Example #32
test_dataset = TextLoader(X_test, y_test, TEST_TRANSFORMS, char2idx, idx2char)
test_loader = torch.utils.data.DataLoader(test_dataset, shuffle=True,
                                           batch_size=BATCH_SIZE, pin_memory=True,
                                           drop_last=True, collate_fn=TextCollate())

if MODEL == 'model1':
  from models import model1
  model = model1.TransformerModel(len(ALPHABET), hidden=HIDDEN, enc_layers=ENC_LAYERS, dec_layers=DEC_LAYERS,   
                          nhead=N_HEADS, dropout=DROPOUT).to(DEVICE)
if MODEL == 'model2':
  from models import model2
  model = model2.TransformerModel(len(ALPHABET), hidden=HIDDEN, enc_layers=ENC_LAYERS, dec_layers=DEC_LAYERS,   
                          nhead=N_HEADS, dropout=DROPOUT).to(DEVICE)

if FROM_CHECKPOINT_PATH is not None:
  model.load_state_dict(torch.load(FROM_CHECKPOINT_PATH))
  print(f'loading from checkpoint {FROM_CHECKPOINT_PATH}')

criterion = torch.nn.CrossEntropyLoss(ignore_index=char2idx['PAD'])
optimizer = torch.optim.__getattribute__(OPTIMIZER_NAME)(model.parameters(), lr=LR)

if SCHUDULER_ON:
  scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=PATIENCE)
else:
  scheduler = None

print(f'checkpoints are saved in {CHECKPOINTS_PATH} every {CHECKPOINT_FREQ} epochs')
for epoch in range(1, N_EPOCHS, CHECKPOINT_FREQ):
  fit(model, optimizer, scheduler, criterion, train_loader, test_loader, epoch, epoch+CHECKPOINT_FREQ)
  torch.save(model.state_dict(), CHECKPOINTS_PATH+'checkpoint_{}.pt'.format(epoch+CHECKPOINT_FREQ))
Example #33
    parser.set_defaults(
        # opening reconstructed; earlier defaults were truncated
        # data
        data_train_imglist=TRAIN_ALL_DIR + "/train_all_train.lst",
        data_train_imgrec=TRAIN_ALL_DIR + "/train_all_train.rec",
        data_train_imgidx=TRAIN_ALL_DIR + "/train_all_train.idx",
        data_val_imglist=TRAIN_ALL_DIR + "/train_all_val.lst",
        data_val_imgrec=TRAIN_ALL_DIR + "/train_all_val.rec",
        data_val_imgidx=TRAIN_ALL_DIR + "/train_all_val.idx",
        num_classes=NUM_CLASSES,
        num_examples=NUM_TRAIN_IMGS,
        image_shape='3,%d,%d' % (INPUT_HEIGHT, INPUT_WIDTH),
        # train
        gpus='0',
        batch_size=TRAIN_BATCH_SIZE,
        num_epochs=EPOCHS,
        lr=LR,
        lr_factor=0.5,
        lr_step_epochs='5,10,15,20,25,30,35,40,45',
        optimizer='sgd',
        disp_batches=10,
        model_prefix=MODEL_DIR + "/resnext50",
    )
    args = parser.parse_args()

    # load network
    from importlib import import_module
    net = import_module(args.network)
    sym = net.get_symbol(**vars(args))

    # train
    fit.fit(args, sym, data.get_image_iter)
Example #34
    val_dataiter = mx.io.ImageRecordIter(
        # opening reconstructed; earlier arguments truncated, iterator class assumed
        data_shape=(3, 224, 224),
        path_imgrec=args.val_rec,
        shuffle=True,
        rand_mirror=args.rand_mirror,
        mean=args.rgb_mean,
        cutoff=0,
    )
    val_dataiter = mx.io.PrefetchingIter(val_dataiter)
    # load network
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    from importlib import import_module
    net = import_module('symbols.' + args.network)
    sym = net.get_symbol(num_classes=args.num_classes,
                         multiplier=args.multiplier)
    # print(sym.get_internals()['mobilenetv20_features_conv0_weight'].attr_dict()['mobilenetv20_features_conv0_weight']['__shape__'])
    # exit()
    # set up logger
    logger = logging.getLogger()
    fh = logging.FileHandler(
        os.path.join(
            '../../log',
            time.strftime('%F-%T', time.localtime()).replace(':', '-') +
            '.log'))
    fh.setLevel(logging.DEBUG)
    # ch = logging.StreamHandler()
    # ch.setLevel(logging.INFO)
    logger.addHandler(fh)
    # logger.addHandler(ch)
    # train
    fit.fit(args, sym, train_dataiter, val_dataiter, logger)
Example #35
    def fit(self, data, x=None, var=None):
        return ft.fit(self.legendre, self.coef, data, x=x, var=var)
Example #36
def train_core(
    X,
    y,
    X_valid=None,
    y_valid=None,
    n_iter=500,
    target_width=32,
    target_height=32,
    class_weight=None,
    flag_rgb=True,
    classifier_type="convnet",
    preproc_type="univar",
    bowsvm_config=None,
):
    '''
    core function for training, including data preprocessing and classifier
    training
    '''

    # data preprocessing #
    # input/output: X, X_valid

    if preproc_type == 'none':
        scaler = NullScaler()
        if classifier_type == 'bowsvm':
            scaler.transform = recover_image_from_vector
        X = scaler.transform(X)
        X_valid = scaler.transform(X_valid)

    elif preproc_type == "univar":
        scaler = preprocessing.StandardScaler().fit(X)
        X = scaler.transform(X)
        X_valid = scaler.transform(X_valid)
    elif preproc_type == "zca":
        scaler = preprocessor(flag_zca=True)
        X = scaler.fit(X)
        X_valid = scaler.fit(X_valid)
    elif preproc_type == "lcn":
        scaler = preprocessor(flag_lcn=True)
        X = scaler.fit(X)
        X_valid = scaler.fit(X_valid)
    else:
        raise NotImplementedError(
            'Currently only implemented preproc_type: univar, zca and lcn.')

    # need to make y int

    # train classifier #
    # input: X, y
    # output: clf

    # here clf combines log loss and l2 penalty, which is equivalent to logistic regression
    # alpha is the coeff for the l2 regularization term

    if classifier_type == "convnet":
        print X.shape[0]

        if flag_rgb:
            img_dim = 3
        else:
            img_dim = 1

        clf = lenet5(n_epochs=n_iter,
                     batch_size=256,
                     learning_rate=0.002,
                     img_dim=img_dim,
                     nkerns=[32, 48],
                     num_class=2)

        fit_outputs = fit(clf,
                          train_set=(X, y),
                          flag_report_test=False,
                          flag_report_valid=True,
                          early_stop=True,
                          valid_set=(X_valid, y_valid),
                          optimize_type='momentum')

        clf.set_weights(fit_outputs['best_params'])

    elif classifier_type == "logreg":

        clf = SGDClassifier(loss="log",
                            penalty="l2",
                            n_iter=n_iter,
                            verbose=True,
                            shuffle=True,
                            alpha=0.01,
                            class_weight=class_weight)
        clf.fit(X, y)

    elif classifier_type == "svm":

        clf = SVM(probability=True, verbose=True, kernel='linear')
        clf.fit(X, y)

    elif classifier_type == "bowsvm":

        clf = BowSvm(
            num_cluster=bowsvm_config['num_cluster'],
            verbose=True,
            kernel=bowsvm_config['kernel'],
            degree=bowsvm_config['degree'],
        )
        clf.fit(X, y)

    else:
        raise NotImplementedError(
            "classifier_type can only be convnet / logreg / svm / bow-svm")

    # report number of examples
    n_pos = (y == 1).sum()
    n_neg = (y == 0).sum()
    n_train = y.shape[0]
    logger.info(
        'training set: %d positive examples, %d negative examples, %d total' %
        (n_pos, n_neg, n_train))

    if y_valid is not None:
        n_pos = (y_valid == 1).sum()
        n_neg = (y_valid == 0).sum()
        n_train = y_valid.shape[0]
        logger.info(
            'validation set: %d positive examples, %d negative examples, %d total'
            % (n_pos, n_neg, n_train))

    return clf, scaler
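NullScaler and recover_image_from_vector are assumed from elsewhere in this codebase. A minimal NullScaler consistent with its use above (an identity transform whose transform attribute can be swapped out) might look like:

class NullScaler(object):
    # identity scaler: preproc_type == 'none' leaves the data untouched,
    # unless .transform is replaced (e.g. by recover_image_from_vector)
    def transform(self, X):
        return X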
Example #38
    def run_fit(self):
        param = []
        for i in range(len(self.fpcoef)):
            if self.fitcoef[i]:
                param.append(self.fpcoef[i])

        fit(self.errf, param, self.w)
Example #39
    def fit(self, func, coef, data, var):
        return ft.fit(func, coef, data, var=var)
Example #41
quotes = bmfutils.readfile('test.data')
petr4 = quotes[quotes.papel == 'PETR4']
calls = quotes[quotes.merc == '070']
puts = quotes[quotes.merc == '080']

dates = sorted(set(petr4.date))
for d in dates[:10]:
    data = []
    precoacao = petr4[petr4.date == d].med[0]
    c = calls[calls.date == d]
    for v in sorted(set(c.vencimento))[:4]:
        t = (v - d).days

        def f(strike, p):
            r = 0.00018
            v = p[0]
            return bs.BlackSholes('c', precoacao, strike, t, r, v)

        cv = c[c.vencimento == v]
        params = fit.fit(f, [0.0], cv.exercicio, cv.med)
        print params, v
        data.append(params)
        #pylab.plot(cv.exercicio,f(x,params),'x')
        #pylab.plot(cv.exercicio,tvalue,'.',label=str(v))
    pylab.plot([x[0] for x in data], [x[0] for x in data], '.', label=str(d))
    print '-----'
pylab.rcParams['legend.loc'] = 'best'
pylab.legend()
pylab.show()
Example #42
# # No need to provide first guess at parameters for fit.gaus
# (xf, yf), params, err, chi = fit.fit(fit.gaus, x,y)

# print "N:    %.2f +/- %.3f" % (params[0], err[0])
# print "N:    %.2f +/- %.3f" % (params[1], err[1])
# print "N:    %.2f +/- %.3f" % (params[2], err[2])


def example_function(params, x):
    kappa, n = params
    return (1. - exp(-kappa * x**n))


# It will still try to guess parameters, but they are dumb!
(xf, yf), p, e, chi = fit.fit(example_function, x, y)
plot(x, y, 'bo', label='Data')
# plot(xf,yf, 'r-', label='Fit')
# errorbar(xf,yf,yerr=e,'r-', label='Fit')
errorbar(xf, yf, yerr=chi)

legend()

# results = fit.fit(example_function, x, y, default_pars = [1, 12, 10, 1, 1, 1])
# plot(results[0][0], results[0][1], 'r--')

# Fit a sub-range:

# clf()
# results = fit.fit(fit.gaus, x, y, data_range=[0, 23])
# plot(results[0][0], results[0][1], 'r-.')
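The snippet above assumes pylab-style imports and x, y arrays already in scope. A synthetic setup that makes it runnable (data values are illustrative, and the fit module is assumed to be on the path):

import numpy as np
from numpy import exp
from pylab import plot, errorbar, legend
import fit

x = np.linspace(0.1, 5.0, 50)
y = 1. - exp(-0.8 * x**1.2) + np.random.normal(0.0, 0.02, x.size)  # noisy saturating curve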
Example #43
    start = 30.
    offset = 50.

    repetitions = 100

    result = p.empty(repetitions * len(times))
    for i in xrange(repetitions):
        v = noisy_psp(height, tau_1, tau_2, start, offset, times, noise)
        result[i * len(times):
               (i + 1) * len(times)] = v

    return result

if __name__ == '__main__':
    noise_est = 0.1
    psps = build_rep_trace(noise_est)
    shapes = segment(psps, dt, 100)

    psp = AlphaPSP()

    i = []
    i_err = []
    res = []
    for shape in shapes:
        r = fit(psp, times, shape, noise_est,
                fail_on_negative_cov=[True, True, True, False, False])
        a, b = psp.integral(times, r[0], r[1])
        print "fit succeeded:", r[-1]
        print "fit result:", r[0]
        print "integral:", a, "+/-", b
Example #44
    def calculate_fit_quality_fixed_seed(self,
                                         seed_val,
                                         debug_plot=False,
                                         max_dev=4.):
        """
        Assert the quality of the fit by comparing the parameters used
        to generate test data to the fit result. The deviation should
        be smaller than `max_dev` times the error estimate reported by
        the fit routine. (e.g., using max_dev=3 leads to a statistical
        failure probability of 0.3% assuming a normal distribution on
        the results.)

        Only the height and tau_1/tau_2 are tested for a correct error
        estimate.
        """

        noise = .1

        seed(seed_val)
        times = p.arange(0, 100, .1)

        height = 1.
        tau_1 = 10.
        tau_2 = 5.
        start = 30.
        offset = 50.

        voltage = noisy_psp(height, tau_1, tau_2, start, offset, times, noise)

        fitres, cov, red_chi2, success = fit(
            AlphaPSP(),
            times,
            voltage,
            noise,
            fail_on_negative_cov=[True, True, True, False, False])

        if debug_plot:
            p.figure()
            p.plot(times, AlphaPSP()(times, *fitres), 'r-')
            p.errorbar(times, voltage, yerr=noise, fmt='bx')
            p.xlabel("time / AU")
            p.ylabel("voltage / AU")
            p.title("fit result")
            fname = "/tmp/fit_quality_plot_{0}.pdf".format(seed_val)
            p.savefig(fname)
            print "Plot saved to:", fname

        err = p.sqrt(p.diag(cov))

        print "seed:", seed_val

        self.assertTrue(success)
        self.assertLess(abs(fitres[0] - height), max_dev * err[0])
        self.assertLess(abs(fitres[1] - tau_1), max_dev * err[1])
        self.assertLess(abs(fitres[2] - tau_2), max_dev * err[2])

        # NOTE: only testing height and time constants for correct
        #       error estimate
        # self.assertLess(abs(fitres[3] - start), max_dev * err[3])
        # self.assertLess(abs(fitres[4] - offset),
        #                 max_dev * err[4])

        self.assertLess(red_chi2, 1.5)
        print red_chi2, abs(fitres[1] - tau_1) / err[1], abs(fitres[2] -
                                                             tau_2) / err[2]
Example #45
    # train_imagenet.py --set-resnet-aug ...
    # Finally, to get the legacy MXNet v1.2 training settings on a per-use basis, invoke as in:
    # train_imagenet.py --set-data-aug-level 3
    parser.set_defaults(
        # network
        num_layers       = 50,

        # data
        resize           = 256,
        num_classes      = 1000,
        num_examples     = 1281167,
        image_shape      = '3,224,224',
        min_random_scale = 1, # if input image has min size k, suggest to use
                              # 256.0/x, e.g. 0.533 for 480
        # train
        num_epochs       = 90,
        lr_step_epochs   = '30,60,80',
        dtype            = 'float32'
    )
    args = parser.parse_args()

    if not args.use_dali:
        data.set_data_aug_level(parser, 0)

    # load network
    import resnet as net
    sym = net.get_symbol(**vars(args))

    # train
    fit.fit(args, sym, dali.get_rec_iter)
Example #46
def get_drift_rate_camera(center_frequency,
                          data_sets,
                          do_plot=True,
                          units='Hz'):
    fig = None
    data_ = []
    ans_ = []
    err_ = []
    for filename, data_set in data_sets.items():
        data = Data(filename)
        fit_settings = data_set['fit_settings']
        data['name'] = filename
        load_images(data, data_set['camera_args'])
        process_images_eg(data, data_set['camera_args'])
        data = process_clock_scan_camera(data, center_frequency)
        data = fit_clock_scan_camera(data, fit_settings)
        data_set['data'] = data
        fig = plot_clock_scan_camera(data, fit_settings['region'], fig=fig)
        data_.append(data)
        ans_.append(data['fit_clock_scan']['fit'])
        err_.append(data['fit_clock_scan']['err'])

    f0_ = [center_frequency - ans['x0'] for ans in ans_]
    if err_:
        f0err_ = [err['x0'] for err in err_]
    else:
        f0err_ = None
    T_ = [
        data['time']['timestamp'][np.argmin(
            abs(data['clock_aom']['frequency'] - f0))]
        for data, f0 in zip(data_, f0_)
    ]
    T_ = [T - T_[0] for T in T_]

    p_guess = {'a': (f0_[-1] - f0_[0]) / T_[-1], 'b': f0_[0]}
    ans, err = fit(linear, p_guess, T_, f0_, y_err=f0err_)
    print 'drift rate: {} {}/s'.format(ans['a'], units)
    print 'error bar: {} {}/s'.format(err['a'], units)
    cxn = labrad.connect()
    cxn.rf.select_device('clock_dedrift')
    rr = cxn.rf.ramprate()
    print 'new ramprate: {} Hz/s'.format(rr + ans['a'])
    if do_plot:
        T_fit_ = np.linspace(min(T_), max(T_), 1000)
        f0_fit_ = linear(ans)(T_fit_)

        fig = plt.figure()
        fig.set_size_inches(8, 5)
        ax = fig.add_subplot(111)

        if f0err_:
            ax.errorbar(T_, f0_, f0err_, fmt='o')
        else:
            ax.plot(T_, f0_, 'o')
        ax.plot(T_fit_, f0_fit_, '-')
        ax.set_title('measure drift')
        ax.set_ylabel('center frequency [{}]'.format(units))
        ax.set_xlabel('time [s]')

        max_y = max([max(l.get_ydata()) for l in ax.get_lines()])
        min_y = min([min(l.get_ydata()) for l in ax.get_lines()])
        range_y = max_y - min_y
        ax.set_ylim([min_y - .1 * range_y, max_y + .1 * range_y])

        max_x = max([max(l.get_xdata()) for l in ax.get_lines()])
        min_x = min([min(l.get_xdata()) for l in ax.get_lines()])
        range_x = max_x - min_x
        ax.set_xlim([min_x - .1 * range_x, max_x + .1 * range_x])

        return fig
Example #47
        else:
            tlayer = layers.DenseLayer(incoming=inp, num_units=l.num_units, W=l.W, b=l.b)

        out = layers.DenseLayer(incoming=tlayer, num_units=Xi.shape[1])
        if ('afterinput' in l.name):
            fit(lin=inp, lhog = lhog, output_layer=out, X=X, X_hog=X_hog, y=Xi, eval_size=0.1, num_epochs=1,
            l_rate_start = 0.01, l_rate_stop = 0.00001, batch_size = 100, l2_strength = 0, Flip=False)
        else:
            fit(lin=inp, lhog = lhog, output_layer=out, X=Xi, X_hog=X_hog, y=Xi, eval_size=0.1, num_epochs=1,
            l_rate_start = 0.01, l_rate_stop = 0.00001, batch_size = 100, l2_strength = 0, Flip=False)

    shape = l.output_shape
    kf = KFold(NTRAIN, 100)
    Xi = np.empty(tuple(np.append([1], shape[1:])), 'float32')
    for indices in iter(kf):
        lin.input_var = theano.shared(X[indices[1]])
        lhog.input_var = theano.shared(X_hog[indices[1]])
        out = theano.function([], layers.get_output(l, deterministic=True), on_unused_input='ignore')
        t=out()
        Xi = np.concatenate((Xi, out()))

    Xi = Xi[1:]
'''



fit(lin=lin, lhog=lhog, output_layer=out, X=X, X_hog=X_hog, y=y, eval_size=0.1, num_epochs=EPOCHS,
    l_rate_start = 0.02, l_rate_stop = 0.0001, batch_size = 100, l2_strength = 0.00005, Flip=True, p=None)


loader.print_prediction(count=NTEST, numiters=1000, pred=pred, lin=lin, lhog=lhog, output_layer=out)
Example #48
        auto_fit(cors, options=args)
    else:
        while not DONE:
            multicor = mergecors(cors, args.time_start, args.time_end)
            multicor.symmetric = True
            multicor.symmetry = "symmetric" # hack
            multicor.period = args.period

            funct = functions[args.function](Nt=args.period, ranges=zip(args.time_start, args.time_end))

            funct.stride = args.tstride

            logging.info("starting fit with mergedcorrelator")
            try:
                averages, stds, chi = fit.fit(funct, multicor,
                                              min(multicor.times), max(multicor.times), bootstraps=args.bootstraps,
                                              filestub=args.output_stub, return_chi=True,
                                              return_quality=False, writecor=False, tstride=args.tstride, options=args)
            except (fit.InversionError, InvalidFit) as e:
                logging.error("Could not invert, {}".format(e))
                if args.debug_noretry:
                    exit(-1)
                logging.error("Trying larger stride time {}->{}".format(args.tstride, args.tstride+1))
                args.tstride += 1
                funct.stride = args.tstride

                if args.tstride > 3:
                    args.time_start = [t+1 for t in args.time_start]
                    args.time_end = [t-1 for t in args.time_end]
                    args.tstride = 1
                    logging.info("new tstarts: {} new tends: {}".format(args.time_start, args.time_end))
                    continue
Example #49
    #     continue
    fn = "testing_2014-06-04/%s/COMSAT_field_%04d-%02d-%02d.txt" % (
        lakename,
        int(margs_part[6]),
        int(margs_part[7]),
        int(margs_part[8]),
    )
    comsatprofile = np.genfromtxt(fn, usecols=1)
    p1d, p2d = finddefaults(margs_part[0])
    p1a = np.linspace(start=p1d / 10.0, stop=p1d * 2.0, num=n)
    p2a = np.linspace(start=p2d / 4.0, stop=(p2d + 3.0) / 4.0, num=n)
    rmsda = np.zeros((n, n)) * np.nan
    biasa = np.zeros((n, n)) * np.nan
    for i1, p1 in enumerate(p1a):
        for i2, p2 in enumerate(p2a):
            rmsd, bias, ml = fit.fit(margs_part + [p1, p2], comsatprofile)
            rmsda[i1, i2] = rmsd
            biasa[i1, i2] = bias
            print (" ".join(["%.2g" % v for v in [i1, i2, p1, p2, rmsd, bias]]))
    rmsddict[id] = rmsda
    biasdict[id] = biasa
    plt.cla()
    plotfitstats(p1a, p2a, p1d, p2d, rmsda, biasa, lakename, "png/%s.png" % lakename)

p_default = dict()
p_min_rmsd = dict()
p_min_abs_bias = dict()
min_rmsd = dict()
min_abs_bias = dict()
for id in cmddict.keys():
    lakename = "COMSAT%s" % id
Example #51
def tmin_plot(fn, cor, tmin, tmax, filestub=None, bootstraps=NBOOTSTRAPS):
    emass_dt = 3

    index = fn.parameter_names.index("mass")
    fitted_params = []
    fitted_errors = []
    qualities = []
    Tpoints = range(tmin, tmax-(len(fn.parameter_names)+1))
    if args.write_each_boot:
        orig_write_each_boot = args.write_each_boot
    for t in Tpoints:
        if args.write_each_boot:
            args.write_each_boot = orig_write_each_boot+"_{}".format(t)
        try:
            params, errors, qual = fit.fit(fn, cor, t, tmax,
                                           filestub=filestub, bootstraps=bootstraps, return_quality=True, options=args)
            fitted_params.append(params[index])
            fitted_errors.append(errors[index])
            qualities.append(qual)
        except RuntimeError:
            fitted_params.append(np.nan)
            fitted_errors.append(np.nan)
            qualities.append(0.0)
            continue

    fig = plt.figure()

    emass = cor.periodic_effective_mass(emass_dt)
    emass_errors = cor.periodic_effective_mass_errors(emass_dt).values()
    emass_plot = plt.errorbar(np.array(emass.keys())+0.2, emass.values(), yerr=emass_errors, fmt='g^', zorder=0)
    cmap = mpl.cm.cool

    tmin_plot = plt.scatter(Tpoints, fitted_params, c=qualities, s=50, cmap=cmap)
    plt.clim(0, 1)
    tmin_error = plt.errorbar(Tpoints, fitted_params, yerr=fitted_errors, fmt=None, zorder=0)

    for i in flatten(emass_plot):
        i.set_visible(False)

    cb = fig.colorbar(tmin_plot)
    cb.set_label("Fit Quality")

    plt.ylim([0, max(emass.values())*1.2])
    plt.xlim([0, tmax + 2])

    def func(label):
        if label == 'tminplot':
            tmin_plot.set_visible(not tmin_plot.get_visible())
            for i in flatten(tmin_error):
                if i:
                    i.set_visible(not i.get_visible())
        elif label == 'emass':
            for i in flatten(emass_plot):
                if i:
                    i.set_visible(not i.get_visible())
        plt.draw()

    if filestub:
        logging.info("Saving plot to {}".format(filestub + ".png"))
        plt.savefig(filestub + ".png")
        logging.info("Saving tmin data to {}".format(filestub+".tmin.out"))
        with open(filestub+".tmin.out", "w") as f:
            for t, data, error, q in zip(Tpoints, fitted_params, fitted_errors, qualities):
                f.write("{}, {}, {}, {}\n".format(t, data, error, q))
    else:
        rax = plt.axes([0.85, 0.8, 0.1, 0.15])
        check = CheckButtons(rax, ('tminplot', 'emass'), (True, False))
        check.on_clicked(func)
        plt.show()
Example #52
true_w = to_gpu(torch.ones((20, 1)))
random_w = torch.randn(true_w.shape)

true_model = to_gpu(Net(true_w))
model = to_gpu(Net(random_w))

optimizer = torch.optim.SGD(model.parameters(), lr=0.05)
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.8)
criterion = torch.nn.MSELoss()
epochs = 20
save_path = 'test.mdl'

valid_dataset = DatasetFromModel(first_dim=1, batches=256, model=true_model)
train_dataset = DatasetFromModel(first_dim=1, batches=1024, model=true_model)
valid_loader = DataLoader(valid_dataset, batch_size=256)
train_loader = DataLoader(train_dataset, batch_size=64)

fit(train_gen=train_loader,
    valid_gen=valid_loader,
    model=model,
    optimizer=optimizer,
    scheduler=scheduler,
    epochs=epochs,
    loss_fn=criterion,
    save_path=save_path)

# now try creating a new model and loading the old weights
model_2 = to_gpu(Net(torch.randn(true_w.shape)))
model_2.load_state_dict(torch.load(save_path))
print(model_2.w)
Example #53
def classify(trainX, trainY, testX, testY, k):
    """ Uses the KNN to classify the test data. """
    P = estimatePosterior(trainX, trainY, testX, k)
    E = fit(testX, P)
    (e_rate, se, interval) = error.confidenceInterval(testY, E)
    return (P, E, e_rate, se, interval)
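estimatePosterior and fit are used but not defined in this snippet; for k-NN the posterior is conventionally the class fraction among the k nearest training points, and fit reduces posteriors to hard labels. A hedged sketch of that convention:

import numpy as np

def estimate_posterior_sketch(trainX, trainY, testX, k, n_classes):
    # P[i, c] = fraction of the k nearest neighbours of testX[i] with label c
    trainX, testX = np.asarray(trainX, float), np.asarray(testX, float)
    d = np.linalg.norm(testX[:, None, :] - trainX[None, :, :], axis=2)
    nn = np.argsort(d, axis=1)[:, :k]
    P = np.stack([(np.asarray(trainY)[nn] == c).mean(axis=1)
                  for c in range(n_classes)], axis=1)
    return P  # hard decisions would be P.argmax(axis=1)

P = estimate_posterior_sketch([[0.], [1.], [2.]], [0, 0, 1], [[0.2], [1.9]], k=1, n_classes=2)
print(P.argmax(axis=1))  # [0 1]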