Example No. 1
def simulate1(T, trial, mu0, mu1, std0, std1):
    """Run `trial` independent simulations of length T and record the
    treatment-effect estimates produced by each assignment design."""
    rct_res = []
    adpt0_res = []
    adpt1_res = []
    opt0_res = []
    opt1_res = []
    
    for t in range(trial):
        # Fresh estimators for every trial.
        rct = RCT()
        adpt0 = Adapt(pretraining=10)  # short pretraining phase
        adpt1 = Adapt(pretraining=50)  # long pretraining phase
        opt0 = OPT()                   # fed the true second moments below
        opt1 = OPT()                   # fed the true variances below

        rct_temp = []
        adpt0_temp = []
        adpt1_temp = []
        opt0_temp = []
        opt1_temp = []
        
        for period_t in range(T):
            # Draw one observation: covariate, both potential outcomes, and
            # the true second moments / variances used by the OPT designs.
            X, Y0, Y1, EY0_2, EY1_2, Var0, Var1 = sampler(mu0, mu1, std0, std1)
            rct(period_t, X, Y0, Y1)
            adpt0(period_t, X, Y0, Y1)
            adpt1(period_t, X, Y0, Y1)
            opt0(period_t, X, Y0, Y1, EY0_2, EY1_2)
            opt1(period_t, X, Y0, Y1, Var0, Var1)
            
            # Record effect estimates only after a short burn-in.
            if period_t > 2:
                rct_temp.append(rct.effect())
                adpt0_temp.append(adpt0.effect())
                adpt1_temp.append(adpt1.effect())
                opt0_temp.append(opt0.effect())
                opt1_temp.append(opt1.effect(estimate=True))
            
        rct_res.append(rct_temp)
        adpt0_res.append(adpt0_temp)
        adpt1_res.append(adpt1_temp)
        opt0_res.append(opt0_temp)
        opt1_res.append(opt1_temp)

    return rct_res, adpt0_res, adpt1_res, opt0_res, opt1_res
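
The helper objects (`sampler`, `RCT`, `Adapt`, `OPT`) are defined elsewhere in this project. A minimal sketch of how the driver might be invoked, with purely illustrative parameter values:

import numpy as np

# Hypothetical invocation; T, trial, and the distribution parameters are
# illustrative, not values taken from the original experiment.
rct_res, adpt0_res, adpt1_res, opt0_res, opt1_res = simulate1(
    T=100, trial=50, mu0=0.0, mu1=0.5, std0=1.0, std1=2.0)

# Each result is a trial x (T - 3) array of effect estimates; averaging
# over trials shows how quickly each design converges.
mean_rct = np.array(rct_res).mean(axis=0)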
Example No. 2
normalize = transforms.Compose([
	T.ToTensor(),
	T.Normalize(mean = base_model.rgb_mean, std = base_model.rgb_std),
	T.Lambda(lambda x: x[[2, 1, 0], ...])  # RGB -> BGR channel order
])

dataset_train = opts.dataset(opts.data, train = True, transform = transforms.Compose([
	T.RandomSizedCrop(base_model.input_side),  # RandomResizedCrop in current torchvision
	T.RandomHorizontalFlip(),
	normalize
]), download = True)
dataset_eval = opts.dataset(opts.data, train = False, transform = transforms.Compose([
	T.Scale(256),  # Resize in current torchvision
	T.CenterCrop(base_model.input_side),
	normalize
]), download = True)

# Wrap a batch-sampling function in an anonymous torch Sampler whose __iter__
# yields the flattened stream of indices produced by sampler(batch, dataset).
adapt_sampler = lambda batch, dataset, sampler, **kwargs: type('', (torch.utils.data.Sampler, ), dict(
	__len__ = dataset.__len__,
	__iter__ = lambda _: itertools.chain.from_iterable(sampler(batch, dataset, **kwargs))
))()
loader_train = torch.utils.data.DataLoader(dataset_train,
	sampler = adapt_sampler(opts.batch, dataset_train, opts.sampler),
	num_workers = opts.threads, batch_size = opts.batch,
	drop_last = True, pin_memory = True)
loader_eval = torch.utils.data.DataLoader(dataset_eval, shuffle = False,
	num_workers = opts.threads, batch_size = opts.batch, pin_memory = True)
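
The `adapt_sampler` one-liner above builds an anonymous `Sampler` subclass on the fly; an equivalent, more explicit formulation (a sketch preserving the same semantics) would be:

import itertools
import torch.utils.data

class AdaptSampler(torch.utils.data.Sampler):
	"""Flattens the batches produced by a batch-sampling function into one
	index stream, so the DataLoader sees an ordinary index sequence."""
	def __init__(self, batch, dataset, sampler, **kwargs):
		self.batch, self.dataset = batch, dataset
		self.sampler, self.kwargs = sampler, kwargs

	def __len__(self):
		return len(self.dataset)

	def __iter__(self):
		return itertools.chain.from_iterable(
			self.sampler(self.batch, self.dataset, **self.kwargs))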

model = opts.model(base_model, dataset_train.num_training_classes).cuda()
# Split the trainable parameters into four groups: {model, base_model} x {weights, biases}.
model_weights, model_biases, base_model_weights, base_model_biases = [
	[p for k, p in model.named_parameters() if p.requires_grad and ('bias' in k) == is_bias and ('base' in k) == is_base]
	for is_base in [False, True] for is_bias in [False, True]
]

base_model_lr_mult = model.optimizer_params.pop('base_model_lr_mult', 1.0)
optimizer = model.optimizer([
	dict(params = base_model_weights, lr = base_model_lr_mult * model.optimizer_params['lr']),
	dict(params = base_model_biases, lr = base_model_lr_mult * model.optimizer_params['lr'], weight_decay = 0.0),
	dict(params = model_biases, weight_decay = 0.0)
], **model.optimizer_params)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, **model.lr_scheduler_params)

log = open(opts.log, 'w', 1)  # line-buffered; Python 3 disallows unbuffered text mode
for epoch in range(opts.epochs):
	scheduler.step()  # pre-1.1 PyTorch convention: step the scheduler at the start of each epoch
	model.train()
	loss_all, norm_all = [], []
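
The excerpt ends just inside the epoch loop. Assuming the model's forward pass returns a scalar loss for a batch (an assumption; the original training step is not shown on this page), a generic continuation would look like:

	for images, labels in loader_train:
		loss = model(images.cuda(), labels.cuda())  # assumed to return a scalar loss
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		loss_all.append(float(loss))
	print('epoch {} loss {:.4f}'.format(epoch, sum(loss_all) / len(loss_all)), file = log)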
Example No. 3
import math

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats


def getdist(question):

    """Call sampler() with various parameters, then plot the results and
    print acceptance rates and other values of interest.
    Accepts 'b', 'c', or 'd'."""

    if question == 'b':

        # PART B

        # Define inputs
        ff = '(1.0/2.0)*np.exp(-abs(x))' # Laplace(0,1)
        gg = stats.cauchy
        n = 1000

        # Call sampler
        m, samples, ratio = sampler(ff,gg,n)

        # Create distributions and perform K-S test
        grid = np.linspace(-10,10,num=1000)
        actual = []
        cc = []
        for x in grid:
            actual.append(eval(ff))
            cc.append(stats.cauchy.pdf(x))
        # One K-S test against the target distribution (not once per grid point).
        ks = stats.kstest(samples, 'laplace')

        # Print and plot relevant information
        print('calculated M value:', m)
        print('acceptance rate:', int(ratio * 100), '%')
        print('K-S test p-value:', int(ks[1] * 100), '%; large values are consistent with a Laplace(0,1) sample')

        plt.hist(samples, 50, density=True, label='samples')
        plt.plot(grid,actual,linewidth=3,color='red',linestyle='--',label='f(x)')
        plt.plot(grid,[m*cci for cci in cc],color='green',label='Mg(x)')
        plt.title('Histogram of Samples, with Laplace(0,1) Distribution')
        plt.legend()

        plt.show()

    if question == 'c':

        # PART C

        # Define inputs
        ff = '(1.0/2.0)*np.exp(-abs(x))'
        gg = stats.t(2)
        n = 1000

        # Call sampler
        m, samples, ratio = sampler(ff,gg,n)
        mb, samplesb, ratiob = sampler(ff,stats.cauchy,n)

        # Create distributions
        grid = np.linspace(-10,10,num=1000)
        actual=[]
        st=[]
        for x in grid:
            actual.append(eval(ff))
            st.append(stats.t.pdf(x,2))

        # Print and plot relevant information
        print('calculated M value:', m)
        print('acceptance rate:', int(ratio * 100), '%')
        print('compare to the acceptance rate of', int(ratiob * 100), '% from part b')

        plt.hist(samples, 50, density=True, label='samples')
        plt.plot(grid,actual,linewidth=3,color='red',linestyle='--',label='f(x)')
        plt.plot(grid,[m*sti for sti in st],color='green',label='Mg(x)')
        plt.title('Histogram of Samples, with Laplace(0,1) Distribution')
        plt.legend()

        plt.show()

    if question == 'd':

        # PART D

        # Define inputs
        ff = 'np.sqrt(2/np.pi)*x**2*np.exp(-x**2/2)' # Maxwell
        gg = stats.truncnorm(-math.pi/2,10,loc=math.pi/2)
        n = 5000

        # Call sampler
        m, samples, ratio = sampler(ff,gg,n)

        # Create distributions
        grid = np.linspace(0,10,num=1000)
        actual=[]
        nm=[]
        for x in grid:
            actual.append(eval(ff))
            nm.append(stats.truncnorm.pdf(x,-math.pi/2,10,loc=math.pi/2))
        ks = stats.kstest(samples, 'maxwell')

        # Print and plot relevant information
        print('calculated M value:', m)
        print('acceptance rate:', int(ratio * 100), '%')
        print('K-S test p-value:', int(ks[1] * 100), '%; large values are consistent with a Maxwell sample')

        plt.hist(samples, 50, density=True, label='samples')
        plt.plot(grid,[m*nmi for nmi in nm],color='blue',label='Mg(x)')
        plt.plot(grid,actual,linewidth=3,color='red',linestyle='--',label='f(x)')
        plt.title('Histogram of Samples, with Maxwell Distribution')
        plt.legend()

        plt.show()
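
Every branch above calls `sampler(ff, gg, n)` and unpacks `(m, samples, ratio)`. The implementation is not shown on this page; a minimal rejection-sampling sketch matching that interface, assuming the envelope constant M is estimated on a fixed grid, would be:

import numpy as np

def sampler(ff, gg, n, grid=np.linspace(-10, 10, num=1000)):
    """Rejection sampling: draw x from the proposal gg and accept it with
    probability f(x) / (M * g(x)).  Returns (M, samples, acceptance rate)."""
    g_vals = gg.pdf(grid)
    f_vals = np.array([eval(ff) for x in grid])
    mask = g_vals > 0
    m = np.max(f_vals[mask] / g_vals[mask])   # envelope constant M

    samples, draws = [], 0
    while len(samples) < n:
        x = gg.rvs()
        draws += 1
        # Accept if a uniform draw under the envelope M*g(x) lands below f(x).
        if np.random.uniform() * m * gg.pdf(x) <= eval(ff):
            samples.append(x)
    return m, np.array(samples), len(samples) / draws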
Example No. 4
    def register(self,
            image,
            template,
            tform,
            sampler=sampler.bilinear,
            method=metric.forwardsAdditive,
            p=None,
            alpha=None,
            verbose=False
            ):
        """
        Computes the registration between the image and template.

        Parameters
        ----------
        image: nd-array
            The floating image.
        template: nd-array
            The target image.
        tform: deformation (class)
            The deformation model (shift, affine, projective)
        method: collection, optional.
            The registration method (defaults to FrowardsAdditive)
        p: list (or nd-array), optional.
            First guess at fitting parameters.
        alpha: float
            The dampening factor.
        verbose: boolean
            A debug flag for text status updates.

        Returns
        -------
        step: optimization step.
            The best optimization step (after convergence).
        search: list (of optimization steps)
            The set of optimization steps (good and bad)
        """

        p = tform.identity if p is None else p
        deltaP = np.zeros_like(p)

        # Dampening factor.
        alpha = alpha if alpha is not None else 1e-4

        # Variables used to implement a back-tracking algorithm.
        search = []
        badSteps = 0
        bestStep = None

        for iteration in range(0, self.MAX_ITER):

            # Compute the transformed coordinates.
            coords = tform(p, template.coords)

            # Sample to the template frame using the transformed coordinates.
            warpedImage = sampler(image.data, coords.tensor)

            # Evaluate the error metric.
            e = method.error(warpedImage, template.data)

            # Cache the optimization step.
            searchStep = optStep(
               error=np.abs(e).sum() / np.prod(image.data.shape),
               p=p.copy(),
               deltaP=deltaP.copy(),
               decreasing=True
               )

            # Update the current best step.
            bestStep = searchStep if bestStep is None else bestStep

            if verbose:
                log.warn(
                    REGISTRATION_STEP.format(
                        iteration,
                        ' '.join('{0:3.2f}'.format(param) for param in searchStep.p),
                        searchStep.error
                        )
                    )

            # Append the search step to the search.
            search.append(searchStep)

            if len(search) > 1:

                searchStep.decreasing = (searchStep.error < bestStep.error)

                alpha = self.__dampening(alpha, searchStep.decreasing)

                if searchStep.decreasing:
                    bestStep = searchStep
                else:
                    badSteps += 1
                    if badSteps > self.MAX_BAD:
                        if verbose:
                            log.warn(REGISTRATION_STOP)
                        break

                    # Restore the parameters from the previous best iteration.
                    p = bestStep.p.copy()

            # Computes the derivative of the error with respect to model
            # parameters.

            J = method.jacobian(warpedImage, template, tform, p)

            # Compute the parameter update vector.
            deltaP = self.__deltaP(J, e, alpha, p)

            # Evaluate stopping condition:
            if np.dot(deltaP.T, deltaP) < 1e-4:
                break

            # Update the estimated parameters.
            p = method.update(p, deltaP, tform)

        return bestStep, search
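
`self.__deltaP` and `self.__dampening` are private helpers not shown in this excerpt. A common damped Gauss-Newton (Levenberg-Marquardt style) form of the parameter update, offered only as a guess at what `__deltaP` computes, is:

import numpy as np

def delta_p(J, e, alpha, p):
    """Sketch of a damped Gauss-Newton step; p is unused here but kept
    for parity with the __deltaP(J, e, alpha, p) call signature."""
    H = np.dot(J.T, J)                    # Gauss-Newton Hessian approximation
    H += alpha * np.diag(np.diag(H))      # Levenberg-Marquardt damping
    return np.linalg.solve(H, np.dot(J.T, np.ravel(e)))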