Example #1
 def update_texture_generator(self, x, y, t=None, l=None, VGGfeatures=None, style_targets=None):
     fake_y = self.G_T(x)
     fake_concat = torch.cat((x, fake_y), dim=1)
     fake_output = self.D_T(fake_concat)
     LTadv = -fake_output.mean()*self.lambda_tadv
     Lrec = self.loss(fake_y, y) * self.lambda_l1
     LT = LTadv + Lrec
     if t is not None:
         with torch.no_grad():
             t[:,0:1] = gaussian(t[:,0:1], stddev=0.2)
             source_mask = self.G_S(t, l).detach()
             source = source_mask.clone()
             source[:,0:1] = gaussian(source[:,0:1], stddev=0.2)
             smaps_fore = [(A.detach()+1)*0.5 for A in self.getmask(source_mask[:,0:1])]
             smaps_back = [1-A for A in smaps_fore]
         fake_t = self.G_T(source)
         out = VGGfeatures(fake_t)
         style_losses1 = [self.style_weights[a] * self.gramloss(A*smaps_fore[a], style_targets[0][a]) for a,A in enumerate(out)]
         style_losses2 = [self.style_weights[a] * self.gramloss(A*smaps_back[a], style_targets[1][a]) for a,A in enumerate(out)]
         Lsty = (sum(style_losses1)+ sum(style_losses2)) * self.lambda_sty
         LT = LT + Lsty
     #global id
     #if id % 20 == 0:
     #    viz_img = to_data(torch.cat((x[0], y[0], fake_y[0]), dim=2))
     #    save_image(viz_img, '../output/texturee_result%d.jpg'%id)
     #id += 1             
     self.trainerG_T.zero_grad()
     LT.backward()
     self.trainerG_T.step()   
     return LTadv.data.mean(), Lrec.data.mean(), Lsty.data.mean() if t is not None else 0
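Many of the text-effect examples on this page (Examples #1, #5, #6, #11, #31, #32) call gaussian(tensor, stddev=0.2) to perturb the distance-field channel of an image tensor. A minimal sketch of such a noise-injection helper, assuming PyTorch tensors normalised to [-1, 1] (the clamping range is an assumption, not taken from the original code):

import torch

def gaussian(x, mean=0.0, stddev=0.2):
    # Hypothetical helper: add zero-mean Gaussian noise to a tensor and
    # clamp back to the assumed [-1, 1] input range.
    noise = torch.randn_like(x) * stddev + mean
    return torch.clamp(x + noise, -1.0, 1.0)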
Example #2
def plot_linear_fit(ax, x_array, y_array, fit_function, fit_sigma, color, cmap):
    xlim = (min(x_array), max(x_array))
    ylim = (min(y_array), max(y_array))
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    x_range = np.linspace(*xlim)
    y_range = np.linspace(*ylim)

    ax.scatter(x_array, y_array, lw=0, alpha=0.5, color=color)
    fit_line = [fit_function(x) for x in x_range]
    ax.plot(x_range, fit_line, color=color)

    xx, yy = np.meshgrid(x_range, y_range)
    zz = xx + yy

    for i in range(len(x_range)):
        for j in range(len(y_range)):
            zz[j, i] = gaussian(yy[j, i], fit_function(xx[j, i]), fit_sigma)

    im = ax.imshow(
        zz, origin='lower', interpolation='bilinear',
        cmap=cmap, alpha=0.5, aspect='auto',
        extent=(xlim[0], xlim[-1], ylim[0], ylim[-1]),
        vmin=0.0, vmax=gaussian(0, 0, fit_sigma)
    )

    return ax, im
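plot_linear_fit treats gaussian(value, mean, sigma) as a scalar normal density and normalises the colour scale by its peak gaussian(0, 0, fit_sigma). A compatible sketch, assuming the standard normalisation constant:

import numpy as np

def gaussian(x, mu, sigma):
    # Hypothetical normal density; its maximum is 1 / (sigma * sqrt(2 * pi)) at x = mu.
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))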
Example #3
def list_of_gaus(arrival_time, intensities, fitted_parameters_f, 
        fitted_parameters_r, norm_factor):
    """Parses results into a master nested list used for presentation and 
    evaluation. Also parses error information.

    Args:
        arrival_time: Arrival time series
        intensities: ATD curve (distribution)
        fitted_parameters_f: Parameters for fitted peaks of the forward method 
                [[height1, mean1, sd1],...]
        fitted_parameters_r: Parameters for fitted peaks of the reverse method 
                [[height1, mean1, sd1],...]
        norm_factor: Scale factor for unnormalised data

    Returns:
        best_parameters: Fitted parameters of the method with the lowest error
        gausslist: List of gaussian distributions for each fitting method
                [[average], [forward], [reverse]]
        min_error: Minimum error between fitting methods
        errorlist: List of error values
                [average, forward, reverse]
    """
    average = weighted_average(fitted_parameters_f, fitted_parameters_r)
    average = list(average)
    average = [list(i) for i in average]
    gausslist = [[], [], []] #[[average], [forward], [reverse]]
    for i in range(len(fitted_parameters_r)):
        gausslist[2].append(utils.gaussian(arrival_time, 
                *fitted_parameters_r[i]))
        gausslist[1].append(utils.gaussian(arrival_time, 
                *fitted_parameters_f[i]))
    for i in range(len(average)):
        gausslist[0].append(utils.gaussian(arrival_time, *average[i]))
    #Get sum of fitted peaks to compare to ATD curve
    fit_av = list(np.zeros((len(intensities))))
    fit_f = list(np.zeros((len(intensities))))
    fit_r = list(np.zeros((len(intensities))))
    # map() is lazy in Python 3, so materialise each running sum as a list
    for i in gausslist[0]:
        fit_av = list(map(add, fit_av, i))
    for i in gausslist[1]:
        fit_f = list(map(add, fit_f, i))
    for i in gausslist[2]:
        fit_r = list(map(add, fit_r, i))
    #The last element of each list in gausslist is the ATD curve and the second 
    #last the fit produced from the respective method
    gausslist[2].append(fit_r)
    gausslist[1].append(fit_f)
    gausslist[0].append(fit_av)
    gausslist[2].append(intensities)
    gausslist[1].append(intensities)
    gausslist[0].append(intensities)
    #Calculate errors normalising with scaling factor
    error_f = utils.rmsd(fit_f, intensities) * norm_factor
    error_r = utils.rmsd(fit_r, intensities) * norm_factor
    error_av = utils.rmsd(gausslist[0][-2], intensities) * norm_factor
    errorlist = [error_av, error_f, error_r]
    min_error = min(errorlist) 
    erind = errorlist.index(min_error)
    parlist = [average, fitted_parameters_f, fitted_parameters_r]
    return parlist[erind], gausslist, min_error, errorlist
Example #4
 def __init__(self):
     self.n_server = 2
     self.servers_capacity = [gaussian(1.2, 0.05), gaussian(0.8, 0.05)]
     #        self.servers_capacity = [gaussian(1,0.4),gaussian(0.85,0.05),uniform(0.3,0.7),uniform(0.75,1.15)]
     self.max_queue_length = 10
     self.time_lam = 2
     self.task_lam = 3
     self.num_actions = self.n_server
Example #5
def main():
    # parse options
    parser = TestOptions()
    opts = parser.parse()

    # data loader
    print('--- load data ---')
    text = load_image(opts.text_name, opts.text_type)
    label = opts.scale
    step = opts.scale_step * 2.0
    if opts.gpu:
        text = to_var(text)

    # model
    print('--- load model ---')
    netGlyph = GlyphGenerator(n_layers=6, ngf=32)
    netTexture = TextureGenerator(n_layers=6)
    netGlyph.load_state_dict(torch.load(opts.structure_model))
    netTexture.load_state_dict(torch.load(opts.texture_model))
    if opts.gpu:
        netGlyph.cuda()
        netTexture.cuda()
    netGlyph.eval()
    netTexture.eval()

    print('--- testing ---')
    text[:, 0:1] = gaussian(text[:, 0:1], stddev=0.2)
    if label == -1:  # default value -1
        scale = -1.0
        noise = text.data.new(text[:, 0:1].size()).normal_(0, 0.2)
        result = []
        while scale <= 1.0:
            img_str = netGlyph(text, scale)
            img_str[:, 0:1] = torch.clamp(img_str[:, 0:1] + noise, -1, 1)
            result1 = netTexture(img_str).detach()
            result = result + [result1]
            scale = scale + step
    else:
        img_str = netGlyph(text, label * 2.0 - 1.0)
        img_str[:, 0:1] = gaussian(img_str[:, 0:1], stddev=0.2)
        result = [netTexture(img_str)]

    if opts.gpu:
        for i in range(len(result)):
            result[i] = to_data(result[i])

    print('--- save ---')
    # directory
    if not os.path.exists(opts.result_dir):
        os.mkdir(opts.result_dir)
    for i in range(len(result)):
        if label == -1:
            result_filename = os.path.join(opts.result_dir,
                                           (opts.name + '_' + str(i) + '.png'))
        else:
            result_filename = os.path.join(opts.result_dir,
                                           (opts.name + '.png'))
        save_image(result[i][0], result_filename)
Example #6
 def update_structure_generator(self, x, xl, l, t=None):
     fake_x = self.G_S(xl, l)  # fake_x : [32,3,256,256]
     fake_output = self.D_S(fake_x)  # vector
     LSadv = -fake_output.mean() * self.lambda_sadv
     LSrec = self.loss(fake_x, x) * self.lambda_l1
     LS = LSadv + LSrec
     if t is not None:
         # weight map based on the distance field
         # whose pixel value increases with its distance to the nearest text contour point of t
         Mt = (t[:, 1:2] + t[:, 2:3]) * 0.5 + 1.0
         t_noise = t.clone()
         t_noise[:, 0:1] = gaussian(t_noise[:, 0:1], stddev=0.2)
         fake_t = self.G_S(t_noise, l)
         LSgly = self.loss(fake_t * Mt, t * Mt) * self.lambda_gly
         LS = LS + LSgly
     self.trainerG_S.zero_grad()
     LS.backward()
     self.trainerG_S.step()
     # global id
     # if id % 60 == 0:
     #    viz_img = to_data(torch.cat((x[0], xl[0], fake_x[0]), dim=2))
     #    save_image(viz_img, '../output/structure_result%d.jpg'%id)
     # id += 1
     return LSadv.data.mean(), LSrec.data.mean(), LSgly.data.mean() if t is not None else 0
Example #7
 def get_denom(self, xi_t):
     probs = 0.0
     for k in range(self.n_components):
         mu_t_k = self.gmm.means_[k][0]
         var_t_k = self.gmm.covariances_[k][0,0]
         probs += gaussian(xi_t, mu_t_k, var_t_k)
     return probs
Example #8
def train(epoch, model, optimizer, training_data_loader, mean, stddev,
          criterion):
    epoch_loss = 0
    for iteration, data in enumerate(training_data_loader, 1):

        target = data
        _input, noise = gaussian(target, mean, stddev)

        if device == 'cuda':
            _input = Variable(_input.cuda())
            noise = Variable(noise.cuda())
            target = Variable(target.cuda())

        output = model(_input)

        loss = criterion(output, noise)
        epoch_loss += loss.item()

        loss.backward()

        optimizer.step()

        optimizer.zero_grad()

        print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(
            epoch, iteration, len(training_data_loader), loss.item()))

    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(
        epoch, epoch_loss / len(training_data_loader)))
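The train loop above (and validate in Example #23) expects gaussian(target, mean, stddev) to return both the corrupted input and the noise itself, so the network can be trained to predict the noise residual. A plausible sketch under that assumption, using PyTorch tensors:

import torch

def gaussian(target, mean, stddev):
    # Hypothetical corruption helper: returns (noisy_input, noise) so the model
    # regresses the injected noise rather than the clean image.
    noise = torch.randn_like(target) * stddev + mean
    return target + noise, noise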
Example #9
def rp_dilutedpdf(pop, r=1, dr=0.1, fb=0.4, n=10000, fig=None, plot=True, allpdfs=False):  # add contrast curves here
    simr = rand.normal(size=n)*dr + r
    mainpdf = utils.gaussian(r,dr,norm=1-fb)

    inds = rand.randint(size(pop.dkepmag),size=n)

    diluted1 = utils.kde(dilutedradius(simr,pop.dkepmag[inds]),norm=fb/2,adaptive=False)
    diluted2 = utils.kde(dilutedradius(simr,-pop.dkepmag[inds]),norm=fb/2,adaptive=False)
    diluted2.renorm((fb/2)**2/quad(diluted2,0,20)[0])

    totpdf = mainpdf + diluted1 + diluted2

    if plot:
        pu.setfig(fig)
        rs = arange(0,2*r,0.01)
        p.plot(rs,totpdf(rs))
        if allpdfs:
            p.plot(rs,mainpdf(rs))
            p.plot(rs,diluted1(rs)) 
            p.plot(rs,diluted2(rs))

    if allpdfs:
        return totpdf,mainpdf,diluted1,diluted2
    else:
        return totpdf
Example #10
    def add_to_image(self, image, beam=None):
        """
        Add component to given instance of ``Image`` class.
        """
        # Cellsize [rad]
        dx, dy = image.dx, image.dy
        # Center of image [pix]
        x_c, y_c = image.x_c, image.y_c

        # Parameters of component
        try:
            # Jy, mas, mas, mas,  , rad
            flux, x0, y0, bmaj, e, bpa = self._p
        # If we call method inside ``CGComponent``
        except ValueError:
            flux, x0, y0, bmaj = self._p
            e = 1.
            bpa = 0.

        # There's ONE place to convert them
        x0 *= mas_to_rad
        y0 *= mas_to_rad
        bmaj *= mas_to_rad

        # TODO: Is it [Jy/beam]??
        # Amplitude of gaussian component [Jy/beam]
        # amp = flux / (2. * math.pi * (bmaj / mas_to_rad) ** 2. * e)
        # amp = flux / (2. * math.pi * (bmaj / abs(image.pixsize[0])) ** 2. * e)
        amp = 4. * np.log(2) * flux / (np.pi *
                                       (bmaj / abs(image.pixsize[0]))**2 * e)

        # Create gaussian function of (x, y) with given parameters
        gaussf = gaussian(amp, x0, y0, bmaj, e, bpa=bpa)

        # Calculating angular distances of cells from center of component
        # from cell numbers to relative distances
        # arrays with elements from 1 to imsize
        x, y = np.mgrid[1:image.imsize[0] + 1, 1:image.imsize[1] + 1]
        # from -imsize/2 to imsize/2
        x = x - x_c
        y = y - y_c
        # the same in rads
        x = x * dx
        y = y * dy
        ## relative to component center
        #x = x - x0
        #y = y - y0
        ## convert to mas cause all params are in mas
        #x = x / mas_to_rad
        #y = y / mas_to_rad

        # Creating grid with component's flux at each cell
        fluxes = gaussf(x, y)

        if beam is not None:
            fluxes = beam.convolve(fluxes)

        # Adding component's flux to image grid
        image._image += np.rot90(fluxes)[::-1, ::]
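add_to_image calls gaussian(amp, x0, y0, bmaj, e, bpa=bpa) and then evaluates the result on a coordinate grid, so the helper is presumably a factory that returns a callable elliptical Gaussian. A rough sketch under that assumption; the width and position-angle conventions below are guesses, not the library's definitions:

import numpy as np

def gaussian(amp, x0, y0, bmaj, e, bpa=0.0):
    # Hypothetical factory: f(x, y) is an elliptical Gaussian with peak amp,
    # centre (x0, y0), major-axis width bmaj, axial ratio e and position angle bpa.
    def f(x, y):
        xr = (x - x0) * np.cos(bpa) + (y - y0) * np.sin(bpa)
        yr = -(x - x0) * np.sin(bpa) + (y - y0) * np.cos(bpa)
        return amp * np.exp(-0.5 * ((xr / bmaj) ** 2 + (yr / (bmaj * e)) ** 2))
    return f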
Example #11
def stylize_text(model, inputs):

    text_name = inputs["input_image"]
    text = load_image(text_name, 1)
    text = to_var(text)

    label = inputs["scale"]

    text[:, 0:1] = gaussian(text[:, 0:1], stddev=0.2)

    img_str = model['netGlyph'](text, label * 2.0 - 1.0)
    img_str[:, 0:1] = gaussian(img_str[:, 0:1], stddev=0.2)
    result = to_data(model['netTexture'](img_str))

    output = save_image(result[0])

    return {"output_image": output}
Example #12
 def estimate(self, xi_t):
     result = 0.0
     for k in range(self.n_components):
         xi_s_k_head = self.xi_s_k(xi_t, k)
         mu_t_k = self.gmm.means_[k][0]
         var_t_k = self.gmm.covariances_[k][0,0]
         beta_k = gaussian(xi_t, mu_t_k, var_t_k) / self.get_denom(xi_t)
         result += xi_s_k_head * beta_k
     return result
Example #13
    def plot_experiment(self, path=""):
        color = self.color
        data = self.experiment_data
        cmap = sns.light_palette(color, as_cmap=True)

        fig, ax = plt.subplots()
        occupants, readings = (np.array(array) for array in zip(*data))

        # ax_left, im_left = plot_linear_fit(
        # ax_left, occupants, readings, self.model, self.model_sigma, color,
        # cmap)

        ax, im = plot_linear_fit(
            ax, readings, occupants,
            self.predictor, self.predictor_sigma,
            color, cmap
        )

        ax.set_xlabel("{} sensor readout ({})".format(self.name, self.units))
        ax.set_ylabel("Number of train car occupants")

        # cax, kw = mpl.colorbar.make_axes(
        # [ax_left, ax_right], location="bottom"
        # )

        # norm = mpl.colors.Normalize(vmin=0, vmax=1)
        # cbar = mpl.colorbar.ColorbarBase(
        #     ax, cmap=cmap, norm=norm, alpha=0.5)

        cbar = plt.colorbar(im, alpha=0.5, extend='neither', ticks=[
            gaussian(3 * self.predictor_sigma, 0, self.predictor_sigma),
            gaussian(2 * self.predictor_sigma, 0, self.predictor_sigma),
            gaussian(self.predictor_sigma, 0, self.predictor_sigma),
            gaussian(0, 0, self.predictor_sigma),
        ])
        # cbar.solids.set_edgecolor("face")

        cbar.set_ticklabels(
            [r'$3 \sigma$', r'$2 \sigma$', r'$\sigma$', '{:.2%}'.format(
                gaussian(0, 0, self.predictor_sigma))],
            update_ticks=True
        )

        fig.savefig(os.path.join(path, self.name+".svg"))
Example #14
    def get_observation_continuous(self, curr_pos=None):
        if isinstance(curr_pos, np.ndarray):
            curr_pos = curr_pos[0]
        elif curr_pos is None:  # Default to state of current instance
            curr_pos = self.robot_state

        door = 0
        for door_loc in self.door_locations:
            door += 0.6*utils.gaussian(curr_pos, door_loc, 0.5)  # Doors
        left = (-np.tanh(5 * (curr_pos + 13)) + 1) / 2  # Left wall
        right = (np.tanh(5 * (curr_pos - 13)) + 1) / 2  # Right wall
        return (door, left, right)
Example #15
    def update_emission(self):
        """
        Calculate the emission matrix given the estimated mus and sigmas
        
        Only the log scale is saved.
        """

        F = np.zeros((self.n, self.k))

        for i in range(self.k):
            F[:, i] = gaussian(self.obs, self.mu[i], self.sigma[i])

        self.log_F = np.log(F)
Example #16
 def likelihood_field(self, sensor_data):
     p_hit = 0.9
     p_rand = 0.1
     sig_hit = 6.0
     q = 0
     plist = utils.EndPoint(self.pos, self.bot_param, sensor_data)
     for i in range(len(plist)):
         if sensor_data[i] > self.bot_param[3] - 1 or sensor_data[i] < 1:
             continue
         dist = self.nearest_dist(plist[i][0], plist[i][1], 3, 0.2)
         q += math.log(p_hit * utils.gaussian(0, dist, sig_hit) +
                       p_rand / self.bot_param[3])
     return q
Example #17
    def __call__(self, landmarks, input_resolution):
        """
            Returns a Tensor which contains the generated heatmaps
            of all elements in the :attr:`landmarks` tensor.

        Args:
            landmarks (ndarray): ndarray ( 2 x N ) contains N two dimensional
            landmarks.
            input_resolution: resolution ( H x W ) is the resolution/dimension
            in which the landmarks are given.

        Returns:
            Tensor: The generated heatmaps ( N x outputH x outputW ).
        """
        self.inputH = input_resolution[0]
        self.inputW = input_resolution[1]
        self.outputH = self.resolution[0]
        self.outputW = self.resolution[1]
        heatmaps = np.zeros((landmarks.shape[1], self.outputH, self.outputW))
        for i in range(landmarks.shape[1]-1):
            if not np.isnan(landmarks[:, i]).any():
                tmp = utils.gaussian(np.array([self.inputH, self.inputW]),
                                          landmarks[:, i], self.gauss)
                scaled_tmp = sp.misc.imresize(tmp, [self.outputH, self.outputW])
                scaled_tmp = (scaled_tmp - min(scaled_tmp.flatten())) / (
                    max(scaled_tmp.flatten()) - min(scaled_tmp.flatten()))
            else:
                scaled_tmp = np.zeros([self.outputH, self.outputW])
            heatmaps[i] = scaled_tmp

        tmp = utils.gaussian(np.array([self.inputH, self.inputW]),
                                  landmarks[:, -1], 4 * self.gauss)
        scaled_tmp = sp.misc.imresize(tmp, [self.outputH, self.outputW])
        scaled_tmp = (scaled_tmp - min(scaled_tmp.flatten())) / (
            max(scaled_tmp.flatten()) - min(scaled_tmp.flatten()))
        heatmaps[landmarks.shape[1]-1] = scaled_tmp

        return torch.from_numpy(heatmaps)
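The heatmap generator in this example calls utils.gaussian(np.array([H, W]), landmark, sigma) and uses the result as an H x W image, so the helper presumably rasterises an isotropic 2D Gaussian centred on the landmark. A sketch under that assumption (treating the landmark as (x, y) pixel coordinates is itself a guess):

import numpy as np

def gaussian(size, center, sigma):
    # Hypothetical rasteriser: size = [H, W], center = (x, y), isotropic sigma.
    h, w = int(size[0]), int(size[1])
    ys, xs = np.mgrid[0:h, 0:w]
    d2 = (xs - center[0]) ** 2 + (ys - center[1]) ** 2
    return np.exp(-d2 / (2.0 * sigma ** 2))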
Example #18
 def LikelihoodField(self, sensor_data):
     p_hit = 0.9
     p_rand = 0.1
     sig_hit = 3.0
     q = 1
     plist = utils.EndPoint(self.pos, self.bot_param, sensor_data)
     for i in range(len(plist)):
         if sensor_data[i] > self.bot_param[3] - 1 or sensor_data[i] < 1:
             continue
         dist = self.NearestDistance(plist[i][0], plist[i][1], 4, 0.2)
         q = q * (p_hit * utils.gaussian(0, dist, sig_hit) +
                  p_rand / self.bot_param[3])
         #q += math.log(p_hit*utils.gaussian(0,dist,sig_hit) + p_rand/self.bot_param[3])
     return q
Example #19
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.En = encoder(self.dataset)
        self.De = decoder(self.dataset)
        self.VAE = VAE_T(self.En, self.De)
        self.VAE_optimizer = optim.Adam(self.VAE.parameters(),lr=args.lrG, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.VAE.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.De)
        utils.print_network(self.En)
        print('-----------------------------------------------')

        # load dataset
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST('data/mnist', train=True, download=False,
                                                                          transform=transforms.Compose(
                                                                              [transforms.ToTensor()])),
                                                           batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(
                datasets.FashionMNIST('data/fashion-mnist', train=True, download=True, transform=transforms.Compose(
                    [transforms.ToTensor()])),
                batch_size=self.batch_size, shuffle=True)

        self.z_dim = 62
        self.z_n = utils.gaussian(self.batch_size, self.z_dim)

        # fixed noise
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.from_numpy(self.z_n).type(torch.FloatTensor).cuda(), volatile=True)
        else:
            self.sample_z_ = Variable(torch.from_numpy(self.z_n).type(torch.FloatTensor), volatile=True)
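Here utils.gaussian(batch_size, dim) is fed straight into torch.from_numpy, so it presumably returns a NumPy array of normally distributed latent codes. A minimal sketch of such a sampler (the float32 cast is an assumption made to match the later .type(torch.FloatTensor)):

import numpy as np

def gaussian(batch_size, dim, mean=0.0, stddev=1.0):
    # Hypothetical latent sampler: a (batch_size, dim) array of N(mean, stddev) draws.
    return np.random.normal(mean, stddev, size=(batch_size, dim)).astype(np.float32)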
Example #20
 def encrypt_raw(cls, pp: RegevPublicParameters, k: RegevKey, mes: np.ndarray, mes_mod: int = 2, seed=None):
     rng = SeededRNG(seed or secrets.token_bytes(32))
     if mes.ndim != 1:
         raise MessageWrongDimensions()
     if mes.shape[0] != pp.bs:
         raise MessageWrongSize(
             f"Expected message size {pp.bs}, got {mes.shape[0]}")
     mes = mes % mes_mod
     r = uniform(2, rng, lbound=-1, shape=(1, pp.m))
     # print(r.tolist())
     c1 = r @ k.A % pp.cipher_mod
     b = (c1 @ k.sec + gaussian(pp.bound, rng,
                                shape=(pp.m, pp.bs))) % pp.cipher_mod
     c2 = (b + mround(pp.cipher_mod / mes_mod) * mes) % pp.cipher_mod
     return BatchedRegevCiphertext(c1, c2, mes_mod)
Example #21
def get_heat_source(image_size, batch_size):
    '''
    Return a Gaussian heat source.
    '''
    heat_src = []
    for i in range(batch_size):
        scale = np.random.uniform(3, 3.5) / (image_size**2)
        f = -utils.gaussian(image_size - 2) * scale  # Negative
        f = torch.Tensor(f).unsqueeze(0)
        f = utils.pad_boundary(f, torch.zeros(1, 4))  # (1 x image_size x image_size)
        heat_src.append(f)
    heat_src = torch.cat(heat_src, dim=0)
    if torch.cuda.is_available():
        heat_src = heat_src.cuda()
    return heat_src
Example #22
    def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = False
        self.model_name = args.gan_type

        # networks init
        self.En = encoder(self.dataset)
        self.De = decoder(self.dataset)
        self.VAE = VAE_T(self.En, self.De)
        self.VAE_optimizer = optim.Adam(self.VAE.parameters(),
                                        lr=args.lrG,
                                        betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.VAE.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.De)
        utils.print_network(self.En)
        print('-----------------------------------------------')

        # load dataset
        self.data_loader = DataLoader()

        self.z_dim = 62
        self.z_n = utils.gaussian(self.batch_size, self.z_dim)

        # fixed noise
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.from_numpy(self.z_n).type(
                torch.FloatTensor).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.from_numpy(self.z_n).type(
                torch.FloatTensor),
                                      volatile=True)
Example #23
def validate(model, testing_data_loader, mean, stddev, criterion):
    avg_psnr = 0
    model.eval()
    with torch.no_grad():
        for data in testing_data_loader:
            target = data
            _input, noise = gaussian(target, mean, stddev)

            _input = Variable(_input.cuda())
            noise = Variable(noise.cuda())
            target = Variable(target.cuda())

            output = model(_input)
            mse = criterion(output, noise)
            psnr = 10 * log10(1 / mse.item())
            avg_psnr += psnr
    print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr /
                                             len(testing_data_loader)))
Example #24
def EM_gaussian_E_step(data, alpha, mu, sigma):
    """ Function that computes the E-step of the EM algorithm
    
    Params:
        data: data set
        alpha: proportion of each Gaussian
        mu: list with mean values
        sigma: list with covariance matrices

    Returns:
        tau: matrix with the probabilities P(z=j|x, theta)
    """
    tau = np.zeros((data.shape[0], len(alpha)))
    for j in np.arange(0, len(alpha)):
        tau[:, j] = alpha[j] * gaussian(data, mu[j], sigma[j])
    denominator = tau.sum(1)
    for j in np.arange(0, len(alpha)):
        tau[:, j] = tau[:, j] / denominator
    return tau
Example #25
def MixtureGaus_loglike(data, alpha, mu, sigma):
    """
    Params:
        data: data set
        alpha: proportion of each Gaussian.
        mu: list with mean values
        sigma: list with covariate matrices
    
    Returns:
        loglike: loglikelihood of the observations
    """

    aux_loglike = np.zeros((data.shape[0], len(alpha)))

    for j in np.arange(0, len(alpha)):
        aux_loglike[:, j] = alpha[j] * gaussian(data, mu[j], sigma[j])

    aux_loglike = np.apply_along_axis(lambda x: np.log(np.sum(x)), 1,
                                      aux_loglike)
    loglike = np.sum(aux_loglike) / float(len(data))

    return loglike
Example #26
def nll_loss(alpha, sigma, mu, t):
    """
    Loss function, minimizes the negative log-likelihood.
    :param alpha: mixing coefficients (priors)
    :param sigma: covariances, one per kernel
    :param mu: expected value of each kernel
    :param t: batch of target vectors
    :return: loss
    """
    batch_size = alpha.shape[0]
    k = alpha.shape[1]
    t_dim = int(mu.shape[1] / k)

    loss = torch.zeros(batch_size)
    if torch.cuda.is_available():
        loss = loss.cuda()

    for i in range(k):
        likelihood_t = gaussian(t, mu[:, i * t_dim:(i + 1) * t_dim], sigma[:, i])
        loss += alpha[:, i] * likelihood_t
    loss = torch.mean(-torch.log(loss))
    return loss
Example #27
def write_word(i, word):
    # randomized
    font_path = font_paths[i % 2]
    underlined = i % 3 == 1
    inverted = i % 3 == 1

    font = ImageFont.truetype(font_path, np.random.randint(16, 20))
    img_pil = Image.fromarray(np.ones((h, w, 3), np.uint8) * 255)
    draw = ImageDraw.Draw(img_pil)

    # add text
    draw.text((x, y), word, font=font, fill=color)

    # add underline
    tw, th = draw.textsize(word, font=font)
    if underlined:
        draw.line((x, y + th + 1, x + tw, y + th + 1), fill=color)

    if inverted:
        img_pil = ImageOps.invert(img_pil)

    # rotate slightly
    angle = np.random.uniform(-0.2, 0.2)
    img_pil = img_pil.rotate(angle)

    # crop
    img = np.array(img_pil)[y:y + th + 4, x:x + tw]

    # add gaussian noise to image
    img = gaussian(img)

    print(i, word)

    with open(f"out/img_{i}.txt", 'w') as f:
        f.write(word)
    cv2.imwrite(f"out/img_{i}.png", img)
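In write_word, gaussian(img) is applied to a uint8 OpenCV image just before it is written to disk, so it is presumably an image-noise helper with a default noise level. A sketch under that assumption; the default stddev here is invented for illustration:

import numpy as np

def gaussian(img, mean=0.0, stddev=8.0):
    # Hypothetical image-noise helper: add Gaussian noise and clip back to uint8.
    noisy = img.astype(np.float64) + np.random.normal(mean, stddev, img.shape)
    return np.clip(noisy, 0, 255).astype(np.uint8)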
Example #28
 def class_probability(target_val):
     prob = target_dist[target_val]
     for attr in dataset.inputs:
         prob *= gaussian(means[target_val][attr],
                          deviations[target_val][attr], example[attr])
     return prob
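class_probability passes its arguments in the order (mean, deviation, value), unlike most other examples on this page, which use (value, mean, sigma). A sketch of a density helper with that argument order, under the assumption that this is what the surrounding naive-Bayes code expects:

import math

def gaussian(mean, st_dev, x):
    # Hypothetical density with (mean, st_dev, x) argument order.
    return (1.0 / (math.sqrt(2.0 * math.pi) * st_dev)) * math.exp(
        -0.5 * ((x - mean) / st_dev) ** 2)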
Example #29
def train_VAE(_model):
    feature_a, neighbour1_a, degree1_a, logrmin_a, logrmax_a, smin_a, smax_a, modelnum_a, \
        pointnum1_a, maxdegree1_a, L1_a, cotw1_a = utils.load_data(featurefile_a, vcgan.resultmin, vcgan.resultmax, useS=vcgan.useS)
    #    dataname_a = _model.dataset_name_a
    #    featurefile_a = './'+dataname_a+'.mat'
    #    resultmax = 0.95
    #    resultmin = -0.95
    #    useS = True
    #    feature_a, neighbour1_a, degree1_a, logrmin_a, logrmax_a, smin_a, smax_a, modelnum_a, \
    #        pointnum1_a, maxdegree1_a, L1_a, cotw1_a = utils.load_data(featurefile_a, resultmin, resultmax, useS=useS)
    #    self.feature_a, self.neighbour1_a, self.degree1_a, self.logrmin_a, self.logrmax_a, self.smin_a, self.smax_a, self.modelnum_a, \
    #        self.pointnum1_a, self.maxdegree1_a, self.L1_a, self.cotw1_a = utils.load_data(featurefile_a, resultmin, resultmax, useS=useS)

    #    Ilf = np.zeros((_model.batch_size, 1))
    rng = np.random.RandomState(23456)

    #    if False:
    if os.path.isfile("id.dat"):
        id = pickle.load(open('id.dat', 'rb'))
        id.show()
        Ia = id.Ia
#        Ib = id.Ib
    else:
        Ia = np.arange(len(feature_a))
        #        Ib = np.arange(len(_model.feature_b))
        Ia = random.sample(list(Ia),
                           int(len(feature_a) * (1 - vcgan.vae_ablity)))
        #        Ib = random.sample(list(Ib), int(len(_model.feature_b) * (1 - vcgan.vae_ablity)))
        #        id = Id(Ia, Ib)
        id = utils.Id(Ia)
        #id.show()
        f = open('id.dat', 'wb')
        pickle.dump(id, f, 0)
        f.close()
        id = pickle.load(open('id.dat', 'rb'))
        id.show()

    _model.file.write("VAE start\n")
    #    for step in xrange(_model.start_step_vae, vcgan.n_epoch_Vae):
    for step in range(_model.start_step_vae, vcgan.n_epoch_Vae):
        rng.shuffle(Ia)
        #        rng.shuffle(Ib)
        #        train each batch
        for i in range(0, len(Ia), _model.batch_size):
            timeserver1 = time.strftime('%Y-%m-%d %H:%M:%S',
                                        time.localtime(time.time()))
            feature = feature_a[Ia[i:i + _model.batch_size]]
            random_a = utils.gaussian(len(feature), _model.hidden_dim)
            _, cost_generation_a, cost_latent_a, l2_loss_a = _model.sess.run(
                [
                    _model.train_op_vae_a, _model.neg_loglikelihood_a,
                    _model.KL_divergence_a, _model.r2_a
                ],
                feed_dict={
                    _model.inputs_a: feature,
                    _model.random_a: random_a
                })
            print("%s Processed %d|%d" %
                  (timeserver1, i + _model.batch_size, len(Ia)))

        timeserver1 = time.strftime('%Y-%m-%d %H:%M:%S',
                                    time.localtime(time.time()))
        print(
            "|%s step: [%2d|%d]cost_generation_a: %.8f, cost_latent_a: %.8f, l2_loss_a: %.8f"
            % (timeserver1, step + 1, vcgan.n_epoch_Vae, cost_generation_a,
               cost_latent_a, l2_loss_a))

        #        feature_a = _model.feature_a[Ia]
        ##        feature_b = _model.feature_b[Ib]
        #        random_a = utils.gaussian(len(feature_a), _model.hidden_dim)
        ##        random_b = gaussian(len(feature_b), _model.hidden_dim)
        #
        #        # ------------------------------------VAE a
        #        _, cost_generation_a, cost_latent_a, l2_loss_a = _model.sess.run(
        #            [_model.train_op_vae_a, _model.neg_loglikelihood_a, _model.KL_divergence_a, _model.r2_a],
        #            feed_dict={_model.inputs_a: feature_a, _model.random_a: random_a})
        #        print("|%s step: [%2d|%d]cost_generation_a: %.8f, cost_latent_a: %.8f, l2_loss_a: %.8f" % (
        #            timeserver1, step + 1, vcgan.n_epoch_Vae, cost_generation_a, cost_latent_a, l2_loss_a))
        ##        # ------------------------------------VAE b
        ##        _, cost_generation_b, cost_latent_b, l2_loss_b = _model.sess.run(
        ##            [_model.train_op_vae_b, _model.neg_loglikelihood_b, _model.KL_divergence_b, _model.r2_b],
        ##            feed_dict={_model.inputs_b: feature_b, _model.random_b: random_b})
        ##        print("|%s step: [%2d|%d]cost_generation_b: %.8f, cost_latent_b: %.8f, l2_loss_b: %.8f" % (
        ##            timeserver1, step + 1, vcgan.n_epoch_Vae, cost_generation_b, cost_latent_b, l2_loss_b))

        _model.file.write("|%s Epoch: [%5d|%d] cost_generation_a: %.8f, cost_latent_a: %.8f, l2_loss_a: %.8f\n" \
                        % (timeserver1, step + 1, vcgan.n_epoch_Vae, cost_generation_a, cost_latent_a, l2_loss_a))

        #        _model.file.write("|%s Epoch: [%5d|%d] cost_generation_b: %.8f, cost_latent_b: %.8f, l2_loss_b: %.8f\n" \
        #                        % (timeserver1, step + 1, vcgan.n_epoch_Vae, cost_generation_b, cost_latent_b, l2_loss_b))

        _model.file_vae.write(
            "A %d %.8f %.8f %.8f\n" %
            (step + 1, cost_generation_a, cost_latent_a, l2_loss_a))
        #        _model.file_vae.write("B %d %.8f %.8f %.8f\n" % (step + 1, cost_generation_b, cost_latent_b, l2_loss_b))

        if vcgan.tb and (step + 1) % 20 == 0:
            #            s = _model.sess.run(_model.merge_summary,
            #                              feed_dict={_model.inputs_a: feature_a, _model.inputs_b: feature_b,
            #                                         _model.random_a: random_a,
            #                                         _model.random_b: random_b, _model.lf_dis: Ilf})
            s = _model.sess.run(_model.merge_summary,
                                feed_dict={
                                    _model.inputs_a: feature,
                                    _model.random_a: random_a
                                })
            _model.write.add_summary(s, step)

        if (step + 1) % 20 == 0:
            print('Saving model...\n')
            #            print(vcgan.logfolder)
            #            if vcgan.test_vae:
            #                test_utils.test_vae(_model, step)
            save_path = _model.saver_vae_a.save(_model.sess,
                                                _model.checkpoint_dir_vae_a +
                                                '/vae_a.model',
                                                global_step=step + 1)
            print("Model saved in path: %s\n" % save_path)
            print("Testing the model...\n")
            test_utils.recons_error_a(_model, step)

            # self.saver_vae_b.save(self.sess, self.checkpoint_dir_vae_b + '/vae_b.model', global_step=step + 1)
#            _model.saver_vae_all.save(_model.sess, _model.checkpoint_dir_vae_all + '/vae_all.model', global_step=step + 1)
    print(
        '---------------------------------train VAE success!!----------------------------------'
    )
Example #30
        if verbose:
            print("No synthetic emission data found. Re-scanning temperature range.")
        resp = load_temp_responses()
        if n_params == 1:
            resp /= resp[2, :]
            resp[np.isnan(resp)] = 0
        if verbose:
            print(resp.min(axis=1), np.nanmin(resp, axis=1))
            print(resp.max(axis=1), np.nanmax(resp, axis=1))
        logt = np.arange(0, 15.05, 0.05)
        delta_t = logt[1] - logt[0]
        model = np.memmap(
            filename="synth_emiss_{}pars".format(n_params), dtype="float32", mode="w+", shape=(n_vals, n_wlens)
        )
        for p, params in enumerate(parvals):
            dem = gaussian(logt, *params)
            f = resp * dem
            model[p, :] = np.sum(f, axis=1) * delta_t
        if verbose:
            print(model.max(axis=0))
            print(model[np.isnan(model)].size)
        if n_params == 1:
            normmod = model[:, 2].reshape((n_vals, 1))
            model /= normmod
        model.flush()
        if verbose:
            print(model.max(axis=0))
else:
    model = None

model = comm.bcast(model, root=0)
Example #31
    def update_texture_generator(self,
                                 x,
                                 y,
                                 t=None,
                                 l=None,
                                 VGGfeatures=None,
                                 style_targets=None):
        fake_y = self.G_T(x)

        # compute L_distance
        # turn the style distance map into a black-and-white image
        BW = x[:, 0, :, :].clone().detach().unsqueeze(dim=1)
        # print(BW.shape)
        BW = BW.expand(BW.shape[0], 3, BW.shape[2], BW.shape[3])
        C = BW
        D = x
        X = fake_y
        # detach so the distance term does not backpropagate through C or D
        C = C.detach()
        D = D.detach()
        # Ldistance = 1e-6 * torch.sum((C * D - X * D))
        Ldistance = torch.norm(C * D - X * D,
                               'fro') * 0.5 * self.lambda_distance

        fake_concat = torch.cat((x, fake_y), dim=1)
        fake_output = self.D_T(fake_concat)
        LTadv = -fake_output.mean() * self.lambda_tadv
        Lrec = self.loss(fake_y, y) * self.lambda_l1
        LT = LTadv + Lrec + Ldistance
        if t is not None:
            with torch.no_grad():
                t[:, 0:1] = gaussian(t[:, 0:1], stddev=0.2)
                source_mask = self.G_S(t, l).detach()
                source = source_mask.clone()
                source[:, 0:1] = gaussian(source[:, 0:1], stddev=0.2)
                smaps_fore = [(A.detach() + 1) * 0.5
                              for A in self.getmask(source_mask[:, 0:1])]
                smaps_back = [1 - A for A in smaps_fore]
            fake_t = self.G_T(source)
            out = VGGfeatures(fake_t)
            style_losses1 = [
                self.style_weights[a] *
                self.gramloss(A * smaps_fore[a], style_targets[0][a])
                for a, A in enumerate(out)
            ]
            style_losses2 = [
                self.style_weights[a] *
                self.gramloss(A * smaps_back[a], style_targets[1][a])
                for a, A in enumerate(out)
            ]
            Lsty = (sum(style_losses1) + sum(style_losses2)) * self.lambda_sty
            LT = LT + Lsty
        # global id
        # if id % 20 == 0:
        #    viz_img = to_data(torch.cat((x[0], y[0], fake_y[0]), dim=2))
        #    save_image(viz_img, '../output/texturee_result%d.jpg'%id)
        # id += 1
        self.trainerG_T.zero_grad()
        LT.backward()
        self.trainerG_T.step()
        return (Ldistance.data.mean(), LTadv.data.mean(), Lrec.data.mean(),
                Lsty.data.mean() if t is not None else 0)
Example #32
 def forward(self, x, l):
     x[:, 0:1] = gaussian(x[:, 0:1], stddev=0.2)
     xl = self.G_S(x, l)
     xl[:, 0:1] = gaussian(xl[:, 0:1], stddev=0.2)
     return self.G_T(xl)
Example #33
 def class_probability(targetval):
     prob = target_dist[targetval]
     for attr in dataset.inputs:
         prob *= gaussian(means[targetval][attr], deviations[targetval][attr], example[attr])
     return prob