Example #1
 def nll_poisson(self, preds, target):
     if self.normalize_nll:
         return nn.PoissonNLLLoss(reduction='none')(preds, target).view(
             preds.size(0), -1).mean(dim=1)
     else:
         return nn.PoissonNLLLoss(reduction='none')(preds, target).view(
             preds.size(0), -1).sum(dim=1)
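A minimal shape check of the per-example reduction used above (tensor sizes here are illustrative assumptions, not from the original model):

import torch
import torch.nn as nn

preds = torch.rand(4, 10, 5)                  # (batch, time, neurons), made-up shapes
target = torch.poisson(torch.ones(4, 10, 5))
loss = nn.PoissonNLLLoss(reduction='none')(preds, target)  # elementwise, same shape as preds
per_example = loss.view(preds.size(0), -1).mean(dim=1)     # one scalar per batch element
assert per_example.shape == (4,)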
Example #2
def train_manager():
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    num_epochs = 300
    lr = 0.000001

    model = LFADSG(time=100,
                   neurons=50,
                   dim_e=16,
                   dim_c=24,
                   dim_d=32,
                   gcn_hidden=[8])
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    criterion = nn.PoissonNLLLoss(log_input=True)

    train_loader = get_train_data_loader(batch_size=128)
    valid_loader = get_valid_data_loader(batch_size=128)

    # optimizer
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    # lr scheduler
    scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.998)

    train_model(model, train_loader, valid_loader, criterion, optimizer,
                scheduler, num_epochs)
Example #3
    def __init__(self, hparams):
        """

        Parameters
        ----------
        hparams : :obj:`dict`
            - model_type (:obj:`str`): 'mlp' | 'mlp-mv' | 'lstm'
            - input_size (:obj:`int`)
            - output_size (:obj:`int`)
            - n_hid_layers (:obj:`int`)
            - n_hid_units (:obj:`int`)
            - n_lags (:obj:`int`): number of lags in input data to use for temporal convolution
            - noise_dist (:obj:`str`): 'gaussian' | 'gaussian-full' | 'poisson' | 'categorical'
            - activation (:obj:`str`): 'linear' | 'relu' | 'lrelu' | 'sigmoid' | 'tanh'

        """
        super().__init__()
        self.hparams = hparams
        self.model = None
        self.build_model()

        # choose loss based on noise distribution of the model
        if self.hparams['noise_dist'] == 'gaussian':
            self._loss = nn.MSELoss()
        elif self.hparams['noise_dist'] == 'gaussian-full':
            from behavenet.fitting.losses import GaussianNegLogProb
            self._loss = GaussianNegLogProb()  # model holds precision mat
        elif self.hparams['noise_dist'] == 'poisson':
            self._loss = nn.PoissonNLLLoss(log_input=False)
        elif self.hparams['noise_dist'] == 'categorical':
            self._loss = nn.CrossEntropyLoss()
        else:
            raise ValueError('"%s" is not a valid noise dist' %
                             self.hparams['noise_dist'])
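For reference, a minimal, hypothetical hparams dict consistent with the docstring above (all values are illustrative):

hparams = {
    'model_type': 'mlp',
    'input_size': 16,
    'output_size': 8,
    'n_hid_layers': 2,
    'n_hid_units': 32,
    'n_lags': 4,
    'noise_dist': 'poisson',   # selects nn.PoissonNLLLoss(log_input=False) above
    'activation': 'relu',
}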
Example #4
    def __init__(self,
        core=Core(),
        readout=Readout(),
        output_nl=nn.Softplus(),
        loss=nn.PoissonNLLLoss(log_input=False),
        val_loss=None,
        detach_core=False,
        learning_rate=1e-3,
        batch_size=1000,
        num_workers=0,
        data_dir='',
        optimizer='AdamW',
        weight_decay=1e-2,
        amsgrad=False,
        betas=[.9,.999],
        max_iter=10000,
        **kwargs):

        super().__init__()
        self.core = core
        self.readout = readout
        self.detach_core = detach_core
        self.save_hyperparameters('learning_rate', 'batch_size',
            'num_workers', 'data_dir', 'optimizer', 'weight_decay', 'amsgrad', 'betas',
            'max_iter')
        
        if val_loss is None:
            self.val_loss = loss
        else:
            self.val_loss = val_loss

        self.output_nl = output_nl
        self.loss = loss
Example #5
 def _get_loss(self, loss_spec):
     if not isinstance(loss_spec, str):
         return loss_spec
     elif loss_spec == 'mse':
         return nn.MSELoss(reduction='mean')
     elif loss_spec == 'sse':
         return nn.MSELoss(reduction='sum')
     elif loss_spec == 'crossentropy':
         # Cross-entropy loss is used for multiclass classification. It expects inputs of
         # shape (minibatch_size, C), where C is the number of categories, and a 1-d target
         # of dtype long holding the index of the correct category. This formatting differs
         # from most other loss functions available to autodiff compositions, so a wrapper
         # function is needed to package the inputs properly.
         cross_entropy_loss = nn.CrossEntropyLoss()
         return lambda x, y: cross_entropy_loss(x.unsqueeze(0),
                                                y.type(torch.LongTensor))
     elif loss_spec == 'l1':
         return nn.L1Loss(reduction='sum')
     elif loss_spec == 'nll':
         return nn.NLLLoss(reduction='sum')
     elif loss_spec == 'poissonnll':
         return nn.PoissonNLLLoss(reduction='sum')
     elif loss_spec == 'kldiv':
         return nn.KLDivLoss(reduction='sum')
     else:
         raise AutodiffCompositionError(
             "Loss type {} not recognized. Loss argument must be a string or function. "
             "Currently, the recognized loss types are Mean Squared Error, Sum Squared "
             "Error, Cross Entropy, L1 loss, Negative Log Likelihood loss, Poisson "
             "Negative Log Likelihood, and KL Divergence. These are specified as 'mse', "
             "'sse', 'crossentropy', 'l1', 'nll', 'poissonnll', and 'kldiv' "
             "respectively.".format(loss_spec))
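To illustrate the 'crossentropy' wrapper above, a minimal sketch under assumed shapes (a 1-d score vector and a single target index):

import torch
import torch.nn as nn

cross_entropy_loss = nn.CrossEntropyLoss()
wrapped = lambda x, y: cross_entropy_loss(x.unsqueeze(0), y.type(torch.LongTensor))

scores = torch.randn(5)                   # scores for C = 5 categories
target = torch.tensor([2])                # index of the correct category
loss = wrapped(scores, target)            # x is unsqueezed to (1, 5) = (minibatch, C)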
Example #6
    def __init__(self,
                 nb_kers,
                 nb_tk,
                 nb_sk,
                 time_lags=12,
                 rot_kernel_size=None):
        super(ConvNIM, self).__init__()

        if rot_kernel_size is None:
            rot_kernel_size = [3, 3, 3]

        padding = [k - 1 for k in rot_kernel_size]
        self.chomp3d = Chomp(chomp_sizes=padding, nb_dims=3)
        self.conv = RotConv3d(
            in_channels=2,
            out_channels=nb_kers,
            nb_rotations=8,
            kernel_size=rot_kernel_size,
            padding=padding,
            bias=False,
        )
        self.relu = nn.ReLU(inplace=True)
        self.temporal_fc = nn.Linear(time_lags, nb_tk, bias=False)
        self.spatial_fc = nn.Linear(225, nb_sk, bias=False)

        self.layer = nn.Linear(nb_kers * 8 * nb_tk * nb_sk, 12, bias=True)

        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")
        self.softplus = nn.Softplus()

        print_num_params(self)
Example #7
def get_objective(objective):
    if isinstance(objective, str):
        objective = objective.lower()
        if objective in ['l1', 'l1loss']:
            return nn.L1Loss()
        elif objective in ['nll', 'nllloss']:
            return nn.NLLLoss()
        elif objective in ['nll2d', 'nllloss2d']:
            return nn.NLLLoss2d()
        elif objective in ['poissonnll', 'poissonnllloss']:
            return nn.PoissonNLLLoss()
        elif objective in ['kldiv', 'kldivloss']:
            return nn.KLDivLoss()
        elif objective in ['mse', 'mseloss']:
            return nn.MSELoss()
        elif objective in ['bce', 'bceloss']:
            return nn.BCELoss()
        elif objective in ['smoothl1', 'smoothl1loss']:
            return nn.SmoothL1Loss()
        elif objective in ['crossentropy', 'cross_entropy']:
            return nn.CrossEntropyLoss()
        elif objective in ['ctc', 'ctcloss']:
            return nn.CTCLoss()
        else:
            raise ValueError('unknown objective: {}'.format(objective))
    elif isinstance(objective, _Loss):
        return objective
    else:
        raise ValueError('unknown argument {}'.format(objective))
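Usage would then be, e.g. (a sketch, assuming get_objective is in scope):

criterion = get_objective('poissonnll')    # returns nn.PoissonNLLLoss()
passthrough = get_objective(nn.MSELoss())  # _Loss instances are returned unchanged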
Example #8
    def __init__(self,
                 model_dim,
                 nb_sk,
                 nb_tk,
                 lr: float = 1e-3,
                 wd: float = 2.0,
                 tmax: int = 10,
                 eta_min: float = 1e-6,
                 verbose=False,):
        super(SingleCellReadout, self).__init__()

        self.temporal_fcs = nn.ModuleList([
            nn.Linear(11, nb_tk[0], bias=False),
            nn.Linear(6, nb_tk[1], bias=False),
            nn.Linear(3, nb_tk[2], bias=False),
        ])
        self.spatial_fcs = nn.ModuleList([
            nn.Linear(225, nb_sk[0], bias=False),
            nn.Linear(64, nb_sk[1], bias=False),
            nn.Linear(16, nb_sk[2], bias=False),
        ])

        self.relu = nn.ReLU(inplace=True)
        model_dims = [model_dim, model_dim * 2, model_dim * 4]
        num_filters = sum([np.prod(item) for item in zip(nb_tk, nb_sk, model_dims)])
        self.layer = nn.Linear(num_filters, 1)
        self.softplus = nn.Softplus()
        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")

        self.optim = None
        self.optim_schedule = None
        self._setup_optim(lr, wd, tmax, eta_min)

        if verbose:
            print_num_params(self)
Example #9
    def __init__(self, config):
        super(MTLayer, self).__init__()
        assert not config.multicell, "For single cell modeling only"
        self.config = config

        num_units = [1] + config.nb_vel_tuning_units + [1]
        layers = []
        for i in range(len(config.nb_vel_tuning_units) + 1):
            layers += [
                nn.Conv2d(num_units[i], num_units[i + 1], 1),
                nn.LeakyReLU()
            ]

        self.vel_tuning = nn.Sequential(*layers)
        self.dir_tuning = nn.Linear(2, 1, bias=False)

        self.temporal_kernel = nn.Linear(config.time_lags, 1, bias=False)
        self.spatial_kernel = nn.Linear(config.grid_size**2, 1, bias=True)

        self.criterion = nn.PoissonNLLLoss(log_input=False)
        self.reg_mats_dict = create_reg_mat(config.time_lags, config.grid_size)
        self.activation = get_activation_fn(config.readout_activation_fn)

        self.init_weights()
        self._load_vel_tuning()
        print_num_params(self)
Example #10
    def __init__(self, config: ReadoutConfig, verbose=False):
        super(ConvReadout, self).__init__()

        self.config = config

        _temp_dims = [11, 6, 3]
        _spat_dims = [15, 8, 4]
        spatiotemporal = [
            nn.Sequential(
                weight_norm(nn.Conv3d(
                    in_channels=config.core_dim * 2**i,
                    out_channels=config.nb_units[i],
                    kernel_size=config.kernel_sizes[i],
                    groups=config.groups[i],)),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool3d(1),
                nn.Flatten(),
            )
            for i in config.include_lvls
        ]
        self.spatiotemporal = nn.ModuleList(spatiotemporal)
        self.dropout = nn.Dropout(config.dropout)

        self.layer = nn.Linear(sum(config.nb_units), len(config.useful_cells[config.expt]), bias=True)
        self.softplus = nn.Softplus()
        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")

        self.apply(get_init_fn(config.init_range))
        if verbose:
            print_num_params(self)
Example #11
 def nll_poisson(self, preds, target, masks):
     if self.normalize_nll:
         loss = nn.PoissonNLLLoss(reduction='none')(preds, target)
         return (loss * masks.unsqueeze(-1)).view(preds.size(0), -1).sum(
             dim=-1) / (masks.view(masks.size(0), -1).sum(dim=1))
     else:
         raise NotImplementedError()
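A shape sketch for the masked variant above (sizes are illustrative assumptions; masks flag valid time steps per sample):

import torch
import torch.nn as nn

preds = torch.rand(4, 10, 5)                   # (batch, time, neurons)
target = torch.poisson(torch.ones(4, 10, 5))
masks = torch.ones(4, 10)                      # (batch, time), 1 = valid step

loss = nn.PoissonNLLLoss(reduction='none')(preds, target)  # (4, 10, 5)
masked = loss * masks.unsqueeze(-1)                        # broadcast over neurons
per_example = masked.view(4, -1).sum(dim=-1) / masks.view(4, -1).sum(dim=1)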
Example #12
def ge_test_extract_fun(data, n_device, batchsize, n_targets, model_path, n_extract):
    # load data
    test_set = ge_data.ge_test_dataset(data)
    test_loader = DataLoader(test_set, batch_size=batchsize, shuffle=False, num_workers=50)
    device_str = "cuda:{}".format(n_device)
    device = torch.device(device_str if torch.cuda.is_available() else "cpu")
    print("used device : ", device)
    # loss function
    loss_fun = nn.PoissonNLLLoss()
    # load the model
    test_model = ge_nn.Net(n_targets=n_targets)
    test_model.to(device)
    test_model.load_state_dict(torch.load(model_path))
    test_model.eval()
    # record losses
    test_loss = []
    # test-batch counter
    count = 0
    with torch.no_grad():
        for (test_in, test_out) in test_loader:
            count = count + 1
            # model input
            test_in,  test_out = test_in.to(device), test_out.to(device)
            out = test_model(test_in)
            # compute the loss
            loss = loss_fun(out, test_out)
            test_loss.append(loss.item())
            # for plotting: convert log-output back to rates
            out = torch.exp(out)
            if count == n_extract:
                test_out = test_out.to("cpu")
                out = out.to("cpu")
                print(test_out.shape)
                print(out.shape)
                test_out = torch.squeeze(test_out)
                out = torch.squeeze(out)
                print(test_out.shape)
                print(out.shape)
                with open('data_extract_log.txt', 'a') as f:
                    f.write('data{}:tensor detach numpy\n'.format(count))
                test_out = test_out.detach().numpy()
                out = out.detach().numpy()
                # extract data to csv files according to the test-data index (4229, 1024)
                with open('data_extract_log.txt', 'a') as f:
                    f.write('data{}:test out data csv write\n'.format(count))
                with open('/home/abe/data/genome_data/data310/data_test_out{}.csv'.format(count), 'w') as fc:
                    writer = csv.writer(fc)
                    writer.writerows(test_out)
                with open('data_extract_log.txt', 'a') as f:
                    f.write('data{}:model out data csv write 2\n'.format(count))
                with open('/home/abe/data/genome_data/data310/data_out{}.csv'.format(count), 'w') as fc:
                    writer = csv.writer(fc)
                    writer.writerows(out)
            else:
                with open('data_extract_log.txt', 'a') as f:
                    f.write('data{}:data went through\n'.format(count))
    print('data extract finished')
    with open('data_extract_log.txt', 'a') as f:
        f.write('data extract finished')
Example #13
 def __init__(self):
     self.activations = {
         'sigmoid': nn.Sigmoid(),
         'relu': nn.ReLU(),
         'relu6': nn.ReLU6(),
         'rrelu0103': nn.RReLU(0.1, 0.3),
         'rrelu0205': nn.RReLU(0.2, 0.5),
         'htang1': nn.Hardtanh(-1, 1),
         'htang2': nn.Hardtanh(-2, 2),
         'htang3': nn.Hardtanh(-3, 3),
         'tanh': nn.Tanh(),
         'elu': nn.ELU(),
         'selu': nn.SELU(),
         'hardshrink': nn.Hardshrink(),
         'leakyrelu01': nn.LeakyReLU(0.1),
         'leakyrelu001': nn.LeakyReLU(0.01),
         'logsigmoid': nn.LogSigmoid(),
         'prelu': nn.PReLU(),
     }
     self.loss_functions = {
         'binary_cross_entropy': nn.BCELoss(),
         'binary_cross_entropy_with_logits': nn.BCEWithLogitsLoss(),
         'poisson_nll_loss': nn.PoissonNLLLoss(),
         # 'cosine_embedding_loss': nn.CosineEmbeddingLoss(),
         # 'cross_entropy': nn.CrossEntropyLoss(),
         # 'ctc_loss': nn.CTCLoss(),
         'hinge_embedding_loss': nn.HingeEmbeddingLoss(),
         'kl_div': nn.KLDivLoss(),
         'l1_loss': nn.L1Loss(),
         'mse_loss': nn.MSELoss(),
         # 'margin_ranking_loss': nn.MarginRankingLoss(),
         # 'multilabel_margin_loss': nn.MultiLabelMarginLoss(),
         'multilabel_soft_margin_loss': nn.MultiLabelSoftMarginLoss(),
         # 'multi_margin_loss': nn.MultiMarginLoss(),
         # 'nll_loss': nn.NLLLoss(),
         'smooth_l1_loss': nn.SmoothL1Loss(),
         'soft_margin_loss': nn.SoftMarginLoss(),
         # 'triplet_margin_loss': nn.TripletMarginLoss(),
     }
     self.learning_rate = 2.8
     self.momentum = 0.8
     self.hidden_size = 10
     self.activation_hidden = 'relu'
     self.loss_function = 'binary_cross_entropy'
     self.sols = {}
     self.solsSum = {}
     self.random = 3
     self.random_grid = list(range(10))
     # self.hidden_size_grid = [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39]
     # self.hidden_size_grid = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
     # self.learning_rate_grid = [0.1, 1.0, 2.0, 3.0, 5.0]
     # self.activation_hidden_grid = list(self.activations.keys())
     # self.loss_function_grid = list(self.loss_functions.keys())
     self.grid_search = GridSearch(self)
     self.grid_search.set_enabled(False)
Example #14
def ge_train(data, n_device, lr, n_epochs, batchsize, beta1, beta2, model_dir):
    print('calling dataloader ...')
    train = ge_data.ge_train_dataset(data)
    val = ge_data.ge_train_dataset(data)
    #test = ge_data.hogehoge
    train_iter = DataLoader(train, batchsize)
    val_iter = DataLoader(val, batchsize, shuffle=False)
    ####################################################################
    train_model = ge_nn.Net()
    device_str = "cuda:{}".format(n_device)
    device = torch.device(device_str if torch.cuda.is_available() else "cpu")
    print("used device : ", device)
    train_model.to(device)
    optimizer = optim.Adam(train_model.parameters(), lr, betas=(beta1, beta2))
    loss_fun = nn.PoissonNLLLoss()
    loss_fun2 = nn.MSELoss()

    train_model.train()
    for epoch in range(n_epochs):
        counter = 0
        batch_loss = 0.0
        batch_acc = 0.0
        print('Epoch {}/{}'.format(epoch + 1, n_epochs))
        print('------------------------------------------------')
        for train_in, train_out in tqdm(train_iter):
            t1 = time.time()
            counter = counter + 1
            # define variables
            # model input
            train_in = train_in.to(device)
            train_out = train_out.to(device)
            out = train_model(train_in)
            # compute losses
            loss = loss_fun(out, train_out)
            mse_loss = loss_fun2(out, train_out)
            acc = ge_loss.log_r2_score(out, train_out)
            train_model.zero_grad()
            loss.backward()
            optimizer.step()
            batch_loss += loss.item()
            batch_acc += acc
            t2 = time.time()
            #print('{} batch{} poissonLoss: {:.4f} mseLoss: {:.4f} Acc: {:.4f} time {}'.format(epoch+1, counter,  loss, mse_loss, acc, t2-t1))
        print('------------------------------------------------')
        # average over the number of batches, not the batch size
        epoch_loss = batch_loss / counter
        epoch_acc = batch_acc / counter
        print('{} poissonLoss: {:.4f} mseLoss: {:.4f} Acc: {:.4f}'.format(
            epoch + 1, epoch_loss, mse_loss, epoch_acc))
        print('------------------------------------------------')
        print('------------------------------------------------')
        torch.save(train_model.state_dict(),
                   "./" + model_dir + "/model_epoch{}.pth".format(epoch))
Example #15
 def criterion(self,
               log_input=True,
               full=False,
               size_average=None,
               eps=1e-08,
               reduce=None,
               reduction='mean') -> nn.PoissonNLLLoss:
     return nn.PoissonNLLLoss(log_input=log_input,
                              full=full,
                              size_average=size_average,
                              eps=eps,
                              reduce=reduce,
                              reduction=reduction)
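As a quick sanity check on log_input (a minimal sketch, not part of the original class): with log_input=True the loss is computed as exp(input) - target * input.

import torch
import torch.nn as nn

log_rate = torch.tensor([0.5])
target = torch.tensor([2.0])
expected = torch.exp(log_rate) - target * log_rate
actual = nn.PoissonNLLLoss(log_input=True, reduction='mean')(log_rate, target)
assert torch.allclose(actual, expected)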
Example #16
 def loglikelihood(self, reduction):
     """
     Return the log-likelihood
     """
     if self._distr == 'poisson':
         if reduction == 'none':
             return self.poisson_cross_entropy
         return nn.PoissonNLLLoss(reduction=reduction)
     elif self._distr == 'bernoulli':
         return nn.BCELoss(reduction=reduction)
     else:
         raise ValueError('{} is not a valid distribution'.format(
             self._distr))
Example #17
    def __init__(self, policy_value_model=None):
        # create the model
        if policy_value_model is None:
            self.model = resnet_policy_value_model()  # torch_policy_value_model()
        else:
            self.model = policy_value_model
        learning_rate = 1e-3

        self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)

        self.policy_loss = nn.PoissonNLLLoss()
        # self.policy_loss = MSELoss()
        # self.policy_loss = nn.CrossEntropyLoss()
        self.value_loss = MSELoss()
Example #18
    def __init__(self,
        learning_rate=1e-3,
        batch_size=1000,
        data_dir='',
        optimizer='AdamW',
        weight_decay=1e-2,
        amsgrad=False,
        betas=[.9,.999],
        max_iter=10000,
        **kwargs):

        super().__init__()
        self.save_hyperparameters()

        self.loss = nn.PoissonNLLLoss(log_input=False)
Example #19
    def __init__(self, config, verbose=True):
        super(MTNet, self).__init__()
        assert config.multicell, "For multicell modeling only"

        self.config = config

        self.core = MTRotatioanlConvCoreNew(config, verbose=verbose)
        self.readout = MTReadout(config, verbose=verbose)

        # self.core = MTRotatioanlConvCore(config)
        # self.readout = MTReadout(config, self.core.output_size, verbose=verbose)

        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")
        self.init_weights()
        if verbose:
            print_num_params(self)
Example #20
def get_likelihood_surface(gd, cmod, Npos=20, radius=1, valid_eye_range=5.2):
    '''
    main eye-tracking loop
    '''
    from tqdm import tqdm # progress bar
    from copy import deepcopy

    assert gd.corrected is False, "cannot get an LL surface on an already corrected stimulus"

    loss = nn.PoissonNLLLoss(log_input=False, reduction='none')

    locs = np.linspace(-valid_eye_range, valid_eye_range, Npos) # grid up space

    x,y = gd[:10] # preload some data to get dimensions
    xh = cmod(x) # predict rates
    sz = list(xh.size()) # here's the shape

    NC = gd.NC
    NY = sz[1]
    NX = sz[2]
    LLspace1 = np.zeros([Npos,Npos,NY,NX])

    # Loop over positions (this is the main time-consuming operation)
    for xx in tqdm(range(Npos)):
        for yy in range(Npos):
            ctrXY = (locs[xx],locs[yy])

            inds = np.where(np.hypot(gd.eyeX[gd.valid]-ctrXY[0], gd.eyeY[gd.valid] - ctrXY[1]) < radius)[0]
            if len(inds) > 500:
                x,y = gd[inds] # get data from those indices

                xh = cmod(x) # predict rates

                # reshape and get loss across neurons over space
                sz = list(xh.size())

                L = 0
                for cc in range(NC):
                    yc = y[:,cc][:,None].repeat((1, sz[1]*sz[2])).reshape(sz[0:3])
                    L += loss(xh[:,:,:,cc], yc).sum(axis=0)

                L = L.detach().cpu().numpy()

                LLspace1[xx,yy,:,:] = deepcopy(L)

    return LLspace1, locs
Example #21
    def __init__(self, config, verbose=True):
        super(MTNet, self).__init__()

        self.config = config
        self.beta = 0.0

        self.encoder_stim = RotationalConvEncoder(config, verbose=verbose)
        self.decoder_stim = ConvDecoder(config, verbose=verbose)

        self.encoder_spks = SpksEncoder(config, verbose=verbose)
        self.decoder_spks = SpksDecoder(config, verbose=verbose)

        self.recon_criterion_stim = nn.MSELoss(reduction="sum")
        self.recon_criterion_spks = nn.PoissonNLLLoss(log_input=False,
                                                      reduction="sum")

        self.init_weights()
        if verbose:
            print_num_params(self)
Example #22
def ge_test_fun(data, n_device, batchsize, n_targets, model_path):
    # load data
    test_set = ge_data.ge_test_dataset(data)
    test_loader = DataLoader(test_set,
                             batch_size=batchsize,
                             shuffle=False,
                             num_workers=50)
    device_str = "cuda:{}".format(n_device)
    device = torch.device(device_str if torch.cuda.is_available() else "cpu")
    print("used device : ", device)
    # loss function
    loss_fun = nn.PoissonNLLLoss()
    # load the model
    test_model = ge_nn.Net(n_targets=n_targets)
    test_model.to(device)
    test_model.load_state_dict(torch.load(model_path))
    test_model.eval()
    # record losses and scores
    test_loss = []
    test_score = []
    count = 0
    with torch.no_grad():
        for (test_in, test_out) in test_loader:
            # model input
            test_in, test_out = test_in.to(device), test_out.to(device)
            out = test_model(test_in)
            # compute the loss
            loss = loss_fun(out, test_out)
            test_loss.append(loss.item())
            # compute score
            score = ge_loss.log_r2_score(out, test_out)
            test_score.append(score)
            # for plotting: convert log-output back to rates
            out = torch.exp(out)
            test_out = test_out.to("cpu")
            out = out.to("cpu")
        avr_test_loss = np.average(test_loss)
        avr_test_score = np.average(test_score)
    print('test data loss:{}, test r2 score:{}'.format(avr_test_loss,
                                                       avr_test_score))
    with open('train_log.txt', 'a') as f:
        f.write('test data loss:{}, test r2 score:{}'.format(
            avr_test_loss, avr_test_score))
Example #23
    def __init__(self, model, n_datasets=1):
        if n_datasets > 1:
            raise ValueError('NLLLoss only supports single datasets')

        metric_strs = ['batches', 'loss', 'r2', 'fc']
        super().__init__(model, metric_strs, n_datasets=n_datasets)

        # choose loss based on noise distribution of the model
        if self.model.hparams['noise_dist'] == 'gaussian':
            self._loss = nn.MSELoss()
        elif self.model.hparams['noise_dist'] == 'gaussian-full':
            from behavenet.fitting.losses import GaussianNegLogProb
            self._loss = GaussianNegLogProb()  # model holds precision mat
        elif self.model.hparams['noise_dist'] == 'poisson':
            self._loss = nn.PoissonNLLLoss(log_input=False)
        elif self.model.hparams['noise_dist'] == 'categorical':
            self._loss = nn.CrossEntropyLoss()
        else:
            raise ValueError('"%s" is not a valid noise dist' %
                             self.model.hparams['noise_dist'])
Example #24
    def __init__(self, input_size, hidden_size, num_layers):

        super(RGCNet, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_directions = 2
        self.seq_len = 1
        self.gcn = GCNet(input_size, [hidden_size] * num_layers)
        self.rnn = RNN(hidden_size, hidden_size, num_layers=1)
        self.logit_layer = nn.Linear(hidden_size, 2)
        self.count_layer = nn.Linear(hidden_size, 1)

        self.opt = torch.optim.Adam(self.parameters())

        # reduce=True is deprecated; reduction='mean' is the default behavior
        self.logit_loss_fn = nn.CrossEntropyLoss(
            weight=torch.FloatTensor([1, 10]))
        self.count_loss_fn = nn.PoissonNLLLoss(log_input=True)
Example #25
 def _get_loss(self, loss_spec):
     if not isinstance(loss_spec, str):
         return loss_spec
     elif loss_spec == 'mse':
         return nn.MSELoss(reduction='sum')
     elif loss_spec == 'crossentropy':
         return nn.CrossEntropyLoss(reduction='sum')
     elif loss_spec == 'l1':
         return nn.L1Loss(reduction='sum')
     elif loss_spec == 'nll':
         return nn.NLLLoss(reduction='sum')
     elif loss_spec == 'poissonnll':
         return nn.PoissonNLLLoss(reduction='sum')
     elif loss_spec == 'kldiv':
         return nn.KLDivLoss(reduction='sum')
     else:
         raise AutodiffCompositionError("Loss type {} not recognized. Loss argument must be a string or function. "
                                        "Currently, the recognized loss types are Mean Squared Error, Cross Entropy,"
                                        " L1 loss, Negative Log Likelihood loss, Poisson Negative Log Likelihood, "
                                        "and KL Divergence. These are specified as 'mse', 'crossentropy', 'l1', "
                                        "'nll', 'poissonnll', and 'kldiv' respectively.".format(loss_spec))
Example #26
    def __init__(self, config: FFConfig, verbose=False):
        super(GLM, self).__init__()

        self.config = config

        spat_dim = 15
        self.spatiotemporal = nn.ModuleList([
            nn.Sequential(
                nn.Flatten(),
                weight_norm(nn.Linear(
                    in_features=config.time_lags * 2 * spat_dim ** 2,
                    out_features=1,
                    bias=True,)),
                nn.Softplus(),)
            for _ in range(len(config.useful_cells[config.expt]))
        ])
        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")

        self.apply(get_init_fn(self.config.init_range))
        if verbose:
            print_num_params(self)
Example #27
def get_loss_criterion(loss_name, loss_criterion_dict):
    loss_criterion = None  # returned unchanged when loss_name == "None"
    if loss_name == "CrossEntropyLoss":
        loss_criterion = nn.CrossEntropyLoss(
            reduction=loss_criterion_dict["reduction"])
    if loss_name == "L1Loss":
        loss_criterion = nn.L1Loss(reduction=loss_criterion_dict["reduction"])
    if loss_name == "MSELoss":
        loss_criterion = nn.MSELoss(reduction=loss_criterion_dict["reduction"])
    if loss_name == "CTCLoss":
        loss_criterion = nn.CTCLoss(
            reduction=loss_criterion_dict["reduction"],
            blank=loss_criterion_dict["blank"],
            zero_infinity=loss_criterion_dict["zero_infinity"])
    if loss_name == "NLLLoss":
        loss_criterion = nn.NLLLoss(reduction=loss_criterion_dict["reduction"],
                                    weight=None)
    if loss_name == "PoissonNLLLoss":
        loss_criterion = nn.PoissonNLLLoss(
            reduction=loss_criterion_dict["reduction"],
            log_input=loss_criterion_dict["log_input"],
            full=loss_criterion_dict["full"],
            eps=loss_criterion_dict["eps"])
    if loss_name == "KLDivLoss":
        loss_criterion = nn.KLDivLoss(
            reduction=loss_criterion_dict["reduction"])
    if loss_name == "BCELoss":
        loss_criterion = nn.BCELoss(reduction=loss_criterion_dict["reduction"],
                                    weight=None)
    if loss_name == "BCEWithLogitsLoss":
        loss_criterion = nn.BCEWithLogitsLoss(
            reduction=loss_criterion_dict["reduction"],
            weight=None,
            pos_weight=None)
    if loss_name == "SoftMarginLoss":
        loss_criterion = nn.SoftMarginLoss(
            reduction=loss_criterion_dict["reduction"])
    if loss_name == "None":
        pass
    return loss_criterion
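A hypothetical call hitting the PoissonNLLLoss branch above (the dict keys mirror the function's lookups):

criterion = get_loss_criterion("PoissonNLLLoss", {
    "reduction": "sum",
    "log_input": False,
    "full": False,
    "eps": 1e-8,
})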
Example #28
    def __init__(self, config: ReadoutConfig, verbose=False):
        super(Readout, self).__init__()

        self.config = config

        _temp_dims = [11, 6, 3]
        _spat_dims = [15, 8, 4]
        spatiotemporal = [
            nn.Sequential(
                nn.Dropout3d(p=config.dropout, inplace=True),
                nn.Linear(in_features=_temp_dims[i],    # config.time_lags // 2**i,
                          out_features=config.nb_tk[i], bias=True,),
                Permute(dims=(0, 1, -1, 2, 3)),
                nn.Flatten(start_dim=3),
                weight_norm(nn.Linear(
                    in_features=_spat_dims[i] ** 2,
                    out_features=config.nb_sk[i], bias=True,)),
                nn.Flatten(),)
            for i in config.include_lvls
        ]
        self.spatiotemporal = nn.ModuleList(spatiotemporal)
        self.activation = LearnedSwish(slope=1.0)

        # total filters to pool from
        self.nb_filters = {i: config.nb_tk[i] * config.nb_sk[i] * config.core_dim * 2**i for i in config.include_lvls}
        # self.register_buffer('mask', self._compute_mask())
        nf = sum(list(self.nb_filters.values()))
        nc = len(config.useful_cells[config.expt])
        layers = []
        for cc in range(nc):
            layers += [nn.Sequential(nn.Linear(nf, 1, bias=True), LearnedSoftPlus(beta=1.0))]
        self.layers = nn.ModuleList(layers)
        # self.layer = nn.Linear(nb_filters, nb_cells, bias=True)
        # self.activations = nn.Softplus()
        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")

        self.apply(get_init_fn(config.init_range))
        if verbose:
            print_num_params(self)
Example #29
    def __init__(self,
                 nb_exc,
                 nb_inh,
                 nb_vel_tuning,
                 nb_tk,
                 nb_sk,
                 time_lags=12):
        super(DirSelectiveNIM, self).__init__()

        self.dir_tuning = nn.Linear(2, nb_exc + nb_inh, bias=False)
        self.vel_tuning = nn.Sequential(
            conv1x1(1, 32),
            nn.ReLU(),
            conv1x1(32, 32),
            nn.ReLU(),
            conv1x1(32, nb_vel_tuning),
            nn.ReLU(),
        )

        self.temporal_kernels = nn.Linear(time_lags, nb_tk, bias=False)
        self.spatial_kernels = nn.Linear(15**2, nb_sk, bias=True)

        self.layer = nn.Linear(
            (nb_exc + nb_inh) * nb_vel_tuning * nb_sk * nb_tk, 12, bias=True)

        self.reg = Regularizer(
            reg_values={'d2t': 1e-4, 'd2x': 1e-3},
            time_lags_list=[12],
            spatial_dims_list=[15])
        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")

        self.relu = nn.ReLU(inplace=True)
        self.softplus = nn.Softplus()

        # self._load_vel_tuning()
        print_num_params(self)
Example #30
    def __init__(self, config: ReadoutConfig, verbose=False):
        super(SingleCellReadout, self).__init__()

        self.config = config
        self.nc = len(config.useful_cells[config.expt])

        # spatio-temporal filters
        self.temporal = nn.ModuleList([
            nn.Linear(
                in_features=config.time_lags // 2**i,
                out_features=config.nb_tk[i],
                bias=False,)
            for i in config.include_lvls
        ])
        _spat_dims = [15, 8, 4]
        self.spatial = nn.ModuleList([
            nn.Linear(
                in_features=_spat_dims[i]**2,
                out_features=config.nb_sk[i] * self.nc,
                bias=False,)
            for i in config.include_lvls
        ])

        # last layer
        nb_filters = sum(config.nb_tk[i] * config.nb_sk[i] * config.core_dim * 2**i for i in config.include_lvls)
        self.layers = nn.ModuleDict(
            {"{:d}".format(c): nn.Sequential(
                nn.ReLU(inplace=True),
                nn.Dropout(config.dropout),
                nn.Linear(in_features=nb_filters,
                          out_features=1, bias=True,),
                nn.Softplus(),)
                for c in range(self.nc)}
        )
        self.criterion = nn.PoissonNLLLoss(log_input=False, reduction="sum")

        if verbose:
            print_num_params(self)