Code Example #1
File: train_mm2.py  Project: mountain/suan-demo
    def __init__(self,
                 in_channels,
                 out_channels,
                 channels_per_step_in=1,
                 channels_per_step_out=4):
        super().__init__()
        self.in_steps = in_channels // channels_per_step_in
        self.out_steps = (out_channels - 8) // channels_per_step_out // 4
        self.channels_per_step_in = channels_per_step_in
        self.channels_per_step_out = channels_per_step_out
        self.out_channels = out_channels

        self.unet = resunet(16,
                            8,
                            block=HyperBottleneck,
                            relu=CappingRelu(),
                            ratio=-2,
                            layers=6,
                            vblks=[1, 1, 1, 1, 1, 1],
                            hblks=[1, 1, 1, 1, 1, 1],
                            scales=[-1, -1, -1, -1, -1, -1],
                            factors=[1, 1, 1, 1, 1, 1],
                            spatial=(64, 64))
        # single-layer ConvLSTM over the per-step inputs
        self.lstm = ConvLSTM(1,
                             channels_per_step_out * 4,
                             kernel_size=3,
                             num_layers=1,
                             return_all_layers=False)
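
Only the constructor is shown above. The positional signature (input_dim, hidden_dim, kernel_size, num_layers, return_all_layers) matches the widely circulated PyTorch ConvLSTM implementation exercised in Code Examples #4 and #6 below. A hedged usage sketch, assuming import torch and an instance named model (shapes are assumptions, not taken from train_mm2.py):

# hypothetical call pattern for the ConvLSTM above (shapes assumed)
x = torch.randn(2, 10, 1, 64, 64)            # (batch, time, channels, H, W)
layer_outputs, last_states = model.lstm(x)   # lists, one entry per returned layer
seq = layer_outputs[0]                       # (2, 10, channels_per_step_out * 4, 64, 64)
h, c = last_states[0]                        # final hidden and cell state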
Code Example #2
    def __init__(self, image_size):
        """Multi layeres ConvLSTM.

        Parameters
        ----------
        image_size: (int, int)
            Shape of image.
        """

        super().__init__()
        self.conv_lstm_1 = ConvLSTM(in_channels=1,
                                    hidden_channels=32,
                                    kernel_size=3,
                                    stride=1,
                                    image_size=image_size)
        self.conv_lstm_2 = ConvLSTM(in_channels=32,
                                    hidden_channels=32,
                                    kernel_size=3,
                                    stride=1,
                                    image_size=image_size)
        self.conv_lstm_3 = ConvLSTM(in_channels=32,
                                    hidden_channels=32,
                                    kernel_size=3,
                                    stride=1,
                                    image_size=image_size)
        self.conv2d = nn.Conv2d(32, 1, 1)
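
Only the constructor appears here. A minimal sketch of a matching forward pass, assuming each ConvLSTM maps a (batch, time, channels, H, W) sequence to a same-length sequence of hidden states (an assumption about this project's ConvLSTM API, not code from the repository):

    def forward(self, x):
        # x: (batch, time, 1, H, W) -- assumed input layout
        x = self.conv_lstm_1(x)
        x = self.conv_lstm_2(x)
        x = self.conv_lstm_3(x)
        # collapse the 32 hidden channels of the final step to one image channel
        return self.conv2d(x[:, -1])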
Code Example #3
    def __init__(self, tot_frame_num=100, step_=6, predict_=3, Gary_Scale=False, size_index=256):
        print("gray scale:", Gary_Scale)
        super(unet, self).__init__()

        self.size_index = size_index
        self.resize_fraction = 256 / size_index  # 1.0 when size_index == 256

        # pick the GPU when available (device is not used later in this excerpt)
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

        self.latent_feature = 0
        self.lstm_buf = []
        self.step = step_
        self.pred = predict_
        self.free_mem_counter = 0
        self.max_pool = nn.MaxPool2d(2)
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)

        self.softmax = torch.nn.Softmax(dim=1)  # dim=1 assumed; the original omitted it

        self.convlstm1 = ConvLSTM(input_channels=512, hidden_channels=[512, 512, 512], kernel_size=3, step=3,
                        effective_step=[2])

        self.convlstm2 = ConvLSTM(input_channels=384, hidden_channels=[384, 256, 128], kernel_size=3, step=3,
                        effective_step=[2])

        self.convlstm3 = ConvLSTM(input_channels=224, hidden_channels=[224, 128, 32], kernel_size=3, step=3,
                        effective_step=[2])

        self.convlstm4 = ConvLSTM(input_channels=120, hidden_channels=[120, 64, 8], kernel_size=3, step=3,
                        effective_step=[2])

        self.convlstm5 = ConvLSTM(input_channels=62, hidden_channels=[62, 32, 2], kernel_size=3, step=3,
                        effective_step=[2])

        if Gary_Scale:
            self.down1 = conv_unit(2, 62)
        else:
            self.down1 = conv_unit(3, 62)

        self.down2 = conv_unit(62, 120)
        self.down3 = conv_unit(120, 224)
        self.down4 = conv_unit(224, 384)
        self.down5 = conv_unit(384, 512)

        self.up1 = Up_Layer0(1024, 512)
        self.up2 = Up_Layer(512, 256)
        self.up3 = Up_Layer(256, 128)
        self.up4 = Up_Layer(128, 64)

        if Gary_Scale:
            self.up5 = nn.Conv2d(64, 2, kernel_size=1)
        else:
            self.up5 = nn.Conv2d(64, 3, kernel_size=1)
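
A detail worth noting in this constructor: each convlstm's input_channels (512, 384, 224, 120, 62) equals the output width of one of the down stages, which suggests the ConvLSTMs operate on the encoder features or skip connections. A hedged illustration of one such hop (variable names and the return convention are assumptions, not project code):

        # hypothetical encoder hop: the deepest feature through its ConvLSTM
        # (inside a forward pass; f4 denotes the previous down-stage output)
        f5 = self.down5(f4)                # 512 channels, per the constructor
        outputs, _ = self.convlstm1(f5)    # return convention assumed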
Code Example #4
# assumed pytest parametrization (requires "import pytest"); not shown in this excerpt
@pytest.mark.parametrize("return_all_layers", [True, False])
def test_conv_lstm_reduction(return_all_layers: bool) -> None:
    conv_lstm = ConvLSTM(
        input_dim=2,
        hidden_dim=(8, 4, 2),
        kernel_size=(5, 7, 9),
        num_layers=3,
        return_all_layers=return_all_layers,
    )
    input_tensor = torch.randn(64, 9, 2, 50, 50)
    out, states = conv_lstm(input_tensor)
    assert out is not None
    if return_all_layers:
        assert len(out) == 3
        assert len(states) == 3
        assert out[0].shape == torch.Size([64, 9, 8, 50, 50])
        assert out[1].shape == torch.Size([64, 9, 4, 50, 50])
        assert out[2].shape == torch.Size([64, 9, 2, 50, 50])
    else:
        assert len(out) == 1
        assert len(states) == 1
        assert len(states[0]) == 2
        assert out[0].shape == torch.Size([64, 9, 2, 50, 50])
        h, c = states[0]
        assert h.shape == torch.Size([64, 2, 50, 50])
        assert c.shape == torch.Size([64, 2, 50, 50])
Code Example #5
    def __init__(self,
                 tot_frame_num=100,
                 step_=6,
                 predict_=3,
                 Gary_Scale=False,
                 size_index=256):
        print("gray scale:", Gary_Scale)
        super(unet, self).__init__()
        self.resize_fraction = 256 / size_index  # 1.0 when size_index == 256

        cuda_gpu = torch.cuda.is_available()

        self.latent_feature = 0
        self.lstm_buf = []
        self.step = step_
        self.pred = predict_
        self.free_mem_counter = 0
        self.max_pool = nn.MaxPool2d(2)
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
        self.one_conv1 = nn.Conv2d(1024, 512, kernel_size=1, bias=True)
        self.one_conv2 = nn.Conv2d(1024, 512, kernel_size=1, bias=True)
        self.one_conv3 = nn.Conv2d(512, 1024, kernel_size=1, bias=True)

        self.convlstm = ConvLSTM(input_channels=512,
                                 hidden_channels=[512, 512, 512],
                                 kernel_size=3,
                                 step=3,
                                 effective_step=[2])

        self.one_conv4 = nn.Conv2d(512, 384, kernel_size=1, bias=True)
        self.one_conv5 = nn.Conv2d(256, 224, kernel_size=1, bias=True)
        self.one_conv6 = nn.Conv2d(128, 120, kernel_size=1, bias=True)
        self.one_conv7 = nn.Conv2d(64, 62, kernel_size=1, bias=True)

        self.rnn = recurrent_network_layer(fraction_index=2)
        self.rnn2 = recurrent_network(fraction_index=2)

        if Gary_Scale:
            self.down1 = Down_Layer(1, 64)
        else:
            self.down1 = Down_Layer(3, 64)

        self.down2 = Down_Layer(64, 128)
        self.down3 = Down_Layer(128, 256)
        self.down4 = Down_Layer(256, 512)
        self.down5 = Down_Layer(512, 512)

        self.up1 = Up_Layer(1024, 512)
        self.up2 = Up_Layer(512, 256)
        self.up3 = Up_Layer(256, 128)
        self.up4 = Up_Layer(128, 64)
        if Gary_Scale:
            self.up5 = nn.Conv2d(64, 1, kernel_size=1)
        else:
            self.up5 = nn.Conv2d(64, 3, kernel_size=1)
Code Example #6
# presumably pytest-parametrized over kernel_size, input_dim, num_layers and
# return_all_layers; the decorators are not shown in this excerpt
def test_conv_lstm(kernel_size: int, input_dim: int, num_layers: int,
                   return_all_layers: bool) -> None:
    conv_lstm = ConvLSTM(
        input_dim=input_dim,
        hidden_dim=10,
        kernel_size=kernel_size,
        num_layers=num_layers,
        return_all_layers=return_all_layers,
    )
    input_tensor = torch.randn(64, 9, input_dim, 25, 25)
    out, states = conv_lstm(input_tensor)
    assert out is not None
    assert isinstance(out, list)
    if return_all_layers:
        assert len(out) == num_layers
    else:
        assert len(out) == 1
    for i in range(len(out)):
        # every returned layer shares hidden_dim=10, so the shape is uniform
        assert out[i].shape == torch.Size([64, 9, 10, 25, 25])
Code Example #7
    def __init__(self, image_size):
        """ConvLSTM Encoder Predictor.

        Parameters
        ----------
        image_size: (int, int)
            Shape of image.
        """

        super().__init__()

        self.encoder_1 = ConvLSTM(in_channels=1,
                                  hidden_channels=32,
                                  kernel_size=3,
                                  stride=1,
                                  image_size=image_size)
        self.encoder_2 = ConvLSTM(in_channels=32,
                                  hidden_channels=32,
                                  kernel_size=3,
                                  stride=1,
                                  image_size=image_size)
        self.encoder_3 = ConvLSTM(in_channels=32,
                                  hidden_channels=32,
                                  kernel_size=3,
                                  stride=1,
                                  image_size=image_size)

        self.predictor_1 = ConvLSTM(in_channels=32,
                                    hidden_channels=32,
                                    kernel_size=3,
                                    stride=1,
                                    image_size=image_size)
        self.predictor_2 = ConvLSTM(in_channels=32,
                                    hidden_channels=32,
                                    kernel_size=3,
                                    stride=1,
                                    image_size=image_size)
        self.predictor_3 = ConvLSTM(in_channels=32,
                                    hidden_channels=32,
                                    kernel_size=3,
                                    stride=1,
                                    image_size=image_size)

        self.conv2d = nn.Conv2d(32, 1, 1)
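
Only the constructor is shown. In the usual ConvLSTM encoder-forecaster layout (Shi et al., 2015), the encoder's final states seed the predictor stack, which then rolls forward autoregressively. A hedged sketch of such a forward pass, assuming each ConvLSTM here accepts an optional state and returns a (sequence, state) pair (assumptions about this project's API, not its code):

    def forward(self, x, pred_steps):
        # x: (batch, time, 1, H, W) -- shapes assumed
        h1, s1 = self.encoder_1(x)
        h2, s2 = self.encoder_2(h1)
        h3, s3 = self.encoder_3(h2)
        preds, inp = [], h3[:, -1:]        # start from the last encoded step
        for _ in range(pred_steps):
            p1, s1 = self.predictor_1(inp, s1)
            p2, s2 = self.predictor_2(p1, s2)
            p3, s3 = self.predictor_3(p2, s3)
            preds.append(self.conv2d(p3[:, -1]))   # 32 channels -> 1
            inp = p3
        return torch.stack(preds, dim=1)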
Code Example #8
#Xtrain_dummy = tf.ones((batch_size, seq_length, 81, 161, num_vars))
#ytrain_dummy = tf.ones((batch_size, seq_length, 81, 161))

# number of filters in each layer
filters = [256, 256]
# size of filters used
kernels = [3, 3]

from utils import get_xarray_dataset_for_period, get_data_keras, get_train_test
#data = get_xarray_dataset_for_period(start = '2012-01-01', stop = '2012-01-31')
#print(data)
test_start = '2014-01-01'
test_stop = '2018-12-31'
train_dataset, test_dataset = get_train_test(test_start, test_stop, model='ar')
X_train, y_train = get_data_keras(train_dataset,
                                  num_samples=None,
                                  seq_length=24,
                                  batch_size=None,
                                  data_format='channels_last')

seq_length = 24  # matches the seq_length passed to get_data_keras above
epochs = 10      # placeholder; the excerpt does not define epochs

model = ConvLSTM(
    X_train=X_train,
    y_train=y_train,
    filters=filters,
    kernels=kernels,
    seq_length=seq_length,
    epochs=epochs,  #batch_size = batch_size, 
    validation_split=0.1,
    name='Model2',
    result_path='/home/hannasv/results/')
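
The ConvLSTM used here is a project-specific Keras wrapper. For orientation only, a hedged sketch of the kind of stack that filters=[256, 256] and kernels=[3, 3] describe, built from standard tf.keras layers (an illustration, not the wrapper's actual implementation):

import tensorflow as tf

def build_stack(seq_length=24, height=81, width=161, num_vars=4):
    # num_vars=4 is a placeholder; the excerpt does not fix its value
    inputs = tf.keras.Input(shape=(seq_length, height, width, num_vars))
    x = inputs
    for f, k in zip([256, 256], [3, 3]):
        x = tf.keras.layers.ConvLSTM2D(f, k, padding='same',
                                       return_sequences=True)(x)
    outputs = tf.keras.layers.Conv3D(1, 1)(x)   # one output channel per step
    return tf.keras.Model(inputs, outputs)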
Code Example #9
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from dataset_loader import MyData, MyTestData, DTestData
from model import FocalNet, FocalNet_sub
from conv_lstm import ConvLSTM
from functions import imsave
import argparse
from Trainer_Teacher import Trainer
import os

if __name__ == '__main__':
    configurations = {
        1: dict(
            max_iteration=500000,
            lr=1.0e-10,
            momentum=0.99,
            weight_decay=0.0005,
            spshot=10000,
            nclass=2,
            sshow=10,
            focal_num=12,
        )
    }
    parser = argparse.ArgumentParser()
    parser.add_argument('--phase', type=str, default='test', help='train or test')
    parser.add_argument('--param', type=str, default=True, help='path to pre-trained parameters')
Code Example #10
    def __init__(self, in_ch, out_ch, kernel, stride, pad, n_layers):
        super().__init__()
        # first link maps in_ch -> out_ch; the remaining n_layers keep out_ch
        self.add_link(ConvLSTM(in_ch, out_ch, kernel, stride=stride, pad=pad))
        for i in range(n_layers):
            self.add_link(ConvLSTM(out_ch, out_ch, kernel, stride=1, pad=pad))
        self.n_layers = n_layers
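
Since this class builds itself with add_link, it presumably extends chainer.ChainList; a hedged sketch of iterating the stacked links in __call__ (an illustration, not the project's code):

    def __call__(self, x):
        # run the input through every stacked ConvLSTM link in order
        for link in self.children():
            x = link(x)
        return x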
Code Example #11
File: trainer.py  Project: anuragpassi/microbiome-rnn
otu_handler.set_train_val()
otu_handler.normalize_data()
print('Loaded in data. Ready to train.\n')

use_gpu = torch.cuda.is_available()

if not os.path.isdir(output_dir):
    os.mkdir(output_dir)

save_params = (os.path.join(output_dir, model_name),
               os.path.join(output_dir, log_name))

if use_convs:
    print('Using Conv-LSTM')
    rnn = ConvLSTM(hidden_dim,
                   otu_handler,
                   use_gpu,
                   LSTM_in_size=reduced_num_strains)
else:
    rnn = LSTM(hidden_dim,
               otu_handler,
               use_gpu,
               LSTM_in_size=reduced_num_strains)

train_loss, val_loss = rnn.do_training(
    seq_len,
    batch_size,
    num_epochs,
    learning_rate,
    samples_per_epoch,
    save_params=save_params,
    slice_incr_frequency=slice_incr_frequency)
Code Example #12
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from dataset_loader import MyData, MyTestData
from model import FocalNet, FocalNet_sub
from conv_lstm import ConvLSTM
from functions import imsave
import argparse
from Trainer_Student import Trainer
from resnet_18 import Resnet_18
import os
import imageio

if __name__ == '__main__':
    configurations = {
        1: dict(
            max_iteration=300000,
            lr=1.0e-10,
            momentum=0.99,
            weight_decay=0.0005,
            spshot=10000,
            nclass=2,
            sshow=10,
            focal_num=12,
        )
    }
    parser = argparse.ArgumentParser()