Example #1
  def __init__(self, config, dropout = 0.2):
    super(TDConway, self).__init__()
    
    self.stack_1 = ResidualConvStack(3, 64, layer_structure = [1,2,2,2], initial_depth = config.num_channels)

    self.pooler  = MaxPool2d(kernel_size=3, stride=3, padding=(0, 1)) # Downsample (15, 19) -> (5, 7)

    # See PyTorch docs for torch.nn.MaxPool2d
    pooled_height = (config.rows + 2) // 3
    pooled_width  = (config.cols + 4) // 3
    
    self.stack_2 = ResidualConvStack(3, 128, layer_structure = [1,2,2,2], initial_depth = 64)
    
    
    self.fc = Sequential(
      Flatten(), 
      Linear(128 * pooled_height * pooled_width, 512),
      SELU(),
      Dropout(0), # Remove in eval
      Linear(512, 2048),
      SELU(),
      Dropout(0), # Remove in eval
      Linear(2048, 1),
      # Sigmoid() # Remove in evaluation version
    )
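A quick check of the downsampling comment above: MaxPool2d's output size is floor((n + 2*padding - kernel_size) / stride) + 1 per dimension, so kernel 3, stride 3, padding (0, 1) maps (15, 19) to (5, 7), which is exactly what pooled_height and pooled_width compute for config.rows = 15, config.cols = 19. A minimal shape check (the 64 channels just mirror stack_1's width and are otherwise arbitrary):

import torch
from torch.nn import MaxPool2d

pool = MaxPool2d(kernel_size=3, stride=3, padding=(0, 1))
x = torch.zeros(1, 64, 15, 19)   # (batch, channels, rows, cols)
print(pool(x).shape)             # torch.Size([1, 64, 5, 7])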
Example #2
  def __init__(self, config, dropout = 0.2, temperature = None):
    super(TDConway, self).__init__()
    self.temperature = temperature
    
    self.stack_1 = ConvStack(3, 64, num_layers = 2, initial_depth = config.num_channels)

    self.pool  = MaxPool2d(kernel_size=3, stride=3, padding=(0, 1)) # Downsample (15, 19) -> (5, 7)

    # See PyTorch docs for torch.nn.MaxPool2d
    pooled_height = (config.rows + 2) // 3
    pooled_width  = (config.cols + 4) // 3
    
    self.stack_2 = ConvStack(3, 128, num_layers = 2, initial_depth = 64)
    
    
    self.fc = Sequential(
      Flatten(), 
      Linear(128 * pooled_height * pooled_width, 256),
      SELU(),
      Dropout(dropout),
      Linear(256, 2048),
      SELU(),
      Dropout(dropout),
      Linear(2048, 1),
      Sigmoid()
    )
Example #3
 def __init__(self, n_features, heads=4):
     super(ThreeConvBlock, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.out = Linear(4, 1)
Example #4
 def __init__(self, n_features, K=3):
     super(Spectral, self).__init__()
     self.spec1 = TAGConv(n_features, 16, K=K)
     self.spec2 = TAGConv(16, 16, K=K)
     self.spec3 = TAGConv(16, 16, K=K)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 8)
     self.out = Linear(8, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
Example #5
 def __init__(self, n_features, heads=4, masif_descr=False):
     # REMEMBER TO UPDATE MODEL NAME
     super(SixConv, self).__init__()
     self.masif_descr = masif_descr
     if masif_descr is True:
         self.pre_lin = Linear(80, n_features)
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.conv5 = FeaStConv(16, 16, heads=heads)
     self.conv6 = FeaStConv(16, 16, heads=heads)
     self.affine1 = Linear(n_features, 16)  # out_features was missing in the original; 16 is assumed to match the FeaStConv width
     self.affine2 = Linear(16, 16)           # out_features likewise assumed below
     self.affine3 = Linear(16, 16)
     self.affine4 = Linear(16, 16)
     self.affine5 = Linear(16, 16)
     self.affine6 = Linear(16, 16)
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.out = Linear(4, 1)
Example #6
 def __init__(self, n_features, heads=4, masif_descr=False):
     # REMEMBER TO UPDATE MODEL NAME
     super(TwoConv, self).__init__()
     self.masif_descr = masif_descr
     if masif_descr is True:
         self.pre_lin = Linear(80, n_features)
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.out = Linear(4, 1)
Example #7
def get_activation(name):
    act_name = name.lower()
    # Accept names like "elu(0.5)": capture the activation name and a float alpha
    m = re.match(r"(\w+)\((\d+\.\d+)\)", act_name)
    if m is not None:
        act_name, alpha = m.groups()
        alpha = float(alpha)
        print(act_name, alpha)
    else:
        alpha = 1.0
    if act_name == 'softplus':
        return Softplus()
    elif act_name == 'ssp':
        return SSP()
    elif act_name == 'elu':
        return ELU(alpha)
    elif act_name == 'relu':
        return ReLU()
    elif act_name == 'selu':
        return SELU()
    elif act_name == 'celu':
        return CELU(alpha)
    elif act_name == 'sigmoid':
        return Sigmoid()
    elif act_name == 'tanh':
        return Tanh()
    else:
        raise NameError("Unsupported activation: {}".format(name))
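A minimal, self-contained check of the regex used in get_activation above: strings like "celu(0.5)" split into a name and a float alpha, while bare names such as "relu" do not match and fall back to alpha = 1.0.

import re

for s in ["celu(0.5)", "elu(2.0)", "relu", "selu"]:
    m = re.match(r"(\w+)\((\d+\.\d+)\)", s.lower())
    if m is not None:
        print(s, "->", m.groups())          # e.g. ('celu', '0.5')
    else:
        print(s, "-> no match, alpha defaults to 1.0")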
Example #8
    def __init__(self, n_layers, hidden_dim, input_dim, activation=SELU()):
        super().__init__()
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        self.activation = activation
        for l in range(n_layers - 1):
            if l > 0:
                setattr(self, 'u' + str(l), t.nn.Linear(hidden_dim, hidden_dim))
            else:
                setattr(self, 'u' + str(l), t.nn.Linear(input_dim, hidden_dim))
        for l in range(n_layers):
            # hidden_dim for every layer except the last, which outputs a scalar;
            # the previous layer's width (input_dim for l == 0) feeds this layer
            output_dim = hidden_dim if l < n_layers - 1 else 1
            idim = hidden_dim if l > 0 else input_dim
            setattr(self, 'z_u' + str(l), t.nn.Linear(idim, output_dim))
            setattr(self, 'z_au' + str(l), t.nn.Linear(idim, 2))
            setattr(self, 'z_au_' + str(l), t.nn.Linear(2, output_dim, bias=False))
            if l > 0:
                setattr(self, 'z_zu' + str(l), t.nn.Linear(idim, hidden_dim))
                setattr(self, 'z_zu_' + str(l), t.nn.Linear(hidden_dim, output_dim, bias=False))

        # initialize parameters correctly (see the SELU paper)
        self.initialize_weights_selu()

        # enforce convexity (or rather concavity)
        self.make_cvx()
Example #9
    def __init__(self, config, dropout=0.2):
        super(TDConway, self).__init__()

        self.stack_1 = ModuleList([
            ResidualConvStack(3,
                              64,
                              layer_structure=[1, 2, 2],
                              initial_depth=config.num_channels),
            ResidualConvStack(5,
                              64,
                              layer_structure=[1, 2, 2],
                              initial_depth=config.num_channels),
        ])

        self.stack_2 = ModuleList([
            ResidualConvStack(1, 64 * 2, layer_structure=[0, 2, 2]),
            ResidualConvStack(3, 64 * 2, layer_structure=[0, 2, 2]),
            ResidualConvStack(5, 64 * 2, layer_structure=[0, 2, 2]),
        ])

        # Three parallel 128-channel stacks from stack_2 are concatenated with
        # no pooling, hence 64 * 2 * 3 channels over the full rows x cols board.
        self.fc = Sequential(
            Flatten(), Linear(64 * 2 * 3 * config.rows * config.cols, 512),
            SELU(), Dropout(dropout), Linear(512, 2048), SELU(),
            Dropout(dropout), Linear(2048, 1), Sigmoid())
Example #10
 def __init__(self, n_features, heads=4):
     # REMEMBER TO UPDATE MODEL NAME
     super(EightConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.conv5 = FeaStConv(16, 16, heads=heads)
     self.conv6 = FeaStConv(16, 16, heads=heads)
     self.conv7 = FeaStConv(16, 16, heads=heads)
     self.conv8 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.out = Linear(4, 1)
Example #11
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 activation: Module = SELU(),
                 dropout: bool = True):
        """Initialize the weights."""
        super().__init__()
        stddev = np.sqrt(1 / in_features)
        self.activation = activation

        if dropout:
            self.dropout: Module = AlphaDropout(0.2)
        else:
            self.dropout = Identity()

        self.weights = Parameter(
            torch.randn([in_features, out_features]) * stddev)
        self.bias = Parameter(torch.zeros([out_features]))
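    # The forward pass is not part of this excerpt; the sketch below is an
    # assumption consistent with the parameters defined above, not the original
    # author's code: affine map with the manually initialized weights, then the
    # activation and AlphaDropout, which preserves the self-normalizing
    # statistics that SELU relies on.
    def forward(self, x):
        return self.dropout(self.activation(x @ self.weights + self.bias))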
Example #12
    def __init__(self):
        super(TraffixSignDetection, self).__init__()

        self.conv1 = Conv2d(3, 16, 5, padding=2)
        self.conv2 = Conv2d(16, 32, 5, padding=2)

        self.pool = MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.dropout = Dropout(p=0.92)

        self.activation2 = SELU()
        self.activation = ReLU()

        self.fc1 = Linear(2048, 512)
        self.fc2 = Linear(512, 128)
        self.fc3 = Linear(128, 2)

        self.bn1 = BatchNorm2d(16)
        self.bn2 = BatchNorm2d(32)
Example #13
    def __init__(self, n_layers, hidden_dim, activation=SELU()):
        super(ICNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim
        self.activation = activation
        for l in range(n_layers - 1):
            setattr(self, 'u' + str(l), t.nn.Linear(hidden_dim, hidden_dim))

        for l in range(n_layers):
            # hidden_dim for every layer except the last, which outputs a scalar
            output_dim = hidden_dim if l < n_layers - 1 else 1
            setattr(self, 'z_u' + str(l), t.nn.Linear(hidden_dim, output_dim))
            setattr(self, 'z_au' + str(l), t.nn.Linear(hidden_dim, 2))
            setattr(self, 'z_au_' + str(l),
                    t.nn.Linear(2, output_dim, bias=False))
            if l > 0:
                setattr(self, 'z_zu' + str(l),
                        t.nn.Linear(hidden_dim, hidden_dim))
                setattr(self, 'z_zu_' + str(l),
                        t.nn.Linear(hidden_dim, output_dim, bias=False))
Example #14
 def __init__(self, n_features, heads=4):
     # REMEMBER TO UPDATE MODEL NAME
     super(FourConv, self).__init__()
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 16)
     self.out = Linear(16, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
Example #15
 def __init__(self, n_features):
     super(MultiScaleEncoder, self).__init__()
     # Will have to update these
     self.conv1 = FeaStConv(n_features, 16, heads=4)
     self.conv2 = FeaStConv(32, 16, heads=4)
     self.conv3 = FeaStConv(32, 16, heads=4)
     self.conv4 = FeaStConv(48, 16, heads=4)
     self.conv5 = FeaStConv(16, 16, heads=4)
     self.affine1 = Linear(n_features, 16)
     self.affine2 = Linear(n_features, 16)
     self.affine3 = Linear(16, 16)
     self.affine4 = Linear(16, 16)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 8)
     self.out = Linear(8, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
Example #16
 def __init__(self, n_layers, hidden_dim, activation=SELU()):
     super(IntegralPredNet, self).__init__()
     self.n_layers = n_layers
     self.hidden_dim = hidden_dim
     self.activation = activation
Example #17
 def __init__(self, n_features, heads=4, masif_descr=False, relu=False):
     # REMEMBER TO UPDATE MODEL NAME
     super(FourteenConv, self).__init__()
     self.masif_descr = masif_descr
     if masif_descr is True:
         self.pre_lin = Linear(80, n_features)
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.conv5 = FeaStConv(16, 16, heads=heads)
     self.conv6 = FeaStConv(16, 16, heads=heads)
     self.conv7 = FeaStConv(16, 16, heads=heads)
     self.conv8 = FeaStConv(16, 16, heads=heads)
     self.conv9 = FeaStConv(16, 16, heads=heads)
     self.conv10 = FeaStConv(16, 16, heads=heads)
     self.conv11 = FeaStConv(16, 16, heads=heads)
     self.conv12 = FeaStConv(16, 16, heads=heads)
     self.conv13 = FeaStConv(16, 16, heads=heads)
     self.conv14 = FeaStConv(16, 16, heads=heads)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.s11 = SELU()
     self.s12 = SELU()
     self.s13 = SELU()
     self.s14 = SELU()
     self.s15 = SELU()
     self.s16 = SELU()
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.out = Linear(4, 1)
     self.relu = relu
Example #18
 def __init__(self, n_features, heads=4, masif_descr=False):
     # REMEMBER TO UPDATE MODEL NAME
     super(TenConvwAffinePool, self).__init__()
     self.masif_descr = masif_descr
     if masif_descr is True:
         self.pre_lin = Linear(80, n_features)
     self.conv1 = FeaStConv(n_features, 16, heads=heads)
     self.conv2 = FeaStConv(16, 16, heads=heads)
     self.conv3 = FeaStConv(16, 16, heads=heads)
     self.conv4 = FeaStConv(16, 16, heads=heads)
     self.conv5 = FeaStConv(16, 16, heads=heads)
     self.conv6 = FeaStConv(16, 16, heads=heads)
     self.conv7 = FeaStConv(16, 16, heads=heads)
     self.conv8 = FeaStConv(16, 16, heads=heads)
     self.conv9 = FeaStConv(16, 16, heads=heads)
     self.conv10 = FeaStConv(16, 16, heads=heads)
     self.interconv1 = FeaStConv(16, 16, heads=heads)
     self.interconv2 = FeaStConv(16, 16, heads=heads)
     self.inters1 = SELU()
     self.inters2 = SELU()
     self.lin1 = Linear(16, 16)
     self.lin2 = Linear(16, 4)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.s11 = SELU()
     self.s12 = SELU()
     self.out = Linear(4, 1)
     self.affine1 = Linear(16, 16)
     self.affine2 = Linear(16, 16)
Example #19
import torch as t
from torch.nn import Conv1d as conv, SELU, Linear as fc, Softmax, Sigmoid, AlphaDropout, BatchNorm1d as BN, PReLU, LeakyReLU
import torch.nn as nn
import torch.optim as optim
import numpy as np
import time
from game.game_utils import bucket_encode_actions, array_to_cards
from game.utils import variable

selu = SELU()
softmax = Softmax()
sigmoid = Sigmoid()
leakyrelu = LeakyReLU()


def get_shape(x):
    # Works for both old-style Variables (via .data) and plain tensors
    try:
        return x.data.cpu().numpy().shape
    except AttributeError:
        return x.numpy().shape


def flatten(x):
    # Collapse everything after the batch dimension; Tensor.resize is
    # deprecated, reshape/view would be the modern equivalent
    shape = get_shape(x)
    return x.resize(shape[0], int(np.prod(shape[1:])))


class CardFeaturizer1(t.nn.Module):
    """
    The one I got results with
    SELU + AlphaDropout + smart initialization
Example #20
 def __init__(self, dropout):
     super().__init__()
     self.conv1 = None
     self.conv2 = None
     self.dropout = Dropout(p=dropout)
     self.activation = SELU(inplace=True)
Example #21
 def __init__(self, n_features):
     super(SageNet, self).__init__()
     self.conv1 = SAGEConv(n_features, 16, normalize=False)
     self.conv2 = SAGEConv(16, 16, normalize=False)
     self.conv3 = SAGEConv(16, 16, normalize=False)
     self.conv4 = SAGEConv(16, 16, normalize=False)
     self.conv5 = SAGEConv(16, 16, normalize=False)
     self.conv6 = SAGEConv(16, 16, normalize=False)
     self.conv7 = SAGEConv(16, 16, normalize=False)
     self.conv8 = SAGEConv(16, 16, normalize=False)
     self.conv9 = SAGEConv(16, 16, normalize=False)
     self.lin1 = Linear(16, 64)
     self.lin2 = Linear(64, 16)
     self.out = Linear(16, 1)
     self.s1 = SELU()
     self.s2 = SELU()
     self.s3 = SELU()
     self.s4 = SELU()
     self.s5 = SELU()
     self.s6 = SELU()
     self.s7 = SELU()
     self.s8 = SELU()
     self.s9 = SELU()
     self.s10 = SELU()
     self.s11 = SELU()
Example #22
# The torch imports below are not shown in the original excerpt, which starts
# mid-file; they are added here so the names used further down resolve.
from torch import load, device as DEVICE
from torch.cuda import is_available
from torch.nn import Sequential, Linear, SELU, Dropout, LogSigmoid
from torchvision.transforms import Compose, ToTensor, Resize
from torchvision.models import resnet50
from flask import Flask, jsonify, request

app = Flask(__name__)
LABELS = ['None', 'Meningioma', 'Glioma', 'Pituitary']

device = "cuda" if is_available() else "cpu"

resnet_model = resnet50(pretrained=True)

for param in resnet_model.parameters():
    param.requires_grad = True

n_inputs = resnet_model.fc.in_features
resnet_model.fc = Sequential(Linear(n_inputs, 2048), SELU(), Dropout(p=0.4),
                             Linear(2048, 2048), SELU(), Dropout(p=0.4),
                             Linear(2048, 4), LogSigmoid())

for name, child in resnet_model.named_children():
    for name2, params in child.named_parameters():
        params.requires_grad = True

resnet_model.to(device)
resnet_model.load_state_dict(
    load('../CNN/models/bt_resnet50_model.pt', map_location=DEVICE(device)))
resnet_model.eval()


def preprocess_image(image_bytes):
    transform = Compose([Resize((512, 512)), ToTensor()])
Example #23
import torch
from torch.nn import ReLU, Tanh, CrossEntropyLoss, Sigmoid, SELU, MSELoss, L1Loss, SmoothL1Loss, NLLLoss, BCELoss
from torch.optim import Adam, SGD, RMSprop, Adagrad
from torch.utils.data.dataset import Dataset

activations = {
    "relu": ReLU(),
    "tanh": Tanh(),
    "sigmoid": Sigmoid(),
    "selu": SELU(),
}

optimizers = {
    "adam": Adam,
    "sgd": SGD,
    "rmsprop": RMSprop,
    "adagrad": Adagrad,
}
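
Note that activations (above) and losses (below) hold ready-to-use module instances, while optimizers holds the classes themselves, since an optimizer needs the model's parameters at construction time. A short usage sketch; the model here is a stand-in for illustration, not from the original:

import torch

model = torch.nn.Linear(8, 1)    # placeholder model, assumed for the example
act = activations["selu"]                                # an instance, ready to call
opt = optimizers["adam"](model.parameters(), lr=1e-3)    # a class, constructed here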

losses = {
    "negative log likelihood": NLLLoss(),
    "nll": NLLLoss(),
    "binary cross entropy": BCELoss(),
    "bce": BCELoss(),
    "categorical cross entropy": CrossEntropyLoss(),
    "cce": CrossEntropyLoss(),
    "mean squared error": MSELoss(),
    "mse": MSELoss(),
    "mean absolute error": L1Loss(),
    "mae": L1Loss(),
    "huber loss": SmoothL1Loss(),