Example #1
0
def _create_model(layer_sizes1, layer_sizes2, input_size1, input_size2,
                  learning_rate, reg_par, outdim_size,
                  use_all_singular_values):
    """
    Build the whole two-view DCCA model.

    Each view is a stack of Dense layers (sizes given by layer_sizes1 /
    layer_sizes2); the two branch outputs are concatenated and the model
    is compiled against the CCA loss with an RMSprop optimizer.

    NOTE(review): reg_par is accepted but never used in this function
    (sibling create_model variants pass it to build_mlp_net) — confirm
    whether regularization was intended here.

    :param layer_sizes1: iterable of Dense-layer widths for view 1
    :param layer_sizes2: iterable of Dense-layer widths for view 2
    :param input_size1: flat input dimensionality of view 1
    :param input_size2: flat input dimensionality of view 2
    :param learning_rate: RMSprop learning rate
    :param reg_par: unused (see note above)
    :param outdim_size: CCA output dimensionality passed to cca_loss
    :param use_all_singular_values: flag forwarded to cca_loss
    :return: compiled Keras Model taking [view1, view2] inputs
    """
    inp_1 = Input((input_size1, ))
    inp_2 = Input((input_size2, ))

    # Apply the Dense stack of each branch directly; the original built
    # the layer list first and enumerated it with an unused index.
    D1 = inp_1
    for units in layer_sizes1:
        D1 = Dense(units)(D1)

    D2 = inp_2
    for units in layer_sizes2:
        D2 = Dense(units)(D2)

    output = concatenate([D1, D2])
    model = Model([inp_1, inp_2], [output])

    model_optimizer = RMSprop(lr=learning_rate)
    model.compile(loss=cca_loss(outdim_size, use_all_singular_values),
                  optimizer=model_optimizer)

    return model
    def __init__(self, layer_sizes1, layer_sizes2, input_size1, input_size2,
                 outdim_size1, outdim_size2, use_all_singular_values,
                 device='cpu', p=0.3):
        """Create one LeNet branch per view and bind the shared CCA loss.

        NOTE(review): layer_sizes1/layer_sizes2 are accepted but not used
        in this body (they belonged to an earlier MlpNet variant) — confirm
        before removing from the signature.
        """
        super(DeepCCA, self).__init__()
        self.device = device

        # Both branches share the same sequence length and dropout rate;
        # they are moved to the target device and run in double precision.
        shared = dict(seq_size=50, p=p)
        self.model1 = LeNet(input_size=input_size1, output_size=outdim_size1,
                            **shared).to(self.device).double()
        self.model2 = LeNet(input_size=input_size2, output_size=outdim_size2,
                            **shared).to(self.device).double()

        self.loss = cca_loss(outdim_size1, outdim_size2,
                             use_all_singular_values, device).loss
Example #3
0
    def __init__(self, layer_sizes1, layer_sizes2, input_size1, input_size2,
                 outdim_size, use_all_singular_values,
                 device=torch.device('cpu')):
        """Construct the two MLP branches and attach the CCA loss function."""
        super(DeepCCA, self).__init__()

        # One MLP per view; both are cast to float64 via .double().
        branch1 = MlpNet(layer_sizes1, input_size1)
        branch2 = MlpNet(layer_sizes2, input_size2)
        self.model1 = branch1.double()
        self.model2 = branch2.double()

        self.loss = cca_loss(outdim_size, use_all_singular_values, device).loss
Example #4
0
def create_model(layer_sizes1, layer_sizes2, input_size1, input_size2,
                 learning_rate, reg_par, outdim_size, use_all_singular_values):
    """
    Assemble the full two-view model.

    Each branch comes from build_mlp_net; the branch outputs are merged by
    concatenation and the whole model is compiled against the CCA loss.
    The per-branch structure can be swapped for a stronger network (CNN)
    without changing this function.
    """
    branches = [
        build_mlp_net(layer_sizes1, input_size1, reg_par),
        build_mlp_net(layer_sizes2, input_size2, reg_par),
    ]

    model = Sequential()
    model.add(Merge(branches, mode='concat'))

    model.compile(loss=cca_loss(outdim_size, use_all_singular_values),
                  optimizer=RMSprop(lr=learning_rate))
    return model
Example #5
0
def create_model(layer_sizes1, layer_sizes2, input_size1, input_size2,
                 learning_rate, reg_par, outdim_size, use_all_singular_values):
    """
    Build the complete two-view model.

    Sub-network structure is defined in build_mlp_net and can be replaced
    by a more powerful network (e.g. a CNN) without touching this code.
    """
    view1_net = build_mlp_net(layer_sizes1, input_size1, reg_par)
    view2_net = build_mlp_net(layer_sizes2, input_size2, reg_par)

    # Concatenate the two branch outputs into a single Sequential model.
    merged = Sequential()
    merged.add(Merge([view1_net, view2_net], mode='concat'))

    opt = RMSprop(lr=learning_rate)
    merged.compile(loss=cca_loss(outdim_size, use_all_singular_values),
                   optimizer=opt)

    return merged
Example #6
0
    def __init__(self):
        """Set up per-view autoencoders, 620->10 linear heads, the CCA loss,
        and the learnable mixing parameters.

        View 1 is 1750-dimensional and view 2 is 79-dimensional; both are
        encoded into a shared 620-dimensional representation.
        """
        super(Networks, self).__init__()

        # View-1 autoencoder: 1750 -> 620 -> 1750, single hidden layer.
        # (Deeper commented-out variants were removed from the original.)
        self.encoder1 = nn.Sequential(
            nn.Linear(1750, 620, bias=False),
            nn.ReLU(),
        )
        self.decoder1 = nn.Sequential(
            nn.Linear(620, 1750, bias=False),
            nn.ReLU(),
        )

        # View-2 autoencoder: 79 -> 620 -> 79.
        self.encoder2 = nn.Sequential(
            nn.Linear(79, 620, bias=False),
            nn.ReLU(),
        )
        self.decoder2 = nn.Sequential(
            nn.Linear(620, 79, bias=False),
            nn.ReLU(),
        )

        # Linear maps from the shared 620-d code down to 10 dimensions,
        # fed to the CCA loss (outdim 5).
        self.model1 = nn.Linear(620, 10)
        self.model2 = nn.Linear(620, 10)
        self.loss = cca_loss(5).loss

        # Learnable parameters; nn.Parameter used consistently (the
        # original mixed nn.Parameter and torch.nn.Parameter).
        self.weight = nn.Parameter(1.0e-4 * torch.ones(2500, 2500))
        self.beta1 = nn.Parameter(torch.Tensor([0.5]))
        self.beta2 = nn.Parameter(torch.Tensor([0.5]))
Example #7
0
def create_model(layer_sizes1, layer_sizes2, input_size1, input_size2,
                 learning_rate, reg_par, outdim_size, use_all_singular_values,
                 batch_size):
    """
    Build the whole two-view model with the Keras functional API.

    Each branch is an MLP from build_mlp_net; branch outputs are
    concatenated and the model is compiled against the CCA loss.

    NOTE(review): batch_size is accepted but never referenced in this
    body — confirm whether callers rely on it.
    """
    net1 = build_mlp_net(layer_sizes1, input_size1, reg_par)
    net2 = build_mlp_net(layer_sizes2, input_size2, reg_par)

    in1 = Input(shape=(input_size1, ))
    in2 = Input(shape=(input_size2, ))

    # Run each view through its branch, then merge by concatenation.
    merged = concatenate([net1(in1), net2(in2)])
    model = Model(inputs=[in1, in2], outputs=merged)

    opt = RMSprop(lr=learning_rate)
    model.compile(loss=cca_loss(outdim_size, use_all_singular_values),
                  optimizer=opt)

    return model