Code Example #1: multiple inputs, multiple targets, one loss function

# Imports and class head reconstructed to make the snippet runnable; the
# conv layers are taken from Code Examples #4 and #8, where they survive intact.
import torch.nn as nn
import torch.nn.functional as F

from torchsample.modules import ModuleTrainer
from torchsample import regularizers as regs
from torchsample import constraints as cons
from torchsample import callbacks as cbks


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x, y, z):  # three inputs; y and z are unused in this toy example
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1), F.log_softmax(x, dim=1), F.log_softmax(x, dim=1)


# with one loss function given
model = Network()
trainer = ModuleTrainer(model)

regularizers = [
    regs.L1Regularizer(1e-4, 'fc*'),
    regs.L2Regularizer(1e-5, 'conv*')
]
constraints = [
    cons.UnitNorm(5, 'batch', 'fc*'),
    cons.MaxNorm(5, 0, 'batch', 'conv*')
]
callbacks = [cbks.ReduceLROnPlateau(monitor='loss', verbose=1)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                callbacks=callbacks)  # assumed final argument; the snippet was truncated here
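
None of these snippets define their training data, and this first one also stops at compile. Below is a hypothetical stand-in (random tensors shaped like MNIST batches) that makes the examples runnable end to end, followed by a sketch of the fit call that presumably comes next, mirroring the list-of-inputs / list-of-targets pattern of Code Examples #3 and #5:

import torch as th

# Hypothetical stand-in data: MNIST-sized images with integer class labels.
x_train = th.randn(512, 1, 28, 28)
y_train = th.randint(0, 10, (512,))
x_test = th.randn(128, 1, 28, 28)
y_test = th.randint(0, 10, (128,))

# Assumed training call: three inputs, three targets (not in the original snippet).
trainer.fit([x_train, x_train, x_train],
            [y_train, y_train, y_train],
            num_epoch=3,
            batch_size=128,
            verbose=1)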
Code Example #2: callbacks, regularizers, constraints, initializers, and metrics

# Imports as in Code Example #1, plus:
from torchsample.callbacks import EarlyStopping, ReduceLROnPlateau
from torchsample.regularizers import L1Regularizer, L2Regularizer
from torchsample.constraints import UnitNorm
from torchsample.initializers import XavierUniform
from torchsample.metrics import CategoricalAccuracy


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=10),
    ReduceLROnPlateau(factor=0.5, patience=5)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(criterion='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics,
                callbacks=callbacks)  # trailing arguments assumed, reconstructed from Code Example #7
Code Example #3: single input, multiple targets

# Imports as in Code Example #1.
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1), F.log_softmax(x, dim=1)


# one loss function for multiple targets
model = Network()
trainer = ModuleTrainer(model)
trainer.compile(loss='nll_loss', optimizer='adadelta')

trainer.fit(x_train, [y_train, y_train],
            num_epoch=3,
            batch_size=128,
            verbose=1)
ypred1, ypred2 = trainer.predict(x_train)
print(ypred1.size(), ypred2.size())

eval_loss = trainer.evaluate(x_train, [y_train, y_train])
print(eval_loss)

# multiple loss functions
model = Network()
trainer = ModuleTrainer(model)
trainer.compile(loss=['nll_loss', 'nll_loss'], optimizer='adadelta')
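
The snippet stops after compile. With two loss functions, fit presumably passes one target per loss, exactly as in the single-loss case above (a sketch, not part of the original):

trainer.fit(x_train, [y_train, y_train],
            num_epoch=3,
            batch_size=128,
            verbose=1)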
Code Example #4: training without targets (unconstrained loss)

# Imports as in Code Example #1, plus:
import torch as th


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 1)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return th.abs(10 - x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='unconstrained_sum',
                optimizer='adadelta')

trainer.fit(x_train,
            num_epoch=3, 
            batch_size=128,
            verbose=1)

ypred = trainer.predict(x_train)
print(ypred.size())

eval_loss = trainer.evaluate(x_train, None)
print(eval_loss)
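
The 'unconstrained_sum' criterion is referenced only by name here. A plausible reading (an assumption, not the library's code) is a criterion that ignores targets and sums the outputs, letting fit() drive th.abs(10 - x) toward zero without any labels:

def unconstrained_sum(y_pred, y_true=None):
    # Hypothetical sketch of a target-free criterion: minimize the summed output.
    return y_pred.sum()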
Code Example #5: multiple inputs, single target

# Imports as in Code Example #1.
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x, y, z):  # three inputs; y and z are unused in this toy example
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss', optimizer='adadelta')

trainer.fit([x_train, x_train, x_train],
            y_train,
            val_data=([x_test, x_test, x_test], y_test),
            num_epoch=3,
            batch_size=128,
            verbose=1)

ypred = trainer.predict([x_train, x_train, x_train])
print(ypred.size())

eval_loss = trainer.evaluate([x_train, x_train, x_train], y_train)
print(eval_loss)
Code Example #6: validation data with a single regularizer

# Imports as in Code Example #1, with the regularizers module aliased as reg:
from torchsample import regularizers as reg


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        #x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=[reg.L1Regularizer(1e-4)])

trainer.fit(x_train,
            y_train,
            val_data=(x_test, y_test),
            num_epoch=3,
            batch_size=128,
            verbose=1)

ypred = trainer.predict(x_train)
print(ypred.size())
Code Example #7: a full compile with callbacks, regularizers, constraints, initializers, and metrics

# Imports as in Code Example #2.
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [EarlyStopping(patience=10),
             ReduceLROnPlateau(factor=0.5, patience=5)]
regularizers = [L1Regularizer(scale=1e-3, module_filter='conv*'),
                L2Regularizer(scale=1e-5, module_filter='fc*')]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(criterion='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics,
                callbacks=callbacks)  # assumed final argument; the snippet was truncated here
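
With compile closed, a training run in the style of Code Example #6 would presumably follow (a sketch, not in the original):

trainer.fit(x_train, y_train,
            val_data=(x_test, y_test),
            num_epoch=3,
            batch_size=128,
            verbose=1)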
Code Example #8: training from a DataLoader with fit_loader

# Imports as in Code Example #1.
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        #x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss', optimizer='adadelta')
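
# Assumed setup (not shown in the original snippet): build the DataLoader that
# fit_loader consumes, from the same x_train/y_train tensors used elsewhere.
from torch.utils.data import TensorDataset, DataLoader
train_loader = DataLoader(TensorDataset(x_train, y_train),
                          batch_size=128, shuffle=True)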

trainer.fit_loader(train_loader, num_epoch=3, verbose=1)

ypred = trainer.predict(x_train)
print(ypred.size())

eval_loss = trainer.evaluate(x_train, y_train)
print(eval_loss)

print(trainer.history)
#print(trainer.history['loss'])