import csv

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

import resnet_v2

# MODEL_PATH, RAWDATA_PATH, OBSERVE_PATH, SUBMISSION_PATH, ENSEMBLE_NUM,
# LEARNING_RATE, BATCH_SIZE, WORKERS and the helpers Data, Split_train_val,
# Normalization, Fit, DecoderBlockV2, ConvRelu are defined elsewhere in this repo.


def load_all_models(n_numbers):
    """Load the n_numbers ensemble member checkpoints saved during training."""
    all_models = list()
    for i in range(n_numbers):
        filename = MODEL_PATH + '/' + str(i + 1) + '_model.pth'
        model = resnet_v2.resnet34().cuda()
        model.load_state_dict(torch.load(filename))
        model.eval()  # inference mode: freeze batch-norm statistics and dropout
        all_models.append(model)
        print('>loaded %s' % filename)
    return all_models
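# load_all_models() expects Fit() to have checkpointed each ensemble member as
# MODEL_PATH/<i+1>_model.pth. Fit() itself is not shown in this section, so the
# helper below is only a minimal sketch of the matching save step; the name
# save_member_checkpoint and its placement inside Fit() are assumptions, not
# taken from the source.
def save_member_checkpoint(i, model):
    """Persist ensemble member i (0-based) where load_all_models() will look."""
    filename = MODEL_PATH + '/' + str(i + 1) + '_model.pth'
    torch.save(model.state_dict(), filename)
    print('>saved %s' % filename)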
def Ensemble_():
    print("Reading Raw File...")
    raw = Data()
    raw.Read(RAWDATA_PATH)
    # X_test/y_test are held out here but not used below.
    X_train, X_test, y_train, y_test = Split_train_val(raw.X, raw.y, train_rate=0.8)

    print("Normalization...")
    X_train = Normalization(X_train)  # X /= 255.
    print("Standardization...")
    # X_train, _mean, _std = Standardization(X_train)

    for i in range(ENSEMBLE_NUM):
        # Re-split the training data so each ensemble member sees a different
        # training/validation partition.
        X_train_en, X_val, y_train_en, y_val = Split_train_val(X_train, y_train, train_rate=0.8)
        train_set, val_set = TensorDataset(X_train_en, y_train_en), TensorDataset(X_val, y_val)

        print("Building Model...")
        model = resnet_v2.resnet34().cuda()
        loss = nn.CrossEntropyLoss()  # the criterion combines nn.LogSoftmax() and nn.NLLLoss()
        loss = loss.cuda()
        optimizer = torch.optim.Adamax(model.parameters(), lr=LEARNING_RATE)

        print("Fitting Model...")
        loss_history, acc_history = Fit(i, train_set, val_set, model, loss, optimizer,
                                        batch_size=BATCH_SIZE, epochs=128)

    ob = Data()
    ob.Read(OBSERVE_PATH)
    ob.X = Normalization(ob.X, 'Divide', 'None', 'None', True)
    ob_set = TensorDataset(ob.X, ob.y)  # ob.y holds sample IDs, not labels
    ob_loader = DataLoader(ob_set, batch_size=BATCH_SIZE, shuffle=False, num_workers=WORKERS)

    stack_pred = []
    members = load_all_models(ENSEMBLE_NUM)
    for model in members:
        pred = []
        with torch.no_grad():  # inference only: no gradient buffers needed
            for data in ob_loader:
                ob_pred = model(data[0].cuda())
                pred.append(np.argmax(ob_pred.cpu().data.numpy(), axis=1))
        stack_pred.append(np.hstack(pred))

    # Voting: for each sample, take the majority class across ensemble members.
    submission = [['id', 'label']]
    stack_pred = np.asarray(stack_pred)
    for i in range(np.shape(stack_pred)[1]):
        pred = np.argmax(np.bincount(stack_pred[:, i]))
        submission.append([i, pred])

    print("Submission...")
    with open(SUBMISSION_PATH, 'w') as submissionFile:
        writer = csv.writer(submissionFile)
        writer.writerows(submission)
    print('Writing Complete!')
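# The voting step above relies on np.bincount counting label occurrences per
# column of stack_pred (shape: members x samples). The demo below is a toy
# illustration with made-up predictions from three members over four samples;
# the function name and values are hypothetical, not from the source.
def _voting_demo():
    """Toy illustration of the majority-vote step used in Ensemble_()."""
    votes = np.asarray([[0, 1, 2, 1],
                        [0, 1, 1, 1],
                        [2, 1, 2, 0]])
    # Per sample (column): count votes for each class, keep the most frequent.
    majority = [np.argmax(np.bincount(votes[:, i])) for i in range(votes.shape[1])]
    assert majority == [0, 1, 2, 1]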
def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False):
    """
    :param num_classes: number of output channels of the final 1x1 convolution
    :param num_filters: base width of the decoder
    :param pretrained:
        False - no pre-trained network is used
        True  - encoder is pre-trained with resnet34
    :param is_deconv:
        False - bilinear interpolation is used in decoder
        True  - deconvolution is used in decoder
    """
    super().__init__()
    self.num_classes = num_classes
    self.pool = nn.MaxPool2d(2, 2)
    self.encoder = resnet_v2.resnet34(pretrained=pretrained)
    # self.relu = nn.ReLU(inplace=True)
    self.relu = nn.PReLU()

    # Encoder: reuse the ResNet-34 stem and its four residual stages.
    self.conv1 = nn.Sequential(self.encoder.conv1,
                               self.encoder.bn1,
                               self.encoder.relu,
                               self.pool)
    self.conv2 = self.encoder.layer1
    self.conv3 = self.encoder.layer2
    self.conv4 = self.encoder.layer3
    self.conv5 = self.encoder.layer4

    # Decoder: each block takes the previous decoder output concatenated with
    # the matching encoder feature map (hence the 512/256/128/64 + ... inputs).
    self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)
    self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
    self.dec4 = DecoderBlockV2(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
    self.dec3 = DecoderBlockV2(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
    self.dec2 = DecoderBlockV2(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
    self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
    self.dec0 = ConvRelu(num_filters, num_filters)
    self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
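# The forward pass of this U-Net-style model is not included in this section.
# The sketch below is an assumption, reconstructed purely from the channel
# arithmetic of the DecoderBlockV2 definitions above (e.g. dec5 expects
# 512 + num_filters * 8 input channels, i.e. conv5 concatenated with center);
# treat it as illustrative rather than the author's actual method.
def forward(self, x):
    conv1 = self.conv1(x)
    conv2 = self.conv2(conv1)
    conv3 = self.conv3(conv2)
    conv4 = self.conv4(conv3)
    conv5 = self.conv5(conv4)

    center = self.center(self.pool(conv5))           # bottleneck: num_filters * 8 channels out

    dec5 = self.dec5(torch.cat([center, conv5], 1))  # 512 + num_filters * 8 channels in
    dec4 = self.dec4(torch.cat([dec5, conv4], 1))    # 256 + num_filters * 8 channels in
    dec3 = self.dec3(torch.cat([dec4, conv3], 1))    # 128 + num_filters * 8 channels in
    dec2 = self.dec2(torch.cat([dec3, conv2], 1))    # 64 + num_filters * 2 channels in
    dec1 = self.dec1(dec2)
    dec0 = self.dec0(dec1)
    return self.final(dec0)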