Example #1
        # count samples whose predicted class matches the ground-truth label
        for j in range(batch_size):
            if np.argmax(model.out.predict_result[j]) == label[j]:
                val_acc += 1

    return val_acc / (batch_num * batch_size), val_loss / batch_num


if __name__ == "__main__":
    logpath = 'logs'
    if not os.path.exists(logpath):
        os.mkdir(logpath)
    logdir = os.path.join(logpath, 'LRELU0.001_log.txt')
    print_freq = 50
    val_freq = 200
    DL = DataLoader()
    images, labels = DL.load_mnist('./data/mnist')
    test_images, test_labels = DL.load_mnist('./data/mnist', 't10k')
    batch_size = 100
    model = Model(batch_size)
    # metric records
    train_loss_record = []
    train_acc_record = []
    val_loss_record = []
    val_acc_record = []
    with open(logdir, 'w') as logf:
        for epoch in range(20):
            # save record every epoch
            history = dict()
            history['train_acc'] = train_acc_record
            history['train_loss'] = train_loss_record
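
The per-sample loop in the validation code above can be collapsed into one vectorized NumPy call. A minimal sketch (batch_accuracy is a hypothetical helper, not part of the original code; it assumes predictions is a (batch_size, n_classes) score array and labels a (batch_size,) array of integer class ids):

import numpy as np

def batch_accuracy(predictions, labels):
    # argmax over the class axis picks the predicted class for every sample at once;
    # comparing against the labels and averaging gives the batch accuracy
    return np.mean(np.argmax(predictions, axis=1) == labels)

# usage: equivalent to the inner loop above, without per-sample Python overhead
# val_acc += batch_accuracy(model.out.predict_result, label) * batch_size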
Example #2
                        help='k shot during test')
    parser.add_argument('--n_category',
                        type=int,
                        default=40,
                        help='num class in training batch')
    parser.add_argument('--n_samples',
                        type=int,
                        default=4,
                        help='images for each class in training batch')
    parser.add_argument('--test_size',
                        type=int,
                        default=16,
                        help='batch size for test')

    arg_opt = parser.parse_args()
    dataloader = DataLoader(arg_opt.image_file)

    resnet = FeatMap(100)
    loss_fn = nn.SmoothL1Loss()
    optimizer = optim.SGD(resnet.parameters(),
                          lr=arg_opt.learning_rate,
                          momentum=0.9,
                          nesterov=True,
                          weight_decay=1e-8)
    schedule = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.9)

    pretrain_model = torch.load(
        "/home/hexianhao/Workspace/Python/Few_Shot_CAM/model.pkl")
    pretrain_dict = pretrain_model.state_dict()
    model_dict = resnet.state_dict()
    pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
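
The excerpt stops right after filtering the pretrained weights down to keys the target model also has. The usual completion of this partial-loading pattern merges the filtered dict back and calls load_state_dict; a minimal sketch, with load_matching_weights as a hypothetical helper that is not part of the original code:

import torch.nn as nn

def load_matching_weights(model, pretrained_state):
    # keep only entries whose names and shapes match the target model
    model_dict = model.state_dict()
    filtered = {k: v for k, v in pretrained_state.items()
                if k in model_dict and v.shape == model_dict[k].shape}
    # merge the surviving weights into the model's own state and load it back
    model_dict.update(filtered)
    model.load_state_dict(model_dict)
    return sorted(filtered)  # names of the layers that were actually restored

# usage with a toy model; resnet and pretrain_dict above follow the same pattern
net = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
restored = load_matching_weights(net, net.state_dict())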
Example #3
        self.weights = []
        self.bias = []
        for i in range(length):
            # numpy .npz archives store unnamed arrays under keys "arr_0", "arr_1", ...
            index = "arr_" + str(i)
            self.weights.append(file_weights[index])
            self.bias.append(file_bias[index])
            
def get_minibatchs(data, label, batch_size):
    # split data and labels into aligned minibatches of size batch_size
    minibatch_data = [data[k:k + batch_size] for k in range(0, len(data), batch_size)]
    minibatch_label = [label[k:k + batch_size] for k in range(0, len(label), batch_size)]
    return minibatch_data, minibatch_label

if __name__ == "__main__":
    
    batch_size = 10
    DL = DataLoader()
    train_data, train_label0 = DL.load_mnist('./data/mnist')
    test_data, test_label = DL.load_mnist('./data/mnist', 't10k')

    # scale pixels to [0, 1] and flatten each image into a 1x784 row vector
    train_images = [(im / 255).reshape(1, 784) for im in train_data]
    test_images = [(im / 255).reshape(1, 784) for im in test_data]
    # convert integer class labels to one-hot target vectors
    train_label = [vectorized_result(int(i)) for i in train_label0]
    train_img_batchs, train_label_batchs = get_minibatchs(train_images, train_label, batch_size)

    model = DNN([28 * 28, 64, 10])
    steps = 0
    eval_freq = 6000
    for epoch in range(50):

        for train_img_batch, train_res_batch in zip(train_img_batchs, train_label_batchs):
            # normal SGD train
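
get_minibatchs above slices the training set once, in a fixed order, so every epoch visits the batches identically. For SGD it is common to reshuffle between epochs; a minimal sketch (iter_minibatches is a hypothetical helper, not part of the original code):

import numpy as np

def iter_minibatches(data, label, batch_size, shuffle=True):
    # yield aligned (data, label) minibatches, in a fresh random order per call
    idx = np.arange(len(data))
    if shuffle:
        np.random.shuffle(idx)
    for k in range(0, len(idx), batch_size):
        batch = idx[k:k + batch_size]
        yield [data[i] for i in batch], [label[i] for i in batch]

# usage: one reshuffled pass over the training set per epoch
# for img_batch, res_batch in iter_minibatches(train_images, train_label, batch_size):
#     ...  # same training step as in the loop above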