# Build MyDataset instances
# train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
# valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)
train_data = BongosDataset(data_dir=train_dir, transform=train_transform)
valid_data = BongosDataset(data_dir=valid_dir, transform=valid_transform)

# Build DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5 model ============================
# net = LeNet(classes=2)
net = VGGNet(num_classes=2)
net.initialize_weights()
net = net.cuda()

# ============================ step 3/5 loss function ============================
criterion = nn.CrossEntropyLoss().cuda()

# ============================ step 4/5 optimizer ============================
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)                      # choose the optimizer
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)   # learning-rate decay policy

# ============================ step 5/5 training ============================
train_curve = list()
valid_curve = list()
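# --- Hedged sketch (not in the original snippet): a minimal training loop for
# --- "step 5/5", using the loaders, net, criterion, optimizer and scheduler built
# --- above. MAX_EPOCH and the loss bookkeeping are assumed names, not from the source.
for epoch in range(MAX_EPOCH):
    net.train()
    loss_mean = 0.
    for inputs, labels in train_loader:
        inputs, labels = inputs.cuda(), labels.cuda()
        outputs = net(inputs)

        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        loss_mean += loss.item()
    train_curve.append(loss_mean / len(train_loader))
    scheduler.step()  # advance the LR schedule once per epoch

    # validation pass
    net.eval()
    with torch.no_grad():
        val_loss = 0.
        for inputs, labels in valid_loader:
            inputs, labels = inputs.cuda(), labels.cuda()
            val_loss += criterion(net(inputs), labels).item()
        valid_curve.append(val_loss / len(valid_loader))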
elif (args.dataset == 'cifar100'):
    print("| Preparing CIFAR-100 dataset...")
    sys.stdout.write("| ")
    trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
    num_classes = 100

trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.bs, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.bs, shuffle=False, num_workers=2)

# Model
print('\n[Phase 2] : Model setup')
print('| Building net type [' + args.net + ']...')
if args.net == 'vgg16':
    net = VGGNet(num_classes, args.drop_p, False, args.feat_dim, args.conv == 5)
else:
    print('Error : Network should be [vgg16]')
    sys.exit(0)
net.init_weights()
net.to(device)

# Training
print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(args.num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))

optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, 1), momentum=0.9, weight_decay=args.wd)
elapsed_time = 0
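# --- Hedged sketch (not in the original snippet): an epoch loop matching the setup
# --- above. It assumes cf.learning_rate(base_lr, epoch) returns the scheduled rate
# --- for a given epoch, that train(...)/test(...) helpers exist elsewhere in the
# --- repo, and that the time module is imported.
for epoch in range(1, args.num_epochs + 1):
    start_time = time.time()

    # refresh the learning rate for this epoch
    for param_group in optimizer.param_groups:
        param_group['lr'] = cf.learning_rate(args.lr, epoch)

    train(net, trainloader, optimizer, device)   # assumed helper
    acc = test(net, testloader)                  # assumed helper

    elapsed_time += time.time() - start_time
    print('| Epoch %3d | test acc %.2f%% | elapsed %ds' % (epoch, acc, elapsed_time))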
    test_size=0.2, random_state=42)

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                         horizontal_flip=True, fill_mode="nearest")

model = VGGNet.build(width=64, height=64, depth=3, classes=len(lb.classes_))

INIT_LR = 0.05
EPOCHS = 70
BS = 32

print("[INFO] training network...")
opt = SGD(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // BS,
                        epochs=EPOCHS)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.bs, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.bs, shuffle=False, num_workers=2)

# Model
print('\n[Phase 2] : Model setup')
print('| Building net type [' + args.net + ']...')
if args.net == 'vgg16':
    # net = ResNet(34, num_classes)
    net = VGGNet(num_classes, args.drop_p, args.drop_last_only, args.feat_dim, args.conv == 5)
else:
    print('Error : Network should be [vgg16]')
    sys.exit(0)
net.init_weights()
net.to(device)

# Training
print('\n[Phase 3] : Training model')
print('| Training Epochs = ' + str(args.num_epochs))
print('| Initial Learning Rate = ' + str(args.lr))

optimizer = optim.SGD(net.parameters(), lr=cf.learning_rate(args.lr, 1), momentum=0.9,
                      weight_decay=args.wd)
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# augment the dataset
aug = ImageDataGenerator(rotation_range=50, width_shift_range=0.2,
                         height_shift_range=0.2, shear_range=0.3, zoom_range=0.4,
                         horizontal_flip=True, vertical_flip=True, fill_mode="nearest")

# build the model
model = VGGNet.build(width=img_dims[0], height=img_dims[1], depth=img_dims[2], classes=2)

# compile the model
opt = SGD(lr=lr, decay=lr / epochs)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# train the model
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=batch_size),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // batch_size,
                        epochs=epochs, verbose=1)

# save the model to disk
model.save(args.model)
    transform=transform_test)
num_classes = 100
testloader = torch.utils.data.DataLoader(testset, batch_size=args.bs, shuffle=False, num_workers=2)

# Model
print('\n[Phase 2] : Model setup')
print('| Building net type [' + args.net + ']...')
if args.net == 'resnet34':
    net = ResNet(34, num_classes, 0.5)
elif args.net == 'densenet':
    net = DenseNet3(100, num_classes, 12, 0.5, True, 0.2)
elif args.net == 'vgg16':
    net = VGGNet(num_classes, 0.5, False, 2048, True)
else:
    print('Error : Network should be one of [resnet34 / densenet / vgg16]')
    sys.exit(0)

checkpoint = torch.load(args.model_path)
net.load_state_dict(checkpoint['model'])
net.to(device)

avg = 0
for i in range(10):
    avg += test(net, testloader)
print(avg / 10)
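# --- Hedged sketch (not in the original snippet): one possible implementation of the
# --- test(net, testloader) helper called above, assuming it returns top-1 accuracy in percent.
def test(net, testloader):
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    return 100.0 * correct / total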
import os
import gc

import tensorflow as tf

from model.vggnet import VGGNet
from util import train

# Declare static variables
vgg11_log_dir = './vgg/vgg11'
vgg13_log_dir = './vgg/vgg13'
input_shape = (32, 32, 3)
n_class = 100

# Build VGG11
pretrained_vgg = VGGNet(11)
pretrained_vgg.build(input_shape, n_class)

# Pretrain VGG11
with pretrained_vgg.graph.as_default() as graph:
    loss = graph.get_tensor_by_name('loss:0')
    lr = tf.placeholder_with_default(1e-2, (), name='learning_rate')
    global_step = tf.train.get_or_create_global_step()
    with tf.variable_scope('optimizer'):
        tf.train.MomentumOptimizer(lr, 0.9).minimize(loss, global_step)

sess = tf.Session(graph=graph)
sess = train(sess, os.path.join(vgg11_log_dir, 'log'))

with pretrained_vgg.graph.as_default() as graph:
lb = LabelBinarizer()
labels = lb.fit_transform(labels)

# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                  test_size=0.2, random_state=42)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                         horizontal_flip=True, fill_mode="nearest")

# initialize the model
print("[INFO] compiling model...")
model = VGGNet.build(width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],
                     depth=IMAGE_DIMS[2], classes=len(lb.classes_))
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(
    aug.flow(trainX, trainY, batch_size=BS),
    validation_data=(testX, testY),
    steps_per_epoch=len(trainX) // BS,
    epochs=EPOCHS, verbose=1)

# save the model to disk
print("[INFO] serializing network...")
model.save(args["model"])
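# --- Hedged sketch (not in the original snippet): plotting the History object H
# --- returned by fit_generator above. The output filename "plot.png" is an assumption,
# --- and older Keras records "acc"/"val_acc" while newer versions use "accuracy"/"val_accuracy".
import matplotlib.pyplot as plt
import numpy as np

plt.style.use("ggplot")
plt.figure()
N = EPOCHS
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss / Accuracy")
plt.legend(loc="upper left")
plt.savefig("plot.png")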