Example #1
import numpy as np
import mnist
from model.network import Net

print('Loading data......')
num_classes = 10
train_images = mnist.train_images()  #[60000, 28, 28]
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()

print('Preparing data......')
# The raw MNIST arrays are uint8, so cast to float before normalizing
# (in-place subtraction would wrap around and in-place division would fail).
train_images = train_images.astype(np.float64)
train_images -= int(np.mean(train_images))
train_images /= int(np.std(train_images))
test_images = test_images.astype(np.float64)
test_images -= int(np.mean(test_images))
test_images /= int(np.std(test_images))
training_data = train_images.reshape(60000, 1, 28, 28)
training_labels = np.eye(num_classes)[train_labels]
testing_data = test_images.reshape(10000, 1, 28, 28)
testing_labels = np.eye(num_classes)[test_labels]

net = Net()
print('Training Lenet......')
net.train(training_data, training_labels, 32, 1, 'weights.pkl')
print('Testing Lenet......')
net.test(testing_data, testing_labels, 100)
print('Testing with pretrained weights......')
net.test_with_pretrained_weights(testing_data, testing_labels, 100,
                                 'pretrained_weights.pkl')
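The Net class imported from model.network is not shown in this example. The skeleton below is a hypothetical sketch inferred only from the call sites above (train(data, labels, batch_size, epochs, weights_file), test(data, labels, batch_size), test_with_pretrained_weights(data, labels, batch_size, weights_file)); the actual layer definitions and training logic live in the repository's model/network.py.

# Hypothetical skeleton of model.network.Net, inferred from the call sites above.
import pickle


class Net:
    def __init__(self):
        # LeNet-style stack: conv -> pool -> conv -> pool -> fully connected layers
        self.layers = []

    def train(self, data, labels, batch_size, epochs, weights_file):
        # Mini-batch training over `epochs` passes, then pickle the parameters to weights_file.
        ...

    def test(self, data, labels, batch_size):
        # Forward pass only; report accuracy on the held-out set.
        ...

    def test_with_pretrained_weights(self, data, labels, batch_size, weights_file):
        # Load pickled parameters from weights_file, then evaluate as in test().
        with open(weights_file, 'rb') as f:
            self.layers = pickle.load(f)
        ...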
Example #2
def train(args):
    # CONFIGS = yaml.load(open(args.config)) # deprecated, please set the configs in parse_args()

    # Set device
    if torch.cuda.is_available():
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device.strip()
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")  # Not suggested

    # Set save folder & logging config
    subfolder = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
    if not args.save_folder or (not os.path.isdir(args.save_folder)):
        print(
            "Warning: invalid value for 'save_folder'; falling back to the default './save_folder'."
        )
        save_folder = "./save_folder"
    else:
        save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    save_folder = os.path.join(save_folder, subfolder)
    os.mkdir(save_folder)
    # TODO: logging

    # Load Dataset
    trainloader = get_loader(args.train_gtfile,
                             batch_size=args.batch_size,
                             num_thread=args.num_workers)
    valloader = get_loader(args.val_gtfile,
                           batch_size=args.batch_size,
                           num_thread=args.num_workers)

    # Init Net
    model = Net(numAngle=args.num_angle,
                numRho=args.num_rho,
                backbone=args.backbone)
    if args.resume:
        model.load_state_dict(torch.load(args.resume, map_location="cpu"))
    model = torch.nn.DataParallel(model).to(device)

    # Optimizer
    optimizer = optim.Adam(model.parameters())

    # Loss
    criterion = torch.nn.CrossEntropyLoss()
    losses = AverageMeter()

    # Start Training
    model.train()
    iter_id = 0  # iteration id; starts from 1 inside the loop (avoids shadowing the builtin iter)
    for epoch in range(args.max_epoch):

        for batch in trainloader:
            start = time.time()
            iter_id += 1
            img_tensor, gt_tensor = batch
            # Move inputs and targets to the selected device before the forward pass.
            img_tensor, gt_tensor = img_tensor.to(device), gt_tensor.to(device)
            optimizer.zero_grad()

            # Forwarding
            preds = model(img_tensor)

            # Calculate Loss
            loss = criterion(preds, gt_tensor)
            loss.backward()
            optimizer.step()
            losses.update(loss.item(), args.batch_size)

            if iter_id % args.show_interval == 0:
                logging.info(
                    f"Training [{epoch}/{args.max_epoch}][{iter_id}] Loss:{losses.avg:.4f} Time:{time.time()-start:.1f}s"
                )

            if iter_id % args.val_interval == 0:
                pass  # TODO: run validation on valloader and checkpoint the model
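AverageMeter is used for loss bookkeeping above but is not defined in this excerpt. Below is a minimal sketch that matches the losses.update(val, n) / losses.avg call sites, in the style commonly used in PyTorch training scripts; the real helper may differ.

class AverageMeter:
    """Tracks a running average of a scalar (here, the training loss)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count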
Example #3
import numpy as np
import mnist
import pygame
import pygame.camera
from model.network import Net

num_classes = 10
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()

# Cast to float before normalizing: subtracting the mean from the raw uint8
# arrays would wrap around, so the astype() has to come first.
train_images = train_images.astype('float64')
train_images -= int(np.mean(train_images))
train_images /= int(np.std(train_images))
test_images = test_images.astype('float64')
test_images -= int(np.mean(test_images))
test_images /= int(np.std(test_images))
train_data = train_images.reshape(60000, 1, 28, 28)
train_set_labels = np.eye(num_classes)[train_labels]
testing_data = test_images.reshape(10000, 1, 28, 28)
testing_labels = np.eye(num_classes)[test_labels]

net = Net()
print('Training Lenet......')
net.train(train_data, train_set_labels, 32, 1, 'weights.pkl')
print('Testing Lenet......')
net.test(testing_data, testing_labels, 100)
print('Testing with pretrained weights......')
net.test_with_pretrained_weights(testing_data, testing_labels, 100,
                                 'pretrained_weights.pkl')

# camera part
pygame.camera.init()
RANGE = 300

# Open the first detected camera; the 640x480 capture size is an assumption.
cam = pygame.camera.Camera(pygame.camera.list_cameras()[0], (640, 480))


def capture_image():
    cam.start()
    image = cam.get_image()
    cam.stop()
    return image
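The original snippet stops after grabbing a frame. The sketch below shows one plausible way (assumed, not from the source) to turn a captured pygame surface into the 1x1x28x28 normalized array that Net expects; the grayscale conversion and 28x28 resize are guesses at the intended pipeline.

# Hypothetical helper: convert a captured pygame surface into a network input.
def surface_to_input(surface):
    small = pygame.transform.scale(surface, (28, 28))   # downsample to MNIST size
    rgb = pygame.surfarray.array3d(small)                # (width, height, 3) uint8
    gray = rgb.mean(axis=2).T                            # average channels, transpose to (row, col)
    gray = gray.astype('float64')
    gray -= np.mean(gray)
    gray /= (np.std(gray) + 1e-8)                        # normalize like the training data
    return gray.reshape(1, 1, 28, 28)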
Example #4
    print("Let's use", torch.cuda.device_count(), "GPUs!")
model = Net(num_classes=n_classes)
# Pretrained checkpoint from a previous run; now training on all images for 2 epochs.
# model.load_state_dict(torch.load('./epoch_5_val_loss_7.03_auc_0.844.pth'))
model = nn.DataParallel(model).to(device)


optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
criterion = torch.nn.CrossEntropyLoss()

train_loss, val_loss = [], []

for epoch in range(epochs):
    print('Epoch {}/{}'.format(epoch, epochs - 1))
    print('-' * 10)
    model.train()
    running_loss = 0
    tk0 = tqdm(train_loader, total=int(len(train_loader)))
    for im, labels in tk0:
        inputs = im["image"].to(device, dtype=torch.float)
        labels = labels.to(device, dtype=torch.long)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        tk0.set_postfix(loss=(loss.item()))

    epoch_loss = running_loss / len(train_loader)  # len(train_loader) is the number of batches
    train_loss.append(epoch_loss)
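    # --- Hypothetical validation pass (not part of the original excerpt) ---
    # val_loss is declared above but never filled; a minimal per-epoch validation
    # in the same style might look like this. 'val_loader' is an assumed loader
    # built the same way as train_loader.
    model.eval()
    running_val_loss = 0.0
    with torch.no_grad():
        for im, labels in val_loader:
            inputs = im["image"].to(device, dtype=torch.float)
            labels = labels.to(device, dtype=torch.long)
            outputs = model(inputs)
            running_val_loss += criterion(outputs, labels).item()
    epoch_val_loss = running_val_loss / len(val_loader)
    val_loss.append(epoch_val_loss)
    print('val loss: {:.4f}'.format(epoch_val_loss))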