Example #1
def get_instance_segmentation_model(bone='resnet50', attention=False):
    # Build a backbone for Mask R-CNN and record its feature depth in
    # out_channels. When the attention variant is requested, the
    # ImageNet-pretrained weights are not loaded (pretrained=not attention).
    if bone == 'mobilenet_v2':
        backbone = models.mobilenet_v2(pretrained=not attention,
                                       att=attention).features
        backbone.out_channels = 1280
    elif bone == 'googlenet':
        backbone = models.googlenet(pretrained=not attention)
        backbone.out_channels = 1024
    elif bone == 'densenet121':
        backbone = models.densenet121(pretrained=not attention,
                                      att=attention).features
        backbone.out_channels = 1024
    elif bone == 'resnet50':
        backbone = models.resnet50(pretrained=not attention, att=attention)
        backbone.out_channels = 2048
    elif bone == 'shufflenet_v2_x1_0':
        backbone = models.shufflenet_v2_x1_0(pretrained=not attention)
        backbone.out_channels = 1024
    elif bone == 'inception_v3':
        # 'InceptionOutputs' object has no attribute 'values'
        backbone = models.inception_v3()
        backbone.out_channels = 2048
    elif bone == 'squeezenet1_0':
        backbone = models.squeezenet1_0(pretrained=not attention).features
        backbone.out_channels = 512

    anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512), ),
                                       aspect_ratios=((0.5, 1.0, 2.0), ))
    roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
                                                    output_size=7,
                                                    sampling_ratio=2)
    model = MaskRCNN(backbone,
                     num_classes=2,
                     rpn_anchor_generator=anchor_generator,
                     box_roi_pool=roi_pooler)
    return model
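A minimal smoke test for the factory above; this is a sketch that assumes the project's custom `models` module (with its `att` keyword) is importable and a torchvision version that still accepts the integer `featmap_names=[0]` (newer releases expect the string `'0'`):

import torch

model = get_instance_segmentation_model(bone='resnet50', attention=False)
model.eval()
with torch.no_grad():
    # Mask R-CNN takes a list of 3xHxW float tensors and, in eval mode,
    # returns one dict per image with 'boxes', 'labels', 'scores' and 'masks'.
    outputs = model([torch.rand(3, 480, 640)])
print(outputs[0].keys())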
Example #2
def get_model(model_name):
    if model_name == 'VGG19':
        net = VGG('VGG19')
    elif model_name == 'Resnet18':
        net = ResNet18()
    elif model_name == 'alexnet':
        net = alexnet(pretrained=False, progress=True)
    elif model_name == 'inception_resnet_v1':
        net = inception_resnet_v1(pretrained=False, progress=True)
    elif model_name == 'inception_v3':
        net = inception_v3(pretrained=False, progress=True)
    elif model_name == 'googlenet':
        net = googlenet(pretrained=False, progress=True)
    elif model_name == 'densenet':
        net = densenet121(pretrained=False, progress=True)
    elif model_name == 'mynet':
        net = mynet(in_channels=3, num_classes=7)
    elif model_name == 'mobilenetv2':
        net = mobilenetv2()
    elif model_name == 'shufflenetv2':
        net = shufflenetv2()
    elif model_name == 'PeleeNet':
        net = Peleenet()
    elif model_name == 'resnet_cut':
        net = ResNet18_cut()
    elif model_name == 'peleenet_doubleloss':
        net = Peleenet_doubleloss()
    elif model_name == 'resnet_cut_doubleloss':
        net = ResNet18_cut_center()
    return net
Example #3
def loader1():
    WIDTH = 60
    HEIGHT = 80
    LR = 1e-3
    EPOCHS = 10
    MODEL_NAME = 'python-drives-{}-{}-{}-epochs.model'.format(
        LR, 'alexnetv2', EPOCHS)
    model = googlenet(WIDTH, HEIGHT, 3, LR, output=3, model_name=MODEL_NAME)
    model.load('/home/nayangupta824/python_drives/Final_training_data/' +
               MODEL_NAME)
    return model
Example #4
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'resnet101':
        net = torchvision.models.resnet101(num_classes=num_classes)
    elif dnn == 'resnet152':
        net = torchvision.models.resnet152(num_classes=num_classes)
    elif dnn == 'densenet121':
        net = torchvision.models.densenet121(num_classes=num_classes)
    elif dnn == 'densenet161':
        net = torchvision.models.densenet161(num_classes=num_classes)
    elif dnn == 'densenet201':
        net = torchvision.models.densenet201(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        #net = models.AlexNet()
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        # model = lstm(embedding_dim=args.hidden_size, num_steps=args.num_steps, batch_size=args.batch_size,
        #              vocab_size=vocab_size, num_layers=args.num_layers, dp_keep_prob=args.dp_keep_prob)
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'],
                          batch_size=kwargs['batch_size'])

    else:
        errstr = 'Unsupported neural network: %s' % dnn
        logger.error(errstr)
        # Raising a bare string is a TypeError in Python 3; wrap it in an exception.
        raise ValueError(errstr)
    return net, ext
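Hypothetical calls to `create_net` above; the illustrative `vocab_size`/`batch_size` values are assumptions, and only the LSTM variants read `kwargs`:

# CIFAR-style ResNet defined in the project's models package.
net, ext = create_net(num_classes=10, dnn='resnet20')

# Word-level LSTM; requires the extra keyword arguments.
net, ext = create_net(num_classes=10000, dnn='lstm',
                      vocab_size=10000, batch_size=32)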
Example #5
def initModel(args):
    # Setup Model
    if args.arch == "resnet50":
        model = models.resnet50(pretrained=True,
                                num_classes=18,
                                scale=args.scale)
    elif args.arch == "resnet101":
        model = models.resnet101(pretrained=True,
                                 num_classes=18,
                                 scale=args.scale)
    elif args.arch == "resnet152":
        model = models.resnet152(pretrained=True,
                                 num_classes=18,
                                 scale=args.scale)
    elif args.arch == "vgg16":
        model = models.vgg16(pretrained=True, num_classes=18)
    elif args.arch == "googlenet":
        model = models.googlenet(pretrained=True, num_classes=18)

    for param in model.parameters():
        param.requires_grad = False

    #model = model.cuda()

    if args.resume is not None:
        if os.path.isfile(args.resume):
            print(("Loading model and optimizer from checkpoint '{}'".format(
                args.resume)))
            checkpoint = torch.load(args.resume,
                                    map_location=torch.device('cpu'))

            # model.load_state_dict(checkpoint['state_dict'])
            d = collections.OrderedDict()
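            # Strip the 'module.' prefix that nn.DataParallel adds when saving state_dicts.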
            for key, value in list(checkpoint['state_dict'].items()):
                tmp = key[7:]
                d[tmp] = value
            model.load_state_dict(d)

            print(("Loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch'])))
            sys.stdout.flush()
        else:
            print(("No checkpoint found at '{}'".format(args.resume)))
            sys.stdout.flush()

    model.eval()

    summary(model, (3, 640, 640))

    return model
Example #6
def load_model_quantized(model_name, device, dataset, num_labels):
    pretrained = (dataset == "imagenet")
    if model_name == "mobilenet":
        model = models.mobilenet_v2(pretrained=pretrained, progress=True, quantize=False)
    elif model_name == "resnet50":
        model = torchvision.models.quantization.resnet50(pretrained=pretrained, progress=True, quantize=False)
    elif model_name == "resnet50_ptcv":
        model = ptcv.qresnet50_ptcv(pretrained=pretrained)
    elif model_name == "inceptionv3":
        model = models.inception_v3(pretrained=pretrained, progress=True, quantize=False)
    elif model_name == "googlenet":
        model = models.googlenet(pretrained=pretrained, progress=True, quantize=False)
    elif model_name == "shufflenetv2":
        model = models.shufflenet_v2_x1_0(pretrained=pretrained, progress=True, quantize=False)
    elif model_name == 'dlrm':
        # These arguments are hardcoded to the defaults from DLRM (matching the pretrained model).
        model = DLRM_Net(16,
                         np.array([1460, 583, 10131227, 2202608, 305, 24, 12517, 633, 3, 93145, 5683,
                                   8351593, 3194, 27, 14992, 5461306, 10, 5652, 2173, 4, 7046547,
                                   18, 15, 286181, 105, 142572], dtype=np.int32),
                         np.array([13, 512, 256,  64,  16]),
                         np.array([367, 512, 256,   1]),
                         'dot', False, -1, 2, True, 0., 1, False, 'mult', 4, 200, False, 200)
        ld_model = torch.load('data/dlrm.pt')
        model.load_state_dict(ld_model["state_dict"])
    elif model_name == 'bert':
        config = AutoConfig.from_pretrained(
            'bert-base-cased',
            num_labels=num_labels,
            finetuning_task='mnli',
        )
        model = BertForSequenceClassification.from_pretrained('data/bert.bin', from_tf=False, config=config)
    else:
        raise ValueError("Unsupported model type")

    if dataset == "cifar10":
        ld_model = torch.load(f"data/{model_name}.pt")
        model.load_state_dict(ld_model)

    model = model.to(device)
    return model
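A hypothetical call to `load_model_quantized`, assuming `models` refers to `torchvision.models.quantization` (which accepts the `quantize` flag used above) so that the ImageNet branch loads pretrained weights:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = load_model_quantized('googlenet', device, 'imagenet', num_labels=2)
model.eval()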
Example #7
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        #net = models.__dict__['resnet50'](num_classes=num_classes)
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'],
                          batch_size=kwargs['batch_size'])

    else:
        errstr = 'Unsupported neural network: %s' % dnn
        logger.error(errstr)
        # Raising a bare string is a TypeError in Python 3; wrap it in an exception.
        raise ValueError(errstr)
    return net, ext
Example #8
import random

WIDTH = 240
HEIGHT = 120
LR = 1e-3
EPOCHS = 1
MODEL_NAME = 'pygta5-car-fast-{}-{}-{}-epochs-300K-data.model'.format(
    LR, 'alexnetv2', EPOCHS)

t_time = 0.09

w = [1, 0, 0]
a = [0, 1, 0]
d = [0, 0, 1]

model = googlenet(HEIGHT, WIDTH, 1, LR, output=5)
model.load('TlustoNETv1.1.tfl')


# Hold a key down for hold_time seconds, then release it.
def hold_key(hold_time, key):
    start = time.time()
    while time.time() - start < hold_time:
        pyautogui.keyDown(key)
    pyautogui.keyUp(key)


def main():
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)
Example #9
player_contents = json.loads(
    urllib.request.urlopen("http://localhost:8080/api/player").read().decode(
        'utf-8'))
world_info = json.loads(
    urllib.request.urlopen("http://localhost:8080/api/world").read().decode(
        'utf-8'))
world_contents = json.loads(
    urllib.request.urlopen(
        "http://localhost:8080/api/world/objects?distance=200").read().decode(
            'utf-8'))
Score = 0
WIDTH = 480
HEIGHT = 270
LR = 1e-3
EPOCHS = 30
model = googlenet(WIDTH, HEIGHT, 3, LR, output=9)
while True:
    file_name = 'training_data-{}.npy'.format(starting_value)

    if os.path.isfile(file_name):
        print('File exists, moving along', starting_value)
        starting_value += 1
    else:
        print('File does not exist, starting fresh!', starting_value)

        break


def GetData():
    player_contents = json.loads(
        urllib.request.urlopen(
Example #10
    pyautogui.hotkey('d')


def strafe_right():
    pyautogui.hotkey('c')


def strafe_left():
    pyautogui.hotkey('z')


def no_keys():
    pass


model = googlenet(WIDTH, HEIGHT, 1, LR, output=7)
MODEL_NAME = 'parrotBeebop-googlenet_color-0.0001-LR-1.model'

model.load(MODEL_NAME)

print('We have loaded a previous model!!!!')


def main():

    new_hook = pyxhook.HookManager()
    new_hook.KeyDown = OnKeyPress
    new_hook.HookKeyboard()
    new_hook.start()
    screen = test()
    global key
Example #11
wdl = 0
sal = 0
sdl = 0
nkl = 0

w = [1,0,0,0,0,0,0,0,0]
s = [0,1,0,0,0,0,0,0,0]
a = [0,0,1,0,0,0,0,0,0]
d = [0,0,0,1,0,0,0,0,0]
wa = [0,0,0,0,1,0,0,0,0]
wd = [0,0,0,0,0,1,0,0,0]
sa = [0,0,0,0,0,0,1,0,0]
sd = [0,0,0,0,0,0,0,1,0]
nk = [0,0,0,0,0,0,0,0,1]

model = googlenet(WIDTH, HEIGHT, 3, LR, output=9, model_name=MODEL_NAME)

if LOAD_MODEL:
    model.load(PREV_MODEL)
    print('We have loaded a previous model!!!!')
    

# iterates through the training files


for e in range(EPOCHS):
    #data_order = [i for i in range(1,FILE_I_END+1)]
    data_order = [i for i in range(1,FILE_I_END+1)]
    shuffle(data_order)
    for count,i in enumerate(data_order):
        
Example #12
    PressKey(D)
    ReleaseKey(W)
    ReleaseKey(A)


def no_keys():
    if random.randrange(0, 3) == 1:
        PressKey(W)
    else:
        ReleaseKey(W)
    ReleaseKey(A)
    ReleaseKey(S)
    ReleaseKey(D)


model = googlenet(WIDTH, HEIGHT, 3, LR, output = 9)
MODEL_NAME = ''
model.load(MODEL_NAME)

print('We have loaded a previous model!!!!')

def main():
    last_time = time.time()
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    paused = False
    mode_choice = 0

    screen = grab_screen(region = (0, 40, GAME_WIDTH, GAME_HEIGHT + 40))
Example #13
# train_model.py

import numpy as np
from models import otherception3 as googlenet
import tensorflow as tf
from random import shuffle

FILE_I_END = 1860

WIDTH = 240
HEIGHT = 120
LR = 1e-3
EPOCHS = 350

model = googlenet(HEIGHT, WIDTH, 1, LR, output=5, model_name='TlustoNETv1.1')

for e in range(EPOCHS):

    train_data = np.load('training_data_grey_shuffled.npy')
    shuffle(train_data)

    train = train_data[:-50]
    test = train_data[-50:]

    X = np.array([i[0] for i in train]).reshape(-1, HEIGHT, WIDTH, 1)
    Y = [i[1] for i in train]

    test_x = np.array([i[0] for i in test]).reshape(-1, HEIGHT, WIDTH, 1)
    test_y = [i[1] for i in test]

    model.fit({'input': X}, {'targets': Y},
Example #14
def main(args):
    if args.checkpoint == '':
        args.checkpoint = "checkpoints/ic17_%s_bs_%d_ep_%d" % (
            args.arch, args.batch_size, args.n_epoch)
    if args.pretrain:
        if 'synth' in args.pretrain:
            args.checkpoint += "_pretrain_synth"
        else:
            args.checkpoint += "_pretrain_ic17"

    print(('checkpoint path: %s' % args.checkpoint))
    print(('init lr: %.8f' % args.lr))
    print(('schedule: ', args.schedule))
    sys.stdout.flush()

    if not os.path.isdir(args.checkpoint):
        os.makedirs(args.checkpoint)

    writer = SummaryWriter(args.checkpoint)

    kernel_num = 18
    start_epoch = 0

    data_loader = IC17Loader(is_transform=True, img_size=args.img_size)
    train_loader = torch.utils.data.DataLoader(data_loader,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0,
                                               drop_last=False,
                                               pin_memory=True)

    if args.arch == "resnet50":
        model = models.resnet50(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet101":
        model = models.resnet101(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet152":
        model = models.resnet152(pretrained=True, num_classes=kernel_num)
    elif args.arch == "vgg16":
        model = models.vgg16(pretrained=False, num_classes=kernel_num)
    elif args.arch == "googlenet":
        model = models.googlenet(pretrained=True, num_classes=kernel_num)

    model = torch.nn.DataParallel(model).cuda()
    model.train()

    summary(model, (3, 640, 640))

    if hasattr(model.module, 'optimizer'):
        optimizer = model.module.optimizer
    else:
        # NOTE: the momentum value here has a large effect on training; with momentum=0.99 the cross-entropy loss fails to converge.
        #optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    title = 'icdar2017'
    if args.pretrain:
        print('Using pretrained model.')
        assert os.path.isfile(
            args.pretrain), 'Error: no pretrained checkpoint file found!'
        checkpoint = torch.load(args.pretrain)
        model.load_state_dict(checkpoint['state_dict'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.'])
    elif args.resume:
        print('Resuming from checkpoint.')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint file found!'
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        # optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        print('Training from scratch.')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(
            ['Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.'])

    for epoch in range(start_epoch, args.n_epoch):
        adjust_learning_rate(args, optimizer, epoch)
        print(('\nEpoch: [%d | %d] LR: %f' %
               (epoch + 1, args.n_epoch, optimizer.param_groups[0]['lr'])))

        train_loss, train_te_acc, train_te_iou = train(train_loader, model,
                                                       dice_loss, optimizer,
                                                       epoch, writer)
        if epoch % 4 == 3:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'lr': args.lr,
                    'optimizer': optimizer.state_dict(),
                },
                checkpoint=args.checkpoint,
                filename='checkpoint_%d.pth' % epoch)

        logger.append([
            optimizer.param_groups[0]['lr'], train_loss, train_te_acc,
            train_te_iou
        ])
    logger.close()
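`adjust_learning_rate` is not part of this excerpt; a common implementation, sketched here purely as an assumption, decays the learning rate at the epoch milestones listed in `args.schedule`:

def adjust_learning_rate(args, optimizer, epoch):
    # Hypothetical helper: multiply the base LR by 0.1 for every milestone
    # in args.schedule that the current epoch has already passed.
    lr = args.lr * (0.1 ** sum(epoch >= milestone for milestone in args.schedule))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr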
Example #15
def test(args):
    data_loader = IC17TestLoader(long_size=args.long_size)
    test_loader = torch.utils.data.DataLoader(data_loader,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=2,
                                              drop_last=True)

    # Setup Model
    if args.arch == "resnet50":
        model = models.resnet50(pretrained=True,
                                num_classes=18,
                                scale=args.scale)
    elif args.arch == "resnet101":
        model = models.resnet101(pretrained=True,
                                 num_classes=18,
                                 scale=args.scale)
    elif args.arch == "resnet152":
        model = models.resnet152(pretrained=True,
                                 num_classes=18,
                                 scale=args.scale)
    elif args.arch == "vgg16":
        model = models.vgg16(pretrained=True, num_classes=18)
    elif args.arch == "googlenet":
        model = models.googlenet(pretrained=True, num_classes=18)

    for param in model.parameters():
        param.requires_grad = False

    model = model.cuda()

    if args.resume is not None:
        if os.path.isfile(args.resume):
            print(("Loading model and optimizer from checkpoint '{}'".format(
                args.resume)))
            checkpoint = torch.load(args.resume)

            # model.load_state_dict(checkpoint['state_dict'])
            d = collections.OrderedDict()
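            # Strip the 'module.' prefix that nn.DataParallel adds when saving state_dicts.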
            for key, value in list(checkpoint['state_dict'].items()):
                tmp = key[7:]
                d[tmp] = value
            model.load_state_dict(d)

            print(("Loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch'])))
            sys.stdout.flush()
        else:
            print(("No checkpoint found at '{}'".format(args.resume)))
            sys.stdout.flush()

    model.eval()

    total_frame = 0.0
    total_time = 0.0
    for idx, (org_img, img) in enumerate(test_loader):
        print(('progress: %d / %d' % (idx, len(test_loader))))
        sys.stdout.flush()

        img = img.cuda()
        org_img = org_img.numpy().astype('uint8')[0]
        text_box = org_img.copy()

        torch.cuda.synchronize()
        start = time.time()

        cls_logits, link_logits = model(img)

        outputs = torch.cat((cls_logits, link_logits), dim=1)
        shape = outputs.shape
        pixel_pos_scores = F.softmax(outputs[:, 0:2, :, :], dim=1)[:, 1, :, :]
        # pixel_pos_scores=torch.sigmoid(outputs[:,1,:,:])
        # FIXME: the dimension should be changed
        link_scores = outputs[:, 2:, :, :].view(shape[0], 2, 8, shape[2],
                                                shape[3])
        link_pos_scores = F.softmax(link_scores, dim=1)[:, 1, :, :, :]

        mask, bboxes = to_bboxes(org_img,
                                 pixel_pos_scores.cpu().numpy(),
                                 link_pos_scores.cpu().numpy())

        score = pixel_pos_scores[0, :, :]
        score = score.data.cpu().numpy().astype(np.float32)

        torch.cuda.synchronize()
        end = time.time()
        total_frame += 1
        total_time += (end - start)
        print(('fps: %.2f' % (total_frame / total_time)))
        sys.stdout.flush()

        for bbox in bboxes:
            cv2.drawContours(text_box, [bbox.reshape(4, 2)], -1, (0, 255, 0),
                             2)

        image_name = data_loader.img_paths[idx].split('/')[-1].split('.')[0]
        write_result_as_txt(image_name, bboxes, 'outputs/submit_ic17/')

        text_box = cv2.resize(text_box, (org_img.shape[1], org_img.shape[0]))
        score_s = cv2.resize(
            np.repeat(score[:, :, np.newaxis] * 255, 3, 2).astype(np.uint8),
            (org_img.shape[1], org_img.shape[0]))
        mask = cv2.resize(
            np.repeat(mask[:, :, np.newaxis], 3, 2).astype(np.uint8),
            (org_img.shape[1], org_img.shape[0]))

        link_score = (link_pos_scores[0, 0, :, :]).cpu().numpy() * (
            score > 0.5).astype(np.float32)  # np.float was removed in NumPy 1.24
        link_score = cv2.resize(
            np.repeat(link_score[:, :, np.newaxis] * 255, 3,
                      2).astype(np.uint8),
            (org_img.shape[1], org_img.shape[0]))
        debug(idx, data_loader.img_paths,
              [[text_box, score_s], [link_score, mask]], 'outputs/vis_ic17/')

    return
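    # NOTE: the early return above skips the result-packaging and eval commands below.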
    cmd = 'cd %s;zip -j %s %s/*' % ('./outputs/', 'submit_ic17.zip',
                                    'submit_ic17')
    print(cmd)
    sys.stdout.flush()
    util.cmd.cmd(cmd)
    cmd_eval = 'cd eval;sh eval_ic17.sh'
    sys.stdout.flush()
    util.cmd.cmd(cmd_eval)
Example #16
def reverse_right():
    PressKey(S)
    PressKey(D)
    ReleaseKey(W)
    ReleaseKey(A)


def no_keys():
    ReleaseKey(W)
    ReleaseKey(A)
    ReleaseKey(S)
    ReleaseKey(D)


model = googlenet((WIDTH * 2), HEIGHT, 3, LR, output=9)
MODEL_NAME = 'models\\pygta5-{}-{}-{}-epochs-1-hist_data.model'.format(
    LR, 'googlenet', EPOCHS)
model.load(MODEL_NAME)

print('We have loaded a previous model!!!!')


def main():
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    paused = False
    mode_choice = 0
    test_data = []
Example #17
trainset = datasets.CIFAR10('train/', download=True, train=True, transform=transform)
valset = datasets.CIFAR10('val/', download=True, train=False, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=64, shuffle=True)
len_trainset = len(trainset)
len_valset = len(valset)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")




student1 = mod.densenet()
student1.to(device)
student1.load_state_dict(torch.load('densenetV2.pth'))

student2 = mod.googlenet()
student2.to(device)
student2.load_state_dict(torch.load('googlenetV2.pth'))

selector = mod.CNN()
selector.to(device)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for inputs, labels in valloader:
        inputs = inputs.to(device)
        # The selector decides, per image, which student's prediction to trust.
        model = (selector(inputs).flatten() > 0.5).to(torch.uint8)
        _, s1 = torch.max(student1(inputs), 1)
Example #18
wdl = 0
sal = 0
sdl = 0
nkl = 0

w = [1, 0, 0, 0, 0, 0, 0, 0, 0]
s = [0, 1, 0, 0, 0, 0, 0, 0, 0]
a = [0, 0, 1, 0, 0, 0, 0, 0, 0]
d = [0, 0, 0, 1, 0, 0, 0, 0, 0]
wa = [0, 0, 0, 0, 1, 0, 0, 0, 0]
wd = [0, 0, 0, 0, 0, 1, 0, 0, 0]
sa = [0, 0, 0, 0, 0, 0, 1, 0, 0]
sd = [0, 0, 0, 0, 0, 0, 0, 1, 0]
nk = [0, 0, 0, 0, 0, 0, 0, 0, 1]

model = googlenet(WIDTH, HEIGHT, 3, LR, output=9, model_name=MODEL_NAME)

if LOAD_MODEL:
    model.load(PREV_MODEL)
    print('We have loaded a previous model!!!!')


def pretty_time_left(time_start, iterations_finished, total_iterations):
    if iterations_finished == 0:
        time_left = 0
    else:
        time_end = time.time()
        diff_finished = time_end - time_start
        time_per_iteration = diff_finished / iterations_finished
        assert time_per_iteration >= 0
Example #19
import settings
from models import inception_v3 as googlenet, alexnet
from utils.virtual_gamepad import VirtualGamepad
from vision_gaming.identify import raw_image, match_number
from vision_gaming.job import Job
from vision_gaming.process import resize, binary_threshold
from vision_gaming.vision_system import VisionSystem as VS

WIDTH = settings.TARGET_RESOLUTION[0]
HEIGHT = settings.TARGET_RESOLUTION[1]

print('Creating virtual gamepad...')
gamepad = VirtualGamepad()

print('Loading model...')
model = googlenet(width=WIDTH, height=HEIGHT, lr=settings.LEARNING_RATE)
model.load(settings.MODEL_NAME)

print('Running visual system...')
system = VS(wait=settings.DRIVING_DELAY)
main_camera_job = Job(screen_rect=settings.MAIN_CAMERA_RECT,
                      process=[resize(settings.TARGET_RESOLUTION)],
                      identify=raw_image())
system.register_job('main_camera', main_camera_job)
templates = [cv2.imread('templates/{}.jpg'.format(i), 1) for i in range(0, 10)]
speedometer_job = Job(screen_rect=settings.SPEEDOMETER_RECT,
                      process=[binary_threshold(200, 255)],
                      identify=match_number(templates, range(0, 10)))
system.register_job('speedometer', speedometer_job)
system.run()
Example #20
def main():
    epoch = 2
    class_number = 2
    chosen_model = "resnet50"
    transform = googlenet_transform()

    if chosen_model == "alexnet":
        model = alexnet(class_num = class_number)
    elif chosen_model == "googlenet":
        model = googlenet(class_num = class_number)
    elif chosen_model == "resnet18":
        model = resnet18(class_num = class_number)
    elif chosen_model == "resnet34":
        model = resnet34(class_num = class_number)
    elif chosen_model == "resnet50":
        model = resnet50(class_num = class_number)
    elif chosen_model == "resnet101":
        model = resnet101(class_num = class_number)
    elif chosen_model == "resnet152":
        model = resnet152(class_num = class_number)


    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model = model.cuda()
        print("Use GPU")
    else:
        print("Use CPU")
    train_txt_path = "/Users/Barry/Desktop/classification_models_pytorch/trainset.txt"
    trainset = Mydataset(train_txt_path, transform)
    trainloader = DataLoader(trainset, 
                             batch_size = 3, 
                             shuffle=True
                             )
    val_txt_path = "/Users/Barry/Desktop/classification_models_pytorch/valset.txt"
    valset = Mydataset(val_txt_path, transform)
    valloader = DataLoader(valset, 
                             batch_size = 3, 
                             shuffle=True
                             )
    
    print("data loaded")
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr = 0.001 )

    print("start to train")
    for i in range(epoch):
        model.train()
        for batch_index, (data, target) in enumerate(trainloader):
            if use_gpu:
                data = data.cuda()
                target = target.cuda()
            data, label = Variable(data), Variable(target)
            optimizer.zero_grad()  # clear gradients accumulated from the previous batch
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            
            if batch_index > 0 and batch_index % 50 == 0:
                print("Trian epoch {},  iterate {}, loss is {}".format(i, batch_index, loss))
            # validate prcess
            if batch_index > 0 and batch_index % 500 == 0:
                model.eval()
                losses = []
                for val_index, (data, target) in enumerate(valloader):
                    if use_gpu:
                        data = data.cuda()
                        target = target.cuda()
                    data, label = Variable(data), Variable(target)
                    output = model(data)
                    loss = criterion(output, target)
                    losses.append(loss.item())  # store floats so np.mean works on CPU and GPU
                    if val_index > 0 and val_index % 100 == 0:
                        break

                result = np.mean(losses)
                print("validation loss is {}".format(result))
                model.train()
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-m",
                        type=str,
                        default="models/ctr-googlenet.model-EPOCH_199",
                        help="Model name (and location)")
    #parser.add_argument("-m",type=str, default="models/ctr-nvidia.model-EPOCH_199", help="Model name (and location)")
    parser.add_argument(
        "-deb",
        action="store_true",
        default=False,
        help="Activates debug mode: prints choices made by the CNN model")
    parser.add_argument("-dis",
                        action="store_true",
                        default=False,
                        help="Creates a cv2 window displaying the game")

    args = parser.parse_args()
    MODEL_NAME = args.m
    debug_mode = args.deb
    display_mode = args.dis

    model = googlenet(W_RES, H_RES, 3, lr=0.0001, output=3)
    #model = nvidia(W_RES, H_RES, lr=0.0001, output=3)
    model.load(MODEL_NAME)

    weighted_rng = True  # If False applies argmax to find where to steer

    #Delay while moving to emulator window
    for i in range(1, 4):
        print(i)
        time.sleep(1)
    print('START')

    paused = False
    print('START')

    while True:
        if not paused:
            # Find a way to get exact coordinates automatically
            # 55 px accounts for title bar.
            screen = grab_screen(region=(0, 55, 645, 530))
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
            screen_res = cv2.resize(screen, (W_RES, H_RES))

            # CNN model
            prediction_choice = makePrediction(model, screen_res, weighted_rng,
                                               debug_mode)
            applyChoice(prediction_choice)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print("Restart in 1 second")
                time.sleep(1)
                print("RESTARTED")
            else:
                print("Pause")
                paused = True
                ReleaseKey(L)
                ReleaseKey(J)
                ReleaseKey(Z)
                time.sleep(1)

        if 'P' in keys:
            break

        if display_mode:
            cv2.imshow('window', screen)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
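`makePrediction` is not included in this excerpt; a plausible sketch, assuming a TFLearn-style `model.predict` and the weighted-sampling-versus-argmax behaviour implied by the `weighted_rng` flag (the reshape to `(W_RES, H_RES, 3)` is likewise an assumption):

import numpy as np

def makePrediction(model, screen_res, weighted_rng, debug_mode):
    # Run the CNN on one frame and pick a steering class, either by sampling
    # from the predicted distribution or by taking the argmax.
    probs = np.asarray(model.predict([screen_res.reshape(W_RES, H_RES, 3)])[0],
                       dtype=np.float64)
    if weighted_rng:
        choice = int(np.random.choice(len(probs), p=probs / probs.sum()))
    else:
        choice = int(np.argmax(probs))
    if debug_mode:
        print('prediction:', probs, '-> choice:', choice)
    return choice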