Example #1
def main():
    # Set any options needed
    options = opts.parse(OPTS, OPTS_HELP)
    swarmService = defaults.register(OPTS, OPTS_HELP, \
        Importer, "couchdbImporter")
    # Serve forever
    defaults.serve()
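
Examples #1, #2, #5, and #13 on this page assume an `opts` module whose `parse(OPTS, OPTS_HELP)` takes a table of option defaults plus help strings and returns a dict indexed by option name (e.g. `options["mPort"]`). That module is not reproduced here; the following is only a minimal argparse-backed sketch of such an interface, with the `OPTS`/`OPTS_HELP` contents and defaults invented for illustration rather than taken from pdxjohnny/comms:

# Hypothetical opts module: a guess at the interface used above, not the project's code.
import argparse

# OPTS maps option name -> default value; OPTS_HELP maps option name -> help text.
OPTS = {"mHost": "127.0.0.1", "mPort": 8000, "mStart": False}
OPTS_HELP = {"mHost": "master host", "mPort": "master port", "mStart": "start the swarm server"}

def parse(opts, opts_help):
    parser = argparse.ArgumentParser()
    for name, default in opts.items():
        if isinstance(default, bool):
            # Boolean options become store_true flags (default False).
            parser.add_argument("--" + name, action="store_true",
                                help=opts_help.get(name, ""))
        else:
            parser.add_argument("--" + name, type=type(default), default=default,
                                help=opts_help.get(name, ""))
    # Return a plain dict so callers can write options["mPort"], options["mStart"], etc.
    return vars(parser.parse_args())

Run, for instance, with `--mStart --mPort 9000` to override the sketch's defaults from the command line.
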
Example #2
File: swarm.py  Project: pdxjohnny/comms
def main():
    options = opts.parse(OPTS, OPTS_HELP)
    mySwarm = swarm()
    mySwarm.password = options["mPass"]
    mySwarm.keys("private.pem", "private.pem")
    # Start the server
    if options["mStart"]:
        thread.start_new_thread(mySwarm.start, ())
        # Give it time to start the swarm server so we can register
        # This is a problem because there's only one server in this case
        time.sleep(0.1)
    mySwarm.registerWith(options["sName"], password=options["mPass"], \
        host=options["mHost"], port=options["mPort"])
    data = {
        "some": "data"
    }
    raw_input("Press enter to call master")
    for i in xrange(0, 5):
        print mySwarm.call(options["sMethod"], \
            options["mHost"], options["mPort"], data)
    raw_input("\nPress enter to call swarm")
    for i in xrange(0, 5):
        print mySwarm.callDist(options["sName"], options["sMethod"], \
            options["mHost"], options["mPort"], data)
    raw_input("\nPress enter for nodeList")
    print mySwarm.call("nodeList", \
        options["mHost"], options["mPort"], {})
    raw_input("\nPress enter to exit")
Example #3
def simpletest1():
    # test if the code can learn a simple sequence
    opt = parse()
    opts(opt)
    epochs = 100
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    trainer = train.Trainer()
    model = AsyncTFBase(100, 5, opt.nhidden).cuda()
    criterion = AsyncTFCriterion(opt).cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                opt.lr,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)
    epoch = -1
    for i in range(epochs):
        top1, _ = trainer.train(train_loader, model, criterion, optimizer, i,
                                opt)
        print('cls weights: {}, aa weights: {}'.format(
            model.mA.parameters().next().norm().data[0],
            model.mAAa.parameters().next().norm().data[0]))
    top1, _ = trainer.validate(train_loader, model, criterion, epochs, opt)

    for i in range(5):
        top1val, _ = trainer.validate(val_loader, model, criterion, epochs + i,
                                      opt)
        print('top1val: {}'.format(top1val))

    ap = trainer.validate_video(valvideo_loader, model, criterion, epoch, opt)
    return top1, top1val, ap
Example #4
def main():
    global args, best_top1
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    seed(args.manual_seed)

    model, criterion, optimizer = create_model(args)
    if args.resume:
        best_top1 = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    loaders = get_dataset(args)
    train_loader = loaders[0]

    if args.evaluate:
        scores = validate(trainer, loaders, model, criterion, args)
        checkpoints.score_file(scores, "{}/model_000.txt".format(args.cache))
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(trainer.train(train_loader, model, criterion, optimizer, epoch, args))
        scores.update(validate(trainer, loaders, model, criterion, args, epoch))

        is_best = scores[args.metric] > best_top1
        best_top1 = max(scores[args.metric], best_top1)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores, args.metric)
    if not args.nopdb:
        pdb.set_trace()
Example #5
def main():
    options = opts.parse(OPTS, OPTS_HELP)
    if options["start"]:
        myServer = server()
        myServer.start(service.HOST, PORT, alwaysAdd)
    else:
        connect(service.HOST, PORT, "hello")
Example #6
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache+'/log.txt')
    print(vars(opt))
    seed(opt.manual_seed)

    model, criterion, optimizer = create_model(opt)
    if opt.resume: best_mAP = checkpoints.load(opt, model, optimizer)
    print(model)
    trainer = train.Trainer()
    train_loader, val_loader, valvideo_loader = get_dataset(opt)

    if opt.evaluate:
        #trainer.validate(val_loader, model, criterion, -1, opt)
        trainer.validate_video(valvideo_loader, model, -1, opt)
        return

    for epoch in range(opt.start_epoch, opt.epochs):
        if opt.distributed:
            trainer.train_sampler.set_epoch(epoch)
        top1,top5 = trainer.train(train_loader, model, criterion, optimizer, epoch, opt)
        top1val,top5val = trainer.validate(val_loader, model, criterion, epoch, opt)
        mAP = trainer.validate_video(valvideo_loader, model, epoch, opt)
        is_best = mAP > best_mAP
        best_mAP = max(mAP, best_mAP)
        scores = {'top1train':top1,'top5train':top5,'top1val':top1val,'top5val':top5val,'mAP':mAP}
        checkpoints.save(epoch, opt, model, optimizer, is_best, scores)
Example #7
def main(argv):
  # 5. parse command line arguments
  argv0 = argv.pop(0)
  opts.parse(argv)

  # Initialize map
  # print "font-size: %d" % opts.CFG["font-size"]
  app = gui.MainWindow(int(opts.CFG["width"]), int(opts.CFG["height"]),
                       int(opts.CFG["font-size"]), not opts.CFG["mouse"],
                       int(opts.CFG["header"]))

  buildMenu(argv,app)


  # 10. scan rom dirs
  # sortid.png
  # sortid/[sortid,]something.ext
  # sortid.ini


  # sortid
  # - key | label | path

  # 20. open pygame
  # 30. prompt menu
  # 40. exit pygame
  # 50. action menu results
  # 60. goto 2.

  while True:
    r = app.run()
    print "r= %s" % r

    if (r is None) or (r == gui.R_EXIT):
      break
    if (r == ':'):
      continue
    else:
      for k in opts.CFG["ext"]:
        if r[-len(k)-1:] == "."+k:
          print "RUN %s %s %s" %  (k,opts.CFG["ext"][k], r)
          subprocess.call("%s '%s'" % (opts.CFG["ext"][k], r),shell=True)

    # subprocess.call("sh",shell=True)

  sys.exit()
Example #8
def main():
    opt = opts.parse()
    chainer.cuda.get_device_from_id(opt.gpu).use()
    for i in range(1, opt.nTrials + 1):
        print('+-- Trial {} --+'.format(i))
        t_err, v_err = train(opt, i)

    return t_err, v_err
Example #9
def main():
    opt = opts.parse()
    if opt.noGPU == False:
        torch.cuda.set_device(opt.gpu)
    for i in range(1, opt.nTrials + 1):
        print('+-- Trial {} --+'.format(i))
        best_val_error = train(opt, i)
        print("Best validation rate: {}".format(best_val_error))
Example #10
def main():
    best_score = 0
    args = parse()
    if not args.no_logger:
        tee.Tee(args.cache + '/log.txt')
    print(vars(args))
    print('experiment folder: {}'.format(experiment_folder()))
    print('git hash: {}'.format(get_script_dir_commit_hash()))
    seed(args.manual_seed)
    cudnn.benchmark = not args.disable_cudnn_benchmark
    cudnn.enabled = not args.disable_cudnn

    metrics = get_metrics(args.metrics)
    tasks = get_tasks(args.tasks)
    model, criterion = get_model(args)
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     args.lr,
                                     weight_decay=args.weight_decay)
    else:
        assert False, "invalid optimizer"

    if args.resume:
        best_score = checkpoints.load(args, model, optimizer)
    print(model)
    trainer = train.Trainer()
    train_loader, val_loader = get_dataset(args)

    if args.evaluate:
        scores = validate(trainer, val_loader, model, criterion, args, metrics,
                          tasks, -1)
        print(scores)
        score_file(scores, "{}/model_999.txt".format(args.cache))
        return

    if args.warmups > 0:
        for i in range(args.warmups):
            print('warmup {}'.format(i))
            trainer.validate(train_loader, model, criterion, -1, metrics, args)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            trainer.train_sampler.set_epoch(epoch)
        scores = {}
        scores.update(
            trainer.train(train_loader, model, criterion, optimizer, epoch,
                          metrics, args))
        scores.update(
            validate(trainer, val_loader, model, criterion, args, metrics,
                     tasks, epoch))
        is_best = scores[args.metric] > best_score
        best_score = max(scores[args.metric], best_score)
        checkpoints.save(epoch, args, model, optimizer, is_best, scores,
                         args.metric)
Example #11
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache + '/log.txt')
    print(vars(opt))
    seed(opt.manual_seed)

    base_model, logits_model, criterion, base_optimizer, logits_optimizer = create_model(
        opt)
    if opt.resume:
        best_mAP = checkpoints.load(opt, base_model, logits_model,
                                    base_optimizer, logits_optimizer)
    print(logits_model)
    trainer = train.Trainer()
    train_loader, val_loader, valvideo_loader = get_dataset(opt)

    if opt.evaluate:
        trainer.validate(val_loader, base_model, logits_model, criterion, -1,
                         opt)
        trainer.validate_video(valvideo_loader, base_model, logits_model,
                               criterion, -1, opt)
        return

    for epoch in range(opt.start_epoch, opt.epochs):
        if opt.distributed:
            trainer.train_sampler.set_epoch(epoch)
        s_top1, s_top5, o_top1, o_top5, v_top1, v_top5, sov_top1 = trainer.train(
            train_loader, base_model, logits_model, criterion, base_optimizer,
            logits_optimizer, epoch, opt)
        s_top1val, s_top5val, o_top1val, o_top5val, v_top1val, v_top5val, sov_top1val = trainer.validate(
            val_loader, base_model, logits_model, criterion, epoch, opt)
        sov_mAP, sov_rec_at_n, sov_mprec_at_n = trainer.validate_video(
            valvideo_loader, base_model, logits_model, criterion, epoch, opt)
        is_best = sov_mAP > best_mAP
        best_mAP = max(sov_mAP, best_mAP)
        scores = {
            's_top1': s_top1,
            's_top5': s_top5,
            'o_top1': o_top1,
            'o_top5': o_top5,
            'v_top1': v_top1,
            'v_top5': v_top5,
            'sov_top1': sov_top1,
            's_top1val': s_top1val,
            's_top5val': s_top5val,
            'o_top1val': o_top1val,
            'o_top5val': o_top5val,
            'v_top1val': v_top1val,
            'v_top5val': v_top5val,
            'sov_top1val': sov_top1val,
            'mAP': sov_mAP,
            'sov_rec_at_n': sov_rec_at_n,
            'sov_mprec_at_n': sov_mprec_at_n
        }
        checkpoints.save(epoch, opt, base_model, logits_model, base_optimizer,
                         logits_optimizer, is_best, scores)
Example #12
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache + '/log_0724-valvideo.txt')
    #print(vars(opt))
    seed(opt.manual_seed)

    print('1. create_model')
    base_model, logits_model, criterion, base_optimizer, logits_optimizer = create_model(
        opt)
    if opt.resume:
        print('checkpoints load')
        #best_mAP = checkpoints.load(opt, base_model, logits_model, base_optimizer, logits_optimizer)
        checkpoints.load(opt, base_model, logits_model, base_optimizer,
                         logits_optimizer)

    #print('base_model = InceptionI3D Networks') # InceptionI3D Networks
    #print(base_model)
    #print('logits_model = AsyncTFBase: Linear Networks') # AsyncTFBase: Linear Networks
    #print(logits_model)

    trainer = train.Trainer()

    print('2. get_dataset')
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    #print('train_loader') # [56586, [25,img,s,v,o,meta]]
    #print(train_loader)    # 56586=pairs
    #print('val_loader')   # [12676, [25,img,s,v,o,meta]]
    #print(val_loader)
    #print('valvideo_loader') # [1863, [25+1,img,s,v,o,meta]]
    #print(valvideo_loader)   # 1863=num_(kind of video)

    if opt.evaluate:
        trainer.validate(val_loader, base_model, logits_model, criterion, -1,
                         opt)
        trainer.validate_video(valvideo_loader, base_model, logits_model,
                               criterion, -1, opt)
        return

    print('3.3 Validation Video')
    #if opt.distributed:
    #    trainer.train_sampler.set_epoch(epoch)

    epoch = -1  # the epoch loop is commented out above, so use -1 as in the evaluate branch
    sov_mAP, sov_rec_at_n, sov_mprec_at_n = trainer.validate_video(
        valvideo_loader, base_model, logits_model, criterion, epoch, opt)

    is_best = sov_mAP > best_mAP
    best_mAP = max(sov_mAP, best_mAP)
    scores = {
        'mAP': sov_mAP,
        'sov_rec_at_n': sov_rec_at_n,
        'sov_mprec_at_n': sov_mprec_at_n
    }
    checkpoints.score_file(scores,
                           "{}/model_{}.txt".format(opt.cache, 'valvideo'))
Example #13
def main():
    options = opts.parse(OPTS, OPTS_HELP)
    client = comms.service()
    client.keys("public.pem", "private.pem")
    client.getKey(host=options["host"], port=options["port"])
    services, error = client.call("nodeList", \
        options["host"], options["port"], {})
    if not error:
        countServices(services)
    else:
        raise comms.error(services, error)
Example #14
def main():
    # Cuda Setting Value : Default = '0'
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    # opts.py : get arg parser
    opt = opts.parse()

    # Cuda Current Device
    print(("device id: {}".format(torch.cuda.current_device())))
    # Pytorch version : 0.4.1
    print("torch.version", torch.__version__)
    # Cuda version : 9.0.1
    print("cuda_version", torch.version.cuda)
Example #15
def main():
    opt = opts.parse()
    model = net.ConvNet(opt.n_classes, opt.BC, opt.nobias, opt.dropout_ratio)
    if opt.gpu > -1:
        chainer.cuda.get_device_from_id(opt.gpu).use()
        model.to_gpu()
    optimizer = optimizers.NesterovAG(lr=opt.LR, momentum=opt.momentum)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(opt.weight_decay))
    train_iter, val_iter = dataset.setup(opt)
    updater = training.StandardUpdater(train_iter, optimizer, device=opt.gpu)
    # Trainer
    trainer = training.Trainer(updater, (opt.n_epochs, 'epoch'), opt.save)
    trainer.extend(extensions.ExponentialShift('lr', 0.1, opt.LR),
                   trigger=ManualScheduleTrigger(opt.schedule, 'epoch'))
    trainer.extend(extensions.Evaluator(val_iter, model,
                                        device=opt.gpu), trigger=(1, 'epoch'))
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(filename='min_loss'), trigger=MinValueTrigger(
        key='validation/main/loss', trigger=(5, 'epoch')))
    trainer.extend(extensions.snapshot(filename='max_accuracy'), trigger=MaxValueTrigger(
        key='validation/main/accuracy', trigger=(5, 'epoch')))
    trainer.extend(extensions.snapshot_object(model, 'min_loss_model'),
                   trigger=MinValueTrigger(key='validation/main/loss', trigger=(5, 'epoch')))
    trainer.extend(extensions.snapshot_object(model, 'max_accuracy_model'),
                   trigger=MaxValueTrigger(key='validation/main/accuracy', trigger=(5, 'epoch')))
    trainer.extend(extensions.observe_lr())
    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(extensions.PlotReport(
            ['main/loss', 'validation/main/loss'], 'epoch', file_name='loss.png'))
        trainer.extend(extensions.PlotReport(
            ['main/accuracy', 'validation/main/accuracy'], 'epoch', file_name='accuracy.png'))
        trainer.extend(extensions.PlotReport(
            ['lr'], 'epoch', file_name='learning_rate.png'))
    trainer.extend(extensions.PrintReport(['elapsed_time', 'epoch', 'iteration', 'lr',
                                           'main/loss', 'main/accuracy', 'validation/main/loss', 'validation/main/accuracy']))
    trainer.extend(extensions.ProgressBar(update_interval=25))
    if opt.resume and os.path.exists(opt.resume):
        chainer.serializers.load_npz(opt.resume, trainer)
    # Run the training
    try:
        trainer.run()
    except Exception as e:
        import shutil
        import traceback
        print('\nerror message')
        print(traceback.format_exc())
        shutil.rmtree(opt.save)
Example #16
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # if opt.genLine:
    #     if opt.testOnly:
    #         processData('test')
    #     else:
    #         print('Prepare train data')
    #         processData('train')

    # try:
    #     DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
    #     print('DataLoader1 : ', DataLoader)
    # except ImportError:
    #     DataLoader = importlib.import_module('datasets.dataloader')
    #     #print('DataLoader2 : ', DataLoader)

    # Data loading
    print('=> Setting up data loader')
    #trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    cap = cv2.VideoCapture("input_video/Driving_Studio.avi")

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    # fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    # out = cv2.VideoWriter('output_video/TEST.avi', fourcc, 25.0, (video_width,video_height),0)

    prev_time = 0

    fps_list = []

    while True:
        ret, frame = cap.read()

        if ret:
            cur_time = time.time()

            input_img = frame / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var, None)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output = np.clip(output, 0, 255)
                output = np.uint8(output)

                end_time = time.time()
                sec = end_time - cur_time

                fps = 1 / sec
                fps_list.append(fps)

                print("Estimated fps {0} ".format(fps))

                cv2.imshow("output", output)

                key = cv2.waitKey(1) & 0xFF
                if key == 27: break
Example #17
def main():
    opt = opts.parse()
    chainer.cuda.get_device_from_id(opt.gpu).use()
    for split in opt.splits:
        print('+-- Split {} --+'.format(split))
        train(opt, split)
Example #18
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True
    pidcal = PidCal()
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()
    stopline = StopLine()
    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    cap = None

    if opt.video_idx == 0:
        cap = cv2.VideoCapture("input_video/720p.mp4")
    elif opt.video_idx == 1:
        cap = cv2.VideoCapture("input_video/straight.avi")
    elif opt.video_idx == 2:
        cap = cv2.VideoCapture("input_video/test.avi")
    elif opt.video_idx == 3:
        cap = cv2.VideoCapture("input_video/track.avi")
    elif opt.video_idx == 4:
        cap = cv2.VideoCapture("output_video/field.avi")
    elif opt.video_idx == 5:
        cap = cv2.VideoCapture("output_video/2020-08-23 19:20:01.166517.avi")
    else:
        cap = cv2.VideoCapture(0)
        # video test
        cap.set(3,1280)
        cap.set(4,720)

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))



    #fourcc = cv2.VideoWriter_fourcc('M','J','P','G')
    # out = cv2.VideoWriter('output_video/TEST_1.avi', fourcc, 20.0, (1280,480),0)

    prev_time = 0

    fps_list = []

    now = datetime.datetime.now()

    fourcc = None
    out = None

    if opt.video_idx > 2:
        fourcc =cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output_video/' + str(now) + '.avi',fourcc,30.0,(1280,720))

    pid_list=list()
    steer_list = list()
    lpf_list = list()

    pid_old = None
    steer_theta = 0
    i=0
    x_location = 240
    frame_cnt = 0
    while True:
        ret, frame = cap.read()
        if frame is None:
            break
        frame_height, frame_width, frame_channels = frame.shape

        print("Frame Info : (Height, Width, Channels) : ({}, {}, {})".format(frame_height, frame_width, frame_channels))

        record_frame = cv2.resize(frame, (1280,720))

        if ret:
            cur_time = time.time()
            frame_new = cv2.resize(frame, (320,180))

            input_img = frame_new / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)
                output = torch.sigmoid(output)
                #output = F.softmax(output, dim=1)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output = np.clip(output, 0, 1)
                output *= 255
                output = np.uint8(output)


                output = cv2.resize(output, (640, 360))
                output[output>80] = 255
                output[output<=80] = 0

                # cv2.circle(output, (output.shape[1]/2, output.shape[0]), 9, (255,255,0), -1)
                cv2.imshow("output_img", output)

                print("shape_info", output.shape)
                # cv2.circle(output, (output.shape[0]/2, output.shape[1]/2), 9, (0,255,0), -1)
                #warper_img = warper.warp(output)
                warper_img = warper.warp_test(output)
                cv2.imshow("warp_img", warper_img)

                # warper_img_test = warper.warp_test(output)
                # cv2.imshow("warp_img_test",warper_img_test)
                ret, left_start_x, right_start_x, cf_img = slidewindow.w_slidewindow(warper_img, 180)

                if ret:
                    i+=1
                    left_x_current,right_x_current, sliding_img,steer_theta,center, length = slidewindow.h_slidewindow(warper_img, left_start_x, right_start_x)
                    #stop_test Lee youn joo
                    # if center != None:
                    #     locate_x, locate_y = center
                    #     if (warper_img[int(locate_y)][int(locate_x)] != 0):
                    #         stopFlag, id_L, id_R = stopline.findline(warper_img,locate_x,locate_y,length,left_x_current,right_x_current)
                    #         if stopFlag != None:
                    #             if frame_cnt == 0:
                    #                 print('STOP!')
                    #                 cv2.line(warper_img,id_L,id_R,(0,0,255),2)
                    #                 cv2.waitKey(-1)
                    #             frame += 1
                    #         if (frame_cnt > 0):
                    #             frame_cnt = 0
                    #         print(stopFlag,frame_cnt)
                    # SD.stop(warper_img)
                    SD.stoping_tmp(warper_img)
                    cv2.imshow('sliding_img', sliding_img)
                    steer_list.append(steer_theta)

                    x_location = (left_x_current+right_x_current)/2

                    # low pass filter
                    steer_theta = lpf(steer_theta, 0.3)
                    lpf_list.append(steer_theta)

                    # steer theta : Degree
                    print("steer theta:" ,steer_theta)
                    #
                    # if steer_theta<-28.0 or steer_theta >28.0:
                    #     # auto_drive(pid_old)
                    #     auto_drive(steer_theta)

                    # else:
                        # degree angle
                    pid = round(pidcal.pid_control(steer_theta),6)
                    pid_list.append(pid)

                    print("pid :",pid)

                    pid_old = pid
                    auto_drive(steer_theta)

                        # auto_drive(pid)
                else:
                    auto_drive(steer_theta)
                    # auto_drive(pid)
                    pidcal.error_sum = 0
                    pidcal.error_old = 0


                end_time = time.time()
                sec = end_time - cur_time

                fps = 1/sec
                fps_list.append(fps)

                print("Estimated fps {0} " . format(fps))

                # out.write(add_img)

                cv2.imshow("frame",frame)


                if opt.video_idx == -1:
                    print("frame.shape : {}".format(frame.shape))
                    out.write(frame)
                # cv2.imshow("src", warper_img)
                # cv2.imshow("out_img", output)
                cv2.imshow("cf_img", cf_img)

                key = cv2.waitKey(1) & 0xFF
                if key == 27: break
                elif key == ord('p'):
                    cv2.waitKey(-1)

    cap.release()
    cv2.destroyAllWindows()

    plt.plot(range(i),steer_list,label='steer')
    plt.legend()
    plt.plot(range(i),pid_list,label='pid')
    plt.legend()

    plt.plot(range(i),lpf_list,label='lpf')
    plt.legend()
    pid_info=pidcal.info_p()
    plt.savefig('output_video/video_idx:'+ str(opt.video_idx)+' '+str(pid_info) +'.png', dpi=300)
Example #19
import torch
import os
import opts
from datasets import *
import models
import torch.utils.data
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import itertools

opt = opts.parse()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("{} is used.".format(device))

def load_model(model_path1:str, model_path2:str):
    model1 = models.EnvNet5(50).cuda()
    model2 = models.EnvNet6(50).cuda()

    model1.load_state_dict(torch.load(model_path1, map_location=lambda storage, loc: storage))
    model2.load_state_dict(torch.load(model_path2, map_location=lambda storage, loc: storage))

    model1.eval()
    model2.eval()

    return model1, model2

def data_test(models):

    test_fold = 5
    print("Data Get fold {}".format(test_fold))
Example #20
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()
    warper = Warper()
    #slidewindow  = SlideWindow()
    # slidewindow  = LineDetector()

    pidcal = PidCal()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')
    #trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################
    model.eval()

    cap = cv2.VideoCapture(
        "/home/foscar/ISCC_2019/src/race/src/my_lane_detection/input_video/0.avi"
    )
    ret, frame = cap.read()
    slidewindow = LineDetector(frame)
    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    video_name = time.time()
    out = cv2.VideoWriter('output_video/{}.avi'.format(video_name), fourcc,
                          25.0, (video_width, video_height), 0)

    prev_time = 0

    count = 0

    while True:
        ret, frame = cap.read()
        count += 1
        if ret:
            cur_time = time.time()
            frame = cv2.resize(frame, (480, 360))

            input_img = frame / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)

                print("output.shape : ", output.shape)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                # cv2.imshow("203",output)
                output *= 255
                output = np.clip(output, 0, 255)
                output = np.uint8(output)

                # resize
                output = cv2.resize(output, (640, 480))
                # cv2.imshow('resize',output)
                # threshold
                ret, thr_img = cv2.threshold(output, 20, 255, 0)
                # cv2.imshow('threshold',thr_img)
                # warp
                warp_img = warper.warp(thr_img)

                # cv2.imshow('warped',warp_img)
                # cv2.imshow("new output", canny_like_output)

                #canny = cv2.Canny(warp_img, 40, 255)
                kernel1 = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
                kernel2 = np.ones((5, 5), np.uint8)

                #dilate = cv2.dilate(warp_img, kernel1, iterations=2)
                #closed = cv2.morphologyEx(dilate, cv2.MORPH_OPEN, kernel2)
                # x_start_L, x_start_R=slidewindow.find_sliding_point(warp_img)
                # img, x_location = slidewindow.slide_window(x_start_L,x_start_R,warp_img)
                slided_img, x_location, point_list_left, point_list_right = slidewindow.main(
                    warp_img)

                if x_location != None:
                    # cv2.circle(img,(int(x_location),300),5,(0,0,255),3)
                    pid = round(pidcal.pid_control(int(x_location)), 6)
                    #print("pid rate : ", pid)
                    auto_drive(pid, x_location)
                else:
                    pid = pidcal.pid_control(slidewindow_middle)
                    print("pid rate : ", pid)
                    auto_drive(pid)

                end_time = time.time()
                sec = end_time - cur_time

                fps = 1 / sec
                fps_list.append(fps)

                print("Estimated fps {0} ".format(fps))

                out.write(output)

                cv2.imshow("src", frame)
                pid_draw.append(pid)

                # cv2.imshow("th_img", thr_img)
                # cv2.imshow("output", output)
                # img = cv2.imread('/home/foscar/Downloads/wapped_screenshot_12.08.2020.png',cv2.IMREAD_GRAYSCALE)
                # img = cv2.resize(img, (640, 480))
                # ret, thr_img = cv2.threshold(img,20,255,cv2.THRESH_BINARY)
                # img ,xloc, point_list_left, point_list_right = slidewindow.main(thr_img)
                # plt.xlim(0,640)
                # plt.ylim(0,480)
                # plt.plot(point_list_left[0], point_list_left[1])
                # plt.plot(point_list_right[0], point_list_right[1])
                # plt.show()
                # cv2.imshow('aa',img)
                # key = cv2.waitKey(1) & 0xFFx
                # if key == 27: break
                # elif key == ord('p'):
                #     cv2.waitKey(-1)
                cv2.imshow("ws", slided_img)
                print("x_loc :", x_location)
                key = cv2.waitKey(1) & 0xFF
                if key == 27: break
                elif key == ord('p'):
                    cv2.waitKey(-1)
Example #21
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # if opt.genLine:
    #     if opt.testOnly:
    #         processData('test')
    #     else:
    #         print('Prepare train data')
    #         processData('train')

#    try:
#        DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
#        #print('DataLoader1 : ', DataLoader)
#    except ImportError:
#        DataLoader = importlib.import_module('datasets.dataloader')
#        #print('DataLoader2 : ', DataLoader)
#
    # Data loading
    print('=> Setting up data loader')
    #trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    file_name = "0000.png"
    input_img = cv2.imread(file_name,cv2.IMREAD_COLOR)


    img_h, img_w, _ = input_img.shape  # shape is (height, width, channels)
    print("w : ", img_w, " h : ", img_h)

    image = image_loader(input_img)
    im,imgs =  model(image)
    img = im[0].cpu().detach().numpy()
    print(img.shape)
    img = np.transpose(img, (1, 2, 0))
    print(img.shape, ' ', type(img))
    img = np.clip(img,0,255)
    img = np.uint8(img)
    dst = cv2.resize(img, dsize=(img_w, img_h), interpolation=cv2.INTER_LANCZOS4)
    #dst = cv2.threshold(dst,50,255,cv2.THRESH_BINARY)
    dst = dst*255
    dst = np.clip(dst,0,255)

    #print(dst.shape)
    # plt.imshow(dst)
    # plt.show()

    cv2.imshow("dst",dst)
    cv2.waitKey(100000)
    #cv2.imwrite('output_img/test12_bin.png',dst)
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
Example #22
import torch
import torch.nn
import torch.optim
from torch import autograd
from torch.nn.functional import avg_pool2d, interpolate, softmax
from torch.nn import UpsamplingBilinear2d, Upsample
from torch.autograd import Variable
import numpy as np
import tqdm
import matplotlib.pyplot as plt

import config as c
import opts
import sys
import time

opts.parse(sys.argv)
config_str = ""
config_str += "==="*30 + "\n"
config_str += "Config options:\n\n"

upsampler = Upsample(size=(c.org_size, c.org_size), align_corners=True, mode='bilinear')

for v in dir(c):
    if v[0]=='_': continue
    s=eval('c.%s'%(v))
    config_str += "  {:25}\t{}\n".format(v,s)

config_str += "==="*30 + "\n"

print(config_str)
Example #23
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()
    warper = Warper()
    slidewindow  = SlideWindow()
    pidcal = PidCal()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')
    #trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################
    model.eval()

    cap = cv2.VideoCapture(0)

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    video_name = time.time()
    out = cv2.VideoWriter('output_video/{}.avi'.format(video_name), fourcc, 25.0, (video_width,video_height),0)

    prev_time = 0

    fps_list = []


    while True:
        ret, frame = cap.read()

        if ret:
            cur_time = time.time()
            frame = cv2.resize(frame, (480,360))

            input_img = frame / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)

                print("output.shape : ", output.shape)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output *= 255
                output = np.clip(output, 0, 255)
                output = np.uint8(output)

                # resize
                output = cv2.resize(output, (640, 480))

                # threshold
                ret, thr_img = cv2.threshold(output, 180, 255, 0)
                # warp
                output, warp_img = warper.warp(output, thr_img)
                img, x_location = slidewindow.slidewindow(warp_img)

                if x_location != None:
                    pid = round(pidcal.pid_control(int(x_location)), 6)
                    print("pid rate : ", pid)
                    auto_drive(pid, x_location)
                else:
                    pid = pidcal.pid_control(320)
                    print("pid rate : ", pid)
                    auto_drive(pid)


                end_time = time.time()
                sec = end_time - cur_time

                fps = 1/sec
                fps_list.append(fps)

                print("Estimated fps {0} " . format(fps))

                out.write(output)

                cv2.imshow("src", frame)


                cv2.imshow("output", output)
                cv2.imshow("thre", img)

                key = cv2.waitKey(1) & 0xFF
                if key == 27: break
                elif key == ord('p'):
                    cv2.waitKey(-1)
Example #24
File: main.py  Project: zhj2020/wireframe
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()
    print(("device id: {}".format(torch.cuda.current_device())))


    models = importlib.import_module('models.init')
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    if opt.genLine:
        if opt.testOnly:
            processData('test')
        else:
            processData('train')

    try:
        DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
    except ImportError:
        DataLoader = importlib.import_module('datasets.dataloader')

    # Data loading
    print('=> Setting up data loader')
    trainLoader, valLoader = DataLoader.create(opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    # The trainer handles the training loop and evaluation on validation set
    trainer = Trainer.createTrainer(model, criterion, opt, optimState)

    if opt.testOnly:
        loss = trainer.test(valLoader, 0)
        sys.exit()

    bestLoss = math.inf
    startEpoch = max([1, opt.epochNum])
    if checkpoint != None:
        startEpoch = checkpoint['epoch'] + 1
        bestLoss = checkpoint['loss']
        print('Previous loss: \033[1;36m%1.4f\033[0m' % bestLoss)

    trainer.LRDecay(startEpoch)

    for epoch in range(startEpoch, opt.nEpochs + 1):
        trainer.scheduler.step()

        trainLoss = trainer.train(trainLoader, epoch)
        testLoss = trainer.test(valLoader, epoch)

        bestModel = False
        if testLoss < bestLoss:
            bestModel = True
            bestLoss = testLoss
            print(' * Best model: \033[1;36m%1.4f\033[0m * ' % testLoss)

        checkpoints.save(epoch, trainer.model, criterion, trainer.optimizer, bestModel, testLoss, opt)

    print(' * Finished Err: \033[1;36m%1.4f\033[0m * ' % bestLoss)
Example #25
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # if opt.genLine:
    #     if opt.testOnly:
    #         processData('test')
    #     else:
    #         print('Prepare train data')
    #         processData('train')

    try:
        DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
        print('DataLoader1 : ', DataLoader)
    except ImportError:
        DataLoader = importlib.import_module('datasets.dataloader')
        #print('DataLoader2 : ', DataLoader)

    # Data loading
    print('=> Setting up data loader')
    trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    cap = cv2.VideoCapture("output_video/TEST11.avi")

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    print(type(video_width))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    out = cv2.VideoWriter('output_video/TEST11_ADAM.avi', fourcc, 25.0, (video_width,video_height),0)

    while True:
        ret, frame = cap.read()

        if ret:
            image = image_loader(frame)
            im =  model(image)
            img = im[0][0].cpu().detach().numpy()
            img = np.clip(img,0,255)
            slice1Copy = np.uint8(img)
            print(slice1Copy.shape)
            slice1Copy = np.transpose(slice1Copy,(1,2,0))
            dst = cv2.resize(slice1Copy,dsize=(video_width,video_height),interpolation=cv2.INTER_LANCZOS4)
            print(type(dst))
            #dst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)

            out.write(dst)

            # add_img = cv2.hconcat([frame, dst])
            #cv2.imshow("original",frame)
            cv2.imshow('video', dst)
            #
            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                 break
        else:
            print('error')
            break

    cap.release()
    cv2.destroyAllWindows()
Example #26
def main():
    global opt, best_mAP
    opt = parse()
    tee.Tee(opt.cache + '/log0819-t2f51.txt')
    #print(vars(opt))
    seed(opt.manual_seed)

    print('1. create_model')
    base_model, logits_model, criterion, base_optimizer, logits_optimizer = create_model(
        opt)
    if opt.resume:
        print('checkpoints load')
        best_mAP = checkpoints.load(opt, base_model, logits_model,
                                    base_optimizer, logits_optimizer)
        #checkpoints.load(opt, base_model, logits_model, base_optimizer, logits_optimizer)

    print('base_model = InceptionI3D Networks')  # InceptionI3D Networks
    #print(base_model)
    print('logits_model = AsyncTFBase: Linear Networks')  # AsyncTFBase: Linear Networks
    #print(logits_model)

    trainer = train.Trainer()

    print('2. get_dataset')
    train_loader, val_loader, valvideo_loader = get_dataset(opt)
    #print('train_loader') # [56586, [25, img, tuple]]
    #print(train_loader)    # 56586 (img, tuple) pairs
    #print('val_loader')   # [12676, [25, img, tuple]]
    #print(val_loader)
    #print('valvideo_loader') # [1863, [25, img, tuple]]
    #print(valvideo_loader)   # 1863 = number of video kinds

    if opt.evaluate:
        trainer.validate(val_loader, base_model, logits_model, criterion, -1,
                         opt)
        trainer.validate_video(valvideo_loader, base_model, logits_model,
                               criterion, -1, opt)
        return

    # write csv
    with open('train_log.csv', 'w') as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(['i', 'loss', 's', 'v', 'o'])

        print('3. Train & Test (Validation)')
        for epoch in range(opt.start_epoch, opt.epochs):  # 0~20
            #print('epoch = ', epoch)
            if opt.distributed:
                trainer.train_sampler.set_epoch(epoch)

            print('3.1 Training')
            s_top1, s_top5, o_top1, o_top5, v_top1, v_top5, sov_top1 = trainer.train(
                train_loader, base_model, logits_model, criterion,
                base_optimizer, logits_optimizer, epoch, opt, csv_writer)

            print('3.2 Test (Validation)')
            s_top1val, s_top5val, o_top1val, o_top5val, v_top1val, v_top5val, sov_top1val = trainer.validate(
                val_loader, base_model, logits_model, criterion, epoch, opt)

            print('3.3 Test (Validation_Video)')
            sov_mAP, sov_rec_at_n, sov_mprec_at_n = trainer.validate_video(
                valvideo_loader, base_model, logits_model, criterion, epoch,
                opt)

            is_best = sov_mAP > best_mAP
            best_mAP = max(sov_mAP, best_mAP)
            scores = {
                's_top1': s_top1,
                's_top5': s_top5,
                'o_top1': o_top1,
                'o_top5': o_top5,
                'v_top1': v_top1,
                'v_top5': v_top5,
                'sov_top1': sov_top1,
                's_top1val': s_top1val,
                's_top5val': s_top5val,
                'o_top1val': o_top1val,
                'o_top5val': o_top5val,
                'v_top1val': v_top1val,
                'v_top5val': v_top5val,
                'sov_top1val': sov_top1val,
                'mAP': sov_mAP,
                'sov_rec_at_n': sov_rec_at_n,
                'sov_mprec_at_n': sov_mprec_at_n
            }
            #scores = {'s_top1':s_top1,'s_top5':s_top5,'o_top1':o_top1,'o_top5':o_top5,'v_top1':v_top1,'v_top5':v_top5,'sov_top1':sov_top1,'s_top1val':s_top1val,'s_top5val':s_top5val,'o_top1val':o_top1val,'o_top5val':o_top5val,'v_top1val':v_top1val,'v_top5val':v_top5val,'sov_top1val':sov_top1val}
            checkpoints.save(epoch, opt, base_model, logits_model,
                             base_optimizer, logits_optimizer, is_best, scores)
Example #27
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    opt = opts.parse()

    # Device ID : 0
    print("Device ID : {}".format(torch.cuda.current_device()))
    # torch.version : 1.4.0
    print("torch.version : ", torch.__version__)
    # cuda_version : 10.1
    print("cuda_version : ", torch.version.cuda)

    models = importlib.import_module('models.init')
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    #Trainer = importlib.import_module('models.' + opt.netType + '-train')
    Trainer = importlib.import_module('models.stackedHGB-train')

    try:
        DataLoader = importlib.import_module('models.stackedHGB-dataloader')
        #print("try 문")
    except ImportError:
        DataLoader = importlib.import_module('datasets.dataloader')
        #print("try 문")

    # Data Load
    print("Set up data loader")
    trainLoader, valLoader = DataLoader.create(opt)

    # Check Points Loader
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    trainer = Trainer.createTrainer(model, criterion, opt, optimState)

    if opt.testOnly:
        loss = trainer.test(valLoader, 0)
        sys.exit()


    bestLoss = math.inf
    startEpoch = max([1, opt.epochNum])
    #print("opt.epochNum : ", opt.epochNum)

    if checkpoint != None:
        startEpoch = checkpoint['epoch'] + 1
        bestLoss = checkpoint['loss']
        print('Previous loss: \033[1;36m%1.4f\033[0m' % bestLoss)
#     optimizer.step()
    trainer.LRDecay(startEpoch)
    # opt.nEpochs + 1
    print("opt.nEpochs : ",opt.nEpochs)
    # training
    print("Training Start")
    for epoch in range(startEpoch, opt.nEpochs + 1):
        trainer.scheduler.step()

        trainLoss = trainer.train(trainLoader, epoch)
        testLoss = trainer.test(valLoader, epoch)
        
        bestModel = False
        if testLoss < bestLoss:
            bestModel = True
            bestLoss = testLoss
            print(' * Best model: \033[1;36m%1.4f\033[0m * ' % testLoss)

        checkpoints.save(epoch, trainer.model, criterion, trainer.optimizer, bestModel, testLoss, opt)

    print(' * Finished Err: \033[1;36m%1.4f\033[0m * ' % bestLoss)
Example #28
    if not os.path.exists(dir):
        with open(dir, 'w') as f:
            f.write(file)
    else:
        with open(dir, 'a') as f:
            f.write(file)


if __name__ == '__main__':
    while not os.path.exists(first_model_dir):
        time.sleep(30)
        print('The first model has not been generated yet; waiting for validation to produce the mAP file')

    raw_num, part_num, Original_list, Part_class_list = class_reduce.Class_num()
    global opt
    opt = opts.parse(part_num)
    opt.vdlist, opt.BBOX_dir = opts.vd_list('val')

    model,_ = create_model(opt, val=1)  # model OK

    # loss and optimizer
    criterion = torch.nn.BCEWithLogitsLoss()
    # model = torch.nn.DataParallel(model).cuda()

    val_loader = get_dataset_val(opt, Part_class_list)
    video_loader = get_dataset_video(opt, Part_class_list)

    for model_No in range(1, model_num_all):
        model_name = '%02d_' % (model_No) + 'model.pth'
        model_dir = '../model/' + model_name
        while not os.path.exists(model_dir):
Example #29
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True
    pidcal = PidCal()
    opt = opts.parse()
    warper = Warper()
    slidewindow = SlideWindow()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version", torch.__version__)
    print("cuda_version", torch.version.cuda)

    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # Data loading
    print('=> Setting up data loader')

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    ##################################################################################

    model.eval()

    cap = cv2.VideoCapture("input_video/test.avi")

    if cap.isOpened():
        print("width : {}, height : {}".format(cap.get(3), cap.get(4)))

    video_width = int(cap.get(3))
    video_height = int(cap.get(4))

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # out = cv2.VideoWriter('output_video/TEST_1.avi', fourcc, 20.0, (1280,480),0)

    prev_time = 0

    fps_list = []

    # fourcc =cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('input_video/processed_video.avi', fourcc, 40.0,
                          (480, 320), 0)

    steer_list = list()
    lpf_list = list()

    while True:
        ret, frame = cap.read()

        if ret:
            cur_time = time.time()
            frame_new = cv2.resize(frame, (320, 180))

            input_img = frame_new / 255.
            input_img = preprocess_img(input_img)

            # array to tensor
            input_img = torch.from_numpy(input_img).float()

            with torch.no_grad():
                inputData_var = Variable(input_img).unsqueeze(0).cuda()

                # inference
                output = model.forward(inputData_var)
                output = torch.sigmoid(output)
                #output = F.softmax(output, dim=1)

                # gpu -> cpu,  tensor -> numpy
                output = output.detach().cpu().numpy()

                output = output[0]

                output = postprocess_img(output)
                output = np.clip(output, 0, 1)
                output *= 255
                output = np.uint8(output)

                output = cv2.resize(output, (640, 360))
                output[output > 80] = 255
                output[output <= 80] = 0

                warper_img, point_img = warper.warp(output)
                ret, left_start_x, right_start_x, cf_img = slidewindow.w_slidewindow(
                    warper_img)

                if ret:
                    left_x_current, right_x_current, sliding_img, steer_theta = slidewindow.h_slidewindow(
                        warper_img, left_start_x, right_start_x)
                    cv2.imshow('sliding_img', sliding_img)
                    steer_list.append(steer_theta)
                    lpf_result = lpf(steer_theta, 0.5)
                    lpf_list.append(lpf_result)
                    print("steer theta:", steer_theta)
                    if steer_theta < -28 or steer_theta > 28:
                        continue
                    else:
                        pid = round(pidcal.pid_control(int(50 * steer_theta)),
                                    6)
                        print("pid :", pid)
                        '''
                        auto_drive(pid)
                        '''
                else:
                    pidcal.error_sum = 0

                end_time = time.time()
                sec = end_time - cur_time

                fps = 1 / sec
                fps_list.append(fps)

                print("Estimated fps {0} ".format(fps))

                # out.write(add_img)

                cv2.imshow("frame", frame)
                out.write(warper_img)
                # cv2.imshow("src", warper_img)
                # cv2.imshow("out_img", output)
                cv2.imshow("cf_img", cf_img)

                key = cv2.waitKey(1) & 0xFF
                if key == 27: break
                elif key == ord('p'):
                    cv2.waitKey(-1)
    plt.figure(1)
    plt.plot(steer_list)
    plt.figure(2)
    plt.plot(lpf_list)
    plt.show()
Example #30
def main():
    opt = opts.parse()
    for split in opt.splits:
        print('+-- Split {} --+'.format(split))
        train(opt, split)
Example #31
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    #cudnn.benchmark = True

    opt = opts.parse()

    print(("device id: {}".format(torch.cuda.current_device())))
    print("torch.version",torch.__version__)
    print("cuda_version",torch.version.cuda)


    models = importlib.import_module('models.init')
    # print(models)
    criterions = importlib.import_module('criterions.init')
    checkpoints = importlib.import_module('checkpoints')
    Trainer = importlib.import_module('models.' + opt.netType + '-train')


    # if opt.genLine:
    #     if opt.testOnly:
    #         processData('test')
    #     else:
    #         print('Prepare train data')
    #         processData('train')

    try:
        DataLoader = importlib.import_module('models.' + opt.netType + '-dataloader')
        #print('DataLoader1 : ', DataLoader)
    except ImportError:
        DataLoader = importlib.import_module('datasets.dataloader')
        #print('DataLoader2 : ', DataLoader)

    # Data loading
    print('=> Setting up data loader')
    trainLoader, valLoader = DataLoader.create(opt)
    #print('opt',opt)

    # Load previous checkpoint, if it exists
    print('=> Checking checkpoints')
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)



    # The trainer handles the training loop and evaluation on validation set
    trainer = Trainer.createTrainer(model, criterion, opt, optimState)

    if opt.testOnly:
        loss = trainer.test(valLoader, 0)
        sys.exit()


    bestLoss = math.inf
    startEpoch = max([1, opt.epochNum])
    #print("opt.epochNum : ", opt.epochNum)

    if checkpoint != None:
        startEpoch = checkpoint['epoch'] + 1
        bestLoss = checkpoint['loss']
        print('Previous loss: \033[1;36m%1.4f\033[0m' % bestLoss)
#     optimizer.step()
    trainer.LRDecay(startEpoch)
    # opt.nEpochs + 1
    for epoch in range(startEpoch, opt.nEpochs + 1):
        trainer.scheduler.step()

        #trainLoss = trainer.train(trainLoader, epoch)
        testLoss = trainer.test(valLoader, epoch)

        break
Example #32
File: train.py  Project: yxw027/ISCC_2020
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    # OPTION PARSER
    opt = opts.parse()

    # Device ID
    print("Device ID : {}".format(torch.cuda.current_device()))
    # torch.version : 1.4.0
    print("torch.version : ", torch.__version__)
    # cuda version : 10.1
    print("cuda_version : ", torch.version.cuda)

    # model init
    models = importlib.import_module('models.init')
    # criterions init
    criterions = importlib.import_module('criterions.init')
    # checkpoints init
    checkpoints = importlib.import_module('checkpoints')
    # Trainer init
    Trainer = importlib.import_module('models.' + opt.netType + '-train')

    # DataLoader Init
    DataLoader = importlib.import_module('datasets.dataloader')

    # Data Load
    trainLoader, valLoader = DataLoader.create(opt)

    # Check Points Loader
    print("=> Checking checkpoints")
    checkpoint = checkpoints.load(opt)

    # Create model
    model, optimState = models.setup(opt, checkpoint)
    model.cuda()

    criterion = criterions.setup(opt, checkpoint, model)

    trainer = Trainer.createTrainer(model, criterion, opt, optimState)

    bestLoss = math.inf
    startEpoch = max([1, opt.epochNum])

    if checkpoint != None:
        startEpoch = checkpoint['epoch'] + 1
        bestLoss = checkpoint['loss']
        print('Previous loss: \033[1;36m%1.4f\033[0m' % bestLoss)

    trainer.LRDecay(startEpoch)

    print("***** Training Start *****")
    for epoch in range(startEpoch, opt.nEpochs + 1):
        trainer.scheduler.step()

        trainLoss = trainer.train(trainLoader, epoch)
        testLoss = trainer.test(valLoader, epoch)

        bestModel = False

        if testLoss < bestLoss:
            bestModel = True
            bestLoss = testLoss
            print(' * Best model: \033[1;36m%1.4f\033[0m * ' % testLoss)

        checkpoints.save(epoch, trainer.model, criterion, trainer.optimizer,
                         bestModel, testLoss, opt)

    print(' * Finished Err: \033[1;36m%1.4f\033[0m * ' % bestLoss)