def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"),
                               config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))
    load_jobs()
    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        jobs['duration'] = end_time - start_time
        jobs['username'] = config.get("username")
        jobs['token'] = config.get("token")
        jobs['server'] = config.get("server")
        results_directory = os.getcwd() + '/results'
        utils.mkdir(results_directory)
        utils.write_yaml(config.get("poll"), results_directory, jobs)
    exit(0)
Example #2
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"), config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))
    load_jobs()
    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        if config.get("bisect"):
            for job_id in jobs:
                if 'result' in jobs[job_id]:
                    if jobs[job_id]['result'] == 'FAIL':
                        exit(1)
        jobs['duration'] = end_time - start_time
        jobs['username'] = config.get("username")
        jobs['token'] = config.get("token")
        jobs['server'] = config.get("server")
        results_directory = os.getcwd() + '/results'
        utils.mkdir(results_directory)
        utils.write_json(config.get("poll"), results_directory, jobs)
    exit(0)
Example #3
def main():
    parser = argparse.ArgumentParser(description='World Models ' + ID)
    parser.add_argument('--data_dir',
                        '-d',
                        default="/data/wm",
                        help='The base data/output directory')
    parser.add_argument(
        '--game', default='CarRacing-v0',
        help='Game to use')  # https://gym.openai.com/envs/CarRacing-v0/
    parser.add_argument('--experiment_name',
                        default='experiment_1',
                        help='To isolate its files from others')
    parser.add_argument('--num_rollouts',
                        '-n',
                        default=100,
                        type=int,
                        help='Number of rollouts to collect')
    parser.add_argument(
        '--offset',
        '-o',
        default=0,
        type=int,
        help='Offset rollout count, in case running on distributed cluster')
    parser.add_argument('--frame_resize',
                        '-r',
                        default=64,
                        type=int,
                        help='h x w resize of each observation frame')
    parser.add_argument('--cores',
                        default=0,
                        type=int,
                        help='Number of CPU cores to use. 0=all cores')
    args = parser.parse_args()
    log(ID, "args =\n " + str(vars(args)).replace(",", ",\n "))

    output_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              ID)
    mkdir(output_dir)

    log(ID, "Starting")

    if args.cores == 0:
        cores = cpu_count()
    else:
        cores = args.cores
    start = 1 + args.offset
    end = args.num_rollouts + 1 + args.offset
    rollouts_per_core = np.array_split(range(start, end), cores)
    pool = Pool(cores)
    worker_arg_tuples = []
    for i in rollouts_per_core:
        if len(i) != 0:
            worker_arg_tuples.append((i, args, output_dir))
    pool.map(worker, worker_arg_tuples)
    pool.close()
    pool.join()

    log(ID, "Done")
Example #4
    def save(self, save_dir="faiss"):
        """Save FAISS index to disc

        Args:
            save_dir (str, optional): Directory to save FAISS index. Defaults to "faiss".
        """
        mkdir(save_dir)
        faiss.write_index(self.index, os.path.join(save_dir, "faiss.index"))
        save_json_file(os.path.join(save_dir, "faiss.map"), self.id_map)
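
    # A plausible load() counterpart to save() above — a sketch, assuming a
    # load_json_file helper that mirrors save_json_file (not shown in this snippet).
    def load(self, save_dir="faiss"):
        """Load the FAISS index and id map written by save()."""
        self.index = faiss.read_index(os.path.join(save_dir, "faiss.index"))
        self.id_map = load_json_file(os.path.join(save_dir, "faiss.map"))  # assumed helper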
Example #5
def screen_params(params, prefix='0'):
    model = params['model']
    dataset = params['dataset']
    val_size = params['val_size']
    folds = params['folds']
    task = params['task']
    n_epoch = params['n_epoch']
    N = params['N']
    lograte = params['lograte'][int(prefix)]
    if prefix == '0':
        # only write log file for trial 0
        logfile = log('log/experiments.log', 'screen params')
        logfile.record(str(datetime.now()))
        for key, val in params.items():
            logfile.record('{0} = {1}'.format(key, val))
        logfile.save()
    Xtrain, Ytrain, _, _ = read_data(dataset)
    d = len(Xtrain[0])

    results = []
    if model == 'NN':
        root = '{0}-NN-H{1}-screen/'.format(dataset, params['H'])
    elif model == 'RF':
        root = '{0}-RF-screen/'.format(dataset)

    dirname = root + '{0:s}-{1:s}-N{2:d}-ep{3:d}'.format(
        dataset, model, N, n_epoch)
    if model == 'RF':
        for logGamma in params['logGamma']:
            Gamma = 10**logGamma
            _, resdir, _, tbdir = mkdir(
                dirname, '{:.1f}-{:.1f}'.format(lograte, logGamma))
            model_params, fit_params, model_type = params_process(
                model, logGamma, lograte, params, tbdir, d)

            model_params['Gamma'] = Gamma
            score = validate(Xtrain, Ytrain, val_size, model_type,
                             model_params, fit_params, folds)
            results.append({'Gamma': Gamma, 'score': score})
    elif model == 'NN':
        # For compatibility issue of old codes
        logGamma = -100
        _, resdir, _, tbdir = mkdir(dirname,
                                    '{:.1f}-'.format(lograte, logGamma))
        model_params, fit_params, model_type = params_process(
            model, logGamma, lograte, params, tbdir, d)
        score = validate(Xtrain, Ytrain, val_size, model_type, model_params,
                         fit_params, folds)
        results.append({'Gamma': -100, 'score': score})
    filename = resdir + 'output-' + prefix
    with open(filename, 'w') as f:
        f.write(str(results))
Example #6
def run_colocalisation():
    if request.method == "POST":
        config = request.json
        job_id = config['job_id']
        utils.mkdir(f"/tmp/{job_id}/")
        with open(f"/tmp/{job_id}/colocalisation.out", "w") as log:
            app.colocalisation_jobs[job_id] = subprocess.Popen(
                ["../api/colocalisation_api.py",
                 json.dumps(config)],
                stdout=log,
                stderr=log)

        return config
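
# For context, a minimal client call for the endpoint above might look like this
# sketch — the route path, host, and port are assumptions, not shown in the snippet:
import requests

config = {"job_id": "job42"}  # hypothetical payload; real configs carry more keys
resp = requests.post("http://localhost:5000/run_colocalisation", json=config)
print(resp.json())  # the endpoint echoes the submitted config back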
Example #7
def initVideoWriter(cam, scale_rate, filename, output_videos_dir):
    if not os.path.exists(output_videos_dir):
        mkdir(output_videos_dir)
    # We convert the resolutions from float to integer.
    frame_width = int(cam.get(3) * scale_rate)
    frame_height = int(cam.get(4) * scale_rate)
    FPS = __getFPS(cam)
    logger.info(f'Video has {FPS}fps')
    outVideoName = f'{getVideoName(filename)}_out_{FPS}fps.avi'
    # Define the codec and create the VideoWriter object. The output is stored in an '.avi' file.
    return cv2.VideoWriter(os.path.join(output_videos_dir, outVideoName),
                           cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), FPS,
                           (frame_width, frame_height))
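
# A hedged usage sketch for initVideoWriter above ("input.mp4" and the output
# directory are hypothetical): read frames from a capture and re-encode them.
cam = cv2.VideoCapture("input.mp4")
writer = initVideoWriter(cam, scale_rate=1.0, filename="input.mp4",
                         output_videos_dir="out_videos")
while True:
    ret, frame = cam.read()
    if not ret:
        break
    writer.write(frame)  # frame size must match the writer's (width, height)
cam.release()
writer.release()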
Example #8
def run_segment():
    config = request.json
    if request.method == "POST":
        utils.mkdir(f"/tmp/{config['job_id']}/")
        print(config['job_id'])
        with open(f"/tmp/{config['job_id']}/segmentation.out", "w") as log:
            subprocess.Popen([
                "../api/segment_api.py",
                json.dumps(config),
            ],
                             stdout=log,
                             stderr=log)

        return config
Example #9
def screen_params_alloc(params):
    dataset = params['dataset']
    model = params['model']
    lograte = params['lograte']
    logGamma = params['logGamma']
    N = params['N']
    n_epoch = params['n_epoch']
    if model == 'NN':
        root = '{0}-NN-H{1}-screen/'.format(dataset, params['H'])
    elif model == 'RF':
        root = '{0}-RF-screen/'.format(dataset)

    dirname = root + '{0:s}-{1:s}-N{2:d}-ep{3:d}'.format(
        dataset, model, N, n_epoch)
    _, resdir, _, _ = mkdir(dirname)
    filename = resdir + 'output-'
    row = [r'log(rate)\log(Gamma)']  # raw string: '\l' is not a valid escape
    row.extend(logGamma)
    output = [row]
    for idx, rate in enumerate(lograte):
        row = []
        try:
            with open(filename + str(idx), 'r') as f:
                result = eval(f.read())
        except Exception:
            print('not all lograte runs have finished')
            continue
        row.append(rate)
        for item in result:
            row.append(item['score'])
        output.append(row)
    finalop = [output, params]
    with open(filename + 'alloc', 'w') as f:
        f.write(str(finalop))
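
# The str()/eval() round-trip used above executes whatever is in the results
# file. Assuming the files hold only Python literals (which the str() writes
# suggest), ast.literal_eval is a safer reader:
import ast

def read_results(path):
    with open(path, 'r') as f:
        return ast.literal_eval(f.read())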
Example #10
def train_and_test_alloc(params):
    dataset = params['dataset']
    model = params['model']
    N = params['N']
    n_epoch = params['n_epoch']
    trials = params['trials']
    if model == 'NN':
        root = '{0}-NN-H{1}-test/'.format(dataset, params['H'])
    elif model == 'RF':
        root = '{0}-RF-test/'.format(dataset)

    dirname = root + '{0:s}-{1:s}-N{2:d}-ep{3:d}'.format(
        dataset, model, N, n_epoch)
    _, resdir, _, _ = mkdir(dirname)
    filename = resdir + 'output-'
    tags = ['accuracy', 'sparsity', 'traintime', 'testtime']
    alloc = {}
    for idx in range(4):
        tag = tags[idx]
        result = np.zeros(trials)
        for prefix in range(trials):
            try:
                with open(filename + str(prefix), 'r') as fr:
                    dict1, _, model_params, fit_params = eval(fr.read())
            except Exception:
                print('# of trials is incorrect.')
                break
            result[prefix] = dict1[tag]
        mean = np.mean(result)
        std = np.std(result)
        alloc[tag] = {'mean': mean, 'std': std}
    finalop = [alloc, dataset, model_params, fit_params]
    with open(filename + 'alloc', 'w') as fw:
        fw.write(str(finalop))
Example #11
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"),
                               config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    if config.get("jobs"):
        load_jobs(config.get("jobs"))
        print "Loading jobs from top folder " + str(config.get("jobs"))
    else:
        load_jobs(os.getcwd())

    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        if config.get("bisect"):
            for job_id in jobs:
                if 'result' in jobs[job_id]:
                    if jobs[job_id]['result'] == 'FAIL':
                        exit(1)
        jobs['duration'] = end_time - start_time
        jobs['username'] = config.get("username")
        jobs['token'] = config.get("token")
        jobs['server'] = config.get("server")
        results_directory = os.getcwd() + '/results'
        utils.mkdir(results_directory)
        utils.write_json(config.get("poll"), results_directory, jobs)
    exit(0)
Example #12
def main(args):
    config = configuration.get_config(args)

    url = utils.validate_input(config.get("username"), config.get("token"), config.get("server"))
    connection = utils.connect(url)
    if config.get("repo"):
        retrieve_jobs(config.get("repo"))

    if config.get("jobs"):
        load_jobs(config.get("jobs"))
        print "Loading jobs from top folder " + str(config.get("jobs"))
    else:
        load_jobs(os.getcwd())

    start_time = time.time()

    bundle_stream = None
    if config.get("stream"):
        bundle_stream = config.get("stream")

    submit_jobs(connection, config.get("server"), bundle_stream=bundle_stream)

    if config.get("poll"):
        jobs = poll_jobs(connection, config.get("timeout"))
        end_time = time.time()
        if config.get("bisect"):
            for job_id in jobs:
                if "result" in jobs[job_id]:
                    if jobs[job_id]["result"] == "FAIL":
                        exit(1)
        jobs["duration"] = end_time - start_time
        jobs["username"] = config.get("username")
        jobs["token"] = config.get("token")
        jobs["server"] = config.get("server")
        results_directory = os.getcwd() + "/results"
        utils.mkdir(results_directory)
        utils.write_json(config.get("poll"), results_directory, jobs)
    exit(0)
Example #13
def testing(paramsfile, N, H, prefix='0'):
    params = read_params(paramsfile)
    dataset = params['dataset']
    model = params['model']
    n_epoch = params['n_epoch']
    if model == 'NN':
        root = '{0}-NN-H{1}-screen/'.format(dataset, H)
    elif model == 'RF':
        root = '{0}-RF-screen/'.format(dataset)

    dirname = root + '{0:s}-{1:s}-N{2:d}-ep{3:d}'.format(
        dataset, model, N, n_epoch)
    _, resdir, _, _ = mkdir(dirname)
    paramsfile = resdir + 'output-alloc'

    train_and_test(dataset, params=paramsfile, prefix=prefix)
Example #14
def screen_params_append(params):
    dataset = params['dataset']
    model = params['model']
    lograte = params['lograte']
    logGamma = params['logGamma']
    N = params['N']
    n_epoch = params['n_epoch']
    if model == 'NN':
        root = '{0}-NN-H{1}-screen/'.format(dataset, params['H'])
    elif model == 'RF':
        root = '{0}-RF-screen/'.format(dataset)

    dirname = root + '{0:s}-{1:s}-N{2:d}-ep{3:d}'.format(
        dataset, model, N, n_epoch)
    _, resdir, _, _ = mkdir(dirname)
    filename = resdir + 'output-'
    with open(filename + 'alloc', 'r') as fr:
        result, params = eval(fr.read())
    result[0].extend(logGamma)
    for idx, rate in enumerate(lograte):
        try:
            with open(filename + str(idx), 'r') as fr:
                new_result = eval(fr.read())
        except Exception:
            print('not all lograte runs have finished')
            break
        for item in new_result:
            result[idx + 1].append(item['score'])
    sortidx = np.argsort(result[0][1:]) + 1
    updated = []
    for row in result:
        newrow = [row[0]]
        for idx in sortidx:
            newrow.append(row[idx])
        updated.append(newrow)
    finalop = [updated, params]
    with open(filename + 'alloc', 'w') as f:
        f.write(str(finalop))
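
# Worked illustration (not from the source) of the argsort reordering above:
# columns after the first label cell are re-sorted by ascending logGamma.
import numpy as np

header = [r'log(rate)\log(Gamma)', 0.0, -2.0, -1.0]
sortidx = np.argsort(header[1:]) + 1  # argsort gives [1, 2, 0]; +1 -> [2, 3, 1]
reordered = [header[0]] + [header[i] for i in sortidx]
# reordered == [r'log(rate)\log(Gamma)', -2.0, -1.0, 0.0]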
Example #15
def main():
    parser = argparse.ArgumentParser(description='World Models ' + ID)
    parser.add_argument('--data_dir',
                        '-d',
                        default="./data/wm",
                        help='The base data/output directory')
    parser.add_argument(
        '--game', default='CarRacing-v0',
        help='Game to use')  # https://gym.openai.com/envs/CarRacing-v0/
    parser.add_argument('--experiment_name',
                        default='experiment_1',
                        help='To isolate its files from others')
    parser.add_argument(
        '--load_batch_size',
        default=10,
        type=int,
        help='Load game frames in batches so as not to run out of memory')
    parser.add_argument(
        '--model',
        '-m',
        default='',
        help=
        'Initialize the model from given file, or "default" for one in data folder'
    )
    parser.add_argument('--no_resume',
                        action='store_true',
                        help="Don't auto resume from the latest snapshot")
    parser.add_argument(
        '--resume_from',
        '-r',
        default='',
        help='Resume the optimization from a specific snapshot')
    parser.add_argument('--test',
                        action='store_true',
                        help='Generate samples only')
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--epoch',
                        '-e',
                        default=1,
                        type=int,
                        help='number of epochs to learn')
    parser.add_argument(
        '--snapshot_interval',
        '-s',
        default=100,
        type=int,
        help='100 = snapshot every 100itr*batch_size imgs processed')
    parser.add_argument('--z_dim',
                        '-z',
                        default=32,
                        type=int,
                        help='dimension of encoded vector')
    parser.add_argument('--batch_size',
                        '-b',
                        type=int,
                        default=100,
                        help='learning minibatch size')
    parser.add_argument('--no_progress_bar',
                        '-p',
                        action='store_true',
                        help='Do not display a progress bar during training')
    parser.add_argument('--kl_tolerance', type=float, default=0.5, help='')

    args = parser.parse_args()
    log(ID, "args =\n " + str(vars(args)).replace(",", ",\n "))

    output_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              ID)
    random_rollouts_dir = os.path.join(args.data_dir, args.game,
                                       args.experiment_name, 'random_rollouts')
    mkdir(output_dir)

    max_iter = 0
    auto_resume_file = None
    files = os.listdir(output_dir)
    for file in files:
        if re.match(r'^snapshot_iter_', file):
            snapshot_iter = int(re.search(r'\d+', file).group())
            if snapshot_iter > max_iter:
                max_iter = snapshot_iter
    if max_iter > 0:
        auto_resume_file = os.path.join(output_dir,
                                        "snapshot_iter_{}".format(max_iter))

    model = CVAE(args.z_dim)

    if args.model:
        if args.model == 'default':
            args.model = os.path.join(output_dir, ID + ".model")
        log(ID, "Loading saved model from: " + args.model)
        chainer.serializers.load_npz(args.model, model)

    optimizer = chainer.optimizers.Adam(alpha=0.0001)
    optimizer.setup(model)

    log(ID, "Loading training data")
    train = VisionDataset(dir=random_rollouts_dir,
                          load_batch_size=args.load_batch_size,
                          shuffle=True,
                          verbose=True)
    train_iter = chainer.iterators.SerialIterator(train,
                                                  args.batch_size,
                                                  shuffle=False)

    updater = training.StandardUpdater(train_iter,
                                       optimizer,
                                       device=args.gpu,
                                       loss_func=model.get_loss_func(
                                           args.kl_tolerance))

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=output_dir)
    trainer.extend(extensions.snapshot(),
                   trigger=(args.snapshot_interval, 'iteration'))
    trainer.extend(
        extensions.LogReport(trigger=(100 if args.gpu >= 0 else 10,
                                      'iteration')))
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'main/kl_loss', 'main/rec_loss',
            'elapsed_time'
        ]))
    if not args.no_progress_bar:
        trainer.extend(
            extensions.ProgressBar(
                update_interval=100 if args.gpu >= 0 else 10))

    sample_idx = np.random.choice(range(train.get_current_batch_size()),
                                  64,
                                  replace=False)
    sample_frames = chainer.Variable(np.asarray(train[sample_idx]))
    np.random.seed(31337)
    sample_z = chainer.Variable(
        np.random.normal(0, 1, (64, args.z_dim)).astype(np.float32))
    save_images_collage(sample_frames.data,
                        os.path.join(output_dir, 'train.png'))
    sampler = Sampler(model, args, output_dir, sample_frames, sample_z)
    trainer.extend(sampler, trigger=(args.snapshot_interval, 'iteration'))

    if args.resume_from:
        log(ID, "Resuming trainer manually from snapshot: " + args.resume_from)
        chainer.serializers.load_npz(args.resume_from, trainer)
    elif not args.no_resume and auto_resume_file is not None:
        log(ID,
            "Auto resuming trainer from last snapshot: " + auto_resume_file)
        chainer.serializers.load_npz(auto_resume_file, trainer)

    if not args.test:
        log(ID, "Starting training")
        trainer.run()
        log(ID, "Done training")
        log(ID, "Saving model")
        chainer.serializers.save_npz(os.path.join(output_dir, ID + ".model"),
                                     model)

    if args.test:
        log(ID, "Saving test samples")
        sampler(trainer)

    if not args.test:
        log(ID, "Saving latent z's for all training data")
        train = VisionDataset(dir=random_rollouts_dir,
                              load_batch_size=args.load_batch_size,
                              shuffle=False,
                              verbose=True)
        total_batches = train.get_total_batches()
        for batch in range(total_batches):
            gc.collect()
            train.load_batch(batch)
            batch_frames, batch_rollouts, batch_rollouts_counts = train.get_current_batch(
            )
            mu = None
            ln_var = None
            splits = batch_frames.shape[0] // args.batch_size
            if batch_frames.shape[0] % args.batch_size != 0:
                splits += 1
            for i in range(splits):
                start_idx = i * args.batch_size
                end_idx = (i + 1) * args.batch_size
                sample_frames = batch_frames[start_idx:end_idx]
                if args.gpu >= 0:
                    sample_frames = chainer.Variable(cp.asarray(sample_frames))
                else:
                    sample_frames = chainer.Variable(sample_frames)
                this_mu, this_ln_var = model.encode(sample_frames)
                this_mu = this_mu.data
                this_ln_var = this_ln_var.data
                if args.gpu >= 0:
                    this_mu = cp.asnumpy(this_mu)
                    this_ln_var = cp.asnumpy(this_ln_var)
                if mu is None:
                    mu = this_mu
                    ln_var = this_ln_var
                else:
                    mu = np.concatenate((mu, this_mu), axis=0)
                    ln_var = np.concatenate((ln_var, this_ln_var), axis=0)
            running_count = 0
            for rollout in batch_rollouts:
                rollout_dir = os.path.join(random_rollouts_dir, rollout)
                rollout_count = batch_rollouts_counts[rollout]
                start_idx = running_count
                end_idx = running_count + rollout_count
                this_mu = mu[start_idx:end_idx]
                this_ln_var = ln_var[start_idx:end_idx]
                np.savez_compressed(os.path.join(rollout_dir, "mu+ln_var.npz"),
                                    mu=this_mu,
                                    ln_var=this_ln_var)
                running_count = running_count + rollout_count
            log(ID, "> Processed z's for rollouts " + str(batch_rollouts))
            # Free up memory:
            batch_frames = None
            mu = None
            ln_var = None

    log(ID, "Done")
Example #16
def main():
    global colours, img_size
    args = parse_args()
#    videos_dir = args.videos_dir
    output_path = args.output_path
    no_display = args.no_display
    detect_interval = args.detect_interval  # balance between performance and smoothness
    margin = args.margin  # if faces are large in your video, increase this for easier tracking
    scale_rate = args.scale_rate  # smaller values shrink the input frames
    show_rate = args.show_rate  # smaller values display smaller frames
    face_score_threshold = args.face_score_threshold

    mkdir(output_path)
    # for display
    if not no_display:
        colours = np.random.rand(32, 3)

    # init tracker
    tracker = Sort()  # create instance of the SORT tracker

    logger.info('Start track and extract......')
    with tf.Graph().as_default():
        with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
                                              log_device_placement=False)) as sess:
            pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.join(project_dir, "align"))

            minsize = 40  # minimum size of face for mtcnn to detect
            threshold = [0.6, 0.7, 0.7]  # thresholds for MTCNN's three stages
            factor = 0.709  # scale factor

#            for filename in os.listdir(videos_dir):
#                logger.info('All files:{}'.format(filename))
#            for filename in os.listdir(videos_dir):
#                suffix = filename.split('.')[1]
#                if suffix != 'mp4' and suffix != 'avi':  # you can specify more video formats if you need
#                    continue
#                video_name = os.path.join(videos_dir, filename)
#                directoryname = os.path.join(output_path, filename.split('.')[0])
            
#                logger.info('Video_name:{}'.format(video_name))
            directoryname = output_path
            cam = cv2.VideoCapture(0)  # video_name --> 0: read from the camera feed instead of a file

            c = 0
            while True:
                    final_faces = []
                    addtional_attribute_list = []
                    ret, frame = cam.read()
                    if not ret:
                        logger.warning("ret false")
                        break
                    if frame is None:
                        logger.warning("frame drop")
                        break

                    frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
                    r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    if c % detect_interval == 0:
                        img_size = np.asarray(frame.shape)[0:2]
                        mtcnn_starttime = time()
                        faces, points = detect_face.detect_face(r_g_b_frame, minsize, pnet, rnet, onet, threshold,
                                                                factor)
                        logger.info("MTCNN detect face cost time : {} s".format(
                            round(time() - mtcnn_starttime, 3)))  # mtcnn detect ,slow
                        face_sums = faces.shape[0]
                        
                        if face_sums > 0:
                            face_list = []
                            for i, item in enumerate(faces):
                                score = round(faces[i, 4], 2)
                                if score > face_score_threshold:
                                    det = np.squeeze(faces[i, 0:4])

                                    # face rectangle
                                    det[0] = np.maximum(det[0] - margin, 0)
                                    det[1] = np.maximum(det[1] - margin, 0)
                                    det[2] = np.minimum(det[2] + margin, img_size[1])
                                    det[3] = np.minimum(det[3] + margin, img_size[0])
                                    face_list.append(item)

                                    # face cropped
                                    bb = np.array(det, dtype=np.int32)

                                    # use the 5 facial landmarks to judge whether the face is frontal or profile
                                    squeeze_points = np.squeeze(points[:, i])
                                    tolist = squeeze_points.tolist()
                                    facial_landmarks = []
                                    for j in range(5):
                                        item = [tolist[j], tolist[(j + 5)]]
                                        facial_landmarks.append(item)
                                    if args.face_landmarks:
                                        for (x, y) in facial_landmarks:
                                            cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
                                    cropped = frame[bb[1]:bb[3], bb[0]:bb[2], :].copy()
                                    
                                    cropped_face = cv2.resize(cropped, (160, 160), interpolation=cv2.INTER_AREA)
                                    feature = face_recognition.recognize(cropped_face)
                                    (name , proba , conf) = face_classfier.classify(feature)
                                    
                                    #cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (0, 255, 0), 2)
                                    name = name + ":%" + proba
                                    # plot result idx under box
                                    text_x = bb[0] - 10
                                    text_y = bb[1] - 10
                                    if conf:
                                        cv2.putText(frame, name, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                                1, (0, 0, 255), thickness=1, lineType=2)
                                    
                                    
                                    dist_rate, high_ratio_variance, width_rate = judge_side_face(
                                        np.array(facial_landmarks))

                                    # additional face attributes: cropped image, detection score, and side-face measures (dist_rate, high_ratio_variance, width_rate)
                                    item_list = [cropped, score, dist_rate, high_ratio_variance, width_rate]
                                    addtional_attribute_list.append(item_list)

                            final_faces = np.array(face_list)

                    trackers = tracker.update(final_faces, img_size, directoryname, addtional_attribute_list, detect_interval)

                    c += 1

                    for d in trackers:
                       # print("D values:",d)
                        if not no_display:
                            d = d.astype(np.int32)
                            cv2.rectangle(frame, (d[0], d[1]), (d[2], d[3]), colours[d[4] % 32, :] * 255, 3)
                            if len(final_faces) > 0:  # avoid NumPy's ambiguous array-vs-list comparison
                                cv2.putText(frame, 'DETECTOR', (5, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                            (1, 1, 1), 2)
                            else:
                                cv2.putText(frame, 'ID : %d' % (d[4]), (d[0] - 10, d[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                            0.75,
                                            colours[d[4] % 32, :] * 255, 2)

                    if not no_display:
                        frame = cv2.resize(frame, (0, 0), fx=show_rate, fy=show_rate)
                        cv2.imshow("Frame", frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
#    print("Debugging state:...")
#    print("faces size",faces.shape)
#    print("faces content",faces)
#    print("RGB frame")
#    print(r_g_b_frame)
#    print("RGB frame size",r_g_b_frame.shape)
#    print("face list shape",len(face_list))
#    print(face_list)
#    print("Det",det)
#    
    
   
    cam.release()
    cv2.destroyAllWindows()
    # Restore np.load's default keyword arguments
    # (mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII').
    np.load.__defaults__ = (None, False, True, 'ASCII')
Example #17
def main():
    parser = argparse.ArgumentParser(description='World Models ' + ID)
    parser.add_argument('--data_dir',
                        '-d',
                        default="/data/wm",
                        help='The base data/output directory')
    parser.add_argument(
        '--game', default='CarRacing-v0',
        help='Game to use')  # https://gym.openai.com/envs/CarRacing-v0/
    parser.add_argument('--experiment_name',
                        default='experiment_1',
                        help='To isolate its files from others')
    parser.add_argument('--rollouts',
                        '-n',
                        default=100,
                        type=int,
                        help='Number of times to rollout')
    parser.add_argument('--frame_resize',
                        default=64,
                        type=int,
                        help='h x w resize of each observation frame')
    parser.add_argument('--hidden_dim',
                        default=256,
                        type=int,
                        help='LSTM hidden units')
    parser.add_argument('--z_dim',
                        '-z',
                        default=32,
                        type=int,
                        help='dimension of encoded vector')
    parser.add_argument('--mixtures',
                        default=5,
                        type=int,
                        help='number of gaussian mixtures for MDN')
    parser.add_argument('--temperature',
                        '-t',
                        default=1.0,
                        type=float,
                        help='Temperature (tau) for MDN-RNN (model)')
    parser.add_argument('--predict_done',
                        action='store_true',
                        help='Whether MDN-RNN should also predict done state')
    parser.add_argument('--cores',
                        default=0,
                        type=int,
                        help='Number of CPU cores to use. 0=all cores')
    parser.add_argument(
        '--weights_type',
        default=1,
        type=int,
        help="1=action_dim*(z_dim+hidden_dim), 2=z_dim+2*hidden_dim")
    parser.add_argument('--record', action='store_true', help='Record as gifs')

    args = parser.parse_args()
    log(ID, "args =\n " + str(vars(args)).replace(",", ",\n "))

    if args.game in DOOM_GAMES:
        env = ViZDoomWrapper(args.game)
    else:
        env = gym.make(args.game)
    action_dim = len(env.action_space.low)
    args.action_dim = action_dim
    env = None

    if args.cores == 0:
        cores = cpu_count()
    else:
        cores = args.cores

    output_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              ID)
    mkdir(output_dir)
    model_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                             'model')
    vision_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              'vision')
    controller_dir = os.path.join(args.data_dir, args.game,
                                  args.experiment_name, 'controller')

    model = MDN_RNN(args.hidden_dim, args.z_dim, args.mixtures,
                    args.predict_done)
    chainer.serializers.load_npz(os.path.join(model_dir, "model.model"), model)
    vision = CVAE(args.z_dim)
    chainer.serializers.load_npz(os.path.join(vision_dir, "vision.model"),
                                 vision)
    # controller = np.random.randn(action_dim * (args.z_dim + args.hidden_dim) + action_dim).astype(np.float32)
    # controller = np.random.randn(args.z_dim + 2 * args.hidden_dim).astype(np.float32)
    controller = np.load(os.path.join(controller_dir,
                                      "controller.model"))['xmean']
    W_c, b_c = transform_to_weights(args, controller)

    log(ID, "Starting")

    worker_arg_tuples = []
    for rollout_num in range(args.rollouts):
        worker_arg_tuples.append(
            (rollout_num, args, vision, model.copy(), W_c, b_c, output_dir))
    pool = Pool(cores)
    cumulative_rewards = pool.map(worker, worker_arg_tuples)
    pool.close()
    pool.join()

    log(ID, "Cumulative Rewards:")
    for rollout_num in range(args.rollouts):
        log(
            ID, "> #{} = {:.2f}".format((rollout_num + 1),
                                        cumulative_rewards[rollout_num]))

    log(
        ID, "Mean: {:.2f} Std: {:.2f}".format(np.mean(cumulative_rewards),
                                              np.std(cumulative_rewards)))
    log(
        ID, "Highest: #{} = {:.2f} Lowest: #{} = {:.2f}".format(
            np.argmax(cumulative_rewards) + 1, np.amax(cumulative_rewards),
            np.argmin(cumulative_rewards) + 1, np.amin(cumulative_rewards)))

    cumulative_rewards_file = os.path.join(output_dir,
                                           "cumulative_rewards.npy.gz")
    log(ID, "Saving cumulative rewards to: " + cumulative_rewards_file)
    with gzip.GzipFile(cumulative_rewards_file, "w") as file:
        np.save(file, cumulative_rewards)

    # To load:
    # with gzip.GzipFile(cumulative_rewards_file, "r") as file:
    #     cumulative_rewards = np.load(file)

    log(ID, "Done")
Example #18
def main(cfg: DictConfig) -> None:
    if cfg.trainer.print_torch_setup is True:
        print_torch_setup()

    if cfg.trainer.seed is not None:
        random.seed(cfg.trainer.seed)
        torch.manual_seed(cfg.trainer.seed)
        torch.backends.cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    assert torch.cuda.is_available(), 'This code requires a GPU to train'
    torch.backends.cudnn.benchmark = True
    assert cfg.trainer.output_dir, 'You need to specify an output directory'

    mkdir(cfg.trainer.output_dir)
    experiment_name = time.strftime("%Y%m%d-%H%M%S")
    print(f'The current experiment will be tracked as {experiment_name}')
    output_dir = os.path.join(cfg.trainer.output_dir, experiment_name)
    print(f'Results will be saved in {output_dir}')
    writer = SummaryWriter(output_dir)

    # this is just a workaround for now
    # hparams logging to a file and as text into tensorboard
    # it is certainly not perfect... :/
    hparams = flatten_dict(OmegaConf.to_container(cfg, resolve=True))
    hparams_as_str = [
        str(k) + ' >>> ' + str(v) + '\n' for k, v in hparams.items()
    ]
    # TODO: this seems to not work properly!
    # writer.add_hparams(hparams, metric_dict={'acc': 1}, run_name=experiment_name)
    with open(os.path.join(output_dir, 'hparams.txt'), 'w',
              encoding='utf-8') as hparams_file:
        for line in hparams_as_str:
            hparams_file.write(line)
    writer.add_text('hparams', '\r\n'.join(hparams_as_str), global_step=0)

    device = torch.device(cfg.trainer.device)
    assert device.type == 'cuda', 'Only GPU based training is supported'

    dataset = instantiate(cfg.dataset.train)

    assert cfg.dataset.val_split is not None, 'Handling a separate validation set is not implemented as of now!'
    train_size = int((1 - cfg.dataset.val_split) * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(
        dataset, [train_size, val_size])

    train_sampler_weights = dataset.make_weights_for_dataset_sampling(
        train_dataset)
    sampler = WeightedRandomSampler(
        train_sampler_weights,
        num_samples=cfg.dataset.train_samples_per_epoch,
        replacement=True)
    train_collate_fn = dataset.get_collate_fn(
        mode='train', channels_last=cfg.trainer.channels_last)
    train_dataloader = instantiate(cfg.dataloader.train,
                                   dataset=train_dataset,
                                   collate_fn=train_collate_fn,
                                   sampler=sampler)

    val_collate_fn = dataset.get_collate_fn(
        mode='val', channels_last=cfg.trainer.channels_last)
    val_dataloader = instantiate(cfg.dataloader.val,
                                 dataset=val_dataset,
                                 collate_fn=val_collate_fn)

    # this handler moves a batch to the GPU as uint8, casts it to a float after transferring it
    # and normalizes the images
    to_device_handler = ToDeviceFunction(device=device,
                                         mean=cfg.dataset.mean,
                                         std=cfg.dataset.std)

    # the prefetch loader prefetches the next batch onto the GPU which makes up a couple
    # of percent in the training loop
    train_dataloader = PrefetchLoader(loader=train_dataloader,
                                      to_device_handler=to_device_handler)

    # val_dataloader = PrefetchLoader(loader=val_dataloader,
    #                                 to_device_handler=to_device_handler)

    model = instantiate(cfg.models.model, device=device).to(device)

    if cfg.trainer.channels_last is True:
        model = model.to(memory_format=torch.channels_last)

    if cfg.trainer.anomaly_detection is True:
        torch.autograd.set_detect_anomaly(mode=True)

    params_to_optimize = [{
        "params": [p for p in model.parameters() if p.requires_grad]
    }]

    optimizer = instantiate(cfg.optimizer, params_to_optimize)

    scaler = GradScaler(enabled=cfg.trainer.amp)

    if cfg.trainer.resume is not None:
        if os.path.isfile(cfg.trainer.resume):
            print("Trying to load checkpoint '{}'".format(cfg.trainer.resume))

            if cfg.trainer.from_u2net_checkpoint is True:
                checkpoint = torch.load(cfg.trainer.resume,
                                        map_location=device)
                model.load_state_dict(checkpoint)
            else:
                checkpoint = torch.load(cfg.trainer.resume,
                                        map_location=device)
                model.load_state_dict(checkpoint['model'])

                if cfg.trainer.weights_only is False:
                    cfg.trainer.start_epoch = checkpoint['epoch']
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    scaler.load_state_dict(checkpoint['scaler'])

            print(
                f'Loaded checkpoint {cfg.trainer.resume}. Resuming training at epoch {cfg.trainer.start_epoch}'
            )
        else:
            warnings.warn(f'Checkpoint {cfg.trainer.resume} not found!')

    print("Start training...")
    start_time = time.time()

    if cfg.trainer.dry_run is True:
        print("Doing dry run, running val on train dataset...")
        # validate_one_epoch(writer, model, train_dataloader, device, 0, cfg.trainer.print_freq)
        return

    for epoch in range(cfg.trainer.start_epoch, cfg.trainer.epochs):
        train_one_epoch(writer, device, model, optimizer, scaler,
                        train_dataloader, epoch, cfg)
        # validate_one_epoch(writer, model, val_dataloader, epoch, cfg)

        checkpoint = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scaler': scaler.state_dict(),
            'epoch': epoch,
            'cfg': cfg
        }
        save_on_master(checkpoint,
                       os.path.join(output_dir, 'model_{}.pth'.format(epoch)))
        save_on_master(checkpoint, os.path.join(output_dir, 'checkpoint.pth'))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
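
# Minimal sketch of the PrefetchLoader idea used above — an assumption about
# its implementation, not the project's actual class: copy the next batch to
# the GPU on a side CUDA stream while the model computes on the current one.
import torch

class SimplePrefetchLoader:
    def __init__(self, loader, device):
        self.loader = loader
        self.device = device
        self.stream = torch.cuda.Stream()

    def __iter__(self):
        ready = None
        for batch in self.loader:
            with torch.cuda.stream(self.stream):
                # asynchronous host->device copy (benefits from pinned memory)
                moved = [t.to(self.device, non_blocking=True) for t in batch]
            if ready is not None:
                yield ready  # consumer computes while the next copy proceeds
            # the default stream must wait for the side-stream copy to finish
            torch.cuda.current_stream().wait_stream(self.stream)
            ready = moved
        if ready is not None:
            yield ready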
Example #19
def main():
    parser = argparse.ArgumentParser(description='World Models ' + ID)
    parser.add_argument('--data_dir',
                        '-d',
                        default="./data/wm",
                        help='The base data/output directory')
    parser.add_argument(
        '--game', default='CarRacing-v0',
        help='Game to use')  # https://gym.openai.com/envs/CarRacing-v0/
    parser.add_argument('--experiment_name',
                        default='experiment_1',
                        help='To isolate its files from others')
    parser.add_argument(
        '--load_batch_size',
        default=100,
        type=int,
        help='Load rollouts in batches so as not to run out of memory')
    parser.add_argument(
        '--model',
        '-m',
        default='',
        help=
        'Initialize the model from given file, or "default" for one in data folder'
    )
    parser.add_argument('--no_resume',
                        action='store_true',
                        help="Don't auto resume from the latest snapshot")
    parser.add_argument(
        '--resume_from',
        '-r',
        default='',
        help='Resume the optimization from a specific snapshot')
    parser.add_argument('--test',
                        action='store_true',
                        help='Generate samples only')
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--epoch',
                        '-e',
                        default=20,
                        type=int,
                        help='number of epochs to learn')
    parser.add_argument('--snapshot_interval',
                        '-s',
                        default=200,
                        type=int,
                        help='snapshot every x games')
    parser.add_argument('--z_dim',
                        '-z',
                        default=32,
                        type=int,
                        help='dimension of encoded vector')
    parser.add_argument('--hidden_dim',
                        default=256,
                        type=int,
                        help='LSTM hidden units')
    parser.add_argument('--mixtures',
                        default=5,
                        type=int,
                        help='number of gaussian mixtures for MDN')
    parser.add_argument('--no_progress_bar',
                        '-p',
                        action='store_true',
                        help='Do not display a progress bar during training')
    parser.add_argument('--predict_done',
                        action='store_true',
                        help='Whether MDN-RNN should also predict done state')
    parser.add_argument('--sample_temperature',
                        default=1.,
                        type=float,
                        help='Temperature for generating samples')
    parser.add_argument('--gradient_clip',
                        default=0.,
                        type=float,
                        help='Clip grads L2 norm threshold. 0 = no clip')
    parser.add_argument('--sequence_length',
                        type=int,
                        default=128,
                        help='sequence length for LSTM for TBPTT')
    parser.add_argument('--in_dream',
                        action='store_true',
                        help='Whether to train in dream, or real environment')
    parser.add_argument(
        '--initial_z_noise',
        default=0.,
        type=float,
        help="Gaussian noise std for initial z for dream training")
    parser.add_argument('--done_threshold',
                        default=0.5,
                        type=float,
                        help='Done probability at or above this threshold counts as done')
    parser.add_argument('--temperature',
                        '-t',
                        default=1.0,
                        type=float,
                        help='Temperature (tau) for MDN-RNN (model)')
    parser.add_argument('--dream_max_len',
                        default=2100,
                        type=int,
                        help="Maximum timesteps for dream to avoid runaway")
    parser.add_argument(
        '--weights_type',
        default=1,
        type=int,
        help="1=action_dim*(z_dim+hidden_dim), 2=z_dim+2*hidden_dim")
    parser.add_argument(
        '--initial_z_size',
        default=10000,
        type=int,
        help="How many real initial frames to load for dream training")

    args = parser.parse_args()
    log(ID, "args =\n " + str(vars(args)).replace(",", ",\n "))

    output_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              ID)
    mkdir(output_dir)
    random_rollouts_dir = os.path.join(args.data_dir, args.game,
                                       args.experiment_name, 'random_rollouts')
    vision_dir = os.path.join(args.data_dir, args.game, args.experiment_name,
                              'vision')

    log(ID, "Starting")

    max_iter = 0
    auto_resume_file = None
    files = os.listdir(output_dir)
    for file in files:
        if re.match(r'^snapshot_iter_', file):
            snapshot_iter = int(re.search(r'\d+', file).group())
            if snapshot_iter > max_iter:
                max_iter = snapshot_iter
    if max_iter > 0:
        auto_resume_file = os.path.join(output_dir,
                                        "snapshot_iter_{}".format(max_iter))

    model = MDN_RNN(args.hidden_dim, args.z_dim, args.mixtures,
                    args.predict_done)
    vision = CVAE(args.z_dim)
    chainer.serializers.load_npz(os.path.join(vision_dir, "vision.model"),
                                 vision)

    if args.model:
        if args.model == 'default':
            args.model = os.path.join(output_dir, ID + ".model")
        log(ID, "Loading saved model from: " + args.model)
        chainer.serializers.load_npz(args.model, model)

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    if args.gradient_clip > 0.:
        optimizer.add_hook(
            chainer.optimizer_hooks.GradientClipping(args.gradient_clip))

    log(ID, "Loading training data")
    train = ModelDataset(dir=random_rollouts_dir,
                         load_batch_size=args.load_batch_size,
                         verbose=False)
    train_iter = chainer.iterators.SerialIterator(train,
                                                  batch_size=1,
                                                  shuffle=False)

    env = gym.make(args.game)
    action_dim = len(env.action_space.low)
    args.action_dim = action_dim

    updater = TBPTTUpdater(train_iter, optimizer, model.get_loss_func(), args,
                           model)

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=output_dir)
    trainer.extend(extensions.snapshot(),
                   trigger=(args.snapshot_interval, 'iteration'))
    trainer.extend(
        extensions.LogReport(trigger=(10 if args.gpu >= 0 else 1,
                                      'iteration')))
    trainer.extend(
        extensions.PrintReport(['epoch', 'iteration', 'loss', 'elapsed_time']))
    if not args.no_progress_bar:
        trainer.extend(
            extensions.ProgressBar(update_interval=10 if args.gpu >= 0 else 1))

    sample_size = 256
    rollout_z_t, rollout_z_t_plus_1, rollout_action, _, done = train[0]
    sample_z_t = rollout_z_t[0:sample_size]
    sample_z_t_plus_1 = rollout_z_t_plus_1[0:sample_size]
    sample_action = rollout_action[0:sample_size]
    img_t = vision.decode(sample_z_t).data
    img_t_plus_1 = vision.decode(sample_z_t_plus_1).data
    if args.predict_done:
        done = done.reshape(-1)
        img_t_plus_1[np.where(
            done[0:sample_size] >= 0.5), :, :, :] = 0  # Make done black
    save_images_collage(img_t, os.path.join(output_dir, 'train_t.png'))
    save_images_collage(img_t_plus_1,
                        os.path.join(output_dir, 'train_t_plus_1.png'))
    image_sampler = ImageSampler(model.copy(), vision, args, output_dir,
                                 sample_z_t, sample_action)
    trainer.extend(image_sampler,
                   trigger=(args.snapshot_interval, 'iteration'))

    if args.resume_from:
        log(ID, "Resuming trainer manually from snapshot: " + args.resume_from)
        chainer.serializers.load_npz(args.resume_from, trainer)
    elif not args.no_resume and auto_resume_file is not None:
        log(ID,
            "Auto resuming trainer from last snapshot: " + auto_resume_file)
        chainer.serializers.load_npz(auto_resume_file, trainer)

    if not args.test:
        log(ID, "Starting training")
        trainer.run()
        log(ID, "Done training")
        log(ID, "Saving model")
        chainer.serializers.save_npz(os.path.join(output_dir, ID + ".model"),
                                     model)

    if args.test:
        log(ID, "Saving test samples")
        image_sampler(trainer)

    log(ID, "Generating gif for a rollout generated in dream")
    if args.gpu >= 0:
        model.to_cpu()
    model.reset_state()
    # current_z_t = np.random.randn(64).astype(np.float32)  # Noise as starting frame
    rollout_z_t, rollout_z_t_plus_1, rollout_action, done = train[
        np.random.randint(len(train))]  # Pick a random real rollout
    current_z_t = rollout_z_t[0]  # Starting frame from the real rollout
    current_z_t += np.random.normal(0, 0.5, current_z_t.shape).astype(
        np.float32)  # Add some noise to the real rollout starting frame
    all_z_t = [current_z_t]
    # current_action = np.asarray([0., 1.]).astype(np.float32)
    for i in range(rollout_z_t.shape[0]):
        # if i != 0 and i % 200 == 0: current_action = 1 - current_action  # Flip actions every 100 frames
        current_action = np.expand_dims(
            rollout_action[i], 0)  # follow actions performed in a real rollout
        output = model(current_z_t,
                       current_action,
                       temperature=args.sample_temperature)
        if args.predict_done:
            current_z_t, done = output
            done = done.data
            # print(i, current_action, done)
        else:
            current_z_t = output
        all_z_t.append(current_z_t.data)
        if args.predict_done and done[0] >= 0.5:
            break
    dream_rollout_imgs = vision.decode(np.asarray(all_z_t).astype(
        np.float32)).data
    dream_rollout_imgs = post_process_image_tensor(dream_rollout_imgs)
    imageio.mimsave(os.path.join(output_dir, 'dream_rollout.gif'),
                    dream_rollout_imgs,
                    fps=20)

    log(ID, "Done")
Example #20
def calc_loss(batch, net, tgt_net, device="cpu"):
    # NOTE: the snippet began mid-function; this header and the batch
    # unpacking are reconstructed from how the variables are used below.
    states, actions, rewards, dones, next_states = batch

    states_v = torch.tensor(states).to(device)
    next_states_v = torch.tensor(next_states).to(device)
    actions_v = torch.tensor(actions).to(device)
    rewards_v = torch.tensor(rewards).to(device)
    done_mask = torch.BoolTensor(dones).to(device)  # bool mask; ByteTensor indexing is deprecated in modern PyTorch

    state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
    next_state_values = tgt_net(next_states_v).max(1)[0]
    next_state_values[done_mask] = 0.0
    next_state_values = next_state_values.detach()

    expected_state_action_values = next_state_values * GAMMA + rewards_v
    return nn.MSELoss()(state_action_values, expected_state_action_values)
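For reference, a self-contained toy run of the same Bellman-target arithmetic, with small hand-written Q-tables standing in for net(states_v) and tgt_net(next_states_v) (all values here are illustrative, not from the snippet):

import torch
import torch.nn as nn

GAMMA = 0.99

q_values = torch.tensor([[1.0, 2.0], [0.5, 0.3]])       # online net: Q(s, a)
next_q_values = torch.tensor([[0.8, 1.5], [0.2, 0.1]])  # target net: Q(s', a')
actions = torch.tensor([1, 0])
rewards = torch.tensor([1.0, -1.0])
done_mask = torch.tensor([False, True])

# Pick the Q-value of the action actually taken in each state
state_action_values = q_values.gather(1, actions.unsqueeze(-1)).squeeze(-1)
# Bootstrap from the best next-state action, zeroing terminal states
next_state_values = next_q_values.max(1)[0]
next_state_values[done_mask] = 0.0
expected = next_state_values.detach() * GAMMA + rewards
print(nn.MSELoss()(state_action_values, expected))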


if __name__ == "__main__":
    mkdir('.', 'checkpoints')
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
    parser.add_argument("--env", default=DEFAULT_ENV_NAME,
                        help="Name of the environment, default=" + DEFAULT_ENV_NAME)
    parser.add_argument("--reward", type=float, default=MEAN_REWARD_GOAL,
                        help="Mean reward goal to stop training, default=%.2f" % MEAN_REWARD_GOAL)
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    env = wrappers.make_env(args.env)

    net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
    tgt_net = dqn_model.DQN(env.observation_space.shape, env.action_space.n).to(device)
    writer = SummaryWriter(comment="-" + args.env)
    print(net)
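The fragment ends right after the two networks are created; in the standard DQN loop the target network is periodically synced from the online one. A minimal sketch of that step (SYNC_TARGET_FRAMES is an assumed constant, not defined in the snippet):

SYNC_TARGET_FRAMES = 1000  # assumed sync interval, not from the snippet

def maybe_sync_target(frame_idx, net, tgt_net):
    # Copy the online network's weights into the target network every N frames
    if frame_idx % SYNC_TARGET_FRAMES == 0:
        tgt_net.load_state_dict(net.state_dict())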
Example #21
def boot_report(config):
    connection, jobs, duration = parse_json(config.get("boot"))
    # TODO: Fix this when multi-lab sync is working
    #download_log2html(log2html)
    results_directory = os.getcwd() + '/results'
    results = {}
    utils.mkdir(results_directory)
    test_plan = None

    if config.get("lab"):
        report_directory = os.path.join(results_directory, config.get("lab"))
    else:
        report_directory = results_directory
    if os.path.exists(report_directory):
        shutil.rmtree(report_directory)
    utils.mkdir(report_directory)

    for job_id in jobs:
        print 'Job ID: %s' % job_id
        # Init
        boot_meta = {}
        api_url = None
        arch = None
        board_instance = None
        boot_retries = 0
        kernel_defconfig_full = None
        kernel_defconfig = None
        kernel_defconfig_base = None
        kernel_version = None
        device_tree = None
        kernel_endian = None
        kernel_tree = None
        kernel_addr = None
        initrd_addr = None
        dtb_addr = None
        dtb_append = None
        fastboot = None
        fastboot_cmd = None
        job_file = ''
        board_offline = False
        kernel_boot_time = None
        boot_failure_reason = None
        efi_rtc = False
        # Retrieve job details
        device_type = ''
        job_details = connection.scheduler.job_details(job_id)
        if job_details['requested_device_type_id']:
            device_type = job_details['requested_device_type_id']
        if job_details['description']:
            job_name = job_details['description']
            try:
                job_short_name = re.search(".*?([A-Z]+.*)", job_name).group(1)
            except Exception:
                job_short_name = 'boot-test'
        try:
            device_name = job_details['_actual_device_cache']['hostname']
        except Exception:
            continue
        result = jobs[job_id]['result']
        bundle = jobs[job_id]['bundle']
        if not device_type:
            device_type = job_details['_actual_device_cache']['device_type_id']
        if bundle is None and device_type == 'dynamic-vm':
            host_job_id = job_id.replace('.1', '.0')
            bundle = jobs[host_job_id]['bundle']
            if bundle is None:
                print '%s bundle is empty, skipping...' % device_type
                continue
        # Retrieve the log file
        try:
            binary_job_file = connection.scheduler.job_output(job_id)
        except xmlrpclib.Fault:
            print 'Job output not found for %s' % device_type
            continue
        # Parse LAVA messages out of log
        raw_job_file = str(binary_job_file)
        for line in raw_job_file.splitlines():
            if 'Infrastructure Error:' in line:
                print 'Infrastructure Error detected!'
                index = line.find('Infrastructure Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Bootloader Error:' in line:
                print 'Bootloader Error detected!'
                index = line.find('Bootloader Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Kernel Error:' in line:
                print 'Kernel Error detected!'
                index = line.find('Kernel Error:')
                boot_failure_reason = line[index:]
            if 'Userspace Error:' in line:
                print 'Userspace Error detected!'
                index = line.find('Userspace Error:')
                boot_failure_reason = line[index:]
            if '<LAVA_DISPATCHER>' not in line:
                if len(line) != 0:
                    job_file += line + '\n'
            if 'rtc-efi rtc-efi: setting system clock to' in line:
                if device_type == 'dynamic-vm':
                    efi_rtc = True

        # Retrieve bundle
        if bundle is not None:
            json_bundle = connection.dashboard.get(bundle)
            bundle_data = json.loads(json_bundle['content'])
            # Get the boot data from LAVA
            for test_results in bundle_data['test_runs']:
                # Check for the LAVA self boot test
                if test_results['test_id'] == 'lava':
                    for test in test_results['test_results']:
                        # TODO for compat :(
                        if test['test_case_id'] == 'kernel_boot_time':
                            kernel_boot_time = test['measurement']
                        if test['test_case_id'] == 'test_kernel_boot_time':
                            kernel_boot_time = test['measurement']
                    bundle_attributes = bundle_data['test_runs'][-1]['attributes']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.defconfig'):
                print bundle_attributes['kernel.defconfig']
            if utils.in_bundle_attributes(bundle_attributes, 'target'):
                board_instance = bundle_attributes['target']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.defconfig'):
                kernel_defconfig = bundle_attributes['kernel.defconfig']
                defconfig_list = kernel_defconfig.split('-')
                #arch = defconfig_list[0]
                arch = defconfig_list[-1]
                # Drop the leading component (historically the arch)
                defconfig_list.pop(0)
                kernel_defconfig_full = '-'.join(defconfig_list)
                kernel_defconfig_base = ''.join(kernel_defconfig_full.split('+')[:1])
                if kernel_defconfig_full == kernel_defconfig_base:
                    kernel_defconfig_full = None
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.version'):
                kernel_version = bundle_attributes['kernel.version']
            if utils.in_bundle_attributes(bundle_attributes, 'device.tree'):
                device_tree = bundle_attributes['device.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.endian'):
                kernel_endian = bundle_attributes['kernel.endian']
            if utils.in_bundle_attributes(bundle_attributes, 'platform.fastboot'):
                fastboot = bundle_attributes['platform.fastboot']
            if kernel_boot_time is None:
                if utils.in_bundle_attributes(bundle_attributes, 'kernel-boot-time'):
                    kernel_boot_time = bundle_attributes['kernel-boot-time']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.tree'):
                kernel_tree = bundle_attributes['kernel.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel-addr'):
                kernel_addr = bundle_attributes['kernel-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'initrd-addr'):
                initrd_addr = bundle_attributes['initrd-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-addr'):
                dtb_addr = bundle_attributes['dtb-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-append'):
                dtb_append = bundle_attributes['dtb-append']
            if utils.in_bundle_attributes(bundle_attributes, 'boot_retries'):
                boot_retries = int(bundle_attributes['boot_retries'])
            if utils.in_bundle_attributes(bundle_attributes, 'test.plan'):
                test_tmp = bundle_attributes['test.plan']
                if test_tmp:
                    test_plan = test_tmp
        else:
            if not kernel_defconfig or not kernel_version or not kernel_tree:
                job_definition = None
                job_dictionary = None
                if 'original_definition' in job_details.keys():
                    job_definition = job_details['original_definition']
                    try:
                        job_dictionary = eval(job_definition)
                    except Exception:
                        pass
                    if job_dictionary:
                        if 'actions' in job_dictionary.keys():
                            actions = job_dictionary['actions']
                            for i in range(0, len(actions)):
                                try:
                                    kernel_defconfig = actions[i]['metadata']['kernel.defconfig']
                                    kernel_version = actions[i]['metadata']['kernel.version']
                                    kernel_tree = actions[i]['metadata']['kernel.tree']
                                    kernel_endian = actions[i]['metadata']['kernel.endian']
                                    platform_fastboot = actions[i]['metadata']['platform.fastboot']
                                    device_tree = actions[i]['metadata']['device.tree']
                                    break
                                except KeyError:
                                    continue
                if 'target' in job_details.keys():
                    print job_details.keys()
        # Check if we found efi-rtc
        if test_plan == 'boot-kvm-uefi' and not efi_rtc:
            if device_type == 'dynamic-vm':
                boot_failure_reason = 'Unable to read EFI rtc'
                result = 'FAIL'

        # Record the boot log and result
        # TODO: Will need to map device_types to dashboard device types
        if kernel_defconfig and device_type and result:
            if ( 'arm' == arch or 'arm64' == arch ) and device_tree is None:
                platform_name = device_map[device_type][0] + ',legacy'
            else:
                if test_plan == 'boot-nfs' or test_plan == 'boot-nfs-mp':
                    platform_name = device_map[device_type][0] + '_rootfs:nfs'
                else:
                    platform_name = device_map[device_type][0]

            # Create txt format boot metadata
            print 'Creating boot log for %s' % (platform_name + job_name + '_' + job_id)
            log = 'boot-%s.txt' % (platform_name + job_name + '_' + job_id)
            html = 'boot-%s.html' % (platform_name + job_name + '_' + job_id)
            if config.get("lab"):
                directory = os.path.join(results_directory, kernel_defconfig + '/' + config.get("lab"))
            else:
                directory = os.path.join(results_directory, kernel_defconfig)
            utils.ensure_dir(directory)

            utils.write_file(job_file, log, directory)

            if kernel_boot_time is None:
                kernel_boot_time = '0.0'
            if kernel_defconfig in results:
                results[kernel_defconfig].append({'device_type': platform_name,
                    'job_id': job_id, 'job_name': job_short_name,
                    'kernel_boot_time': kernel_boot_time, 'result': result,
                    'device_name': device_name})
            else:
                results[kernel_defconfig] = [{'device_type': platform_name,
                    'job_id': job_id, 'job_name': job_short_name,
                    'kernel_boot_time': kernel_boot_time, 'result': result,
                    'device_name': device_name}]

            # Create JSON format boot metadata
            print 'Creating JSON format boot metadata'
            if config.get("lab"):
                boot_meta['lab_name'] = config.get("lab")
            else:
                boot_meta['lab_name'] = None
            if board_instance:
                boot_meta['board_instance'] = board_instance
            boot_meta['retries'] = boot_retries
            boot_meta['boot_log'] = log
            boot_meta['boot_log_html'] = html
            # TODO: Fix this
            boot_meta['version'] = '1.0'
            boot_meta['arch'] = arch
            boot_meta['defconfig'] = kernel_defconfig_base
            if kernel_defconfig_full is not None:
                boot_meta['defconfig_full'] = kernel_defconfig_full
            if device_map[device_type][1]:
                boot_meta['mach'] = device_map[device_type][1]
            boot_meta['kernel'] = kernel_version

            boot_meta['job'] = kernel_tree
            boot_meta['board'] = platform_name
            if board_offline and result == 'FAIL':
                boot_meta['boot_result'] = 'OFFLINE'
                #results[kernel_defconfig]['result'] = 'OFFLINE'
            else:
                boot_meta['boot_result'] = result
            if result == 'FAIL' or result == 'OFFLINE':
                if boot_failure_reason:
                    boot_meta['boot_result_description'] = boot_failure_reason
                else:
                    boot_meta['boot_result_description'] = 'Unknown Error: platform failed to boot'
            boot_meta['boot_time'] = kernel_boot_time
            # TODO: Fix this
            boot_meta['boot_warnings'] = None
            if device_tree:
                if arch == 'arm64':
                    boot_meta['dtb'] = 'dtbs/' + device_map[device_type][1] + '/' + device_tree
                else:
                    boot_meta['dtb'] = 'dtbs/' + device_tree
            else:
                boot_meta['dtb'] = device_tree
            boot_meta['dtb_addr'] = dtb_addr
            boot_meta['dtb_append'] = dtb_append
            boot_meta['endian'] = kernel_endian
            boot_meta['fastboot'] = fastboot
            # TODO: Fix this
            boot_meta['initrd'] = None
            boot_meta['initrd_addr'] = initrd_addr
            if arch == 'arm':
                boot_meta['kernel_image'] = 'zImage'
            elif arch == 'arm64':
                boot_meta['kernel_image'] = 'Image'
            else:
                boot_meta['kernel_image'] = 'bzImage'
            boot_meta['loadaddr'] = kernel_addr
            json_file = 'boot-%s.json' % (platform_name + job_name + '_' + job_id)
            utils.write_json(json_file, directory, boot_meta)
            # added by wuyanjun:
            # record the IP-to-board mapping
            get_ip_board_mapping(job_file, log, directory, report_directory)
            parser_and_get_result(job_file, log, directory, report_directory, connection)

    if results and kernel_tree and kernel_version:
        print 'Creating summary for %s' % (kernel_version)
        boot = '%s-boot-report.txt' % (kernel_version)
        if test_plan and ('boot' in test_plan or 'BOOT' in test_plan):
            boot = boot.replace('boot', test_plan)
        passed = 0
        failed = 0
        for defconfig, results_list in results.items():
            for result in results_list:
                if result['result'] == 'PASS':
                    passed += 1
                else:
                    failed += 1
        total = passed + failed
        with open(os.path.join(report_directory, boot), 'a') as f:
            f.write('Subject: %s boot: %s boots: %s passed, %s failed (%s)\n' % (kernel_tree,
                                                                                str(total),
                                                                                str(passed),
                                                                                str(failed),
                                                                                kernel_version))
            f.write('\n')
            f.write('Total Duration: %.2f minutes\n' % (duration / 60))
            f.write('Tree/Branch: %s\n' % kernel_tree)
            f.write('Git Describe: %s\n' % kernel_version)
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        if first:
                            f.write('\n')
                            f.write('Boards Offline:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        f.write('    %s   %s   %s   %ss   %s: %s\n' % (result['job_id'],
                                                                    result['device_type'],
                                                                    result['device_name'],
                                                                    result['kernel_boot_time'],
                                                                    result['job_name'],
                                                                    result['result']))
                        f.write('\n')
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'FAIL':
                        if first:
                            f.write('\n')
                            f.write('Failed Boot Tests:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'FAIL':
                        f.write('    %s   %s   %s   %ss   %s: %s\n' % (result['job_id'],
                                                                    result['device_type'],
                                                                    result['device_name'],
                                                                    result['kernel_boot_time'],
                                                                    result['job_name'],
                                                                    result['result']))
            f.write('\n')
            f.write('Full Boot Report:\n')
            for defconfig, results_list in results.items():
                f.write('\n')
                f.write(defconfig)
                f.write('\n')
                for result in results_list:
                    f.write('    %s   %s   %s   %ss   %s: %s\n' %
                                                                    (result['job_id'],
                                                                        result['device_type'],
                                                                        result['device_name'],
                                                                        result['kernel_boot_time'],
                                                                        result['job_name'],
                                                                        result['result']))
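The pass/fail tally above can be written more compactly; a sketch using collections.Counter over the same results structure (Python 3 syntax, unlike the surrounding Python 2 snippet):

from collections import Counter

def tally(results):
    # results: {defconfig: [{'result': 'PASS' | 'FAIL' | 'OFFLINE', ...}, ...]}
    counts = Counter(r['result']
                     for results_list in results.values()
                     for r in results_list)
    passed = counts['PASS']
    failed = sum(n for key, n in counts.items() if key != 'PASS')
    return passed, failed, passed + failed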
Example #22
def train_and_test(dataset, params='auto', prefix='0'):
    # If params is a string, treat it as parameter-screening results and
    # pick the best configuration from them; otherwise read the values
    # directly from the params dict.
    if isinstance(params, str):
        logGamma, lograte, params = print_params(params)
    else:
        logGamma = params['logGamma']
        lograte = params['lograte']
    model = params['model']

    if model == 'NN':
        root = '{0}-NN-H{1}-test/'.format(dataset, params['H'])
    elif model == 'RF':
        root = '{0}-RF-test/'.format(dataset)
    dirname = root + '{0:s}-{1:s}-N{2:d}-ep{3:d}'.format(
        dataset, model, params['N'], params['n_epoch'])
    plotdir, resdir, _, tbdir = mkdir(dirname, prefix)

    Xtrain, Ytrain, Xtest, Ytest = read_data(dataset)
    d = len(Xtrain[0])

    model_params, fit_params, model_type = params_process(
        model, logGamma, lograte, params, tbdir, d)

    # only write log file for trial 0
    if prefix == '0':
        logfile = log('log/experiments.log', 'train and test')
        logfile.record(str(datetime.now()))
        logfile.record('{0} = {1}'.format('dataset', dataset))
        for key, val in model_params.items():
            logfile.record('{0} = {1}'.format(key, val))
        for key, val in fit_params.items():
            logfile.record('{0} = {1}'.format(key, val))
        logfile.save()

    score, sparsity, traintime, testtime, Ypr = _train_and_test(
        Xtrain, Ytrain, Xtest, Ytest, model_type, model_params, fit_params)

    fig = plt.figure()
    if dataset == 'daniely':
        R_sample = np.sum(Xtest[::10, int(d / 2):] * Xtest[::10, :int(d / 2)],
                          axis=1)
    else:
        R_sample = np.linalg.norm(Xtest[::10, :], axis=1)
    sort_idx = np.argsort(R_sample)
    Ypr = np.array(Ypr)
    plt.scatter(R_sample[sort_idx], Ypr[::10][sort_idx], c='r')
    plt.plot(R_sample[sort_idx], Ytest[::10][sort_idx], c='b')
    plt.title("predicted y")
    plt.savefig(plotdir + 'predict_plot-{}.png'.format(prefix), dpi=300)

    output = {
        'accuracy': score,
        'sparsity': sparsity,
        'traintime': traintime,
        'testtime': testtime
    }
    finalop = [output, dataset, model_params, fit_params]
    filename = resdir + 'output-' + prefix
    with open(filename, 'w') as f:
        f.write(str(finalop))
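Writing str(finalop) makes the results awkward to parse back; a hedged alternative that saves the same payload as JSON (assuming every value in it is JSON-serializable):

import json

def save_output(filename, output, dataset, model_params, fit_params):
    # Same payload as finalop above, but machine-readable
    payload = {
        'output': output,
        'dataset': dataset,
        'model_params': model_params,
        'fit_params': fit_params,
    }
    with open(filename + '.json', 'w') as f:
        json.dump(payload, f, indent=2)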
Example #23
def boot_report(config):
    connection, jobs, duration = parse_yaml(config.get("boot"))
    # TODO: Fix this when multi-lab sync is working
    results_directory = os.getcwd() + '/results'
    results = {}
    utils.mkdir(results_directory)
    test_plan = None

    if config.get("lab"):
        report_directory = os.path.join(results_directory, config.get("lab"))
    else:
        report_directory = results_directory

    if os.path.exists(report_directory):
        shutil.rmtree(report_directory)
    utils.mkdir(report_directory)

    for job_id in jobs:
        print 'Job ID: %s' % job_id
        # Init
        boot_meta = {}
        arch = None
        board_instance = None
        boot_retries = 0
        kernel_defconfig_full = None
        kernel_defconfig = None
        kernel_defconfig_base = None
        kernel_version = None
        device_tree = None
        kernel_tree = None
        kernel_addr = None
        initrd_addr = None
        dtb_addr = None
        dtb_append = None
        job_file = ''
        board_offline = False
        kernel_boot_time = None
        boot_failure_reason = None
        efi_rtc = False
        # Retrieve job details
        device_type = ''
        job_details = connection.scheduler.job_details(job_id)
        if job_details['requested_device_type_id']:
            device_type = job_details['requested_device_type_id']
        if job_details['description']:
            job_name = job_details['description']
            try:
                job_short_name = re.search(".*?([A-Z]+.*)", job_name).group(1)
            except Exception:
                job_short_name = 'boot-test'
        try:
            device_name = job_details['_actual_device_cache']['hostname']
        except Exception:
            continue
        result = jobs[job_id]['result']
        bundle = jobs[job_id]['bundle']
        if not device_type:
            device_type = job_details['_actual_device_cache']['device_type_id']
        try:
            binary_job_file = connection.scheduler.job_output(job_id)
        except xmlrpclib.Fault:
            print 'Job output not found for %s' % device_type
            continue
        # Parse LAVA messages out of log
        raw_job_file = str(binary_job_file)
        for line in raw_job_file.splitlines():
            if 'Infrastructure Error:' in line:
                print 'Infrastructure Error detected!'
                index = line.find('Infrastructure Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Bootloader Error:' in line:
                print 'Bootloader Error detected!'
                index = line.find('Bootloader Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Kernel Error:' in line:
                print 'Kernel Error detected!'
                index = line.find('Kernel Error:')
                boot_failure_reason = line[index:]
            if 'Userspace Error:' in line:
                print 'Userspace Error detected!'
                index = line.find('Userspace Error:')
                boot_failure_reason = line[index:]
            if '<LAVA_DISPATCHER>' not in line:
                if len(line) != 0:
                    job_file += line + '\n'
            if 'rtc-efi rtc-efi: setting system clock to' in line:
                if device_type == 'dynamic-vm':
                    efi_rtc = True
        if not kernel_defconfig or not kernel_version or not kernel_tree:
            try:
                job_metadata_info = connection.results.get_testjob_metadata(
                    job_id)
                kernel_defconfig = utils.get_value_by_key(
                    job_metadata_info, 'kernel_defconfig')
                kernel_version = utils.get_value_by_key(
                    job_metadata_info, 'kernel_version')
                kernel_tree = utils.get_value_by_key(job_metadata_info,
                                                     'kernel_tree')
                device_tree = utils.get_value_by_key(job_metadata_info,
                                                     'device_tree')
            except Exception:
                continue

        # Record the boot log and result
        # TODO: Will need to map device_types to dashboard device types
        if kernel_defconfig and device_type and result:
            if ('arm' == arch or 'arm64' == arch) and device_tree is None:
                platform_name = device_map[device_type][0] + ',legacy'
            else:
                if test_plan == 'boot-nfs' or test_plan == 'boot-nfs-mp':
                    platform_name = device_map[device_type][0] + '_rootfs:nfs'
                else:
                    platform_name = device_map[device_type][0]

            # Create txt format boot metadata
            print 'Creating boot log for %s' % (platform_name + job_name +
                                                '_' + job_id)
            log = 'boot-%s.txt' % (platform_name + job_name + '_' + job_id)
            if config.get("lab"):
                directory = os.path.join(
                    results_directory,
                    kernel_defconfig + '/' + config.get("lab"))
            else:
                directory = os.path.join(results_directory, kernel_defconfig)
            utils.ensure_dir(directory)

            utils.write_file(job_file, log, directory)

            if kernel_boot_time is None:
                kernel_boot_time = '0.0'
            if kernel_defconfig in results:
                results[kernel_defconfig].append({
                    'device_type': platform_name,
                    'job_id': job_id,
                    'job_name': job_short_name,
                    'kernel_boot_time': kernel_boot_time,
                    'result': result,
                    'device_name': device_name
                })
            else:
                results[kernel_defconfig] = [{
                    'device_type': platform_name,
                    'job_id': job_id,
                    'job_name': job_short_name,
                    'kernel_boot_time': kernel_boot_time,
                    'result': result,
                    'device_name': device_name
                }]

            # Create JSON format boot metadata
            print 'Creating JSON format boot metadata'
            if config.get("lab"):
                boot_meta['lab_name'] = config.get("lab")
            else:
                boot_meta['lab_name'] = None
            if board_instance:
                boot_meta['board_instance'] = board_instance
            boot_meta['retries'] = boot_retries
            boot_meta['boot_log'] = log
            # TODO: Fix this
            boot_meta['version'] = '1.0'
            boot_meta['arch'] = arch
            boot_meta['defconfig'] = kernel_defconfig_base
            if kernel_defconfig_full is not None:
                boot_meta['defconfig_full'] = kernel_defconfig_full
            if device_map[device_type][1]:
                boot_meta['mach'] = device_map[device_type][1]
            boot_meta['kernel'] = kernel_version

            boot_meta['job'] = kernel_tree
            boot_meta['board'] = platform_name
            if board_offline and result == 'FAIL':
                boot_meta['boot_result'] = 'OFFLINE'
                #results[kernel_defconfig]['result'] = 'OFFLINE'
            else:
                boot_meta['boot_result'] = result
            if result == 'FAIL' or result == 'OFFLINE':
                if boot_failure_reason:
                    boot_meta['boot_result_description'] = boot_failure_reason
                else:
                    boot_meta[
                        'boot_result_description'] = 'Unknown Error: platform failed to boot'
            boot_meta['boot_time'] = kernel_boot_time
            # TODO: Fix this
            boot_meta['boot_warnings'] = None
            if device_tree:
                if arch == 'arm64':
                    boot_meta['dtb'] = 'dtbs/' + device_map[device_type][
                        1] + '/' + device_tree
                else:
                    boot_meta['dtb'] = 'dtbs/' + device_tree
            else:
                boot_meta['dtb'] = device_tree
            boot_meta['dtb_addr'] = dtb_addr
            boot_meta['dtb_append'] = dtb_append
            # TODO: Fix this
            boot_meta['initrd'] = None
            boot_meta['initrd_addr'] = initrd_addr
            if arch == 'arm':
                boot_meta['kernel_image'] = 'zImage'
            elif arch == 'arm64':
                boot_meta['kernel_image'] = 'Image'
            else:
                boot_meta['kernel_image'] = 'bzImage'
            boot_meta['loadaddr'] = kernel_addr
            json_file = 'boot-%s.json' % (platform_name + job_name + '_' +
                                          job_id)
            utils.write_json(json_file, directory, boot_meta)
            # added by wuyanjun
            parser_and_get_result(job_file, log, directory, report_directory,
                                  connection)

            # try to generate the test summary
            generate_test_report(job_id, connection)

    if results and kernel_tree and kernel_version:
        print 'Creating summary for %s' % (kernel_version)
        boot = '%s-boot-report.txt' % (kernel_version)
        if test_plan and ('boot' in test_plan or 'BOOT' in test_plan):
            boot = boot.replace('boot', test_plan)
        passed = 0
        failed = 0
        for defconfig, results_list in results.items():
            for result in results_list:
                if result['result'] == 'PASS':
                    passed += 1
                else:
                    failed += 1
        total = passed + failed
        with open(os.path.join(report_directory, boot), 'a') as f:
            f.write('Subject: %s boot: %s boots: %s passed, %s failed (%s)\n' %
                    (kernel_tree, str(total), str(passed), str(failed),
                     kernel_version))
            f.write('\n')
            f.write('Total Duration: %.2f minutes\n' % (duration / 60))
            f.write('Tree/Branch: %s\n' % kernel_tree)
            f.write('Git Describe: %s\n' % kernel_version)
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        if first:
                            f.write('\n')
                            f.write('Boards Offline:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        f.write(
                            '    %s   %s   %s   %ss   %s: %s\n' %
                            (result['job_id'], result['device_type'],
                             result['device_name'], result['kernel_boot_time'],
                             result['job_name'], result['result']))
                        f.write('\n')
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'FAIL':
                        if first:
                            f.write('\n')
                            f.write('Failed Boot Tests:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'FAIL':
                        f.write(
                            '    %s   %s   %s   %ss   %s: %s\n' %
                            (result['job_id'], result['device_type'],
                             result['device_name'], result['kernel_boot_time'],
                             result['job_name'], result['result']))
            f.write('\n')
            f.write('Full Boot Report:\n')
            for defconfig, results_list in results.items():
                f.write('\n')
                f.write(defconfig)
                f.write('\n')
                for result in results_list:
                    f.write('    %s   %s   %s   %ss   %s: %s\n' %
                            (result['job_id'], result['device_type'],
                             result['device_name'], result['kernel_boot_time'],
                             result['job_name'], result['result']))
Example #24
def main():
    global colours, img_size
    args = parse_args()
    videos_dir = args.videos_dir
    output_path = args.output_path
    no_display = args.no_display
    detect_interval = args.detect_interval  # trade-off between speed and tracking smoothness
    margin = args.margin  # enlarge when faces are large in the video, to make tracking easier
    scale_rate = args.scale_rate  # smaller values shrink the input frames
    show_rate = args.show_rate  # smaller values display smaller frames
    face_score_threshold = args.face_score_threshold

    mkdir(output_path)
    # for display
    if not no_display:
        colours = np.random.rand(32, 3)

    # init tracker
    tracker = Sort()  # create instance of the SORT tracker

    logger.info('Start track and extract......')
    with tf.Graph().as_default():
        with tf.Session(
                config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                    allow_growth=True),
                                      log_device_placement=False)) as sess:
            pnet, rnet, onet = detect_face.create_mtcnn(
                sess, os.path.join(project_dir, "align"))

            minsize = 40  # minimum face size (px) for MTCNN to detect
            threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
            factor = 0.709  # image pyramid scale factor

            for filename in os.listdir(videos_dir):
                logger.info('All files:{}'.format(filename))
            for filename in os.listdir(videos_dir):
                suffix = filename.split('.')[-1]
                if suffix != 'mp4' and suffix != 'avi':  # add more video formats here if needed
                    continue
                video_name = os.path.join(videos_dir, filename)
                directoryname = os.path.join(output_path,
                                             filename.split('.')[0])
                logger.info('Video_name:{}'.format(video_name))
                #cam = cv2.VideoCapture(video_name)
                cam = cv2.VideoCapture(0)
                c = 0
                label = 'neutral'  # default before the first emotion prediction
                while True:
                    final_faces = []
                    addtional_attribute_list = []
                    ret, frame = cam.read()
                    if not ret:
                        logger.warning("ret false")
                        break
                    if frame is None:
                        logger.warning("frame drop")
                        break

                    frame = cv2.resize(frame, (0, 0),
                                       fx=scale_rate,
                                       fy=scale_rate)
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    print('shape of gray')
                    print(gray.shape)
                    r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    if c % detect_interval == 0:
                        img_size = np.asarray(frame.shape)[0:2]
                        mtcnn_starttime = time()
                        faces, points = detect_face.detect_face(
                            r_g_b_frame, minsize, pnet, rnet, onet, threshold,
                            factor)
                        logger.info(
                            "MTCNN detect face cost time : {} s".format(
                                round(time() - mtcnn_starttime,
                                      3)))  # mtcnn detect ,slow
                        face_sums = faces.shape[0]
                        if face_sums > 0:
                            face_list = []
                            for i, item in enumerate(faces):
                                score = round(faces[i, 4], 6)
                                if score > face_score_threshold:
                                    det = np.squeeze(faces[i, 0:4])

                                    # face rectangle
                                    det[0] = np.maximum(det[0] - margin, 0)
                                    det[1] = np.maximum(det[1] - margin, 0)
                                    det[2] = np.minimum(
                                        det[2] + margin, img_size[1])
                                    det[3] = np.minimum(
                                        det[3] + margin, img_size[0])
                                    face_list.append(item)

                                    # face cropped
                                    bb = np.array(det, dtype=np.int32)

                                    # use the 5 facial landmarks to judge whether the face is frontal or in profile
                                    squeeze_points = np.squeeze(points[:, i])
                                    tolist = squeeze_points.tolist()
                                    facial_landmarks = []
                                    for j in range(5):
                                        item = [tolist[j], tolist[(j + 5)]]
                                        facial_landmarks.append(item)
                                    if args.face_landmarks:
                                        for (x, y) in facial_landmarks:
                                            cv2.circle(frame, (int(x), int(y)),
                                                       3, (0, 255, 0), -1)
                                    cropped = frame[bb[1]:bb[3],
                                                    bb[0]:bb[2], :].copy()

                                    dist_rate, high_ratio_variance, width_rate = judge_side_face(
                                        np.array(facial_landmarks))

                                    # additional face attributes (cropped face, detection score, and frontal/side-face cues)
                                    item_list = [
                                        cropped, score, dist_rate,
                                        high_ratio_variance, width_rate
                                    ]
                                    addtional_attribute_list.append(item_list)

                            final_faces = np.array(face_list)
                    # NOTE: loading the Haar cascade and the emotion model on
                    # every frame is expensive; hoist these out of the loop in real use.
                    face_detection = cv2.CascadeClassifier(
                        'haarcascade_frontalface_default.xml')
                    emotion_classifier = load_model(
                        'models/_mini_XCEPTION.106-0.65.hdf5', compile=False)
                    EMOTIONS = [
                        "angry", "disgust", "scared", "happy", "sad",
                        "surprised", "neutral"
                    ]

                    frontal_faces = face_detection.detectMultiScale(
                        gray,
                        scaleFactor=1.1,
                        minNeighbors=5,
                        minSize=(30, 30),
                        flags=cv2.CASCADE_SCALE_IMAGE)

                    if len(frontal_faces) > 0:
                        frontal_faces = sorted(frontal_faces,
                                               reverse=True,
                                               key=lambda x: (x[2] - x[0]) *
                                               (x[3] - x[1]))[0]
                        (fX, fY, fW, fH) = frontal_faces
                        roi = gray[fY:fY + fH, fX:fX + fW]
                        roi = cv2.resize(roi, (48, 48))
                        roi = roi.astype("float") / 255.0
                        roi = img_to_array(roi)
                        roi = np.expand_dims(roi, axis=0)
                        print(roi.shape)
                        preds = emotion_classifier.predict(roi)[0]
                        emotion_probability = np.max(preds)
                        label = EMOTIONS[preds.argmax()]

                    trackers = tracker.update(final_faces, img_size,
                                              directoryname,
                                              addtional_attribute_list,
                                              detect_interval)

                    c += 1

                    emoTracker = ''
                    print(trackers)
                    for d in trackers:
                        if not no_display:
                            d = d.astype(np.int32)
                            cv2.rectangle(frame, (d[0], d[1]), (d[2], d[3]),
                                          colours[d[4] % 32, :] * 255, 3)

                            if len(final_faces) > 0:
                                print('ID %d Detect' % (d[4]))

                                if label != emoTracker:
                                    emoTracker = label
                                cv2.putText(
                                    frame, 'ID : %d  DETECT, EMOTION : %s' %
                                    ((d[4]), emoTracker),
                                    (d[0] - 10, d[1] - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                    colours[d[4] % 32, :] * 255, 2)

                            else:
                                cv2.putText(frame, 'ID : %d' % (d[4]),
                                            (d[0] - 10, d[1] - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                            colours[d[4] % 32, :] * 255, 2)

                    if not no_display:
                        frame = cv2.resize(frame, (0, 0),
                                           fx=show_rate,
                                           fy=show_rate)
                        cv2.imshow("Frame", frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
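The ROI preprocessing above follows the usual Keras pattern: grayscale crop, resize to 48x48, scale to [0, 1], then add channel and batch axes. A NumPy-only shape check of that pipeline (the fake crop is illustrative):

import numpy as np

roi = np.random.randint(0, 256, (48, 48), dtype=np.uint8)  # stand-in for a resized face crop
roi = roi.astype("float32") / 255.0   # scale to [0, 1]
roi = roi[..., np.newaxis]            # add channel axis -> (48, 48, 1)
roi = np.expand_dims(roi, axis=0)     # add batch axis   -> (1, 48, 48, 1)
assert roi.shape == (1, 48, 48, 1)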
Example #25
def save_blur_image(self):
    '''This function saves the blurred image'''
    mkdir('Blur_rgb')
    cv2.imwrite(f'{self.path}/Blur_rgb/{self.bluredimname}', self.bluredim)
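This method (and the similar save_* snippets below) calls a project-local mkdir helper that is not shown; a plausible implementation (an assumption, not the project's actual code):

import os

def mkdir(path):
    # Create the directory if it does not already exist
    os.makedirs(path, exist_ok=True)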
Example #26
def worker(worker_arg_tuple):
    rollouts_per_core, args, output_dir = worker_arg_tuple

    np.random.seed()

    if args.game in DOOM_GAMES:
        env = ViZDoomWrapper(args.game)
    else:
        env = gym.make(args.game)

    for rollout_num in rollouts_per_core:
        t = 1

        actions_array = []
        frames_array = []
        rewards_array = []

        observation = env.reset()
        frames_array.append(
            imresize(observation, (args.frame_resize, args.frame_resize)))

        start_time = time.time()
        prev_action = None
        while True:
            # action = env.action_space.sample()
            action = generate_action(
                env.action_space.low,
                env.action_space.high,
                prev_action,
                balance_no_actions=True if args.game in DOOM_GAMES else False,
                force_actions=False if args.game in DOOM_GAMES else True)
            prev_action = action
            observation, reward, done, _ = env.step(action)
            actions_array.append(action)
            frames_array.append(
                imresize(observation, (args.frame_resize, args.frame_resize)))
            rewards_array.append(reward)

            if done:
                log(
                    ID,
                    "\t> Rollout {}/{} finished after {} timesteps in {:.2f}s".
                    format(rollout_num, args.num_rollouts, t,
                           (time.time() - start_time)))
                break
            t = t + 1

        actions_array = np.asarray(actions_array)
        frames_array = np.asarray(frames_array)
        rewards_array = np.asarray(rewards_array).astype(np.float32)

        rollout_dir = os.path.join(output_dir, str(rollout_num))
        mkdir(rollout_dir)

        # from lib.utils import post_process_image_tensor
        # import imageio
        # imageio.mimsave(os.path.join(output_dir, str(rollout_num), 'rollout.gif'), post_process_image_tensor(frames_array), fps=20)

        with gzip.GzipFile(os.path.join(rollout_dir, "frames.npy.gz"),
                           "w") as file:
            np.save(file, frames_array)
        np.savez_compressed(os.path.join(rollout_dir, "misc.npz"),
                            action=actions_array,
                            reward=rewards_array)
        with open(os.path.join(rollout_dir, "count"), "w") as file:
            print("{}".format(frames_array.shape[0]), file=file)

    env.close()
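For symmetry with the save logic above, a sketch of loading one rollout back (paths assumed to match exactly what worker() writes):

import gzip
import os
import numpy as np

def load_rollout(rollout_dir):
    # Mirrors the writer: gzipped frames plus a compressed actions/rewards archive
    with gzip.GzipFile(os.path.join(rollout_dir, "frames.npy.gz"), "r") as f:
        frames = np.load(f)
    misc = np.load(os.path.join(rollout_dir, "misc.npz"))
    return frames, misc["action"], misc["reward"]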
Example #27
def save_labels(self):
    '''This function saves the label channel'''
    mkdir('Labels')
    cv2.imwrite(f'{self.path}/Labels/{self.sfmlname}', self.labels)
Example #28
File: test.py Project: koddev/men
def detectExtract(a):
    global colours, img_size
    args = parse_args()
    videos_dir = args.videos_dir
    output_path = args.output_path
    no_display = args.no_display
    detect_interval = args.detect_interval  # trade-off between speed and tracking smoothness
    margin = args.margin  # enlarge when faces are large in the video, to make tracking easier
    scale_rate = args.scale_rate  # smaller values shrink the input frames
    show_rate = args.show_rate  # smaller values display smaller frames
    face_score_threshold = args.face_score_threshold

    mkdir(output_path)
    # for display
    if not no_display:
        colours = np.random.rand(32, 3)

    # init tracker
    tracker = Sort()  # create instance of the SORT tracker

    logger.info('Start track and extract......')
    with tf.Graph().as_default():
        with tf.Session(
                config=tf.ConfigProto(gpu_options=tf.GPUOptions(
                    allow_growth=True),
                                      log_device_placement=True)) as sess:

            pnet, rnet, onet = detect_face.create_mtcnn(
                sess, os.path.join(project_dir, "align"))

            minsize = 80  # minimum face size (px) for MTCNN to detect
            threshold = [0.6, 0.7, 0.7]  # thresholds for the three MTCNN stages
            factor = 0.709  # image pyramid scale factor

            for filename in os.listdir(videos_dir):
                logger.info('All files:{}'.format(filename))
            for filename in os.listdir(videos_dir):
                suffix = filename.split('.')[-1]
                if suffix != 'mp4' and suffix != 'avi':  # add more video formats here if needed
                    continue
                video_name = os.path.join(videos_dir, filename)
                directoryname = os.path.join(output_path,
                                             filename.split('.')[0])
                logger.info('Video_name:{}'.format(video_name))
                cam = cv2.VideoCapture(video_name)
                c = 0
                while True:
                    final_faces = []
                    addtional_attribute_list = []
                    ret, frame = cam.read()
                    if not ret:
                        logger.warning("ret false")
                        break
                    if frame is None:
                        logger.warning("frame drop")
                        break

                    # frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
                    r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    if c % detect_interval == 0:
                        img_size = np.asarray(frame.shape)[0:2]
                        mtcnn_starttime = time()

                        faces, points = detect_face.detect_face(
                            r_g_b_frame, minsize, pnet, rnet, onet, threshold,
                            factor)
                        logger.info(
                            "MTCNN detect face cost time : {} s".format(
                                round(time() - mtcnn_starttime,
                                      3)))  # mtcnn detect ,slow
                        face_sums = faces.shape[0]
                        if face_sums > 0:
                            face_list = []
                            for i, item in enumerate(faces):
                                score = round(faces[i, 4], 6)
                                if score > face_score_threshold:
                                    det = np.squeeze(faces[i, 0:4])

                                    # face rectangle
                                    det[0] = np.maximum(det[0] - margin, 0)
                                    det[1] = np.maximum(det[1] - margin, 0)
                                    det[2] = np.minimum(
                                        det[2] + margin, img_size[1])
                                    det[3] = np.minimum(
                                        det[3] + margin, img_size[0])
                                    face_list.append(item)

                                    # face cropped
                                    bb = np.array(det, dtype=np.int32)

                                    # use the 5 facial landmarks to judge whether the face is frontal or in profile
                                    squeeze_points = np.squeeze(points[:, i])
                                    tolist = squeeze_points.tolist()
                                    facial_landmarks = []
                                    for j in range(5):
                                        item = [tolist[j], tolist[(j + 5)]]
                                        facial_landmarks.append(item)
                                    if args.face_landmarks:
                                        for (x, y) in facial_landmarks:
                                            cv2.circle(frame, (int(x), int(y)),
                                                       3, (0, 255, 0), -1)
                                    cropped = frame[bb[1]:bb[3],
                                                    bb[0]:bb[2], :].copy()

                                    dist_rate, high_ratio_variance, width_rate = judge_side_face(
                                        np.array(facial_landmarks))

                                    # additional face attributes (cropped face, detection score, and frontal/side-face cues)
                                    item_list = [
                                        cropped, score, dist_rate,
                                        high_ratio_variance, width_rate
                                    ]
                                    addtional_attribute_list.append(item_list)

                            final_faces = np.array(face_list)

                    trackers = tracker.update(final_faces, img_size,
                                              directoryname,
                                              addtional_attribute_list,
                                              detect_interval)

                    c += 1

                    for d in trackers:
                        if not no_display:
                            d = d.astype(np.int32)
                            cv2.rectangle(frame, (d[0], d[1]), (d[2], d[3]),
                                          colours[d[4] % 32, :] * 255, 3)
                            if len(final_faces) > 0:
                                cv2.putText(frame, 'ID : %d  DETECT' % (d[4]),
                                            (d[0] - 10, d[1] - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                            colours[d[4] % 32, :] * 255, 2)
                                cv2.putText(frame, 'DETECTOR', (5, 45),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                            (1, 1, 1), 2)
                            else:
                                cv2.putText(frame, 'ID : %d' % (d[4]),
                                            (d[0] - 10, d[1] - 10),
                                            cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                            colours[d[4] % 32, :] * 255, 2)

                    if not no_display:
                        frame = cv2.resize(frame, (0, 0),
                                           fx=show_rate,
                                           fy=show_rate)
                        cv2.imshow("Frame", frame)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
Example #29
def save_output_image(self):
    '''This function saves the output image'''
    mkdir('images')
    cv2.imwrite(f'{self.path}/images/{self.outname}', self.outim)
Example #30
def boot_report(config):
    connection, jobs, duration = parse_json(config.get("boot"))
    # TODO: Fix this when multi-lab sync is working
    #download_log2html(log2html)
    results_directory = os.getcwd() + '/results'
    results = {}
    dt_tests = False
    utils.mkdir(results_directory)
    for job_id in jobs:
        print 'Job ID: %s' % job_id
        # Init
        boot_meta = {}
        api_url = None
        arch = None
        board_instance = None
        boot_retries = 0
        kernel_defconfig = None
        kernel_defconfig_base = None
        kernel_version = None
        device_tree = None
        kernel_endian = None
        kernel_tree = None
        git_branch = None
        kernel_addr = None
        initrd_addr = None
        dtb_addr = None
        dtb_append = None
        fastboot = None
        fastboot_cmd = None
        test_plan = None
        job_file = ''
        dt_test = None
        dt_test_result = None
        dt_tests_passed = None
        dt_tests_failed = None
        board_offline = False
        kernel_boot_time = None
        boot_failure_reason = None
        efi_rtc = False
        # Retrieve job details
        job_details = connection.scheduler.job_details(job_id)
        if job_details['requested_device_type_id']:
            device_type = job_details['requested_device_type_id']
        if job_details['description']:
            job_name = job_details['description']
        result = jobs[job_id]['result']
        bundle = jobs[job_id]['bundle']
        if bundle is None and device_type == 'dynamic-vm':
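            # Guest VM jobs ('.1') do not publish their own bundle, so fall
            # back to the bundle attached to the matching host job ('.0').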
            host_job_id = job_id.replace('.1', '.0')
            bundle = jobs[host_job_id]['bundle']
            if bundle is None:
                print '%s bundle is empty, skipping...' % device_type
                continue
        # Retrieve the log file
        try:
            binary_job_file = connection.scheduler.job_output(job_id)
        except xmlrpclib.Fault:
            print 'Job output not found for %s' % device_type
            continue
        # Parse LAVA messages out of log
        raw_job_file = str(binary_job_file)
        for line in raw_job_file.splitlines():
            if 'Infrastructure Error:' in line:
                print 'Infrastructure Error detected!'
                index = line.find('Infrastructure Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Bootloader Error:' in line:
                print 'Bootloader Error detected!'
                index = line.find('Bootloader Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Kernel Error:' in line:
                print 'Kernel Error detected!'
                index = line.find('Kernel Error:')
                boot_failure_reason = line[index:]
            if 'Userspace Error:' in line:
                print 'Userspace Error detected!'
                index = line.find('Userspace Error:')
                boot_failure_reason = line[index:]
            if '<LAVA_DISPATCHER>' not in line:
                if len(line) != 0:
                    job_file += line + '\n'
            if '### dt-test ### end of selftest' in line:
                dt_tests = True
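                # The end-of-selftest line carries numeric pass/fail counts;
                # grab every number and read the counts positionally below.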
                regex = re.compile(r"(?P<test>\d+\*?)")
                dt_test_results = regex.findall(line)
                if len(dt_test_results) > 2:
                    dt_tests_passed = dt_test_results[2]
                    dt_tests_failed = dt_test_results[3]
                else:
                    dt_tests_passed = dt_test_results[0]
                    dt_tests_failed = dt_test_results[1]
                if int(dt_tests_failed) > 0:
                    dt_test_result = 'FAIL'
                else:
                    dt_test_result = 'PASS'
            if 'rtc-efi rtc-efi: setting system clock to' in line:
                if device_type == 'dynamic-vm':
                    efi_rtc = True
        # Retrieve bundle
        if bundle is not None:
            json_bundle = connection.dashboard.get(bundle)
            bundle_data = json.loads(json_bundle['content'])
            # Get the boot data from LAVA
            for test_results in bundle_data['test_runs']:
                # Check for the LAVA self boot test
                if test_results['test_id'] == 'lava':
                    for test in test_results['test_results']:
                        # TODO for compat :(
                        if test['test_case_id'] == 'kernel_boot_time':
                            kernel_boot_time = test['measurement']
                        if test['test_case_id'] == 'test_kernel_boot_time':
                            kernel_boot_time = test['measurement']
                    bundle_attributes = bundle_data['test_runs'][-1][
                        'attributes']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'kernel.defconfig'):
                print bundle_attributes['kernel.defconfig']
            if utils.in_bundle_attributes(bundle_attributes, 'target'):
                board_instance = bundle_attributes['target']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'kernel.defconfig'):
                kernel_defconfig = bundle_attributes['kernel.defconfig']
                kernel_defconfig_base = ''.join(
                    kernel_defconfig.split('+')[:1])
            if utils.in_bundle_attributes(bundle_attributes, 'arch'):
                arch = bundle_attributes['arch']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'kernel.describe'):
                kernel_version = bundle_attributes['kernel.describe']
            if utils.in_bundle_attributes(bundle_attributes, 'device.tree'):
                device_tree = bundle_attributes['device.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.endian'):
                kernel_endian = bundle_attributes['kernel.endian']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'platform.fastboot'):
                fastboot = bundle_attributes['platform.fastboot']
            if kernel_boot_time is None:
                if utils.in_bundle_attributes(bundle_attributes,
                                              'kernel-boot-time'):
                    kernel_boot_time = bundle_attributes['kernel-boot-time']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.tree'):
                kernel_tree = bundle_attributes['kernel.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel-addr'):
                kernel_addr = bundle_attributes['kernel-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'initrd-addr'):
                initrd_addr = bundle_attributes['initrd-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-addr'):
                dtb_addr = bundle_attributes['dtb-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-append'):
                dtb_append = bundle_attributes['dtb-append']
            if utils.in_bundle_attributes(bundle_attributes, 'boot_retries'):
                boot_retries = int(bundle_attributes['boot_retries'])
            if utils.in_bundle_attributes(bundle_attributes, 'test.plan'):
                test_plan = bundle_attributes['test.plan']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.branch'):
                git_branch = bundle_attributes['kernel.branch']

        # Check if we found efi-rtc
        if test_plan == 'boot-kvm-uefi' and not efi_rtc:
            if device_type == 'dynamic-vm':
                boot_failure_reason = 'Unable to read EFI rtc'
                result = 'FAIL'
        # Record the boot log and result
        # TODO: Will need to map device_types to dashboard device types
        if kernel_defconfig and device_type and result:
            if (arch == 'arm' or arch == 'arm64') and device_tree is None:
                platform_name = device_map[device_type][0] + ',legacy'
            else:
                if device_tree == 'vexpress-v2p-ca15_a7.dtb':
                    platform_name = 'vexpress-v2p-ca15_a7'
                elif device_tree == 'fsl-ls2080a-simu.dtb':
                    platform_name = 'fsl-ls2080a-simu'
                elif test_plan == 'boot-kvm' or test_plan == 'boot-kvm-uefi':
                    if device_tree == 'sun7i-a20-cubietruck.dtb':
                        if device_type == 'dynamic-vm':
                            device_type = 'cubieboard3-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            device_type = 'cubieboard3-kvm-host'
                            platform_name = device_map[device_type][0]
                    elif device_tree == 'apm-mustang.dtb':
                        if device_type == 'dynamic-vm':
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'mustang-kvm-uefi-guest'
                            else:
                                device_type = 'mustang-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'mustang-kvm-uefi-host'
                            else:
                                device_type = 'mustang-kvm-host'
                            platform_name = device_map[device_type][0]
                    elif device_tree == 'juno.dtb':
                        if device_type == 'dynamic-vm':
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'juno-kvm-uefi-guest'
                            else:
                                device_type = 'juno-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'juno-kvm-uefi-host'
                            else:
                                device_type = 'juno-kvm-host'
                            platform_name = device_map[device_type][0]
                elif test_plan == 'boot-nfs' or test_plan == 'boot-nfs-mp':
                    platform_name = device_map[device_type][0] + '_rootfs:nfs'
                else:
                    platform_name = device_map[device_type][0]
            print 'Creating boot log for %s' % platform_name
            log = 'boot-%s.txt' % platform_name
            html = 'boot-%s.html' % platform_name
            if config.get("lab"):
                directory = os.path.join(
                    results_directory,
                    kernel_defconfig + '/' + config.get("lab"))
            else:
                directory = os.path.join(results_directory, kernel_defconfig)
            utils.ensure_dir(directory)
            utils.write_file(job_file, log, directory)
            if kernel_boot_time is None:
                kernel_boot_time = '0.0'
            if kernel_defconfig in results:
                results[kernel_defconfig].append({
                    'device_type': platform_name,
                    'dt_test_result': dt_test_result,
                    'dt_tests_passed': dt_tests_passed,
                    'dt_tests_failed': dt_tests_failed,
                    'kernel_boot_time': kernel_boot_time,
                    'result': result
                })
            else:
                results[kernel_defconfig] = [{
                    'device_type': platform_name,
                    'dt_test_result': dt_test_result,
                    'dt_tests_passed': dt_tests_passed,
                    'dt_tests_failed': dt_tests_failed,
                    'kernel_boot_time': kernel_boot_time,
                    'result': result
                }]
            # Create JSON format boot metadata
            print 'Creating JSON format boot metadata'
            if config.get("lab"):
                boot_meta['lab_name'] = config.get("lab")
            else:
                boot_meta['lab_name'] = None
            if board_instance:
                boot_meta['board_instance'] = board_instance
            boot_meta['retries'] = boot_retries
            boot_meta['boot_log'] = log
            boot_meta['boot_log_html'] = html
            # TODO: Fix this
            boot_meta['version'] = '1.1'
            boot_meta['arch'] = arch
            boot_meta['defconfig'] = kernel_defconfig_base
            boot_meta['defconfig_full'] = kernel_defconfig
            if device_map[device_type][1]:
                boot_meta['mach'] = device_map[device_type][1]
            boot_meta['kernel'] = kernel_version
            boot_meta['git_branch'] = git_branch
            boot_meta['job'] = kernel_tree
            boot_meta['board'] = platform_name
            if board_offline and result == 'FAIL':
                boot_meta['boot_result'] = 'OFFLINE'
                #results[kernel_defconfig]['result'] = 'OFFLINE'
            else:
                boot_meta['boot_result'] = result
            if result == 'FAIL' or result == 'OFFLINE':
                if boot_failure_reason:
                    boot_meta['boot_result_description'] = boot_failure_reason
                else:
                    boot_meta[
                        'boot_result_description'] = 'Unknown Error: platform failed to boot'
            boot_meta['boot_time'] = kernel_boot_time
            # TODO: Fix this
            boot_meta['boot_warnings'] = None
            if device_tree:
                if arch == 'arm64':
                    boot_meta['dtb'] = 'dtbs/' + device_map[device_type][
                        1] + '/' + device_tree
                else:
                    boot_meta['dtb'] = 'dtbs/' + device_tree
            else:
                boot_meta['dtb'] = device_tree
            boot_meta['dtb_addr'] = dtb_addr
            boot_meta['dtb_append'] = dtb_append
            boot_meta['dt_test'] = dt_test
            boot_meta['endian'] = kernel_endian
            boot_meta['fastboot'] = fastboot
            # TODO: Fix this
            boot_meta['initrd'] = None
            boot_meta['initrd_addr'] = initrd_addr
            if arch == 'arm':
                boot_meta['kernel_image'] = 'zImage'
            elif arch == 'arm64':
                boot_meta['kernel_image'] = 'Image'
            else:
                boot_meta['kernel_image'] = 'bzImage'
            boot_meta['loadaddr'] = kernel_addr
            json_file = 'boot-%s.json' % platform_name
            utils.write_json(json_file, directory, boot_meta)
            print 'Creating html version of boot log for %s' % platform_name
            cmd = 'python log2html.py %s' % os.path.join(directory, log)
            subprocess.check_output(cmd, shell=True)
            if config.get("lab") and config.get("api") and config.get("token"):
                print 'Sending boot result to %s for %s' % (config.get("api"),
                                                            platform_name)
                headers = {
                    'Authorization': config.get("token"),
                    'Content-Type': 'application/json'
                }
                api_url = urlparse.urljoin(config.get("api"), '/boot')
                push('POST',
                     api_url,
                     data=json.dumps(boot_meta),
                     headers=headers)
                headers = {
                    'Authorization': config.get("token"),
                }
                print 'Uploading text version of boot log'
                with open(os.path.join(directory, log)) as lh:
                    data = lh.read()
                api_url = urlparse.urljoin(
                    config.get("api"), '/upload/%s/%s/%s/%s/%s/%s/%s' %
                    (kernel_tree, git_branch, kernel_version, arch,
                     kernel_defconfig, config.get("lab"), log))
                push('PUT', api_url, data=data, headers=headers)
                print 'Uploading html version of boot log'
                with open(os.path.join(directory, html)) as lh:
                    data = lh.read()
                api_url = urlparse.urljoin(
                    config.get("api"), '/upload/%s/%s/%s/%s/%s/%s/%s' %
                    (kernel_tree, git_branch, kernel_version, arch,
                     kernel_defconfig, config.get("lab"), html))
                push('PUT', api_url, data=data, headers=headers)
Example #31
        default=DEFAULT_ENV_NAME,
        help="Environment name to use, default=" + DEFAULT_ENV_NAME,
    )
    parser.add_argument("-r", "--record", help="Directory to store video recording")
    parser.add_argument(
        "--no-visualize",
        default=True,
        action="store_false",
        dest="visualize",
        help="Disable visualization of the game play",
    )
    args = parser.parse_args()

    env = wrappers.make_env(args.env)
    if args.record:
        mkdir(".", args.record)
        env = gym.wrappers.Monitor(env, args.record)
    net = dqn_model.DQN(env.observation_space.shape, env.action_space.n)
    net.load_state_dict(
        torch.load(args.model, map_location=lambda storage, loc: storage)
    )

    state = env.reset()
    total_reward = 0.0
    c = collections.Counter()

    while True:
        start_ts = time.time()
        if args.visualize:
            env.render()
        state_v = torch.tensor(np.array([state], copy=False))
Example #32
FPS = 25


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file to load")
    parser.add_argument("-e", "--env", default=DEFAULT_ENV_NAME,
                        help="Environment name to use, default=" + DEFAULT_ENV_NAME)
    parser.add_argument("-r", "--record", help="Directory to store video recording")
    parser.add_argument("--no-visualize", default=True, action='store_false', dest='visualize',
                        help="Disable visualization of the game play")
    args = parser.parse_args()

    env = wrappers.make_env(args.env)
    if args.record:
        mkdir('.', args.record)
        env = gym.wrappers.Monitor(env, args.record)
    net = dqn_model.DQN(env.observation_space.shape, env.action_space.n)
    net.load_state_dict(torch.load(args.model, map_location=lambda storage, loc: storage))

    state = env.reset()
    total_reward = 0.0
    c = collections.Counter()

    while True:
        start_ts = time.time()
        if args.visualize:
            env.render()
        state_v = torch.tensor(np.array([state], copy=False))
        q_vals = net(state_v).data.numpy()[0]
        action = np.argmax(q_vals)
Example #33
def boot_report(config):
    connection, jobs, duration = parse_json(config.get("boot"))
    # TODO: Fix this when multi-lab sync is working
    #download_log2html(log2html)
    results_directory = os.getcwd() + '/results'
    results = {}
    dt_tests = False
    utils.mkdir(results_directory)
    for job_id in jobs:
        print 'Job ID: %s' % job_id
        # Init
        boot_meta = {}
        device_type = None
        job_name = None
        api_url = None
        arch = None
        board_instance = None
        boot_retries = 0
        kernel_defconfig = None
        kernel_defconfig_base = None
        kernel_version = None
        device_tree = None
        kernel_endian = None
        kernel_tree = None
        git_branch = None
        kernel_addr = None
        initrd_addr = None
        dtb_addr = None
        dtb_append = None
        fastboot = None
        fastboot_cmd = None
        test_plan = None
        job_file = ''
        dt_test = None
        dt_test_result = None
        dt_tests_passed = None
        dt_tests_failed = None
        board_offline = False
        kernel_boot_time = None
        boot_failure_reason = None
        efi_rtc = False
        # Retrieve job details
        job_details = connection.scheduler.job_details(job_id)
        if job_details['requested_device_type_id']:
            device_type = job_details['requested_device_type_id']
        if job_details['description']:
            job_name = job_details['description']
        result = jobs[job_id]['result']
        bundle = jobs[job_id]['bundle']
        if bundle is None and device_type == 'dynamic-vm':
            host_job_id = job_id.replace('.1', '.0')
            bundle = jobs[host_job_id]['bundle']
            if bundle is None:
                print '%s bundle is empty, skipping...' % device_type
                continue
        # Retrieve the log file
        try:
            binary_job_file = connection.scheduler.job_output(job_id)
        except xmlrpclib.Fault:
            print 'Job output not found for %s' % device_type
            continue
        # Parse LAVA messages out of log
        raw_job_file = str(binary_job_file)
        for line in raw_job_file.splitlines():
            if 'Infrastructure Error:' in line:
                print 'Infrastructure Error detected!'
                index = line.find('Infrastructure Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Bootloader Error:' in line:
                print 'Bootloader Error detected!'
                index = line.find('Bootloader Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Kernel Error:' in line:
                print 'Kernel Error detected!'
                index = line.find('Kernel Error:')
                boot_failure_reason = line[index:]
            if 'Userspace Error:' in line:
                print 'Userspace Error detected!'
                index = line.find('Userspace Error:')
                boot_failure_reason = line[index:]
            if '<LAVA_DISPATCHER>' not in line:
                if len(line) != 0:
                    job_file += line + '\n'
            if '### dt-test ### end of selftest' in line:
                dt_tests = True
                regex = re.compile(r"(?P<test>\d+\*?)")
                dt_test_results = regex.findall(line)
                if len(dt_test_results) > 2:
                    dt_tests_passed = dt_test_results[2]
                    dt_tests_failed = dt_test_results[3]
                else:
                    dt_tests_passed = dt_test_results[0]
                    dt_tests_failed = dt_test_results[1]
                if int(dt_tests_failed) > 0:
                    dt_test_result = 'FAIL'
                else:
                    dt_test_result = 'PASS'
            if 'rtc-efi rtc-efi: setting system clock to' in line:
                if device_type == 'dynamic-vm':
                    efi_rtc = True
        # Retrieve bundle
        if bundle is not None:
            json_bundle = connection.dashboard.get(bundle)
            bundle_data = json.loads(json_bundle['content'])
            # Get the boot data from LAVA
            for test_results in bundle_data['test_runs']:
                # Check for the LAVA self boot test
                if test_results['test_id'] == 'lava':
                    for test in test_results['test_results']:
                        # TODO for compat :(
                        if test['test_case_id'] == 'kernel_boot_time':
                            kernel_boot_time = test['measurement']
                        if test['test_case_id'] == 'test_kernel_boot_time':
                            kernel_boot_time = test['measurement']
                    bundle_attributes = bundle_data['test_runs'][-1]['attributes']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.defconfig'):
                print bundle_attributes['kernel.defconfig']
            if utils.in_bundle_attributes(bundle_attributes, 'target'):
                board_instance = bundle_attributes['target']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.defconfig'):
                kernel_defconfig = bundle_attributes['kernel.defconfig']
                kernel_defconfig_base = ''.join(kernel_defconfig.split('+')[:1])
            if utils.in_bundle_attributes(bundle_attributes, 'arch'):
                arch = bundle_attributes['arch']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.describe'):
                kernel_version = bundle_attributes['kernel.describe']
            if utils.in_bundle_attributes(bundle_attributes, 'device.tree'):
                device_tree = bundle_attributes['device.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.endian'):
                kernel_endian = bundle_attributes['kernel.endian']
            if utils.in_bundle_attributes(bundle_attributes, 'platform.fastboot'):
                fastboot = bundle_attributes['platform.fastboot']
            if kernel_boot_time is None:
                if utils.in_bundle_attributes(bundle_attributes, 'kernel-boot-time'):
                    kernel_boot_time = bundle_attributes['kernel-boot-time']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.tree'):
                kernel_tree = bundle_attributes['kernel.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel-addr'):
                kernel_addr = bundle_attributes['kernel-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'initrd-addr'):
                initrd_addr = bundle_attributes['initrd-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-addr'):
                dtb_addr = bundle_attributes['dtb-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-append'):
                dtb_append = bundle_attributes['dtb-append']
            if utils.in_bundle_attributes(bundle_attributes, 'boot_retries'):
                boot_retries = int(bundle_attributes['boot_retries'])
            if utils.in_bundle_attributes(bundle_attributes, 'test.plan'):
                test_plan = bundle_attributes['test.plan']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.branch'):
                git_branch = bundle_attributes['kernel.branch']

        # Check if we found efi-rtc
        if test_plan == 'boot-kvm-uefi' and not efi_rtc:
            if device_type == 'dynamic-vm':
                boot_failure_reason = 'Unable to read EFI rtc'
                result = 'FAIL'
        # Record the boot log and result
        # TODO: Will need to map device_types to dashboard device types
        if kernel_defconfig and device_type and result:
            if (arch == 'arm' or arch == 'arm64') and device_tree is None:
                platform_name = device_map[device_type][0] + ',legacy'
            else:
                if device_tree == 'vexpress-v2p-ca15_a7.dtb':
                    platform_name = 'vexpress-v2p-ca15_a7'
                elif device_tree == 'fsl-ls2080a-simu.dtb':
                    platform_name = 'fsl-ls2080a-simu'
                elif test_plan == 'boot-kvm' or test_plan == 'boot-kvm-uefi':
                    if device_tree == 'sun7i-a20-cubietruck.dtb':
                        if device_type == 'dynamic-vm':
                            device_type = 'cubieboard3-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            device_type = 'cubieboard3-kvm-host'
                            platform_name = device_map[device_type][0]
                    elif device_tree == 'apm-mustang.dtb':
                        if device_type == 'dynamic-vm':
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'mustang-kvm-uefi-guest'
                            else:
                                device_type = 'mustang-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'mustang-kvm-uefi-host'
                            else:
                                device_type = 'mustang-kvm-host'
                            platform_name = device_map[device_type][0]
                    elif device_tree == 'juno.dtb':
                        if device_type == 'dynamic-vm':
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'juno-kvm-uefi-guest'
                            else:
                                device_type = 'juno-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'juno-kvm-uefi-host'
                            else:
                                device_type = 'juno-kvm-host'
                            platform_name = device_map[device_type][0]
                elif test_plan == 'boot-nfs' or test_plan == 'boot-nfs-mp':
                    platform_name = device_map[device_type][0] + '_rootfs:nfs'
                else:
                    platform_name = device_map[device_type][0]
            print 'Creating boot log for %s' % platform_name
            log = 'boot-%s.txt' % platform_name
            html = 'boot-%s.html' % platform_name
            if config.get("lab"):
                directory = os.path.join(results_directory, kernel_defconfig + '/' + config.get("lab"))
            else:
                directory = os.path.join(results_directory, kernel_defconfig)
            utils.ensure_dir(directory)
            utils.write_file(job_file, log, directory)
            if kernel_boot_time is None:
                kernel_boot_time = '0.0'
            if kernel_defconfig in results:
                results[kernel_defconfig].append({'device_type': platform_name, 'dt_test_result': dt_test_result, 'dt_tests_passed': dt_tests_passed, 'dt_tests_failed': dt_tests_failed, 'kernel_boot_time': kernel_boot_time, 'result': result})
            else:
                results[kernel_defconfig] = [{'device_type': platform_name, 'dt_test_result': dt_test_result, 'dt_tests_passed': dt_tests_passed, 'dt_tests_failed': dt_tests_failed, 'kernel_boot_time': kernel_boot_time, 'result': result}]
            # Create JSON format boot metadata
            print 'Creating JSON format boot metadata'
            if config.get("lab"):
                boot_meta['lab_name'] = config.get("lab")
            else:
                boot_meta['lab_name'] = None
            if board_instance:
                boot_meta['board_instance'] = board_instance
            boot_meta['retries'] = boot_retries
            boot_meta['boot_log'] = log
            boot_meta['boot_log_html'] = html
            # TODO: Fix this
            boot_meta['version'] = '1.1'
            boot_meta['arch'] = arch
            boot_meta['defconfig'] = kernel_defconfig_base
            boot_meta['defconfig_full'] = kernel_defconfig
            if device_map[device_type][1]:
                boot_meta['mach'] = device_map[device_type][1]
            boot_meta['kernel'] = kernel_version
            boot_meta['git_branch'] = git_branch
            boot_meta['job'] = kernel_tree
            boot_meta['board'] = platform_name
            if board_offline and result == 'FAIL':
                boot_meta['boot_result'] = 'OFFLINE'
                #results[kernel_defconfig]['result'] = 'OFFLINE'
            else:
                boot_meta['boot_result'] = result
            if result == 'FAIL' or result == 'OFFLINE':
                if boot_failure_reason:
                    boot_meta['boot_result_description'] = boot_failure_reason
                else:
                    boot_meta['boot_result_description'] = 'Unknown Error: platform failed to boot'
            boot_meta['boot_time'] = kernel_boot_time
            # TODO: Fix this
            boot_meta['boot_warnings'] = None
            if device_tree:
                if arch == 'arm64':
                    boot_meta['dtb'] = 'dtbs/' + device_map[device_type][1] + '/' + device_tree
                else:
                    boot_meta['dtb'] = 'dtbs/' + device_tree
            else:
                boot_meta['dtb'] = device_tree
            boot_meta['dtb_addr'] = dtb_addr
            boot_meta['dtb_append'] = dtb_append
            boot_meta['dt_test'] = dt_test
            boot_meta['endian'] = kernel_endian
            boot_meta['fastboot'] = fastboot
            # TODO: Fix this
            boot_meta['initrd'] = None
            boot_meta['initrd_addr'] = initrd_addr
            if arch == 'arm':
                boot_meta['kernel_image'] = 'zImage'
            elif arch == 'arm64':
                boot_meta['kernel_image'] = 'Image'
            else:
                boot_meta['kernel_image'] = 'bzImage'
            boot_meta['loadaddr'] = kernel_addr
            json_file = 'boot-%s.json' % platform_name
            utils.write_json(json_file, directory, boot_meta)
            print 'Creating html version of boot log for %s' % platform_name
            cmd = 'python log2html.py %s' % os.path.join(directory, log)
            subprocess.check_output(cmd, shell=True)
            if config.get("lab") and config.get("api") and config.get("token"):
                print 'Sending boot result to %s for %s' % (config.get("api"), platform_name)
                headers = {
                    'Authorization': config.get("token"),
                    'Content-Type': 'application/json'
                }
                api_url = urlparse.urljoin(config.get("api"), '/boot')
                push('POST', api_url, data=json.dumps(boot_meta), headers=headers)
                headers = {
                    'Authorization': config.get("token"),
                }
                print 'Uploading text version of boot log'
                with open(os.path.join(directory, log)) as lh:
                    data = lh.read()
                api_url = urlparse.urljoin(config.get("api"), '/upload/%s/%s/%s/%s/%s/%s/%s' % (kernel_tree,
                                                                                 git_branch,
                                                                                 kernel_version,
                                                                                 arch,
                                                                                 kernel_defconfig,
                                                                                 config.get("lab"),
                                                                                 log))
                push('PUT', api_url, data=data, headers=headers)
                print 'Uploading html version of boot log'
                with open(os.path.join(directory, html)) as lh:
                    data = lh.read()
                api_url = urlparse.urljoin(config.get("api"), '/upload/%s/%s/%s/%s/%s/%s/%s' % (kernel_tree,
                                                                                 git_branch,
                                                                                 kernel_version,
                                                                                 arch,
                                                                                 kernel_defconfig,
                                                                                 config.get("lab"),
                                                                                 html))
                push('PUT', api_url, data=data, headers=headers)
Example #34
def boot_report(config):
    connection, jobs, duration = parse_json(config.get("boot"))
    # TODO: Fix this when multi-lab sync is working
    #download_log2html(log2html)
    results_directory = os.getcwd() + '/results'
    results = {}
    utils.mkdir(results_directory)
    test_plan = None
    for job_id in jobs:
        print 'Job ID: %s' % job_id
        # Init
        boot_meta = {}
        job_name = ''
        job_short_name = 'boot-test'
        api_url = None
        arch = None
        board_instance = None
        boot_retries = 0
        kernel_defconfig_full = None
        kernel_defconfig = None
        kernel_defconfig_base = None
        kernel_version = None
        device_tree = None
        kernel_endian = None
        kernel_tree = None
        kernel_addr = None
        initrd_addr = None
        dtb_addr = None
        dtb_append = None
        fastboot = None
        fastboot_cmd = None
        job_file = ''
        board_offline = False
        kernel_boot_time = None
        boot_failure_reason = None
        efi_rtc = False
        # Retrieve job details
        device_type = ''
        job_details = connection.scheduler.job_details(job_id)
        if job_details['requested_device_type_id']:
            device_type = job_details['requested_device_type_id']
        if job_details['description']:
            job_name = job_details['description']
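            # Derive a short, human-readable job name from the description;
            # fall back to a generic label when the pattern does not match.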
            try:
                job_short_name = re.search(".*?([A-Z]+.*)", job_name).group(1)
            except Exception:
                job_short_name = 'boot-test'
        device_name = job_details['_actual_device_cache']['hostname']
        result = jobs[job_id]['result']
        bundle = jobs[job_id]['bundle']
        if not device_type:
            device_type = job_details['_actual_device_cache']['device_type_id']
        if bundle is None and device_type == 'dynamic-vm':
            host_job_id = job_id.replace('.1', '.0')
            bundle = jobs[host_job_id]['bundle']
            if bundle is None:
                print '%s bundle is empty, skipping...' % device_type
                continue
        # Retrieve the log file
        try:
            binary_job_file = connection.scheduler.job_output(job_id)
        except xmlrpclib.Fault:
            print 'Job output not found for %s' % device_type
            continue
        # Parse LAVA messages out of log
        raw_job_file = str(binary_job_file)
        for line in raw_job_file.splitlines():
            if 'Infrastructure Error:' in line:
                print 'Infrastructure Error detected!'
                index = line.find('Infrastructure Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Bootloader Error:' in line:
                print 'Bootloader Error detected!'
                index = line.find('Bootloader Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Kernel Error:' in line:
                print 'Kernel Error detected!'
                index = line.find('Kernel Error:')
                boot_failure_reason = line[index:]
            if 'Userspace Error:' in line:
                print 'Userspace Error detected!'
                index = line.find('Userspace Error:')
                boot_failure_reason = line[index:]
            if '<LAVA_DISPATCHER>' not in line:
                if len(line) != 0:
                    job_file += line + '\n'
            if 'rtc-efi rtc-efi: setting system clock to' in line:
                if device_type == 'dynamic-vm':
                    efi_rtc = True

        # Retrieve bundle
        if bundle is not None:
            json_bundle = connection.dashboard.get(bundle)
            bundle_data = json.loads(json_bundle['content'])
            # Get the boot data from LAVA
            for test_results in bundle_data['test_runs']:
                # Check for the LAVA self boot test
                if test_results['test_id'] == 'lava':
                    for test in test_results['test_results']:
                        # TODO for compat :(
                        if test['test_case_id'] == 'kernel_boot_time':
                            kernel_boot_time = test['measurement']
                        if test['test_case_id'] == 'test_kernel_boot_time':
                            kernel_boot_time = test['measurement']
                    bundle_attributes = bundle_data['test_runs'][-1][
                        'attributes']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'kernel.defconfig'):
                print bundle_attributes['kernel.defconfig']
            if utils.in_bundle_attributes(bundle_attributes, 'target'):
                board_instance = bundle_attributes['target']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'kernel.defconfig'):
                kernel_defconfig = bundle_attributes['kernel.defconfig']
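                # The split below assumes kernel.defconfig packs the arch and
                # defconfig name into one '-'-joined string; take it apart to
                # recover the base name (dropping any '+fragment' suffix).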
                defconfig_list = kernel_defconfig.split('-')
                #arch = defconfig_list[0]
                arch = defconfig_list[-1]
                # Remove arch
                defconfig_list.pop(0)
                kernel_defconfig_full = '-'.join(defconfig_list)
                kernel_defconfig_base = ''.join(
                    kernel_defconfig_full.split('+')[:1])
                if kernel_defconfig_full == kernel_defconfig_base:
                    kernel_defconfig_full = None
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.version'):
                kernel_version = bundle_attributes['kernel.version']
            if utils.in_bundle_attributes(bundle_attributes, 'device.tree'):
                device_tree = bundle_attributes['device.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.endian'):
                kernel_endian = bundle_attributes['kernel.endian']
            if utils.in_bundle_attributes(bundle_attributes,
                                          'platform.fastboot'):
                fastboot = bundle_attributes['platform.fastboot']
            if kernel_boot_time is None:
                if utils.in_bundle_attributes(bundle_attributes,
                                              'kernel-boot-time'):
                    kernel_boot_time = bundle_attributes['kernel-boot-time']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.tree'):
                kernel_tree = bundle_attributes['kernel.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel-addr'):
                kernel_addr = bundle_attributes['kernel-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'initrd-addr'):
                initrd_addr = bundle_attributes['initrd-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-addr'):
                dtb_addr = bundle_attributes['dtb-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-append'):
                dtb_append = bundle_attributes['dtb-append']
            if utils.in_bundle_attributes(bundle_attributes, 'boot_retries'):
                boot_retries = int(bundle_attributes['boot_retries'])
            if utils.in_bundle_attributes(bundle_attributes, 'test.plan'):
                test_tmp = bundle_attributes['test.plan']
                if test_tmp:
                    test_plan = test_tmp
        else:
            if not kernel_defconfig or not kernel_version or not kernel_tree:
                job_definition = {}
                job_dictionary = {}
                if 'original_definition' in job_details.keys():
                    job_definition = job_details['original_definition']
                    try:
                        job_dictionary = eval(job_definition)
                    except Exception:
                        pass
                    if job_dictionary:
                        if 'actions' in job_dictionary.keys():
                            actions = job_dictionary['actions']
                            for i in range(0, len(actions)):
                                try:
                                    kernel_defconfig = actions[i]['metadata'][
                                        'kernel.defconfig']
                                    kernel_version = actions[i]['metadata'][
                                        'kernel.version']
                                    kernel_tree = actions[i]['metadata'][
                                        'kernel.tree']
                                    kernel_endian = actions[i]['metadata'][
                                        'kernel.endian']
                                    platform_fastboot = actions[i]['metadata'][
                                        'platform.fastboot']
                                    device_tree = actions[i]['metadata'][
                                        'kernel.tree']
                                    break
                                except KeyError:
                                    continue
                if 'target' in job_details.keys():
                    print job_details.keys()
        # Check if we found efi-rtc
        if test_plan == 'boot-kvm-uefi' and not efi_rtc:
            if device_type == 'dynamic-vm':
                boot_failure_reason = 'Unable to read EFI rtc'
                result = 'FAIL'

        # Record the boot log and result
        # TODO: Will need to map device_types to dashboard device types
        if kernel_defconfig and device_type and result:
            if ('arm' == arch or 'arm64' == arch) and device_tree is None:
                platform_name = device_map[device_type][0] + ',legacy'
            else:
                if test_plan == 'boot-nfs' or test_plan == 'boot-nfs-mp':
                    platform_name = device_map[device_type][0] + '_rootfs:nfs'
                else:
                    platform_name = device_map[device_type][0]

            print 'Creating boot log for %s' % (platform_name + job_name +
                                                '_' + job_id)
            log = 'boot-%s.txt' % (platform_name + job_name + '_' + job_id)
            html = 'boot-%s.html' % (platform_name + job_name + '_' + job_id)
            if config.get("lab"):
                directory = os.path.join(
                    results_directory,
                    kernel_defconfig + '/' + config.get("lab"))
            else:
                directory = os.path.join(results_directory, kernel_defconfig)
            utils.ensure_dir(directory)
            utils.write_file(job_file, log, directory)
            if kernel_boot_time is None:
                kernel_boot_time = '0.0'
            if kernel_defconfig in results:
                results[kernel_defconfig].append({
                    'device_type': platform_name,
                    'job_id': job_id,
                    'job_name': job_short_name,
                    'kernel_boot_time': kernel_boot_time,
                    'result': result,
                    'device_name': device_name
                })
            else:
                results[kernel_defconfig] = [{
                    'device_type': platform_name,
                    'job_id': job_id,
                    'job_name': job_short_name,
                    'kernel_boot_time': kernel_boot_time,
                    'result': result,
                    'device_name': device_name
                }]
            # Create JSON format boot metadata
            print 'Creating JSON format boot metadata'
            if config.get("lab"):
                boot_meta['lab_name'] = config.get("lab")
            else:
                boot_meta['lab_name'] = None
            if board_instance:
                boot_meta['board_instance'] = board_instance
            boot_meta['retries'] = boot_retries
            boot_meta['boot_log'] = log
            boot_meta['boot_log_html'] = html
            # TODO: Fix this
            boot_meta['version'] = '1.0'
            boot_meta['arch'] = arch
            boot_meta['defconfig'] = kernel_defconfig_base
            if kernel_defconfig_full is not None:
                boot_meta['defconfig_full'] = kernel_defconfig_full
            if device_map[device_type][1]:
                boot_meta['mach'] = device_map[device_type][1]
            boot_meta['kernel'] = kernel_version

            boot_meta['job'] = kernel_tree
            boot_meta['board'] = platform_name
            if board_offline and result == 'FAIL':
                boot_meta['boot_result'] = 'OFFLINE'
                #results[kernel_defconfig]['result'] = 'OFFLINE'
            else:
                boot_meta['boot_result'] = result
            if result == 'FAIL' or result == 'OFFLINE':
                if boot_failure_reason:
                    boot_meta['boot_result_description'] = boot_failure_reason
                else:
                    boot_meta[
                        'boot_result_description'] = 'Unknown Error: platform failed to boot'
            boot_meta['boot_time'] = kernel_boot_time
            # TODO: Fix this
            boot_meta['boot_warnings'] = None
            if device_tree:
                if arch == 'arm64':
                    boot_meta['dtb'] = 'dtbs/' + device_map[device_type][
                        1] + '/' + device_tree
                else:
                    boot_meta['dtb'] = 'dtbs/' + device_tree
            else:
                boot_meta['dtb'] = device_tree
            boot_meta['dtb_addr'] = dtb_addr
            boot_meta['dtb_append'] = dtb_append
            boot_meta['endian'] = kernel_endian
            boot_meta['fastboot'] = fastboot
            # TODO: Fix this
            boot_meta['initrd'] = None
            boot_meta['initrd_addr'] = initrd_addr
            if arch == 'arm':
                boot_meta['kernel_image'] = 'zImage'
            elif arch == 'arm64':
                boot_meta['kernel_image'] = 'Image'
            else:
                boot_meta['kernel_image'] = 'bzImage'
            boot_meta['loadaddr'] = kernel_addr
            json_file = 'boot-%s.json' % (platform_name + job_name + '_' +
                                          job_id)
            utils.write_json(json_file, directory, boot_meta)

    if config.get("lab"):
        report_directory = os.path.join(results_directory, config.get("lab"))
        utils.mkdir(report_directory)
    else:
        report_directory = results_directory

    if results and kernel_tree and kernel_version:
        print 'Creating summary for %s' % (kernel_version)
        boot = '%s-boot-report.txt' % (kernel_version)
        if test_plan and ('boot' in test_plan or 'BOOT' in test_plan):
            boot = boot.replace('boot', test_plan)
        passed = 0
        failed = 0
        for defconfig, results_list in results.items():
            for result in results_list:
                if result['result'] == 'PASS':
                    passed += 1
                else:
                    failed += 1
        total = passed + failed
        with open(os.path.join(report_directory, boot), 'a') as f:
            f.write('Subject: %s boot: %s boots: %s passed, %s failed (%s)\n' %
                    (kernel_tree, str(total), str(passed), str(failed),
                     kernel_version))
            f.write('\n')
            f.write('Total Duration: %.2f minutes\n' % (duration / 60))
            f.write('Tree/Branch: %s\n' % kernel_tree)
            f.write('Git Describe: %s\n' % kernel_version)
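            # Group offline boards by defconfig: print each defconfig header
            # once, then every offline result beneath it.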
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        if first:
                            f.write('\n')
                            f.write('Boards Offline:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        f.write(
                            '    %s   %s   %s   %ss   %s: %s\n' %
                            (result['job_id'], result['device_type'],
                             result['device_name'], result['kernel_boot_time'],
                             result['job_name'], result['result']))
                        f.write('\n')
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'FAIL':
                        if first:
                            f.write('\n')
                            f.write('Failed Boot Tests:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'FAIL':
                        f.write(
                            '    %s   %s   %s   %ss   %s: %s\n' %
                            (result['job_id'], result['device_type'],
                             result['device_name'], result['kernel_boot_time'],
                             result['job_name'], result['result']))
            f.write('\n')
            f.write('Full Boot Report:\n')
            for defconfig, results_list in results.items():
                f.write('\n')
                f.write(defconfig)
                f.write('\n')
                for result in results_list:
                    f.write('    %s   %s   %s   %ss   %s: %s\n' %
                            (result['job_id'], result['device_type'],
                             result['device_name'], result['kernel_boot_time'],
                             result['job_name'], result['result']))
    # added by wuyanjun
    if results and directory:
        parser_and_get_result(results, directory, report_directory)
        get_ip_board_mapping(results, directory, report_directory)
Example #35
def boot_report(config):
    connection, jobs, duration = parse_json(config.get("boot"))
    # TODO: Fix this when multi-lab sync is working
    #download_log2html(log2html)
    results_directory = os.getcwd() + '/results'
    results = {}
    dt_tests = False
    utils.mkdir(results_directory)
    for job_id in jobs:
        print 'Job ID: %s' % job_id
        # Init
        boot_meta = {}
        device_type = None
        job_name = None
        api_url = None
        arch = None
        board_instance = None
        boot_retries = 0
        kernel_defconfig_full = None
        kernel_defconfig = None
        kernel_defconfig_base = None
        kernel_version = None
        device_tree = None
        kernel_endian = None
        kernel_tree = None
        kernel_addr = None
        initrd_addr = None
        dtb_addr = None
        dtb_append = None
        fastboot = None
        fastboot_cmd = None
        test_plan = None
        job_file = ''
        dt_test = None
        dt_test_result = None
        dt_tests_passed = None
        dt_tests_failed = None
        board_offline = False
        kernel_boot_time = None
        boot_failure_reason = None
        efi_rtc = False
        # Retrieve job details
        job_details = connection.scheduler.job_details(job_id)
        if job_details['requested_device_type_id']:
            device_type = job_details['requested_device_type_id']
        if job_details['description']:
            job_name = job_details['description']
        result = jobs[job_id]['result']
        bundle = jobs[job_id]['bundle']
        if bundle is None and device_type == 'dynamic-vm':
            host_job_id = job_id.replace('.1', '.0')
            bundle = jobs[host_job_id]['bundle']
            if bundle is None:
                print '%s bundle is empty, skipping...' % device_type
                continue
        # Retrieve the log file
        try:
            binary_job_file = connection.scheduler.job_output(job_id)
        except xmlrpclib.Fault:
            print 'Job output not found for %s' % device_type
            continue
        # Parse LAVA messages out of log
        raw_job_file = str(binary_job_file)
        for line in raw_job_file.splitlines():
            if 'Infrastructure Error:' in line:
                print 'Infrastructure Error detected!'
                index = line.find('Infrastructure Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Bootloader Error:' in line:
                print 'Bootloader Error detected!'
                index = line.find('Bootloader Error:')
                boot_failure_reason = line[index:]
                board_offline = True
            if 'Kernel Error:' in line:
                print 'Kernel Error detected!'
                index = line.find('Kernel Error:')
                boot_failure_reason = line[index:]
            if 'Userspace Error:' in line:
                print 'Userspace Error detected!'
                index = line.find('Userspace Error:')
                boot_failure_reason = line[index:]
            if '<LAVA_DISPATCHER>' not in line:
                if len(line) != 0:
                    job_file += line + '\n'
            if '### dt-test ### end of selftest' in line:
                dt_tests = True
                regex = re.compile(r"(?P<test>\d+\*?)")
                dt_test_results = regex.findall(line)
                if len(dt_test_results) > 2:
                    dt_tests_passed = dt_test_results[2]
                    dt_tests_failed = dt_test_results[3]
                else:
                    dt_tests_passed = dt_test_results[0]
                    dt_tests_failed = dt_test_results[1]
                if int(dt_tests_failed) > 0:
                    dt_test_result = 'FAIL'
                else:
                    dt_test_result = 'PASS'
            if 'rtc-efi rtc-efi: setting system clock to' in line:
                if device_type == 'dynamic-vm':
                    efi_rtc = True
        # Retrieve bundle
        if bundle is not None:
            json_bundle = connection.dashboard.get(bundle)
            bundle_data = json.loads(json_bundle['content'])
            # Get the boot data from LAVA
            for test_results in bundle_data['test_runs']:
                # Check for the LAVA self boot test
                if test_results['test_id'] == 'lava':
                    for test in test_results['test_results']:
                        # TODO for compat :(
                        if test['test_case_id'] == 'kernel_boot_time':
                            kernel_boot_time = test['measurement']
                        if test['test_case_id'] == 'test_kernel_boot_time':
                            kernel_boot_time = test['measurement']
                    bundle_attributes = bundle_data['test_runs'][-1]['attributes']
                if test_results['test_id'] == 'lava-command':
                    # Post the lava-command results to the test API (build_id
                    # is hardcoded below).
                    build_id = "56b9648659b514b7f6e41fac"
                    test_suite = test_api_post(config, build_id, test_results)
                    if test_suite:
                        print json.dumps(test_suite)
                        print 'Sending test suite to %s' % config.get("api")
                        headers = {
                                   'Authorization': config.get("token"),
                                   'Content-Type': 'application/json'
                                  }
                        api_url = urlparse.urljoin(config.get("api"), '/test/suite')
                        push('POST', api_url, json.dumps(test_suite), headers)
                    
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.defconfig'):
                print bundle_attributes['kernel.defconfig']
            if utils.in_bundle_attributes(bundle_attributes, 'target'):
                board_instance = bundle_attributes['target']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.defconfig'):
                kernel_defconfig = bundle_attributes['kernel.defconfig']
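                # The defconfig attribute is assumed to follow the
                # '<arch>-<defconfig>[+<fragment>...]' convention: the leading
                # component is the architecture and anything after a '+' is a
                # config fragment, so the base name is the part before it.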
                defconfig_list = kernel_defconfig.split('-')
                arch = defconfig_list[0]
                # Remove arch
                defconfig_list.pop(0)
                kernel_defconfig_full = '-'.join(defconfig_list)
                kernel_defconfig_base = kernel_defconfig_full.split('+')[0]
                if kernel_defconfig_full == kernel_defconfig_base:
                    kernel_defconfig_full = None
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.version'):
                kernel_version = bundle_attributes['kernel.version']
            if utils.in_bundle_attributes(bundle_attributes, 'device.tree'):
                device_tree = bundle_attributes['device.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.endian'):
                kernel_endian = bundle_attributes['kernel.endian']
            if utils.in_bundle_attributes(bundle_attributes, 'platform.fastboot'):
                fastboot = bundle_attributes['platform.fastboot']
            if kernel_boot_time is None:
                if utils.in_bundle_attributes(bundle_attributes, 'kernel-boot-time'):
                    kernel_boot_time = bundle_attributes['kernel-boot-time']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel.tree'):
                kernel_tree = bundle_attributes['kernel.tree']
            if utils.in_bundle_attributes(bundle_attributes, 'kernel-addr'):
                kernel_addr = bundle_attributes['kernel-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'initrd-addr'):
                initrd_addr = bundle_attributes['initrd-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-addr'):
                dtb_addr = bundle_attributes['dtb-addr']
            if utils.in_bundle_attributes(bundle_attributes, 'dtb-append'):
                dtb_append = bundle_attributes['dtb-append']
            if utils.in_bundle_attributes(bundle_attributes, 'boot_retries'):
                boot_retries = int(bundle_attributes['boot_retries'])
            if utils.in_bundle_attributes(bundle_attributes, 'test.plan'):
                test_plan = bundle_attributes['test.plan']

        # boot-kvm-uefi guests are expected to log an rtc-efi message (checked
        # while scanning the log above) as a proxy for a working EFI runtime
        # clock; if it never appeared, treat the boot as failed.
        if test_plan == 'boot-kvm-uefi' and not efi_rtc:
            if device_type == 'dynamic-vm':
                boot_failure_reason = 'Unable to read EFI rtc'
                result = 'FAIL'
        # Record the boot log and result
        # TODO: Will need to map device_types to dashboard device types
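        # device_map (defined elsewhere in this script) maps a LAVA
        # device_type to a [platform_name, mach] pair: index 0 names the logs,
        # index 1 feeds the dashboard 'mach' field and the dtb path.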
        if kernel_defconfig and device_type and result:
            if (arch == 'arm' or arch == 'arm64') and device_tree is None:
                platform_name = device_map[device_type][0] + ',legacy'
            else:
                if device_tree == 'vexpress-v2p-ca15_a7.dtb':
                    platform_name = 'vexpress-v2p-ca15_a7'
                elif device_tree == 'fsl-ls2080a-simu.dtb':
                    platform_name = 'fsl-ls2080a-simu'
                elif test_plan == 'boot-kvm' or test_plan == 'boot-kvm-uefi':
                    if device_tree == 'sun7i-a20-cubietruck.dtb':
                        if device_type == 'dynamic-vm':
                            device_type = 'cubieboard3-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            device_type = 'cubieboard3-kvm-host'
                            platform_name = device_map[device_type][0]
                    elif device_tree == 'apm-mustang.dtb':
                        if device_type == 'dynamic-vm':
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'mustang-kvm-uefi-guest'
                            else:
                                device_type = 'mustang-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'mustang-kvm-uefi-host'
                            else:
                                device_type = 'mustang-kvm-host'
                            platform_name = device_map[device_type][0]
                    elif device_tree == 'juno.dtb':
                        if device_type == 'dynamic-vm':
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'juno-kvm-uefi-guest'
                            else:
                                device_type = 'juno-kvm-guest'
                            platform_name = device_map[device_type][0]
                        else:
                            if test_plan == 'boot-kvm-uefi':
                                device_type = 'juno-kvm-uefi-host'
                            else:
                                device_type = 'juno-kvm-host'
                            platform_name = device_map[device_type][0]
                elif test_plan == 'boot-nfs' or test_plan == 'boot-nfs-mp':
                    platform_name = device_map[device_type][0] + '_rootfs:nfs'
                else:
                    platform_name = device_map[device_type][0]
            print 'Creating boot log for %s' % platform_name
            log = 'boot-%s.txt' % platform_name
            html = 'boot-%s.html' % platform_name
            if config.get("lab"):
                directory = os.path.join(results_directory, kernel_defconfig, config.get("lab"))
            else:
                directory = os.path.join(results_directory, kernel_defconfig)
            utils.ensure_dir(directory)
            utils.write_file(job_file, log, directory)
            if kernel_boot_time is None:
                kernel_boot_time = '0.0'
            results.setdefault(kernel_defconfig, []).append({
                'device_type': platform_name,
                'dt_test_result': dt_test_result,
                'dt_tests_passed': dt_tests_passed,
                'dt_tests_failed': dt_tests_failed,
                'kernel_boot_time': kernel_boot_time,
                'result': result,
            })
            # Create JSON format boot metadata
            print 'Creating JSON format boot metadata'
            if config.get("lab"):
                boot_meta['lab_name'] = config.get("lab")
            else:
                boot_meta['lab_name'] = None
            if board_instance:
                boot_meta['board_instance'] = board_instance
            boot_meta['retries'] = boot_retries
            boot_meta['boot_log'] = log
            boot_meta['boot_log_html'] = html
            # TODO: Fix this
            boot_meta['version'] = '1.0'
            boot_meta['arch'] = arch
            boot_meta['defconfig'] = kernel_defconfig_base
            if kernel_defconfig_full is not None:
                boot_meta['defconfig_full'] = kernel_defconfig_full
            if device_map[device_type][1]:
                boot_meta['mach'] = device_map[device_type][1]
            boot_meta['kernel'] = kernel_version
            boot_meta['job'] = kernel_tree
            boot_meta['board'] = platform_name
            if board_offline and result == 'FAIL':
                boot_meta['boot_result'] = 'OFFLINE'
                #results[kernel_defconfig]['result'] = 'OFFLINE'
            else:
                boot_meta['boot_result'] = result
            if result == 'FAIL' or result == 'OFFLINE':
                if boot_failure_reason:
                    boot_meta['boot_result_description'] = boot_failure_reason
                else:
                    boot_meta['boot_result_description'] = 'Unknown Error: platform failed to boot'
            boot_meta['boot_time'] = kernel_boot_time
            # TODO: Fix this
            boot_meta['boot_warnings'] = None
            if device_tree:
                if arch == 'arm64':
                    boot_meta['dtb'] = 'dtbs/' + device_map[device_type][1] + '/' + device_tree
                else:
                    boot_meta['dtb'] = 'dtbs/' + device_tree
            else:
                boot_meta['dtb'] = device_tree
            boot_meta['dtb_addr'] = dtb_addr
            boot_meta['dtb_append'] = dtb_append
            boot_meta['dt_test'] = dt_test
            boot_meta['endian'] = kernel_endian
            boot_meta['fastboot'] = fastboot
            # TODO: Fix this
            boot_meta['initrd'] = None
            boot_meta['initrd_addr'] = initrd_addr
            if arch == 'arm':
                boot_meta['kernel_image'] = 'zImage'
            elif arch == 'arm64':
                boot_meta['kernel_image'] = 'Image'
            else:
                boot_meta['kernel_image'] = 'bzImage'
            boot_meta['loadaddr'] = kernel_addr
            json_file = 'boot-%s.json' % platform_name
            utils.write_json(json_file, directory, boot_meta)
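            # log2html.py is assumed to render the plain-text log in place,
            # producing the boot-<platform_name>.html file uploaded below.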
            print 'Creating html version of boot log for %s' % platform_name
            cmd = 'python log2html.py %s' % os.path.join(directory, log)
            subprocess.check_output(cmd, shell=True)
            if config.get("lab") and config.get("api") and config.get("token"):
                print 'Sending boot result to %s for %s' % (config.get("api"), platform_name)
                headers = {
                    'Authorization': config.get("token"),
                    'Content-Type': 'application/json'
                }
                api_url = urlparse.urljoin(config.get("api"), '/boot')
                push('POST', api_url, data=json.dumps(boot_meta), headers=headers)
                headers = {
                    'Authorization': config.get("token"),
                }
                print 'Uploading text version of boot log'
                with open(os.path.join(directory, log)) as lh:
                    data = lh.read()
                api_url = urlparse.urljoin(config.get("api"), '/upload/%s/%s/%s/%s/%s' % (kernel_tree,
                                                                                 kernel_version,
                                                                                 kernel_defconfig,
                                                                                 config.get("lab"),
                                                                                 log))
                push('PUT', api_url, data=data, headers=headers)
                print 'Uploading html version of boot log'
                with open(os.path.join(directory, html)) as lh:
                    data = lh.read()
                api_url = urlparse.urljoin(config.get("api"), '/upload/%s/%s/%s/%s/%s' % (kernel_tree,
                                                                                 kernel_version,
                                                                                 kernel_defconfig,
                                                                                 config.get("lab"),
                                                                                 html))
                push('PUT', api_url, data=data, headers=headers)

    if results and kernel_tree and kernel_version:
        print 'Creating boot summary for %s' % kernel_version
        boot = '%s-boot-report.txt' % kernel_version
        passed = 0
        failed = 0
        for defconfig, results_list in results.items():
            for result in results_list:
                if result['result'] == 'PASS':
                    passed += 1
                else:
                    failed += 1
        total = passed + failed
        if config.get("lab"):
            report_directory = os.path.join(results_directory, config.get("lab"))
            utils.mkdir(report_directory)
        else:
            report_directory = results_directory
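        # The report is written as a ready-to-send mail message (To:/From:/
        # Subject: headers followed by the body) so it can later be piped
        # straight into 'sendmail -t'.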
        with open(os.path.join(report_directory, boot), 'a') as f:
            f.write('To: %s\n' % config.get("email"))
            f.write('From: [email protected]\n')
            f.write('Subject: %s boot: %s boots: %s passed, %s failed (%s)\n' % (kernel_tree,
                                                                                str(total),
                                                                                str(passed),
                                                                                str(failed),
                                                                                kernel_version))
            f.write('\n')
            f.write('Full Build Report: http://kernelci.org/build/%s/kernel/%s/\n' % (kernel_tree, kernel_version))
            f.write('Full Boot Report: http://kernelci.org/boot/all/job/%s/kernel/%s/\n' % (kernel_tree, kernel_version))
            f.write('\n')
            f.write('Total Duration: %.2f minutes\n' % (duration / 60))
            f.write('Tree/Branch: %s\n' % kernel_tree)
            f.write('Git Describe: %s\n' % kernel_version)
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        if first:
                            f.write('\n')
                            f.write('Boards Offline:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'OFFLINE':
                        f.write('    %s   %ss   boot-test: %s\n' % (result['device_type'],
                                                                    result['kernel_boot_time'],
                                                                    result['result']))
                        f.write('\n')
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['result'] == 'FAIL':
                        if first:
                            f.write('\n')
                            f.write('Failed Boot Tests:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['result'] == 'FAIL':
                        f.write('    %s   %ss   boot-test: %s\n' % (result['device_type'],
                                                                    result['kernel_boot_time'],
                                                                    result['result']))
                        if config.get("lab"):
                            f.write('    http://storage.kernelci.org/kernel-ci/%s/%s/%s/%s/boot-%s.html' % (kernel_tree,
                                                                                                            kernel_version,
                                                                                                            defconfig,
                                                                                                            config.get("lab"),
                                                                                                            result['device_type']))
                        else:
                            f.write('    http://storage.kernelci.org/kernel-ci/%s/%s/%s/boot-%s.html' % (kernel_tree,
                                                                                                         kernel_version,
                                                                                                         defconfig,
                                                                                                         result['device_type']))
                        f.write('\n')
            f.write('\n')
            f.write('Full Boot Report:\n')
            for defconfig, results_list in results.items():
                f.write('\n')
                f.write(defconfig)
                f.write('\n')
                for result in results_list:
                    f.write('    %s   %ss   boot-test: %s\n' % (result['device_type'], result['kernel_boot_time'], result['result']))

    # dt-self-test
    if results and kernel_tree and kernel_version and dt_tests:
        print 'Creating device tree runtime self test summary for %s' % kernel_version
        dt_self_test = '%s-dt-runtime-self-test-report.txt' % kernel_version
        passed = 0
        failed = 0
        for defconfig, results_list in results.items():
            for result in results_list:
                if result['dt_test_result'] == 'PASS':
                    passed += 1
                elif result['dt_test_result'] == 'FAIL':
                    failed += 1
        total = passed + failed
        with open(os.path.join(report_directory, dt_self_test), 'a') as f:
            f.write('To: %s\n' % config.get("email"))
            f.write('From: [email protected]\n')
            f.write('Subject: %s dt-runtime-unit-tests: %s boards tested: %s passed, %s failed (%s)\n' % (kernel_tree,
                                                                                                           str(total),
                                                                                                           str(passed),
                                                                                                           str(failed),
                                                                                                           kernel_version))
            f.write('\n')
            f.write('Full Build Report: http://kernelci.org/build/%s/kernel/%s/\n' % (kernel_tree, kernel_version))
            f.write('Full Boot Report: http://kernelci.org/boot/all/job/%s/kernel/%s/\n' % (kernel_tree, kernel_version))
            f.write('Full Test Report: http://kernelci.org/test/%s/kernel/%s/\n' % (kernel_tree, kernel_version))
            f.write('\n')
            f.write('Tree/Branch: %s\n' % kernel_tree)
            f.write('Git Describe: %s\n' % kernel_version)
            first = True
            for defconfig, results_list in results.items():
                for result in results_list:
                    if result['dt_test_result'] == 'FAIL':
                        if first:
                            f.write('\n')
                            f.write('Failed Device Tree Unit Tests:\n')
                            first = False
                        f.write('\n')
                        f.write(defconfig)
                        f.write('\n')
                        break
                for result in results_list:
                    if result['dt_test_result'] == 'FAIL':
                        f.write('    %s   passed: %s / failed: %s   dt-runtime-unit-tests: %s\n' % (result['device_type'],
                                                                                                    result['dt_tests_passed'],
                                                                                                    result['dt_tests_failed'],
                                                                                                    result['dt_test_result']))
                        if config.get("lab"):
                            f.write('    http://storage.kernelci.org/kernel-ci/%s/%s/%s/%s/boot-%s.html' % (kernel_tree,
                                                                                                        kernel_version,
                                                                                                        defconfig,
                                                                                                        config.get("lab"),
                                                                                                        result['device_type']))
                        else:
                            f.write('    http://storage.kernelci.org/kernel-ci/%s/%s/%s/boot-%s.html' % (kernel_tree,
                                                                                                         kernel_version,
                                                                                                         defconfig,
                                                                                                         result['device_type']))
            f.write('\n')
            f.write('\n')
            f.write('Full Unit Test Report:\n')
            for defconfig, results_list in results.items():
                first = True
                for result in results_list:
                    if result['dt_test_result']:
                        if first:
                            f.write('\n')
                            f.write(defconfig)
                            f.write('\n')
                            first = False
                        f.write('    %s   passed: %s / failed: %s   dt-runtime-unit-tests: %s\n' % (result['device_type'],
                                                                                                    result['dt_tests_passed'],
                                                                                                    result['dt_tests_failed'],
                                                                                                    result['dt_test_result']))

    # sendmail
    # Guard on the same condition as the report generation above so that
    # 'boot' and 'report_directory' are guaranteed to exist.
    if config.get("email") and results and kernel_tree and kernel_version:
        print 'Sending e-mail summary to %s' % config.get("email")
        if os.path.exists(report_directory):
            cmd = 'cat %s | sendmail -t' % os.path.join(report_directory, boot)
            subprocess.check_output(cmd, shell=True)
        if dt_tests:
            if os.path.exists(report_directory):
                cmd = 'cat %s | sendmail -t' % os.path.join(report_directory, dt_self_test)
                subprocess.check_output(cmd, shell=True)
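
# The push() helper called throughout this example is defined elsewhere in the
# script. Below is a minimal sketch of what it is assumed to do, inferred only
# from the call sites above (method, url, data, headers); it is named
# _push_sketch so it cannot shadow the real helper, and the use of urllib2 is
# purely illustrative.
def _push_sketch(method, url, data=None, headers=None):
    import urllib2
    request = urllib2.Request(url, data=data, headers=headers or {})
    # urllib2 only issues GET/POST natively; override the method so the same
    # helper can also send PUT requests.
    request.get_method = lambda: method
    try:
        urllib2.urlopen(request)
    except urllib2.HTTPError as err:
        print 'API responded with HTTP %d for %s' % (err.code, url)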