Example #1
def show_dependencies(filename, draw):
    with open(filename) as fp:
        deps = refactor.get_dependencies(ast.parse(fp.read()))
    if draw:
        visualize.draw(deps)
    else:
        click.echo(pprint.pformat(deps))
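
The body reads like a click command whose decorators were stripped during extraction; a minimal sketch of the likely wiring, where the decorator placement and flag name are assumptions:

# Hypothetical CLI wiring; the argument and flag names are assumptions.
import click

@click.command()
@click.argument('filename')
@click.option('--draw', is_flag=True, help='Render the graph instead of printing it.')
def cli(filename, draw):
    show_dependencies(filename, draw)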
def main():

    cInit = [1]
    cBase = [50, 100]
    #modelResults = {(np.log10(init), np.log10(base)): evaluate(init, base) for init, base in it.product(cInit, cBase)}

    numTotalSimulationTimes = [64]
    numTrees = [1]
    subtleties = [500]
    #, 500, 11, 3.3, 1.83, 0.92, 0.31, 0.001]
    #, 50, 3.3, 0.92]
    precisionToSubtletyDict = {
        500: 0,
        50: 5,
        11: 30,
        3.3: 60,
        1.83: 90,
        0.92: 120,
        0.31: 150,
        0.001: 180
    }
    modelResults = {
        (numTree, precisionToSubtletyDict[chasingSubtlety]):
        evaluate(numTree, chasingSubtlety, numTotalSimulation, init, base)
        for numTree, chasingSubtlety, numTotalSimulation, init, base in
        it.product(numTrees, subtleties, numTotalSimulationTimes, cInit, cBase)
    }

    print("Finished evaluating")
    # Visualize
    #independentVariableNames = ['cInit', 'cBase']
    independentVariableNames = ['numTree', 'chasingSubtlety']
    draw(modelResults, independentVariableNames)
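
The modelResults comprehension above is the parameter-sweep pattern that recurs throughout these examples: itertools.product enumerates the full grid of settings, and the dict maps each grid point (keyed by the variables to be plotted) to its result. A minimal self-contained sketch with a stand-in evaluate:

import itertools as it

def evaluate(num_tree, subtlety):
    # Stand-in for the real evaluation; returns a dummy score.
    return num_tree * subtlety

# Key each grid point by its settings, as draw() expects.
grid = {(t, s): evaluate(t, s) for t, s in it.product([1, 2], [500, 50])}
print(grid)  # {(1, 500): 500, (1, 50): 50, (2, 500): 1000, (2, 50): 100}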
Example #3
def main():

    cInit = [0.1, 1, 10]
    cBase = [0.01, 0.1, 1]
    modelResults = {(np.log10(init), np.log10(base)): evaluate(init, base)
                    for init, base in it.product(cInit, cBase)}

    print("Finished evaluating")
    # Visualize
    independentVariableNames = ['cInit', 'cBase']
    draw(modelResults, independentVariableNames)
def worlddet_frame(data, frame, i_frame):
    dets, colors, calib = data
    det = dets[dets['frame_number'] == i_frame]

    frame = draw(frame, det, colors, coords='world', calib=calib)

    return frame
Example #5
def detections_video(detections, videopath, outvideopath, classnames, dataset, res, fps=15, conf_thresh=0.75, show_frame_number=True, coords='pixels'):
    """ Renders a video with the detections drawn on top
    
    Arguments:
    detections        -- the detections as a pandas table
    videopath         -- path to input video
    outvideopath      -- path to output video showing the detections
    classnames        -- list of all the classes
    dataset           -- name of the dataset
    res               -- resolution of output video and coordinates in csv file (assumed to be the same). Probably SSD resolution if performed on direct csv files, and probably the video resolution if performed on csv files with world coordinates
    fps               -- frames-per-second of output video
    conf_thresh       -- Detections with confidences below this are not shown in output video. Set to negative to not visualize confidences, or set to 0.0 to show all of them.   
    show_frame_number -- writes the frame number in the top left corner of the video
    coords            -- coordinate system of detections
    """
    
    masker = Masker(dataset)
    
    calib = None
    if coords == 'world':
        calib = Calibration(dataset)

    num_classes = len(classnames)+1
    colors = class_colors(num_classes)

    outwidth = make_divisible(res[0], 16)
    outheight = make_divisible(res[1], 16)
    pad_vid = True
    if (outwidth == res[0]) and (outheight == res[1]):
        pad_vid = False
    
    with io.get_reader(videopath) as vid:
        with io.get_writer(outvideopath, fps=fps) as outvid:
            for i,frame in enumerate(vid):
                frame = masker.mask(frame, alpha=0.5)
                frame = cv2.resize(frame, (res[0], res[1]))
                
                dets = detections[detections['frame_number']==i]
                if len(dets) > 0:
                    frame = draw(frame, dets, colors, conf_thresh=conf_thresh, coords=coords, calib=calib)
                
                if pad_vid:
                    padded = 255*np.ones((outheight, outwidth, 3), dtype=np.uint8)
                    padded[0:res[1], 0:res[0], :] = frame
                    frame = padded    
                
                if show_frame_number:
                    cv2.putText(frame, 'Frame {}'.format(i), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
                
                outvid.append_data(frame)
                
                if i%500 == 0:
                    print_flush("Frame {}".format(i))
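
A hedged invocation of detections_video; the paths, dataset name, and class list are placeholders, and the CSV is assumed to carry the frame_number, box, and confidence columns used above:

import pandas as pd

# Hypothetical paths and dataset name.
dets = pd.read_csv('sweden2_detections.csv')
detections_video(dets, 'sweden2.mp4', 'sweden2_annotated.mp4',
                 classnames=['car', 'bicycle', 'pedestrian'],
                 dataset='sweden2', res=(640, 480),
                 fps=15, conf_thresh=0.75, coords='pixels')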
def pixeldet_frame(data, frame, i_frame):
    dets, colors, x_scale, y_scale = data
    det = dets[dets['frame_number'] == i_frame]

    frame = draw(frame,
                 det,
                 colors,
                 x_scale=x_scale,
                 y_scale=y_scale,
                 coords='pixels')

    return frame
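
worlddet_frame and pixeldet_frame share a callback signature, (data, frame, i_frame) -> frame, which suggests a generic per-frame driver. The actual caller is not shown; a plausible sketch using the same io reader/writer helpers (assumed here to be imageio):

import imageio as io  # assumption: the io module used above is imageio

def render_video(inpath, outpath, data, frame_fn, fps=15):
    # Feed every frame through the callback and write the result.
    with io.get_reader(inpath) as vid, io.get_writer(outpath, fps=fps) as outvid:
        for i_frame, frame in enumerate(vid):
            outvid.append_data(frame_fn(data, frame, i_frame))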
Example #7
def slideshow(dataset, outpath, fps=10, repeat=20):

    ld = LoadDetections()
    dets = ld.custom(dataset)

    imfiles = list(set(dets.image_file))
    if not imfiles:
        return False

    cc = class_colors()

    mask = Masker(dataset)

    classnames = get_classnames(dataset)

    with io.get_writer(outpath, fps=fps) as vid:
        for imfile in imfiles:
            d = dets[dets.image_file == imfile]

            # Add "class_name" and "class_index" columns which are missing
            d = d.rename(index=str, columns={"type": "class_name"})
            indices = [1 + classnames.index(x) for x in d['class_name']]
            d['class_index'] = indices

            im = io.imread(imfile)
            im = mask.mask(im, alpha=0.5)

            width = float(im.shape[1])
            height = float(im.shape[0])
            frame = draw(im,
                         d,
                         cc,
                         conf_thresh=-1.0,
                         x_scale=width,
                         y_scale=height)

            for i in range(repeat):
                vid.append_data(frame)

    return True
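
A hedged call; the dataset name and output path are placeholders. Note that slideshow returns False when the dataset has no images with custom detections:

# Hypothetical invocation.
if not slideshow('sweden2', 'custom_detections.mp4', fps=10, repeat=20):
    print('No images with custom detections found')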
Example #8
def main(args):

    n_sequences = get_n_sequences(args.infile)
    print("Number of sequences:", n_sequences)

    print("create alignment")
    align(args.infile, args.outfile, n_sequences * 10)

    print("read alignment result")
    headers, sequences = read_fasta(args.outfile)
    print("make consensus")
    consensus_sequence, consensus_frequencies = consensus.consensus(sequences)
    headers.append('consensus')
    sequences.append(consensus_sequence)

    colors = colourschemes.create_colour_scheme(args.colors)
    img = visualize.draw(headers,
                         sequences,
                         consensus_frequencies,
                         AA_colors=colors,
                         max_width=args.width,
                         same_length=args.same_length)
    img.save(args.plotfile)
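
consensus.consensus is not shown; a column-wise majority vote over the aligned sequences is the standard idea, and matches the (sequence, frequencies) pair used above. A minimal sketch under that assumption:

from collections import Counter

def simple_consensus(sequences):
    # Majority residue per alignment column; assumes equal-length aligned input.
    consensus, freqs = [], []
    for col in zip(*sequences):
        residue, count = Counter(col).most_common(1)[0]
        consensus.append(residue)
        freqs.append(count / len(col))
    return ''.join(consensus), freqs

seq, freq = simple_consensus(['ACGT', 'ACGA', 'ACCT'])
print(seq, [round(f, 2) for f in freq])  # ACGT [1.0, 1.0, 0.67, 0.67]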
Example #9
def main():
    # action space
    actionSpace = [[10, 0], [7, 7], [0, 10], [-7, 7], [-10, 0], [-7, -7], [0, -10], [7, -7]]
    numActionSpace = len(actionSpace)

    # state space
    numStateSpace = 4
    xBoundary = [0, 360]
    yBoundary = [0, 360]
    checkBoundaryAndAdjust = ag.CheckBoundaryAndAdjust(xBoundary, yBoundary)

    initSheepPositionMean = np.array([180, 180])
    initWolfPositionMean = np.array([180, 180])
    initSheepPositionNoise = np.array([120, 120])
    initWolfPositionNoise = np.array([60, 60])
    sheepPositionReset = ag.SheepPositionReset(initSheepPositionMean, initSheepPositionNoise, checkBoundaryAndAdjust)
    wolfPositionReset = ag.WolfPositionReset(initWolfPositionMean, initWolfPositionNoise, checkBoundaryAndAdjust)

    numOneAgentState = 2
    positionIndex = [0, 1]

    sheepPositionTransition = ag.SheepPositionTransition(numOneAgentState, positionIndex, checkBoundaryAndAdjust)
    wolfPositionTransition = ag.WolfPositionTransition(numOneAgentState, positionIndex, checkBoundaryAndAdjust)

    numAgent = 2
    sheepId = 0
    wolfId = 1
    transitionFunction = env.TransitionFunction(sheepId, wolfId, sheepPositionReset, wolfPositionReset,
                                                sheepPositionTransition, wolfPositionTransition)
    minDistance = 15
    isTerminal = env.IsTerminal(sheepId, wolfId, numOneAgentState, positionIndex, minDistance)

    screen = pg.display.set_mode([xBoundary[1], yBoundary[1]])
    screenColor = [255, 255, 255]
    circleColorList = [[50, 255, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50],
                       [50, 50, 50], [50, 50, 50], [50, 50, 50]]
    circleSize = 8
    saveImage = False
    saveImageFile = 'image'
    render = env.Render(numAgent, numOneAgentState, positionIndex, screen, screenColor, circleColorList, circleSize,
                        saveImage, saveImageFile)

    aliveBonus = -1
    deathPenalty = 20
    rewardDecay = 0.99
    rewardFunction = reward.TerminalPenalty(sheepId, wolfId, numOneAgentState, positionIndex, aliveBonus, deathPenalty, isTerminal)
    accumulateRewards = PG.AccumulateRewards(rewardDecay, rewardFunction)

    maxTimeStep = 150
    sampleTrajectory = PG.SampleTrajectory(maxTimeStep, transitionFunction, isTerminal)

    approximatePolicy = PG.ApproximatePolicy(actionSpace)
    trainPG = PG.TrainTensorflow(actionSpace)

    numTrajectory = 20
    maxEpisode = 1000

    # Generate models.
    learningRate = 1e-4
    hiddenNeuronNumbers = [128, 256, 512, 1024]
    hiddenDepths = [2, 4, 8]
    # hiddenNeuronNumbers = [128]
    # hiddenDepths = [2]
    generateModel = GeneratePolicyNet(numStateSpace, numActionSpace, learningRate)
    models = {(n, d): generateModel(d, round(n / d)) for n, d in it.product(hiddenNeuronNumbers, hiddenDepths)}
    print("Models generated")

    # Train.
    policyGradient = PG.PolicyGradient(numTrajectory, maxEpisode, render)
    trainModel = lambda model: policyGradient(model, approximatePolicy,
                                              sampleTrajectory,
                                              accumulateRewards,
                                              trainPG)
    trainedModels = {key: trainModel(model) for key, model in models.items()}
    print("Finished training")

    # Evaluate
    modelEvaluate = Evaluate(numTrajectory, approximatePolicy, sampleTrajectory, rewardFunction)
    meanEpisodeRewards = {key: modelEvaluate(model) for key, model in trainedModels.items()}
    print("Finished evaluating")
    # print(meanEpisodeRewards)

    # Visualize
    independentVariableNames = ['NeuroTotalNumber', 'layerNumber']
    draw(meanEpisodeRewards, independentVariableNames)
    print("Finished visualizing", meanEpisodeRewards)
Example #10
def test_on_video(model,
                  name,
                  experiment,
                  videopath,
                  outvideopath,
                  classnames,
                  batch_size=32,
                  input_shape=(480, 640, 3),
                  soft=False,
                  width=480,
                  height=640,
                  conf_thresh=0.75,
                  csv_conf_thresh=0.75):
    """ Applies a trained SSD model to a video
    
    Arguments:
    model           -- the SSD model, e.g. from get_model
    name            -- name of dataset
    experiment      -- name of training run
    videopath       -- path to input video
    outvideopath    -- path to output video showing the detections
    classnames      -- list of all the classes
    batch_size      -- number of images processed in parallel; lower this if you get out-of-memory errors
    input_shape     -- size of images fed to SSD
    soft            -- Whether to do soft NMS or normal NMS
    width           -- Width to scale detections with (can be set to 1 if detections are already on right scale)
    height          -- Height to scale detections with (can be set to 1 if detections are already on right scale)
    conf_thresh     -- Detections with confidences below this are not shown in output video. Set to negative to not visualize confidences.
    csv_conf_thresh -- Detections with confidences below this are ignored. This should be the same as conf_thresh, unless conf_thresh is negative.
    
    """
    masker = Masker(name)

    num_classes = len(classnames) + 1
    colors = class_colors(num_classes)

    make_vid = True
    suffix = outvideopath.split('.')[-1]
    if suffix == 'csv':
        make_vid = False
        csvpath = outvideopath
    else:
        csvpath = outvideopath.replace('.{}'.format(suffix), '.csv')

    print_flush('Generating priors')
    im_in = np.random.random(
        (1, input_shape[1], input_shape[0], input_shape[2]))
    priors = model.predict(im_in, batch_size=1)[0, :, -8:]
    bbox_util = BBoxUtility(num_classes, priors)

    vid = io.get_reader(videopath)
    if make_vid:
        outvid = io.get_writer(outvideopath, fps=30)

    inputs = []
    frames = []

    all_detections = []
    for i, frame in enumerate(vid):
        frame = masker.mask(frame)
        resized = cv2.resize(frame, (input_shape[0], input_shape[1]))

        frames.append(frame.copy())
        inputs.append(resized)

        if len(inputs) == batch_size:
            inputs = np.array(inputs).astype(np.float64)
            inputs = preprocess_input(inputs)

            preds = model.predict(inputs, batch_size=batch_size, verbose=0)
            results = bbox_util.detection_out(preds, soft=soft)

            for result, frame, frame_number in zip(results, frames,
                                                   range(i - batch_size, i)):
                result = [
                    r if len(r) > 0 else np.zeros((1, 6)) for r in result
                ]
                raw_detections = pd.DataFrame(np.vstack(result),
                                              columns=[
                                                  'class_index', 'confidence',
                                                  'xmin', 'ymin', 'xmax',
                                                  'ymax'
                                              ])

                rescale(raw_detections, 'xmin', width)
                rescale(raw_detections, 'xmax', width)
                rescale(raw_detections, 'ymin', height)
                rescale(raw_detections, 'ymax', height)
                rescale(raw_detections, 'class_index', 1)

                ci = raw_detections['class_index']
                cn = [classnames[int(x) - 1] for x in ci]
                raw_detections['class_name'] = cn

                raw_detections['frame_number'] = (frame_number + 2)
                all_detections.append(raw_detections[
                    raw_detections.confidence > csv_conf_thresh])

                if make_vid:
                    frame = draw(frame,
                                 raw_detections,
                                 colors,
                                 conf_thresh=conf_thresh)
                    outvid.append_data(frame)

            frames = []
            inputs = []

        if i % (10 * batch_size) == 0:
            print_flush(i)

    detections = pd.concat(all_detections)

    detections.to_csv(csvpath)
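
A hedged invocation; per the docstring the model comes from get_model (its signature is assumed here), and the paths and class list are placeholders:

# Hypothetical call; get_model's arguments and all paths are assumptions.
model = get_model('sweden2', 'default')
test_on_video(model, 'sweden2', 'default',
              'sweden2.mp4', 'sweden2_detections.mp4',
              classnames=['car', 'bicycle', 'pedestrian'],
              batch_size=32, input_shape=(480, 640, 3),
              width=640, height=480,
              conf_thresh=0.75, csv_conf_thresh=0.75)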
Example #11
from visualize import draw

if __name__ == "__main__":
    test_examples = [
        "+ a * b c", " + * 5 ^ x 2 * 8 x", "+ * 3 ^ * 3 * 2 ^ x 2 x 6"
    ]

    inexp_list = list()
    for ind, test_example in enumerate(test_examples):
        print()
        pre_exp = parse_input(test_example)
        in_exp = pre2in(pre_exp)
        post_exp = in2post(in_exp)
        pre_exp = in2pre(in_exp)

        print("prefix expression:", print_expression(pre_exp))
        print("infix expression:", print_expression(in_exp))
        print("postfix expression:", print_expression(post_exp))
        # print("write expression from infix expression:", write_exp(in_exp))

        if ind == 1 or ind == 2:
            assigned_post = assign("x", 3, post_exp)
            print("assign result:", cal_postfix_expression(assigned_post))
            inexp_list.append(in_exp)

    # Visualize
    root_add_id = PostExpTree(in2post(
        compound_inexp(inexp_list[0], inexp_list[1])),
                              add_id=True)
    draw(root_add_id)
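
parse_input, pre2in, and the other helpers are not shown; the core of prefix-to-infix conversion is a single recursive descent over the token stream. A minimal sketch with assumed semantics (the real pre2in presumably builds tree nodes rather than strings):

OPS = {'+', '-', '*', '/', '^'}

def pre_to_in(tokens):
    # Consume one prefix token; operators recurse for their two operands.
    tok = tokens.pop(0)
    if tok in OPS:
        left = pre_to_in(tokens)
        right = pre_to_in(tokens)
        return f'({left} {tok} {right})'
    return tok

print(pre_to_in('+ a * b c'.split()))  # (a + (b * c))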
Example #12
	dataSet = PD.loadData(dataSetPath)
	random.shuffle(dataSet)

	trainingDataSizes = [8000] #list(range(10000, 10000, 1000))
	trainingDataList = [([state for state, _ in dataSet[:size]], [label for _, label in dataSet[:size]]) for size in trainingDataSizes]
	trainingData = trainingDataList[0]
	learningRate = 0.0001
	regularizationFactor = 1e-4
	generatePolicyNet = NN.GeneratePolicyNet(numStateSpace, numActionSpace, learningRate, regularizationFactor)
	policyNetDepth = [2, 3, 4, 5]
	policyNetWidth = [32, 64, 128, 256]
	nnList = it.product(policyNetDepth, policyNetWidth)
	models = {(width, depth): generatePolicyNet(depth, width) for depth, width in nnList}

	maxStepNum = 50000
	reportInterval = 500
	lossChangeThreshold = 1e-6
	lossHistorySize = 10
	train = SL.Train(maxStepNum, learningRate, lossChangeThreshold, lossHistorySize, reportInterval,
	                 summaryOn=False, testData=None)

	trainedModels = {key: train(model, trainingData) for key, model in models.items()}

	evalTrain = {key: SL.evaluate(model, trainingData) for key, model in trainedModels.items()}

	evaluateDataPath = 'NeuralNetworkEvaluation.pkl'
	with open(evaluateDataPath, "wb") as file:
		pickle.dump(evalTrain, file)
	VI.draw(evalTrain, ["neurons per layer", "layer"], ["Loss", "Accuracy"])
Example #13
def main():
    #tf.set_random_seed(123)
    #np.random.seed(123)

    actionSpace = [[10, 0], [7, 7], [0, 10], [-7, 7], [-10, 0], [-7, -7],
                   [0, -10], [7, -7]]
    numActionSpace = len(actionSpace)
    numStateSpace = 4

    xBoundary = [0, 360]
    yBoundary = [0, 360]
    checkBoundaryAndAdjust = ag.CheckBoundaryAndAdjust(xBoundary, yBoundary)

    initSheepPosition = np.array([180, 180])
    initWolfPosition = np.array([180, 180])
    initSheepVelocity = np.array([0, 0])
    initWolfVelocity = np.array([0, 0])
    initSheepPositionNoise = np.array([120, 120])
    initWolfPositionNoise = np.array([60, 60])
    sheepPositionReset = ag.SheepPositionReset(initSheepPosition,
                                               initSheepPositionNoise,
                                               checkBoundaryAndAdjust)
    wolfPositionReset = ag.WolfPositionReset(initWolfPosition,
                                             initWolfPositionNoise,
                                             checkBoundaryAndAdjust)

    numOneAgentState = 2
    positionIndex = [0, 1]

    sheepPositionTransition = ag.SheepPositionTransition(
        numOneAgentState, positionIndex, checkBoundaryAndAdjust)
    wolfPositionTransition = ag.WolfPositionTransition(numOneAgentState,
                                                       positionIndex,
                                                       checkBoundaryAndAdjust)

    numAgent = 2
    sheepId = 0
    wolfId = 1
    transitionFunction = env.TransitionFunction(sheepId, wolfId,
                                                sheepPositionReset,
                                                wolfPositionReset,
                                                sheepPositionTransition,
                                                wolfPositionTransition)
    minDistance = 15
    isTerminal = env.IsTerminal(sheepId, wolfId, numOneAgentState,
                                positionIndex, minDistance)

    screen = pg.display.set_mode([xBoundary[1], yBoundary[1]])
    screenColor = [255, 255, 255]
    circleColorList = [[50, 255, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50],
                       [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50],
                       [50, 50, 50]]
    circleSize = 8
    saveImage = False
    saveImageFile = 'image'
    render = env.Render(numAgent, numOneAgentState, positionIndex, screen,
                        screenColor, circleColorList, circleSize, saveImage,
                        saveImageFile)

    aliveBonus = -1
    deathPenalty = 20
    rewardDecay = 0.99
    rewardFunction = reward.RewardFunctionTerminalPenalty(
        sheepId, wolfId, numOneAgentState, positionIndex, aliveBonus,
        deathPenalty, isTerminal)
    accumulateReward = A2CMC.AccumulateReward(rewardDecay, rewardFunction)

    maxTimeStep = 150
    sampleTrajectory = A2CMC.SampleTrajectory(maxTimeStep, transitionFunction,
                                              isTerminal)

    approximatePolicy = A2CMC.ApproximatePolicy(actionSpace)
    approximateValue = A2CMC.approximateValue
    trainCritic = A2CMC.TrainCriticMonteCarloTensorflow(accumulateReward)
    estimateAdvantage = A2CMC.EstimateAdvantageMonteCarlo(accumulateReward)
    trainActor = A2CMC.TrainActorMonteCarloTensorflow(actionSpace)

    numTrajectory = 5
    maxEpisode = 1
    actorCritic = A2CMC.OfflineAdvantageActorCritic(numTrajectory, maxEpisode,
                                                    render)

    # Generate models.
    learningRateActor = 1e-4
    learningRateCritic = 3e-4
    hiddenNeuronNumbers = [128, 256, 512, 1024]
    hiddenDepths = [2, 4, 8]
    generateModel = GenerateActorCriticModel(numStateSpace, numActionSpace,
                                             learningRateActor,
                                             learningRateCritic)
    modelDict = {(n, d): generateModel(d, round(n / d))
                 for n, d in it.product(hiddenNeuronNumbers, hiddenDepths)}

    print("Generated graphs")
    # Train.
    actorCritic = A2CMC.OfflineAdvantageActorCritic(numTrajectory, maxEpisode,
                                                    render)
    modelTrain = lambda actorModel, criticModel: actorCritic(
        actorModel, criticModel, approximatePolicy, sampleTrajectory,
        trainCritic, approximateValue, estimateAdvantage, trainActor)
    trainedModelDict = {
        key: modelTrain(model[0], model[1])
        for key, model in modelDict.items()
    }

    print("Finished training")
    # Evaluate
    modelEvaluate = Evaluate(numTrajectory, approximatePolicy,
                             sampleTrajectory, rewardFunction)
    meanEpisodeRewards = {
        key: modelEvaluate(model[0], model[1])
        for key, model in trainedModelDict.items()
    }

    print("Finished evaluating")
    # Visualize
    independentVariableNames = ['NeuroTotalNumber', 'layerNumber']
    draw(meanEpisodeRewards, independentVariableNames)
Example #14

                                        NUM_OF_INDIVIDUALS)
                average_fitness.append(calc_average_fitness(population))
                gen += 1
            results = []

            # Storing the final Rank 1 solutions
            for key, value in population.items():
                if value['Rank'] == 1:
                    results.append(value['result'])

            # Plot using plotly
            color_index = vis.draw_solution(pieces=packages)
            vis.draw(results, color_index)

            # Plot using matplotlib
            color_index = vis2.draw(pieces=packages, title="True Solution Packing")
            for each in results:
                vis2.draw(each, color_index, title="Rank 1 Solution Packing For Iteration {}".format(i))
            draw_pareto(population)
            average_vol.append(average_fitness[-1][0])
            average_num.append(average_fitness[-1][1])
            average_value.append(average_fitness[-1][2])
            plot_stats(average_fitness,
                       title="Average Fitness Values for Run {} over {} generations".format(i + 1,
                                                                                            NUM_OF_GENERATIONS))

        print(tabulate(
            [['Problem Set', p_ind], ['Runs', NUM_OF_ITERATIONS], ['Avg. Volume%', sum(average_vol) / len(average_vol)],
             ['Avg. Number%', sum(average_num) / len(average_num)],
             ['Avg. Value%', sum(average_value) / len(average_value)]],
            headers=['Parameter', 'Value'], tablefmt="github"))
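
The summary relies on tabulate's "github" format, which prints a Markdown pipe table. A tiny standalone demo (values made up):

from tabulate import tabulate

# Prints a pipe table with a header row and alignment markers.
rows = [['Problem Set', 1], ['Runs', 5], ['Avg. Volume%', 87.5]]
print(tabulate(rows, headers=['Parameter', 'Value'], tablefmt='github'))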
Example #15

                                             regularizationFactor)
    models = [generatePolicyNet(3, 32) for _ in range(len(trainingDataSizes))]

    maxStepNum = 50000
    reportInterval = 500
    lossChangeThreshold = 1e-6
    lossHistorySize = 10
    train = SL.Train(maxStepNum,
                     learningRate,
                     lossChangeThreshold,
                     lossHistorySize,
                     reportInterval,
                     summaryOn=False,
                     testData=None)

    trainedModels = [
        train(model, data) for model, data in zip(models, trainingDataList)
    ]
    # evalResults = [(SL.evaluate(model, trainingData), SL.evaluate(model, testData)) for trainingData, model in zip(trainingDataList, trainedModels)]

    evalTrain = {
        ("Train", len(trainingData[0])): SL.evaluate(model, trainingData)
        for trainingData, model in zip(trainingDataList, trainedModels)
    }
    evalTest = {("Test", len(trainingData[0])): SL.evaluate(model, testData)
                for trainingData, model in zip(trainingDataList, trainedModels)
                }
    evalTrain.update(evalTest)

    VI.draw(evalTrain, ["mode", "training_set_size"], ["Loss", "Accuracy"])
Example #16
def run(f, i, cH, cD, vH, vW, minS, maxS, dirMin, dirMax, pNum):
    global out2, out3
    frameNum = int(f)
    iterationTime = int(i)
    cameraHeight = cH
    cameraDistance = cD
    viewHeight = vH
    viewWidth = vW
    minSpeed = minS
    maxSpeed = maxS
    walkingDirectionRange = [dirMin, dirMax]
    personNum = int(pNum)

    start_position = (0, 0)
    box_width_height = (10, 10)
    moving_speed = 10
    gap = 1

    randomX = 0
    randomY = 360


    # perform random walk
    for j in range(0, iterationTime):
        personList = []
        for k in range(0, personNum):
            newPerson = loadPersonOne(cwd, viewWidth, viewHeight)
            newPerson.startFromEdge()
            personList.append(newPerson)
            randomNoiseIndex = randint(10, 40)

        for i in range(1, frameNum + int(frameNum/10) + 1):
            currPoseList = []
            # update every person
            for person in personList:
                speedOne = randint(minSpeed, maxSpeed)
                direction = 0
                if i % 20 == 0:
                    randX , randY = getRandomRange()
                    direction += randint(randX, randY)
                person.walk(speedOne, direction)
                currPose = person.getCurrentWalkingPose()
                if i == randomNoiseIndex:
                    currPose = createRandomNoise(currPose)
                currPoseList.append(currPose)

                minX, minY, maxX, maxY = currPose.getBound()
                if minX < 0 or minY < 0 or maxX >= 1980 or maxY >= 1080:
                    out3.write("-1 -1 -1 -1 ")
                    # for n in currPoseList[0].getPoseNodes():
                    #     n.invalid()
                else:
                    out3.write(str(minX) + " " + str(minY) + " " + str(maxX) + " " + str(maxY) + " ")

            # specify the output file order
            inputDataWithRankingOrder(currPoseList)

            out2.write("\n")
            out3.write("\n")


    out2.close()
    out3.close()

    #inputData = open(cwd + "/data/inputData.txt", 'w')
    #outputData = open(cwd + "/data/outputData.txt", 'w')

    inputData = open("/Users/mars/Desktop/PData/inputData.txt", 'w')
    outputData = open("/Users/mars/Desktop/PData/outputData.txt", 'w')

    out2 = open(cwd + "/data/out2.txt", 'r')
    out3 = open(cwd + "/data/out3.txt", 'r')

    # workaround for a file-handling quirk: drop blank lines and invalid bounding boxes
    for line in out2:
        if line != "\n":
            inputData.write(line)

    for line in out3:
        b = "100000 100000 -1 -1 " in line
        if b == False:
            outputData.write(line)
    # ***********************************************************************

    inputData.close()
    outputData.close()

    draw()