This script plots a trajectory into an image sequence. 
    ''')
    parser.add_argument('image_list',
                        help='input image list (format: timestamp filename)')
    parser.add_argument(
        'trajectory_file',
        help='input trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('out_image',
                        help='file name of the result (format: png)')
    args = parser.parse_args()

    image_list = read_file_list(args.image_list)
    pose_list = read_file_list(args.trajectory_file)
    traj = read_trajectory(args.trajectory_file)

    matches = associate(image_list, pose_list, 0, 0.02)

    stamps = image_list.keys()
    stamps.sort()

    matches_dict = dict(matches)
    for stamp in stamps:
        image_file = image_list[stamp][0]
        image = Image.open(image_file)
        print "image stamp: %f" % stamp

        if stamp in matches_dict:
            print "pose stamp: %f" % matches_dict[stamp]
            pose = traj[matches_dict[stamp]]

            stamps = traj.keys()
Example No. 2
        help='save individual point clouds (instead of one large point cloud)',
        action='store_true')
    parser.add_argument(
        '--pcd_format',
        help='Write pointclouds in pcd format (implies --individual)',
        action='store_true')

    parser.add_argument('output_file', help='output PLY file (format: ply)')
    args = parser.parse_args()

    rgb_list = read_file_list(args.rgb_list)
    depth_list = read_file_list(args.depth_list)
    pose_list = read_file_list(args.trajectory_file)

    matches_rgb_depth = dict(
        associate(rgb_list, depth_list, float(args.depth_offset),
                  float(args.depth_max_difference)))
    matches_rgb_traj = associate(matches_rgb_depth, pose_list,
                                 float(args.traj_offset),
                                 float(args.traj_max_difference))
    matches_rgb_traj.sort()

    if args.pcd_format:
        args.individual = True
        traj = read_trajectory(args.trajectory_file, False)
    else:
        traj = read_trajectory(args.trajectory_file)

    all_points = []
    list = range(0, len(matches_rgb_traj), int(args.nth))
    for frame, i in enumerate(list):
        rgb_stamp, traj_stamp = matches_rgb_traj[i]
Example No. 3
    while (t < NGer):

        ##        if(t == 150):
        ##            weight = array([0.0,1.0,3.0,10.0,5.0]) # Array of weights used for TOPSIS

        Rt = Pt.addPopulation(Qt)  # Combined population
        Rt.fastNonDominatedSort()  # Nondominated sorting
        ##    Rt.topsisPop(rank=rankType) # Rank within each front
        ##    Rt.globalRankEval()

        if (sampleAll):

            RtObj = Rt.obj  # Save original objective values of St
            Zr, a = normalize(Rt, objRec, minim, Z, p)
            ZRef = Zr * a + objRec.objIdeal
            associate(Rt, Zr, 0)
            ##            distPareto(Rt,ZRef,objRec.objIdeal,a)
            hvContribution(Rt, objRec.objIdeal, a)
            indRemove = niching(NPop, Rt, array([0, 2 * NPop]), weight,
                                multiple)
            Pt = Rt.removeMembers(indRemove, RtObj)

        else:

            indList = zeros(
                NPop,
                dtype=int)  # List of indexes of members to the next population
            i = 0  # Counter of fronts
            sizeEvol = array([
                0, len(Rt.fronts[i])
            ])  # Evolution of population's size by adding the fronts
    

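Example No. 4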
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='''
    This script plots a trajectory into an image sequence. 
    ''')
    parser.add_argument('image_list', help='input image list (format: timestamp filename)')
    parser.add_argument('trajectory_file', help='input trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('out_image', help='file name of the result (format: png)')
    args = parser.parse_args()
    
    image_list = read_file_list(args.image_list)
    pose_list = read_file_list(args.trajectory_file)
    traj = read_trajectory(args.trajectory_file)

    matches = associate(image_list, pose_list,0,0.02)

    stamps = image_list.keys()
    stamps.sort()
    
    matches_dict = dict(matches)
    for stamp in stamps:
        image_file = image_list[stamp][0]
        image = Image.open(image_file)
        print "image stamp: %f"%stamp
        
        if stamp in matches_dict: 
            print "pose stamp: %f"%matches_dict[stamp]
            pose = traj[matches_dict[stamp]]
            
            stamps = traj.keys()
Example No. 5
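# Imports assumed by this example but not included in the scraped snippet; the helpers
# read_file_list, associate, read_trajectory, write_ply and write_pcd are expected to
# come from the accompanying TUM RGB-D benchmark tools.
import argparse
import os
import struct
import numpy
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection used in plot_camera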
def plot_camera(cams, rvec, tvec, vertices, dim=(0.5, 1)):
    ax = plt.gca(projection='3d')

    tvec = tvec.reshape((3, 1)).astype(np.float)
    rvec = rvec.reshape((3, 1)).astype(np.float)
    
    rmat = cv2.Rodrigues(rvec.astype(np.float))[0]
    
#    if len(oc) == 0:        # 'oc' / 'c' are plot colours referenced only by the
#        oc = c              # commented-out matplotlib calls in this function
    
    if len(dim) == 1:
        offset = float(dim[0])
        length = offset
    else:
        offset = float(dim[0])
        length = float(dim[1])
    
#    cam = np.tile(tvec, (1, 5))
    cam = np.zeros([3, 5])
    cam[0, 1:3] += offset
    cam[0, 3:] -= offset
    cam[1, 1::2] += offset
    cam[1, 2::2] -= offset
    cam[2, 1:] += 2.5 * length
    
#    cam = rmat.dot(cam - tvec) + tvec
    cam = rmat.T.dot(cam - tvec)
    vertexNo = len(cams) / 5  # each camera contributes 5 frustum points; cams is a list of strings
    #    ax.plot3D(cam[0, [0, 1]], cam[1, [0, 1]], cam[2, [0, 1]], color=c)
#    ax.plot3D(cam[0, [0, 2]], cam[1, [0, 2]], cam[2, [0, 2]], color=c)
#    ax.plot3D(cam[0, [0, 3]], cam[1, [0, 3]], cam[2, [0, 3]], color=c)
#    ax.plot3D(cam[0, [0, 4]], cam[1, [0, 4]], cam[2, [0, 4]], color=c)
#    ax.plot3D(cam[0, [1, 2]], cam[1, [1, 2]], cam[2, [1, 2]], color=c)
#    ax.plot3D(cam[0, [2, 4]], cam[1, [2, 4]], cam[2, [2, 4]], color=c)
#    ax.plot3D(cam[0, [3, 4]], cam[1, [3, 4]], cam[2, [3, 4]], color=c)
#    ax.plot3D(cam[0, [3, 1]], cam[1, [3, 1]], cam[2, [3, 1]], color=c)
    for i in range(8):
        for j in range(i + 1, 8):
            vertices.append("%d %d %d %d %d" % (i, j, 255, 0, 0))

    for i in range(cam.shape[1]):
        cams.append("%f %f %f %d %d %d 0\n" % (cam[0, i], cam[1, i], cam[2, i], 255, 0, 0))
        
    #return points.append("%f %f %f %d %d %d 0\n"%(vec_transf[0,0],vec_transf[1,0],vec_transf[2,0],255,0,0))
#    print 'cam', cam[0, 0], cam[1, 0], cam[2, 0]
#    ax.scatter(cam[0, 0], cam[1, 0], cam[2, 0], color=oc, marker='.')
#    ax.scatter(cam[0, 1::], cam[1, 1::], cam[2, 1::], color=c, marker='.')
#    ax.plot3D(cam[0, [0, 1]], cam[1, [0, 1]], cam[2, [0, 1]], color=c)
#    ax.plot3D(cam[0, [0, 2]], cam[1, [0, 2]], cam[2, [0, 2]], color=c)
#    ax.plot3D(cam[0, [0, 3]], cam[1, [0, 3]], cam[2, [0, 3]], color=c)
#    ax.plot3D(cam[0, [0, 4]], cam[1, [0, 4]], cam[2, [0, 4]], color=c)
#    ax.plot3D(cam[0, [1, 2]], cam[1, [1, 2]], cam[2, [1, 2]], color=c)
#    ax.plot3D(cam[0, [2, 4]], cam[1, [2, 4]], cam[2, [2, 4]], color=c)
#    ax.plot3D(cam[0, [3, 4]], cam[1, [3, 4]], cam[2, [3, 4]], color=c)
#    ax.plot3D(cam[0, [3, 1]], cam[1, [3, 1]], cam[2, [3, 1]], color=c)
    
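# Module-level camera parameters used by the point cloud functions below; they are not
# defined in this snippet. The values here are the usual TUM RGB-D (Freiburg) defaults,
# given only as an assumption: replace them with your own calibration.
focalLength = 525.0
centerX = 319.5
centerY = 239.5
scalingFactor = 5000.0  # depth images store depth in units of 1/5000 m
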
def generate_pointcloudWithCamera(rgb_file,depth_file,transform,downsample,pcd=False):
    """
    Generate a colored point cloud 
    
    Input:
    rgb_file -- filename of color image
    depth_file -- filename of depth image
    transform -- camera pose, specified as a 4x4 homogeneous matrix
    downsample -- downsample point cloud in x/y direction
    pcd -- true: output in (binary) PCD format
           false: output in (text) PLY format
           
    Output:
    list of colored points (either in binary or text format, see pcd flag)
    """
    
    rgb = Image.open(rgb_file)
    depth = Image.open(depth_file)
    
    if rgb.size != depth.size:
        raise Exception("Color and depth image do not have the same resolution.")
    if rgb.mode != "RGB":
        raise Exception("Color image is not in RGB format")
    if depth.mode != "I":
        raise Exception("Depth image is not in intensity format")

    points = []    
    for v in range(0,rgb.size[1],downsample):
        for u in range(0,rgb.size[0],downsample):
            color = rgb.getpixel((u,v))
            Z = depth.getpixel((u,v)) / scalingFactor
            if Z==0: continue
            X = (u - centerX) * Z / focalLength
            Y = (v - centerY) * Z / focalLength
            vec_org = numpy.matrix([[X],[Y],[Z],[1]])
            if pcd:
              points.append(struct.pack("fffI",vec_org[0,0],vec_org[1,0],vec_org[2,0],color[0]*2**16+color[1]*2**8+color[2]*2**0))
            else:
              vec_transf = numpy.dot(transform,vec_org)
              points.append("%f %f %f %d %d %d 0\n"%(vec_transf[0,0],vec_transf[1,0],vec_transf[2,0],color[0],color[1],color[2]))
            
    return points
    
def generate_pointcloud(rgb_file,depth_file,transform,downsample,pcd=False):
    """
    Generate a colored point cloud 
    
    Input:
    rgb_file -- filename of color image
    depth_file -- filename of depth image
    transform -- camera pose, specified as a 4x4 homogeneous matrix
    downsample -- downsample point cloud in x/y direction
    pcd -- true: output in (binary) PCD format
           false: output in (text) PLY format
           
    Output:
    list of colored points (either in binary or text format, see pcd flag)
    """
    
    rgb = Image.open(rgb_file)
    depth = Image.open(depth_file)
    
    if rgb.size != depth.size:
        raise Exception("Color and depth image do not have the same resolution.")
    if rgb.mode != "RGB":
        raise Exception("Color image is not in RGB format")
    if depth.mode != "I":
        raise Exception("Depth image is not in intensity format")

    points = []    
    for v in range(0,rgb.size[1],downsample):
        for u in range(0,rgb.size[0],downsample):
            color = rgb.getpixel((u,v))
            Z = depth.getpixel((u,v)) / scalingFactor
            if Z==0: continue
            X = (u - centerX) * Z / focalLength
            Y = (v - centerY) * Z / focalLength
            vec_org = numpy.matrix([[X],[Y],[Z],[1]])
            if pcd:
              points.append(struct.pack("fffI",vec_org[0,0],vec_org[1,0],vec_org[2,0],color[0]*2**16+color[1]*2**8+color[2]*2**0))
            else:
              vec_transf = numpy.dot(transform,vec_org)
              points.append("%f %f %f %d %d %d 0\n"%(vec_transf[0,0],vec_transf[1,0],vec_transf[2,0],color[0],color[1],color[2]))
            
    return points

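# The main block below calls write_ply and write_pcd, which are not part of this snippet.
# A minimal write_ply sketch for the ASCII "x y z r g b alpha" point strings produced
# above (a hypothetical helper added only to make the example self-contained; write_pcd
# needs the binary PCD header and is omitted here):
def write_ply(ply_file, points):
    with open(ply_file, "w") as f:
        f.write("ply\n"
                "format ascii 1.0\n"
                "element vertex %d\n"
                "property float x\n"
                "property float y\n"
                "property float z\n"
                "property uchar red\n"
                "property uchar green\n"
                "property uchar blue\n"
                "property uchar alpha\n"
                "end_header\n" % len(points))
        f.write("".join(points))
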
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='''
    This script reads a registered pair of color and depth images and generates a colored 3D point cloud in the
    PLY format. 
    ''')
    parser.add_argument('rgb_list', help='input color image (format: timestamp filename)')
    parser.add_argument('depth_list', help='input depth image (format: timestamp filename)')
    parser.add_argument('trajectory_file', help='input trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('--depth_offset', help='time offset added to the timestamps of the depth file (default: 0.00)',default=0.00)
    parser.add_argument('--depth_max_difference', help='maximally allowed time difference for matching rgb and depth entries (default: 0.02)',default=0.02)
    parser.add_argument('--traj_offset', help='time offset added to the timestamps of the trajectory file (default: 0.00)',default=0.00)
    parser.add_argument('--traj_max_difference', help='maximally allowed time difference for matching rgb and traj entries (default: 0.01)',default=0.01)
    parser.add_argument('--downsample', help='downsample images by this factor (default: 1)',default=1)
    parser.add_argument('--nth', help='only consider every nth image pair (default: 1)',default=1)
    parser.add_argument('--individual', help='save individual point clouds (instead of one large point cloud)', action='store_true')
    parser.add_argument('--pcd_format', help='Write pointclouds in pcd format (implies --individual)', action='store_true')
    
    parser.add_argument('output_file', help='output PLY file (format: ply)')
    args = parser.parse_args()

    rgb_list = read_file_list(args.rgb_list)
    depth_list = read_file_list(args.depth_list)
    pose_list = read_file_list(args.trajectory_file)
    #print np.shape(rgb_list),np.shape(depth_list),np.shape(pose_list)
    matches_rgb_depth = dict(associate(rgb_list, depth_list,float(args.depth_offset),float(args.depth_max_difference)))    
    matches_rgb_traj = associate(matches_rgb_depth, pose_list,float(args.traj_offset),float(args.traj_max_difference))
    matches_rgb_traj.sort()
    #print np.shape(matches_rgb_traj)
    if args.pcd_format:
      args.individual = True
      traj = read_trajectory(args.trajectory_file, False)
    else:
      traj = read_trajectory(args.trajectory_file)
    
    all_points = []
    list  = range(0,len(matches_rgb_traj),int(args.nth))
    for frame,i in enumerate(list):
        rgb_stamp,traj_stamp = matches_rgb_traj[i]

        if args.individual:
          if args.pcd_format:
            out_filename = "%s-%f.pcd"%(os.path.splitext(args.output_file)[0],rgb_stamp)
          else:
            out_filename = "%s-%f.ply"%(os.path.splitext(args.output_file)[0],rgb_stamp)
          if os.path.exists(out_filename):
            print "skipping existing cloud file ", out_filename
            continue

        rgb_file = rgb_list[rgb_stamp][0]
        depth_file = depth_list[matches_rgb_depth[rgb_stamp]][0]
        pose = traj[traj_stamp]
        points = generate_pointcloud(rgb_file,depth_file,pose,int(args.downsample), args.pcd_format)

        if args.individual:
          if args.pcd_format:
            write_pcd(out_filename,points,pose)
          else:
            write_ply(out_filename,points)
        else:
            all_points += points
            print "Frame %d/%d, number of points so far: %d"%(frame+1,len(list),len(all_points))
        if (frame + 1 > 50):
            break
            
    if not args.individual:
      write_ply(args.output_file,all_points)
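Example No. 6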
    parser.add_argument('--depth_max_difference', help='maximally allowed time difference for matching rgb and depth entries (default: 0.02)',default=0.02)
    parser.add_argument('--traj_offset', help='time offset added to the timestamps of the trajectory file (default: 0.00)',default=0.00)
    parser.add_argument('--traj_max_difference', help='maximally allowed time difference for matching rgb and traj entries (default: 0.01)',default=0.01)
    parser.add_argument('--downsample', help='downsample images by this factor (default: 1)',default=1)
    parser.add_argument('--nth', help='only consider every nth image pair (default: 1)',default=1)
    parser.add_argument('--individual', help='save individual point clouds (instead of one large point cloud)', action='store_true')
    parser.add_argument('--pcd_format', help='Write pointclouds in pcd format (implies --individual)', action='store_true')
    
    parser.add_argument('output_file', help='output PLY file (format: ply)')
    args = parser.parse_args()

    rgb_list = read_file_list(args.rgb_list)
    depth_list = read_file_list(args.depth_list)
    pose_list = read_file_list(args.trajectory_file)

    matches_rgb_depth = dict(associate(rgb_list, depth_list,float(args.depth_offset),float(args.depth_max_difference)))    
    matches_rgb_traj = associate(matches_rgb_depth, pose_list,float(args.traj_offset),float(args.traj_max_difference))
    matches_rgb_traj.sort()
    
    if args.pcd_format:
      args.individual = True
      traj = read_trajectory(args.trajectory_file, False)
    else:
      traj = read_trajectory(args.trajectory_file)
    
    all_points = []
    list  = range(0,len(matches_rgb_traj),int(args.nth))
    for frame,i in enumerate(list):
        rgb_stamp,traj_stamp = matches_rgb_traj[i]

        if args.individual:
Example No. 7
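# Imports assumed by this example but not included in the scraped snippet: json, time,
# pickle, matplotlib.pyplot as plt, a star-style import from numpy (array, zeros, ones,
# append, arange, r_, random, mean), and the project's own helpers (Population,
# objectiveRecords, generateRefPoints, normalize, associate, hvContribution, niching,
# calc_coefGau, HyperVolume, convergence).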
def runMOMCEDA(NPop,
               NEval,
               function,
               Nref,
               nReps,
               RTPlot,
               refPoint,
               weight,
               seed=None):

    print 'Running MOMCEDA\n'

    NGer = NEval / NPop - 1  # Number of generations
    NObj = 2  # Number of objectives to optimize
    minim = 1  # minim = 1 if minimizing objectives, minim = 0 otherwise
    p = Nref - 1  # Number of objective axes divisions to generate structured ref. points

    random.seed(seed)

    if (function == 'ZDT4'):
        Nvar = 10
        Vmin = append(0, -5 * ones(Nvar - 1))  # Limits of chromosome values
        Vmax = append(1, 5 * ones(Nvar - 1))
        sigma = append(1.0, 0.1 * ones(Nvar - 1))  # Mutation parameter
    else:
        Nvar = 30
        if (function == 'ZDT6'):
            Nvar = 10
        Vmin = 0.0 * ones(Nvar)  # Limits of chromosome values
        Vmax = 1.0 * ones(Nvar)
        sigma = 1.0 * ones(Nvar) / 2.0  # Mutation parameter

    minObj = array([0.0, 0.0])  # Limits of objective values
    maxObj = array([1.0, 1.0])

    objRec = objectiveRecords(NObj, minim)  # Records for objective values

    Z = generateRefPoints(NObj, p)  # Generate structured reference points
    rankType = 'hv'  # Type of rank used for TOPSIS

    multiple = True   # multiple: use multiple criteria to select solutions from the last front
    sampleAll = True  # sampleAll: if true, all members of the parent population are sampled with the TOPSIS rank

    hvValues = zeros((nReps, NGer))
    conv = []
    finalPop = []
    extime = []

    # Plot parameters
    color = plt.cm.get_cmap('Reds')
    deltac = 0.3
    ObjNames = ['f1', 'f2', 'f3']
    scale = 1.0 / (maxObj - minObj)
    center = array([0.0, 0.0, 0.0])
    countFig = 0  # counter of figures

    ## Offspring parameters

    pMut = 1.0 / Nvar  # Mutation probability
    pSwitch = 0.5  # Probability to switch variables between members
    spread = 0.5  # Parameter to control the spread of generated members
    nc = 30
    nm = 20

    ##sigma = append(1.0,0.1*ones(Nvar-1))
    ##sigma0 = r_[array([1.0]),1.0/10*ones(Nvar-1)]
    ##sigmaf = r_[array([1.0/10]),1.0/500*ones(Nvar-1)]
    ###deltaSigma = (sigma0 - sigmaf) / NGer
    ##qSigma = (sigmaf/sigma)**(1.0/NGer)
    ##sigma = sigma0

    spreadf = 0.05
    qSpread = (spreadf / spread)**(1.0 / NGer)
    deltaSpread = (spread - spreadf) / NGer

    dist = zeros((NGer, Nvar))

    ## Distribution variables

    # Coeffients of the gaussians of the mixture

    ##choiceOK = False
    ##while(not choiceOK):
    ##    dec = input('Choose decay type:\n(1) Linear\n(2) Exponential\n(3) Logarithmic\n')
    ##    if(dec in [1,2,3]):
    ##        choiceOK = True

    dec = 1  # decay type:1 - Linear, 2 - Exponential, 3 - Logarithmic
    coefGau = calc_coefGau(NPop, dec)

    with open(''.join(['../dev/pareto_front/zdt', function[3],
                       '_front.json'])) as optimal_front_data:
        optimal_front = json.load(optimal_front_data)

    for nExec in arange(nReps, dtype=int):

        start = time.time()

        print 'Starting execution %d ...' % (nExec + 1)

        ## Initialization ##

        t = 0  # Counter of generations
        Pt = Population(NPop, Nvar, Vmin, Vmax, NObj, minim,
                        function)  # Initial Population
        Pt.fastNonDominatedSort()  # Nondominated sorting
        Pt.topsisPop()  # Rank within each front
        Qt = Population(NPop, Nvar, Vmin, Vmax, NObj, minim,
                        function)  # Offspring population

        if (RTPlot):
            #plt.ion()
            plt.figure(figsize=(12, 12))
            plt.title('Population at execution %d' % (nExec + 1), fontsize=18)
            f = array(optimal_front)
            plt.plot(f[:, 0], f[:, 1], color='b', label='Pareto front')
            plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=18)
            #plt.draw()

        ## Main loop ##

        while (t < NGer):

            ##        if(t == 150):
            ##            weight = array([0.0,1.0,3.0,10.0,5.0]) # Array of weights used for TOPSIS

            Rt = Pt.addPopulation(Qt)  # Combined population
            Rt.fastNonDominatedSort()  # Nondominated sorting
            ##    Rt.topsisPop(rank=rankType) # Rank within each front
            ##    Rt.globalRankEval()

            if (sampleAll):

                RtObj = Rt.obj  # Save original objective values of St
                Zr, a = normalize(Rt, objRec, minim, Z, p)
                ZRef = Zr * a + objRec.objIdeal
                associate(Rt, Zr, 0)
                ##            distPareto(Rt,ZRef,objRec.objIdeal,a)
                hvContribution(Rt, objRec.objIdeal, a)
                indRemove = niching(NPop, Rt, array([0, 2 * NPop]), weight,
                                    multiple)
                Pt = Rt.removeMembers(indRemove, RtObj)

            else:

                indList = zeros(
                    NPop, dtype=int
                )  # List of indexes of members to the next population
                i = 0  # Counter of fronts
                sizeEvol = array([
                    0, len(Rt.fronts[i])
                ])  # Evolution of population's size by adding the fronts

                # Fill population with the first fronts
                while (sizeEvol[i + 1] <= NPop):
                    #         Rt.crowd(Rt.fronts[i]) = Rt.crowdingDistanceAssignment(Rt.fronts[i],minObj,maxObj)
                    indList[sizeEvol[i]:sizeEvol[i + 1]] = Rt.fronts[
                        i]  # Add members to the list
                    i = i + 1
                    sizeEvol = append(sizeEvol,
                                      sizeEvol[i] + len(Rt.fronts[i]))

                # Sort members of the last front according to
                # crowding distance
            #     Rt.crowd[Rt.fronts[i]] = Rt.crowdingDistanceAssignment(Rt.fronts[i],minObj,maxObj)
            #     ind = argsort(Rt.crowd[Rt.fronts[i]])[::-1]

            # Sort members of the last front according to the rank
            #     ind = argsort(Rt.rank[Rt.fronts[i],1])

                listSt = r_[indList[:sizeEvol[i]], Rt.fronts[i]]
                St = Rt.addMembers(listSt)

                K = NPop - sizeEvol[i]
                StObj = St.obj  # Save original objective values of St
                Zr, a = normalize(St, objRec, minim, Z, p)
                ZRef = Zr * a + objRec.objIdeal
                associate(St, Zr, sizeEvol[i])
                ZRef = Zr * a + objRec.objIdeal
                ##    distPareto(St,ZRef,objRec.objIdeal,a)
                hvContribution(St, objRec.objIdeal, a)
                indRemove = niching(K, St, sizeEvol, weight, multiple)
                Pt = St.removeMembers(indRemove,
                                      StObj)  # Next generation's population

            if (RTPlot):
                Pt.plot(color((1 - deltac) * float(t) / NGer + deltac), scale,
                        center, ObjNames, countFig)
                #axes = plt.gca()
                #axes.set_ylim([0,1])
            Pt.topsisPop(weight, rank=rankType)  # Rank within each front
            Qt = Pt.offspringPop(
                coefGau, sigma, pMut, spread,
                pSwitch)  # Selection, recombination and mutation

            ##    dist[t] = maxDist(Pt.members)/(1*(Vmax - Vmin))
            ##    sigma = dist[t]
            ##        sigma = random.rand()*append(10.0,1.0*ones(Nvar-1))

            ##    MEv = sqrt(sum(Pt.members[:,1:]**2)/(NPop*(Nvar-1)))
            ##    print 'Generation = ', t, 'Mean Square Value =', MEv

            hv = HyperVolume(refPoint)
            hvValues[nExec, t] = hv.compute(Pt.obj)

            t = t + 1
            ##    sigma = sigma - deltaSigma
            ##        sigma = sigma*qSigma

            ##        spread = spread*qSpread
            spread = spread - deltaSpread

        end = time.time()
        extime.append(end - start)

        conv.append(convergence(Pt.obj.tolist(), optimal_front))

        print 'Hypervolume = ', hvValues[nExec, -1]
        print 'Convergence metric = ', conv[nExec]
        print 'Execution ', nExec + 1, ' completed in ', extime[
            -1], ' seconds \n'

        ##    normIgd,Zint = normIGDmetric(ZRef,objRec.objIdeal,a,Pt.obj,function)
        ##    igd = IGDmetric(ZRef,objRec.objIdeal,Pt.obj,function)[0]

        ##    normIgd2,Zint = normIGDmetric2(ZRef,objRec.objIdeal,a,Pt.obj,function)
        ##    igd2 = IGDmetric2(ZRef,objRec.objIdeal,Pt.obj,function)[0]

        ##    print 'NormIGD = {0:e}'.format(normIgd)
        ##    print 'IGD = {0:e}'.format(igd)
        ##    print 'NormIGD2 = {0:e}'.format(normIgd2)
        ##    print 'IGD2 = {0:e}'.format(igd2)

        ##    with open(''.join(['Pareto/Prt_',function,'.pk1']), 'r') as filename:
        ##        f = pickle.load(filename)
        ##step = len(f)/50
        ##plt.scatter(f[::step,0],f[::step,1],s=1,color='b')

        if (RTPlot):
            Pt.plot(color((1 - deltac) * float(t) / NGer + deltac), scale,
                    center, ObjNames, countFig)
            #axes = plt.gca()
            #axes.set_ylim([0,1])
            #plt.draw()
            #plt.ioff()
            plt.show()
            #plt.savefig(''.join(['../figures/',function,'.png']), bbox_inches='tight')

        countFig = countFig + NObj * (NObj - 1) / 2

        finalPop.append(Pt)

##    if(nReps == 1):
##        MEv = sqrt(sum(Pt.members[:,1:]**2)/(NPop*(Nvar-1)))
##        print 'Generation = ', t, 'Mean Square Value =', MEv
##
##        print 'rho =', Pt.rho
##        print 'minObj=', objRec.objIdeal

##refPoint = objRec.extPoints.max(axis=0)*1.1
##print 'refPoint: ',refPoint
##        hv = HyperVolume(refPoint)
##        print 'hypervolume=', hv.compute(Pt.obj)

##ind = where((St.obj[:,0] != 0) & (St.obj[:,1]!=0))[0]
##j = random.choice(ind)
##a = (StObj[j] - objRec.objIdeal) / St.obj[j]
##ZPlot = Z*a + objRec.objIdeal
##
##for i in arange(0,len(Z),len(Z)/10):
##    plt.plot(vstack((objRec.objIdeal[0],ZPlot[i,0])),vstack((objRec.objIdeal[1],ZPlot[i,1])),'-',color='k')
##plt.plot(vstack((objRec.objIdeal[0],ZPlot[-1,0])),vstack((objRec.objIdeal[1],ZPlot[-1,1])),'-',color='k')
##inter = diag(a) + objRec.objIdeal
##plt.plot(inter[:,0],inter[:,1],'-',color='b')

##inter = diag(a) + objRec.objIdeal
##plt.plot(inter[:,0],inter[:,1],'-',color='b')

##plt.show()
##        axes = plt.gca()
##        axes.set_ylim([0,1])
##
##        plt.savefig(''.join(['../figures/',function,'.png']), bbox_inches='tight')

##        plt.figure(2)
##        plt.plot(hvValues)
##        plt.savefig(''.join(['../figures/HV_',function,'.png']), bbox_inches='tight')

##        # Save Population
##        with open(''.join(['../dev/files/Pop_',function,'_MOMCEDA','.pk1']), 'wb') as output:
##            pickle.dump(finalPop, output, pickle.HIGHEST_PROTOCOL)
##
##        # Save Hypervolume
##        with open(''.join(['../dev/files/HV_',function,'_MOMCEDA','.pk1']), 'wb') as output:
##            pickle.dump(hvValues, output, pickle.HIGHEST_PROTOCOL)
##
##        # Save Elapsed time
##        with open(''.join(['../dev/files/time_',function,'_MOMCEDA','.pk1']), 'wb') as output:
##            pickle.dump(extime, output, pickle.HIGHEST_PROTOCOL)

    print 'Average hypervolume=', mean(hvValues[:, -1])
    print 'Best hypervolume=', max(hvValues[:, -1])

    ##        plt.figure(countFig+1,figsize=(12, 12))
    ##        plt.title('Average hypervolume evolution for %s problem' %(function), fontsize=18)
    ##        plt.xlabel('Generations', fontsize=18)
    ##        plt.ylabel('Average hypervolume', fontsize=18)
    ##        plt.plot(arange(1,NGer+1),hvValues.mean(axis=0))
    ##        plt.savefig(''.join(['../figures/meanHV_',function,'.png']), bbox_inches='tight')

    # Save Population
    with open(''.join(['../dev/files/Pop_', function, '_MOMCEDA', '.pk1']),
              'wb') as output:
        pickle.dump(finalPop, output, pickle.HIGHEST_PROTOCOL)

    # Save Hypervolume
    with open(''.join(['../dev/files/HV_', function, '_MOMCEDA', '.pk1']),
              'wb') as output:
        pickle.dump(hvValues, output, pickle.HIGHEST_PROTOCOL)

    # Save Elapsed time
    with open(''.join(['../dev/files/time_', function, '_MOMCEDA', '.pk1']),
              'wb') as output:
        pickle.dump(extime, output, pickle.HIGHEST_PROTOCOL)

    # Save Convergence
    with open(''.join(['../dev/files/conv_', function, '_MOMCEDA.json']),
              'w') as outfile:
        json.dump(conv, outfile)

    print '\nMOMCEDA finished all experiments\n'