Code Example #1
File: movie.py  Project: vodenizeka/polyworld
# imports this excerpt relies on (readcluster is the project's cluster module,
# aliased as rc to match its use below)
import os
import subprocess

import readcluster as rc


def plot(cluster_file,
         run_dir='../run/',
         anim_path=None,
         frame_path=None,
         min_size=None):
    # configure default animation and frame path
    if anim_path is None:
        anim_path = 'anim'
    if frame_path is None:
        frame_path = os.path.join(anim_path, 'frames')

    # ensure paths exist
    if not os.path.isdir(anim_path):
        os.mkdir(anim_path)
    if not os.path.isdir(frame_path):
        os.mkdir(frame_path)

    # load clusters
    clusters = rc.load_clusters(cluster_file, sort='random')

    # compress, if a threshold is set
    if min_size is not None:
        clusters = compress_clusters(clusters, min_size)

    # calculate number of clusters
    n_clusters = len(clusters)
    agent_cluster = rc.agent_cluster(clusters)

    # establish cluster object as a shared memory object.
    #manager = Manager()
    #cluster = manager.dict(agent_cluster)

    start = 1
    stop = 300

    # grouping to keep memory footprint reasonably small.
    # TODO: Fix this using iterators and multiprocessing.Pool(). Running into
    # major issues with serialization of Agent class that is preventing
    # plot_step from being picklable.
    for begin in xrange(start, stop, 1500):
        end = min(begin + 1499, stop)

        p = get_population_during_time(begin, end)

        for t in xrange(begin, end, 3):
            plot_step(t,
                      p,
                      agent_cluster,
                      n_clusters,
                      frame_path,
                      cmap='Paired')

    subprocess.call((
        "mencoder mf://%s/*.png -o %s/output.avi -mf type=png:w=600:h=800:fps=30 -ovc x264 -x264encopts qp=20"
        % (frame_path, anim_path)).split())
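
The TODO above notes that serialization problems with the Agent class kept plot_step from being used with multiprocessing.Pool, which is why the chunked loop runs serially. A minimal sketch of the Pool-based version the comment asks for, assuming plot_step and get_population_during_time are module-level functions in movie.py and agent_cluster is a plain dict; render_chunk and render_frames are hypothetical names introduced here:

import multiprocessing

def render_chunk(args):
    # worker: unpack one (begin, end) chunk and render every third step in it;
    # the population is loaded inside the worker, so Agent objects never cross
    # the process boundary and never need to be pickled
    begin, end, agent_cluster, n_clusters, frame_path = args
    p = get_population_during_time(begin, end)
    for t in range(begin, end, 3):
        plot_step(t, p, agent_cluster, n_clusters, frame_path, cmap='Paired')

def render_frames(start, stop, agent_cluster, n_clusters, frame_path, chunk=1500):
    # one job per chunk keeps the memory footprint of each worker small
    jobs = [(begin, min(begin + chunk - 1, stop), agent_cluster, n_clusters, frame_path)
            for begin in range(start, stop, chunk)]
    pool = multiprocessing.Pool()
    try:
        pool.map(render_chunk, jobs)
    finally:
        pool.close()
        pool.join()

Only primitives and the plain agent_cluster dict are pickled as Pool arguments here, which sidesteps the Agent serialization issue; the Manager().dict() sharing commented out above would mainly matter if the workers had to mutate the mapping.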
Code Example #2
    print cluster
    exit()
    '''

    import readcluster
    clusters = readcluster.load_clusters(sys.argv[-1])

    # keep only clusters with more than 700 members; smaller clusters get merged below
    new_clust = [clust for clust in clusters if len(clust) > 700]

    small_clust = []
    for clust in clusters:
        if len(clust) <= 700:
            small_clust.extend(clust)

    new_clust.append(small_clust)
    cluster = readcluster.agent_cluster(new_clust)

    n_clusters = len(set(cluster.values()))
    print n_clusters

    print "processing..."
    # establish cluster object as a shared memory object.
    from multiprocessing import Pool, Manager
    manager = Manager()
    cluster = manager.dict(cluster)
    #p = manager.list(p)

    start = 1
    stop = 30000
    '''
    # for testing purposes:
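
The list comprehension and loop above do by hand what compress_clusters is called for in Code Example #1: clusters with more than 700 members are kept, and the members of all smaller clusters are folded into a single catch-all cluster. A minimal sketch of such a helper, parametrized by the threshold (the project's actual compress_clusters may differ in details):

def compress_clusters(clusters, min_size):
    # keep every cluster with more than min_size members...
    large = [clust for clust in clusters if len(clust) > min_size]

    # ...and merge the members of all smaller clusters into one catch-all cluster
    small = []
    for clust in clusters:
        if len(clust) <= min_size:
            small.extend(clust)

    large.append(small)
    return large

With a helper like this, the excerpt above reduces to new_clust = compress_clusters(clusters, 700) before building the agent-to-cluster mapping with readcluster.agent_cluster.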