Example #1
    def array(self, N=0, filename=None, component=None, root=0):
        """Dump data to numpy format on root processor."""
        assert (N == 0 or N == 1)
        is_root = comm.Get_rank() == root
        size = self.get_total_number_probes() if is_root else len(self)
        comp = self.value_size() if component is None else 1
        z = zeros((size, comp))

        # Retrieve all values
        if len(self) > 0:
            for k in range(comp):
                if is_root:
                    ids = self.get_probe_ids()
                    z[ids, k] = self.get_probes_component_and_snapshot(k, N)
                else:
                    z[:, k] = self.get_probes_component_and_snapshot(k, N)

        # Collect on root
        recvfrom = comm.gather(len(self), root=root)
        if is_root:
            for j, k in enumerate(recvfrom):
                if comm.Get_rank() != j:
                    ids = comm.recv(source=j, tag=101)
                    z0 = comm.recv(source=j, tag=102)
                    z[ids, :] = z0[:, :]
        else:
            ids = self.get_probe_ids()
            comm.send(ids, dest=root, tag=101)
            comm.send(z, dest=root, tag=102)

        if is_root:
            if filename:
                save(filename + "_statistics", z)
            return squeeze(z)
Example #2
    def array(self, N=0, filename=None, component=None, root=0):
        """Dump data to numpy format on root processor."""
        assert N == 0 or N == 1
        is_root = comm.Get_rank() == root
        size = self.get_total_number_probes() if is_root else len(self)
        comp = self.value_size() if component is None else 1
        z = zeros((size, comp))

        # Retrieve all values
        if len(self) > 0:
            for k in range(comp):
                if is_root:
                    ids = self.get_probe_ids()
                    z[ids, k] = self.get_probes_component_and_snapshot(k, N)
                else:
                    z[:, k] = self.get_probes_component_and_snapshot(k, N)

        # Collect on root
        recvfrom = comm.gather(len(self), root=root)
        if is_root:
            for j, k in enumerate(recvfrom):
                if comm.Get_rank() != j:
                    ids = comm.recv(source=j, tag=101)
                    z0 = comm.recv(source=j, tag=102)
                    z[ids, :] = z0[:, :]
        else:
            ids = self.get_probe_ids()
            comm.send(ids, dest=root, tag=101)
            comm.send(z, dest=root, tag=102)

        if is_root:
            if filename:
                z.dump(filename + "_statistics.probes")
            return squeeze(z)
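
Both variants above drain the per-rank blocks with explicit tagged send/recv. The same collection can be written with a single collective call; the following is a minimal sketch, not the library's own code, reusing `comm`, `is_root`, `root` and `z` from the examples:

# sketch: one gather of (ids, block) pairs instead of the tag-101/102 exchange
gathered = comm.gather((self.get_probe_ids(), z), root=root)
if is_root:
    for r, (ids, z0) in enumerate(gathered):
        if r != root:  # root's block is already in place
            z[ids, :] = z0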
Example #3
def calc(psi, nt, C):
    i = slice(halo, psi.size - halo)

    for _ in range(nt):
        # periodic halo exchange over MPI; in the serial version this was
        #   psi[:halo] = psi[i][-halo:]
        #   psi[-halo:] = psi[i][:halo]
        rank = mpi.Get_rank()
        size = mpi.Get_size()

        right = (rank + 1) % size
        left = (rank - 1 + size) % size

        psi_without_halo = psi[i]
        psi_with_halo = psi

        # blocking send/recv pairs; relies on MPI buffering the small
        # halo messages
        mpi.send(psi_without_halo[-halo:], dest=right)
        psi_with_halo[:halo] = mpi.recv(source=left)
        mpi.send(psi_without_halo[:halo], dest=left)
        psi_with_halo[-halo:] = mpi.recv(source=right)

        psi[i] = upwind(psi, i, C)

    return psi
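
A note on the exchange above: the blocking send/recv pairs complete only because MPI buffers the small halo messages; for larger halos they can deadlock. A minimal sketch of the same periodic exchange using mpi4py's combined sendrecv, which pairs each send with its receive and so avoids that ordering hazard (assuming the same `mpi`, `psi` and `halo` names as above):

# deadlock-free variant of the halo exchange, using combined send+receive
rank = mpi.Get_rank()
size = mpi.Get_size()
right = (rank + 1) % size
left = (rank - 1 + size) % size
inner = psi[halo:psi.size - halo]
psi[:halo] = mpi.sendrecv(inner[-halo:], dest=right, source=left)
psi[-halo:] = mpi.sendrecv(inner[:halo], dest=left, source=right)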
Example #4
    def update(self, tag):
        """Update file list and global configurations
        """
        atimer = Timer('Worker_Update')

        if tag == UPDATE_MAP:
            self.mapIn = world.recv(source=0, tag=tag)
        elif tag == UPDATE_REDUCE:
            self.reduceIn = world.recv(source=0, tag=tag)
            self.reduceOut = [
                os.path.splitext(file)[0] + '.red' for file in self.reduceIn
            ]
        elif tag == UPDATE_CONFIG:
            self.config = world.recv(source=0, tag=tag)
        else:
            raise ValueError('Wrong tag specified.')
Example #5
    def array(self, N=None, filename=None, component=None, root=0):
        """Dump data to numpy format on root processor for all or one snapshot."""
        is_root = comm.Get_rank() == root
        size = self.get_total_number_probes() if is_root else len(self)
        comp = self.value_size() if component is None else 1
        if N is not None:
            z = zeros((size, comp))
        else:
            z = zeros((size, comp, self.number_of_evaluations()))

        # Get all values
        if len(self) > 0:
            if N is not None:
                for k in range(comp):
                    if is_root:
                        ids = self.get_probe_ids()
                        z[ids, k] = self.get_probes_component_and_snapshot(k, N)
                    else:
                        z[:, k] = self.get_probes_component_and_snapshot(k, N)
            else:
                # N is None: collect the full time series for each probe
                for i, (index, probe) in enumerate(self):
                    j = index if is_root else i
                    for k in range(self.value_size()):
                        z[j, k, :] = probe.get_probe_sub(k)

        # Collect values on root
        recvfrom = comm.gather(len(self), root=root)
        if is_root:
            for j, k in enumerate(recvfrom):
                if comm.Get_rank() != j:
                    ids = comm.recv(source=j, tag=101)
                    z0 = comm.recv(source=j, tag=102)
                    z[ids, :] = z0[:, :]
        else:
            ids = self.get_probe_ids()
            comm.send(ids, dest=root, tag=101)
            comm.send(z, dest=root, tag=102)

        if is_root:
            if filename:
                if N is not None:
                    save(filename + "_snapshot_" + str(N), z)
                else:
                    save(filename + "_all", z)
            return squeeze(z)
Example #6
    def array(self, N=None, filename=None, component=None, root=0):
        """Dump data to numpy format on root processor for all or one snapshot."""
        is_root = comm.Get_rank() == root
        size = self.get_total_number_probes() if is_root else len(self)
        comp = self.value_size() if component is None else 1
        if N is not None:
            z = zeros((size, comp))
        else:
            z = zeros((size, comp, self.number_of_evaluations()))

        # Get all values
        if len(self) > 0:
            if N is not None:
                for k in range(comp):
                    if is_root:
                        ids = self.get_probe_ids()
                        z[ids, k] = self.get_probes_component_and_snapshot(k, N)
                    else:
                        z[:, k] = self.get_probes_component_and_snapshot(k, N)
            else:
                # N is None: collect the full time series for each probe
                for i, (index, probe) in enumerate(self):
                    j = index if is_root else i
                    for k in range(self.value_size()):
                        z[j, k, :] = probe.get_probe_sub(k)

        # Collect values on root
        recvfrom = comm.gather(len(self), root=root)
        if is_root:
            for j, k in enumerate(recvfrom):
                if comm.Get_rank() != j:
                    ids = comm.recv(source=j, tag=101)
                    z0 = comm.recv(source=j, tag=102)
                    z[ids, :] = z0[:, :]
        else:
            ids = self.get_probe_ids()
            comm.send(ids, dest=root, tag=101)
            comm.send(z, dest=root, tag=102)

        if is_root:
            if filename:
                if N is not None:
                    z.dump(filename + "_snapshot_" + str(N) + ".probes")
                else:
                    z.dump(filename + "_all.probes")
            return squeeze(z)
Example #7
    def wait(self, running, tag):
        """Test if any worker has finished its job.
        If so, decrease its key and make it available
        """
        atimer = Timer('Wait')

        inittime = time()
        status = MPI.Status()
        while time() - inittime < self.config['jobwait']:
            if world.Iprobe(source=MPI.ANY_SOURCE, tag=tag, status=status):
                jobf = world.recv(source=status.source, tag=tag)
                idx = 0
                for ii, worker in enumerate(self.workers):
                    if worker.id == status.source:
                        idx = ii
                        break
                if self.config['verbosity'] >= 8:
                    print('Freeing worker ' + str(self.workers[idx].id))
                worker = self.workers[idx]

                # faulty worker's job has already been cleaned
                if not worker.isFaulty():
                    del running[jobf]
                else:
                    self.nActive += 1
                worker.setFree()
                # _siftup is a private heapq helper; it restores the heap
                # invariant in place after the worker's key was decreased
                heapq._siftup(self.workers, idx)
Example #8
    def map(self, f, tasks):
        N = len(tasks)
        P = self.P
        Pless1 = P - 1
        if self.rank != 0:
            self.wait()
            return

        # broadcast the function to the workers only when it changed
        if f is not self.f:
            self.f = f
            requests = []
            for p in range(1, self.P):
                r = COMM_WORLD.isend(f, dest=p)
                requests.append(r)
            MPI.Request.waitall(requests)

        # deal tasks round-robin to ranks 1..P-1, tagged by task index
        requests = []
        for i, task in enumerate(tasks):
            r = COMM_WORLD.isend(task, dest=(i % Pless1) + 1, tag=i)
            requests.append(r)
        MPI.Request.waitall(requests)

        # collect results in task order from the same ranks
        results = []
        for i in range(N):
            result = COMM_WORLD.recv(source=(i % Pless1) + 1, tag=i)
            results.append(result)
        return results
Example #9
    def map(self, tag):
        """
        Execute supplied mapfn on each key-value pair read from file
        assigned by the master node
        """
        atimer = Timer('Worker_Map')

        # load key-value pairs from filename
        filename = world.recv(source=0, tag=tag)
        data = self.read(filename, tag)

        buffer = [[] for ii in range(self.config['nReduce'])]
        for key, val in data.items():
            for newKey, newVal in self.config['mapfn'](key, val):
                idx = self.config['hashfn'](newKey) % self.config['nReduce']
                buffer[idx].append((newKey, newVal))

        # write out new key-value pairs in scattered files
        for ii in range(self.config['nReduce']):
            tmpfile = self.reduceIn[ii] + '-tmp' + str(world.rank)
            # dump in append mode
            with open(tmpfile, 'a+b') as fout:
                pickle.dump(buffer[ii], fout, pickle.HIGHEST_PROTOCOL)

        # report back as successful completion of task
        world.send(filename, dest=0, tag=MAP_FINISH)
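
The `hashfn(newKey) % nReduce` line above is the shuffle step of the map phase: it pins every occurrence of a key to the same reduce partition. A toy, single-process illustration; note that the config-supplied `hashfn` must be deterministic across ranks, since Python's built-in string hash is salted per process:

# toy illustration of hash partitioning: equal keys share a bucket
nReduce = 4
buckets = [[] for _ in range(nReduce)]
for key, val in [('cat', 1), ('dog', 1), ('cat', 2)]:
    buckets[hash(key) % nReduce].append((key, val))
# within one process, ('cat', 1) and ('cat', 2) now sit in the same bucket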
Example #10
    def run(self):
        """Receiving job instructions from the master node until
        TERMINATE signal received. Allowed tasks are defined in taskDict
        """
        atimer = Timer('Worker')

        # tasks define signal-behavior in the run function
        taskDict = {MAP_START: self.map, REDUCE_START: self.reduce,
                    INIT_START: self.map,
                    UPDATE_MAP: self.update, UPDATE_REDUCE: self.update,
                    UPDATE_CONFIG: self.update}

        status = MPI.Status()
        while True:
            # ping input
            if not world.Iprobe(source=0, tag=MPI.ANY_TAG, status=status):
                sleep(self.config['delay'])

            # entire calculation finished
            elif status.tag == TERMINATE:
                term = world.recv(source=0, tag=TERMINATE)
                break

            # check allowed tasks
            elif status.tag in taskDict:
                taskDict[status.tag](status.tag)

            # no instruction found, looping
            else:
                sleep(self.config['delay'])
Example #11
    def wait(self):
        if self.rank == 0:
            raise RuntimeError("Proc 0 cannot wait!")
        status = MPI.Status()
        while True:
            task = COMM_WORLD.recv(source=0, tag=MPI.ANY_TAG, status=status)
            if not task:
                break  # a falsy task is the shutdown signal
            if isinstance(task, FunctionType):
                self.f = task  # the master pushed a new function to map
                continue
            result = self.f(task)
            # echo the task's tag back so the master can pair results to tasks
            COMM_WORLD.isend(result, dest=0, tag=status.tag)
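
The loop above exits when a falsy task arrives, so the master can shut the pool down by sending `None` to every worker once all results are in; a minimal sketch under the same `COMM_WORLD` layout:

# shutdown sketch for the wait() loop above: a falsy task ends it
from mpi4py.MPI import COMM_WORLD

if COMM_WORLD.Get_rank() == 0:
    for p in range(1, COMM_WORLD.Get_size()):
        COMM_WORLD.send(None, dest=p)  # falsy task -> worker leaves wait()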
Example #12
    def reduce(self, tag):
        """Use supplied reducefn to operate on
        a list of values from a given key, generated by self.map()
        """
        atimer = Timer('Worker_Reduce')

        filename = world.recv(source=0, tag=tag)
        files = glob.glob(filename + '-tmp*')
        dataList = []
        for file in files:
            with open(file, 'rb') as fin:
                try:
                    while True:
                        dataList.extend(pickle.load(fin))
                except EOFError:  # read in every instance of pickle dump
                    pass
        data = {}
        for key, val in dataList:
            if key in data:
                data[key].append(val)
            else:
                data[key] = [val]
        results = []
        for key, values in data.items():
            results.append((key, self.config['reducefn'](key, values)))
        results.sort(key=itemgetter(0))

        # write out in dictionary format (binary mode: pickle output is bytes)
        idx = self.reduceIn.index(filename)
        if self.config['appendReduce']:
            with open(self.reduceOut[idx], 'a+b') as fout:
                pickle.dump(dict(results), fout, pickle.HIGHEST_PROTOCOL)
        else:
            with open(self.reduceOut[idx], 'w+b') as fout:
                pickle.dump(dict(results), fout, pickle.HIGHEST_PROTOCOL)

        world.send(filename, dest=0, tag=REDUCE_FINISH)
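
The EOFError loop above reads back every segment that the map phase appended with repeated pickle.dump calls; the same idiom can be factored into a small helper, sketched here:

# sketch: yield each object appended to `path` by successive pickle.dump calls
import pickle

def iter_pickles(path):
    with open(path, 'rb') as fin:
        while True:
            try:
                yield pickle.load(fin)
            except EOFError:  # no more dumps in this file
                return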
Example #13
    todo = []
    for i in range(0, len(A_grid)):
        for j in range(0, len(B_grid)):
            for k in range(0, len(C_grid)):
                todo.append([i, j, k])

    # seed every worker with one job
    job_index = 0
    for i in range(1, size):
        if job_index < len(todo):
            COMM_WORLD.send([job_index, todo[job_index]], dest=i, tag=0)
            job_index += 1

    finished = 0
    while finished != len(todo):
        result = COMM_WORLD.recv(source=MPI.ANY_SOURCE, tag=0)
        finished += 1
        print(repr(finished) + '/' + repr(len(todo)))
        # record result
        th_e[result[0], result[1], result[2]] = result[3]
        # send next job to this processor
        if job_index < len(todo):
            COMM_WORLD.send([job_index, todo[job_index]], dest=result[4], tag=0)
            job_index += 1

    # kill *all* processors
    for i in range(1, size):
        COMM_WORLD.send([-1, -1], dest=i, tag=0)

    f = h5py.File('coolfunc_table_ch.h5', 'w')
    f.create_dataset('A_grid', (len(A_grid),), dtype='f')
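
This fragment is the master side only. A worker loop matching its protocol (a `[job_index, [i, j, k]]` message in, an `[i, j, k, value, rank]` message back, `[-1, -1]` as the kill signal) could look like the sketch below, where `compute` is a hypothetical stand-in for the per-cell calculation:

# hypothetical worker for the master fragment above
rank = COMM_WORLD.Get_rank()
while True:
    job_index, job = COMM_WORLD.recv(source=0, tag=0)
    if job_index == -1:
        break  # kill signal from the master
    i, j, k = job
    value = compute(A_grid[i], B_grid[j], C_grid[k])  # `compute` is assumed
    COMM_WORLD.send([i, j, k, value, rank], dest=0, tag=0)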
Example #14
def main():

    args = parse_inputs()
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    else:
        args.adaptive_bins = True

    file = open(args.file, 'rb')  # binary mode: the file holds pickled data
    loaded_fields = pickle.load(file)
    distance = loaded_fields[0]
    y = loaded_fields[1]
    bin_data = loaded_fields[2]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
    if args.adaptive_bins:
        rs = [0] + list(set(distance))
        rs = np.sort(rs)
        #rs = rs[::2]
        rs = np.array(rs)
        rs = np.append(rs, (dist_max + (rs[-1] - rs[-2])))
    else:
        rs = np.linspace(dist_min, dist_max, args.no_of_bins)
        rs = np.append(rs, (dist_max + (rs[-1] - rs[-2])))
    bin_size = rs[-1] - rs[-2]
    rs = np.append(rs, rs[-1] + bin_size)
    gradient = np.array(np.zeros(np.shape(distance)))
    #print "RS:", rs

    rit = 1
    printed = False
    print_cen = False
    for r in range(len(rs)):
        if rank == rit:
            grad_add = np.array(np.zeros(np.shape(distance)))
            if r - 1 < 0:
                r_0 = rs[r]
            else:
                r_0 = rs[r - 1]
            r_1 = rs[r]
            if r + 1 == len(rs):
                r_2 = rs[r]
            else:
                r_2 = rs[r + 1]
            if r + 2 == len(rs) + 1:
                r_3 = rs[r]
            elif r + 2 == len(rs):
                r_3 = rs[r + 1]
            else:
                r_3 = rs[r + 2]
            mid_01 = (r_1 + r_0) / 2.
            mid_23 = (r_3 + r_2) / 2.
            shell_01 = np.where((distance >= r_0) & (distance < r_1))[0]
            shell_12 = np.where((distance >= r_1) & (distance < r_2))[0]
            shell_23 = np.where((distance >= r_2) & (distance < r_3))[0]
            if len(shell_01) == 0:
                print("FOUND EMPTY SHELL")
                y_01 = 0.0
            else:
                y_01 = np.mean(y[shell_01])
            if len(shell_23) == 0:
                y_23 = 0.0
                print("FOUND EMPTY SHELL")
            else:
                y_23 = np.mean(y[shell_23])
            grad_val = (y_23 - y_01) / (2. * (mid_23 - mid_01))
            #if rank == 1:
            #print "r_0, r_1, r_2, r_3:", r_0, r_1, r_2, r_3
            #print "mid_01, mid_12, mid_23:", mid_01, mid_12, mid_23
            #print "y_01, y_12, y_23:", y_01, y_12, y_23, "on rank", rank
            #print "grad_1, grad_2, average", grad_1, grad_2, grad_val, "on rank", rank
            #print "Gradient =", grad_val, "at Distance =", np.mean([mid_01, mid_23]), "on rank", rank
            grad_add[shell_12] = grad_val
            #grad_add[shell] = grad_val
            CW.send(grad_add, dest=0, tag=rank)
        if rank == 0:
            grad_add = CW.recv(source=rit, tag=rit)
            gradient = gradient + grad_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        file = open(args.save_file, 'wb')  # binary mode for pickle.dump
        print("pickle file:", args.save_file)
        pickle.dump(gradient, file)
        file.close()
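
The loop above ships every shell's contribution to rank 0 with one blocking send/recv pair per iteration. If each rank instead accumulated its shells into its local `gradient` array, a single collective sum at the end would replace all of that traffic; a hedged sketch using mpi4py's buffer-based Reduce:

# one collective sum instead of per-iteration send/recv (assumes each rank
# accumulated its own shells into its local `gradient` array)
from mpi4py import MPI
total = np.zeros_like(gradient)
CW.Reduce(gradient, total, op=MPI.SUM, root=0)  # `total` is filled on rank 0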
Example #15
def main():

    args = parse_inputs()
    center = int(args.center)
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    a = args.semimajor_axis
    max_radius = args.max_r

    file = open(args.file, 'rb')  # binary mode: the file holds pickled data
    loaded_fields = pickle.load(file)
    distance = loaded_fields[0]
    cell_mass = loaded_fields[1]
    part_mass = loaded_fields[2]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
    if args.adaptive_bins:
        rs = [0]
        rs = rs + list(set(distance[distance <= max_radius]))
        rs = np.sort(rs)
        bin_freq = len(rs) // 500  # integer step for the slice below
        if bin_freq > 0:
            rs = rs[::bin_freq]
        rs = np.array(rs)
        rs = np.append(rs, max_radius)
    else:
        rs = np.linspace(0.0, max_radius, args.no_of_bins)
    bin_size = rs[-1] - rs[-2]
    #print "bin_size =", bin_size/1.49597870751e+13
    #print "max_radius =", max_radius/1.49597870751e+13
    rs = np.append(rs, rs[-1] + bin_size)
    enclosed_mass = np.array(np.zeros(np.shape(distance)))

    rit = 1
    printed = False
    print_cen = False
    for r in range(len(rs))[1:]:
        if rank == rit:
            enclosed_mass = np.array(np.zeros(np.shape(distance)))
            ind = np.where((distance >= rs[r - 1]) & (distance < rs[r]))[0]
            enclosed_dist = np.where(distance < rs[r - 1])[0]
            if len(enclosed_dist) == 0:
                enclosed_dist = np.where(distance < (dist_min + 1))
            enclosed_mass_val = np.sum(cell_mass[enclosed_dist])
            if center != 0:
                enclosed_mass_val = enclosed_mass_val + part_mass[center - 1]
                if print_cen == False:
                    #print "Centered on particle with mass", part_mass[center-1]/1.98841586e+33
                    print_cen = True
            if center != 0 and rs[r] > a and len(part_mass) > 1:
                if center == 1:
                    enclosed_mass_val = enclosed_mass_val + part_mass[1]
                else:
                    enclosed_mass_val = enclosed_mass_val + part_mass[0]
                if printed == False:
                    #print "Added other particle with mass", part_mass[0]/1.98841586e+33
                    printed = True
            elif center == 0 and rs[r] > a / 2. and len(part_mass) > 0:
                enclosed_mass_val = enclosed_mass_val + np.sum(part_mass)
                if printed == False:
                    #print "Added both particles with mass", np.sum(part_mass)/1.98841586e+33
                    printed = True
            enclosed_mass[ind] = enclosed_mass_val
            #print "enclosed mass =", enclosed_mass_val/1.98841586e+33, ", Radius =", rs[r]/1.49597870751e+13, "on rank", rank
            CW.send(enclosed_mass, dest=0, tag=rank)
        if rank == 0:
            enclosed_mass_add = CW.recv(source=rit, tag=rit)
            enclosed_mass = enclosed_mass + enclosed_mass_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        file = open(args.save_file, 'wb')  # binary mode for pickle.dump
        print("pickle file:", args.save_file)
        pickle.dump(enclosed_mass, file)
        file.close()
Example #16
def main():

    args = parse_inputs()
    center = int(args.center)
    if args.adaptive_bins == 'False':
        args.adaptive_bins = False
    a = args.semimajor_axis
    max_radius = args.max_r

    file = open(args.file, 'rb')  # binary mode: the file holds pickled data
    loaded_fields = pickle.load(file)
    distance = loaded_fields[0]
    y = loaded_fields[1]

    rank = CW.Get_rank()
    size = CW.Get_size()

    dist_min = np.min(distance)
    dist_max = np.max(distance)
    if args.adaptive_bins:
        rs = [0]
        rs = rs + list(set(distance[distance <= max_radius]))
        rs = np.sort(rs)
        rs = rs[::2]
        rs = np.array(rs)
        rs = np.append(rs, max_radius)
    else:
        rs = np.linspace(0.0, max_radius, args.no_of_bins)
    bin_size = rs[-1] - rs[-2]
    rs = np.append(rs, rs[-1] + bin_size)
    gradient = np.array(np.zeros(np.shape(distance)))

    rit = 1
    printed = False
    print_cen = False
    for r in range(len(rs)):
        if rank == rit:
            grad_add = np.array(np.zeros(np.shape(distance)))
            if r - 1 < 0:
                r_0 = rs[r]
            else:
                r_0 = rs[r - 1]
            r_1 = rs[r]
            if r + 1 == len(rs):
                r_2 = rs[r]
            else:
                r_2 = rs[r + 1]
            if r + 2 == len(rs) + 1:
                r_3 = rs[r]
            elif r + 2 == len(rs):
                r_3 = rs[r + 1]
            else:
                r_3 = rs[r + 2]
            shell_0_1 = np.where((distance >= r_0) & (distance < r_1))[0]
            shell_1_2 = np.where((distance >= r_1) & (distance < r_2))[0]
            shell_2_3 = np.where((distance >= r_2) & (distance < r_3))[0]
            if len(shell_0_1) == 0:
                y_0_1 = 0.0
            else:
                y_0_1 = np.mean(y[shell_0_1])
            if len(shell_1_2) == 0:
                y_1_2 = 0.0
            else:
                y_1_2 = np.mean(y[shell_1_2])
            if len(shell_2_3) == 0:
                y_2_3 = 0.0
            else:
                y_2_3 = np.mean(y[shell_2_3])
            grad_val = ((y_1_2 - y_0_1) / (r_1 - r_0) + (y_2_3 - y_1_2) /
                        (r_2 - r_1)) / 2.
            print("Gradient =", grad_val, "at Distance =", r_1, "on rank",
                  rank)
            grad_add[shell_1_2] = grad_val
            # ship this shell's gradient contribution to rank 0
            CW.send(grad_add, dest=0, tag=rank)
        if rank == 0:
            grad_add = CW.recv(source=rit, tag=rit)
            gradient = gradient + grad_add
        rit = rit + 1
        if rit == size:
            rit = 1

    if rank == 0:
        os.remove(args.file)
        file = open(args.save_file, 'wb')  # binary mode for pickle.dump
        print("pickle file:", args.save_file)
        pickle.dump(gradient, file)
        file.close()
Example #17
plt.clf()
plt.plot(x, y, '.')
plt.plot(x, data_first_guess, label='first guess')
plt.plot(fine_t, data_fit, label='after fitting')
plt.legend()
plt.savefig("fit_for_rank_" + str(rank) + ".png")

#But you can also communicate between processes. The simplest method is to send and receive between specific ranks

data = {}

if rank == 0:
    data = {'a': 7, 'b': 3.14}
    CW.send(data, dest=1, tag=11)
elif rank == 1:
    data = CW.recv(source=0, tag=11)

print("On rank", rank, "data =", data)

CW.Barrier()

#But if you're sending NumPy arrays, this buffer-based method is much faster!
data = np.array([])
if rank == 0:
    data = np.arange(100, dtype=np.float64)
    CW.Send(data, dest=1, tag=13)
elif rank == 1:
    data = np.empty(100, dtype=np.float64)
    CW.Recv(data, source=0, tag=13)

print("On rank", rank, "data =", data)
Example #18
def main():

    rank = CW.Get_rank()
    size = CW.Get_size()

    args = parse_inputs()

    n_orb = int(args.no_orbits)
    n_systems = int(args.no_systems)
    q_min = 0.05
    my_orb = bo.random_orbits(n_orb=n_orb)
    US_group_vel = 10.
    UCL_group_vel = 4.
    #Madsen, 2002 gives the STANDARD ERROR of the US and UCL velocities to be 1.3 and 1.9km/s
    US_group_std = 1.3 * args.group_velocity_sigma  #From Preibisch et al., 2008
    UCL_group_std = 1.3 * args.group_velocity_sigma
    standard_std = {'F': 1.08, 'G': 0.63, 'K': 1.43, 'M': 2.27}  # 2.0
    astrophysical_std = args.astrophysical_std  #Astrophysical radial velocity uncertainty

    Object = []
    Region = []
    IR_excess = []
    Temp_sptype = []
    Pref_template = []
    Obs_info = []
    all_bayes = [[], []]

    RV_standard_info = {}

    sys.stdout.flush()
    CW.Barrier()

    #Read in RV standard list
    header = 0
    with open('/home/100/rlk100/RV_standard_list.csv', 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if header != 0:
                RV_standard_info[row[0]] = (float(row[5]), float(row[6]),
                                            float(row[7]))
            else:
                header = 1
        f.close()

    sys.stdout.flush()
    CW.Barrier()

    print("Reading in current spreadsheet", args.input_file)
    header = 0
    reshape_len = -1
    with open(args.input_file, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if header != 0:
                if 'U4' in row[0]:
                    row[0] = 'UCAC4' + row[0].split('U4')[-1]
                Object.append(row[0])
                Region.append(row[1])
                IR_excess.append(row[5])
                Pref_template.append(row[15])  #row[18])
                Temp_sptype.append(row[16])  #row[19])
                if len(row) > 17:
                    Obs = np.array(row[17:])
                    Obs = np.delete(Obs, np.where(Obs == ''))
                    if reshape_len == -1:
                        for ob in Obs:
                            reshape_len = reshape_len + 1
                            if '/' in ob and ob != Obs[0]:
                                break
                    #if len(Obs) > 5:
                    #    Obs = np.reshape(Obs, (len(Obs)/5, 5))
                    Obs = np.reshape(Obs,
                                     (len(Obs) // reshape_len, reshape_len))
                    for ind_obs in Obs:
                        if '/' in ind_obs[0]:
                            new_format = '20' + ind_obs[0].split('/')[
                                -1] + '-' + ind_obs[0].split('/')[-2] + '-' + (
                                    "%02d" % int(ind_obs[0].split('/')[-3]))
                            ind_obs[0] = new_format
                else:
                    Obs = np.array([])
                Obs_info.append(Obs)
            if header == 0:
                header = 1
        f.close()
    del header

    sys.stdout.flush()
    CW.Barrier()

    Obj_bayes = np.nan * np.zeros(len(Object))

    #Read in currently calculated Bayes Factors:
    if args.restart_calc != 'False':
        print("Reading in calulated Bayes factors")
        header = 0
        with open(args.bayes_file, 'r') as f:
            reader = csv.reader(f)
            for row in reader:
                if header != 0:
                    ind = Object.index(row[0])
                    Obj_bayes[ind] = float(row[2])
                    if row[1] == 'US':
                        all_bayes[0].append(float(row[2]))
                    else:
                        all_bayes[1].append(float(row[2]))
                    del ind
                else:
                    header = 1
            f.close()
        del header

    sys.stdout.flush()
    CW.Barrier()

    if args.restart_calc != 'False' and rank == 0:
        print("Creating new bayes file")
        f = open(args.bayes_file, 'w')
        f.write('Object,Region,Bayes_factor\n')
        f.close()

    sys.stdout.flush()
    CW.Barrier()

    inds = list(range(len(Object)))
    skip_inds = np.where(np.array(IR_excess) == 'NN')[0]
    for skit in skip_inds:
        inds.remove(skit)
    skip_inds = np.where(np.array(Pref_template) == '')[0]
    for skit in skip_inds:
        inds.remove(skit)
    del skip_inds
    del IR_excess

    rit = 0
    sys.stdout.flush()
    CW.Barrier()
    for obj in inds:
        Pref_template_name = Pref_template[obj].split('_')[0]
        if np.isnan(Obj_bayes[obj]) and rank == rit:
            print("Doing object:", Object[obj], "on rank:", rank)
            likelihoods = []
            single_likelihoods = []

            #Produces masses within +/- 10% of the mass of the template.
            #!!! Mike suggests a single mass.
            M_1 = (np.random.random(n_systems) *
                   (RV_standard_info[Pref_template_name][1] -
                    RV_standard_info[Pref_template_name][0])
                   ) + RV_standard_info[Pref_template_name][0]

            #Generates mass ratios with minimum mass ratio of q_min (default 0.01?, should this be dependent on the primary mass? Because sometimes low mass ratios could give very low mass companions i.e. BD mass...)
            #!!! Mike suggests 0.05 due to brown dwarf desert.
            q = (np.random.random(n_systems) * (1 - q_min)) + q_min

            #from Primary masses and mass ratios, secondary masses can get calculated
            M_2 = M_1 * q

            #Get dates of the observations of the object
            jds = Obs_info[obj][:, 1].astype(float)

            #get observed data, and add in the error in the standards in quadrature.
            #This relates to the spectrograph stability
            #There is also an astrophysical error due to these objects being rapid rotators etc.
            RV_standard_err = standard_std[Temp_sptype[obj][0]]
            err = np.sqrt(Obs_info[obj][:, 3].astype(float)**2 +
                          RV_standard_err**2 + astrophysical_std**2)
            observed_rv = Obs_info[obj][:, 2].astype(float)

            #IN A LOOP iterate over random orbits:
            for orb in range(n_orb):
                #FIXME: Figure out which velocity to use!
                if Region[obj] == 'US':
                    if args.group_velocity == 'True':
                        v_group = np.random.normal(
                            US_group_vel,
                            np.sqrt(US_group_std**2 + RV_standard_err**2),
                            n_systems)
                    else:
                        v_group = np.random.normal(
                            np.mean(observed_rv),
                            np.sqrt(US_group_std**2 + RV_standard_err**2),
                            n_systems)
                else:
                    if args.group_velocity == 'True':
                        v_group = np.random.normal(
                            UCL_group_vel,
                            np.sqrt(UCL_group_std**2 + RV_standard_err**2),
                            n_systems)
                    else:
                        v_group = np.random.normal(
                            np.mean(observed_rv),
                            np.sqrt(UCL_group_std**2 + RV_standard_err**2),
                            n_systems)

                #generate orbit?
                #!!! Find just one set of orbital parameters at a time, and
                #scale the RVs. OR if you really want you can compute a, i etc
                #yourself and plug these into my_orb, but some RV scaling is still needed.
                rho, theta, normalised_vr = bo.binary_orbit(my_orb,
                                                            jds,
                                                            plot_orbit_no=orb)
                for system in range(n_systems):
                    actual_vr = bo.scale_rv(normalised_vr,
                                            my_orb['P'][orb],
                                            M_1[system],
                                            M_2[system],
                                            my_orb['i'][orb],
                                            group_velocity=v_group[system])

                    this_likelihood = bo.calc_likelihood(
                        actual_vr, observed_rv, err)
                    likelihoods.append(this_likelihood)
                    #THEN CALCULATE PROBABILITY OF BEING A SINGLE STAR
                    single_likelihoods.append(
                        bo.calc_likelihood(v_group[system], observed_rv, err))
                    del actual_vr
                    del this_likelihood
                del v_group
            del M_1
            del q
            del M_2
            del jds
            del RV_standard_err
            del err
            del observed_rv

            #THEN CALCULATE BAYES FACTOR
            bayes_factor = np.mean(likelihoods) / np.mean(single_likelihoods)
            print(("Bayes Factor: {0:5.2f} for ".format(bayes_factor) +
                   Object[obj]), "on rank", rank, "with SpT", Temp_sptype[obj])
            del likelihoods
            del single_likelihoods
            if Region[obj] == 'US':
                send_data = [0.0, float(obj), bayes_factor, Temp_sptype[obj]]
                #print "Sending data:", send_data, "from rank:", rank
                if rank == 0:
                    bayes_update = send_data
                else:
                    CW.send(send_data, dest=0, tag=rank)
            else:
                send_data = [1.0, float(obj), bayes_factor, Temp_sptype[obj]]
                #print "Sending data:", send_data, "from rank:", rank
                if rank == 0:
                    bayes_update = send_data
                else:
                    CW.send(send_data, dest=0, tag=rank)
            del send_data
            if rank == 0:
                all_bayes[int(bayes_update[0])].append(bayes_update[2])
                Obj_bayes[int(bayes_update[1])] = bayes_update[2]
                print("Updated Bayes factors retrieved from rank 0 for object",
                      Object[int(bayes_update[1])])
                f = open(args.bayes_file, 'a')
                write_string = Object[int(bayes_update[1])] + ',' + Region[int(
                    bayes_update[1])] + ',' + str(bayes_update[2]) + ',' + str(
                        bayes_update[3]) + '\n'
                f.write(write_string)
                f.close()
                del bayes_update
                del write_string

        rit = rit + 1
        if rit == size:
            sys.stdout.flush()
            CW.Barrier()
            rit = 0
            if rank == 0:

                print("UPDATING CALCULATED BAYES VALUES")
                for orit in range(1, size):
                    bayes_update = CW.recv(source=orit, tag=orit)
                    all_bayes[int(bayes_update[0])].append(bayes_update[2])
                    Obj_bayes[int(bayes_update[1])] = bayes_update[2]
                    print("Updated Bayes factors retrieved from rank", orit,
                          "for object", Object[int(bayes_update[1])])
                    f = open(args.bayes_file, 'a')
                    write_string = Object[int(
                        bayes_update[1])] + ',' + Region[int(
                            bayes_update[1])] + ',' + str(
                                bayes_update[2]) + ',' + str(
                                    bayes_update[3]) + '\n'
                    f.write(write_string)
                    f.close()
                    del bayes_update
                    del write_string
            sys.stdout.flush()
            CW.Barrier()

    sys.stdout.flush()
    CW.Barrier()
    if rank == 0:

        print("UPDATING CALCULATED BAYES VALUES")
        for orit in range(1, size):
            bayes_update = CW.recv(source=orit, tag=orit)
            all_bayes[int(bayes_update[0])].append(bayes_update[2])
            Obj_bayes[int(bayes_update[1])] = bayes_update[2]
            print("Updated Bayes factors retrieved from rank", orit,
                  "for object", Object[int(bayes_update[1])])
            f = open(args.bayes_file, 'a')
            write_string = Object[int(bayes_update[1])] + ',' + Region[int(
                bayes_update[1])] + ',' + str(bayes_update[2]) + ',' + str(
                    bayes_update[3]) + '\n'
            f.write(write_string)
            f.close()
            del bayes_update
            del write_string
    # flush and synchronize on every rank; a barrier inside the rank-0
    # branch would deadlock the other processes
    sys.stdout.flush()
    CW.Barrier()
    print("Finished Calculating bayes factors!")