Example #1
    def get_colors(self, qty):

        qty = np.power(qty / qty.max(), 1.0 / CONTRAST)

        if COLORMAP == 0:
            rgba = cm.gray(qty, alpha=ALPHA)
        elif COLORMAP == 1:
            rgba = cm.afmhot(qty, alpha=ALPHA)
        elif COLORMAP == 2:
            rgba = cm.hot(qty, alpha=ALPHA)
        elif COLORMAP == 3:
            rgba = cm.gist_heat(qty, alpha=ALPHA)
        elif COLORMAP == 4:
            rgba = cm.copper(qty, alpha=ALPHA)
        elif COLORMAP == 5:
            rgba = cm.gnuplot2(qty, alpha=ALPHA)
        elif COLORMAP == 6:
            rgba = cm.gnuplot(qty, alpha=ALPHA)
        elif COLORMAP == 7:
            rgba = cm.gist_stern(qty, alpha=ALPHA)
        elif COLORMAP == 8:
            rgba = cm.gist_earth(qty, alpha=ALPHA)
        elif COLORMAP == 9:
            rgba = cm.spectral(qty, alpha=ALPHA)

        return rgba
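The if/elif ladder above simply maps an integer COLORMAP setting onto a matplotlib colormap callable. A minimal sketch of an equivalent table-driven lookup, assuming the same module-level COLORMAP, ALPHA and CONTRAST constants (note that cm.spectral was renamed cm.nipy_spectral in matplotlib 2.2):

import numpy as np
from matplotlib import cm

# Hypothetical module-level settings mirroring the constants used above
COLORMAP, ALPHA, CONTRAST = 3, 1.0, 1.0

# Index -> colormap lookup replacing the if/elif chain
_CMAPS = [cm.gray, cm.afmhot, cm.hot, cm.gist_heat, cm.copper,
          cm.gnuplot2, cm.gnuplot, cm.gist_stern, cm.gist_earth,
          cm.nipy_spectral]  # was cm.spectral in matplotlib < 2.2


def get_colors(qty):
    # Normalize to [0, 1], apply the contrast exponent, then map to RGBA
    qty = np.power(qty / qty.max(), 1.0 / CONTRAST)
    return _CMAPS[COLORMAP](qty, alpha=ALPHA)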
Example #3
 def comp_sweep_kin(self, rng):
     '''compare kinetics from different sweeps within rng of WL
     rng = [wl1 min, wl1 max, wl2 min, wl2 max, ... wlx min, wlx max]
     author DP, last change 28/04/20'''
     idx = sup.get_idx(*rng, axis=self.wl)
     _, ax1 = plt.subplots()
     for j in range(self.n_sweeps):
         cmap = cm.gist_heat((j) / self.n_sweeps, 1)
         for i in range(int(len(rng) / 2)):
             kin = np.mean(self.sweeps[j][:, idx[2 * i]:idx[2 * i + 1]],
                           axis=1)
         if self.inc_sweeps[j]:
             ax1.plot(self._t, kin, label=j, color=cmap)
         else:
             ax1.plot(self._t,
                      kin,
                      '--',
                      linewidth=1,
                      label=f'{j} not in av',
                      color=cmap)
     # TODO works only for single rng.
     kin_av = np.mean(self.data[:, idx[2 * i]:idx[2 * i + 1]], axis=1)
     plt.plot(self._t, kin_av, linewidth=3, label='av kin')
     plt.xscale('log')
     plt.legend()
     plt.show()
Example #4
def animate_watershed(event, screen, state, settings):
    if event.type == pygame.MOUSEBUTTONDOWN:
        source = (pygame.mouse.get_pos(), )
    elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
        source = None
    else:
        return

    screen.fill((0, 0, 0))

    state.node_flows, _ = calculate_watershed(state, source=source)
    state.node_flow_items = list(state.node_flows.items())
    numpy_image = numpy.zeros(
        (settings.screen_size[1], settings.screen_size[0], 3),
        dtype=numpy.uint8)

    print(f"Num pixels to process {len(state.node_flow_items)}")

    for i in tqdm(range(len(state.node_flow_items)), desc="processing pixels"):
        node, flow = state.node_flow_items[i]
        colour = [int(i * 255) for i in cm.gist_heat(flow)[:3]]
        # print(node)
        # print(flow)
        # print(colour)
        for point in node:
            numpy_image[point[1], point[0]] = colour
        if i % 20 == 0:
            # print("Updating screen")
            write_colour_to_screen(screen, numpy_image)
            yield

    write_colour_to_screen(screen, numpy_image)
    yield
Example #5
	def plot_distribution (self):
		ccc = cm.gist_heat(np.linspace(0., 0.8, self.N_max))
		for i in np.arange(self.N_max):
			if (self.data[i,0] > 0):
				plt.plot (self.beta*1e6, self.data[i, 1:], label =  str(self.data[i,0]), color = ccc[i])
		plt.legend()
		plt.show()
Example #6
 def plot_distribution(self):
     ccc = cm.gist_heat(np.linspace(0., 0.8, self.N_max))
     for i in np.arange(self.N_max):
         if (self.data[i, 0] > 0):
             plt.plot(self.beta * 1e6,
                      self.data[i, 1:],
                      label=str(self.data[i, 0]),
                      color=ccc[i])
     plt.legend()
     plt.show()
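In both plot_distribution variants, cm.gist_heat(np.linspace(0., 0.8, N)) returns an (N, 4) RGBA array, so ccc[i] selects one distinct colour per curve. A minimal standalone sketch of the same idiom, using placeholder data:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

x = np.linspace(0, 2 * np.pi, 200)
n_curves = 6
colors = cm.gist_heat(np.linspace(0.0, 0.8, n_curves))  # (6, 4) array of RGBA rows

for i in range(n_curves):
    # One colour per curve, sampled from the dark end of the map towards the bright end
    plt.plot(x, np.sin(x + 0.5 * i), color=colors[i], label=str(i))
plt.legend()
plt.show()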
Example #7
def plot_feature_importance(Feature_importance, n):
    '''
    plot top n features
    '''
    plt.rcParams['figure.figsize'] = (12, 5)
    Feature_importance = pd.DataFrame(rf.feature_importances_, X_train.columns)
    Feature_importance.columns = ['features']
    Feature_importance = Feature_importance.sort_values(by='features',
                                                        axis=0,
                                                        ascending=False)
    colors = cm.gist_heat(np.linspace(0, 1, len(tagged_df.columns)))
    Feature_importance.head(n).plot(title="Counts of Tags",
                                    color=colors,
                                    kind='bar')
    plt.show()
Example #8
def barplot(labels, data, username):
    pos = arange(len(data))
    c = []
    for i in data:
        c.append(cm.gist_heat(i / 100.0, 20))

    pyplot.ylim(0, 100)
    pyplot.xticks(pos + 0.4, labels, color="black")
    pyplot.bar(pos, data, color=c)
    pyplot.xlabel("Class", fontsize="14", color="black")
    pyplot.ylabel("Percentage", fontsize="14", color="black")
    pyplot.title("Performance", fontsize="16", color="black")
    imn = "../student/graphs/" + username + ".png"
    pyplot.savefig(imn)
    pyplot.close()
Example #9
def animate_flow(event, screen, state: VisState,
                 settings: VisSettings) -> Generator:
    if event.type == pygame.MOUSEBUTTONDOWN:
        source = find_clicked_node(pygame.mouse.get_pos(), state)
    elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
        source = None
    else:
        return
    circle_radius = int(max(*state.float_pixel_size) * 0.35)
    screen.blit(state.pygame_img, (0, 0))
    yield

    for flows in calculate_flow(state, 200, source=source):
        # Draw edges between nodes
        edges_surface = pygame.Surface(settings.screen_size, pygame.SRCALPHA,
                                       32).convert_alpha()
        for node in flows:
            new_location = get_node_centerpoint(node)
            for neighbour in state.graph[node]:
                if neighbour > node and neighbour in flows:
                    neighbour_location = get_node_centerpoint(neighbour)
                    _draw_line(edges_surface, new_location, neighbour_location,
                               state)

        circles_surface = pygame.Surface(settings.screen_size, pygame.SRCALPHA,
                                         32).convert_alpha()
        circle_drawn = False
        for node, flow in flows.items():
            if flow != 0:
                new_location = get_node_centerpoint(node)
                circle_center = [
                    int(new_location[i] * state.float_pixel_size[i] +
                        state.center_offset[i]) for i in (0, 1)
                ]
                colour = [i * 255 for i in cm.gist_heat(flow)[:3]]
                pygame.draw.circle(circles_surface, colour, circle_center,
                                   circle_radius)
                circle_drawn = True

        screen.blit(state.pygame_img, (0, 0))
        screen.blit(edges_surface, (0, 0))
        screen.blit(circles_surface, (0, 0))
        yield

        if not circle_drawn:
            break
Example #10
    def old_real_space_analysis(self,
                                log_plot=False,
                                corrected=False,
                                n_points=10000):
        self.reps = 30
        if (self.N_values == None):
            self.N_values = np.arange(self.N - 1) + 2

        ccc = cm.gist_heat(np.linspace(0., 0.8, len(self.N_values)))

        ind = 0
        for N in self.N_values:
            print 'Evaluating N = ', N, ' msmnts'
            p0 = ProbabilityDistribution(B_max=B_max, n_points=n_points)
            for j in np.arange(self.reps):
                r = AdaptiveRamsey(N_msmnts=N, B_max=B_max)
                r.msmnt_results = self.data[j, :N]
                r.msmnt_phases = np.zeros(N)
                r.msmnt_times = self.ramsey_times[:N]

                p = r.analysis()
                p0.add(prob=p.data())
            p0.normalize()
            self.mean.append(p0.mean())
            self.variance.append((p0.variance() / B_max))
            plt.plot(p0.beta * 1e-6,
                     p0.data() / max(p0.data()),
                     linewidth=1,
                     label=str(N),
                     color=ccc[ind])
            ind = ind + 1

        plt.xlabel('Magnetic field [MHz]')
        #plt.xlim([-3, 5])
        plt.legend()
        plt.show()

        total_time = 1e-9 * (2**(self.N_values + 1) - 1)
        plt.plot(self.N_values, self.mean, 'ob')
        plt.show()

        plt.loglog(total_time * 1e6, self.variance * total_time, 'ob')
        plt.xlabel('Total msmnt time [us]')
        plt.show()
Example #11
def animate_watershed(event, screen, state: VisState,
                      settings: VisSettings) -> Generator:
    if event.type == pygame.MOUSEBUTTONDOWN:
        source = find_clicked_node(pygame.mouse.get_pos(), state)
    elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
        source = None
    else:
        return
    circle_radius = int(max(*state.float_pixel_size) * 0.35)
    screen.blit(state.pygame_img, (0, 0))

    state.node_flows, state.edge_flows = calculate_watershed(state,
                                                             source=source)
    state.node_flow_items = list(state.node_flows.items())
    node_flow_indexes = {
        state.node_flow_items[i][0]: i
        for i in range(len(state.node_flow_items))
    }

    j = 0
    for i in range(len(state.node_flow_items)):
        node, flow = state.node_flow_items[i]
        if flow > 0.0001:
            new_location = get_node_centerpoint(node)

            for neighbour in state.graph[node]:
                if neighbour in node_flow_indexes and node_flow_indexes[
                        neighbour] > i:
                    neighbour_location = get_node_centerpoint(neighbour)
                    _draw_line(screen, new_location, neighbour_location, state)

            j += 1
            circle_center = [
                int(new_location[i] * state.float_pixel_size[i] +
                    state.center_offset[i]) for i in (0, 1)
            ]
            colour = [i * 255 for i in cm.gist_heat(flow)[:3]]
            pygame.draw.circle(screen, colour, circle_center, circle_radius)
            if j % 20 == 0:
                yield
    yield
Example #12
	def old_real_space_analysis (self, log_plot=False, corrected = False, n_points=10000):
		self.reps = 30
		if (self.N_values == None):
			self.N_values = np.arange(self.N-1)+2
		
		ccc = cm.gist_heat(np.linspace(0., 0.8, len(self.N_values)))


		ind = 0
		for N in self.N_values:
			print 'Evaluating N = ', N, ' msmnts'
			p0 = ProbabilityDistribution (B_max=B_max, n_points=n_points)
			for j in np.arange(self.reps):
				r = AdaptiveRamsey(N_msmnts=N, B_max=B_max)
				r.msmnt_results = self.data [j,:N] 
				r.msmnt_phases = np.zeros(N)
				r.msmnt_times = self.ramsey_times [:N]

				p = r.analysis()
				p0.add(prob=p.data())
			p0.normalize()
			self.mean.append(p0.mean())
			self.variance.append((p0.variance()/B_max))
			plt.plot (p0.beta*1e-6, p0.data()/max(p0.data()), linewidth = 1, label = str(N), color = ccc[ind])
			ind = ind+1

		plt.xlabel ('magnetic field [MHz]')
		#plt.xlim([-3, 5])
		plt.legend()
		plt.show()
		
		total_time = 1e-9*(2**(self.N_values+1)-1)
		plt.plot (self.N_values, self.mean, 'ob')
		plt.show()

		plt.loglog (total_time*1e6, self.variance*total_time, 'ob')
		plt.xlabel ('total msmnt time [us]')
		plt.show()
Example #13
def main():
# Input asp-style ascii profile will be first argument.
# (command-line options to come later)
     prog_name = argv[0].strip('.py')

# Scale factor for plotting difference plots
     scale = 2.0
     
#     arg = get_opt(prog_name)

#     prof_file = argv[1]
#     ref_file = argv[1]
     ref_file = argv[len(argv)-1]

     input_files = []
     for file in argv[1:]:
# For some reason this works and ".append()" doesn't:
          input_files[len(input_files):] = glob.glob(file)
#          input_files.append(glob.glob(file))

     input_files.reverse()

# Remove reference file from list if it is in the plotting list
     if(input_files.count(ref_file) > 0):
          input_files.remove(ref_file)
   
     duty = get_duty('0737-3039A')
# Read in reference profile
     ref_header = read_asc_header(ref_file)
     ref_data = read_asc_prof(ref_file)
     # ref_data['i'] = norm(ref_data['i'], duty)
    
# Make first prof in list for plot to be the ref profile
     prof_data = [ref_data]
     ref_date = mjd.mjdtodate(ref_header['imjd'], \
                                   dateformat='%Y %b %d')
     date_text = [(0.5, 0.20, ref_date)]

     nobs = []      
# Now calculate difference profiles and append to plotting list
     for i_prof in np.arange(len(input_files)):
          prof_header = read_asc_header(input_files[i_prof])
          nobs.append(prof_header['obscode'])
          print 'NOBS = ', nobs
          prof_data_temp = read_asc_prof(input_files[i_prof])
          # prof_data_temp['i'] = norm(prof_data_temp['i'], duty)
          diff_prof = remove_base((prof_data_temp['i'] - ref_data['i']), duty)
          prof_data_temp['i'] = scale*(diff_prof) + i_prof + 1.2 
          print "Index = ", i_prof
          prof_data.append(prof_data_temp)         
# Set up labelling for each profile:
          prof_date = mjd.mjdtodate(prof_header['imjd'], \
                                              dateformat='%Y %b %d')
          date_text.append((0.5, i_prof+1.4+scale*0.02, prof_date))
          
     print "Date = ", date_text

     nobs_unique = list(set(nobs))
     clr=[]
     for i_nobs in range(len(nobs)):
         if(len(nobs_unique) <= 1):
             clr.append('black')
         else:
             clr.append(cm.gist_heat(float(nobs_unique.index(nobs[i_nobs]))/float(len(nobs_unique))))
          

# Do this just to make the ordering such that the first alphanumerically
# is at the top...
#      prof_data.reverse()
#      date_text.reverse()

     plot_prof(prof_data, yticks=False, canvassize=(8,10), vgrid=False, \
                    ylim=(np.min(prof_data[0]['i'])-0.1, len(input_files)+1 +0.1), \
                    figtext=date_text, linecolour=clr)



# The following means that the 2nd argument is the
# desired output plot file name
     # plot_file = 'diff_profile.png'
     plot_file = 'diff_profile.pdf'

     plt.savefig(plot_file)
Example #14
def sw(image, image_array, x=1.6, y=2.4, save_files=True, threshold=0.97):

    #CONVERTING NUMPY ARRAY TO TENSOR AND PUTTING IT IN MODEL
    image_tensor = torch.from_numpy(image_array)
    output_tensor = working_model(image_tensor)
    print("out", output_tensor.shape)
    output_squeeze = torch.squeeze(
        output_tensor, 0).detach().numpy()  # all dimensions of size 1 removed
    print(output_squeeze.shape)

    heatmap = output_squeeze[0]  # the channel or the 2nd one is eliminated
    print(heatmap, "HEATMAP", heatmap.shape)
    heatmap_thr = heatmap.copy()

    # applying threshold or a simpler version of non-max suppression
    heatmap_thr[heatmap[:, :] > threshold] = 100
    heatmap_thr[heatmap[:, :] <= threshold] = 0

    boxes = []

    #converting PIL image to DrawImage
    image_copy = image.copy()
    draw = ImageDraw.Draw(image)
    draw_copy = ImageDraw.Draw(image_copy)

    heatmap_img = Image.fromarray(np.uint8(cm.gist_heat(heatmap) * 255))
    heatmap_img.show()

    print("with threshold")
    heatmap_img_thresh = Image.fromarray(
        np.uint8(cm.gist_heat(heatmap_thr) * 255))
    heatmap_img_thresh.show()
    # print(heatmap.shape[0])
    print("---------")
    # print(np.arange(heatmap.shape[0]))
    print("----------")

    # yy-rows have the same elements, xx- columns have the same elements
    xx, yy = np.meshgrid(np.arange(heatmap.shape[1]),
                         np.arange(heatmap.shape[0]))

    # x_det and y_det contain all those indices of heatmap whose value is greater than threshold.
    x_det = (xx[heatmap[:, :] > threshold])
    y_det = (yy[heatmap[:, :] > threshold])
    print(y_det)

    # Scaling of the model
    shrink_ratio = (image.width / heatmap_img.width,
                    image.height / heatmap_img.height)

    # Appending the dimensions of the bounding boxes
    for i, j in zip(x_det, y_det):
        if not save_files:
            if i > heatmap_img.width // 1.95 and j > int(
                    heatmap_img.height / 1.95):
                boxes.append([int(i * 13), int(j * 13), int(48), int(96)])
        else:
            boxes.append([int(i * 13), int(j * 13), int(48), int(96)])

    # group intersecting rectangles
    bound_boxes = cv2.groupRectangles(boxes, 2, 1)
    bound_boxes = bound_boxes[:1]

    print("Number of Objects: ", len(bound_boxes[0]))
    print(bound_boxes)

    # draw the box on the image and its copy
    for box in bound_boxes:
        for b in box:
            print(b, "BOX")
            draw_copy.rectangle(
                (b[0], b[1], b[2] + b[0], b[1] + b[3]),
                outline='blue')  #draw.draw_rectangle(xy, ink, 0, width)

    for box in bound_boxes[0]:
        draw.rectangle(
            (box[0] - (x - 1) * box[2] // 2, box[1] - (y - 1) * box[2] // 2,
             box[2] * x + box[0] - (x - 1) * box[2] // 2, box[3] * y + box[1] -
             (y - 1) * box[2] // 2),
            outline='green')

    #saving the images
    if (save_files):
        image.save("Actual output.png")
        image_copy.save("Copy_Output.png")
        heatmap_img.save("Heatmap.png")
        heatmap_img_thresh.save("Heatmap_thresh.png")

    image.show()
Example #15
def main():
    # Input asp-style ascii profile will be first argument.
    # (command-line options to come later)
    prog_name = argv[0].strip(".py")

    # Scale factor for plotting difference plots
    scale = 2.0

    #     ref_file = argv[len(argv)-1]
    ref_file = argv[1]

    #     arg = get_opt(prog_name)
    print "n_args = ", len(argv) - 1
    #     prof_file = argv[1]

    input_files = []
    for file in argv[1:]:
        # For some reason this works and ".append()" doesn't:
        input_files[len(input_files) :] = glob.glob(file)
    #          input_files.append(glob.glob(file))

    input_files.reverse()

    if input_files.count(ref_file) > 0:
        input_files.remove(ref_file)

    # First, read in profile data file, and assign each column to a separate
    # numpy array
    #     prof_file = input_files[0]
    #     prof_header = read_asc_header(prof_file)
    #     prof_data[0] = read_asc_prof(prof_file)

    ref_header = read_asc_header(ref_file)
    ref_data = read_asc_prof(ref_file)
    # ref_data['i'] = norm(ref_data['i'], duty)

    #     if(len(input_files) > 1):
    #     for prof_file in input_files:
    prof_data = []
    date_text = []
    nobs = []
    for i_prof in np.arange(len(input_files)):
        prof_header = read_asc_header(input_files[i_prof])
        duty = get_duty(prof_header["psrname"])
        nobs.append(prof_header["obscode"])
        print "NOBS = ", nobs
        prof_data_temp = read_asc_prof(input_files[i_prof])
        #          prof_data_temp['i'] = norm(prof_data_temp['i'], duty)
        #          prof_data_temp['i'] = prof_data_temp['i'] + i_prof
        diff_prof = remove_base((prof_data_temp["i"] - ref_data["i"]), duty)
        prof_data_temp["i"] = scale * (diff_prof) + i_prof + 1.2
        print "Index = ", i_prof, ", Min = ", np.min(prof_data_temp["i"]), ", Max = ", np.max(prof_data_temp["i"])
        prof_data.append(prof_data_temp)
        # Set up labelling for each profile:
        prof_date = mjd.mjdtodate(prof_header["imjd"], dateformat="%Y %b %d")
        date_text.append((0.8, i_prof + 0.25, prof_date))

    print "Date = ", date_text

    # Make first prof in list for plot to be the ref profile
    prof_data.append(ref_data)
    ref_date = mjd.mjdtodate(ref_header["imjd"], dateformat="%Y %b %d")
    date_text.append((0.5, 0.20, ref_date))
    nobs.append(ref_header["obscode"])

    nobs_unique = list(set(nobs))
    clr = []
    for i_nobs in range(len(nobs)):
        if len(nobs_unique) <= 1:
            clr.append("black")
        else:
            clr.append(cm.gist_heat(float(nobs_unique.index(nobs[i_nobs])) / float(len(nobs_unique))))

    # Do this just to make the ordering such that the first alphanumerically
    # is at the top...
    # prof_data.reverse()
    # date_text.reverse()

    print "LENGTH of PROF DATA = ", len(prof_data)
    print "LENGTH of colour = ", len(clr)

    plot_prof(
        prof_data,
        yticks=False,
        canvassize=(8, 10),
        hgrid=False,
        vgrid=False,
        ylim=(np.min(prof_data[0]["i"]) - 0.1, len(input_files) + 0.1 + 1.0),
        figtext=date_text,
        linecolour=clr,
    )

    # The following means that the 2nd argument is the
    # desired output plot file name
    # plot_file = 'multi_profile.png'
    plot_file = "diff_profile.pdf"

    #    plt.show()
    plt.savefig(plot_file)
Example #16
def prepare_spec_image(spectrogram):
    spectrogram = (spectrogram - np.min(spectrogram)) / ((np.max(spectrogram)) - np.min(spectrogram))
    spectrogram = np.flip(spectrogram, axis=0)
    return np.uint8(cm.gist_heat(spectrogram) * 255)
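prepare_spec_image rescales the spectrogram to [0, 1], flips it vertically, and maps it through cm.gist_heat into a uint8 RGBA array. A hedged usage sketch, assuming the function above is in scope and PIL is available; the input array and output file name are placeholders:

import numpy as np
from PIL import Image

spec = np.random.rand(128, 400)                     # stand-in spectrogram (freq x time)
rgba = prepare_spec_image(spec)                     # uint8 RGBA, shape (128, 400, 4)
Image.fromarray(rgba).save("spectrogram_heat.png")  # placeholder output name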
    plt.xlabel("Being Types (sorted based on profitable traits)")
    plt.ylabel("Number of Beings")
    #plt.savefig("plot_final.png")
    plt.show()
    # %%
    fig = pylab.figure()
    ax = Axes3D(fig)
    x = list()
    y = list()
    for day in range(10):
        for species in range(types_of_beings):
            y.append(day)
            x.append(species)
    z = [0 for i in range(10 * types_of_beings)]
    dx = 1
    dy = 1
    dz = np.array(barHeights)
    x = np.array(x)
    y = np.array(y)
    ax.set_xlabel("Species Index")
    ax.set_ylabel("Time")
    ax.set_zlabel("Population")
    values = (dz - dz.min()) / np.float_(dz.max() - dz.min())
    colors = cm.gist_heat(values)
    #colors = cm.gnuplot2(values)
    #colors = cm.rainbow(values)
    ax.bar3d(x, y, z, dx, dy, dz, shade=True, color=colors)
    plt.show()

# %%
Example #18
def field_list_to_json(fields, filename=None):
    """ Given a list of fields, create a FeatureCollection JSON file to use with the 
        interactive survey coverage viewer.
        
        The final structure should look like this, where "name" is the field id,
        and coordinates are a list of the coordinates of the 4 corners of the field.
        
            {"type" : "FeatureCollection",
                "features": [
                    {"type" : "Feature",
                     "properties" : { "name" : "2471"},
                                      "geometry": { "type" : "Polygon",
                                                    "coordinates" : [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]
                                                  },
                     "id":"2471"}, etc.
                            ]
            }      
        
        Parameters
        ----------
        fields : list, iterable
            Must be a list of PTF Field objects. See: ptf.photometricdatabase.Field
        filename : str (optional)
            If filename is specified, will save the JSON to the file. Otherwise, return the JSON
    """
    
    final_dict = dict(type="FeatureCollection", features=[])
    
    # Minimum and maximum number of observations for all fields
    min_obs = 1
    
    # There is one crazy outlier: the Orion field 101001, so I remove that for calculating the max...
    num_exposures = np.array([field.number_of_exposures for field in fields if field.id != 101001])
    max_obs = num_exposures.max()
    
    # Create a matplotlib lognorm scaler between these values
    scaler = matplotlib.colors.LogNorm(vmin=min_obs, vmax=max_obs)
    
    for field in fields:
        if field.number_of_exposures < 1:
            logger.debug("Skipping field {}".format(field))
            continue
        
        try:
            field.ra
            field.dec
        except AttributeError:
            this_field = all_fields[all_fields["id"] == field.id]
            if len(this_field) != 1: 
                logger.warning("Field {} is weird".format(field))
                continue
                
            field.ra = g.RA.fromDegrees(this_field["ra"][0])
            field.dec = g.Dec.fromDegrees(this_field["dec"][0])
        
        if field.dec.degrees < -40:
            logger.warning("Field {} is weird, dec < -40".format(field))
            continue
        
        feature = field_to_feature(field)
        
        # Determine color of field
        #rgb = cm.autumn(scaler(field.number_of_exposures))
        rgb = cm.gist_heat(scaler(field.number_of_exposures))
        feature["properties"]["color"] = mc.rgb2hex(rgb)
        feature["properties"]["alpha"] = scaler(field.number_of_exposures)*0.75 + 0.05
        feature["properties"]["number_of_observations"] = str(field.number_of_exposures)
        feature["properties"]["ra"] = "{:.5f}".format(field.ra.degrees)
        feature["properties"]["dec"] = "{:.5f}".format(field.dec.degrees)
        
        final_dict["features"].append(feature)
    
    blob = json.dumps(final_dict)
    
    if filename != None:
        f = open(filename, "wb")
        f.write(blob)
        f.close()
        
        return
    else:
        return blob
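The colour assignment above follows a pattern that recurs in Examples #20 and #22: a matplotlib.colors.LogNorm instance is used as a callable to squash the exposure count into [0, 1], cm.gist_heat turns that value into RGBA, and rgb2hex gives a web colour string. A minimal sketch of just that step, with illustrative vmin/vmax:

import matplotlib.colors as mc
from matplotlib import cm

scaler = mc.LogNorm(vmin=1, vmax=2576)     # illustrative observation-count limits
nexposures = 150
rgba = cm.gist_heat(scaler(nexposures))    # scaler(...) lies in [0, 1] for in-range counts
print(mc.rgb2hex(rgba), scaler(nexposures) * 0.75 + 0.05)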
Example #19
def main():
# Input asp-style ascii profile will be first argument.
# (command-line options to come later)
     prog_name = argv[0].strip('.py')
     
#     arg = get_opt(prog_name)
     print 'n_args = ', len(argv) - 1
#     prof_file = argv[1]

     input_files = []
     for file in argv[1:]:
# For some reason this works and ".append()" doesn't:
          input_files[len(input_files):] = glob.glob(file)
#          input_files.append(glob.glob(file))

     input_files.reverse()
          
     prof_data = []
#     prof_date = []
     date_text = []
# First, read in profile data file, and assign each column to a separate
# numpy array     
#     prof_file = input_files[0]
#     prof_header = read_asc_header(prof_file)
#     prof_data[0] = read_asc_prof(prof_file)
      

#     if(len(input_files) > 1):
#     for prof_file in input_files:
     nobs = []
     for i_prof in np.arange(len(input_files)):
          prof_header = read_asc_header(input_files[i_prof])
          duty = get_duty(prof_header['psrname'])
          nobs.append(prof_header['obscode'])
          print 'NOBS = ', nobs
          prof_data_temp = read_asc_prof(input_files[i_prof], ionly=True)
          prof_data_temp['i'] = norm(prof_data_temp['i'], duty)
          prof_data_temp['i'] = prof_data_temp['i'] + i_prof
          print "Index = ", i_prof, \
                   ", Min = ", np.min(prof_data_temp['i']), \
                   ", Max = ", np.max(prof_data_temp['i']) 
          prof_data.append(prof_data_temp)         
# Set up labelling for each profile:
          prof_date = mjd.mjdtodate(prof_header['imjd'], \
                                              dateformat='%Y %b %d')
          date_text.append((0.8, i_prof+0.25, prof_date))
          
     print "Date = ", date_text
     
     nobs_unique = list(set(nobs))
     clr=[]
     for i_nobs in range(len(nobs)):
         if(len(nobs_unique) <= 1):
             clr.append('black')
         else:
             clr.append(cm.gist_heat(float(nobs_unique.index(nobs[i_nobs]))/float(len(nobs_unique))))
          

# Do this just to make the ordering such that the first alphanumerically
# is at the top...
     # prof_data.reverse()
     # date_text.reverse()

     plot_prof(prof_data, yticks=False, canvassize=(8,10), \
                    hgrid=False, vgrid=False, \
                    ylim=(np.min(prof_data[0]['i'])-0.1, len(input_files)+0.1),
                    figtext=date_text, linecolour=clr)



# The following means that the 2nd argument is the
# desired output plot file name
     # plot_file = 'multi_profile.png'
     plot_file = 'multi_profile.png'

#    plt.show()
     plt.savefig(plot_file)
Example #20
def field_list_to_json(stats_filename, filename=None):
    """ Given a list of fields, create a FeatureCollection JSON file to use with the
        interactive survey coverage viewer.

        The final structure should look like this, where "name" is the field id,
        and coordinates are a list of the coordinates of the 4 corners of the field.

            {"type" : "FeatureCollection",
                "features": [
                    {"type" : "Feature",
                     "properties" : { "name" : "2471"},
                                      "geometry": { "type" : "Polygon",
                                                    "coordinates" : [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]
                                                  },
                     "id":"2471"}, etc.
                            ]
            }

        Parameters
        ----------
        fields : list, iterable
            Must be a list of PTF Field objects. See: ptf.photometricdatabase.Field
        filename : str (optional)
            If filename is specified, will save the JSON to the file. Otherwise, return the JSON
    """

    data = np.genfromtxt(stats_filename,
                         dtype=None,
                         names=['field_id', 'mjd', 'ra', 'dec'])

    # final feature dictionary
    final_dict = dict(type="FeatureCollection", features=[])

    # Minimum and maximum number of observations for all fields
    min_obs = 1
    max_obs = 2576

    # Create a matplotlib lognorm scaler between these values
    scaler = matplotlib.colors.LogNorm(vmin=min_obs, vmax=max_obs)

    for field_id in np.unique(data['field_id']):
        field_data = data[data['field_id'] == field_id]
        nexposures = len(field_data)
        ra = field_data['ra'].mean()
        dec = field_data['dec'].mean()
        mjds = field_data['mjd']

        feature = field_to_feature(field_id, ra, dec)

        # Determine color of field
        #rgb = cm.autumn(scaler(field.number_of_exposures))
        rgb = cm.gist_heat(scaler(nexposures))
        feature["properties"]["color"] = mc.rgb2hex(rgb)
        feature["properties"]["alpha"] = scaler(nexposures) * 0.75 + 0.05
        feature["properties"]["nexposures"] = str(nexposures)
        feature["properties"]["ra"] = "{:.5f}".format(ra)
        feature["properties"]["dec"] = "{:.5f}".format(dec)
        feature["properties"]["mjds"] = list(mjds)
        final_dict["features"].append(feature)

    blob = json.dumps(final_dict)

    if filename != None:
        f = open(filename, "wb")
        f.write(blob)
        f.close()

        return
    else:
        return blob
Example #21
def plot_widths(width_data, canvassize=None, msize=None, \
                     xval='year', yunits='phase', \
                     xticks=True, yticks=True, xlabel=True, ylabel=True, \
                     sym='o', colour=None, mf_colour=None, me_colour=None, \
                     csize=3, xlim=None, ylim=None, \
                     figtext=None, gridlines=None,
                     ticklabelsize=18, axislabelsize=18):

     possible_xval = ['mjd', 'mjd0', 'year', 'serial']
     if(possible_xval.count(xval) == 0):
          print "There is not value "+xval+ \
              " in the data that we can plot on x axis. Exiting."
          exit()

     possible_yunits = ['phase', 'deg', 'rad', 'cos_phi']
     if(possible_yunits.count(yunits) == 0):
          print yunits+ " is not an option for y axis units. Exiting."
          exit()

          

     # If we have just the one set of residuals, cast it as a one-element
     # list to make things nice and general
     if(type(width_data) is dict):
          width_data = [width_data] 

# mjd0 will subtract the nearest 100 of the smallest mjd value from the 
# mjd arrays
     if(xval=='mjd0'):
          min_mjd = np.array([])
          for width in width_data:
               min_mjd = np.append(min_mjd, np.amin(width['mjd']))
          mjdint = np.floor(np.amin(min_mjd))
          for width in width_data:
               width['mjd0'] = width['mjd'] - mjdint
      
     if(xval=='year'):
          for width in width_data:
               # date_out = [mjd.mjdtodate(m) for m in width['mjd']]
               width['year'] = [mjd.mjdtoyear(m) for m in width['mjd']]
               # width['year'] = [d.year + d.day/365. + \
               #                    d.hour/(365.*24.) + \
               #                    d.minute/(365.*24.*60.) + \
               #                    d.second/(365.*24.*60.*60.) \
               #                    for d in date_out]
     
# Set up plot limits now
     xmin = np.amin(width_data[0][xval])-0.003
     xmax = np.amax(width_data[0][xval])+0.003
     ymin = np.amin(width_data[0]['width'] - width_data[0]['werr'])
     ymax = np.amax(width_data[len(width_data)-1]['width'] + \
                         width_data[len(width_data)-1]['werr'])
     xspan = abs(xmax - xmin)
     yspan = abs(ymax - ymin)
          

# Set up the plot:
     fig = plt.figure(figsize=canvassize)
     ax = fig.add_axes([0.12, 0.14, 0.86, 0.83])
     ax.xaxis.set_tick_params(labelsize=ticklabelsize, pad=8)
     ax.yaxis.set_tick_params(labelsize=ticklabelsize, pad=8)
     if(xlim==None):
          ax.set_xlim(xmin - 0.01*xspan, xmax + 0.01*xspan)
     else:
          ax.set_xlim(xlim)
     if(ylim==None):
          ax.set_ylim(ymin - 0.01*yspan, ymax + 0.02*yspan)
     else:
          ax.set_ylim(ylim)

     if (xlabel):          
          if(xval=='serial'):               
               ax.set_xlabel('Serial number', fontsize=axislabelsize, labelpad=12)
          elif(xval=='mjd'):
               ax.set_xlabel('MJD', fontsize=axislabelsize, labelpad=12)
          elif(xval=='mjd0'):
               ax.set_xlabel('MJD - {:d}'.format(int(mjdint)), fontsize=axislabelsize, labelpad=12)
          elif(xval=='year'):
               # Set formatting for years so that they have %d formatting:
               xmajorFormatter = FormatStrFormatter('%d')
               ax.set_xlabel('Year', fontsize=axislabelsize, labelpad=12)
               ax.xaxis.set_major_formatter(xmajorFormatter)
               
     if (ylabel):
          ax.set_ylabel('Pulse width (degrees)', fontsize=axislabelsize)#, labelpad=8)

     if(not xticks):
          for tick in ax.xaxis.get_major_ticks():
               tick.label1On = False
               tick.label2On = False

     if(not yticks):
          for tick in ax.yaxis.get_major_ticks():
               tick.label1On = False
               tick.label2On = False

     

     for i_width in range(len(width_data)):
          width = width_data[i_width]
          # Get colours
          if(colour!=None):
              if (type(colour) is list):
                  clr = colour[i_width]
              else:
                  clr = colour
          else:
              # Set up automated colours
              if (len(width_data)==1):
                  clr = 'black'
              else:
                  clr = cm.gist_heat(float(i_width)/float(len(width_data))) 
              
          if (type(mf_colour) is list):
               mf_clr = mf_colour[i_width]
          else:
               mf_clr = clr
          if (type(me_colour) is list):
               me_clr = me_colour[i_width]
          else:
               me_clr = clr
          #ax.plot(res[xval], res['res'], 'o', markersize=msize, color=col)
          if(gridlines!=None):
               for ycoord in gridlines:
                    ax.axhline(ycoord, linestyle='--', color='black', \
                                    linewidth=0.4)
# Change to appropriate units: 
          if (yunits=='deg'):
               # Set formatting for degrees so that they have correct formatting:
               ymajorFormatter = FormatStrFormatter('%5.1f')
               ax.yaxis.set_major_formatter(ymajorFormatter)
               y_plot = width['width']*360.
               yerr_plot = width['werr']*360.
          elif (yunits=='rad'):
               y_plot = width['width']*2.*np.pi
               yerr_plot = width['werr']*2.*np.pi               
          elif (yunits=='cos_phi'):
               y_plot = cos(width['width']/2.)
               yerr_plot = cos(width['werr']/2.)
          else: # Just in units of phase as given in data file
               y_plot = width['width']
               yerr_plot = width['werr']

          if(i_width==0):
               xmin = np.amin(width[xval])
               xmax = np.amax(width[xval])
               ymin = np.amin(y_plot-yerr_plot)
               ymax = np.amax(y_plot+yerr_plot)               
          else:
               xmin = np.amin(np.append(width[xval], xmin))
               xmax = np.amax(np.append(width[xval], xmax))
               ymin = np.amin(np.append(y_plot-yerr_plot, ymin))
               ymax = np.amax(np.append(y_plot+yerr_plot, ymax))

          xspan = abs(xmax - xmin)
          yspan = abs(ymax - ymin)



# Overplot error bars.  Use fmt='none' to tell errorbar not to plot points:
          ax.plot(width[xval], y_plot, sym, color=clr, mfc=mf_clr, mec=me_clr)
          ax.errorbar(width[xval], y_plot, yerr=yerr_plot, \
                           capsize=csize, fmt='none', ecolor=clr, \
                           markersize=msize)
          
     if(xlim==None):
          ax.set_xlim(xmin - 0.025*xspan, xmax + 0.025*xspan)
     else:
          ax.set_xlim(xlim)
     if(ylim==None):
          ax.set_ylim(ymin - 0.1*yspan, ymax + 0.1*yspan)
     else:
          ax.set_ylim(ylim)

# Figure text must be a list of tuples: [(x, y, text), (x, y, text), ...]
     if(figtext!=None):
          for txt in figtext:
               ax.text(txt[0], txt[1], txt[2], fontsize=10, \
                            horizontalalignment='center', \
                            verticalalignment='center')


#     plt.savefig('test_widths.png')

     return ax
Example #22
def field_list_to_json(stats_filename, filename=None):
    """ Given a list of fields, create a FeatureCollection JSON file to use with the
        interactive survey coverage viewer.

        The final structure should look like this, where "name" is the field id,
        and coordinates are a list of the coordinates of the 4 corners of the field.

            {"type" : "FeatureCollection",
                "features": [
                    {"type" : "Feature",
                     "properties" : { "name" : "2471"},
                                      "geometry": { "type" : "Polygon",
                                                    "coordinates" : [[ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ]]
                                                  },
                     "id":"2471"}, etc.
                            ]
            }

        Parameters
        ----------
        fields : list, iterable
            Must be a list of PTF Field objects. See: ptf.photometricdatabase.Field
        filename : str (optional)
            If filename is specified, will save the JSON to the file. Otherwise, return the JSON
    """

    data = np.genfromtxt(stats_filename, dtype=None, names=['field_id','mjd','ra','dec'])

    # final feature dictionary
    final_dict = dict(type="FeatureCollection", features=[])

    # Minimum and maximum number of observations for all fields
    min_obs = 1
    max_obs = 2576

    # Create a matplotlib lognorm scaler between these values
    scaler = matplotlib.colors.LogNorm(vmin=min_obs, vmax=max_obs)

    for field_id in np.unique(data['field_id']):
        field_data = data[data['field_id'] == field_id]
        nexposures = len(field_data)
        ra = field_data['ra'].mean()
        dec = field_data['dec'].mean()
        mjds = field_data['mjd']

        feature = field_to_feature(field_id, ra, dec)

        # Determine color of field
        #rgb = cm.autumn(scaler(field.number_of_exposures))
        rgb = cm.gist_heat(scaler(nexposures))
        feature["properties"]["color"] = mc.rgb2hex(rgb)
        feature["properties"]["alpha"] = scaler(nexposures)*0.75 + 0.05
        feature["properties"]["nexposures"] = str(nexposures)
        feature["properties"]["ra"] = "{:.5f}".format(ra)
        feature["properties"]["dec"] = "{:.5f}".format(dec)
        feature["properties"]["mjds"] = list(mjds)
        final_dict["features"].append(feature)

    blob = json.dumps(final_dict)

    if filename != None:
        f = open(filename, "wb")
        f.write(blob)
        f.close()

        return
    else:
        return blob
Example #23
    pos1[:, 0:2] = -1+2*np.random.rand(5000, 2)
    pos2[:, 0:2] = -1+2*np.random.rand(10000, 2)

    r2 = np.sqrt(pos2[:, 0]**2+pos2[:, 1]**2)
    k2, = np.where(r2 < 0.5)
    pos2 = pos2[k2, :]

    qv1 = QuickView(pos1.T, np.ones(len(pos1)),
                    r='infinity', logscale=False, plot=False,
                    extent=[-1, 1, -1, 1], x=0, y=0, z=0)
    qv2 = QuickView(pos2.T, np.ones(len(pos2)),
                    r='infinity', logscale=False, plot=False,
                    extent=[-1, 1, -1, 1], x=0, y=0, z=0)

    image1 = cm.gist_heat(get_normalized_image(qv1.get_image()))
    image2 = cm.gist_stern(get_normalized_image(qv2.get_image()))

    fig = plt.figure(1, figsize=(10, 5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)

    blend = Blend(image1, 1.0*image2)
    screen = blend.Screen()
    overlay = blend.Overlay()

    ax1.imshow(screen, origin='lower', extent=qv1.get_extent())
    ax1.set_title('Screen')
    ax2.imshow(overlay, origin='lower', extent=qv1.get_extent())
    ax2.set_title('Overlay')
    plt.show()
Example #24
    pos2[:,0:2] = -1+2*np.random.rand(10000,2)


    r2 = np.sqrt(pos2[:,0]**2+pos2[:,1]**2)
    k2, = np.where(r2 < 0.5)
    pos2 = pos2[k2,:]

    qv1 = QuickView(pos1.T, np.ones(len(pos1)),
                    r='infinity', logscale=False, plot=False,
                    extent=[-1,1,-1,1], x=0, y=0, z=0)
    qv2 = QuickView(pos2.T, np.ones(len(pos2)),
                    r='infinity', logscale=False, plot=False,
                    extent=[-1,1,-1,1], x=0, y=0, z=0)


    image1 = cm.gist_heat(get_normalized_image(qv1.get_image()))
    image2 = cm.gist_stern(get_normalized_image(qv2.get_image()))


    fig = plt.figure(1, figsize=(10,5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)

    blend = Blend(image1, 1.0*image2)
    screen  = blend.Screen()
    overlay = blend.Overlay()

    ax1.imshow(screen, origin='lower', extent=qv1.get_extent())
    ax1.set_title('Screen')
    ax2.imshow(overlay, origin='lower', extent=qv1.get_extent())
    ax2.set_title('Overlay')