Example #1
def _split_variable(self):
    """Fetch data, split into one request per variable."""
    # One output file per variable, each covering the full year range.
    outputfiles = [
        self._define_outputfilename(var, self.years)
        for var in self.variables
    ]
    # Replicate the year list so the three argument sequences align pairwise.
    years = len(outputfiles) * [self.years]
    if not self.threads:
        pool = Pool()
    else:
        pool = Pool(nodes=self.threads)
    pool.map(self._getdata, self.variables, years, outputfiles)
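A note on the pattern above: unlike the standard library's multiprocessing.Pool, pathos pools accept multiple iterables in map, passing one element from each sequence to every call. Below is a minimal, self-contained sketch of that behavior using pathos' ProcessingPool (whose Pool(nodes=...) signature matches the snippet); the fetch function and its arguments are illustrative stand-ins for self._getdata, not part of the original.

from pathos.multiprocessing import ProcessingPool as Pool

def fetch(variable, years, outputfile):
    # Stand-in for self._getdata: one request per (variable, years, file).
    return "%s %s -> %s" % (variable, years, outputfile)

if __name__ == "__main__":
    variables = ["temperature", "precipitation"]
    years = len(variables) * [[2000, 2001]]  # same year range for each variable
    outputfiles = ["era5_%s.nc" % var for var in variables]
    pool = Pool(nodes=2)
    # One call per (variable, years, outputfile) triple.
    print(pool.map(fetch, variables, years, outputfiles))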
Example #2
def _split_variable_yr(self):
    """Fetch data, split into one request per variable and year."""
    outputfiles = []
    variables = []
    for var in self.variables:
        # One output file per (variable, year) pair; note the append (+=),
        # so files for earlier variables are not overwritten.
        outputfiles += [
            self._define_outputfilename(var, [yr]) for yr in self.years
        ]
        variables += len(self.years) * [var]
    # One single-year list per request, in the same order as outputfiles.
    years = len(self.variables) * [[yr] for yr in self.years]
    if not self.threads:
        pool = Pool()
    else:
        pool = Pool(nodes=self.threads)
    pool.map(self._getdata, variables, years, outputfiles)
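The loop above builds three aligned sequences covering the (variable, year) cross product. An equivalent, arguably clearer construction with itertools.product is sketched below; all names are hypothetical.

from itertools import product

variables_in = ["temperature", "wind"]
years_in = [2000, 2001, 2002]

pairs = list(product(variables_in, years_in))
variables = [var for var, yr in pairs]
years = [[yr] for var, yr in pairs]  # single-year list per request
outputfiles = ["era5_%s_%d.nc" % (var, yr) for var, yr in pairs]
assert len(variables) == len(years) == len(outputfiles)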
Example #3
    def filter_results(self,
                       im_array,
                       results,
                       image_times,
                       model,
                       psf_sigma=1.0,
                       batch_size=32,
                       chunk_size=10000):
        """
        Use a Keras neural network model to detect real objects based upon
        the coadded postage stamps of those objects. Filter and keep only
        actual objects going forward.

        Parameters
        ----------
        im_array: numpy array, required
            The masked original images. See loadMaskedImages
            in searchImage.py.

        results: numpy recarray, required
            The results output from findObjects in searchImage.

        image_times: numpy array, required
            An array containing the image times in DAYS, with the first
            image at time 0.
            Note: this is different than other methods, so the units
            may change. Watch this documentation.

        model: keras model, required
            A previously trained model loaded from an hdf5 file.

        psf_sigma: float
            Sigma of the image PSF, in pixels.

        batch_size: int
            Batch size for keras predict.

        chunk_size: int
            Number of result rows processed per chunk.

        Returns
        -------
        filtered_results: numpy recarray
            An edited version of results with only the rows where
            true objects were classified.
        """

        keep_objects = np.array([])
        total_chunks = np.ceil(len(results) / float(chunk_size))
        chunk_num = 1

        enumerated_results = list(enumerate(results))
        self.im_array = im_array
        self.image_times = image_times
        self.psf_sigma = psf_sigma

        # A previous implementation looped over results in chunks here: it
        # built 25x25 postage stamps per candidate, thresholded each stamp at
        # half its maximum, and computed image moments (centroid, central,
        # normalized, and Hu moments) to derive a circularity statistic,
        # keeping candidates with circularity > 0.6, a centroid near the
        # stamp center, and a source area above 3 pixels and below nine times
        # the PSF FWHM area. That test appears to live in
        # self.circularity_test, which is mapped in parallel below.
        pool = Pool(nodes=8)
        test_classes = pool.map(self.circularity_test, enumerated_results)
        # Transpose so row 0 holds result indices and row 1 holds scores.
        test_classes = np.array(test_classes).T
        # Keep the result indices whose circularity score exceeds 0.5.
        keep_idx = test_classes[0][np.where(np.array(test_classes[1]) > .5)]
        print(keep_idx)
        keep_objects = keep_idx
        print("Finished chunk %i of %i" % (chunk_num, total_chunks))
        chunk_num += 1

        filtered_results = results[np.array(keep_objects, dtype=int)]

        return filtered_results
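circularity_test itself is not part of this snippet; the hypothetical stand-in below only illustrates the contract the filtering relies on: each mapped call receives one (index, row) pair and returns (index, score), and rows scoring above 0.5 survive.

import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool

def score_row(enumerated_row):
    # Toy classifier: (index, row) in, (index, score) out.
    idx, row = enumerated_row
    return idx, 1.0 if row["flux"] > 100.0 else 0.0

if __name__ == "__main__":
    results = np.array([(50.0,), (150.0,)], dtype=[("flux", float)])
    scores = np.array(Pool(nodes=2).map(score_row, list(enumerate(results)))).T
    keep_idx = scores[0][scores[1] > 0.5].astype(int)
    print(results[keep_idx])  # only the row that scored above threshold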
Example #4
# Excerpt from a TF1.x training script; assumes tensorflow as tf, numpy as np,
# deepcopy, and a pathos Pool are imported, and that the graph and the names
# normalize_rel_op, NUM_EPOCHS, Type2Data, and TOT_RELATIONS are defined
# earlier (not shown).
saver = tf.train.Saver(max_to_keep=4)
# =============================================================================
#  Initialize the variables (i.e. assign their default value)
# =============================================================================
init = tf.global_variables_initializer()

# =============================================================================
#  Start Training
# =============================================================================
# Start a new TF session
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
conf.log_device_placement = False  # enable for device-placement debugging
conf.allow_soft_placement = True
P = Pool()
with tf.Session(config=conf) as sess:

    # Run the initializer
    sess.run(init)
    sess.run(normalize_rel_op)
    # Training
    NOW_DISPLAY = False
    epoch = 1
    step = 1
    temp_Type2Data = deepcopy(Type2Data)
    mean_losses = np.zeros([5])
    mean_delta = 0
    while epoch < NUM_EPOCHS:
        if sum(map(len, temp_Type2Data.values())) < 0.1 * TOT_RELATIONS:
            epoch += 1
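The excerpt ends mid-loop. For orientation, here is a minimal, self-contained sketch of the TF1-style session, config, and checkpoint pattern it follows; the toy graph and all model names below are assumptions, not from the original.

import numpy as np
import tensorflow as tf  # TF1.x API

x = tf.placeholder(tf.float32, [None, 4])
w = tf.Variable(tf.zeros([4, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - 1.0))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
saver = tf.train.Saver(max_to_keep=4)
init = tf.global_variables_initializer()

conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
with tf.Session(config=conf) as sess:
    sess.run(init)
    for epoch in range(3):
        # One training step per epoch on dummy data, then checkpoint.
        _, cur_loss = sess.run([train_op, loss],
                               feed_dict={x: np.ones((8, 4), np.float32)})
        saver.save(sess, "./model.ckpt", global_step=epoch)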