Example #1
def merge_csvs(in_list):
    for idx, in_file in enumerate(in_list):
        try:
            in_array = np.loadtxt(in_file, delimiter=',')
        except ValueError as ex:
            try:
                in_array = np.loadtxt(in_file, delimiter=',', skiprows=1)
            except ValueError as ex:
                with open(in_file, 'r') as first:
                    header_line = first.readline()

                header_list = header_line.split(',')
                n_cols = len(header_list)
                try:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1,
                        usecols=list(range(1, n_cols))
                    )
                except ValueError as ex:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1, usecols=list(range(1, n_cols - 1)))
        if idx == 0:
            out_array = in_array
        else:
            out_array = np.dstack((out_array, in_array))
    out_array = np.squeeze(out_array)
    iflogger.info('Final output array shape:')
    iflogger.info(np.shape(out_array))
    return out_array
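A usage sketch for merge_csvs, purely illustrative: it assumes numpy is imported as np and that iflogger is a configured logger; the file names below are hypothetical. The nested try/except blocks fall back step by step: load the file as-is, then skip a header row, then additionally drop the first column, and finally drop the last column as well.

# hypothetical CSV files with matching shapes
stacked = merge_csvs(['subject01.csv', 'subject02.csv', 'subject03.csv'])
# one 2-D array per file, stacked along a third axis and then squeezed
print(stacked.shape)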
Example #2
 def __init__(self, width, height):
     self.width = width
     self.height = height
     self.table = [
         [[' ', 2] for y in range(width)]
         for i in range(height)
     ]
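Only the constructor is shown above; wrapping it in a hypothetical Board class makes the resulting layout easy to check (height rows, each holding width cells initialised to [' ', 2]):

class Board:
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.table = [
            [[' ', 2] for y in range(width)]
            for i in range(height)
        ]

b = Board(3, 2)          # 3 columns, 2 rows
assert len(b.table) == 2 and len(b.table[0]) == 3
assert b.table[0][0] == [' ', 2]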
Example #3
def relaxed_solver(prob, table, redCosts, target):
    """
    Generate columns (tables) with negative reduced costs
    """
    dvs = []
    neg_guests = [g for g in guests
                       if redCosts[x[(g,table)]] < 0.0]
    neg_guests.sort()
    # find all possible tables between two end points
    for pos1, pos2 in [(i, j) for i in range(len(neg_guests))
                            for j in range(len(neg_guests))
                                if j > i]:
        # find the suitable guests that can be included in between the end 
        # points
        candidate_guests = [(redCosts[x[(g,table)]], g)
                                    for g in neg_guests[pos1+1:pos2]]
        candidate_guests.sort()
        # pick the best guests (ie those with the negative reduced costs)
        possible_table_inner = [g 
                            for _, g in candidate_guests[:max_table_size-2]]
        #This is the best table between the end points
        possible_table = [neg_guests[pos1]] + possible_table_inner +\
                            [neg_guests[pos2]]
        # calculate the sum of the reduced costs for each of the guests
        neg_cost = sum(redCosts[x[(g, table)]] for g in possible_table)
        table_happiness = happiness(possible_table[0], possible_table[-1])
        rc = neg_cost + table_happiness * redCosts[happy[table]]
        var_values = [(x[(g, table)], 1) 
                      for g in possible_table]
        var_values.append((happy[table], table_happiness))
        dvs.append(dict(var_values))
        if debug_print:
            print('Table: ', table, 'Happiness: ', table_happiness, 'RC: ', rc)
    return DipSolStatOptimal, dvs
Example #4
  def test_progress(self):
    file_name, expected_data = write_data(10)
    assert len(expected_data) == 10
    source = TextSource(file_name, 0, CompressionTypes.UNCOMPRESSED, True,
                        coders.StrUtf8Coder())
    splits = list(source.split(desired_bundle_size=100000))
    assert len(splits) == 1
    fraction_consumed_report = []
    split_points_report = []
    range_tracker = splits[0].source.get_range_tracker(
        splits[0].start_position, splits[0].stop_position)
    for _ in splits[0].source.read(range_tracker):
      fraction_consumed_report.append(range_tracker.fraction_consumed())
      split_points_report.append(range_tracker.split_points())

    self.assertEqual(
        [float(i) / 10 for i in range(0, 10)], fraction_consumed_report)
    expected_split_points_report = [
        ((i - 1), iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
        for i in range(1, 10)]

    # At last split point, the remaining split points callback returns 1 since
    # the expected position of next record becomes equal to the stop position.
    expected_split_points_report.append((9, 1))

    self.assertEqual(
        expected_split_points_report, split_points_report)
Example #5
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
    """
    Sets the cpu affinity for the supplied processes.
    Requires the optional psutil module.
    :param int n: affinity
    :param list process_ids: a list of pids
    :param bool actual: Test workaround for Travis not supporting cpu affinity
    """
    # check if we have the psutil module
    if not psutil:
        logger.warning("Skipping cpu affinity because psutil was not found.")
        return
    # check if the platform supports cpu_affinity
    if actual and not hasattr(psutil.Process(process_ids[0]), "cpu_affinity"):
        logger.warning("Faking cpu affinity because it is not supported on this platform")
        actual = False
    # get the available processors
    cpu_list = list(range(psutil.cpu_count()))
    # affinities of 0 or gte cpu_count, equals to no affinity
    if not n or n >= len(cpu_list):
        return
    # spread the workers over the available processors.
    index = 0
    for pid in process_ids:
        affinity = []
        for k in range(n):
            if index == len(cpu_list):
                index = 0
            affinity.append(cpu_list[index])
            index += 1
        if psutil.pid_exists(pid):
            p = psutil.Process(pid)
            if actual:
                p.cpu_affinity(affinity)
            logger.info(_("{} will use cpu {}").format(pid, affinity))
Example #6
def run_benchmark(num_runs=50, input_per_source=4000, num_sources=4):
  print("Number of runs:", num_runs)
  print("Input size:", num_sources * input_per_source)
  print("Sources:", num_sources)

  times = []
  for i in range(num_runs):
    counter_factory = CounterFactory()
    state_sampler = statesampler.StateSampler('basic', counter_factory)
    state_sampler.start()
    with state_sampler.scoped_state('step1', 'state'):
      si_counter = opcounters.SideInputReadCounter(
          counter_factory, state_sampler, 'step1', 1)
      si_counter = opcounters.NoOpTransformIOCounter()
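      # NOTE: the SideInputReadCounter created above is immediately replaced by
      # a no-op counter, so side-input reads are not tracked in this benchmark.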
      sources = [
          FakeSource(long_generator(i, input_per_source))
          for i in range(num_sources)]
      iterator_fn = sideinputs.get_iterator_fn_for_sources(
          sources, read_counter=si_counter)
      start = time.time()
      list(iterator_fn())
      time_cost = time.time() - start
      times.append(time_cost)
    state_sampler.stop()

  print("Runtimes:", times)

  avg_runtime = sum(times) / len(times)
  print("Average runtime:", avg_runtime)
  print("Time per element:", avg_runtime / (input_per_source *
                                            num_sources))
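run_benchmark can be invoked directly; a smaller configuration for a quick local smoke test might look like the sketch below. It assumes FakeSource, long_generator and the statesampler/opcounters/sideinputs modules used above are importable.

if __name__ == '__main__':
  run_benchmark(num_runs=5, input_per_source=1000, num_sources=2)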
Example #7
    def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
        """
        Core routine for determining stimulus correlation

        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        g_in = np.loadtxt(intensityfile)
        g_in.shape = g_in.shape[0], 1
        dcol = designmatrix.shape[1]
        mccol = mc_in.shape[1]
        concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
        cm = np.corrcoef(concat_matrix, rowvar=0)
        corrfile = self._get_output_filenames(motionfile, cwd)
        # write output to outputfile
        file = open(corrfile, 'w')
        file.write("Stats for:\n")
        file.write("Stimulus correlated motion:\n%s\n" % motionfile)
        for i in range(dcol):
            file.write("SCM.%d:" % i)
            for v in cm[i, dcol + np.arange(mccol)]:
                file.write(" %.2f" % v)
            file.write('\n')
        file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
        for i in range(dcol):
            file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
        file.close()
Example #8
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
    """
    Sets the cpu affinity for the supplied processes.
    Requires the optional psutil module.
    :param int n:
    :param list process_ids: a list of pids
    :param bool actual: Test workaround for Travis not supporting cpu affinity
    """
    # check if we have the psutil module
    if not psutil:
        return
    # get the available processors
    cpu_list = list(range(psutil.cpu_count()))
    # affinities of 0 or gte cpu_count, equals to no affinity
    if not n or n >= len(cpu_list):
        return
    # spread the workers over the available processors.
    index = 0
    for pid in process_ids:
        affinity = []
        for k in range(n):
            if index == len(cpu_list):
                index = 0
            affinity.append(cpu_list[index])
            index += 1
        if psutil.pid_exists(pid):
            p = psutil.Process(pid)
            if actual:
                p.cpu_affinity(affinity)
            logger.info('{} will use cpu {}'.format(pid, affinity))
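A minimal usage sketch, assuming psutil is installed, the django_q Conf settings object and module logger are available, and the caller is allowed to change the affinity of the target processes:

import os

# pin the current process to a single core; with n=0 the call is a no-op
set_cpu_affinity(1, [os.getpid()])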
Example #9
 def tableToString(table):
     tablestring = ''
     for i in range(len(table)):
         for j in range(len(table[0])):
             tablestring = tablestring + table[i][j] + ','
     tablestring = tablestring[:-1]
     return tablestring
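A quick check of the behaviour, assuming tableToString is available as a plain function and the cells are strings: the rows are flattened into one comma-separated string with no row separator.

assert tableToString([['a', 'b'], ['c', 'd']]) == 'a,b,c,d'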
Example #10
    def view_delta_rmsd_vs_steps(self):
        self._calculate_complete_rmsd_matrix()
        fig, axes = plt.subplots(2)
        a_rmsd = np_nans(len(self._cg_sequence) // 2)
        min_rmsd = np_nans(len(self._cg_sequence) // 2)
        max_rmsd = np_nans(len(self._cg_sequence) // 2)
        for d in range(len(a_rmsd)):
            l = [self._rmsd[self._cg_sequence[i], self._cg_sequence[i + d]]
                 for i in range(len(self._cg_sequence) - d)]
            a_rmsd[d] = sum(l) / len(l)
            min_rmsd[d] = min(l)
            max_rmsd[d] = max(l)
        for ax in axes:
            ax.set_xlabel("Steps apart")
            ax.set_ylabel("Average RMSD")
            ax.plot(list(range(len(a_rmsd))), a_rmsd, label="Average RMSD")
            ax.plot(list(range(len(min_rmsd))), min_rmsd, label="Minimal RMSD")
            ax.plot(list(range(len(max_rmsd))), max_rmsd, label="Maximal RMSD")
            ax.plot([0, len(max_rmsd)], [np.max(self._rmsd), np.max(
                self._rmsd)], "-.", label="Maximal RMSD in whole simulation")
            ax.plot([0, len(max_rmsd)], [np.mean(self._rmsd), np.mean(
                self._rmsd)], "-.", label="Average RMSD in whole simulation")
            ax.legend(prop={'size': 6})
        axes[1].set_xlim([0, 50])

        plt.savefig("rmsd_steps_apart_{}.svg".format(self._cgs[0].name))

        plt.clf()
        plt.close()
Example #11
def repeat_to_match_shape(x, axis, keepdims):
    """Returns a function that repeats an array along axis to get a given shape.
       Also returns the number of repetitions of the array."""
    assert isinstance(axis, (type(None), int, tuple))

    if not isarray(x):
        return I, 1
    shape = x.shape
    if axis is None:
        dtype=None
        if anp.iscomplexobj(x):
            dtype = getval(anp.array(x)).dtype   # np.full() has a bug for complex numbers
        if keepdims:
            return lambda g : anp.full(shape, anp.sum(g), dtype=dtype), anp.prod(shape)
        else:
            return lambda g : anp.full(shape, g, dtype=dtype), anp.prod(shape)
    elif isinstance(axis, int):
        if keepdims:
            return lambda g : anp.repeat(g, shape[axis], axis), shape[axis]
        else:
            return lambda g : anp.repeat(anp.expand_dims(g, axis),
                                         shape[axis], axis), shape[axis]
    else:
        repeats  = [shape[i] if i in axis else 1 for i in range(len(shape))]
        expanded = [shape[i] if i not in axis else 1 for i in range(len(shape))]
        num_reps = anp.prod(anp.array(shape)[list(axis)])

        if keepdims:
            return lambda g: anp.tile(g, repeats), num_reps
        else:
            return lambda g: anp.tile(anp.reshape(g, expanded), repeats), num_reps
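To see what the returned closure does, here is the axis=0, keepdims=False branch rewritten against plain numpy (autograd's anp wraps numpy, so the shapes match); this is an illustration of that branch, not the library code itself.

import numpy as np

x = np.ones((3, 4))
g = np.arange(4.0)               # gradient of a sum over axis 0 has shape (4,)
# for axis=0 and keepdims=False the closure is equivalent to:
repeated = np.repeat(np.expand_dims(g, 0), x.shape[0], 0)
assert repeated.shape == x.shape
assert np.array_equal(repeated, np.tile(g, (3, 1)))
num_reps = x.shape[0]            # the second value returned alongside the closure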
Example #12
def set_menus_langbar(website):
    """
    Add the 'menu' and 'langbar' keys, containing HTML lists, to all pages.
    """
    
    # list the languages in use
    langlist=list()
    for page in website.pagelist:
        if not page['lang'] in langlist:
            langlist.append(page['lang'])
            
    website.langlist=langlist

    # create list of menus per lang  and per page      
    for lang in langlist:
        menulist=list()
        for i in range(len(website.pagelist)):
            page=website.pagelist[i]
            if page['lang']== lang and page['in_menu'] :
                menulist.append(i)
        # for all pages
        for i in range(len(website.pagelist)):
            page=website.pagelist[i]
            if page['lang'] == lang:
                page['menu']=get_menu(website,menulist,i) 
                page['menulist']=get_menulist(website,menulist,i)
                
        # for all posts
        for i in range(len(website.postlist)):
            page=website.postlist[i]
            if page['lang'] == lang:
                page['menu']=get_menu_post(website,menulist,i)   
                page['menulist']=get_menu_postlist(website,menulist,i)
Example #13
def make_grad_tensordot(argnum, ans, A, B, axes=2):
    if type(axes) is int:
        axes = (list(range(anp.ndim(A)))[-axes:],
                list(range(anp.ndim(B)))[:axes])

    def gradfun(g):
        N_axes_summed = len(axes[0])
        if argnum == 0:
            X, Y = A, B
            X_axes_summed, Y_axes_summed = axes
            g_axes_from_Y = list(range(anp.ndim(g)))[(anp.ndim(X) - N_axes_summed):]
        else:
            X, Y = B, A
            X_axes_summed, Y_axes_summed = axes[::-1]
            g_axes_from_Y = list(range(anp.ndim(g)))[:(anp.ndim(Y) - N_axes_summed)]

        Y_axes_ignored = [i for i in range(anp.ndim(Y)) if i not in Y_axes_summed]
        result = anp.tensordot(g, Y, axes=[g_axes_from_Y, Y_axes_ignored])
        sorted_axes_pairs = sorted(zip(X_axes_summed, Y_axes_summed), key = lambda x : x[1])
        forward_permutation = ([i for i in range(anp.ndim(X)) if i not in X_axes_summed]
                             + [i for i, _ in sorted_axes_pairs])
        reverse_permutation = list(anp.argsort(forward_permutation))
        if result.ndim == 0:
            result = result[()]
        return anp.transpose(result, axes=reverse_permutation)
    return gradfun
Example #14
  def test_delete_batch(self, *unused_args):
    gcsio.BatchApiRequest = FakeBatchApiRequest
    file_name_pattern = 'gs://gcsio-test/delete_me_%d'
    file_size = 1024
    num_files = 10

    # Test deletion of non-existent files.
    result = self.gcs.delete_batch(
        [file_name_pattern % i for i in range(num_files)])
    self.assertTrue(result)
    for i, (file_name, exception) in enumerate(result):
      self.assertEqual(file_name, file_name_pattern % i)
      self.assertEqual(exception, None)
      self.assertFalse(self.gcs.exists(file_name_pattern % i))

    # Insert some files.
    for i in range(num_files):
      self._insert_random_file(self.client, file_name_pattern % i, file_size)

    # Check files inserted properly.
    for i in range(num_files):
      self.assertTrue(self.gcs.exists(file_name_pattern % i))

    # Execute batch delete.
    self.gcs.delete_batch([file_name_pattern % i for i in range(num_files)])

    # Check files deleted properly.
    for i in range(num_files):
      self.assertFalse(self.gcs.exists(file_name_pattern % i))
Example #15
  def test_should_sample(self):
    # Order of magnitude more buckets than highest constant in code under test.
    buckets = [0] * 300
    # The seed is arbitrary and exists just to ensure this test is robust.
    # If you don't like this seed, try your own; the test should still pass.
    random.seed(1717)
    # Do enough runs that the expected hits even in the last buckets
    # is big enough to expect some statistical smoothing.
    total_runs = 10 * len(buckets)

    # Fill the buckets.
    for _ in range(total_runs):
      opcounts = OperationCounters(CounterFactory(), 'some-name',
                                   coders.PickleCoder(), 0)
      for i in range(len(buckets)):
        if opcounts.should_sample():
          buckets[i] += 1

    # Look at the buckets to see if they are likely.
    for i in range(10):
      self.assertEqual(total_runs, buckets[i])
    for i in range(10, len(buckets)):
      self.assertTrue(buckets[i] > 7 * total_runs / i,
                      'i=%d, buckets[i]=%d, expected=%d, ratio=%f' % (
                          i, buckets[i],
                          10 * total_runs / i,
                          buckets[i] / (10.0 * total_runs / i)))
      self.assertTrue(buckets[i] < 14 * total_runs / i,
                      'i=%d, buckets[i]=%d, expected=%d, ratio=%f' % (
                          i, buckets[i],
                          10 * total_runs / i,
                          buckets[i] / (10.0 * total_runs / i)))
Example #16
    def _convert_to_timeseries(self, data):
        """Convert timeseries from numpy structures to shyft.api timeseries.

        We assume the time axis is regular, and that we can use a point time
        series with a parametrized time axis definition and corresponding
        vector of values. If the time series is missing on the data, we insert
        it into non_time_series.

        Returns
        -------
        timeseries: dict
            Time series arrays keyed by type
        """
        tsc = api.TsFactory().create_point_ts
        time_series = {}
        for key, (data, ta) in data.items():
            fslice = (len(data.shape) - 2)*[slice(None)]
            I, J = data.shape[-2:]

            def construct(d):
                if ta.size() != d.size:
                    raise AromeDataRepositoryError("Time axis size {} not equal to the number of "
                                                   "data points ({}) for {}"
                                                   "".format(ta.size(), d.size, key))
                return tsc(ta.size(), ta.start, ta.delta_t,
                           api.DoubleVector_FromNdArray(d.flatten()), api.point_interpretation_policy.POINT_AVERAGE_VALUE)
            time_series[key] = np.array([[construct(data[fslice + [i, j]])
                                          for j in range(J)] for i in range(I)])
        return time_series
Example #17
def rotate2(im):
  H, W = im.shape
  im2 = np.zeros((W, H))
  for i in range(H):
    for j in range(W):
      im2[j,H - i - 1] = im[i,j]
  return im2
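rotate2 writes pixel (i, j) of the input to (j, H - 1 - i) of the output, which is a 90-degree clockwise rotation. As a sanity check it should agree with numpy's built-in rotation (a sketch, assuming a 2-D array):

import numpy as np

im = np.arange(12.0).reshape(3, 4)
assert np.array_equal(rotate2(im), np.rot90(im, k=-1))   # clockwise by 90 degrees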
Example #18
    def update(self, data=None):
        """
        Return an updated copy with provided data.

        :param data: any supported object.
                    If None return updated and referenced copy of itself.
        :return: new directory referenced to itself.
        """
        if isinstance(data, list):  # if list
            if len(data) > len(self.repr):
                for i in range(len(self.repr)):
                    self.repr[i] = data[i]
                for j in range(i + 1, len(data)):
                    self.repr.append(data[j])
            else:
                for i in range(len(data)):
                    self.repr[i] = data[i]
                del self.repr[i + 1:]
            return Directory(self)  # string is immutable and must be renewed
        elif isinstance(data, dict):  # if dictionary
            for k, v in data.items():  # self.__dict__.update(data)
                setattr(self, k, v)
            return Directory(self)
        elif data:  # if not list or dict
            return self.update([data])
        else:  # if None return updated version
            return Directory(self)
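The list branch above overwrites the shared prefix in place and then either appends the remaining new items or truncates the leftovers. In isolation the rule looks like this sketch (plain lists, hypothetical helper name):

def merge_lists(current, new):
    # overwrite the shared prefix, then extend or truncate to match `new`
    n = min(len(current), len(new))
    current[:n] = new[:n]
    if len(new) > len(current):
        current.extend(new[len(current):])
    else:
        del current[n:]
    return current

assert merge_lists(['a', 'b', 'c'], ['x', 'y']) == ['x', 'y']
assert merge_lists(['a'], ['x', 'y', 'z']) == ['x', 'y', 'z']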
Example #19
    def processAlgorithm(self, parameters, context, feedback):
        layer = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.INPUT_VECTOR), context)

        rasterPath = str(self.getParameterValue(self.INPUT_RASTER))

        rasterDS = gdal.Open(rasterPath, gdal.GA_ReadOnly)
        geoTransform = rasterDS.GetGeoTransform()
        rasterDS = None

        fields = QgsFields()
        fields.append(QgsField('id', QVariant.Int, '', 10, 0))
        fields.append(QgsField('line_id', QVariant.Int, '', 10, 0))
        fields.append(QgsField('point_id', QVariant.Int, '', 10, 0))

        writer = self.getOutputFromName(self.OUTPUT_LAYER).getVectorWriter(fields, QgsWkbTypes.Point,
                                                                           layer.crs(), context)

        outFeature = QgsFeature()
        outFeature.setFields(fields)

        self.fid = 0
        self.lineId = 0
        self.pointId = 0

        features = QgsProcessingUtils.getFeatures(layer, context)
        total = 100.0 / layer.featureCount() if layer.featureCount() else 0
        for current, f in enumerate(features):
            geom = f.geometry()
            if geom.isMultipart():
                lines = geom.asMultiPolyline()
                for line in lines:
                    for i in range(len(line) - 1):
                        p1 = line[i]
                        p2 = line[i + 1]

                        (x1, y1) = raster.mapToPixel(p1.x(), p1.y(),
                                                     geoTransform)
                        (x2, y2) = raster.mapToPixel(p2.x(), p2.y(),
                                                     geoTransform)

                        self.buildLine(x1, y1, x2, y2, geoTransform,
                                       writer, outFeature)
            else:
                points = geom.asPolyline()
                for i in range(len(points) - 1):
                    p1 = points[i]
                    p2 = points[i + 1]

                    (x1, y1) = raster.mapToPixel(p1.x(), p1.y(), geoTransform)
                    (x2, y2) = raster.mapToPixel(p2.x(), p2.y(), geoTransform)

                    self.buildLine(x1, y1, x2, y2, geoTransform, writer,
                                   outFeature)

            self.pointId = 0
            self.lineId += 1

            feedback.setProgress(int(current * total))

        del writer
Example #20
    def setup_data(self):
        """
        This function performs all initializations necessary:
        load the data sets and set the training set indices and response column index
        """

        # create and clean out the sandbox directory first
        self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
        
        # randomly choose which family of GBM algo to use
        self.family = self.families[random.randint(0, len(self.families)-1)]

        # preload datasets, set x_indices, y_index and change response to factor for classification
        if 'multinomial' in self.family:
            self.training_metric = 'logloss'
            self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filenames[1]))
            self.y_index = self.training1_data.ncol-1
            self.x_indices = list(range(self.y_index))
            self.training1_data[self.y_index] = self.training1_data[self.y_index].round().asfactor()
            self.scale_model = 1

        else:
            self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filenames[0]))
            self.y_index = self.training1_data.ncol-1
            self.x_indices = list(range(self.y_index))
            self.scale_model = 0.75

        # save the training data files just in case the code crashed.
        pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
Example #21
 def _get_character_map_format4(self, offset):
     # This is absolutely, without question, the *worst* file
     # format ever devised.
     header = _read_cmap_format4Header(self._data, offset)
     seg_count = header.seg_count_x2 // 2
     array_size = struct.calcsize('>%dH' % seg_count)
     end_count = self._read_array('>%dH' % seg_count, 
         offset + header.size)
     start_count = self._read_array('>%dH' % seg_count, 
         offset + header.size + array_size + 2)
     id_delta = self._read_array('>%dh' % seg_count,
         offset + header.size + array_size + 2 + array_size)
     id_range_offset_address = \
         offset + header.size + array_size + 2 + array_size + array_size
     id_range_offset = self._read_array('>%dH' % seg_count, 
         id_range_offset_address)
     character_map = {}
     for i in range(0, seg_count):
         if id_range_offset[i] != 0:
             if id_range_offset[i] == 65535:
                 continue  # Hack around a dodgy font (babelfish.ttf)
             for c in range(start_count[i], end_count[i] + 1):
                 addr = id_range_offset[i] + 2*(c - start_count[i]) + \
                     id_range_offset_address + 2*i
                 g = struct.unpack('>H', self._data[addr:addr+2])[0]
                 if g != 0:
                     character_map[chr(c)] = (g + id_delta[i]) % 65536
         else:
             for c in range(start_count[i], end_count[i] + 1):
                 g = (c + id_delta[i]) % 65536
                 if g != 0:
                     character_map[chr(c)] = g
     return character_map
Example #22
    def test_key_times(self):
        """
        test how much time memoizeDict takes in saving keys when they are
        added each time.
        """
        mydict = self.mydict
        len_keys = 1000
        to_mean = 10
        data = (("12"*100)*100)*100

        # calculate expected time of writing
        time_write_expected = time()
        for i in range(1, to_mean + 1):
            mydict[-i] = data
        time_write_expected = (time() - time_write_expected)/to_mean
        mydict.clear()

        for i in range(len_keys):
            time_write = time()
            mydict[i] = data
            time_write = time()-time_write

            # check in each iteration
            self.assertAlmostEqual(
                time_write,
                time_write_expected,
                delta=time_write_expected * 0.2,  # permissive delta, in seconds
                msg="Adding key No. {} took {} sec, which is not close to "
                    "{} sec".format(i, time_write, time_write_expected)
            )
Example #23
def tuneOffsets(ccd=None, feeControl=None):
    amps = list(range(8))
    feeControl.zeroOffsets(amps)

    im, fname = ccdFuncs.fullExposure('bias', ccd=ccd,
                                      feeControl=feeControl, nrows=300)
    exp = geom.Exposure(im)

    ampIms, osIms, _ = exp.splitImage(doTrim=False)

    means = []
    for a_i in range(8):
        reg = osIms[a_i][20:-20][2:-2]
        means.append(reg.mean())

    m, r = calcOffsets(1000,np.array(means))
    print("applying master: %s" % (m))
    print("applying refs  : %s" % (r))

    feeControl.setOffsets(amps, m, leg='n', doSave=False)
    feeControl.setOffsets(amps, r, leg='p', doSave=True)
    feeControl.setMode('offset')
    
    im, fname = ccdFuncs.fullExposure('bias', ccd=ccd,
                                      feeControl=feeControl, nrows=200)
    exp = geom.Exposure(im)

    ampIms, osIms, _ = exp.splitImage(doTrim=False)
    means = []
    for a_i in range(8):
        reg = osIms[a_i][20:-20][2:-2]
        means.append(reg.mean())
    print("final means: %s" % ' '.join(["%0.1f" % m for m in means]))
Example #24
def grid_glrm_iris():
  print("Importing iris_wheader.csv data...")
  irisH2O = h2o.upload_file(pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
  irisH2O.describe()
  transform_opts = ["NONE", "DEMEAN", "DESCALE", "STANDARDIZE"]
  k_opts = random.sample(list(range(1,8)),3)
  size_of_hyper_space = len(transform_opts) * len(k_opts)
  hyper_parameters = OrderedDict()
  hyper_parameters["k"] = k_opts
  hyper_parameters["transform"] = transform_opts
  gx = random.uniform(0,1)
  gy = random.uniform(0,1)
  print("H2O GLRM with , gamma_x = " + str(gx) + ", gamma_y = " + str(gy) +\
        ", hyperparameters = " + str(hyper_parameters))

  gs = H2OGridSearch(H2OGeneralizedLowRankEstimator(loss="Quadratic", gamma_x=gx, gamma_y=gy), hyper_params=hyper_parameters)
  gs.train(x=list(range(4)), y=4, training_frame=irisH2O)
  for model in gs:
    assert isinstance(model, H2OGeneralizedLowRankEstimator)
  print(gs.sort_by("mse"))
  #print gs.hit_ratio_table()

  assert len(gs) == size_of_hyper_space
  total_grid_space = list(map(list, itertools.product(*list(hyper_parameters.values()))))
  for model in gs.models:
      combo = [model.parms['k']['actual_value']] + [model.parms['transform']['actual_value']]
      assert combo in total_grid_space
      total_grid_space.remove(combo)
Example #25
def kp(obj, weights, capacity):
    assert len(obj) == len(weights)
    n = len(obj)

    if n == 0:
        return 0, []

    if capacity == 0:
        return 0, [0 for i in range(n)]
    
    n = len(obj)

    # Don't include item
    zbest, solbest = kp(obj, weights, capacity - 1)
    # Check all items for inclusion
    for i in range(n):
        if weights[i] <= capacity:
            zyes, solyes = kp(obj, weights, \
                              capacity - weights[i])
            zyes += obj[i]
            solyes[i] += 1
            if zyes > zbest:
                zbest = zyes
                solbest = solyes

    return zbest, solbest
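With the comparison fixed as above, the recursion behaves as an unbounded knapsack (an item may be taken more than once, since only the remaining capacity is passed down). A small illustrative check:

value, counts = kp([10, 7], [3, 2], 4)    # values, weights, capacity
assert value == 14 and counts == [0, 2]   # take the weight-2 item twice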
Example #26
 def setUp(self):
     super(ArvPutUploadJobTest, self).setUp()
     run_test_server.authorize_with('active')
     # Temp files creation
     self.tempdir = tempfile.mkdtemp()
     subdir = os.path.join(self.tempdir, 'subdir')
     os.mkdir(subdir)
     data = "x" * 1024 # 1 KB
     for i in range(1, 5):
         with open(os.path.join(self.tempdir, str(i)), 'w') as f:
             f.write(data * i)
     with open(os.path.join(subdir, 'otherfile'), 'w') as f:
         f.write(data * 5)
     # Large temp file for resume test
     _, self.large_file_name = tempfile.mkstemp()
     fileobj = open(self.large_file_name, 'w')
     # Make sure to write just a little more than one block
     for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):
         data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB
         fileobj.write(data)
     fileobj.close()
     # Temp dir containing small files to be repacked
     self.small_files_dir = tempfile.mkdtemp()
     data = 'y' * 1024 * 1024 # 1 MB
     for i in range(1, 70):
         with open(os.path.join(self.small_files_dir, str(i)), 'w') as f:
             f.write(data + str(i))
     self.arvfile_write = getattr(arvados.arvfile.ArvadosFileWriter, 'write')
     # Temp dir to hold a symlink to other temp dir
     self.tempdir_with_symlink = tempfile.mkdtemp()
     os.symlink(self.tempdir, os.path.join(self.tempdir_with_symlink, 'linkeddir'))
     os.symlink(os.path.join(self.tempdir, '1'),
                os.path.join(self.tempdir_with_symlink, 'linkedfile'))
Example #27
    def __init__(self, transmat, tree, ncat=1, alpha=1):
        """
        Initialise the simulator with a transition matrix and a tree.
        The tree should have branch lengths. If it doesn't this will
        trigger a warning, but will continue.
        """
        # store the tree
        self.tree = tree
        self.states = np.array(transmat.model.states)
        self.state_indices = np.array(list(range(transmat.model.size)), dtype=np.intc)
        # initialise equilibrium frequency distribution
        self.freqs = transmat.freqs
        # Gamma rate categories
        self.ncat = ncat
        self.alpha = alpha
        self.gamma_rates = discrete_gamma(alpha, ncat)
        
        # initialise probabilities on tree
        for node in self.tree.preorder(skip_seed=True):
            l = node.edge.length or 0
            if l == 0:
                print ('warning')
                #logger.warn('This tree has zero length edges')
            nstates = self.states.shape[0]
            node.pmats = np.empty((ncat, nstates, nstates))
            for i in range(ncat):
                node.pmats[i] = transmat.get_p_matrix(l*self.gamma_rates[i])

        self.sequences = {}
Example #28
def big_init():
    M = 5
    K = 3
    D = 2

    pi = np.array([1, 0, 0, 0, 0])  # initial state distribution

    A = np.array([
        [0.9, 0.025, 0.025, 0.025, 0.025],
        [0.025, 0.9, 0.025, 0.025, 0.025],
        [0.025, 0.025, 0.9, 0.025, 0.025],
        [0.025, 0.025, 0.025, 0.9, 0.025],
        [0.025, 0.025, 0.025, 0.025, 0.9],
    ])  # state transition matrix - likes to stay where it is

    R = np.ones((M, K)) / K  # mixture proportions

    mu = np.array([
        [[0, 0], [1, 1], [2, 2]],
        [[5, 5], [6, 6], [7, 7]],
        [[10, 10], [11, 11], [12, 12]],
        [[15, 15], [16, 16], [17, 17]],
        [[20, 20], [21, 21], [22, 22]],
    ])  # M x K x D

    sigma = np.zeros((M, K, D, D))
    for m in range(M):
        for k in range(K):
            sigma[m, k] = np.eye(D)
    return M, K, D, pi, A, R, mu, sigma
Example #29
    def input_data_indexes(self, dataset_obj, indexes):
        "Faster version of what is in acos_file.SoundingDataFile"

        if self.inp_shape_names is None:
            raise ValueError('No shape names are defined for dataset: %s in file: %s' % (dataset_obj.name, self.filename))
        elif len(self.inp_shape_names) != len(dataset_obj.shape):
            raise ValueError('Length of shape names: %s does not match length of data: %s for: %s' % (len(self.inp_shape_names), len(dataset_obj.shape), dataset_obj.name))

        if len(self.id_names) > 1:
            index_get = [ itemgetter(idx) for idx in range(len(self.id_names)) ]
        else:
            index_get = [ lambda f:f ]

        data_dim_indexes = []
        for dim_idx, shp_name in enumerate(self.inp_shape_names):
            if shp_name in self.id_names:
                shape_idxs = tuple(map(index_get[self.id_names.index(shp_name)], indexes))

                if len(shape_idxs) == 1:
                    shape_idxs = shape_idxs[0]
                data_dim_indexes.append(shape_idxs)
            else:
                data_dim_indexes.append( slice(dataset_obj.shape[dim_idx]) )

        # Eliminate duplicate shape names
        for name, num_occurance in list(Counter(self.inp_shape_names).items()):
            if num_occurance > 1:
                for name_count in range(1, num_occurance+1):
                    self.inp_shape_names[self.inp_shape_names.index(name)] = "%s_%d" % (name, name_count)

        return tuple(data_dim_indexes)
Example #30
def stdExposures_Fe55(ccd=None, feeControl=None, comment='Fe55 sequence'):
    """ Take standard set of Fe55 exposures.

    The Fe55 source illuminates a pretty narrow area, so we move the arm
    to three positions. At each position we take 10 30s and 10 60s exposures.

    In practice, the calling routine would run this many times.
    """
    
    explist = []
    explist.append(('bias', 0),)
    for i in range(10):
        explist.append(('dark', 30),)
    for i in range(10):
        explist.append(('dark', 60),)

    opticslab.setPower('off')
    
    for pos in 35,45,55:
        opticslab.setFe55(pos)
        
        ccdFuncs.expList(explist, ccd=ccd,
                         feeControl=feeControl,
                         comment='Fe55 dark %s'%str(pos),
                         title='Fe55 darks')
Example #31
 def dequeue_entries(self, workq, count):
     for i in range(count):
         workq.dequeue()
Example #32
def setting_channel_new(item):
    import xbmcgui

    # Load list of options (active user channels that allow global search)
    lista = []
    ids = []
    lista_lang = []
    lista_ctgs = []
    channels_list = channelselector.filterchannels('all')
    for channel in channels_list:
        if channel.action == '':
            continue

        channel_parameters = channeltools.get_channel_parameters(
            channel.channel)

        # Do not include if "include_in_global_search" does not exist in the channel configuration
        if not channel_parameters['include_in_global_search']:
            continue

        lbl = '%s' % channel_parameters['language']
        lbl += ' %s' % ', '.join(
            config.get_localized_category(categ)
            for categ in channel_parameters['categories'])

        it = xbmcgui.ListItem(channel.title, lbl)
        it.setArt({'thumb': channel.thumbnail, 'fanart': channel.fanart})
        lista.append(it)
        ids.append(channel.channel)
        lista_lang.append(channel_parameters['language'])
        lista_ctgs.append(channel_parameters['categories'])

    # Pre-select dialog
    preselecciones = [
        config.get_localized_string(70570),
        config.get_localized_string(70571),
        # 'Modify starting from Recommended',
        # 'Modify starting from Frequent',
        config.get_localized_string(70572),
        config.get_localized_string(70573),
        # 'Modify starting from Castilian Spanish',
        # 'Modify starting from Latin American Spanish'
    ]
    # presel_values = ['skip', 'actual', 'recom', 'freq', 'all', 'none', 'cast', 'lat']
    presel_values = ['skip', 'actual', 'all', 'none']

    categs = [
        'movie', 'tvshow', 'documentary', 'anime', 'vos', 'direct', 'torrent'
    ]
    for c in categs:
        preselecciones.append(
            config.get_localized_string(70577) +
            config.get_localized_category(c))
        presel_values.append(c)

    if item.action == 'setting_channel':  # Configuration of the channels included in the search
        del preselecciones[0]
        del presel_values[0]
    # else: # Call from "search on other channels" (you can skip the selection and go directly to the search)

    ret = platformtools.dialog_select(config.get_localized_string(59994),
                                      preselecciones)
    if ret == -1:
        return False  # order cancel
    if presel_values[ret] == 'skip':
        return True  # continue unmodified
    elif presel_values[ret] == 'none':
        preselect = []
    elif presel_values[ret] == 'all':
        preselect = list(range(len(ids)))
    elif presel_values[ret] in ['cast', 'lat']:
        preselect = []
        for i, lg in enumerate(lista_lang):
            if presel_values[ret] in lg or '*' in lg:
                preselect.append(i)
    elif presel_values[ret] == 'actual':
        preselect = []
        for i, canal in enumerate(ids):
            channel_status = config.get_setting('include_in_global_search',
                                                canal)
            if channel_status:
                preselect.append(i)

    elif presel_values[ret] == 'recom':
        preselect = []
        for i, canal in enumerate(ids):
            _not, set_canal_list = channeltools.get_channel_controls_settings(
                canal)
            if set_canal_list.get('include_in_global_search', False):
                preselect.append(i)

    elif presel_values[ret] == 'freq':
        preselect = []
        for i, canal in enumerate(ids):
            frequency = channeltools.get_channel_setting('frequency', canal, 0)
            if frequency > 0:
                preselect.append(i)
    else:
        preselect = []
        for i, ctgs in enumerate(lista_ctgs):
            if presel_values[ret] in ctgs:
                preselect.append(i)

    # Dialog to select
    ret = xbmcgui.Dialog().multiselect(config.get_localized_string(59994),
                                       lista,
                                       preselect=preselect,
                                       useDetails=True)
    if not ret:
        return False  # order cancel
    seleccionados = [ids[i] for i in ret]

    # Save changes to search channels
    for canal in ids:
        channel_status = config.get_setting('include_in_global_search', canal)
        # if not channel_status:
        #     channel_status = True

        if channel_status and canal not in seleccionados:
            config.set_setting('include_in_global_search', False, canal)
        elif not channel_status and canal in seleccionados:
            config.set_setting('include_in_global_search', True, canal)

    return True
Example #33
def showdata(vars,
             titles=[],
             legendlabels=[],
             surf=[],
             polar=[],
             tslice=0,
             movie=0,
             intv=1,
             Ncolors=25,
             x=[],
             y=[],
             global_colors=False,
             symmetric_colors=False):
    """
    A Function to animate time dependent data from BOUT++
    Requires the numpy, mpl_toolkits, matplotlib and boutdata libraries.
    
    To animate multiple variables on different axes:
    showdata([var1, var2, var3])
    
    To animate more than one line on a single axes:
    showdata([[var1, var2, var3]])
    
    The default graph types are:
    2D (time + 1 spatial dimension) arrays = animated line plot
    3D (time + 2 spatial dimensions) arrays = animated contour plot.
    
    To use surface or polar plots:
    showdata(var, surf = 1)
    showdata(var, polar = 1)
    
    Can plot different graph types on different axes.  Default graph types will be used depending on the dimensions of the input arrays.  To specify polar/surface plots on different axes:
    showdata([var1,var2], surf = [1,0], polar = [0,1])
    
    Movies require FFmpeg to be installed.

    The tslice variable is used to control the time value that is printed on each
    frame of the animation.  If the input data matches the time values found within
    BOUT++'s dmp data files, then these time values will be used.  Otherwise, an
    integer counter is used.

    During animation click once to stop in the current frame. Click again to continue.

    global_colors = True: if "vars" is a list, the color levels are determined from the maximum of the maxima and the minimum of the minima over all fields in vars.
    
    symmetric_colors = True: color levels are symmetric about zero.
    """
    plt.ioff()

    # Check to see whether vars is a list or not.
    if isinstance(vars, list):
        Nvar = len(vars)
    else:
        vars = [vars]
        Nvar = len(vars)

    if Nvar < 1:
        raise ValueError("No data supplied")

    # Check to see whether each variable is a list - used for line plots only
    Nlines = []
    for i in range(0, Nvar):
        if isinstance(vars[i], list):
            Nlines.append(len(vars[i]))
        else:
            Nlines.append(1)
            vars[i] = [vars[i]]

    # Sort out titles
    if len(titles) == 0:
        for i in range(0, Nvar):
            titles.append(('Var' + str(i + 1)))
    elif len(titles) != Nvar:
        raise ValueError(
            'The length of the titles input list must match the length of the vars list.'
        )

    # Sort out legend labels
    if len(legendlabels) == 0:
        for i in range(0, Nvar):
            legendlabels.append([])
            for j in range(0, Nlines[i]):
                legendlabels[i].append(chr(97 + j))
    elif (isinstance(legendlabels[0], list) != 1):
        if Nvar != 1:
            check = 0
            for i in range(0, Nvar):
                if len(legendlabels) != Nlines[i]:
                    check = check + 1
            if check == 0:
                print(
                    "Warning, the legendlabels list does not contain a sublist for each variable, but it's length matches the number of lines on each plot. Will apply labels to each plot"
                )
                legendlabelsdummy = []
                for i in range(0, Nvar):
                    legendlabelsdummy.append([])
                    for j in range(0, Nlines[i]):
                        legendlabelsdummy[i].append(legendlabels[j])
                legendlabels = legendlabelsdummy
            else:
                print(
                    "Warning, the legendlabels list does not contain a sublist for each variable, and it's length does not match the number of lines on each plot. Will default apply labels to each plot"
                )
                legendlabels = []
                for i in range(0, Nvar):
                    legendlabels.append([])
                    for j in range(0, Nlines[i]):
                        legendlabels[i].append(chr(97 + j))
        else:
            if (Nlines[0] == len(legendlabels)):
                legendlabels = [legendlabels]
    elif len(legendlabels) != Nvar:
        print(
            "Warning, the length of the legendlabels list does not match the length of the vars list, will continue with default values"
        )
        legendlabels = []
        for i in range(0, Nvar):
            legendlabels.append([])
            for j in range(0, Nlines[i]):
                legendlabels[i].append(chr(97 + j))
    else:
        for i in range(0, Nvar):
            if isinstance(legendlabels[i], list):
                if len(legendlabels[i]) != Nlines[i]:
                    print(
                        'Warning, the length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values'
                    )
                legendlabels[i] = []
                for j in range(0, Nlines[i]):
                    legendlabels[i].append(chr(97 + j))
            else:
                legendlabels[i] = [legendlabels[i]]
            if len(legendlabels[i]) != Nlines[i]:
                print(
                    'Warning, the length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot.  Will continue with default values'
                )
                legendlabels[i] = []
                for j in range(0, Nlines[i]):
                    legendlabels[i].append(chr(97 + j))

    # Sort out surf list
    if isinstance(surf, list):
        if (len(surf) == Nvar):
            for i in range(0, Nvar):
                if surf[i] >= 1:
                    surf[i] = 1
                else:
                    surf[i] = 0
        elif (len(surf) == 1):
            if surf[0] >= 1:
                surf[0] = 1
            else:
                surf[0] = 0
            if (Nvar > 1):
                for i in range(1, Nvar):
                    surf.append(surf[0])
        elif (len(surf) == 0):
            for i in range(0, Nvar):
                surf.append(0)
        else:
            print(
                'Warning, length of surf list does not match number of variables.  Will default to no polar plots'
            )
            for i in range(0, Nvar):
                surf.append(0)

    else:
        surf = [surf]
        if surf[0] >= 1:
            surf[0] = 1
        else:
            surf[0] = 0
        if (Nvar > 1):
            for i in range(1, Nvar):
                surf.append(surf[0])

    # Sort out polar list
    if isinstance(polar, list):
        if (len(polar) == Nvar):
            for i in range(0, Nvar):
                if polar[i] >= 1:
                    polar[i] = 1
                else:
                    polar[i] = 0
        elif (len(polar) == 1):
            if polar[0] >= 1:
                polar[0] = 1
            else:
                polar[0] = 0
            if (Nvar > 1):
                for i in range(1, Nvar):
                    polar.append(polar[0])
        elif (len(polar) == 0):
            for i in range(0, Nvar):
                polar.append(0)
        else:
            print(
                'Warning, length of polar list does not match number of variables.  Will default to no polar plots'
            )
            for i in range(0, Nvar):
                polar.append(0)
    else:
        polar = [polar]
        if polar[0] >= 1:
            polar[0] = 1
        else:
            polar[0] = 0
        if (Nvar > 1):
            for i in range(1, Nvar):
                polar.append(polar[0])

    # Determine shapes of arrays
    dims = []
    Ndims = []
    lineplot = []
    contour = []
    for i in range(0, Nvar):
        dims.append([])
        Ndims.append([])
        for j in range(0, Nlines[i]):
            dims[i].append(array((vars[i][j].shape)))
            Ndims[i].append(dims[i][j].shape[0])
            # Perform check to make sure that data is either 2D or 3D
            if (Ndims[i][j] < 2):
                raise ValueError(
                    'data must be either 2 or 3 dimensional.  Exiting')

            if (Ndims[i][j] > 3):
                raise ValueError(
                    'data must be either 2 or 3 dimensional.  Exiting')

            if ((Ndims[i][j] == 2) & (polar[i] != 0)):
                print(
                    'Warning, data must be  3 dimensional (time, r, theta) for polar plots.  Will plot lineplot instead'
                )

            if ((Ndims[i][j] == 2) & (surf[i] != 0)):
                print(
                    'Warning, data must be  3 dimensional (time, x, y) for surface plots.  Will plot lineplot instead'
                )

            if ((Ndims[i][j] == 3) & (Nlines[i] != 1)):
                raise ValueError(
                    'cannot have multiple sets of 3D (time + 2 spatial dimensions) on each subplot'
                )

            if ((Ndims[i][j] != Ndims[i][0])):
                raise ValueError(
                    'Error, Number of dimensions must be the same for all variables on each plot.'
                )

        if (Ndims[i][0] == 2):  # Set polar and surf list entries to 0
            polar[i] = 0
            surf[i] = 0
            lineplot.append(1)
            contour.append(0)
        else:
            if ((polar[i] == 1) & (surf[i] == 1)):
                print(
                    'Warning - cannot do polar and surface plots at the same time.  Default to contour plot'
                )
                contour.append(1)
                lineplot.append(0)
                polar[i] = 0
                surf[i] = 0
            elif (polar[i] == 1) | (surf[i] == 1):
                contour.append(0)
                lineplot.append(0)
            else:
                contour.append(1)
                lineplot.append(0)

    # Obtain size of data arrays
    Nt = []
    Nx = []
    Ny = []
    for i in range(0, Nvar):
        Nt.append([])
        Nx.append([])
        Ny.append([])
        for j in range(0, Nlines[i]):
            Nt[i].append(vars[i][j].shape[0])
            Nx[i].append(vars[i][j].shape[1])
            if (Nt[i][j] != Nt[0][0]):
                raise ValueError(
                    'time dimensions must be the same for all variables.')

            #if (Nx[i][j] != Nx[i][0]):
            #    raise ValueError('Dimensions must be the same for all variables on each plot.')

            if (Ndims[i][j] == 3):
                Ny[i].append(vars[i][j].shape[2])
                #if (Ny[i][j] != Ny[i][0]):
                #    raise ValueError('Dimensions must be the same for all variables.')

    # Collect time data from file
    if (tslice == 0):  # Only wish to collect time data if it matches
        try:
            t = collect('t_array')
            if t is None:
                raise ValueError("t_array is None")
            if len(t) != Nt[0][0]:
                raise ValueError("t_array is wrong size")
        except:
            t = linspace(0, Nt[0][0], Nt[0][0])

    # Obtain number of frames
    Nframes = int(Nt[0][0] / intv)

    # Generate grids for plotting
    x = []
    y = []
    for i in range(0, Nvar):
        x.append([])
        for j in range(0, Nlines[i]):
            x[i].append(linspace(0, Nx[i][j] - 1, Nx[i][j]))

        #x.append(linspace(0,Nx[i][0]-1, Nx[i][0]))
        if (Ndims[i][0] == 3):
            y.append(linspace(0, Ny[i][0] - 1, Ny[i][0]))
        else:
            y.append(0)

    # Determine range of data.  Used to ensure constant colour map and
    # to set y scale of line plot.
    fmax = []
    fmin = []
    xmax = []
    dummymax = []
    dummymin = []
    clevels = []

    for i in range(0, Nvar):

        dummymax.append([])
        dummymin.append([])
        for j in range(0, Nlines[i]):
            dummymax[i].append(max(vars[i][j]))
            dummymin[i].append(min(vars[i][j]))

        fmax.append(max(dummymax[i]))
        fmin.append(min(dummymin[i]))

        if (symmetric_colors):
            absmax = max(abs(fmax[i]), abs(fmin[i]))
            fmax[i] = absmax
            fmin[i] = -absmax

        for j in range(0, Nlines[i]):
            dummymax[i][j] = max(x[i][j])
        xmax.append(max(dummymax[i]))

        if not (global_colors):
            clevels.append(linspace(fmin[i], fmax[i], Ncolors))
    if (global_colors):
        fmaxglobal = max(fmax)
        fminglobal = min(fmin)
        for i in range(0, Nvar):
            fmax[i] = fmaxglobal
            fmin[i] = fminglobal
            clevels.append(linspace(fmin[i], fmax[i], Ncolors))

    # Create figures for animation plotting
    if (Nvar < 2):
        row = 1
        col = 1
        h = 6.0
        w = 8.0
    elif (Nvar < 3):
        row = 1
        col = 2
        h = 6.0
        w = 12.0
    elif (Nvar < 5):
        row = 2
        col = 2
        h = 8.0
        w = 12.0

    elif (Nvar < 7):
        row = 2
        col = 3
        h = 8.0
        w = 14.0

    elif (Nvar < 10):
        row = 3
        col = 3
        h = 12.0
        w = 14.0
    else:
        raise ValueError('too many variables...')

    fig = plt.figure(figsize=(w, h))
    title = fig.suptitle(r' ', fontsize=14)

    # Initiate all list variables required for plotting here
    ax = []
    lines = []
    plots = []
    cbars = []
    xstride = []
    ystride = []
    r = []
    theta = []

    # Initiate figure frame
    for i in range(0, Nvar):
        lines.append([])
        if (lineplot[i] == 1):
            ax.append(fig.add_subplot(row, col, i + 1))
            ax[i].set_xlim((0, xmax[i]))
            ax[i].set_ylim((fmin[i], fmax[i]))
            for j in range(0, Nlines[i]):
                lines[i].append(ax[i].plot([], [],
                                           lw=2,
                                           label=legendlabels[i][j])[0])
                #Need the [0] to 'unpack' the line object from tuple.  Alternatively:
                #lines[i], = lines[i]
            ax[i].set_xlabel(r'x')
            ax[i].set_ylabel(titles[i])
            if (Nlines[i] != 1):
                legendneeded = 1
                for k in range(0, i):
                    if (Nlines[i] == Nlines[k]):
                        legendneeded = 0
                if (legendneeded == 1):
                    plt.axes(ax[i])
                    plt.legend(loc=0)
            # Pad out unused list variables with zeros
            plots.append(0)
            cbars.append(0)
            xstride.append(0)
            ystride.append(0)
            r.append(0)
            theta.append(0)

        elif (contour[i] == 1):
            ax.append(fig.add_subplot(row, col, i + 1))
            ax[i].set_xlim((0, Nx[i][0] - 1))
            ax[i].set_ylim((0, Ny[i][0] - 1))
            ax[i].set_xlabel(r'x')
            ax[i].set_ylabel(r'y')
            ax[i].set_title(titles[i])
            plots.append(ax[i].contourf(x[i][0],
                                        y[i],
                                        vars[i][0][0, :, :].T,
                                        Ncolors,
                                        lw=0,
                                        levels=clevels[i]))
            plt.axes(ax[i])
            cbars.append(fig.colorbar(plots[i], format='%1.1e'))
            # Pad out unused list variables with zeros
            lines[i].append(0)
            xstride.append(0)
            ystride.append(0)
            r.append(0)
            theta.append(0)

        elif (surf[i] == 1):
            x[i][0], y[i] = meshgrid(x[i][0], y[i])
            if (Nx[i][0] <= 20):
                xstride.append(1)
            else:
                xstride.append(int(floor(Nx[i][0] / 20)))
            if (Ny[i][0] <= 20):
                ystride.append(1)
            else:
                ystride.append(int(floor(Ny[i][0] / 20)))
            ax.append(fig.add_subplot(row, col, i + 1, projection='3d'))
            plots.append(ax[i].plot_wireframe(x[i][0],
                                              y[i],
                                              vars[i][0][0, :, :].T,
                                              rstride=ystride[i],
                                              cstride=xstride[i]))
            title = fig.suptitle(r'', fontsize=14)
            ax[i].set_xlabel(r'x')
            ax[i].set_ylabel(r'y')
            ax[i].set_zlabel(titles[i])
            # Pad out unused list variables with zeros
            lines[i].append(0)
            cbars.append(0)
            r.append(0)
            theta.append(0)

        elif (polar[i] == 1):
            r.append(linspace(1, Nx[i][0], Nx[i][0]))
            theta.append(linspace(0, 2 * pi, Ny[i][0]))
            r[i], theta[i] = meshgrid(r[i], theta[i])
            ax.append(fig.add_subplot(row, col, i + 1, projection='polar'))
            plots.append(ax[i].contourf(theta[i],
                                        r[i],
                                        vars[i][0][0, :, :].T,
                                        levels=clevels[i]))
            plt.axes(ax[i])
            cbars.append(fig.colorbar(plots[i], format='%1.1e'))
            ax[i].set_rmax(Nx[i][0] - 1)
            ax[i].set_title(titles[i])
            # Pad out unused list variables with zeros
            lines[i].append(0)
            xstride.append(0)
            ystride.append(0)

    def onClick(event):
        global pause
        pause ^= True

    def control():
        global j, pause
        if j == Nframes - 1: j = -1
        if not pause:
            j = j + 1

        return j

    # Animation function
    def animate(i):
        j = control()

        index = j * intv

        for j in range(0, Nvar):
            if (lineplot[j] == 1):
                for k in range(0, Nlines[j]):
                    lines[j][k].set_data(x[j][k], vars[j][k][index, :])
            elif (contour[j] == 1):
                plots[j] = ax[j].contourf(x[j][0],
                                          y[j],
                                          vars[j][0][index, :, :].T,
                                          Ncolors,
                                          lw=0,
                                          levels=clevels[j])
            elif (surf[j] == 1):
                ax[j] = fig.add_subplot(row, col, j + 1, projection='3d')
                plots[j] = ax[j].plot_wireframe(x[j][0],
                                                y[j],
                                                vars[j][0][index, :, :].T,
                                                rstride=ystride[j],
                                                cstride=xstride[j])
                ax[j].set_zlim(fmin[j], fmax[j])
                ax[j].set_xlabel(r'x')
                ax[j].set_ylabel(r'y')
                ax[j].set_title(titles[j])
            elif (polar[j] == 1):
                plots[j] = ax[j].contourf(theta[j],
                                          r[j],
                                          vars[j][0][index, :, :].T,
                                          levels=clevels[j])
                ax[j].set_rmax(Nx[j][0] - 1)

        if (tslice == 0):
            title.set_text('t = %1.2e' % t[index])
        else:
            title.set_text('t = %i' % index)
        return plots

    def init():
        global j, pause
        j = -2
        pause = False
        return animate(0)

    # Call Animation function

    fig.canvas.mpl_connect('button_press_event', onClick)
    anim = animation.FuncAnimation(fig,
                                   animate,
                                   init_func=init,
                                   frames=Nframes)

    # Save movie with given name
    if isinstance(movie, str):
        try:
            anim.save(movie + '.mp4',
                      writer=FFwriter,
                      fps=30,
                      extra_args=['-vcodec', 'libx264'])
        except Exception:
            print("Save failed: Check ffmpeg path")

    # Save movie with default name
    if not isinstance(movie, str):
        if movie != 0:
            try:
                anim.save('animation.mp4',
                          writer=FFwriter,
                          fps=28,
                          extra_args=['-vcodec', 'libx264'])
            except Exception:
                print("Save failed: Check ffmpeg path")

    # Show animation
    if (movie == 0):
        plt.show()
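
The save branches above rely on an FFwriter object defined elsewhere in the module. A minimal sketch of how such a writer could be created with matplotlib follows; the FFMpegWriter choice and the ffmpeg path are assumptions, not part of the original code.

# Hypothetical setup for the FFwriter used above; adjust the ffmpeg path for your system.
import matplotlib.pyplot as plt
from matplotlib import animation

plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'  # assumed location
FFwriter = animation.FFMpegWriter(fps=30, extra_args=['-vcodec', 'libx264'])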
Ejemplo n.º 34
0
    def setup_vms(self, vn_fixtures, vmi_fixtures, vm=None):
        '''
        Input vm format:
            vm = {'count':2, 'launch_mode':'distribute',
                  'vm1':{'vn':['vn1'], 'vmi':['vmi1'], 'userdata':{
                    'vlan': str(vmi['vmi3']['vlan'])} },
                  'vm2':{'vn':['vn1'], 'vmi':['vmi2'], 'userdata':{
                    'vlan': str(vmi['vmi4']['vlan'])} }
                }
            launch_mode can be distribute or non-distribute
        '''
        vm_count = vm['count'] if vm else 1
        launch_mode = vm.get('launch_mode', 'default') if vm else 'default'
        vm_fixtures = {}  # Hash to store VM fixtures

        compute_nodes = self.orch.get_hosts()
        compute_nodes_len = len(compute_nodes)
        index = random.randint(0, compute_nodes_len - 1)
        for i in range(0, vm_count):
            vm_id = 'vm' + str(i + 1)
            vn_list = vm[vm_id]['vn']
            vmi_list = vm[vm_id]['vmi']
            # Get the userdata related to sub interfaces
            userdata = vm[vm_id].get('userdata', None)
            userdata_file = None
            if userdata:
                file_obj = self.create_user_data(userdata['vlan'])
                userdata_file = file_obj.name

            vn_fix_obj_list = []
            vmi_fix_uuid_list = []

            # Build the VN fixtures objects
            for vn in vn_list:
                vn_fix_obj_list.append(vn_fixtures[vn].obj)

            # Build the VMI UUIDs
            for vmi in vmi_list:
                vmi_fix_uuid_list.append(vmi_fixtures[vmi].uuid)

            # VM launch mode handling
            # Distribute mode, generate the new random index
            # Non Distribute mode, use previously generated index
            # Default mode, Nova takes care of launching
            if launch_mode == 'distribute':
                index = i % compute_nodes_len
                node_name = self.inputs.compute_names[index]
            elif launch_mode == 'non-distribute':
                node_name = self.inputs.compute_names[index]
            elif launch_mode == 'default':
                node_name = None

            vm_fixture = self.create_vm(vn_objs=vn_fix_obj_list,
                                        port_ids=vmi_fix_uuid_list,
                                        userdata=userdata_file,
                                        node_name=node_name)
            vm_fixtures[vm_id] = vm_fixture
            if userdata:
                file_obj.close()

        for vm_fixture in list(vm_fixtures.values()):
            assert vm_fixture.wait_till_vm_is_up()
        self.update_vms_for_pbb(list(vm_fixtures.values()))

        return vm_fixtures
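
A short usage sketch matching the docstring above; the fixture dictionaries and the VN/VMI names are placeholders taken from the documented format, not real objects.

# Inside a test method of the same class (illustrative values only):
vm = {'count': 2, 'launch_mode': 'distribute',
      'vm1': {'vn': ['vn1'], 'vmi': ['vmi1']},
      'vm2': {'vn': ['vn1'], 'vmi': ['vmi2']}}
vm_fixtures = self.setup_vms(vn_fixtures, vmi_fixtures, vm=vm)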
Ejemplo n.º 35
0
# Copyright  2018  Ashish Arora

from builtins import range
import argparse
import os

parser = argparse.ArgumentParser(description="""Creates the list of characters and words in lexicon""")
parser.add_argument('dir', type=str, help='output path')
args = parser.parse_args()

### main ###
lex = {}
text_path = os.path.join('data', 'train', 'text')

with open(text_path, 'r', encoding='utf-8') as f:
    for line in f:
        line_vect = line.strip().split(' ')
        for i in range(1, len(line_vect)):
            characters = list(line_vect[i])
            # Replace "|" with SIL: a "|" at the beginning of a word marks that word's initial space
            characters = " ".join(['SIL' if char == '|' else char for char in characters])
            lex[line_vect[i]] = characters
            if line_vect[i] == '#':
                lex[line_vect[i]] = "<HASH>"

with open(os.path.join(args.dir, 'lexicon.txt'), 'w', encoding='utf-8') as fp:
    for key in sorted(lex):
        fp.write(key + " " + lex[key] + "\n")
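
For clarity, a small worked example of the per-word rule above; the word is made up, and the transformation follows the list comprehension in the loop.

# Worked example of the character mapping used when building the lexicon.
word = "|word"
mapped = " ".join(['SIL' if ch == '|' else ch for ch in list(word)])
print(word, mapped)   # -> |word SIL w o r d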
Ejemplo n.º 36
0
    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'

        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.normalization=='batchnorm':
            for bn_param in self.bn_params:
                bn_param['mode'] = mode
        scores = None
        ############################################################################
        # TODO: Implement the forward pass for the fully-connected net, computing  #
        # the class scores for X and storing them in the scores variable.          #
        #                                                                          #
        # When using dropout, you'll need to pass self.dropout_param to each       #
        # dropout forward pass.                                                    #
        #                                                                          #
        # When using batch normalization, you'll need to pass self.bn_params[0] to #
        # the forward pass for the first batch normalization layer, pass           #
        # self.bn_params[1] to the forward pass for the second batch normalization #
        # layer, etc.                                                              #
        ############################################################################
#         np_model = np.array([])
        cache_fc = {}#[np_model for i in range(self.num_layers)]
        cache_relu = {}#[np_model for i in range(self.num_layers)]
        cache_dropout = {}#[np_model for i in range(self.num_layers)]
        
        for i in range(self.num_layers - 1):  # the last layer is a plain affine layer, so it is handled separately below
            scores_fc, cache_fc[i] = affine_forward(X, self.params['W'+str(i+1)], self.params['b'+str(i+1)])  # linear forward pass; cache kept for the backward pass
#             print("fc shape", scores_fc.shape)
            if self.normalization=='batchnorm':
                scores_bn, cache_bn = batchnorm_forward(scores_fc, self.params['gamma'+str(i+1)], self.params['beta'+str(i+1)], self.bn_params[i])  # forward (not backward) pass through batchnorm
                scores_relu, cache_relu[i] = relu_forward(scores_bn) 
            else:
                scores_relu, cache_relu[i] = relu_forward(scores_fc)
#                 print("@@@", i, cache_relu[i].shape)
#             print("Relu shape:", scores_relu.shape)
           
            X = scores_relu  # carry the activation forward as the input X of the next layer
            if self.use_dropout:
                scores_dropout, cache_dropout[i] = dropout_forward(scores_relu, self.dropout_param)  # note: one dropout_param is shared by every layer, while each layer has its own bn_params entry
                X = scores_dropout
#         print("Before last shape", X.shape)    
#         print("param shape", self.params['W'+str(self.num_layers)].shape)
        scores, final_cache = affine_forward(X, self.params['W'+str(self.num_layers)], self.params['b'+str(self.num_layers)])  
#         print(scores.shape)
        
                            
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        # If test mode return early
        if mode == 'test':
            return scores

#         print("-----------------------------------------")
        loss, grads = 0.0, {}
        ############################################################################
        # TODO: Implement the backward pass for the fully-connected net. Store the #
        # loss in the loss variable and gradients in the grads dictionary. Compute #
        # data loss using softmax, and make sure that grads[k] holds the gradients #
        # for self.params[k]. Don't forget to add L2 regularization!               #
        #                                                                          #
        # When using batch/layer normalization, you don't need to regularize the scale   #
        # and shift parameters.                                                    #
        #                                                                          #
        # NOTE: To ensure that your implementation matches ours and you pass the   #
        # automated tests, make sure that your L2 regularization includes a factor #
        # of 0.5 to simplify the expression for the gradient.                      #
        ############################################################################
        # compute the easy part first: the softmax data loss
        loss, dsoftmax = softmax_loss(scores, y)
        loss += 0.5*self.reg*(np.sum(self.params['W'+str(self.num_layers)]*self.params['W'+str(self.num_layers)]))  # ??? why does the regularization term only include the last layer's weights? the earlier layers seem to be missing
#         print(loss)
        
        ### then back-propagate to fill in grads
#         dx2, dw2, db2 = affine_backward(dsoftmax, cache_fc2)
#         drelu = relu_backward(dx2, cache_relu)
#         dx1, dw1, db1 = affine_backward(drelu, cache_fc1)

#         grads['W2'], grads['b2'] = dw2 + self.reg*self.params['W2'], db2
#         grads['W1'], grads['b1'] = dw1 + self.reg*self.params['W1'], db1
        
        
        dx_final, dw_final, db_final = affine_backward(dsoftmax, final_cache)
        grads['W'+str(self.num_layers)], grads['b'+str(self.num_layers)] = dw_final + self.reg*self.params['W'+str(self.num_layers)], db_final
        
        dx_last = dx_final
#         print("dx_last.shape=", dx_last.shape)
        for i in range(self.num_layers - 1, 0, -1):
#             print("i=", i)
#             print("cache_relu.shape=", cache_relu[i-1].shape)
            if self.use_dropout:  # if a dropout layer follows the relu, back-propagate through it first
                ddropout = dropout_backward(dx_last, cache_dropout[i-1])
                dx_last = ddropout
            drelu = relu_backward(dx_last, cache_relu[i-1])
            dx, dw, db = affine_backward(drelu, cache_fc[i-1])
            dx_last = dx
            grads['W'+str(i)], grads['b'+str(i)] = dw, db
            
        
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        return loss, grads
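
A minimal sketch of how this loss method is usually exercised with a numeric gradient check; eval_numerical_gradient and the constructor arguments follow the standard cs231n assignment layout and are assumptions here.

# Hypothetical gradient check; assumes FullyConnectedNet and eval_numerical_gradient
# are available from the surrounding assignment code.
np.random.seed(0)
N, D, H1, H2, C = 3, 5, 4, 4, 2
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                          reg=0.1, dtype=np.float64)
loss, grads = model.loss(X, y)
for name in sorted(grads):
    f = lambda _: model.loss(X, y)[0]
    num_grad = eval_numerical_gradient(f, model.params[name], verbose=False)
    print(name, np.max(np.abs(num_grad - grads[name])))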
Ejemplo n.º 37
0
    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=1, normalization=None, reg=0.0,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=1 then
          the network should not use dropout at all.
        - normalization: What type of normalization the network should use. Valid values
          are "batchnorm", "layernorm", or None for no normalization (the default).
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed using
          this datatype. float32 is faster but less accurate, so you should use
          float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout layers. This
          will make the dropout layers deteriminstic so we can gradient check the
          model.
        """
        self.normalization = normalization
        self.use_dropout = dropout != 1  # dropout is considered enabled whenever dropout != 1
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)  # total number of layers (hidden layers + output layer)
        self.dtype = dtype
        self.params = {}

        ############################################################################
        # TODO: Initialize the parameters of the network, storing all values in    #
        # the self.params dictionary. Store weights and biases for the first layer #
        # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
        # initialized from a normal distribution centered at 0 with standard       #
        # deviation equal to weight_scale. Biases should be initialized to zero.   #
        #                                                                          #
        # When using batch normalization, store scale and shift parameters for the #
        # first layer in gamma1 and beta1; for the second layer use gamma2 and     #
        # beta2, etc. Scale parameters should be initialized to ones and shift     #
        # parameters should be initialized to zeros. 
        ############################################################################
#         self.params['W1'] = np.random.normal(0, weight_scale, [input_dim, hidden_dim])
#         self.params['b1'] = np.zeros([hidden_dim])
#         self.params['W2'] = np.random.normal(0, weight_scale, [hidden_dim, num_classes])
#         self.params['b2'] = np.zeros([num_classes])
#         print("num_classes", num_classes)
#         print("input_dim=", input_dim)
#         print("self.num_layers=", self.num_layers)
        for i in range(self.num_layers):
            if (i == 0):
                last_dim = input_dim
            else:
                last_dim = hidden_dims[i-1]
            
            if (i == self.num_layers-1):
                next_dim = num_classes
            else:
                next_dim = hidden_dims[i]
            
            # only hidden layers get batchnorm scale/shift parameters; the output layer does not
            if self.normalization == 'batchnorm' and i < self.num_layers - 1:
                self.params['beta' + str(i+1)] = np.zeros([hidden_dims[i]])
                self.params['gamma' + str(i+1)] = np.ones([hidden_dims[i]])
                
            self.params['W'+str(i+1)] = np.random.normal(0, weight_scale, [last_dim, next_dim])
            self.params['b'+str(i+1)] = np.zeros(next_dim)
        
        
        '''
        print("num_classes", num_classes)
        print("input_dim=", input_dim)
        for i in range(self.num_layers - 1):
            self.params['W' + str(i+1)] = np.random.normal(0, weight_scale, [input_dim, hidden_dims[i]])
            self.params['b' + str(i+1)] = np.zeros([hidden_dims[i]])

            if self.normalization=='batchnorm':
                self.params['beta' + str(i+1)] = np.zeros([hidden_dims[i]])
                self.params['gamma' + str(i+1)] = np.ones([hidden_dims[i]])

            input_dim = hidden_dims[i]  # Set the input dim of next layer to be output dim of current layer.

        # Initialise the weights and biases for final FC layer
        self.params['W' + str(self.num_layers)] = np.random.normal(0, weight_scale, [input_dim, num_classes])
        print("SSSape", self.params['W'+str(self.num_layers)].shape)
        self.params['b' + str(self.num_layers)] = np.zeros([num_classes])
        '''

                                                     
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the mode
        # (train / test). You can pass the same dropout_param to each dropout layer.
        self.dropout_param = {}
        if self.use_dropout: 
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        # With batch normalization we need to keep track of running means and
        # variances, so we need to pass a special bn_param object to each batch
        # normalization layer. You should pass self.bn_params[0] to the forward pass
        # of the first batch normalization layer, self.bn_params[1] to the forward
        # pass of the second batch normalization layer, etc.
        self.bn_params = []
        if self.normalization=='batchnorm':  # self.normalization supports two modes: 'batchnorm' and 'layernorm'
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
        if self.normalization=='layernorm':
            self.bn_params = [{} for i in range(self.num_layers - 1)]

        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)
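
A brief construction sketch consistent with the constructor above; the layer sizes and hyperparameters are chosen purely for illustration.

# Illustrative only: two hidden layers with batchnorm and dropout enabled.
model = FullyConnectedNet([100, 50], input_dim=3 * 32 * 32, num_classes=10,
                          dropout=0.75, normalization='batchnorm',
                          reg=1e-2, weight_scale=5e-2,
                          dtype=np.float64, seed=123)
# Expected keys: W1, b1, gamma1, beta1, W2, b2, gamma2, beta2, W3, b3
print(sorted(model.params.keys()))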
Ejemplo n.º 38
0
            if len(comments) > 0:
                comments.pop()
            continue

        if inPrefsDlgSection:
            if sline.startswith('#'):
                m = commentObj.match(sline)  # extract comment text from line.
                comment = m.group(1)
                # Store comment and its location. This check is necessary
                # because some parameters share the same hint string.
                if comment not in comments:
                    comments.append(comment)
                    locations.append([specfile, currentSection, lineNum])

    # Merge comments detected from each .spec file.
    for i in range(len(comments)):
        if comments[i] not in comments_all:
            comments_all.append(comments[i])
            locations_all.append(locations[i])

# Output hint.py
try:
    fp = open(hintsFile, write_mode)
    fp.write('#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n')
    fp.write('# This file was generated by generateHints.py.\n')
    fp.write('# Following strings are used to localize hints in '
             'Preference Dialog of \n# the PsychoPy application.\n')
    fp.write('# Rebuild this file if comments in *.spec files '
             'are modified.\n\n')
    fp.write('from __future__ import absolute_import, print_function\n')
    fp.write('from psychopy.localization import _translate\n\n')
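
The fragment above relies on a commentObj regular expression and on variables (write_mode, hintsFile, specfile) defined earlier in the script. A plausible sketch of the comment pattern is shown below, offered only as an assumption about what the original defines.

import re

# Hypothetical pattern: capture the hint text that follows '#' on a .spec comment line.
commentObj = re.compile(r'#\s*(.*)$')
m = commentObj.match('# Shown as a tooltip in the Preferences dialog')
print(m.group(1))  # -> Shown as a tooltip in the Preferences dialog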
Ejemplo n.º 39
0
    def test_makeKeyTokens_(self):
        # see http://www.w3.org/TR/REC-xml/#d0e804 for a list of valid characters

        invalidTokens = []
        validTokens = []

        # all test tokens will be generated by prepending or inserting characters to this token
        validBase = "valid"

        # some invalid characters, not allowed anywhere in a token
        # note that '/' must not be added here because it is taken as a separator by makeKeyTokens_()
        invalidChars = "+*,;<>|!$%()=?#\x01"

        # generate the characters that are allowed at the start of a token (and at every other position)
        validStartChars = ":_"
        charRanges = [
            (ord('a'), ord('z')),
            (ord('A'), ord('Z')),
            (0x00F8, 0x02FF),
            (0x0370, 0x037D),
            (0x037F, 0x1FFF),
            (0x200C, 0x200D),
            (0x2070, 0x218F),
            (0x2C00, 0x2FEF),
            (0x3001, 0xD7FF),
            (0xF900, 0xFDCF),
            (0xFDF0, 0xFFFD),
            # (0x10000, 0xEFFFF),   while actually valid, these are not yet accepted by makeKeyTokens_()
        ]
        for r in charRanges:
            for c in range(r[0], r[1]):
                validStartChars += chr(c)

        # generate the characters that are only allowed inside a token, not at the start
        validInlineChars = "-.\xB7"
        charRanges = [
            (ord('0'), ord('9')),
            (0x0300, 0x036F),
            (0x203F, 0x2040),
        ]
        for r in charRanges:
            for c in range(r[0], r[1]):
                validInlineChars += chr(c)

        # test forbidden start characters
        for c in invalidChars + validInlineChars:
            invalidTokens.append(c + validBase)

        # test forbidden inline characters
        for c in invalidChars:
            invalidTokens.append(validBase[:4] + c + validBase[4:])

        # test each allowed start character
        for c in validStartChars:
            validTokens.append(c + validBase)

        # test each allowed inline character
        for c in validInlineChars:
            validTokens.append(validBase[:4] + c + validBase[4:])

        logger = QgsApplication.messageLog()
        logger.messageReceived.connect(self.catchMessage)
        prj = QgsProject.instance()

        for token in validTokens:
            self.messageCaught = False
            prj.readEntry("test", token)
            myMessage = "valid token '%s' not accepted" % (token)
            assert not self.messageCaught, myMessage

        for token in invalidTokens:
            self.messageCaught = False
            prj.readEntry("test", token)
            myMessage = "invalid token '%s' accepted" % (token)
            assert self.messageCaught, myMessage

        logger.messageReceived.disconnect(self.catchMessage)
Ejemplo n.º 40
0
def voc_eval(detpath, annopath, imageset_file, classname, annocache, ovthresh=0.5, use_07_metric=False):
    """
    pascal voc evaluation
    :param detpath: detection results detpath.format(classname)
    :param annopath: annotations annopath.format(classname)
    :param imageset_file: text file containing list of images
    :param classname: category name
    :param annocache: caching annotations
    :param ovthresh: overlap threshold
    :param use_07_metric: whether to use voc07's 11 point ap computation
    :return: rec, prec, ap
    """
    with open(imageset_file, 'r') as f:
        lines = f.readlines()
    image_filenames = [x.strip() for x in lines]

    # load annotations from cache
    if not os.path.isfile(annocache):
        recs = {}
        for ind, image_filename in enumerate(image_filenames):
            recs[image_filename] = parse_voc_rec(annopath.format(image_filename))
            if ind % 100 == 0:
                print('reading annotations for {:d}/{:d}'.format(ind + 1, len(image_filenames)))
        print('saving annotations cache to {:s}'.format(annocache))
        with open(annocache, 'wb') as f:
            pickle.dump(recs, f, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        with open(annocache, 'rb') as f:
            recs = pickle.load(f)

    # extract objects in :param classname:
    class_recs = {}
    npos = 0
    for image_filename in image_filenames:
        objects = [obj for obj in recs[image_filename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in objects])
        difficult = np.array([x['difficult'] for x in objects]).astype(bool)  # np.bool is deprecated; plain bool works across numpy versions
        det = [False] * len(objects)  # stand for detected
        npos = npos + sum(~difficult)
        class_recs[image_filename] = {'bbox': bbox,
                                      'difficult': difficult,
                                      'det': det}

    # read detections
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()

    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    bbox = np.array([[float(z) for z in x[2:]] for x in splitlines])

    # sort by confidence
    sorted_inds = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    bbox = bbox[sorted_inds, :]
    image_ids = [image_ids[x] for x in sorted_inds]

    # go down detections and mark true positives and false positives
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        r = class_recs[image_ids[d]]
        bb = bbox[d, :].astype(float)
        ovmax = -np.inf
        bbgt = r['bbox'].astype(float)

        if bbgt.size > 0:
            # compute overlaps
            # intersection
            ixmin = np.maximum(bbgt[:, 0], bb[0])
            iymin = np.maximum(bbgt[:, 1], bb[1])
            ixmax = np.minimum(bbgt[:, 2], bb[2])
            iymax = np.minimum(bbgt[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih

            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (bbgt[:, 2] - bbgt[:, 0] + 1.) *
                   (bbgt[:, 3] - bbgt[:, 1] + 1.) - inters)

            overlaps = old_div(inters, uni)
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)

        if ovmax > ovthresh:
            if not r['difficult'][jmax]:
                if not r['det'][jmax]:
                    tp[d] = 1.
                    r['det'][jmax] = 1
                else:
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = old_div(tp, float(npos))
    # avoid division by zero in case the first detection matches a difficult ground truth
    prec = old_div(tp, np.maximum(tp + fp, np.finfo(np.float64).eps))
    ap = voc_ap(rec, prec, use_07_metric)

    return rec, prec, ap
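
A hedged usage sketch for voc_eval; the path templates and class name are placeholders following the usual VOC layout, and parse_voc_rec / voc_ap are assumed to come from the same module.

# Illustrative call only.
detpath = 'results/det_test_{:s}.txt'        # template filled with the class name
annopath = 'VOC2007/Annotations/{:s}.xml'    # template filled with the image id
rec, prec, ap = voc_eval(detpath, annopath,
                         imageset_file='VOC2007/ImageSets/Main/test.txt',
                         classname='car', annocache='annots.pkl',
                         ovthresh=0.5, use_07_metric=True)
print('car AP = {:.4f}'.format(ap))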
Ejemplo n.º 41
0
def calculate_image_overlap(dataset_name, dataset_dir, phi_path, moving_id,
                            target_id):
    """
    Calculate the overlapping rate of a specified case
    :param dataset_name: 'LPBA', 'IBSR', 'CUMC' or 'MGH'
    :param dataset_dir: path to the label datasets
    :param phi_path: deformation field path
    :param moving_id: moving image id
    :param target_id: target image id
    :return:
    """

    if dataset_name == 'LPBA':
        label_name = './l_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'LPBA_label_affine/')
        dataset_size = 40
        label_prefix = 's'
    elif dataset_name == 'IBSR':
        label_name = './c_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'IBSR_label_affine/')
        dataset_size = 18
        label_prefix = 'c'
    elif dataset_name == 'CUMC':
        label_name = './m_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'label_affine_icbm/')
        dataset_size = 12  # CUMC12 has 12 subjects (matches calculate_dataset_overlap below)
        label_prefix = 'm'
    elif dataset_name == 'MGH':
        label_name = './g_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'MGH_label_affine/')
        dataset_size = 10
        label_prefix = 'g'
    else:
        raise TypeError(
            "Unknown Dataset Name: Dataset name must be 'LPBA', 'IBSR', 'CUMC' or 'MGH'"
        )

    Labels = sio.loadmat(label_name)
    result = np.zeros((len(Labels['Labels'])))
    result_mean = np.zeros((1))

    label_images = [None] * dataset_size

    for i in range(dataset_size):
        label_images[i] = sitk.GetArrayFromImage(
            sitk.ReadImage(label_files_dir + label_prefix + str(i + 1) +
                           '.nii')).squeeze()

    label_from = label_images[moving_id - 1]
    label_to = label_images[target_id - 1]
    phi = nib.load(phi_path).get_data().squeeze()
    warp_result = warp_image_nn(label_from, phi)
    for label_idx in range(len(Labels['Labels'])):
        warp_idx = np.reshape(warp_result == Labels['Labels'][label_idx],
                              (warp_result.shape[0] * warp_result.shape[1] *
                               warp_result.shape[2], 1))
        to_idx = np.reshape(
            label_to == Labels['Labels'][label_idx],
            (label_to.shape[0] * label_to.shape[1] * label_to.shape[2], 1))
        result[label_idx] = float(np.sum(np.logical_and(
            warp_idx, to_idx))) / np.sum(to_idx)

    single_result = result
    single_result = single_result[~np.isnan(single_result)]
    result_mean = np.mean(single_result)

    print('Per-label overlapping rate (NaN values removed)')
    print(single_result)
    print('Averaged overlapping rate')
    print(result_mean)
Ejemplo n.º 42
0
def calculate_dataset_overlap(dataset_name, dataset_dir, output_name):
    """
    Calculate the overlapping rate of specified dataset
    :param dataset_name: 'LPBA', 'IBSR', 'CUMC' or 'MGH'
    :param dataset_dir: directory for the dataset
    :param output_name: saved result name in .mat format
    :return: averaged overlapping rate among each labels, saved in .mat format file
    """

    if dataset_name == 'LPBA':
        label_name = './l_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'LPBA_label_affine/')
        dataset_size = 40
        label_prefix = 's'
    elif dataset_name == 'IBSR':
        label_name = './c_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'IBSR_label_affine/')
        dataset_size = 18
        label_prefix = 'c'
    elif dataset_name == 'CUMC':
        label_name = './m_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'label_affine_icbm/')
        dataset_size = 12
        label_prefix = 'm'
    elif dataset_name == 'MGH':
        label_name = './g_Labels.mat'
        label_files_dir = os.path.join(dataset_dir, 'MGH_label_affine/')
        dataset_size = 10
        label_prefix = 'g'
    else:
        raise TypeError(
            "Unknown Dataset Name: Dataset name must be 'LPBA', 'IBSR', 'CUMC' or 'MGH'"
        )

    Labels = sio.loadmat(label_name)
    result = np.zeros(
        (dataset_size * (dataset_size - 1), len(Labels['Labels'])))
    result_mean = np.zeros((dataset_size * (dataset_size - 1), 1))
    registration_results_dir = np.chararray((dataset_size, dataset_size),
                                            itemsize=200)

    # Change the directory if needed (one directory for one phiinv.nii.gz file)
    for i in range(dataset_size):
        for j in range(dataset_size):
            if (i == j):
                continue
            registration_results_dir[i][j] += './' + dataset_name + '/' + str(
                i + 1) + '_to_' + str(j + 1) + '/'

    label_images = [None] * dataset_size

    for i in range(dataset_size):
        label_images[i] = sitk.GetArrayFromImage(
            sitk.ReadImage(label_files_dir + label_prefix + str(i + 1) +
                           '.nii')).squeeze()

    base_idx = 0
    for L_from in range(dataset_size):
        for L_to in range(dataset_size):
            if L_from == L_to:
                continue
            label_from = label_images[L_from]
            label_to = label_images[L_to]
            registration_results_path = registration_results_dir[L_from][
                L_to] + 'phiinv.nii.gz'
            print(registration_results_path)
            phi = nib.load(registration_results_path).get_data().squeeze()
            warp_result = warp_image_nn(label_from, phi)
            for label_idx in range(len(Labels['Labels'])):
                warp_idx = np.reshape(
                    warp_result == Labels['Labels'][label_idx],
                    (warp_result.shape[0] * warp_result.shape[1] *
                     warp_result.shape[2], 1))
                to_idx = np.reshape(label_to == Labels['Labels'][label_idx],
                                    (label_to.shape[0] * label_to.shape[1] *
                                     label_to.shape[2], 1))
                result[base_idx][label_idx] = float(
                    np.sum(np.logical_and(warp_idx, to_idx))) / np.sum(to_idx)
            base_idx += 1
            print((base_idx, ' out of ', dataset_size * (dataset_size - 1)))

    for i in range(dataset_size * (dataset_size - 1)):
        single_result = result[i, :]
        single_result = single_result[~np.isnan(single_result)]
        result_mean[i] = np.mean(single_result)

    sio.savemat(output_name, {'result_mean': result_mean})
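
A minimal call sketch, assuming the label .mat files and the per-pair phiinv.nii.gz results are already laid out the way the function expects; the paths below are illustrative.

# Illustrative only; dataset_dir must contain the LPBA_label_affine/ folder used above.
calculate_dataset_overlap('LPBA', '/data/registration_labels', 'LPBA_overlap_mean.mat')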
Ejemplo n.º 43
0
def cml_find_structure(Prj, Ori, Rot, outdir, outname, maxit, first_zero, flag_weights):
	from projection import cml_export_progress, cml_disc, cml_export_txtagls
	import time, sys

	# global vars
	global g_i_prj, g_n_prj, g_n_anglst, g_anglst, g_d_psi, g_debug, g_n_lines, g_seq

	# list of free orientation
	ocp = [-1] * g_n_anglst

	if first_zero:
		listprj = list(range(1, g_n_prj))
		ocp[0]  = 0 
	else:   listprj = list(range(g_n_prj))

	# to stop when the solution oscillates
	period_disc = [0, 0, 0]
	period_ct   = 0
	period_th   = 2

	# iteration loop
	for ite in range(maxit):
		t_start = time.time()

		# loop over i prj
		change = False
		for iprj in listprj:

			# Store the current orientation
			ind          = 4*iprj
			store_phi    = Ori[ind]
			store_theta  = Ori[ind+1]
			store_psi    = Ori[ind+2]
			cur_agl      = Ori[ind+3]
			if cur_agl  != -1: ocp[cur_agl] = -1

			# prepare active index of cml for weighting in order to earn time later
			iw = [0] * (g_n_prj - 1)
			c  = 0
			ct = 0
			for i in range(g_n_prj):
				for j in range(i+1, g_n_prj):
					if i == iprj or j == iprj:
						iw[ct] = c
						ct += 1
					c += 1

			# loop over all angles
			best_disc = 1.0e20
			best_psi  = -1
			best_iagl = -1
			for iagl in range(g_n_anglst):
				# if orientation is free
				if ocp[iagl] == -1:
					# assign new orientation
					Ori[ind]   = g_anglst[iagl][0]
					Ori[ind+1] = g_anglst[iagl][1]
					Rot        = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind+1], 0.0)
					# weights
					if flag_weights:
						cml = Util.cml_line_in3d(Ori, g_seq, g_n_prj, g_n_lines)
						weights = Util.cml_weights(cml)
						mw  = max(weights)
						for i in range(g_n_lines): weights[i]  = mw - weights[i]
						sw = sum(weights)
						if sw == 0:
							weights = [6.28 / float(g_n_lines)] * g_n_lines
						else:
							for i in range(g_n_lines):
								weights[i] /= sw
								weights[i] *= weights[i]
					else:   weights = [1.0] * g_n_lines

					# spin all psi
					com = Util.cml_line_insino(Rot, iprj, g_n_prj)
					res = Util.cml_spin_psi(Prj, com, weights, iprj, iw, g_n_psi, g_d_psi, g_n_prj)

					# select the best
					if res[0] < best_disc:
						best_disc = res[0]
						best_psi  = res[1]
						best_iagl = iagl

					if g_debug: cml_export_progress(outdir, ite, iprj, iagl, res[1], res[0], 'progress')
				else:
					if g_debug: cml_export_progress(outdir, ite, iprj, iagl, -1, -1, 'progress')

			# if change, assign
			if best_iagl != cur_agl:
				ocp[best_iagl] = iprj
				Ori[ind]       = g_anglst[best_iagl][0] # phi
				Ori[ind+1]     = g_anglst[best_iagl][1] # theta
				Ori[ind+2]     = best_psi * g_d_psi     # psi
				Ori[ind+3]     = best_iagl              # index
				change = True
			else:
				if cur_agl != -1: ocp[cur_agl] = iprj
				Ori[ind]    = store_phi
				Ori[ind+1]  = store_theta
				Ori[ind+2]  = store_psi
				Ori[ind+3]  = cur_agl

			Rot = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind+1], Ori[ind+2])

			if g_debug: cml_export_progress(outdir, ite, iprj, best_iagl, best_psi * g_d_psi, best_disc, 'choose')

		# if one change, compute new full disc
		disc = cml_disc(Prj, Ori, Rot, flag_weights)

		# display in the progress file
		cml_export_txtagls(outdir, outname, Ori, disc, 'Ite: %03i' % (ite + 1))

		if not change: break

		# to stop when the solution oscillates
		period_disc.pop(0)
		period_disc.append(disc)
		if period_disc[0] == period_disc[2]:
			period_ct += 1
			if period_ct >= period_th and min(period_disc) == disc:
				angfile = open(outdir + '/' + outname, 'a')
				angfile.write('\nSTOP SOLUTION UNSTABLE\n')
				angfile.write('Discrepancy period: %s\n' % period_disc)
				angfile.close()
				break
		else:
			period_ct = 0

	return Ori, disc, ite
Ejemplo n.º 44
0
def cml_find_structure2(Prj, Ori, Rot, outdir, outname, maxit, first_zero, flag_weights, myid, main_node, number_of_proc):
	from projection import cml_export_progress, cml_disc, cml_export_txtagls
	import time, sys
	from random import shuffle,random

	from mpi import MPI_FLOAT, MPI_INT, MPI_SUM, MPI_COMM_WORLD
	from mpi import mpi_reduce, mpi_bcast, mpi_barrier

	# global vars
	global g_i_prj, g_n_prj, g_n_anglst, g_anglst, g_d_psi, g_debug, g_n_lines, g_seq

	# list of free orientation
	ocp = [-1] * g_n_anglst

	if first_zero:
		listprj = list(range(1, g_n_prj))
		ocp[0]  = 0 
	else:   listprj = list(range(g_n_prj))

	# to stop when the solution oscillates
	period_disc = [0, 0, 0]
	period_ct   = 0
	period_th   = 2
	#if not flag_weights:   weights = [1.0] * g_n_lines

	# iteration loop
	for ite in range(maxit):
		#print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>    ite = ", ite, "   myid = ", myid
		t_start = time.time()

		# loop over i prj
		change = False
		tlistprj = listprj[:]
		shuffle(tlistprj)
		nnn = len(tlistprj)
		tlistprj = mpi_bcast(tlistprj, nnn, MPI_INT, main_node, MPI_COMM_WORLD)
		tlistprj = list(map(int, tlistprj))
		"""
		if(ite>1 and ite%5 == 0  and ite<140):
			if(myid == main_node):
				for i in xrange(0,len(tlistprj),5):
					ind          = 4*i
					Ori[ind]      =  360.*random()
					Ori[ind+1]    =  180.*random()
					Ori[ind+2]    =  360.*random()
					Ori[ind+3]    =  -1
				for i in xrange(len(tlistprj)):
					ind          = 4*i
					Ori[ind+3]    = float(Ori[ind+3])
			nnn = len(Ori)
			Ori = mpi_bcast(Ori, nnn, MPI_FLOAT, main_node, MPI_COMM_WORLD)
			Ori = map(float, Ori)
			for i in xrange(len(tlistprj)):
				ind          = 4*i
				Ori[ind+3]    = int(Ori[ind+3])
		"""

		for iprj in tlistprj:
			#print "**********************************  iprj = ", iprj, g_n_anglst

			# Store the current orientation
			ind          = 4*iprj
			store_phi    = Ori[ind]
			store_theta  = Ori[ind+1]
			store_psi    = Ori[ind+2]
			cur_agl      = Ori[ind+3]
			if cur_agl  != -1: ocp[cur_agl] = -1

			# prepare active index of cml for weighting in order to earn time later
			iw = [0] * (g_n_prj - 1)
			c  = 0
			ct = 0
			for i in range(g_n_prj):
				for j in range(i+1, g_n_prj):
					if i == iprj or j == iprj:
						iw[ct] = c
						ct += 1
					c += 1

			# loop over all angles
			best_disc_list = [0]*g_n_anglst
			best_psi_list  = [0]*g_n_anglst
			for iagl in range(myid, g_n_anglst, number_of_proc):
				# if orientation is free
				if ocp[iagl] == -1:
					# assign new orientation
					Ori[ind]   = g_anglst[iagl][0]
					Ori[ind+1] = g_anglst[iagl][1]
					Rot        = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind+1], 0.0)
					# weights
					if flag_weights:
						cml = Util.cml_line_in3d(Ori, g_seq, g_n_prj, g_n_lines)
						weights = Util.cml_weights(cml)
						mw  = max(weights)
						for i in range(g_n_lines): weights[i]  = mw - weights[i]
						sw = sum(weights)
						if sw == 0:
							weights = [6.28 / float(g_n_lines)] * g_n_lines
						else:
							for i in range(g_n_lines):
								weights[i] /= sw
								weights[i] *= weights[i]

					# spin all psi
					com = Util.cml_line_insino(Rot, iprj, g_n_prj)
					if flag_weights:
						res = Util.cml_spin_psi(Prj, com, weights, iprj, iw, g_n_psi, g_d_psi, g_n_prj)
					else:
						res = Util.cml_spin_psi_now(Prj, com, iprj, iw, g_n_psi, g_d_psi, g_n_prj)

					# select the best
					best_disc_list[iagl] = res[0]
					best_psi_list[iagl]  = res[1]

					if g_debug: cml_export_progress(outdir, ite, iprj, iagl, res[1], res[0], 'progress')
				else:
					if g_debug: cml_export_progress(outdir, ite, iprj, iagl, -1, -1, 'progress')
			best_disc_list = mpi_reduce(best_disc_list, g_n_anglst, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)
			best_psi_list = mpi_reduce(best_psi_list, g_n_anglst, MPI_FLOAT, MPI_SUM, main_node, MPI_COMM_WORLD)

			best_psi = -1
			best_iagl = -1

			if myid == main_node:
				best_disc = 1.0e20
				for iagl in range(g_n_anglst):
					if best_disc_list[iagl] > 0.0 and best_disc_list[iagl] < best_disc:
						best_disc = best_disc_list[iagl]
						best_psi = best_psi_list[iagl]
						best_iagl = iagl
			best_psi = mpi_bcast(best_psi, 1, MPI_FLOAT, main_node, MPI_COMM_WORLD)
			best_iagl = mpi_bcast(best_iagl, 1, MPI_INT, main_node, MPI_COMM_WORLD)
			best_psi = float(best_psi[0])
			best_iagl =  int(best_iagl[0])
			
			#print "xxxxx myid = ", myid, "    best_psi = ", best_psi, "   best_ialg = ", best_iagl

			# if change, assign
			if best_iagl != cur_agl:
				ocp[best_iagl] = iprj
				Ori[ind]       = g_anglst[best_iagl][0] # phi
				Ori[ind+1]     = g_anglst[best_iagl][1] # theta
				Ori[ind+2]     = best_psi * g_d_psi     # psi
				Ori[ind+3]     = best_iagl              # index
				change = True
			else:
				if cur_agl != -1: ocp[cur_agl] = iprj
				Ori[ind]    = store_phi
				Ori[ind+1]  = store_theta
				Ori[ind+2]  = store_psi
				Ori[ind+3]  = cur_agl

			Rot = Util.cml_update_rot(Rot, iprj, Ori[ind], Ori[ind+1], Ori[ind+2])

			if g_debug: cml_export_progress(outdir, ite, iprj, best_iagl, best_psi * g_d_psi, best_disc, 'choose')

		# if one change, compute new full disc
		disc = cml_disc(Prj, Ori, Rot, flag_weights)

		# display in the progress file
		if myid == main_node:
			cml_export_txtagls(outdir, outname, Ori, disc, 'Ite: %03i' % (ite + 1))

		if not change: break

		# to stop when the solution oscillates
		period_disc.pop(0)
		period_disc.append(disc)
		if period_disc[0] == period_disc[2]:
			period_ct += 1
			if period_ct >= period_th and min(period_disc) == disc and myid == main_node:
				angfile = open(outdir + '/' + outname, 'a')
				angfile.write('\nSTOP SOLUTION UNSTABLE\n')
				angfile.write('Discrepancy period: %s\n' % period_disc)
				angfile.close()
				break
		else:
			period_ct = 0
		mpi_barrier(MPI_COMM_WORLD)

	return Ori, disc, ite
Ejemplo n.º 45
0
def cml_open_proj(stack, ir, ou, lf, hf, dpsi = 1):
	from projection   import cml_sinogram
	from utilities    import model_circle, get_params_proj, model_blank, get_im
	from fundamentals import fftip
	from filter       import filt_tanh

	# number of projections
	if  type(stack) == type(""): nprj = EMUtil.get_image_count(stack)
	else:                       nprj = len(stack)
	Prj  = []                                          # list of projections
	Ori  = [-1] * 4 * nprj                             # initial orientation (phi, theta, psi, index) for each projection

	for i in range(nprj):
		image = get_im(stack, i)

		# read initial angles if given
		try:	Ori[4*i], Ori[4*i+1], Ori[4*i+2], s2x, s2y = get_params_proj(image)
		except:	pass
		
		if(i == 0):
			nx = image.get_xsize()
			if(ou < 1): ou = nx // 2 - 1
			diameter = int(2 * ou)
			mask2D   = model_circle(ou, nx, nx)
			if ir > 0:  mask2D -= model_circle(ir, nx, nx)

		# normalize under the mask
		[mean_a, sigma, imin, imax] = Util.infomask(image, mask2D, True)
		image -= mean_a
		Util.mul_scalar(image, 1.0/sigma)
		Util.mul_img(image, mask2D)

		# sinogram
		sino = cml_sinogram(image, diameter, dpsi)

		# prepare the cut positions in order to filter (lf: low freq; hf: high freq)
		ihf = min(int(2 * hf * diameter), diameter + (diameter + 1) % 2)
		ihf = ihf + (ihf + 1) % 2    # index ihf must be odd to take the img part
		ilf = max(int(2 * lf * diameter), 0)
		ilf = ilf + ilf % 2          # index ilf must be even to fall in the real part
		bdf = ihf - ilf + 1

		# process lines
		nxe = sino.get_xsize()
		nye = sino.get_ysize()
		prj = model_blank(bdf, 2*nye)
		pp = model_blank(nxe, 2*nye)
		for li in range(nye):
			# get the line li
			line = Util.window(sino, nxe, 1, 1, 0, li-nye//2, 0)
			# u2 (not improve the results)
			#line = filt_tanh(line, ou / float(nx), ou / float(nx))
			# normalize this line
			[mean_l, sigma_l, imin, imax] = Util.infomask(line, None, True)
			line = old_div((line - mean_l), sigma_l)
			# fft
			fftip(line)
			# filter (cut part of coef) and create mirror line
			Util.cml_prepare_line(prj, line, ilf, ihf, li, nye)

		# store the projection
		Prj.append(prj)

	return Prj, Ori
Ejemplo n.º 46
0
def cml_end_log(Ori):
	from utilities import print_msg
	global g_n_prj
	print_msg('\n\n')
	for i in range(g_n_prj): print_msg('Projection #%03i: phi %10.5f    theta %10.5f    psi %10.5f\n' % (i, Ori[4*i], Ori[4*i+1], Ori[4*i+2]))
Ejemplo n.º 47
0
    def addRow(self):
        items = [
            QStandardItem('0')
            for i in range(self.tblView.model().columnCount())
        ]
        self.tblView.model().appendRow(items)
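
A small sketch of the surrounding state this method assumes: a QStandardItemModel attached to self.tblView. The PyQt5 imports and widget wiring are assumptions, not part of the original class.

# Hypothetical wiring, e.g. in the class __init__:
from PyQt5.QtWidgets import QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem

self.tblView = QTableView()
self.tblView.setModel(QStandardItemModel(0, 3))  # start with 0 rows, 3 columns
self.addRow()  # appends one row of three '0' items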
Ejemplo n.º 48
0
def merge_qp(output,files,verbose=False):
    #read all the files and display main info in each of them
    print("=========input=========")
    filenames = [ f.name for f in files]
    datasets  = [ Dataset(filename) for filename in filenames]
    QP_table, QP_kpts, QP_E_E0_Z = [], [], []
    for d,filename in zip(datasets,filenames):
        _, nkpoints, nqps, _, nstrings = list(map(int,d['PARS'][:]))
        print("filename:    ", filename)
        if verbose:
            print("description:")
            for i in range(1,nstrings+1):
                print(''.join(d['DESC_strings_%05d'%i][0]))
        else:
            print("description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0]))
        print() 
        QP_table.append( d['QP_table'][:].T )
        QP_kpts.append( d['QP_kpts'][:].T )
        QP_E_E0_Z.append( d['QP_E_Eo_Z'][:] )

    # create the QP_table
    QP_table_save = np.vstack(QP_table)

    # create the kpoints table
    # use the largest k-point index found across all QP tables
    nkpoints = int(max(QP_table_save[:,2]))
    QP_kpts_save = np.zeros([nkpoints,3])
    #iterate over the QP's and store the corresponding kpoint
    for qp_file,kpts in zip(QP_table,QP_kpts):
        #iterate over the kpoints and save the coordinates on the list
        for qp in qp_file:
            n1,n2,nk = list(map(int,qp))
            QP_kpts_save[nk-1] = kpts[nk-1]

    # create the QPs energies table
    QP_E_E0_Z_save = np.concatenate(QP_E_E0_Z,axis=1)

    #create reference file from one of the files
    fin  = datasets[0]
    fout = Dataset(output,'w') 

    variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z']
    variables_save   = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save]
    variables_dict   = dict(list(zip(variables_update,variables_save))) 
    PARS_save = fin['PARS'][:]
    PARS_save[1:3] = nkpoints,len(QP_table_save)

    #create the description string
    kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2])
    bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1])
    description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax)
    description_save = np.array([i for i in " %s"%description])

    #output data
    print("========output=========")
    print("filename:    ", output)
    print("description: ", description)

    #copy dimensions
    for dname, the_dim in list(fin.dimensions.items()):
        fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None)

    #get dimensions
    def dimensions(array):
        return tuple([ 'D_%010d'%d for d in array.shape ])

    #create missing dimensions
    for v in variables_save:
        for dname,d in zip( dimensions(v),v.shape ):
            if dname not in list(fout.dimensions.keys()):
                fout.createDimension(dname, d)

    #copy variables
    for v_name, varin in list(fin.variables.items()):
        if v_name in variables_update:
            #get the variable
            merged = variables_dict[v_name]
            # create the variable
            outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged))
            # Copy variable attributes
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            #save outvar
            outVar[:] = merged

        else:
            # create the variable
            outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions)
            # Copy variable attributes
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            if v_name=='PARS':
                outVar[:] = PARS_save[:]
            elif v_name=='DESC_strings_%05d'%(nstrings):
                outVar[:] = varin[:]
                outVar[:,:len(description_save)] = description_save.T
            else:
                outVar[:] = varin[:]
            
    fout.close()
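
A hedged sketch of how merge_qp might be driven from the command line; the argparse wiring is an assumption, and the function itself only needs open file objects exposing a .name attribute.

# Illustrative driver; the QP database file names are placeholders.
import argparse

parser = argparse.ArgumentParser(description='merge QP netCDF databases')
parser.add_argument('files', nargs='+', type=argparse.FileType('r'))
parser.add_argument('-o', '--output', default='ndb.QP_merged')
args = parser.parse_args()
merge_qp(args.output, args.files, verbose=True)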
Ejemplo n.º 49
0
    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'

        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.normalization=='batchnorm':
            for bn_param in self.bn_params:
                bn_param['mode'] = mode
        scores = None
        ############################################################################
        # TODO: Implement the forward pass for the fully-connected net, computing  #
        # the class scores for X and storing them in the scores variable.          #
        #                                                                          #
        # When using dropout, you'll need to pass self.dropout_param to each       #
        # dropout forward pass.                                                    #
        #                                                                          #
        # When using batch normalization, you'll need to pass self.bn_params[0] to #
        # the forward pass for the first batch normalization layer, pass           #
        # self.bn_params[1] to the forward pass for the second batch normalization #
        # layer, etc.                                                              #
        ############################################################################
        A = X
        Z = {}
        cache = {}
        bn_cache = {}
        for l in range(self.num_layers-1):
            Z[l], cache[l] = affine_forward(A, self.params['W' + str(l+1)], self.params['b' + str(l+1)])
            ###  batch normal
            if self.normalization=='batchnorm':
                Z[l], bn_cache[l] = batchnorm_forward(Z[l], self.params['gamma' + str(l+1)], self.params['beta' + str(l+1)], self.bn_params[l])
            if self.normalization=='layernorm':
                Z[l], bn_cache[l] = layernorm_forward(Z[l], self.params['gamma' + str(l+1)], self.params['beta' + str(l+1)], self.bn_params[l])
                
            #####
            A, _ = relu_forward(Z[l])
        #### dropout forward
        if self.use_dropout:
            A, drop_cache = dropout_forward(A, self.dropout_param)
        ####
        
        Zout, cache_out = affine_forward(A, self.params['W' + str(self.num_layers)], self.params['b' + str(self.num_layers)])
        scores = Zout
        
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        # If test mode return early
        if mode == 'test':
            return scores

        loss, grads = 0.0, {}
        ############################################################################
        # TODO: Implement the backward pass for the fully-connected net. Store the #
        # loss in the loss variable and gradients in the grads dictionary. Compute #
        # data loss using softmax, and make sure that grads[k] holds the gradients #
        # for self.params[k]. Don't forget to add L2 regularization!               #
        #                                                                          #
        # When using batch/layer normalization, you don't need to regularize the scale   #
        # and shift parameters.                                                    #
        #                                                                          #
        # NOTE: To ensure that your implementation matches ours and you pass the   #
        # automated tests, make sure that your L2 regularization includes a factor #
        # of 0.5 to simplify the expression for the gradient.                      #
        ############################################################################
        
        loss, dZout = softmax_loss(Zout, y)
        for l in range(self.num_layers-1):
            loss += 0.5 * self.reg * (np.sum(self.params['W' + str(l+1)] ** 2))
        
        ################  backward 
        
        dA, grads['W' + str(self.num_layers)], grads['b' + str(self.num_layers)] = affine_backward(dZout, cache_out)
        
         #### dropout
        if self.use_dropout:
            dA = dropout_backward(dA, drop_cache)
        ####
        for l in reversed(range(self.num_layers-1)):
           
            dZ = relu_backward(dA, Z[l])
            
            if self.normalization=='batchnorm':
                dZ, grads['gamma' + str(l+1)], grads['beta' + str(l+1)] = batchnorm_backward(dZ, bn_cache[l])
            if self.normalization=='layernorm':
                dZ, grads['gamma' + str(l+1)], grads['beta' + str(l+1)] = layernorm_backward(dZ, bn_cache[l])
                
            dA, grads['W' + str(l+1)], grads['b' + str(l+1)] = affine_backward(dZ, cache[l])
            
            grads['W' + str(l+1)] += self.reg *self.params['W' + str(l+1)]
           
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
     

        return loss, grads
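The NOTE in the TODO block above explains the 0.5 factor in the L2 term: with it, the gradient of the regularizer reduces to reg * W. A minimal, self-contained numeric check of that fact (the helper numeric_grad below is ours, not part of the assignment code):

import numpy as np

def numeric_grad(f, W, h=1e-5):
    # central-difference gradient of a scalar function f at W
    grad = np.zeros_like(W)
    it = np.nditer(W, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = W[idx]
        W[idx] = old + h
        fp = f(W)
        W[idx] = old - h
        fm = f(W)
        W[idx] = old
        grad[idx] = (fp - fm) / (2 * h)
        it.iternext()
    return grad

reg = 0.1
W = np.random.randn(4, 3)
reg_loss = lambda M: 0.5 * reg * np.sum(M ** 2)
# the analytic gradient of 0.5 * reg * ||W||^2 is exactly reg * W
print(np.allclose(numeric_grad(reg_loss, W), reg * W, atol=1e-6))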
Ejemplo n.º 50
0
def nf_read_roi(fileobj):
    '''
    points = nf_read_roi(fileobj)
    Read ImageJ's ROI format

    Adapted from https://gist.github.com/luispedro/3437255
    '''
    # This is based on:
    # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html
    # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html

    # TODO: Use an enum
    #SPLINE_FIT = 1
    #DOUBLE_HEADED = 2
    #OUTLINE = 4
    #OVERLAY_LABELS = 8
    #OVERLAY_NAMES = 16
    #OVERLAY_BACKGROUNDS = 32
    #OVERLAY_BOLD = 64
    SUB_PIXEL_RESOLUTION = 128
    #DRAW_OFFSET = 256

    pos = [4]

    def get8():
        pos[0] += 1
        s = fileobj.read(1)
        if not s:
            raise IOError('readroi: Unexpected EOF')
        return ord(s)

    def get16():
        b0 = get8()
        b1 = get8()
        return (b0 << 8) | b1

    def get32():
        s0 = get16()
        s1 = get16()
        return (s0 << 16) | s1

    def getfloat():
        v = np.int32(get32())
        return v.view(np.float32)

    magic = fileobj.read(4)
    # In Python 3 a binary fileobj returns bytes, so compare against b'Iout' as well
    if magic not in (b'Iout', 'Iout'):
        #        raise IOError('Magic number not found')
        print('Magic number not found')
    version = get16()

    # It seems that the roi type field occupies 2 Bytes, but only one is used

    roi_type = get8()
    # Discard second Byte:
    get8()

    #    if not (0 <= roi_type < 11):
    #        print(('roireader: ROI type %s not supported' % roi_type))
    #
    #    if roi_type != 7:
    #
    #        print(('roireader: ROI type %s not supported (!= 7)' % roi_type))

    top = get16()
    left = get16()
    bottom = get16()
    right = get16()
    n_coordinates = get16()

    x1 = getfloat()
    y1 = getfloat()
    x2 = getfloat()
    y2 = getfloat()
    stroke_width = get16()
    shape_roi_size = get32()
    stroke_color = get32()
    fill_color = get32()
    subtype = get16()
    if subtype != 0:
        raise ValueError('roireader: ROI subtype %s not supported (!= 0)' %
                         subtype)
    options = get16()
    arrow_style = get8()
    arrow_head_size = get8()
    rect_arc_size = get16()
    position = get32()
    header2offset = get32()

    if options & SUB_PIXEL_RESOLUTION:
        getc = getfloat
        points = np.empty((n_coordinates, 2), dtype=np.float32)
    else:
        getc = get16
        points = np.empty((n_coordinates, 2), dtype=np.int16)
    points[:, 1] = [getc() for i in range(n_coordinates)]
    points[:, 0] = [getc() for i in range(n_coordinates)]
    points[:, 1] += left
    points[:, 0] += top
    points -= 1

    return points
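A minimal usage sketch for nf_read_roi; the file name below is hypothetical, and the file must be opened in binary mode so the byte-level reads above behave as expected:

import numpy as np  # nf_read_roi builds the coordinate array with np

with open('cell_outline.roi', 'rb') as f:  # hypothetical path to an ImageJ .roi file
    points = nf_read_roi(f)

print(points.shape)  # (n_coordinates, 2) array of (row, col) vertices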
Ejemplo n.º 51
0
def distance_masks(M_s, cm_s, max_dist, enclosed_thr=None):
    """
    Compute a distance matrix based on an intersection-over-union metric. Matrices are compared in order,
    with matrix i compared against matrix i+1

    Parameters:
    ----------
    M_s: tuples of 1-D arrays
        The thresholded A matrices (masks) to compare, output of threshold_components

    cm_s: list of list of 2-ples
        the centroids of the components in each M_s

    max_dist: float
        maximum distance allowed between component centroids. This corresponds to a distance
        at which two components are surely disjoint

    enclosed_thr: float
        if not None set distance to at most the specified value when ground truth is a subset of inferred


    Returns:
    --------
    D_s: list of matrix distances

    Raise:
    ------
    Exception('Nan value produced. Error in inputs')


    """
    D_s = []

    for gt_comp, test_comp, cmgt_comp, cmtest_comp in zip(
            M_s[:-1], M_s[1:], cm_s[:-1], cm_s[1:]):

        # todo : better with a function that calls itself
        print('New Pair **')
        # copy so as not to interfere with M_s
        gt_comp = gt_comp.copy()[:, :]
        test_comp = test_comp.copy()[:, :]

        # the number of components for each
        nb_gt = np.shape(gt_comp)[-1]
        nb_test = np.shape(test_comp)[-1]
        D = np.ones((nb_gt, nb_test))

        cmgt_comp = np.array(cmgt_comp)
        cmtest_comp = np.array(cmtest_comp)
        if enclosed_thr is not None:
            gt_val = gt_comp.T.dot(gt_comp).diagonal()
        for i in range(nb_gt):
            # for each components of gt
            k = gt_comp[:, np.repeat(i, nb_test)] + test_comp
            # column j of k is the elementwise sum of the i-th gt mask and the j-th test mask
            for j in range(nb_test):  # for each components on the tests
                dist = np.linalg.norm(cmgt_comp[i] - cmtest_comp[j])
                # we compute the distance of this one to the other ones
                if dist < max_dist:
                    # union matrix of the i-th neuron to the jth one
                    union = k[:, j].sum()
                    # we could have used OR for union and AND for intersection by converting
                    # the matrices to booleans beforehand

                    # product of the two elements' matrices
                    # we multiply the boolean values of the j-th component with the i-th
                    intersection = np.array(gt_comp[:, i].T.dot(
                        test_comp[:, j]).todense()).squeeze()

                    # if we don't have even a union this is pointless
                    if union > 0:

                        # intersection is subtracted from union since union counts the overlapping area twice
                        # having the values in the 0-1 range is helpful for the Hungarian algorithm that follows
                        D[i, j] = 1 - 1. * intersection / \
                            (union - intersection)
                        if enclosed_thr is not None:
                            if intersection == gt_val[i]:
                                D[i, j] = min(D[i, j], 0.5)
                    else:
                        D[i, j] = 1.

                    if np.isnan(D[i, j]):
                        raise Exception('Nan value produced. Error in inputs')
                else:
                    D[i, j] = 1

        D_s.append(D)
    return D_s
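A small worked sketch of the same IoU-based distance on two toy masks (plain NumPy, independent of the sparse machinery above). As in the function, the per-pixel sum counts the overlap twice, so the true union is union - intersection:

import numpy as np

gt = np.array([1, 1, 1, 0, 0], dtype=float)    # 3-pixel ground-truth mask
test = np.array([0, 1, 1, 1, 1], dtype=float)  # 4-pixel test mask

k = gt + test                    # overlapping pixels sum to 2
union = k.sum()                  # 7 = 3 + 4, overlap counted twice
intersection = gt.dot(test)      # 2 overlapping pixels
d = 1 - intersection / (union - intersection)  # 1 - 2/5 = 0.6
print(d)                         # the value that would go into D[i, j]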
Ejemplo n.º 52
0
 def enqueue_entries(self, workq, count):
     for i in range(count):
         workq.enqueue(WorkQueueEntry(i))
Ejemplo n.º 53
0
def nf_match_neurons_in_binary_masks(masks_gt,
                                     masks_comp,
                                     thresh_cost=.7,
                                     min_dist=10,
                                     print_assignment=False,
                                     plot_results=False,
                                     Cn=None,
                                     labels=None,
                                     cmap='viridis',
                                     D=None,
                                     enclosed_thr=None):
    """
    Match neurons expressed as binary masks, using the Hungarian matching algorithm.

    Parameters:
    -----------

    masks_gt: bool ndarray  components x d1 x d2
        ground truth masks

    masks_comp: bool ndarray  components x d1 x d2
        mask to compare to

    thresh_cost: double
        max cost accepted

    min_dist: minimum distance between centers of mass

    print_assignment:
        for hungarian algorithm

    plot_results: bool

    Cn:
        correlation image or median

    D: list of ndarrays
        list of distances matrices

    enclosed_thr: float
        if not None set distance to at most the specified value when ground truth is a subset of inferred

    Returns:
    --------
    idx_tp_1:
        indices of true positives in the ground truth masks

    idx_tp_2:
        indices of true positives in the comparison masks

    idx_fn_1:
        indices of false negatives

    idx_fp_2:
        indices of false positives

    performance:

    """

    _, d1, d2 = np.shape(masks_gt)
    dims = d1, d2

    # transpose to put components last, then reshape each mask into a column read in Fortran (column-major) order
    A_ben = scipy.sparse.csc_matrix(
        np.reshape(masks_gt[:].transpose([1, 2, 0]), (
            np.prod(dims),
            -1,
        ),
                   order='F'))
    A_cnmf = scipy.sparse.csc_matrix(
        np.reshape(masks_comp[:].transpose([1, 2, 0]), (
            np.prod(dims),
            -1,
        ),
                   order='F'))

    # have the center of mass of each element of the two masks
    cm_ben = [scipy.ndimage.center_of_mass(mm) for mm in masks_gt]
    cm_cnmf = [scipy.ndimage.center_of_mass(mm) for mm in masks_comp]

    if D is None:
        #% find distances and matches
        # find the distance between each masks
        D = distance_masks([A_ben, A_cnmf], [cm_ben, cm_cnmf],
                           min_dist,
                           enclosed_thr=enclosed_thr)
        level = 0.98
    else:
        level = .98

    matches, costs = find_matches(D, print_assignment=print_assignment)
    matches = matches[0]
    costs = costs[0]

    #%% compute precision and recall
    TP = np.sum(np.array(costs) < thresh_cost) * 1.
    FN = np.shape(masks_gt)[0] - TP
    FP = np.shape(masks_comp)[0] - TP
    TN = 0

    performance = dict()
    performance['recall'] = old_div(TP, (TP + FN))
    performance['precision'] = old_div(TP, (TP + FP))
    performance['accuracy'] = old_div((TP + TN), (TP + FP + FN + TN))
    performance['f1_score'] = 2 * TP / (2 * TP + FP + FN)
    print(performance)
    #%%
    idx_tp = np.where(np.array(costs) < thresh_cost)[0]
    idx_tp_ben = matches[0][idx_tp]  # ground truth
    idx_tp_cnmf = matches[1][idx_tp]  # algorithm - comp

    idx_fn = np.setdiff1d(list(range(np.shape(masks_gt)[0])),
                          matches[0][idx_tp])

    idx_fp = np.setdiff1d(list(range(np.shape(masks_comp)[0])),
                          matches[1][idx_tp])

    idx_fp_cnmf = idx_fp

    idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp = idx_tp_ben, idx_tp_cnmf, idx_fn, idx_fp_cnmf

    if plot_results:
        try:  # Plotting function
            pl.rcParams['pdf.fonttype'] = 42
            font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 10}
            pl.rc('font', **font)
            lp, hp = np.nanpercentile(Cn, [5, 95])
            ses_1 = mpatches.Patch(color='red', label='Session 1')
            ses_2 = mpatches.Patch(color='white', label='Session 2')
            pl.subplot(1, 2, 1)
            pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='w',
                           linewidths=1) for mm in masks_comp[idx_tp_comp]
            ]
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='r',
                           linewidths=1) for mm in masks_gt[idx_tp_gt]
            ]
            if labels is None:
                pl.title('MATCHES')
            else:
                pl.title('MATCHES: ' + labels[1] + '(w), ' + labels[0] + '(r)')
            pl.legend(handles=[ses_1, ses_2])
            pl.show()
            pl.axis('off')
            pl.subplot(1, 2, 2)
            pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='w',
                           linewidths=1) for mm in masks_comp[idx_fp_comp]
            ]
            [
                pl.contour(norm_nrg(mm),
                           levels=[level],
                           colors='r',
                           linewidths=1) for mm in masks_gt[idx_fn_gt]
            ]
            if labels is None:
                pl.title('FALSE POSITIVE (w), FALSE NEGATIVE (r)')
            else:
                pl.title(labels[1] + '(w), ' + labels[0] + '(r)')
            pl.legend(handles=[ses_1, ses_2])
            pl.show()
            pl.axis('off')
        except Exception as e:
            print(
                "not able to plot precision recall usually because we are on travis"
            )
            print(e)
    return idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp, performance
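A minimal usage sketch of nf_match_neurons_in_binary_masks; the random masks below only illustrate the call signature and the shapes involved, not meaningful matches:

import numpy as np

rng = np.random.default_rng(0)
masks_gt = rng.random((5, 64, 64)) > 0.95    # 5 ground-truth binary masks
masks_comp = rng.random((6, 64, 64)) > 0.95  # 6 masks to compare against

idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp, perf = \
    nf_match_neurons_in_binary_masks(masks_gt, masks_comp,
                                     thresh_cost=0.7, min_dist=10,
                                     plot_results=False)
print(perf['f1_score'])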
Ejemplo n.º 54
0
def extract_binary_masks_from_structural_channel(Y,
                                                 min_area_size=30,
                                                 min_hole_size=15,
                                                 gSig=5,
                                                 expand_method='closing',
                                                 selem=np.ones((3, 3))):
    """Extract binary masks by using adaptive thresholding on a structural channel

    Inputs:
    ------
    Y:                  caiman movie object
                        movie of the structural channel (assumed motion corrected)

    min_area_size:      int
                        ignore components with smaller size

    min_hole_size:      int
                        fill in holes up to that size (donuts)

    gSig:               int
                        average radius of cell

    expand_method:      string
                        method to expand binary masks (morphological closing or dilation)

    selem:              np.array
                        morphological element with which to expand binary masks

    Output:
    -------
    A:                  sparse column format matrix
                        matrix of binary masks to be used for CNMF seeding

    mR:                 np.array
                        mean image used to detect cell boundaries
    """

    mR = Y.mean(axis=0)
    img = cv2.blur(mR, (gSig, gSig))
    img = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255.
    img = img.astype(np.uint8)

    th = cv2.adaptiveThreshold(img, np.max(img),
                               cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, gSig, 0)
    th = remove_small_holes(th > 0, min_size=min_hole_size)
    th = remove_small_objects(th, min_size=min_area_size)
    areas = label(th)

    A = np.zeros((np.prod(th.shape), areas[1]), dtype=bool)

    for i in range(areas[1]):
        temp = (areas[0] == i + 1)
        if expand_method == 'dilation':
            temp = dilation(temp, selem=selem)
        elif expand_method == 'closing':
            temp = closing(temp, selem=selem)

        A[:, i] = temp.flatten('F')

    return A, mR
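A minimal usage sketch; a plain NumPy array stands in for the caiman movie object, since the function only calls .mean(axis=0) on Y (the synthetic data is purely illustrative):

import numpy as np

rng = np.random.default_rng(1)
Y = rng.random((100, 128, 128)).astype(np.float32)  # frames x d1 x d2, stand-in for a structural movie

A, mR = extract_binary_masks_from_structural_channel(
    Y, min_area_size=30, min_hole_size=15, gSig=5, expand_method='closing')
print(A.shape, mR.shape)  # (d1*d2, n_components) and (d1, d2)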
Ejemplo n.º 55
0
    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'

        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.use_batchnorm:
            for bn_param in self.bn_params:
                bn_param['mode'] = mode

        scores = None
        ############################################################################
        # TODO: Implement the forward pass for the fully-connected net, computing  #
        # the class scores for X and storing them in the scores variable.          #
        #                                                                          #
        # When using dropout, you'll need to pass self.dropout_param to each       #
        # dropout forward pass.                                                    #
        #                                                                          #
        # When using batch normalization, you'll need to pass self.bn_params[0] to #
        # the forward pass for the first batch normalization layer, pass           #
        # self.bn_params[1] to the forward pass for the second batch normalization #
        # layer, etc.                                                              #
        ############################################################################
        # forward pass
        #        out1, cache1 = affine_relu_forward(  X, self.params['W1'], self.params['b1'])
        #        scores, cache2 = affine_forward( out1, self.params['W2'], self.params['b2'])
        out_list = [X]
        cache_list = []
        dropout_cache = []
        for i in range(1, self.num_layers + 1):
            if i != self.num_layers:  # hidden layers
                if self.use_batchnorm:
                    out, cache = affine_bn_relu_forward(
                        out_list[i - 1], self.params['W' + str(i)],
                        self.params['b' + str(i)],
                        self.params['gamma' + str(i)],
                        self.params['beta' + str(i)], self.bn_params[i - 1])
                else:
                    out, cache = affine_relu_forward(out_list[i - 1],
                                                     self.params['W' + str(i)],
                                                     self.params['b' + str(i)])

                # apply dropout
                if self.use_dropout:
                    out, d_cache = dropout_forward(out, self.dropout_param)
                    dropout_cache.append(d_cache)

            else:  # output layer
                out, cache = affine_forward(out_list[-1],
                                            self.params['W' + str(i)],
                                            self.params['b' + str(i)])

            out_list.append(out)
            cache_list.append(cache)

        scores = out_list[-1]
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        # If test mode return early
        if mode == 'test':
            return scores

        loss, grads = 0.0, {}
        ############################################################################
        # TODO: Implement the backward pass for the fully-connected net. Store the #
        # loss in the loss variable and gradients in the grads dictionary. Compute #
        # data loss using softmax, and make sure that grads[k] holds the gradients #
        # for self.params[k]. Don't forget to add L2 regularization!               #
        #                                                                          #
        # When using batch normalization, you don't need to regularize the scale   #
        # and shift parameters.                                                    #
        #                                                                          #
        # NOTE: To ensure that your implementation matches ours and you pass the   #
        # automated tests, make sure that your L2 regularization includes a factor #
        # of 0.5 to simplify the expression for the gradient.                      #
        ############################################################################
        # backward pass
        #        dout_list = []
        loss, dscores = softmax_loss(scores, y)
        for i in range(1, self.num_layers + 1)[::-1]:
            if i == self.num_layers:  # output layer
                dout, grads['W' +
                            str(i)], grads['b' + str(i)] = affine_backward(
                                dscores, cache_list[i - 1])
            else:
                if self.use_dropout:
                    dout = dropout_backward(dout, dropout_cache[i - 1])
                if self.use_batchnorm:
                    dout, grads['W' + str(i)], grads['b' + str(i)], grads[
                        'gamma' +
                        str(i)], grads['beta' +
                                       str(i)] = affine_bn_relu_backward(
                                           dout, cache_list[i - 1])
                else:
                    dout, grads['W' +
                                str(i)], grads['b' +
                                               str(i)] = affine_relu_backward(
                                                   dout, cache_list[i - 1])

            # add the regularization term
            grads['W' + str(i)] += self.reg * self.params['W' + str(i)]

            loss += 0.5 * self.reg * np.sum(
                self.params['W' + str(i)] * self.params['W' + str(i)])

        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        return loss, grads
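The implementation above relies on affine_bn_relu_forward/backward sandwich helpers that are not among the standard primitives; a minimal sketch of how such helpers are typically composed, assuming the usual affine_*, batchnorm_* and relu_* layer functions are available:

def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param):
    # affine -> batchnorm -> ReLU, returning a combined cache
    a, fc_cache = affine_forward(x, w, b)
    bn, bn_cache = batchnorm_forward(a, gamma, beta, bn_param)
    out, relu_cache = relu_forward(bn)
    return out, (fc_cache, bn_cache, relu_cache)

def affine_bn_relu_backward(dout, cache):
    # backward pass through the ReLU -> batchnorm -> affine sandwich
    fc_cache, bn_cache, relu_cache = cache
    dbn = relu_backward(dout, relu_cache)
    da, dgamma, dbeta = batchnorm_backward(dbn, bn_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db, dgamma, dbeta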
Ejemplo n.º 56
0
def register_ROIs(A1,
                  A2,
                  dims,
                  template1=None,
                  template2=None,
                  align_flag=True,
                  D=None,
                  thresh_cost=.7,
                  max_dist=10,
                  enclosed_thr=None,
                  print_assignment=False,
                  plot_results=False,
                  Cn=None,
                  cmap='viridis'):
    """
    Register ROIs across different sessions using an intersection over union metric
    and the Hungarian algorithm for optimal matching

    Parameters:
    -----------

    A1: ndarray or csc_matrix  # pixels x # of components
        ROIs from session 1

    A2: ndarray or csc_matrix  # pixels x # of components
        ROIs from session 2

    dims: list or tuple
        dimensionality of the FOV

    template1: ndarray dims
        template from session 1

    template2: ndarray dims
        template from session 2

    align_flag: bool
        align the templates before matching

    D: ndarray
        matrix of distances in the event they are pre-computed

    thresh_cost: scalar
        maximum distance considered

    max_dist: scalar
        max distance between centroids

    enclosed_thr: float
        if not None set distance to at most the specified value when ground truth is a subset of inferred

    print_assignment: bool
        print pairs of matched ROIs

    plot_results: bool
        create a plot of matches and mismatches

    Cn: ndarray
        background image for plotting purposes

    cmap: string
        colormap for background image

    Returns:
    --------
    matched_ROIs1: list
        indices of matched ROIs from session 1

    matched_ROIs2: list
        indices of matched ROIs from session 2

    non_matched1: list
        indices of non-matched ROIs from session 1

    non_matched2: list
        indices of non-matched ROIs from session 2

    performance:  list
        (precision, recall, accuracy, f_1 score) with A1 taken as ground truth

    """

    if template1 is None or template2 is None:
        align_flag = False

    if align_flag:  # first align ROIs from session 2 to the template from session 1
        template2, shifts, _, xy_grid = tile_and_correct(
            template2,
            template1 - template1.min(),
            [int(dims[0] / 4), int(dims[1] / 4)], [16, 16], [10, 10],
            add_to_movie=template2.min(),
            shifts_opencv=True)
        A_2t = np.reshape(A2.toarray(), dims + (-1, ),
                          order='F').transpose(2, 0, 1)
        dims_grid = tuple(
            np.max(np.stack(xy_grid, axis=0), axis=0) -
            np.min(np.stack(xy_grid, axis=0), axis=0) + 1)
        _sh_ = np.stack(shifts, axis=0)
        shifts_x = np.reshape(_sh_[:, 1], dims_grid,
                              order='C').astype(np.float32)
        shifts_y = np.reshape(_sh_[:, 0], dims_grid,
                              order='C').astype(np.float32)
        x_grid, y_grid = np.meshgrid(
            np.arange(0., dims[0]).astype(np.float32),
            np.arange(0., dims[1]).astype(np.float32))
        x_remap = (-np.resize(shifts_x, dims) + x_grid).astype(np.float32)
        y_remap = (-np.resize(shifts_y, dims) + y_grid).astype(np.float32)
        A2 = np.stack([
            cv2.remap(img.astype(np.float32), x_remap, y_remap,
                      cv2.INTER_CUBIC) for img in A_2t
        ],
                      axis=0)
        A2 = np.reshape(A2.transpose(1, 2, 0), (A1.shape[0], A_2t.shape[0]),
                        order='F')

    if D is None:
        if 'csc_matrix' not in str(type(A1)):
            A1 = scipy.sparse.csc_matrix(A1)
        if 'csc_matrix' not in str(type(A2)):
            A2 = scipy.sparse.csc_matrix(A2)

        cm_1 = com(A1, dims[0], dims[1])
        cm_2 = com(A2, dims[0], dims[1])
        A1_tr = (A1 > 0).astype(float)
        A2_tr = (A2 > 0).astype(float)
        D = distance_masks([A1_tr, A2_tr], [cm_1, cm_2],
                           max_dist,
                           enclosed_thr=enclosed_thr)

    matches, costs = find_matches(D, print_assignment=print_assignment)
    matches = matches[0]
    costs = costs[0]

    #%% store indices

    idx_tp = np.where(np.array(costs) < thresh_cost)[0]
    if len(idx_tp) > 0:
        matched_ROIs1 = matches[0][idx_tp]  # ground truth
        matched_ROIs2 = matches[1][idx_tp]  # algorithm - comp
        non_matched1 = np.setdiff1d(list(range(D[0].shape[0])),
                                    matches[0][idx_tp])
        non_matched2 = np.setdiff1d(list(range(D[0].shape[1])),
                                    matches[1][idx_tp])
        TP = np.sum(np.array(costs) < thresh_cost) * 1.
    else:
        TP = 0.
        plot_results = False
        matched_ROIs1 = []
        matched_ROIs2 = []
        non_matched1 = list(range(D[0].shape[0]))
        non_matched2 = list(range(D[0].shape[1]))

    #%% compute precision and recall

    FN = D[0].shape[0] - TP
    FP = D[0].shape[1] - TP
    TN = 0

    performance = dict()
    performance['recall'] = old_div(TP, (TP + FN))
    performance['precision'] = old_div(TP, (TP + FP))
    performance['accuracy'] = old_div((TP + TN), (TP + FP + FN + TN))
    performance['f1_score'] = 2 * TP / (2 * TP + FP + FN)
    print(performance)

    if plot_results:
        if Cn is None:
            if template1 is not None:
                Cn = template1
            elif template2 is not None:
                Cn = template2
            else:
                Cn = np.reshape(A1.sum(1) + A2.sum(1), dims, order='F')

        masks_1 = np.reshape(A1.toarray(), dims + (-1, ),
                             order='F').transpose(2, 0, 1)
        masks_2 = np.reshape(A2.toarray(), dims + (-1, ),
                             order='F').transpose(2, 0, 1)
        #        try : #Plotting function
        level = 0.98
        pl.rcParams['pdf.fonttype'] = 42
        font = {'family': 'Myriad Pro', 'weight': 'regular', 'size': 10}
        pl.rc('font', **font)
        lp, hp = np.nanpercentile(Cn, [5, 95])
        pl.subplot(1, 2, 1)
        pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1)
            for mm in masks_1[matched_ROIs1]
        ]
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1)
            for mm in masks_2[matched_ROIs2]
        ]
        pl.title('Matches')
        pl.axis('off')
        pl.subplot(1, 2, 2)
        pl.imshow(Cn, vmin=lp, vmax=hp, cmap=cmap)
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='w', linewidths=1)
            for mm in masks_1[non_matched1]
        ]
        [
            pl.contour(norm_nrg(mm), levels=[level], colors='r', linewidths=1)
            for mm in masks_2[non_matched2]
        ]
        pl.title('Mismatches')
        pl.axis('off')
#        except Exception as e:
#            print("not able to plot precision recall usually because we are on travis")
#            print(e)

    return matched_ROIs1, matched_ROIs2, non_matched1, non_matched2, performance
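A minimal usage sketch of register_ROIs with two synthetic footprint matrices; the shapes and thresholds are illustrative, and in practice A1 and A2 would come from two CNMF sessions:

import numpy as np
import scipy.sparse

dims = (64, 64)
rng = np.random.default_rng(2)
A1 = scipy.sparse.csc_matrix((rng.random((np.prod(dims), 8)) > 0.97).astype(float))
A2 = scipy.sparse.csc_matrix((rng.random((np.prod(dims), 9)) > 0.97).astype(float))

m1, m2, nm1, nm2, perf = register_ROIs(A1, A2, dims,
                                       align_flag=False,  # no templates given, so skip alignment
                                       thresh_cost=0.7, max_dist=10)
print(len(m1), 'matched ROIs')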
Ejemplo n.º 57
0
 def _make_headers(self, num_col):
     header = self.inputs.header_prefix if \
         isdefined(self.inputs.header_prefix) else self._header
     headers = ['{}{:02d}'.format(header, i) for i in range(num_col)]
     return '\t'.join(headers)
Ejemplo n.º 58
0
        def impl():
            workflow = self.shell.projectManager.workflow
            pixClassApplet = workflow.pcApplet
            gui = pixClassApplet.getMultiLaneGui()
            opPix = pixClassApplet.topLevelOperator

            # Select the labeling drawer
            self.shell.setSelectedAppletDrawer(PIXEL_CLASSIFICATION_INDEX)
            assert isinstance(
                self.shell.workflow.applets[PIXEL_CLASSIFICATION_INDEX],
                PixelClassificationApplet)

            # Turn off the HUDs so we can capture the raw image
            viewMenu = gui.currentGui().menus()[0]
            viewMenu.actionToggleAllHuds.trigger()

            ## Turn off the slicing position lines
            ## FIXME: This disables the lines without unchecking the position
            ##        box in the VolumeEditorWidget, making the checkbox out-of-sync
            # gui.currentGui().editor.navCtrl.indicateSliceIntersection = False

            # Do our tests at position 0,0,0
            gui.currentGui().editor.posModel.slicingPos = (0, 0, 0)

            assert gui.currentGui()._labelControlUi.liveUpdateButton.isChecked(
            ) == False
            assert gui.currentGui()._labelControlUi.labelListModel.rowCount(
            ) == 2, "Got {} rows".format(
                gui.currentGui()._labelControlUi.labelListModel.rowCount())

            # Add label classes
            for i in range(3):
                gui.currentGui()._labelControlUi.AddLabelButton.click()
                assert (
                    gui.currentGui()._labelControlUi.labelListModel.rowCount()
                    == 3 + i
                ), "Expected {}, but got {} rows".format(
                    3 + i,
                    gui.currentGui()._labelControlUi.labelListModel.rowCount())

            # Select the brush
            gui.currentGui()._labelControlUi.paintToolButton.click()

            # Set the brush size
            gui.currentGui()._labelControlUi.brushSizeComboBox.setCurrentIndex(
                1)

            # Let the GUI catch up: Process all events
            QApplication.processEvents()

            # Draw some arbitrary labels in each view using mouse events.
            for i in range(3):
                # Post this as an event to ensure sequential execution.
                gui.currentGui()._labelControlUi.labelListModel.select(i)

                imgView = gui.currentGui().editor.imageViews[i]
                self.strokeMouseFromCenter(imgView, self.LABEL_START,
                                           self.LABEL_STOP)

            self.waitForViews(gui.currentGui().editor.imageViews)

            # Verify the actual rendering of each view
            for i in range(3):
                imgView = gui.currentGui().editor.imageViews[i]
                observedColor = self.getPixelColor(imgView, self.LABEL_SAMPLE)
                expectedColor = gui.currentGui()._colorTable16[i + 1]
                assert observedColor == expectedColor, "Label was not drawn correctly.  Expected {}, got {}".format(
                    hex(expectedColor), hex(observedColor))

            # Save the project
            saveThread = self.shell.onSaveProjectActionTriggered()
            saveThread.join()
Ejemplo n.º 59
0
    def _run_interface(self, runtime):
        mask_images = []
        if isdefined(self.inputs.mask_files):
            mask_images = combine_mask_files(self.inputs.mask_files,
                                             self.inputs.merge_method,
                                             self.inputs.mask_index)

        if self.inputs.use_regress_poly:
            self.inputs.pre_filter = 'polynomial'

        # Degree 0 == remove mean; see compute_noise_components
        degree = (self.inputs.regress_poly_degree
                  if self.inputs.pre_filter == 'polynomial' else 0)

        imgseries = nb.load(self.inputs.realigned_file, mmap=NUMPY_MMAP)

        if len(imgseries.shape) != 4:
            raise ValueError('{} expected a 4-D nifti file. Input {} has '
                             '{} dimensions (shape {})'.format(
                                 self._header, self.inputs.realigned_file,
                                 len(imgseries.shape), imgseries.shape))

        if len(mask_images) == 0:
            img = nb.Nifti1Image(np.ones(imgseries.shape[:3], dtype=np.bool),
                                 affine=imgseries.affine,
                                 header=imgseries.header)
            mask_images = [img]

        skip_vols = self.inputs.ignore_initial_volumes
        if skip_vols:
            imgseries = imgseries.__class__(
                imgseries.get_data()[..., skip_vols:], imgseries.affine,
                imgseries.header)

        mask_images = self._process_masks(mask_images, imgseries.get_data())

        TR = 0
        if self.inputs.pre_filter == 'cosine':
            if isdefined(self.inputs.repetition_time):
                TR = self.inputs.repetition_time
            else:
                # Derive TR from NIfTI header, if possible
                try:
                    TR = imgseries.header.get_zooms()[3]
                    if imgseries.header.get_xyzt_units()[1] == 'msec':
                        TR /= 1000
                except (AttributeError, IndexError):
                    TR = 0

                if TR == 0:
                    raise ValueError(
                        '{} cannot detect repetition time from image - '
                        'Set the repetition_time input'.format(self._header))

        components, filter_basis = compute_noise_components(
            imgseries.get_data(), mask_images, self.inputs.num_components,
            self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR)

        if skip_vols:
            old_comp = components
            nrows = skip_vols + components.shape[0]
            components = np.zeros((nrows, components.shape[1]),
                                  dtype=components.dtype)
            components[skip_vols:] = old_comp

        components_file = os.path.join(os.getcwd(),
                                       self.inputs.components_file)
        np.savetxt(components_file,
                   components,
                   fmt=b"%.10f",
                   delimiter='\t',
                   header=self._make_headers(components.shape[1]),
                   comments='')

        if self.inputs.pre_filter and self.inputs.save_pre_filter:
            pre_filter_file = self._list_outputs()['pre_filter_file']
            ftype = {
                'polynomial': 'Legendre',
                'cosine': 'Cosine'
            }[self.inputs.pre_filter]
            ncols = filter_basis.shape[1] if filter_basis.size > 0 else 0
            header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)]
            if skip_vols:
                old_basis = filter_basis
                # nrows defined above
                filter_basis = np.zeros((nrows, ncols + skip_vols),
                                        dtype=filter_basis.dtype)
                if old_basis.size > 0:
                    filter_basis[skip_vols:, :ncols] = old_basis
                filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols)
                header.extend([
                    'NonSteadyStateOutlier{:02d}'.format(i)
                    for i in range(skip_vols)
                ])
            np.savetxt(pre_filter_file,
                       filter_basis,
                       fmt=b'%.10f',
                       delimiter='\t',
                       header='\t'.join(header),
                       comments='')

        return runtime
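A small standalone sketch of the zero-padding step above: components estimated on the truncated series are shifted down by skip_vols rows so they line up with the original time axis (toy numbers, purely illustrative):

import numpy as np

skip_vols = 3
components = np.arange(10, dtype=float).reshape(5, 2)  # 5 retained volumes x 2 components

nrows = skip_vols + components.shape[0]
padded = np.zeros((nrows, components.shape[1]), dtype=components.dtype)
padded[skip_vols:] = components  # the first skip_vols rows stay zero

print(padded.shape)  # (8, 2): rows 0-2 are zeros for the dropped volumes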
Ejemplo n.º 60
0
    def __init__(self,
                 hidden_dims,
                 input_dim=3 * 32 * 32,
                 num_classes=10,
                 dropout=0,
                 use_batchnorm=False,
                 reg=0.0,
                 weight_scale=1e-2,
                 dtype=np.float32,
                 seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
          the network should not use dropout at all.
        - use_batchnorm: Whether or not the network should use batch normalization.
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed using
          this datatype. float32 is faster but less accurate, so you should use
          float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout layers. This
          will make the dropout layers deteriminstic so we can gradient check the
          model.
        """
        self.use_batchnorm = use_batchnorm
        self.use_dropout = dropout > 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)  # the +1 is the output layer
        self.dtype = dtype
        self.params = {}

        ############################################################################
        # TODO: Initialize the parameters of the network, storing all values in    #
        # the self.params dictionary. Store weights and biases for the first layer #
        # in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
        # initialized from a normal distribution with standard deviation equal to  #
        # weight_scale and biases should be initialized to zero.                   #
        #                                                                          #
        # When using batch normalization, store scale and shift parameters for the #
        # first layer in gamma1 and beta1; for the second layer use gamma2 and     #
        # beta2, etc. Scale parameters should be initialized to one and shift      #
        # parameters should be initialized to zero.                                #
        ############################################################################

        # initialize the weight matrices and bias vectors
        for i in range(1, self.num_layers + 1):
            if i == 1:
                self.params['W' + str(i)] = np.random.normal(
                    loc=0.0,
                    scale=weight_scale,
                    size=(input_dim, hidden_dims[i - 1]))
                self.params['b' + str(i)] = np.zeros(hidden_dims[i - 1])
                if self.use_batchnorm:
                    self.params['gamma' + str(i)] = np.ones(
                        hidden_dims[i - 1])  # scale
                    self.params['beta' + str(i)] = np.zeros(
                        hidden_dims[i - 1])  # shift

            elif i != self.num_layers:
                self.params['W' + str(i)] = np.random.normal(
                    loc=0.0,
                    scale=weight_scale,
                    size=(hidden_dims[i - 2], hidden_dims[i - 1]))
                self.params['b' + str(i)] = np.zeros(hidden_dims[i - 1])

                if self.use_batchnorm:
                    self.params['gamma' + str(i)] = np.ones(
                        hidden_dims[i - 1])  # scale
                    self.params['beta' + str(i)] = np.zeros(
                        hidden_dims[i - 1])  # shift
            else:  # output layer
                self.params['W' + str(i)] = np.random.normal(
                    loc=0.0,
                    scale=weight_scale,
                    size=(hidden_dims[i - 2], num_classes))
                self.params['b' + str(i)] = np.zeros(num_classes)
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################

        # In this implementation every dropout layer shares the same parameters.
        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the mode
        # (train / test). You can pass the same dropout_param to each dropout layer.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        # With batch normalization we need to keep track of running means and
        # variances, so we need to pass a special bn_param object to each batch
        # normalization layer. You should pass self.bn_params[0] to the forward pass
        # of the first batch normalization layer, self.bn_params[1] to the forward
        # pass of the second batch normalization layer, etc.
        self.bn_params = []
        if self.use_batchnorm:
            self.bn_params = [{
                'mode': 'train'
            } for i in range(self.num_layers - 1)]

        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)
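A minimal usage sketch of this constructor together with the loss method above; the layer sizes and hyperparameters are arbitrary:

import numpy as np

net = FullyConnectedNet([100, 50],
                        input_dim=3 * 32 * 32,
                        num_classes=10,
                        dropout=0.25,
                        use_batchnorm=True,
                        reg=1e-2,
                        weight_scale=5e-2,
                        dtype=np.float64,  # float64 is preferred for gradient checking
                        seed=0)

X = np.random.randn(4, 3 * 32 * 32)
y = np.random.randint(10, size=4)
loss, grads = net.loss(X, y)  # training mode: returns loss and gradients
scores = net.loss(X)          # test mode: returns class scores only
print(loss, scores.shape)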