Example #1
def norm_jit2d(a):
    sm = 0
    n, m = a.shape
    for i in xrange(n):
        for j in xrange(m):
            sm += a[i, j] ** 2
    return np.sqrt(sm)
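The nested loop is the Frobenius norm written out element by element. A quick check of my own, assuming the xrange shim from Example #18 is in scope (or Python 2):

import numpy as np

a = np.arange(6, dtype=np.float64).reshape(2, 3)
assert np.isclose(norm_jit2d(a), np.linalg.norm(a))  # Frobenius norm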
Example #2
def sum0(arr):
    m, n = arr.shape
    a = np.zeros(n)
    for i in xrange(m):
        for j in xrange(n):
            a[j] += arr[i, j]
    return a
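sum0 accumulates per-column sums, so it should agree with NumPy's axis sum. A small check of my own, under the same xrange assumption as above:

import numpy as np

arr = np.arange(12, dtype=np.float64).reshape(3, 4)
assert np.allclose(sum0(arr), arr.sum(axis=0))  # column sums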
Example #3
def sum0(arr):
    m, n = arr.shape
    a = np.zeros(n)
    for i in xrange(m):
        for j in xrange(n):
            a[j] += arr[i, j]
    return a
Example #4
def metrics_to_tuples(raw_metrics):
    """ Converts metric dictionary of name:values_array into list of tuples
        Use case: writing out benchmark to CSV, etc

        Input:
        {'metric':[value1,value2...], 'metric2':[value1,value2,...]...}

        Output: list, with tuple header row, then list of tuples of values
        [('metric','metric',...), (metric1_value1,metric2_value1, ...) ... ]
    """
    if not isinstance(raw_metrics, dict):
        raise TypeError("Input must be dictionary!")

    metrics = sorted(raw_metrics.keys())
    arrays = [raw_metrics[metric] for metric in metrics]

    num_rows = len(arrays[0])  # Assume all same size or this fails
    output = list()
    output.append(tuple(metrics))  # Add headers

    # Create list of tuples mimicking 2D array from input
    for row in xrange(0, num_rows):
        new_row = tuple([arrays[col][row] for col in xrange(0, len(arrays))])
        output.append(new_row)
    return output
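A usage sketch with made-up data, showing the dict-of-lists to header-plus-rows conversion:

raw = {'latency': [0.12, 0.15], 'status': [200, 404]}
print(metrics_to_tuples(raw))
# [('latency', 'status'), (0.12, 200), (0.15, 404)]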
Example #5
def norm_jit2d(a):
    sm = 0
    n, m = a.shape
    for i in xrange(n):
        for j in xrange(m):
            sm += a[i, j]**2
    return np.sqrt(sm)
Example #6
    def test_factory_text(self):
        """ Test the basic generator """
        charsets = [string.ascii_letters, string.digits,
                    string.ascii_uppercase, string.hexdigits]
        # Test multiple charsets and string lengths
        for charset in charsets:
            # Test different lengths for charset
            for my_length in xrange(1, 17):
                gen = generators.factory_generate_text(
                    legal_characters=charset, min_length=my_length, max_length=my_length)()
                for x in xrange(0, 10):
                    val = next(gen)
                    self.assertEqual(my_length, len(val))
Example #7
    def _box_neighbors(self, v):
        row, col = v.split('_')
        _row = int((ord(row) - 65) // 3)
        _col = int(int(col) // 3)
        neighbors = []

        for i in xrange(3 * _row, 3 * _row + 3):
            for j in xrange(3 * _col, 3 * _col + 3):
                cell = '{}_{}'.format(chr(i + 65), j)
                if v != cell:
                    neighbors.append(cell)

        return neighbors
Example #8
    def test_factory_text(self):
        """ Test the basic generator """
        charsets = [string.ascii_letters, string.digits,
                    string.ascii_uppercase, string.hexdigits]
        # Test multiple charsets and string lengths
        for charset in charsets:
            # Test different lengths for charset
            for my_length in xrange(1, 17):
                gen = generators.factory_generate_text(
                    legal_characters=charset, min_length=my_length, max_length=my_length)()
                for x in xrange(0, 10):
                    val = next(gen)
                    self.assertEqual(my_length, len(val))
Example #9
    def step(self):
        """Take a step in the optimization"""
        rnd_cross = _random((self.npop, self.ndim))
        for i in xrange(self.npop):
            t0, t1, t2 = i, i, i
            while t0 == i:
                t0 = _randint(self.npop)
            while t1 == i or t1 == t0:
                t1 = _randint(self.npop)
            while t2 == i or t2 == t0 or t2 == t1:
                t2 = _randint(self.npop)

            v = self.population[t0, :] + self.F * (self.population[t1, :] -
                                                   self.population[t2, :])

            crossover = rnd_cross[i] <= self.C
            u = np.where(crossover, v, self.population[i, :])

            ri = _randint(self.ndim)
            u[ri] = v[ri]

            ufit = self.m * self.fun(u)

            if ufit < self.fitness[i]:
                self.population[i, :] = u
                self.fitness[i] = ufit
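step() above is one generation of differential evolution (DE/rand/1/bin): pick three distinct donors, build a scaled difference vector, apply binomial crossover with one forced donor component, and keep the trial if it improves the fitness. A self-contained sketch of the mutation and crossover rule for a single target vector, with plain NumPy standing in for the class's _random/_randint helpers and its F and C fields:

import numpy as np

npop, ndim, F, C, i = 10, 3, 0.8, 0.9, 0
pop = np.random.rand(npop, ndim)                    # population, [npop x ndim]
t0, t1, t2 = np.random.choice(
    [k for k in range(npop) if k != i], 3, replace=False)
v = pop[t0] + F * (pop[t1] - pop[t2])               # donor (mutant) vector
u = np.where(np.random.rand(ndim) <= C, v, pop[i])  # binomial crossover
ri = np.random.randint(ndim)
u[ri] = v[ri]                                       # force one donor component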
Example #10
    def __setitem__(self, n, v):
        if isinstance(n, int):  # d[1]
            self._dir[n] = v
        elif isinstance(n, slice):
            # d['foo':] adds a new foo
            n = n.start
            if self._dNS and not islst(n):
                n = (self._dNS, n)

            nv = Element(n)
            self._dir.append(nv)

        else:  # d["foo"] replaces first <foo> and dels rest
            if self._dNS and not islst(n):
                n = (self._dNS, n)

            nv = Element(n)
            nv._dir.append(v)
            replaced = False

            todel = []
            for i in xrange(len(self)):
                if self[i]._name == n:
                    if replaced:
                        todel.append(i)
                    else:
                        self[i] = nv
                        replaced = True
            if not replaced:
                self._dir.append(nv)
            for i in sorted(todel, reverse=True):
                del self[i]
Example #11
    def _col_neighbors(self, v):
        row, col = v.split('_')
        neighbors = []
        for i in xrange(9):
            if i != int(col):
                neighbors.append('{}_{}'.format(row, i))

        return neighbors
Example #12
    def _row_neighbors(self, v):
        row, col = v.split('_')
        neighbors = []
        for i in xrange(9):
            if (i + 65) != ord(row):
                neighbors.append('{}_{}'.format(chr(i + 65), col))

        return neighbors
Example #13
def generate_text():
    # min_length, max_length and legal_characters are free variables bound by
    # the enclosing factory (see factory_generate_text in Examples #6 and #8).
    local_min_len = min_length
    local_max_len = max_length
    rand = random.Random()
    while True:
        length = rand.randint(local_min_len, local_max_len)
        array = [rand.choice(legal_characters)
                 for x in xrange(0, length)]
        yield ''.join(array)
Example #14
def generate_text():
    # min_length, max_length and legal_characters are free variables bound by
    # the enclosing factory (see factory_generate_text in Examples #6 and #8).
    local_min_len = min_length
    local_max_len = max_length
    rand = random.Random()
    while True:
        length = rand.randint(local_min_len, local_max_len)
        array = [rand.choice(legal_characters)
                 for x in xrange(0, length)]
        yield ''.join(array)
Example #15
    def generator_repeat_test(self, generator_input):
        """ Basic test of a configured generator """
        val = next(generator_input)

        # Check for not repeating easily
        for x in xrange(0, 5):
            val2 = next(generator_input)
            self.assertTrue(val)
            self.assertTrue(val != val2)
            val = val2
Example #16
    def generator_basic_test(self, generator, value_test_function=None):
        """ Basic test for a generator, checks values and applies test function """
        self.assertTrue(isinstance(generator, types.GeneratorType))

        for x in xrange(0, 100):
            val = next(generator)
            self.assertTrue(val is not None)
            if value_test_function:
                self.assertTrue(value_test_function(
                    val), 'Test failed with value {0}'.format(val))
Example #17
def convert_time_couples_to_qdb_filtered_range_t_vector(time_couples):
    vec = impl.FilteredRangeVec()

    c = len(time_couples)

    vec.resize(c)
    for i in xrange(c):
        vec[i] = convert_time_couple_to_qdb_filtered_range_t(time_couples[i])

    return vec
Example #18
import sys


def xrange(*args, **kwargs):
    major_version = sys.version_info.major
    if major_version == 3:
        import builtins
        return builtins.range(*args, **kwargs)
    elif major_version == 2:
        # Stock Python 2 names its builtins module __builtin__.
        import __builtin__
        return __builtin__.xrange(*args, **kwargs)
    else:
        raise RuntimeError("Unsupported version of Python.")
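A usage sketch: the wrapper stays lazy on Python 2 and dispatches to range on Python 3.

print(list(xrange(3)))  # [0, 1, 2] on either interpreter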
Example #19
    def generator_basic_test(self, generator, value_test_function=None):
        """ Basic test for a generator, checks values and applies test function """
        self.assertTrue(isinstance(generator, types.GeneratorType))

        for x in xrange(0, 100):
            val = next(generator)
            self.assertTrue(val is not None)
            if value_test_function:
                self.assertTrue(value_test_function(
                    val), 'Test failed with value {0}'.format(val))
Example #20
    def generator_repeat_test(self, generator_input):
        """ Basic test of a configured generator """
        val = next(generator_input)

        # Check for not repeating easily
        for x in xrange(0, 5):
            val2 = next(generator_input)
            self.assertTrue(val)
            self.assertTrue(val != val2)
            val = val2
Example #21
def generate_text():
    """ Generates random string values """
    # min_length, max_length and legal_characters come from the enclosing factory.
    local_min_len = min_length
    local_max_len = max_length
    rand = random.Random()
    while True:
        length = rand.randint(local_min_len, local_max_len)
        array = [
            rand.choice(legal_characters) for x in xrange(0, length)
        ]
        yield ''.join(array)
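The three generate_text closures above read min_length, max_length and legal_characters from an enclosing factory. A minimal self-contained sketch, consistent with the factory_generate_text calls in Examples #6 and #8:

import random
import string


def factory_generate_text(legal_characters=string.ascii_letters,
                          min_length=8, max_length=8):
    def generate_text():
        rand = random.Random()
        while True:
            length = rand.randint(min_length, max_length)
            yield ''.join(rand.choice(legal_characters)
                          for _ in range(length))
    return generate_text


gen = factory_generate_text(string.digits, min_length=4, max_length=6)()
print(next(gen))  # e.g. '90417'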
Example #22
    def test_parse_text_generator(self):
        """ Test the text generator parsing """
        config = dict()
        config['type'] = 'random_text'
        config['character_set'] = 'reallyINVALID'

        try:
            gen = generators.parse_generator(config)
            self.fail(
                "Should never parse an invalid character_set successfully, but did!"
            )
        except ValueError:
            pass

        # Test for character set handling
        for charset in generators.CHARACTER_SETS:
            try:
                config['character_set'] = charset
                gen = generators.parse_generator(config)
                myset = set(generators.CHARACTER_SETS[charset])
                for x in xrange(0, 50):
                    val = next(gen)
                    self.assertTrue(set(val).issubset(set(myset)))
            except Exception as e:
                print('Exception occurred with charset: ' + charset)
                raise e

        my_min = 1
        my_max = 10

        # Test for explicit character setting
        del config['character_set']
        temp_chars = 'ay78%&'
        config['characters'] = temp_chars
        gen = generators.parse_generator(config)
        self.generator_basic_test(
            gen,
            value_test_function=lambda x: set(x).issubset(set(temp_chars)))

        # Test for length setting
        config['length'] = '3'
        gen = generators.parse_generator(config)
        self.generator_basic_test(gen,
                                  value_test_function=lambda x: len(x) == 3)
        del config['length']

        # Test for explicit min/max length
        config['min_length'] = '9'
        config['max_length'] = 12
        gen = generators.parse_generator(config)
        self.generator_basic_test(
            gen, value_test_function=lambda x: len(x) >= 9 and len(x) <= 12)
Example #23
def make_qdb_ts_int64_point_vector(time_points):
    vec = Int64PointsVector()

    c = len(time_points)

    vec.resize(c)

    for i in xrange(c):
        vec[i].timestamp.tv_sec = time_to_unix_timestamp(time_points[i][0])
        vec[i].timestamp.tv_nsec = time_points[i][0].microsecond * long(1000)
        vec[i].value = time_points[i][1]

    return vec
Example #24
def convert_to_wrap_ts_blop_points_vector(tuples):
    vec = BlobPointsVector()

    c = len(tuples)

    vec.resize(c)

    for i in xrange(c):
        vec[i].timestamp.tv_sec = time_to_unix_timestamp(tuples[i][0])
        vec[i].timestamp.tv_nsec = tuples[i][0].microsecond * long(1000)
        vec[i].data = tuples[i][1]

    return vec
Example #25
def write_coordinates(atoms, V, title=""):
    """
    Print coordinates V
    """
    N, D = V.shape

    print(str(N))
    print(title)

    for i in xrange(N):
        atom = atoms[i]
        atom = atom[0].upper() + atom[1:]
        print("{0:2s} {1:15.8f} {2:15.8f} {3:15.8f}".format(
            atom, V[i, 0], V[i, 1], V[i, 2]))
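A usage sketch of my own, producing XYZ-style output (atom count, title line, then one row per atom):

import numpy as np

V = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, 1.1]])
write_coordinates(['C', 'o'], V, title="carbon monoxide")
# 2
# carbon monoxide
# C       0.00000000      0.00000000      0.00000000
# O       0.00000000      0.00000000      1.10000000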
Example #26
    def test_parse_text_generator(self):
        """ Test the text generator parsing """
        config = dict()
        config['type'] = 'random_text'
        config['character_set'] = 'reallyINVALID'

        try:
            gen = generators.parse_generator(config)
            self.fail(
                "Should never parse an invalid character_set successfully, but did!")
        except ValueError:
            pass

        # Test for character set handling
        for charset in generators.CHARACTER_SETS:
            try:
                config['character_set'] = charset
                gen = generators.parse_generator(config)
                myset = set(generators.CHARACTER_SETS[charset])
                for x in xrange(0, 50):
                    val = next(gen)
                    self.assertTrue(set(val).issubset(set(myset)))
            except Exception as e:
                print('Exception occurred with charset: ' + charset)
                raise e

        my_min = 1
        my_max = 10

        # Test for explicit character setting
        del config['character_set']
        temp_chars = 'ay78%&'
        config['characters'] = temp_chars
        gen = generators.parse_generator(config)
        self.generator_basic_test(
            gen, value_test_function=lambda x: set(x).issubset(set(temp_chars)))

        # Test for length setting
        config['length'] = '3'
        gen = generators.parse_generator(config)
        self.generator_basic_test(
            gen, value_test_function=lambda x: len(x) == 3)
        del config['length']

        # Test for explicit min/max length
        config['min_length'] = '9'
        config['max_length'] = 12
        gen = generators.parse_generator(config)
        self.generator_basic_test(
            gen, value_test_function=lambda x: len(x) >= 9 and len(x) <= 12)
Example #27
    def iteroptimize(self, ngen=1):
        """Iterator to the optimizer for ``ngen`` generations

        Parameters
        ----------
        ngen: int
            number of iterations

        Yields
        ------
        population: ndarray
            population locations, [Npop x Ndim]

        fitness: ndarray
            population values, [Npop]
        """

        for i in xrange(self.npop):
            self.fitness[i] = self.m * self.fun(self.population[i, :])

        for j in xrange(ngen):
            self.step()
            self._minidx = np.argmin(self.fitness)
            yield self.population[self._minidx, :], self.fitness[self._minidx]
Example #28
    def __init__(self, sudoku, variable_heuristic, domain_heuristic):
        sudoku_domains = {}
        for idx, value in enumerate(sudoku):
            value = int(value)
            row = idx // 9
            col = idx % 9
            sudoku_domains['{}_{}'.format(
                chr(row + 65),
                col)] = [value] if value > 0 else [x for x in xrange(1, 10)]

        sudoku_variables = {key: UNASSIGNED for key in sudoku_domains}
        sudoku_constraints = lambda x, y: x != y

        super(Sudoku, self).__init__(sudoku_variables, sudoku_domains,
                                     sudoku_constraints, variable_heuristic,
                                     domain_heuristic)
        self.build_neighbors()
Example #29
def createDataset(outputPath, imageListFile, checkValid=True):
    """
    Create LMDB dataset for CRNN training.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image path
        checkValid    : if true, check the validity of every image
    """
    with open(imageListFile) as fp:
        imagePathList = fp.readlines()
    nSamples = len(imagePathList)
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1
    for i in xrange(nSamples):
        imagePath = imagePathList[i].rstrip()
        labelPath = (imagePath.replace('images', 'labels')
                     .replace('JPEGImages', 'labels')
                     .replace('.jpg', '.txt')
                     .replace('.png', '.txt'))
        with open(labelPath) as f:
            label = f.readlines()
        label = ''.join(label)

        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:  # binary mode: read raw image bytes
            imageBin = f.read()
        if checkValid:
            if not checkImageIsValid(imageBin):
                print('%s is not a valid image' % imagePath)
                continue

        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt-1
    cache['num-samples'] = str(nSamples)
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)
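A hypothetical invocation (the paths are placeholders): the list file holds one image path per line, and the matching label file is found by the path rewriting above.

createDataset('./train_lmdb', './train_images.txt')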
Example #30
def rnn_backward(dh, cache):
    """
    Compute the backward pass for a vanilla RNN over an entire sequence of data.

    Inputs:
    - dh: Upstream gradients of all hidden states, of shape (N, T, H)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient of inputs, of shape (N, T, D)
    - dh0: Gradient of initial hidden state, of shape (N, H)
    - dWx: Gradient of input-to-hidden weights, of shape (D, H)
    - dWh: Gradient of hidden-to-hidden weights, of shape (H, H)
    - db: Gradient of biases, of shape (H,)
    """
    dx, dh0, dWx, dWh, db, dprev_h = None, None, None, None, None, None
    N, T, H = dh.shape
    x, h0, Wx, Wh, b, h = cache
    D = x.shape[2]
    dx, dh0, dWx, dWh, db = np.zeros((N, T, D)), np.zeros((N, H)), np.zeros((D, H)), np.zeros((H, H)), np.zeros((H, ))
    dprev_h = np.zeros((N, H))
    ##############################################################################
    # TODO: Implement the backward pass for a vanilla RNN running an entire      #
    # sequence of data. You should use the rnn_step_backward function that you   #
    # defined above. You can use a for loop to help compute the backward pass.   #
    ##############################################################################
    for i in xrange(T - 1, -1, -1):
        temp_cache = (h[:, i, :], x[:, i, :], h[:, i - 1, :] if i >= 1 else h0, Wx, Wh, b)
        dx[:, i, :], dprev_h, dWx_t, dWh_t, db_t = rnn_step_backward(dh[:, i, :] + dprev_h, temp_cache)
        dWx += dWx_t
        dWh += dWh_t
        db += db_t
    dh0 = dprev_h
    ##############################################################################
    #                               END OF YOUR CODE                             #
    ##############################################################################
    return dx, dh0, dWx, dWh, db
Example #31
def rnn_forward(x, h0, Wx, Wh, b):
    """
    Run a vanilla RNN forward on an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The RNN uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the RNN forward, we return the hidden states for all timesteps.

    Inputs:
    - x: Input data for the entire timeseries, of shape (N, T, D).
    - h0: Initial hidden state, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns a tuple of:
    - h: Hidden states for the entire timeseries, of shape (N, T, H).
    - cache: Values needed in the backward pass
    """
    h, cache = None, None
    N, H = h0.shape
    T = x.shape[1]
    ##############################################################################
    # TODO: Implement forward pass for a vanilla RNN running on a sequence of    #
    # input data. You should use the rnn_step_forward function that you defined  #
    # above. You can use a for loop to help compute the forward pass.            #
    ##############################################################################
    h = np.zeros((N, T, H))
    # Stash h0 in the last time slot so the first iteration (i = 0) reads it
    # via h[:, i - 1, :]; that slot is overwritten by the final step's output.
    h[:, -1, :] = h0
    for i in xrange(T):
        h[:, i, :], temp_cache = rnn_step_forward(x[:, i, :], h[:, i - 1, :], Wx, Wh, b)
    cache = (x, h0, Wx, Wh, b, h)
    ##############################################################################
    #                               END OF YOUR CODE                             #
    ##############################################################################
    return h, cache
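Both functions above assume an rnn_step_forward / rnn_step_backward pair that is not shown. A minimal tanh-step forward sketch, with the cache ordered the way rnn_backward above rebuilds it, i.e. (next_h, x, prev_h, Wx, Wh, b):

import numpy as np


def rnn_step_forward(x, prev_h, Wx, Wh, b):
    # next_h = tanh(x.Wx + prev_h.Wh + b); shapes (N, D)(D, H) -> (N, H)
    next_h = np.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
    cache = (next_h, x, prev_h, Wx, Wh, b)
    return next_h, cache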
Example #32
def run_benchmark(benchmark, test_config=TestConfig(), context=None, *args, **kwargs):
    """ Perform a benchmark, (re)using a given, configured CURL call to do so
        The actual analysis of metrics is performed separately, to allow for testing
    """

    # Context handling
    my_context = context
    if my_context is None:
        my_context = Context()

    warmup_runs = benchmark.warmup_runs
    benchmark_runs = benchmark.benchmark_runs
    message = ''  # Message is name of benchmark... print it?

    if benchmark_runs <= 0:
        raise Exception(
            "Invalid number of benchmark runs, must be > 0: " + str(benchmark_runs))

    result = TestResponse()

    # TODO create and use a curl-returning configuration function
    # TODO create and use a post-benchmark cleanup function
    # They should use is_dynamic/is_context_modifier to determine if they need to
    #  worry about context and re-reading/retemplating and only do it if needed
    #    - Also, they will need to be smart enough to handle extraction functions
    #  For performance reasons, we don't want to re-run templating/extraction if
    #   we do not need to, and do not want to save request bodies.

    # Initialize variables to store output
    output = BenchmarkResult()
    output.name = benchmark.name
    output.group = benchmark.group
    metricnames = list(benchmark.metrics)
    # Metric variable for curl, to avoid hash lookup for every metric name
    metricvalues = [METRICS[name] for name in metricnames]
    # Initialize arrays to store results for each metric
    results = [list() for x in xrange(0, len(metricnames))]
    curl = pycurl.Curl()

    # Benchmark warm-up to allow for caching, JIT compiling, on client
    logger.info('Warmup: ' + message + ' started')
    for x in xrange(0, warmup_runs):
        benchmark.update_context_before(my_context)
        templated = benchmark.realize(my_context)
        curl = templated.configure_curl(
            timeout=test_config.timeout, context=my_context, curl_handle=curl)
        # Do not store actual response body at all.
        curl.setopt(pycurl.WRITEFUNCTION, lambda x: None)
        curl.perform()

    logger.info('Warmup: ' + message + ' finished')

    logger.info('Benchmark: ' + message + ' starting')

    for x in xrange(0, benchmark_runs):  # Run the actual benchmarks
        # Setup benchmark
        benchmark.update_context_before(my_context)
        templated = benchmark.realize(my_context)
        curl = templated.configure_curl(
            timeout=test_config.timeout, context=my_context, curl_handle=curl)
        # Do not store actual response body at all.
        curl.setopt(pycurl.WRITEFUNCTION, lambda x: None)

        try:  # Run the curl call, if it errors, then add to failure counts for benchmark
            curl.perform()
        except Exception:
            output.failures = output.failures + 1
            curl.close()
            curl = pycurl.Curl()
            continue  # Skip metrics collection

        # Get all metrics values for this run, and store to metric lists
        for i in xrange(0, len(metricnames)):
            results[i].append(curl.getinfo(metricvalues[i]))

    logger.info('Benchmark: ' + message + ' ending')

    temp_results = dict()
    for i in xrange(0, len(metricnames)):
        temp_results[metricnames[i]] = results[i]
    output.results = temp_results
    return analyze_benchmark_results(output, benchmark)
Example #33
def build_targets(pred_boxes, target, anchors, num_anchors, num_classes, nH,
                  nW, noobject_scale, object_scale, sil_thresh, seen):
    # nH, nW are the numbers of grid cells in the y and x directions (7 x 7 here)
    nB = target.size(0)  # batch size
    nA = num_anchors  # 5 for our case
    nC = num_classes
    anchor_step = len(anchors) // num_anchors
    conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale
    coord_mask = torch.zeros(nB, nA, nH, nW)
    cls_mask = torch.zeros(nB, nA, nH, nW)
    tx = torch.zeros(nB, nA, nH, nW)
    ty = torch.zeros(nB, nA, nH, nW)
    tw = torch.zeros(nB, nA, nH, nW)
    th = torch.zeros(nB, nA, nH, nW)
    tconf = torch.zeros(nB, nA, nH, nW)
    tcls = torch.zeros(nB, nA, nH, nW)

    # for each grid there are nA anchors
    # nAnchors is the number of anchors for one image
    nAnchors = nA * nH * nW
    nPixels = nH * nW
    # for each image
    for b in xrange(nB):
        # get all anchor boxes in one image
        # (4 * nAnchors)
        cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t()
        # initialize iou score for each anchor
        cur_ious = torch.zeros(nAnchors)
        for t in xrange(50):
            # for each anchor 4 coordinate parameters, already in the coordinate system for the whole image
            # this loop is for anchors in each image
            # for each anchor 5 parameters are available (class, x, y, w, h)
            if target[b][t * 5 + 1] == 0:
                break
            gx = target[b][t * 5 + 1] * nW
            gy = target[b][t * 5 + 2] * nH
            gw = target[b][t * 5 + 3] * nW
            gh = target[b][t * 5 + 4] * nH
            # ground truth boxes
            cur_gt_boxes = torch.FloatTensor([gx, gy, gw,
                                              gh]).repeat(nAnchors, 1).t()
            # bbox_ious is the IoU between the predictions and the ground truth
            cur_ious = torch.max(
                cur_ious,
                bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
        # if iou > a given threshold, it is seen as it includes an object
        # conf_mask[b][cur_ious>sil_thresh] = 0
        conf_mask_t = conf_mask.view(nB, -1)
        conf_mask_t[b][cur_ious > sil_thresh] = 0
        conf_mask_tt = conf_mask_t[b].view(nA, nH, nW)
        conf_mask[b] = conf_mask_tt

    if seen < 12800:
        if anchor_step == 4:
            tx = torch.FloatTensor(anchors).view(nA, anchor_step).index_select(
                1, torch.LongTensor([2])).view(1, nA, 1,
                                               1).repeat(nB, 1, nH, nW)
            ty = torch.FloatTensor(anchors).view(
                num_anchors, anchor_step).index_select(1, torch.LongTensor(
                    [2])).view(1, nA, 1, 1).repeat(nB, 1, nH, nW)
        else:
            tx.fill_(0.5)
            ty.fill_(0.5)
        tw.zero_()
        th.zero_()
        coord_mask.fill_(1)

    # number of ground truth
    nGT = 0
    nCorrect = 0
    for b in xrange(nB):
        # loop over the ground-truth boxes of this image (up to 50 per image)
        for t in xrange(50):
            if target[b][t * 5 + 1] == 0:
                break
            nGT = nGT + 1
            best_iou = 0.0
            best_n = -1
            min_dist = 10000
            # the values saved in target are ratios;
            # multiply by the output feature map width and height (nW, nH)
            gx = target[b][t * 5 + 1] * nW
            gy = target[b][t * 5 + 2] * nH
            gi = int(gx)
            gj = int(gy)
            gw = target[b][t * 5 + 3] * nW
            gh = target[b][t * 5 + 4] * nH
            gt_box = [0, 0, gw, gh]
            for n in xrange(nA):
                # get anchor parameters (2 values)
                aw = anchors[anchor_step * n]
                ah = anchors[anchor_step * n + 1]
                anchor_box = [0, 0, aw, ah]
                # only consider the size (width and height) of the anchor box
                iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False)
                if anchor_step == 4:
                    ax = anchors[anchor_step * n + 2]
                    ay = anchors[anchor_step * n + 3]
                    dist = pow(((gi + ax) - gx), 2) + pow(((gj + ay) - gy), 2)
                # keep the anchor with the highest IoU
                if iou > best_iou:
                    best_iou = iou
                    best_n = n
                elif anchor_step == 4 and iou == best_iou and dist < min_dist:
                    best_iou = iou
                    best_n = n
                    min_dist = dist

            # then we determine the parameters for an anchor (4 values together)
            gt_box = [gx, gy, gw, gh]
            # find corresponding prediction box
            pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW +
                                  gi]

            # only consider the best anchor box, for each image
            coord_mask[b][best_n][gj][gi] = 1
            cls_mask[b][best_n][gj][gi] = 1
            # in this cell of the output feature map, there exists an object
            conf_mask[b][best_n][gj][gi] = object_scale
            tx[b][best_n][gj][gi] = target[b][t * 5 + 1] * nW - gi
            ty[b][best_n][gj][gi] = target[b][t * 5 + 2] * nH - gj
            tw[b][best_n][gj][gi] = math.log(gw /
                                             anchors[anchor_step * best_n])
            th[b][best_n][gj][gi] = math.log(gh /
                                             anchors[anchor_step * best_n + 1])
            iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)  # best_iou
            # the confidence target equals the IoU of the corresponding anchor
            tconf[b][best_n][gj][gi] = iou
            tcls[b][best_n][gj][gi] = target[b][t * 5]
            # if the IoU is larger than 0.5, count it as a correct prediction
            if iou > 0.5:
                nCorrect = nCorrect + 1

    # true values are returned
    return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls
Example #34
print('----- loss --------------------')
print(loss)

saved_grad = None


def extract(grad):
    global saved_grad
    saved_grad = convert2cpu(grad.data)


output.register_hook(extract)
loss.backward()

saved_grad = saved_grad.view(-1)
for i in xrange(saved_grad.size(0)):
    if abs(saved_grad[i]) >= 0.001:
        print('%d : %f' % (i, saved_grad[i]))

print(m.state_dict().keys())
#print(m.models[0][0].weight.grad.data.storage()[0:100])
#print(m.models[14][0].weight.data.storage()[0:100])
weight = m.models[13][0].weight.data
grad = m.models[13][0].weight.grad.data
mask = torch.abs(grad) >= 0.1
print(weight[mask])
print(grad[mask])

optimizer.step()
weight2 = m.models[13][0].weight.data
print(weight2[mask])
Example #35
def build_targets(pred_boxes, target, anchors, num_anchors, num_classes, nH,
                  nW, noobject_scale, object_scale, sil_thresh, seen):
    nB = target.size(0)
    nA = num_anchors
    nC = num_classes
    anchor_step = len(anchors) // num_anchors
    conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale
    coord_mask = torch.zeros(nB, nA, nH, nW)
    cls_mask = torch.zeros(nB, nA, nH, nW)
    tx = torch.zeros(nB, nA, nH, nW)
    ty = torch.zeros(nB, nA, nH, nW)
    tw = torch.zeros(nB, nA, nH, nW)
    th = torch.zeros(nB, nA, nH, nW)
    tconf = torch.zeros(nB, nA, nH, nW)
    tcls = torch.zeros(nB, nA, nH, nW)

    nAnchors = nA * nH * nW
    nPixels = nH * nW
    for b in xrange(nB):
        cur_pred_boxes = pred_boxes[b * nAnchors:(b + 1) * nAnchors].t()
        cur_ious = torch.zeros(nAnchors)
        for t in xrange(50):
            if target[b][t * 5 + 1] == 0:
                break
            gx = target[b][t * 5 + 1] * nW
            gy = target[b][t * 5 + 2] * nH
            gw = target[b][t * 5 + 3] * nW
            gh = target[b][t * 5 + 4] * nH
            cur_gt_boxes = torch.FloatTensor([gx, gy, gw,
                                              gh]).repeat(nAnchors, 1).t()
            cur_ious = torch.max(
                cur_ious,
                bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))
        conf_mask[b][cur_ious > sil_thresh] = 0
    if seen < 12800:
        if anchor_step == 4:
            tx = torch.FloatTensor(anchors).view(nA, anchor_step).index_select(
                1, torch.LongTensor([2])).view(1, nA, 1,
                                               1).repeat(nB, 1, nH, nW)
            ty = torch.FloatTensor(anchors).view(
                num_anchors, anchor_step).index_select(1, torch.LongTensor(
                    [2])).view(1, nA, 1, 1).repeat(nB, 1, nH, nW)
        else:
            tx.fill_(0.5)
            ty.fill_(0.5)
        tw.zero_()
        th.zero_()
        coord_mask.fill_(1)

    nGT = 0
    nCorrect = 0
    for b in xrange(nB):
        for t in xrange(50):
            if target[b][t * 5 + 1] == 0:
                break
            nGT = nGT + 1
            best_iou = 0.0
            best_n = -1
            min_dist = 10000
            gx = target[b][t * 5 + 1] * nW
            gy = target[b][t * 5 + 2] * nH
            gi = int(gx)
            gj = int(gy)
            gw = target[b][t * 5 + 3] * nW
            gh = target[b][t * 5 + 4] * nH
            gt_box = [0, 0, gw, gh]
            for n in xrange(nA):
                aw = anchors[anchor_step * n]
                ah = anchors[anchor_step * n + 1]
                anchor_box = [0, 0, aw, ah]
                iou = bbox_iou(anchor_box, gt_box, x1y1x2y2=False)
                if anchor_step == 4:
                    ax = anchors[anchor_step * n + 2]
                    ay = anchors[anchor_step * n + 3]
                    dist = pow(((gi + ax) - gx), 2) + pow(((gj + ay) - gy), 2)
                if iou > best_iou:
                    best_iou = iou
                    best_n = n
                elif anchor_step == 4 and iou == best_iou and dist < min_dist:
                    best_iou = iou
                    best_n = n
                    min_dist = dist

            gt_box = [gx, gy, gw, gh]
            pred_box = pred_boxes[b * nAnchors + best_n * nPixels + gj * nW +
                                  gi]

            coord_mask[b][best_n][gj][gi] = 1
            cls_mask[b][best_n][gj][gi] = 1
            conf_mask[b][best_n][gj][gi] = object_scale
            tx[b][best_n][gj][gi] = target[b][t * 5 + 1] * nW - gi
            ty[b][best_n][gj][gi] = target[b][t * 5 + 2] * nH - gj
            tw[b][best_n][gj][gi] = math.log(gw /
                                             anchors[anchor_step * best_n])
            th[b][best_n][gj][gi] = math.log(gh /
                                             anchors[anchor_step * best_n + 1])
            iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)  # best_iou
            tconf[b][best_n][gj][gi] = iou
            tcls[b][best_n][gj][gi] = target[b][t * 5]
            if iou > 0.5:
                nCorrect = nCorrect + 1

    return nGT, nCorrect, coord_mask, conf_mask, cls_mask, tx, ty, tw, th, tconf, tcls
Example #36
#!/usr/bin/env python
"""
Calculate RMSD between two XYZ files

by: Jimmy Charnley Kromann <*****@*****.**> and Lars Andersen Bratholm <*****@*****.**>
project: https://github.com/charnley/rmsd
license: https://github.com/charnley/rmsd/blob/master/LICENSE

"""

import numpy as np
import re

# enable Python 2/3 compatibility
try:
    xrange(1)
except NameError:
    from builtins import range as xrange


def kabsch_rmsd(P, Q):
    """
    Rotate matrix P onto Q and calculate the RMSD
    """
    P = kabsch_rotate(P, Q)
    return rmsd(P, Q)


def kabsch_rotate(P, Q):
    """
    Rotate matrix P onto matrix Q using the Kabsch algorithm
Example #37
def syslog_parse(logfile, logdata, headers, output):
    x = 0
    data = []
    singlelines = {}
    multilines = {}
    for i in logdata:
        x += 1
        if (not isinstance(i, str)):
            if re.search('^[A-Za-z]{3}'.encode('utf-8'), i):
                singlelines[x] = i.rstrip()
            else:
                data.append(x)
                data.append(x - 1)
                data.append(x + 1)
                multilines[x] = i.rstrip()
        else:
            if re.search('^[A-Za-z]{3}', i):
                singlelines[x] = i.rstrip()
            else:
                data.append(x)
                data.append(x - 1)
                data.append(x + 1)
                multilines[x] = i.rstrip()

    data = list(OrderedDict.fromkeys(sorted(data)))
    ranges = []
    # Group consecutive line numbers into runs. This restores the Python 2
    # idiom "groupby(enumerate(data), lambda (index, item): index - item)"
    # with a Python 3 compatible key function; groupby comes from itertools.
    for _, group in groupby(enumerate(data), lambda pair: pair[0] - pair[1]):
        group = [item for _, item in group]
        if len(group) > 1:
            ranges.append(xrange(group[0], group[-1]))
        else:
            ranges.append([group[0]])  # keep it iterable for the loop below

    for u in ranges:
        chain = []
        for z in u:
            if multilines.get(z) is None:
                lno = z
                anchor = singlelines.get(z)
            else:
                if (not isinstance(multilines.get(z), str)):
                    val = multilines.get(z).decode('utf-8').replace(
                        '\t', '').replace('  ', '')
                else:
                    val = multilines.get(z).replace('\t', '').replace('  ', '')
                chain.append(val)
        if (isinstance(anchor, str)):
            singlelines[lno] = anchor + ' ' + ''.join(chain)
        else:
            singlelines[lno] = anchor.decode('utf-8') + ' ' + ''.join(chain)

    for k, v in singlelines.items():
        line = v
        if (not isinstance(line, str)):
            line = line.decode('utf-8')
        if not 'last message repeated' in line:
            record = OrderedDict((h, '') for h in headers)
            m = re.match(
                r"(?P<month>\w\w\w)\s{1,2}(?P<day>\d{1,2}) (?P<time>\w\w:\w\w:\w\w) (?P<systemname>.*?) (?P<processName>.*?)\[(?P<PID>[0-9]+)\].*?:\s{0,1}(?P<message>.*)",
                line)
            record['src_file'] = logfile
            record['timestamp'] = str(
                m.group('month') + " " + m.group('day') + " " +
                m.group('time'))
            record['log_systemname'] = m.group('systemname')
            record['processname'] = m.group('processName')
            record['pid'] = m.group('PID')
            record['message'] = m.group('message')

            output.write_entry(record.values())
Example #38
import sys


def range(start, stop):
    if sys.version_info < (3, 0):
        # Python 2 names its builtins module __builtin__, and xrange is lazy.
        import __builtin__
        return __builtin__.xrange(start, stop)
    else:
        import builtins
        return builtins.range(start, stop)
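Unlike the wrapper in Example #18, this one only forwards (start, stop), with no step argument. A usage sketch:

for i in range(0, 3):
    print(i)  # prints 0, 1, 2; lazy on Python 2, a range object on Python 3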