Example #1
    def get_valid_moves_no_id(self):
        """returns a list of B_paths which is also a list\n
		will be concatnated later\n
		ex)\n
		[\n
			0: [int,int,int]
			1: B_path
			2: B_path
			...\n
		]\n
		Remember to not append 0 length path values"""
        # could use numpy to make the code more efficient
        # HERE
        rt = [[]] + [[] for x in repeat(None, MAX_INT_MINUS_ONE)]
        # 15 elements
        # print(rt)

        nonzero = np.where(self.board != 0)[0]
        # print(nonzero)
        for key, select in enumerate(nonzero):
            # print("-- select --")
            # print(select)
            if self.board[select] > 1:
                # print("tile appears more than twice. adding paired")
                rt[0].append(np.byte(
                    select))  # in c, it would be presented as a single byte
            # print("-- target --")
            for target in nonzero[key + 1:]:
                # print(target)
                rt[target - select].append(
                    Path(np.byte(select), np.byte(target)))
        return rt
Example #2
    def recursive_brancher(board: r_board, lost, paths: List[int]):
        # generate local pools and assign more global pools if needed
        # is_finished is not checked here
        print(">> recursive brancher ==================== <<")
        # the paths are all 0 lost paths
        boards = []
        for path in paths:
            boards.append(
                copy.deepcopy(board).append_path(
                    Path(np.byte(path),
                         np.byte(path))).do_move_pair_move(path))
        print(">> boards")
        pprint.pprint(boards)
        for recursive_branch_board in boards:
            moves = recursive_branch_board.get_valid_moves_no_id()
            # this is because we are going to generate the board right away

            # send another pair of board and 0 lost moves into the function
            if len(moves[0]) > 0:
                recursive_brancher(recursive_branch_board, lost, moves[0])
            lost = 1
            print(">> moves")
            pprint.pprint(moves)
            for lost_idx in range(1 + lost, MAX_INT + lost):
                # loop max_int-1 times
                # print(lost)
                if len(moves[lost]) > 0:
                    for path in moves[lost]:
                        global_board_pool[lost_idx].append(
                            copy.deepcopy(recursive_branch_board).do_move_lost(
                                path.sel, path.tar))
                lost += 1
Example #3
def save_test_signal_ecube(data, save_dir, voltsperbit):
    '''
    Create a binary file with eCube formatting using the given data

    Args:
        data (nt, nch): test_signal to save
        save_dir (str): where to save the file
        voltsperbit (float): gain of the headstage data you are creating

    Returns:
        str: filename of the new data
    '''
    intdata = np.array(data / voltsperbit,
                       dtype='<i2')  # turn into integer data
    flatdata = data.reshape(-1)
    timestamp = [1, 2, 3, 4]
    flatdata = np.insert(flatdata, timestamp, 0)

    # Save it to the test file
    datestr = datetime.now().strftime(
        "%Y-%m-%d_%H-%M-%S")  # e.g. 2021-05-06_11-47-02
    filename = f"Headstages_{data.shape[1]}_Channels_int16_{datestr}.bin"
    filepath = os.path.join(save_dir, filename)
    with open(filepath, 'wb') as f:
        for _ in range(8):
            f.write(np.byte(1))  # 8 byte timestamp
        for t in range(intdata.shape[0]):
            for ch in range(intdata.shape[1]):
                f.write(np.byte(intdata[t, ch]))
                f.write(np.byte(intdata[t, ch] >> 8))

    return filename
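The byte pairs written above form little-endian int16 samples, channel-interleaved, after an 8-byte timestamp header. A minimal read-back sketch, assuming that same layout (the function name and the nch argument are illustrative, not part of the original):

import numpy as np

def load_test_signal_ecube(filepath, nch, voltsperbit):
    # hypothetical reader for a file written by save_test_signal_ecube
    with open(filepath, 'rb') as f:
        f.read(8)  # skip the 8-byte timestamp header
        raw = np.frombuffer(f.read(), dtype='<i2')  # little-endian int16
    return raw.reshape(-1, nch) * voltsperbit  # back to (nt, nch) in volts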
Example #4
File: cpu.py Project: miglmj/nespy
 def __init__(self):
     #Initialize Registers
     self.registers.A = byte(0)
     self.registers.X = byte(0)
     self.registers.Y = byte(0)
     self.registers.P = byte(0)
     self.registers.SP = byte(0)
     self.registers.PC = uint16(0)
Example #5
    def export_train():  # type: () -> None
        # input size: (1, 2, 1, 3)
        x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
        s = np.array([1.0, 1.5]).astype(np.float32)
        bias = np.array([0, 1]).astype(np.float32)
        mean = np.array([0, 3]).astype(np.float32)
        var = np.array([1, 1.5]).astype(np.float32)
        # using np.bool(1) while generating test data fails with
        # "'bool' object has no attribute 'dtype'";
        # working around by using np.byte(1).astype(bool)
        training_mode = np.byte(1).astype(bool)
        y, saved_mean, saved_var, output_mean, output_var = batchnorm_training_mode(
            x, s, bias, mean, var)

        node = onnx.helper.make_node(
            'BatchNormalization',
            inputs=['x', 's', 'bias', 'mean', 'var', 'training_mode'],
            outputs=[
                'y', 'output_mean', 'output_var', 'saved_mean', 'saved_var'
            ],
        )

        # output size: (1, 2, 1, 3)
        expect(node,
               inputs=[x, s, bias, mean, var, training_mode],
               outputs=[y, output_mean, output_var, saved_mean, saved_var],
               name='test_batchnorm_example_training_mode')

        # input size: (2, 3, 4, 5)
        x = np.random.randn(2, 3, 4, 5).astype(np.float32)
        s = np.random.randn(3).astype(np.float32)
        bias = np.random.randn(3).astype(np.float32)
        mean = np.random.randn(3).astype(np.float32)
        var = np.random.rand(3).astype(np.float32)
        training_mode = np.byte(1).astype(bool)
        momentum = 0.9
        epsilon = 1e-2
        y, saved_mean, saved_var, output_mean, output_var = batchnorm_training_mode(
            x, s, bias, mean, var, momentum, epsilon)

        node = onnx.helper.make_node(
            'BatchNormalization',
            inputs=['x', 's', 'bias', 'mean', 'var', 'training_mode'],
            outputs=[
                'y', 'output_mean', 'output_var', 'saved_mean', 'saved_var'
            ],
            epsilon=epsilon,
        )

        # output size: (2, 3, 4, 5)
        expect(node,
               inputs=[x, s, bias, mean, var, training_mode],
               outputs=[y, output_mean, output_var, saved_mean, saved_var],
               name='test_batchnorm_epsilon_training_mode')
Example #6
File: parser.py Project: ucl-cssb/py_stoch
    def flatten_matrix(matrix):
        # each entry in the flattened matrix is an uchar4
        f_matrix = []

        count = 0
        for i in range(matrix.shape[0]):
            for j in range(matrix.shape[1]):
                if matrix[i][j] != 0:
                    entry = numpy.array(
                        [numpy.byte(i), numpy.byte(j), numpy.byte(matrix[i][j]),
                         numpy.byte(0)])
                    f_matrix.append(entry)
                    count += 1
        return numpy.array(f_matrix), numpy.ubyte(count)
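A hypothetical call, assuming flatten_matrix is reachable as a plain function: only non-zero entries survive, each packed as a (row, col, value, 0) quadruple of bytes.

import numpy

m = numpy.array([[0, 2], [5, 0]])
f_matrix, count = flatten_matrix(m)
# f_matrix -> [[0, 1, 2, 0], [1, 0, 5, 0]], count -> numpy.ubyte(2)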
Example #7
def format_value(value, dtype):
    """
    Set the datatype for a single value.

    Arguments:
        value (Series): non-iterable value to set.

        dtype (str): scalar data type.
    """
    if dtype in ('date', 'datetime', 'timestamp', 'time'):
        value = np.datetime64(pd.to_datetime(value))
    elif dtype in ('int', 'integer', 'bigint'):
        value = np.int_(value)
    elif dtype == 'mediumint':
        value = np.intc(value)
    elif dtype == 'smallint':
        value = np.short(value)
    elif dtype in ('tinyint', 'bit'):
        value = np.byte(value)
    elif dtype in (
            'float', 'real',
            'double'):  # approximate numeric data types for saving memory
        value = np.single(value)
    elif dtype in ('decimal', 'dec', 'numeric',
                   'money'):  # exact numeric data types
        value = np.double(value)
    elif dtype in ('bool', 'boolean'):
        value = np.bool_(value)
    elif dtype in ('char', 'varchar', 'binary', 'text', 'string'):
        value = np.str_(value)
    else:
        value = np.str_(value)

    return value
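A few hypothetical calls to illustrate the mapping from SQL-style type names to NumPy scalar types (assuming the numpy/pandas imports the function relies on):

print(format_value('2021-05-06', 'date'))  # numpy.datetime64
print(format_value(7, 'smallint'))         # np.short(7)
print(format_value(1, 'tinyint'))          # np.byte(1)
print(format_value('yes', 'text'))         # np.str_('yes')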
Example #8
def parse_nicer_flag(filter_flag):
    """
    translates a NICER bit event flag & prints result
    :param filter_flag: specified nicer filter flag
    :return: string describing the filter

    EVENT_FLAGS == xxxxx1: "undershoot" reset
    EVENT_FLAGS == xxxx1x: "overshoot" reset
    EVENT_FLAGS == xxx1xx: software sample
    EVENT_FLAGS == xx1xxx: fast signal chain triggered
    EVENT_FLAGS == x1xxxx: slow signal chain triggered
    EVENT_FLAGS == 1xxxxx: first event in MPU packet
    """
    if len(filter_flag.strip()) != 8:
        print ("Filter flag must have length of 8 characters")
        return
    # NICER only uses 6 bits for flagging so ignore the 1st 2 characters in flag
    fflag = filter_flag.strip()[2:]
    descrip = [
        "First Event in MPU packet",
        "Slow signal chain trigger",
        "Fast signal chain trigger",
        "Software sample",
        "Overshoot Reset",
        "Undershoot Reset"
    ]
    for i, d in enumerate(descrip):
        if fflag[i] != 'x':
            status = bool(np.byte(fflag[i]))
        else:
            status = 'ignored'
        print ("  {d} {stat}".format(d=d, stat=status))
Example #9
def main(argv):
    s = argv[1]
    s = s.split()
    s = list(map(int, s))
    s = byte(s)
    s = b''.join(s)
    s = s.decode('utf-16')
    arg_list = json.loads(s)

    try:
        subprocess.call(arg_list)
    except FileNotFoundError:
        app = arg_list[0]
        # Only the name of the python script is given.
        app = which(app)[0]
        ext = app.suffix
        prog = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, ext)
        comkey = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT,
                                rf'\{prog}\Shell\Open\Command')
        try:
            comstr = winreg.QueryValueEx(comkey, '')[0]
            items = re.findall(r'"[^"]+"|\S+', comstr)
        finally:
            winreg.CloseKey(comkey)
        new_list = [items[0].replace('"', '')]
        for item in items[1:]:
            item = item.replace('"', '')
            if item == '%1':
                new_list.append(str(app))
            elif item == '%*':
                new_list.extend(arg_list[1:])
            else:
                new_list.append(item)
        subprocess.call(new_list)
Example #10
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
Example #11
def mapdicttoarray(array, mapdict, fillvalue=np.byte(-127)):
    '''
    Maps all entries of array according to new values given in mapdict. If key is
    not found, fillvalue is assigned.

    Parameters
    ----------
    array : np.array
        input array to be altered.
    mapdict : dict
        dictionary with <oldvalue>:<newvalue> format.
    fillvalue : any, optional
        fillvalue to be set if no key is found for that entry.

    Returns
    -------
    newarray : np.array
        output array with modified entries.

    '''
    import numpy as np

    newarray = np.vectorize(mapdict.get)(array, fillvalue)

    return newarray
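A hypothetical call: entries present in mapdict are remapped and anything else becomes the fillvalue.

import numpy as np

arr = np.array([1, 2, 3])
print(mapdicttoarray(arr, {1: 10, 2: 20}))  # [  10   20 -127]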
Example #12
def updateOutput(YMatrix,
                 changeIndexes,
                 prevOutput,
                 withReLU=False,
                 useHalf=False):

    outC, outH, outW = prevOutput.size(-3), prevOutput.size(
        -2), prevOutput.size(-1)
    numChanges = changeIndexes.numel()
    CUDA_NUM_THREADS = 1024

    block = (CUDA_NUM_THREADS, 1, 1)
    grid = ((numChanges * outC - 1) // CUDA_NUM_THREADS + 1, 1)

    if numChanges > 0:
        CC = Chalf if useHalf else C
        CC.updateOutput(
            np.int32(1), np.int32(grid[1]), np.int32(grid[0]),
            np.int32(block[2]), np.int32(block[1]), np.int32(block[0]),
            ffi.cast("float *",
                     YMatrix.contiguous().data_ptr()),
            ffi.cast("float *",
                     prevOutput.contiguous().data_ptr()),
            ffi.cast("int *",
                     changeIndexes.contiguous().data_ptr()),
            np.int32(outW * outH), np.int32(numChanges), np.int32(outC),
            np.byte(withReLU))

    return prevOutput
Example #13
 def test_numpy(self):
     """NumPy objects get serialized to readable JSON."""
     l = [
         np.float32(12.5),
         np.float64(2.0),
         np.float16(0.5),
         np.bool(True),
         np.bool(False),
         np.bool_(True),
         np.unicode_("hello"),
         np.byte(12),
         np.short(12),
         np.intc(-13),
         np.int_(0),
         np.longlong(100),
         np.intp(7),
         np.ubyte(12),
         np.ushort(12),
         np.uintc(13),
         np.ulonglong(100),
         np.uintp(7),
         np.int8(1),
         np.int16(3),
         np.int32(4),
         np.int64(5),
         np.uint8(1),
         np.uint16(3),
         np.uint32(4),
         np.uint64(5),
     ]
     l2 = [l, np.array([1, 2, 3])]
     roundtripped = loads(dumps(l2, cls=EliotJSONEncoder))
     self.assertEqual([l, [1, 2, 3]], roundtripped)
Example #14
def main(argv):
    s = argv[1]
    s = s.split()
    s = list(map(int, s))
    s = byte(s)
    s = b''.join(s)
    s = s.decode('utf-16')
    arg_list = json.loads(s)

    try:
        subprocess.call(arg_list)
    except FileNotFoundError:
        app = arg_list[0]
        # Only the name of the python script is given.
        app = which(app)[0]
        ext = app.suffix
        prog = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, ext)
        comkey = winreg.OpenKey(
                winreg.HKEY_CLASSES_ROOT, 
                rf'\{prog}\Shell\Open\Command')
        try:
            comstr = winreg.QueryValueEx(comkey, '')[0]
            items = re.findall(r'"[^"]+"|\S+', comstr)
        finally:
            winreg.CloseKey(comkey)
        new_list = [items[0].replace('"', '')]
        for item in items[1:]:
            item = item.replace('"', '')
            if item == '%1':
                new_list.append(str(app))
            elif item == '%*':
                new_list.extend(arg_list[1:])
            else:
                new_list.append(item)
        subprocess.call(new_list)
Example #15
def readDataset(filename):
    print("Reading %s" % filename)
    f = open(filename, "rb")
    magic, = unpack(">L", f.read(4))
    count, = unpack(">L", f.read(4))
    dtype = "images" if magic == 2051 else "labels"
    print("  Magic number: %d, %d %s!" % (magic, count, dtype))

    if dtype == "images":
        data = []
        width, = unpack(">L", f.read(4))
        height, = unpack(">L", f.read(4))
        print("  Image size: [%d, %d]" % (width, height))
        for i in range(0, count):
            print("  Reading image: %d / %d" % (i + 1, count), end="\r")
            array = [
                unpack("B", f.read(1))[0] for j in range(0, width * height)
            ]
            array = np.byte(
                [array[j::28] for j in range(0, 28)])  # pack into a 28x28 int8 array (assumes MNIST's 28x28 images)
            data.append(array)
        print("")
    elif dtype == "labels":
        data = [unpack("B", f.read(1))[0] for i in range(0, count)]
        #print("  The first 10 labels are:")
        #for i in range(0, 10): print("    %d" % data[i])

    f.close()
    print("")
    return (data, count)
Example #16
def SaveMapToPngRainbow(Dataframe: pd.DataFrame, PngFileName: str) -> None:
    Max = MaxContacts(Dataframe)
    Dataframe = Dataframe.applymap(
        lambda x: list(colorsys.hsv_to_rgb(1.0 - (x / Max), 0.8, x / Max)))
    Dataframe = np.array(
        Dataframe.applymap(lambda x: [np.byte(int(item * 255))
                                      for item in x]).values.tolist())
    Image.fromarray(Dataframe, 'RGB').save(PngFileName)
Example #17
def test_numpy_numeric_args():
    # we don't test them all here, but hopefully a representative sample
    assert (mv.call('+', np.int32(5), 1) == 6)
    assert (mv.call('+', np.int64(5), 1) == 6)
    assert (mv.call('+', np.byte(8), 1) == 9)
    assert (mv.call('+', np.uint16(8), 1) == 9)
    assert (np.isclose(mv.call('+', np.float32(5.5), 1), 6.5))
    assert (np.isclose(mv.call('+', np.float64(5.5), 1), 6.5))
Example #18
    def flatten_matrix(matrix):
        # each entry in the flattened matrix is an uchar4
        f_matrix = []

        count = 0
        for i in range(matrix.shape[0]):
            for j in range(matrix.shape[1]):
                if matrix[i][j] != 0:
                    entry = numpy.array([
                        numpy.byte(i),
                        numpy.byte(j),
                        numpy.byte(matrix[i][j]),
                        numpy.byte(0)
                    ])
                    f_matrix.append(entry)
                    count += 1
        return numpy.array(f_matrix), numpy.ubyte(count)
Example #19
 def futhark_main(self, screenX_700, screenY_701, depth_702, xmin_703,
                  ymin_704, xmax_705, ymax_706):
     res_707 = (xmax_705 - xmin_703)
     res_708 = (ymax_706 - ymin_704)
     y_711 = sitofp_i32_f32(screenX_700)
     y_712 = sitofp_i32_f32(screenY_701)
     x_713 = slt32(np.int32(0), depth_702)
     bytes_902 = (np.int32(4) * screenY_701)
     mem_903 = cl.Buffer(
         self.ctx, cl.mem_flags.READ_WRITE,
         long(
             long(bytes_902) if (bytes_902 > np.int32(0)) else np.int32(1)))
     mem_905 = cl.Buffer(
         self.ctx, cl.mem_flags.READ_WRITE,
         long(
             long(bytes_902) if (bytes_902 > np.int32(0)) else np.int32(1)))
     group_size_911 = np.int32(512)
     num_groups_912 = squot32(
         ((screenY_701 + group_size_911) - np.int32(1)), group_size_911)
     if ((np.int32(1) * (num_groups_912 * group_size_911)) != np.int32(0)):
         self.map_kernel_894_var.set_args(np.float32(ymin_704),
                                          np.float32(y_712),
                                          np.float32(res_708),
                                          np.int32(screenY_701), mem_903,
                                          mem_905)
         cl.enqueue_nd_range_kernel(
             self.queue, self.map_kernel_894_var, (long(
                 (num_groups_912 * group_size_911)), ),
             (long(group_size_911), ))
         if synchronous:
             self.queue.finish()
     nesting_size_844 = (screenX_700 * screenY_701)
     bytes_906 = (bytes_902 * screenX_700)
     mem_908 = cl.Buffer(
         self.ctx, cl.mem_flags.READ_WRITE,
         long(
             long(bytes_906) if (bytes_906 > np.int32(0)) else np.int32(1)))
     group_size_917 = np.int32(512)
     num_groups_918 = squot32(
         (((screenY_701 * screenX_700) + group_size_917) - np.int32(1)),
         group_size_917)
     if ((np.int32(1) * (num_groups_918 * group_size_917)) != np.int32(0)):
         self.map_kernel_846_var.set_args(np.int32(screenX_700),
                                          np.int32(screenY_701), mem_905,
                                          np.byte(x_713),
                                          np.int32(depth_702),
                                          np.float32(xmin_703), mem_903,
                                          np.float32(y_711),
                                          np.float32(res_707), mem_908)
         cl.enqueue_nd_range_kernel(
             self.queue, self.map_kernel_846_var, (long(
                 (num_groups_918 * group_size_917)), ),
             (long(group_size_917), ))
         if synchronous:
             self.queue.finish()
     out_mem_909 = mem_908
     out_memsize_910 = bytes_906
     return (out_memsize_910, out_mem_909)
Example #20
def create_pairs(x, digits, num_pairs, digits_idx, y, digits2=None):

    pairs = []
    labels = []

    if digits2 is None:

        while True:

            for d in digits:

                P1, P2 = np.random.choice(digits_idx[d], 2)
                pairs += [[x[P1], x[P2]]]

                assert y[P1] in digits and y[P2] in digits
                assert y[P1] == y[
                    P2], 'Positive pairs should have the same labels'

                N1 = np.random.choice(digits_idx[d])
                N2 = np.random.choice(digits_idx[np.random.choice(
                    [di for di in digits if di != d])])
                pairs += [[x[N1], x[N2]]]

                assert y[N1] in digits and y[N2] in digits
                assert y[N1] != y[
                    N2], 'Negative pairs should have different labels'

                labels += [1, 0]

                if len(pairs) >= num_pairs:
                    if normalize:
                        return np.array(pairs).astype(
                            'float32') / 255, np.array(labels)
                    else:
                        return np.array(pairs).astype('float32'), np.array(
                            labels)
    else:

        while True:

            d1 = np.random.choice(digits)
            d2 = np.random.choice(digits2)

            P1 = np.random.choice(digits_idx[d1])
            P2 = np.random.choice(digits_idx[d2])

            pairs += [[x[P1], x[P2]]]

            labels += [d1 == d2]

            if len(pairs) >= num_pairs:

                if normalize:
                    return np.array(pairs).astype('float32') / 255, np.array(
                        labels)
                else:
                    return np.array(pairs).astype('float32'), np.byte(
                        np.array(labels))
Example #21
    def __init__(self, r=True, g=True, b=True):

        # Create a texture from an RGBA black-and-white checkerboard image of size WIDTH and HEIGHT
        data = np.zeros(self.WIDTH * self.HEIGHT * 4, dtype=np.byte).reshape(
            (self.WIDTH, self.HEIGHT, 4))

        for i in range(self.HEIGHT):
            for j in range(self.WIDTH):
                c = (((i & 0x8) == 0) ^ ((j & 0x8) == 0)) * 255
                if r == True:
                    data[i][j][0] = np.byte(c)
                else:
                    data[i][j][0] = np.byte(255)
                if g == True:
                    data[i][j][1] = np.byte(c)
                else:
                    data[i][j][1] = np.byte(255)

                if b == True:
                    data[i][j][2] = np.byte(c)
                else:
                    data[i][j][2] = np.byte(255)

                data[i][j][3] = np.byte(255)

        self.data = data
Example #22
def test_check_argument_list2():
    kernel_name = "test_kernel"
    kernel_string = """__kernel void test_kernel
        (char number, double factors, int * numbers, const unsigned long * moreNumbers) {
        numbers[get_global_id(0)] = numbers[get_global_id(0)] * factors[get_global_id(0)] + number;
        }
        """
    args = [np.byte(5), np.float64(4.6), np.int32([1, 2, 3]), np.uint64([3, 2, 111])]
    assert_no_user_warning(check_argument_list, [kernel_name, kernel_string, args])
Example #23
def SaveMapToPng3Channels(Dataframes: dict, PngFileName: str) -> None:
    Max = {index: MaxContacts(item) for index, item in Dataframes.items()}
    Dataframes = {
        index: item.applymap(lambda x: [np.byte(int(x / Max[index] * 255))])
        for index, item in Dataframes.items()
    }
    Data = np.array(Dataframes['R'].add(Dataframes['G']).add(
        Dataframes['B']).values.tolist())
    Image.fromarray(Data, 'RGB').save(PngFileName)
Example #24
        def get_valid_moves(self, loc: BoardLoc):
            # advantages of having a path group
            # share same id and reduce the amount of evaluation at a given time
            """returns a list of B_paths which is also a list\n
			will be concatnated later\n
			ex)\n
			[\n
				0: [int,int,int]
				1: B_path
				2: B_path
				...\n
			]\n
			Remember to not append 0 length path values"""
            rt = [[]] + [B_path(loc) for x in repeat(None, MAX_INT)]
            # 15 elements
            # print(rt)

            nonzero = np.where(self.board != 0)[0]
            if len(nonzero) == 0:
                raise Error("ASKED TO FIND PATHS ON AN EMPTY BOARD")
            if len(nonzero) == 1:
                # there are multiple cases here
                if self.board[nonzero[0]] == 1:
                    # if there is only 1 number
                    rt[nonzero[0] + 1].paths.append(
                        Path(np.byte(nonzero[0]), np.byte(nonzero[0])))
                    return rt
                rt[0].append(nonzero[0])
                return rt
            for key, select in enumerate(nonzero):
                # print("-- select --")
                # print(select)
                if self.board[select] > 1:
                    # print("tile appears more than twice. adding paired")
                    rt[0].append(
                        np.byte(select)
                    )  # in c, it would be presented as a single byte
                # print("-- target --")
                for target in nonzero[key + 1:]:
                    # print(target)
                    rt[min(target, select) + 1].paths.append(
                        Path(np.byte(select), np.byte(target)))
            return rt
Example #25
 def __init__(self, start_address):
     super().__init__(start_address)
     self.start_address = start_address
     self.bg_block_bitmap = i32(0)
     self.bg_inode_bitmap = i32(0)
     self.bg_inode_table = i32(0)
     self.bg_free_blocks_count = i16(0)
     self.bg_free_inodes_count = i16(0)
     self.bg_used_dirs_count = i16(0)
     self.bg_pad = i16(0)
     self.bg_reserved = [byte(0)] * 12
Example #26
 def relative(self):
     if not self.cpu.ps['zero_flag']:
         offset = int(self.cpu.fetch_byte(), base=0)
         target_address = self.cpu.pc + np.byte(offset)
         ~self.cpu.clock
         if (target_address >> 8) != (self.cpu.pc >> 8):
             ~self.cpu.clock
         self.cpu.pc = target_address
     else:
         self.cpu.pc = self.cpu.pc + 1
         ~self.cpu.clock
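The np.byte cast is what makes backward branches work: np.byte is np.int8, so offset bytes above 0x7F become negative. A minimal illustration (values chosen arbitrarily):

import numpy as np

print(np.byte(0x10))                   # 16: branch 16 bytes forward
print(np.uint8(0x90).astype(np.byte))  # -112: branch 112 bytes backward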
Example #27
    def __init__(self, rate=None):
        if not rate:
            pass

        self.__rate = rate
        simple_rate = np.byte(1000 / rate - 1)

        self.__gyro_bits = self.__get_gyro_rate(rate_byte=simple_rate >> 1)
        self.__accel_bits = self.__get_accel_rate(rate_byte=simple_rate >> 1)

        self.__simple_rate = simple_rate
Example #28
File: writing.py Project: equinor/roffio
def cast_to_roff(value, type_str):
    if type_str == "bool":
        return np.byte(value)
    if type_str == "byte":
        return value
    if type_str == "int":
        return np.int32(value)
    if type_str == "float":
        return np.float32(value)
    if type_str == "double":
        return np.float64(value)
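Two hypothetical calls: roff stores booleans as single bytes, which is why the "bool" branch goes through np.byte.

print(cast_to_roff(True, "bool"))   # 1, as np.byte
print(cast_to_roff(2.5, "float"))   # 2.5, as np.float32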
Example #29
def test_random_test_sets():
    '''Run randomized testing.'''
    for _ in xrange(50):
        (g, c), x0 = generate_test_set(5, 3, 2)
        p = Position(g, c, np.tile(np.byte(X), (g.shape[1],)), remove_zero_rows=False)
        x = solve(g.copy(), c.copy())
        if x is None:
            print p.formatted_repr()
            print 'x0', repr(x0)
            print 'x', repr(x)
            print 'Testing: FAIL'
            return
    print 'Testing OK'
Example #30
    def __init__(self, reset_limit = 2000):
        """reset_limit - expect next pulse within x number of samples"""
        gr.sync_block.__init__(
            self,
            name='Python OOK Demod',
            in_sig=[np.byte, np.byte], 
            out_sig=None
        )
        
        self.buffer = np.byte(0)
        self.count = 0
        self.timeout = 0
        self.reset_limit = reset_limit
Example #31
def test_check_argument_list7():
    kernel_name = "test_kernel"
    kernel_string = """#define SUM(A, B) (A + B)
        // In this file we define test_kernel
        __kernel void another_kernel (char number, double factors, int * numbers, const unsigned long * moreNumbers)
        __kernel void test_kernel
        (double number, double factors, int * numbers, const unsigned long * moreNumbers) {
        numbers[get_global_id(0)] = SUM(numbers[get_global_id(0)] * factors[get_global_id(0)], number);
        }
        // /test_kernel
        """
    args = [np.byte(5), np.float64(4.6), np.int32([1, 2, 3]), np.uint64([3, 2, 111])]
    assert_user_warning(check_argument_list, [kernel_name, kernel_string, args])
Example #32
def test_check_argument_list6():
    kernel_name = "test_kernel"
    kernel_string = """// This is where we define test_kernel
        #define SUM(A, B) (A + B)
        __kernel void test_kernel
        (char number, double factors, int * numbers, const unsigned long * moreNumbers) {
        numbers[get_global_id(0)] = SUM(numbers[get_global_id(0)] * factors[get_global_id(0)], number);
        }
        // /test_kernel
        """
    args = [np.byte(5), np.float64(4.6), np.int32([1, 2, 3]), np.uint64([3, 2, 111])]
    check_argument_list(kernel_name, kernel_string, args)
    # test that no exception is raised
    assert True
Example #33
 def render(self, frame):
     if self.map_scene:
         self.map_scene.add_c(
             complex(self.params["c_real"], self.params["c_imag"]))
     updated = False
     if self.map_scene:
         updated = self.map_scene.render(frame)
     if not self.draw:
         return updated
     if self.mapmode:
         view_prefix = "map_"
     else:
         view_prefix = ""
     super_sampling = self.params["super_sampling"]
     self.set_view(self.params[view_prefix + "center_real"],
                   self.params[view_prefix + "center_imag"],
                   self.params[view_prefix + "radius"])
     x = np.linspace(self.plane_min[0], self.plane_max[0],
                     self.window_size[0] * super_sampling)
     y = np.linspace(self.plane_min[1], self.plane_max[1],
                     self.window_size[1] * super_sampling) * 1j
     plane = np.ravel(y+x[:, np.newaxis]).astype(np.complex128)
     render_args = [
         plane,
         np.byte(self.params["julia"] and not self.mapmode),
         np.uint32(self.params["max_iter"]),
         np.uint32(self.params.get("pre_iter", 0)),
         np.double(self.params["grad_freq"]),
         np.double(self.params["c_real"]),
         np.double(self.params["c_imag"]),
     ]
     for kernel_param in self.params["kernel_params_mod"]:
         render_args.append(np.double(self.params[kernel_param]))
     nparray = self.gpu.render(*render_args)
     if super_sampling > 1:
         import scipy.ndimage
         import scipy.misc
         s = (self.window_size[0], self.window_size[1])
         nparray = scipy.misc.imresize(
             nparray.view(np.uint8).reshape(s[0]*super_sampling,
                                            s[1]*super_sampling, 4),
             s,
             interp='cubic',
             mode='RGBA')
     self.blit(nparray.view(np.uint32))
     self.draw = False
     if self.mapmode:
         self.draw_previous_c()
     return True
Example #34
    def __init__(self):

        # Create a texture from an RGBA checkerboard image of size WIDTH and HEIGHT
        data = np.zeros(self.WIDTH * self.HEIGHT * 4, dtype=np.byte).reshape(
            (self.WIDTH, self.HEIGHT, 4))

        for i in range(self.HEIGHT):
            for j in range(self.WIDTH):
                if (i + j) % 2 == 0:
                    c = 255
                    # white
                    data[i][j][0] = np.byte(c)
                    data[i][j][1] = np.byte(c)
                    data[i][j][2] = np.byte(c)
                else:
                    c = 255
                    # red
                    data[i][j][0] = np.byte(c)
                    data[i][j][1] = 0
                    data[i][j][2] = 0

                data[i][j][3] = 0

        self.data = data
Example #35
 def futhark_main(self, screenX_700, screenY_701, depth_702, xmin_703,
                  ymin_704, xmax_705, ymax_706):
   res_707 = (xmax_705 - xmin_703)
   res_708 = (ymax_706 - ymin_704)
   y_711 = sitofp_i32_f32(screenX_700)
   y_712 = sitofp_i32_f32(screenY_701)
   x_713 = slt32(np.int32(0), depth_702)
   bytes_902 = (np.int32(4) * screenY_701)
   mem_903 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_902) if (bytes_902 > np.int32(0)) else np.int32(1)))
   mem_905 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_902) if (bytes_902 > np.int32(0)) else np.int32(1)))
   group_size_911 = np.int32(512)
   num_groups_912 = squot32(((screenY_701 + group_size_911) - np.int32(1)),
                            group_size_911)
   if ((np.int32(1) * (num_groups_912 * group_size_911)) != np.int32(0)):
     self.map_kernel_894_var.set_args(np.float32(ymin_704), np.float32(y_712),
                                      np.float32(res_708),
                                      np.int32(screenY_701), mem_903, mem_905)
     cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_894_var,
                                (long((num_groups_912 * group_size_911)),),
                                (long(group_size_911),))
     if synchronous:
       self.queue.finish()
   nesting_size_844 = (screenX_700 * screenY_701)
   bytes_906 = (bytes_902 * screenX_700)
   mem_908 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_906) if (bytes_906 > np.int32(0)) else np.int32(1)))
   group_size_917 = np.int32(512)
   num_groups_918 = squot32((((screenY_701 * screenX_700) + group_size_917) - np.int32(1)),
                            group_size_917)
   if ((np.int32(1) * (num_groups_918 * group_size_917)) != np.int32(0)):
     self.map_kernel_846_var.set_args(np.int32(screenX_700),
                                      np.int32(screenY_701), mem_905,
                                      np.byte(x_713), np.int32(depth_702),
                                      np.float32(xmin_703), mem_903,
                                      np.float32(y_711), np.float32(res_707),
                                      mem_908)
     cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_846_var,
                                (long((num_groups_918 * group_size_917)),),
                                (long(group_size_917),))
     if synchronous:
       self.queue.finish()
   out_mem_909 = mem_908
   out_memsize_910 = bytes_906
   return (out_memsize_910, out_mem_909)
Example #36
 def test_lit(self):
     self.assertTrue(
         spark_column_equals(SF.lit(np.int64(1)),
                             F.lit(1).astype(LongType())))
     self.assertTrue(
         spark_column_equals(SF.lit(np.int32(1)),
                             F.lit(1).astype(IntegerType())))
     self.assertTrue(
         spark_column_equals(SF.lit(np.int8(1)),
                             F.lit(1).astype(ByteType())))
     self.assertTrue(
         spark_column_equals(SF.lit(np.byte(1)),
                             F.lit(1).astype(ByteType())))
     self.assertTrue(
         spark_column_equals(SF.lit(np.float32(1)),
                             F.lit(float(1)).astype(FloatType())))
     self.assertTrue(spark_column_equals(SF.lit(1), F.lit(1)))
Example #37
def _getExifValue(data, data_type):
    if data_type==1:
        return np.ubyte(data)
    elif data_type==2:
        return str(data)
    elif data_type==3:
        return np.uint16(data)
    elif data_type==4:
        return np.uint32(data)
    elif data_type==5:
        n=np.uint32(0xffffffff&data)
        d=np.uint32(((0xffffffff<<32)&data)>>32)
        if n==0:
            return 0
        elif d==0:
            return "nan"
        else:
            return (n,d)
    elif data_type==6:
        return np.byte(data)
    elif data_type==7:
        return data
    elif data_type==8:
        return np.int16(data)
    elif data_type==9:
        return np.int32(data)
    elif data_type==10:
        n=np.int32(0xffffffff&data)
        d=np.int32(((0xffffffff<<32)&data)>>32)
        if n==0:
            return 0
        elif d==0:
            return "nan"
        else:
            return (n,d)
    elif data_type==11:
        return np.float32(data)
    elif data_type==12:
        return np.float64(data)
    else:
        return data
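Hypothetical lookups following the EXIF type codes: type 1 is an unsigned byte, type 6 a signed byte, type 3 an unsigned short.

print(_getExifValue(200, 1))  # np.ubyte -> 200
print(_getExifValue(-5, 6))   # np.byte -> -5
print(_getExifValue(515, 3))  # np.uint16 -> 515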
Example #38
def main():
    gdal.AllRegister()
    path = auxil.select_directory("Choose working directory")
    #    path = 'd:\\imagery\\CRC\\Chapters6-7'
    if path:
        os.chdir(path)
    infile = auxil.select_infile(title="Select a class probability image")
    if infile:
        inDataset = gdal.Open(infile, GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize
        classes = inDataset.RasterCount
    else:
        return
    outfile, fmt = auxil.select_outfilefmt()
    if not outfile:
        return
    nitr = auxil.select_integer(3, "Select number of iterations")
    print "========================="
    print "       PLR"
    print "========================="
    print "infile:  %s" % infile
    print "iterations:  %i" % nitr
    start = time.time()
    prob_image = np.zeros((classes, rows, cols))
    for k in range(classes):
        band = inDataset.GetRasterBand(k + 1)
        prob_image[k, :, :] = band.ReadAsArray(0, 0, cols, rows).astype(float)
    #  compatibility matrix
    Pmn = np.zeros((classes, classes))
    n_samples = (cols - 1) * (rows - 1)
    samplem = np.reshape(prob_image[:, 0 : rows - 1, 0 : cols - 1], (classes, n_samples))
    samplen = np.reshape(prob_image[:, 1:rows, 0 : cols - 1], (classes, n_samples))
    sampleu = np.reshape(prob_image[:, 0 : rows - 1, 1:cols], (classes, n_samples))
    max_samplem = np.amax(samplem, axis=0)
    max_samplen = np.amax(samplen, axis=0)
    max_sampleu = np.amax(sampleu, axis=0)
    print "estimating compatibility matrix..."
    for j in range(n_samples):
        if j % 50000 == 0:
            print "%i samples of %i" % (j, n_samples)
        m1 = np.where(samplem[:, j] == max_samplem[j])[0][0]
        n1 = np.where(samplen[:, j] == max_samplen[j])[0][0]
        if isinstance(m1, int) and isinstance(n1, int):
            Pmn[m1, n1] += 1
        u1 = np.where(sampleu[:, j] == max_sampleu[j])[0][0]
        if isinstance(m1, int) and isinstance(u1, int):
            Pmn[m1, u1] += 1
    for j in range(classes):
        n = np.sum(Pmn[j, :])
        if n > 0:
            Pmn[j, :] /= n
    print Pmn
    itr = 0
    temp = prob_image * 0
    print "label relaxation..."
    while itr < nitr:
        print "iteration %i" % (itr + 1)
        Pm = np.zeros(classes)
        Pn = np.zeros(classes)
        for i in range(1, rows - 1):
            if i % 50 == 0:
                print "%i rows processed" % i
            for j in range(1, cols - 1):
                Pm[:] = prob_image[:, i, j]
                Pn[:] = prob_image[:, i - 1, j] / 4
                Pn[:] += prob_image[:, i + 1, j] / 4
                Pn[:] += prob_image[:, i, j - 1] / 4
                Pn[:] += prob_image[:, i, j + 1] / 4
                Pn = np.transpose(Pn)
                if np.sum(Pm) == 0:
                    Pm_new = Pm
                else:
                    Pm_new = Pm * (np.dot(Pmn, Pn)) / (np.dot(np.dot(Pm, Pmn), Pn))
                temp[:, i, j] = Pm_new
        prob_image = temp
        itr += 1
    #  write to disk
    prob_image = np.byte(prob_image * 255)
    driver = gdal.GetDriverByName(fmt)
    outDataset = driver.Create(outfile, cols, rows, classes, GDT_Byte)
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(geotransform)
    if projection is not None:
        outDataset.SetProjection(projection)
    for k in range(classes):
        outBand = outDataset.GetRasterBand(k + 1)
        outBand.WriteArray(prob_image[k, :, :], 0, 0)
        outBand.FlushCache()
    outDataset = None
    inDataset = None
    print "result written to: " + outfile
    print "elapsed time: " + str(time.time() - start)
    print "--done------------------------"
Example #39
 def futhark_main(self, width_734, height_735, limit_736, view_737, view_738,
                  view_739, view_740):
   res_741 = (view_739 - view_737)
   res_742 = (view_740 - view_738)
   y_745 = sitofp_i32_f32(width_734)
   y_746 = sitofp_i32_f32(height_735)
   x_747 = slt32(np.int32(0), limit_736)
   bytes_939 = (np.int32(4) * width_734)
   mem_940 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_939) if (bytes_939 > np.int32(0)) else np.int32(1)))
   mem_942 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_939) if (bytes_939 > np.int32(0)) else np.int32(1)))
   group_size_965 = np.int32(512)
   num_groups_966 = squot32(((width_734 + group_size_965) - np.int32(1)),
                            group_size_965)
   if ((np.int32(1) * (num_groups_966 * group_size_965)) != np.int32(0)):
     self.map_kernel_930_var.set_args(np.float32(view_737), np.float32(y_745),
                                      np.float32(res_741), np.int32(width_734),
                                      mem_940, mem_942)
     cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_930_var,
                                (long((num_groups_966 * group_size_965)),),
                                (long(group_size_965),))
     if synchronous:
       self.queue.finish()
   nesting_size_879 = (height_735 * width_734)
   x_951 = (width_734 * np.int32(3))
   bytes_949 = (x_951 * height_735)
   mem_952 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_949) if (bytes_949 > np.int32(0)) else np.int32(1)))
   total_size_960 = (nesting_size_879 * np.int32(3))
   res_mem_948 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                           long(long(total_size_960) if (total_size_960 > np.int32(0)) else np.int32(1)))
   total_size_961 = (nesting_size_879 * np.int32(3))
   mem_944 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(total_size_961) if (total_size_961 > np.int32(0)) else np.int32(1)))
   total_size_962 = (nesting_size_879 * np.int32(3))
   mem_946 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(total_size_962) if (total_size_962 > np.int32(0)) else np.int32(1)))
   group_size_974 = np.int32(512)
   num_groups_975 = squot32((((width_734 * height_735) + group_size_974) - np.int32(1)),
                            group_size_974)
   if ((np.int32(1) * (num_groups_975 * group_size_974)) != np.int32(0)):
     self.map_kernel_881_var.set_args(mem_940, res_mem_948,
                                      np.int32(limit_736), mem_944, mem_946,
                                      np.int32(width_734), np.float32(res_742),
                                      mem_942, np.float32(view_738),
                                      np.float32(y_746),
                                      np.int32(nesting_size_879),
                                      np.byte(x_747), np.int32(height_735),
                                      mem_952)
     cl.enqueue_nd_range_kernel(self.queue, self.map_kernel_881_var,
                                (long((num_groups_975 * group_size_974)),),
                                (long(group_size_974),))
     if synchronous:
       self.queue.finish()
   x_955 = (width_734 * height_735)
   bytes_953 = (x_955 * np.int32(3))
   mem_956 = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE,
                       long(long(bytes_953) if (bytes_953 > np.int32(0)) else np.int32(1)))
   if ((((np.int32(1) * (height_735 + srem32((np.int32(16) - srem32(height_735,
                                                                    np.int32(16))),
                                             np.int32(16)))) * (np.int32(3) + srem32((np.int32(16) - srem32(np.int32(3),
                                                                                                            np.int32(16))),
                                                                                     np.int32(16)))) * width_734) != np.int32(0)):
     self.fut_kernel_map_transpose_i8_var.set_args(mem_956,
                                                   np.int32(np.int32(0)),
                                                   mem_952,
                                                   np.int32(np.int32(0)),
                                                   np.int32(height_735),
                                                   np.int32(np.int32(3)),
                                                   np.int32(((width_734 * height_735) * np.int32(3))),
                                                   cl.LocalMemory(long((((np.int32(16) + np.int32(1)) * np.int32(16)) * np.int32(1)))))
     cl.enqueue_nd_range_kernel(self.queue,
                                self.fut_kernel_map_transpose_i8_var,
                                (long((height_735 + srem32((np.int32(16) - srem32(height_735,
                                                                                  np.int32(16))),
                                                           np.int32(16)))),
                                 long((np.int32(3) + srem32((np.int32(16) - srem32(np.int32(3),
                                                                                   np.int32(16))),
                                                            np.int32(16)))),
                                 long(width_734)), (long(np.int32(16)),
                                                    long(np.int32(16)),
                                                    long(np.int32(1))))
     if synchronous:
       self.queue.finish()
   out_mem_963 = mem_956
   out_memsize_964 = bytes_953
   return (out_memsize_964, out_mem_963)
Example #40
def pix2ang_ring(nside, ipix):

# -------- number in each ring
    nside  = long(nside)
    npix   = 12L*nside*nside
    tind   = np.arange(npix)
    nq0    = (np.arange(nside)+1)*4
    nq     = np.concatenate([nq0,np.zeros(2*nside-1,np.int64)+(4*nside), \
                                 nq0[::-1]])
    thetaq = np.zeros(4L*nside-1)
    bound  = 2*nside*(1+nside)

    qind    = np.arange(nside)
    qindrev = nside - qind
    xoffs   = 2L*nside*(nside+1)
# -------- ring0 is the lowest pixel number in each ring
    ring0   = np.concatenate([2*qind*(qind+1), \
                                  xoffs+(4*nside)*np.arange(2*nside-1), \
                                  npix-2*qindrev*(qindrev+1)])

# -------- initialize arrays for output
    theta = np.zeros(npix)
    phi   = np.zeros(npix)


    pp   = np.byte(tind < bound)
    w_pp = np.where(pp)[0]
    n_pp = w_pp.size

    if n_pp > 0:
        qind = np.arange(nside)
        thetaq[qind] = np.arccos(1.0-((qind+1)/np.float(nside))**2/3.)

        q = np.array((np.sqrt(0.25 + 0.5*tind[w_pp])-0.5),dtype=int)
        theta[w_pp] = thetaq[q]
        phi[w_pp] = (tind[w_pp]-ring0[q]+0.5)/(nq/(2.0*np.pi))[q]


    w_eq = np.where((pp==0) & (tind<(npix-bound)))[0]
    n_eq = w_eq.size

    if n_eq>0:
        qind = np.arange(2*nside-1)+nside
        thetaq[qind] = np.arccos((2.0-(qind+1)/np.float(nside))*2./3.)

        q = (tind[w_eq]-bound)/(4*nside)+nside
        theta[w_eq] = thetaq[q]

# GGD: this line has been modified correctly... I think...
#        phi[w_eq] = (tind[w_eq]-ring0[q]+(q!=1)*0.5)/(4*nside/(2.0*np.pi))[q]
        phi[w_eq] = (tind[w_eq]-ring0[q]+(q!=1)*0.5)/(4*nside/(2.0*np.pi))


    w_sp = np.where(tind > (npix-bound))[0]
    n_sp = w_sp.size

    if n_sp > 0:
        qind = np.arange(nside) + (3*nside-1)
        thetaq[qind] = np.arccos(-1.0+((qind+1-4*nside)/np.float(nside))**2/3.)

        q = (4*nside-2) - np.array(np.sqrt(0.5*((0.5 + npix-1)- \
                                                    tind[w_sp]))-0.5, \
                                       dtype=long)

        theta[w_sp] = thetaq[q]
        phi[w_sp] = (tind[w_sp]-ring0[q]+0.5)/(nq/(2.0*np.pi))[q]


# -------- Now look up desired pixels from table
    return theta[ipix], phi[ipix]
Example #41
File: mapplot.py Project: gdobler/glib
def mapplot(map, rng=None, title=None, cbar=None, aitoff=None, mask=None, \
                cmap=None, psfile=None, pngfile=None, tsize=None, \
                noerase=None, units=None, usize=None, stretch=None, \
                blackbg=None, figsize=None, full=None, ticks=None):

# -------- set default min and max
    if rng==None:
        stretch  = stretch if stretch else 1.0
        index    = np.where(mask) if mask!=None else np.where(map)
        med      = np.mean(map[index])
        sig      = np.std(map[index])
        immin    = map[index].min()
        immax    = map[index].max()
        min, max = med + np.array([-2.0*sig,10.0*sig])*stretch
        min      = immin if immin > min else min
        max      = immax if immax < max else max
        rng      = [min,max]



# -------- color map and masking
    if cmap!= None:
        if cmap=='cubehelix':
            cmap = make_cmap('cubehelix')
        else:
            cmap = cm.get_cmap(cmap,256)
    else:
        cmap = cm.get_cmap('gist_heat',256)

    if mask!=None:
        map = np.ma.array(map,mask=(mask==0))
        cmap.set_bad(color = '0.25')



# -------- set background color
    pmap = map.clip(min=rng[0], max=rng[1])
    cmap.set_over(color='w')
    cmap.set_under(color='k')


# -------- check for healpix
    if map.ndim==1:
        nside = np.long(np.sqrt(map.size/12l))

        print 'MAPPLOT: Assuming HEALPix with Nside = ', nside

        if aitoff!=None: # check for aitoff
            print 'MAPPLOT:    Projecting to aitoff...'
            sidy = 850.
            sidx = 2*sidy
            pmap = aitoff_proj(pmap, nx=sidx, ny=sidy, blackbg=blackbg)

            if mask!=None:
                pmsk = aitoff_proj(mask, nx=sidx, ny=sidy, blackbg=blackbg)
                pmap = np.ma.array(pmap,mask=np.byte((pmsk < 0.9) & \
                                                         (pmsk >= 0.0)))
        else:
            print 'MAPPLOT:    Projecting to rectangle...'
            cind = healcart_ind(pmap)
            pmap = pmap[cind]



# -------- plot it
    if figsize==None:
        sz    = pmap.shape
        if sz[0] > sz[1]:
            height = 7.5
            width  = height*np.float(sz[1])/np.float(sz[0])*4./3.
        else:
            width  = 7.5
            height = width*np.float(sz[0])/np.float(sz[1])*4./3.
        figsize = [width,height]

    if not noerase:
        plt.figure(figsize=figsize)
    plt.imshow(pmap, cmap=cmap)
    plt.clim(rng)
    plt.axis('off')
    if title: plt.title(title,fontsize=tsize)

    if units: # add colorbar with units
        cb = plt.colorbar(orientation='horizontal', pad=0.05)
        cb.set_label(units, fontsize=usize)
        if ticks!=None: cb.set_ticks(ticks)
    elif cbar: # add colorbar with no units
        cb = plt.colorbar(orientation='horizontal', pad=0.05)
        if ticks!=None: cb.set_ticks(ticks)

    if full:
        plt.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.0)
    else:
        plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
    plt.show()




# -------- save it
    if psfile: plt.savefig(psfile, bbox_inches='tight')
    if pngfile: plt.savefig(pngfile, bbox_inches='tight')


    return
Example #42
def on_key(event):
    global fileList, targetList, fig, imgNum, brushSize
    global maskDir, maskImg
    global prevImg,   thisImg,   nextImg
    global prevAxImg, thisAxImg, nextAxImg
    global prevTarget, thisTarget, nextTarget
    global prevMin,   thisMin,   nextMin
    global prevMax,   thisMax,   nextMax
    global prevLabel, thisLabel, nextLabel

    # Handle brush sizing
    if event.key == '1':
        brushSize = 1
    elif event.key == '2':
        brushSize = 2
    elif event.key == '3':
        brushSize = 3
    elif event.key == '4':
        brushSize = 4
    elif event.key == '5':
        brushSize = 5
    elif event.key == '6':
        brushSize = 6

    # Increment the image number
    if event.key == 'right' or event.key == 'left':
        if event.key == 'right':
            #Advance to the next image
            imgNum += 1

            # Read in the new files
            prevImg = thisImg
            thisImg = nextImg
            nextImg = AstroImage(fileList[(imgNum + 1) % len(fileList)])

            # Update target info
            prevTarget = thisTarget
            thisTarget = nextTarget
            nextTarget = targetList[(imgNum + 1) % len(fileList)]

            # Compute new image display minima
            prevMin = thisMin
            thisMin = nextMin
            nextMin = np.median(nextImg.arr) - 0.25*np.std(nextImg.arr)

            # Compute new image display maxima
            prevMax = thisMax
            thisMax = nextMax
            nextMax = np.median(nextImg.arr) + 2*np.std(nextImg.arr)

        if event.key == 'left':
            #Move back to the previous image
            imgNum -= 1

            # Read in the new files
            nextImg = thisImg
            thisImg = prevImg
            prevImg = AstroImage(fileList[(imgNum - 1) % len(fileList)])

            # Update target info
            nextTarget = thisTarget
            thisTarget = prevTarget
            prevTarget = targetList[(imgNum - 1) % len(fileList)]

            # Compute new image display minima
            nextMin = thisMin
            thisMin = prevMin
            prevMin = np.median(prevImg.arr) - 0.25*np.std(prevImg.arr)

            # Compute new image display maxima
            nextMax = thisMax
            thisMax = prevMax
            prevMax = np.median(prevImg.arr) + 2*np.std(prevImg.arr)

        #*******************************
        # Update the displayed mask
        #*******************************

        # Check which mask files might be usable...
        prevMaskFile = os.path.join(maskDir,
            os.path.basename(prevImg.filename))
        thisMaskFile = os.path.join(maskDir,
            os.path.basename(thisImg.filename))
        nextMaskFile = os.path.join(maskDir,
            os.path.basename(nextImg.filename))
        if os.path.isfile(thisMaskFile):
            # If the mask for this file exists, use it
            print('using this mask: ',os.path.basename(thisMaskFile))
            maskImg = AstroImage(thisMaskFile)
        elif os.path.isfile(prevMaskFile) and (prevTarget == thisTarget):
            # Otherwise check for the mask for the previous file
            print('using previous mask: ',os.path.basename(prevMaskFile))
            maskImg = AstroImage(prevMaskFile)
        elif os.path.isfile(nextMaskFile) and (nextTarget == thisTarget):
            # Then check for the mask of the next file
            print('using next mask: ',os.path.basename(nextMaskFile))
            maskImg = AstroImage(nextMaskFile)
        else:
            # If none of those files exist, build a blank slate
            # Build a mask template (0 = not masked, 1 = masked)
            maskImg       = thisImg.copy()
            maskImg.filename = thisMaskFile
            maskImg.arr   = maskImg.arr.astype(np.int16) * np.int16(0)
            maskImg.dtype = np.byte
            maskImg.header['BITPIX'] = 16

        # Update contour plot (clear old lines redo contouring)
        axarr[1].collections = []
        axarr[1].contour(xx, yy, maskImg.arr, levels=[0.5], colors='white', alpha = 0.2)

        # Reassign image display limits
        prevAxImg.set_clim(vmin = prevMin, vmax = prevMax)
        thisAxImg.set_clim(vmin = thisMin, vmax = thisMax)
        nextAxImg.set_clim(vmin = nextMin, vmax = nextMax)

        # Display the new images
        prevAxImg.set_data(prevImg.arr)
        thisAxImg.set_data(thisImg.arr)
        nextAxImg.set_data(nextImg.arr)

        # Update the annotation
        axList = fig.get_axes()
        axList[1].set_title(os.path.basename(thisImg.filename))

        prevStr   = (str(prevImg.header['OBJECT']) + '\n' +
                     str(prevImg.header['FILTNME3'] + '\n' +
                     str(prevImg.header['POLPOS'])))
        thisStr   = (str(thisImg.header['OBJECT']) + '\n' +
                     str(thisImg.header['FILTNME3'] + '\n' +
                     str(thisImg.header['POLPOS'])))
        nextStr   = (str(nextImg.header['OBJECT']) + '\n' +
                     str(nextImg.header['FILTNME3'] + '\n' +
                     str(nextImg.header['POLPOS'])))
        prevLabel.set_text(prevStr)
        thisLabel.set_text(thisStr)
        nextLabel.set_text(nextStr)

        # Update the display
        fig.canvas.draw()

    # Save the generated mask
    if event.key == 'enter':
        # Make sure the header has the right values
        maskImg.header = thisImg.header

        # Write the mask to disk
        print('Writing mask for file ', os.path.basename(maskImg.filename))
        maskImg.write()

    # Clear out the mask values
    if event.key == 'backspace':
        # Clear out the mask array
        maskImg.arr = maskImg.arr * np.byte(0)

        # Update contour plot (clear old lines, redo contouring)
        axarr[1].collections = []
        axarr[1].contour(xx, yy, maskImg.arr, levels=[0.5], colors='white', alpha = 0.2)

        # Update the display
        fig.canvas.draw()
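
The display limits above follow a simple median/sigma stretch (median - 0.25*sigma for the floor, median + 2*sigma for the ceiling). A minimal helper capturing that convention, assuming a 2-D numpy image array:

import numpy as np

def display_limits(arr, low_sigma=0.25, high_sigma=2.0):
    # Return (vmin, vmax) for the median +/- k*sigma stretch used above.
    med, std = np.median(arr), np.std(arr)
    return med - low_sigma*std, med + high_sigma*std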
Example #43
File: rec_sys.py  Project: RuTing1/learning
__email__ = '*****@*****.**'

"""
import pandas as pd
import numpy as np
import copy

data = pd.read_csv('rating_matrix.csv')
name = pd.read_csv('name.csv', index_col='name')
user_id = pd.read_csv('user_id.csv', index_col='user_id')

# Build each user's average-rating table, save_avg
avg_count = []
count = []
for i in data.axes[1]:
    data[i] = np.byte(copy.deepcopy(data[i]))
    sum_lie = 0
    count1 = 0
    for j in data[i]:
        if j == 0:
            count1 = count1 + 1
        else:
            sum_lie += j
    avg_lie = sum_lie / (24 - count1)
    count.append(24 - count1)
    avg_count.append(avg_lie)
save_avg = pd.DataFrame([data.axes[1], avg_count, count]).T
save_avg.rename(columns={0: 'user', 1: 'avg_count', 2: 'count'}, inplace=True)
save_avg = save_avg.set_index('user')
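
The column loops above average the non-zero ratings one entry at a time and hard-code 24 rows. A vectorized sketch of the same computation, reusing the `data`, `pd`, and `np` names from the snippet and treating 0 as "not rated":

nonzero = data.replace(0, np.nan)                # 0 marks a missing rating, not a score
save_avg_v = pd.DataFrame({
    'avg_count': nonzero.mean(axis=0),           # mean over rated entries only
    'count': nonzero.count(axis=0),              # number of ratings per user
})
save_avg_v.index.name = 'user'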
Example #44
#! /usr/bin/env python

from pydal import *
import numpy
import sys

# The BeamFormed object represents the file.
#  The parameter is the name of the beam-formed file.
if len(sys.argv) > 1:
    file = BeamFormed(sys.argv[1])
else:
    print "Please provide a beam-formed hdf5 file as input."
    sys.exit(1)

# get beam 0
beam = file.getBeam(0)
for subband in range(beam.n_subbands()):
    data = beam.getSubbandData_XY(subband, 0, -1)

    c = numpy.column_stack((numpy.byte(data[0].real),
                            numpy.byte(data[0].imag),
                            numpy.byte(data[1].real),
                            numpy.byte(data[1].imag)))

    tmpfile = "B0329+54_080313_subband%02d" % (subband)
    fileobj = open(tmpfile, mode='wb')
    fileobj.write(c.flatten())
    fileobj.close()
    print "Wrote ", tmpfile
Example #45
def main():      
    gdal.AllRegister()
    path = auxil.select_directory('Input directory')
    if path:
        os.chdir(path)        
#  input image    
    infile = auxil.select_infile(title='Image file') 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)      
    else:
        return  
    pos =  auxil.select_pos(bands)   
    if not pos:
        return
    N = len(pos) 
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b)) 
#  training algorithm
    trainalg = auxil.select_integer(1,msg='1:Maxlike,2:Backprop,3:Congrad,4:SVM') 
    if not trainalg:
        return           
#  training data (shapefile)      
    trnfile = auxil.select_infile(filt='.shp',title='Train shapefile')
    if trnfile:
        trnDriver = ogr.GetDriverByName('ESRI Shapefile')
        trnDatasource = trnDriver.Open(trnfile,0)
        trnLayer = trnDatasource.GetLayer() 
        trnsr = trnLayer.GetSpatialRef()             
    else:
        return     
    tstfile = auxil.select_outfile(filt='.tst', title='Test results file') 
    if not tstfile:
        print 'No test output'      
#  outfile
    outfile, outfmt = auxil.select_outfilefmt(title='Classification file')   
    if not outfile:
        return                   
    if trainalg in (2,3,4):
#      class probabilities file, hidden neurons
        probfile, probfmt = auxil.select_outfilefmt(title='Probabilities file')
    else:
        probfile = None     
    if trainalg in (2,3):    
        L = auxil.select_integer(8,'Number of hidden neurons')    
        if not L:
            return                  
#  coordinate transformation from training to image projection   
    ct= osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1       
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:    '+infile
    print 'training: '+trnfile  
    if trainalg == 1:
        print 'Maximum Likelihood'
    elif trainalg == 2:
        print 'Neural Net (Backprop)'
    elif trainalg ==3:
        print 'Neural Net (Congrad)'
    else:
        print 'Support Vector Machine'               
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
        classids |= {classid}
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] for ffn
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]     
#  setup output datasets 
    driver = gdal.GetDriverByName(outfmt)    
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:
        driver = gdal.GetDriverByName(probfmt)    
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
    if tstfile:
#  train on 2/3 training examples         
        Gstrn = Gs[0:2*m//3,:]
        lstrn = ls[0:2*m//3,:] 
        Gstst = Gs[2*m//3:,:]  
        lstst = ls[2*m//3:,:]    
    else:
        Gstrn = Gs
        lstrn = ls         
    if   trainalg == 1:
        classifier = sc.Maxlike(Gstrn,lstrn)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gstrn,lstrn,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gstrn,lstrn,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gstrn,lstrn)         
            
    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if trainalg in [2,3]:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')              
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N))    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  lstrn.shape[1]+1                     
        if (outfmt == 'ENVI') and (K<19):
#          try to make an ENVI classification header file            
            hdr = header.Header() 
            headerfile = outfile+'.hdr'
            f = open(headerfile)
            line = f.readline()
            envihdr = ''
            while line:
                envihdr += line
                line = f.readline()
            f.close()         
            hdr.read(envihdr)
            hdr['file type'] ='ENVI Classification'
            hdr['classes'] = str(K)
            classlookup = '{0'
            for i in range(1,3*K):
                classlookup += ', '+str(ctable[i])
            classlookup +='}'    
            hdr['class lookup'] = classlookup
            hdr['class names'] = classnames
            f = open(headerfile,'w')
            f.write(str(hdr))
            f.close()             
        print 'thematic map written to: %s'%outfile
        if trainalg in [2,3]:
            print 'please close the cross entropy plot to continue'
            plt.show()
        if tstfile:
            with open(tstfile,'w') as f:
                print >>f, 'FFN test results for %s'%infile
                print >>f, time.asctime()
                print >>f, 'Classification image: %s'%outfile
                print >>f, 'Class probabilities image: %s'%probfile
                print >>f, lstst.shape[0],lstst.shape[1]
                classes, _ = classifier.classify(Gstst)
                labels = np.argmax(lstst,axis=1)+1
                for i in range(len(classes)):
                    print >>f, classes[i], labels[i]              
                print 'test results written to: %s'%tstfile
        print 'done'
    else:
        print 'an error occurred'
        return 
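
The boundary manipulation above is the inverse of the GDAL affine geotransform. The same algebra as a small standalone helper (a sketch, assuming `gt` is the 6-tuple returned by GetGeoTransform()):

import numpy as np

def map_to_pixel(xy, gt):
    # Map coordinates -> fractional pixel coordinates, mirroring the
    # bdry transformation in the polygon loop above.
    A = np.array([[gt[1], gt[2]], [gt[4], gt[5]]])
    offset = np.array([gt[0], gt[3]])
    return (np.asarray(xy, dtype=float) - offset).dot(np.linalg.inv(A))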
Example #46
'''
2326509471271448 ;2 correct
5251583379644322 ;2 correct
1748270476758276 ;3 correct
4895722652190306 ;1 correct
3041631117224635 ;3 correct
1841236454324589 ;3 correct
2659862637316867 ;2 correct

Find the unique 16-digit secret sequence.
============================================================
'''
import numpy as np # Requires numpy 1.7.1+
from itertools import combinations

'''Value of a guess entry known to be either correct or wrong.'''
X = np.byte(-1)

class Position(object):
    '''Holds the current position: guesses, correct elements and the constructed secret sequence.'''
    def __init__(self, guess, correct, seq, depth=0, remove_zero_rows=True, wrong=None):
        self.depth = depth
        self.indent = self.depth * '  ' 

        self.guess = guess
        self.correct = correct
        self.seq = seq
        # Number of digits with unknown status (neither correct nor incorrect) in each guess
        self.left = np.sum(self.guess != X, 1)
        self.wrong = wrong if wrong else [set() for _ in xrange(self.n)]
        if remove_zero_rows: self.remove_zero_rows()
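
A toy illustration of the X sentinel bookkeeping used by Position (made-up values, not the author's data): digits whose status has been resolved are overwritten with X, and the remaining unknowns are counted just as `left` does above.

import numpy as np

X = np.byte(-1)
guess = np.array([2, 3, 2, 6], dtype=np.byte)
guess[1] = X                     # digit 1 is now known (correct or wrong)
left = np.sum(guess != X)        # 3 digits still have unknown status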
Example #47
def merge(inbmapfn1,inbmapfn2):
    incmapfn1 = inbmapfn1.replace('bmap','cmap')
    incmapfn2 = inbmapfn2.replace('bmap','cmap')
    insmapfn1 = inbmapfn1.replace('bmap','smap')
    insmapfn2 = inbmapfn2.replace('bmap','smap')    
#  output file names
    outbmapfn = inbmapfn1.replace('VV','VVVH')
    outcmapfn = outbmapfn.replace('bmap','cmap')
    outsmapfn = outbmapfn.replace('bmap','smap')
    outfmapfn = outbmapfn.replace('bmap','fmap')
#  change map dimensions and georeferencing
    gdal.AllRegister()
    inDataset1 = gdal.Open(inbmapfn1,GA_ReadOnly)
    inDataset2 = gdal.Open(inbmapfn2,GA_ReadOnly)
    geotransform = inDataset1.GetGeoTransform()
    projection = inDataset1.GetProjection() 
    driver = inDataset1.GetDriver()
    cols = inDataset1.RasterXSize
    rows = inDataset1.RasterYSize    
    bands = inDataset1.RasterCount 
#  merge the VV and VH bmaps by inclusive OR
    bmap = np.zeros((rows,cols,bands),dtype=np.int16)
    for k in range(bands):
        vvband = inDataset1.GetRasterBand(k+1).ReadAsArray(0,0,cols,rows)
        vhband = inDataset2.GetRasterBand(k+1).ReadAsArray(0,0,cols,rows)
        bmap[:,:,k] = (vvband | vhband)/255  
    bmap = np.byte(bmap)  
#  merged frequency map
    fmap = np.sum(bmap,axis=2)
#  merged last change map
    inDataset1 = gdal.Open(incmapfn1,GA_ReadOnly)
    inDataset2 = gdal.Open(incmapfn2,GA_ReadOnly)
    tmp = np.zeros((rows,cols,2),dtype=np.byte)
    tmp[:,:,0] = inDataset1.GetRasterBand(1).ReadAsArray(0,0,cols,rows)
    tmp[:,:,1] = inDataset2.GetRasterBand(1).ReadAsArray(0,0,cols,rows)
    cmap = np.max(tmp,axis=2)
#  merged first change map
    inDataset1 = gdal.Open(insmapfn1,GA_ReadOnly)
    inDataset2 = gdal.Open(insmapfn2,GA_ReadOnly)
    tmp[:,:,0] = inDataset1.GetRasterBand(1).ReadAsArray(0,0,cols,rows)
    tmp[:,:,1] = inDataset2.GetRasterBand(1).ReadAsArray(0,0,cols,rows)
    smap = np.max(tmp,axis=2)    
#  write merged bmap to disk
    outDataset = driver.Create(outbmapfn,cols,rows,bands,GDT_Byte)
    if geotransform is not None:
        outDataset.SetGeoTransform(geotransform)        
    if projection is not None:
        outDataset.SetProjection(projection)
    for k in range(bands):        
        outBand = outDataset.GetRasterBand(k+1)
        outBand.WriteArray(bmap[:,:,k],0,0) 
        outBand.FlushCache()
    print 'bmap written to %s'%outbmapfn    
#  write merged cmap to disk
    outDataset = driver.Create(outcmapfn,cols,rows,1,GDT_Byte)
    if geotransform is not None:
        outDataset.SetGeoTransform(geotransform)        
    if projection is not None:
        outDataset.SetProjection(projection)   
    outBand = outDataset.GetRasterBand(1)
    outBand.WriteArray(cmap,0,0)
    outBand.FlushCache()
    print 'cmap written to %s'%outcmapfn    
#  write merged smap to disk
    outDataset = driver.Create(outsmapfn,cols,rows,1,GDT_Byte)
    if geotransform is not None:
        outDataset.SetGeoTransform(geotransform)        
    if projection is not None:
        outDataset.SetProjection(projection)   
    outBand = outDataset.GetRasterBand(1)
    outBand.WriteArray(smap,0,0) 
    outBand.FlushCache()
    print 'smap written to %s'%outsmapfn   
#  write merged fmap to disk
    outDataset = driver.Create(outfmapfn,cols,rows,1,GDT_Byte)
    if geotransform is not None:
        outDataset.SetGeoTransform(geotransform)        
    if projection is not None:
        outDataset.SetProjection(projection)   
    outBand = outDataset.GetRasterBand(1)
    outBand.WriteArray(fmap,0,0)  
    outBand.FlushCache()
    print 'fmap written to %s'%outfmapfn       
    outDataset = None
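
The merge rules above are a bitwise OR for the change bitmaps and an elementwise max for the first/last change maps. A toy demonstration on 2x2 arrays:

import numpy as np

vv = np.array([[0, 255], [255,   0]], dtype=np.uint8)
vh = np.array([[0,   0], [255, 255]], dtype=np.uint8)
bmap = (vv | vh) // 255          # inclusive OR, rescaled from 0/255 to 0/1
cmap = np.maximum(vv, vh)        # keep the larger (later) change index
print(bmap)                      # [[0 1] [1 1]]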
Example #48
def solve (g, correct):
    '''Main DFS driver call.'''
    solution = _search(Position(g, correct, np.tile(np.byte(X), (g.shape[1],))))
    r = ssq(residual((g, correct), solution)) if solution is not None else -1
    return ''.join(map(str, solution)) if solution is not None and r == 0 else None
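
residual and ssq are not included in this excerpt. Hypothetical definitions consistent with how solve uses them (not the author's code): the residual is each guess's claimed "correct" count minus the matches the candidate sequence actually produces, and ssq is its sum of squares.

import numpy as np

def residual(problem, seq):
    # Hypothetical: claimed correct counts minus actual positional matches.
    guesses, correct = problem
    return np.asarray(correct) - np.sum(guesses == seq, axis=1)

def ssq(r):
    # Hypothetical: sum of squared residuals; 0 means all clues are satisfied.
    return int(np.sum(np.asarray(r)**2))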
Example #49
File: em.py  Project: jeromeku/rsensing
def main():
    usage = '''
Usage: 
---------------------------------------------------------
python %s  [-p "bandPositions"] [-d "spatialDimensions"] 
[-K number of clusters] [-M max scale][-m min scale] 
[-t initial annealing temperature] [-s spatial mixing factor] 
[-P generate class probabilities image] filename

bandPositions and spatialDimensions are lists, 
e.g., -p [1,2,4] -d [0,0,400,400]  

If the input file is named 

         path/filebasename.ext then

The output classification file is named 

         path/filebasename_em.ext

and the class probabilities output file is named

         path/filebasename_emprobs.ext
--------------------------------------------------------''' %sys.argv[0]
    options, args = getopt.getopt(sys.argv[1:],'hp:d:K:M:m:t:s:P')
    pos = None
    dims = None  
    K,max_scale,min_scale,T0,beta,probs = (None,None,None,None,None,None)        
    for option, value in options:
        if option == '-h':
            print usage
            return
        elif option == '-p':
            pos = eval(value)
        elif option == '-d':
            dims = eval(value) 
        elif option == '-K':
            K = eval(value)
        elif option == '-M':
            max_scale = eval(value)
        elif option == '-m':
            min_scale = eval(value)  
        elif option == '-t':
            T0 = eval(value)
        elif option == '-s':
            beta = eval(value) 
        elif option == '-P':
            probs = True                              
    if len(args) != 1: 
        print 'Incorrect number of arguments'
        print usage
        sys.exit(1)       
    if K is None:
        K = 6
    if max_scale is None:
        max_scale = 2   
    else:
        max_scale = min((max_scale,3))  
    if min_scale is None:
        min_scale = 0   
    else:
        min_scale = min((max_scale,min_scale)) 
    if T0 is None:
        T0 = 0.5   
    if beta is None:
        beta = 0.5   
    if probs is None:
        probs = False
                                                  
    gdal.AllRegister()
    infile = args[0]
    try:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)     
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
    except Exception as e:
        print 'Error: %s  --Image could not be read'%e
        sys.exit(1)
    if pos is not None:
        bands = len(pos)
    else:
        pos = range(1,bands+1)
    if dims:
        x0,y0,cols,rows = dims
    else:
        x0 = 0
        y0 = 0   
    class_image = np.zeros((rows,cols),dtype=np.byte)   
    path = os.path.dirname(infile)
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    outfile = path+'/'+root+'_em'+ext
    if probs:
        probfile = path+'/'+root+'_emprobs'+ext
    print '--------------------------'
    print '     EM clustering'
    print '--------------------------'
    print 'infile:   %s'%infile
    print 'clusters: %i'%K
    print 'T0:       %f'%T0
    print 'beta:     %f'%beta         

    start = time.time()                                     
#  read in image and compress 
    path = os.path.dirname(infile) 
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    DWTbands = []               
    for b in pos:
        band = inDataset.GetRasterBand(b)
        DWTband = auxil.DWTArray(band.ReadAsArray(x0,y0,cols,rows).astype(float),cols,rows)
        for i in range(max_scale):
            DWTband.filter()
        DWTbands.append(DWTband)
    rows,cols = DWTbands[0].get_quadrant(0).shape    
    G = np.transpose(np.array([DWTbands[i].get_quadrant(0,float=True).ravel() for i in range(bands)]))
#  initialize membership matrix    
    n = G.shape[0]
    U = np.random.random((K,n))
    den = np.sum(U,axis=0)
    for j in range(K):
        U[j,:] = U[j,:]/den
#  cluster at minimum scale
    try:
        U,Ms,Cs,Ps,pdens = em(G,U,T0,beta,rows,cols)
    except:
        print 'em failed' 
        return     
#  sort clusters wrt partition density
    idx = np.argsort(pdens)  
    idx = idx[::-1]
    U = U[idx,:]
#  clustering at increasing scales
    for i in range(max_scale-min_scale):
#      expand U and renormalize         
        U = np.reshape(U,(K,rows,cols))  
        rows = rows*2
        cols = cols*2
        U = ndi.zoom(U,(1,2,2))
        U = np.reshape(U,(K,rows*cols)) 
        idx = np.where(U<0.0)
        U[idx] = 0.0
        den = np.sum(U,axis=0)        
        for j in range(K):
            U[j,:] = U[j,:]/den
#      expand the image
        for i in range(bands):
            DWTbands[i].invert()
        G = np.transpose(np.array([DWTbands[i].get_quadrant(0,float=True).ravel() for i in range(bands)]))  
#      cluster
        unfrozen = np.where(np.max(U,axis=0) < 0.90)
        try:
            U,Ms,Cs,Ps,pdens = em(G,U,0.0,beta,rows,cols,unfrozen=unfrozen)
        except:
            print 'em failed' 
            return                         
    print 'Cluster mean vectors'
    print Ms
    print 'Cluster covariance matrices'
    for k in range(K):
        print 'cluster: %i'%k
        print Cs[k]
#  up-sample class memberships if necessary
    if min_scale>0:
        U = np.reshape(U,(K,rows,cols))
        f = 2**min_scale  
        rows = rows*f
        cols = cols*f
        U = ndi.zoom(U,(1,f,f))
        U = np.reshape(U,(K,rows*cols)) 
        idx = np.where(U<0.0)
        U[idx] = 0.0
        den = np.sum(U,axis=0)        
        for j in range(K):
            U[j,:] = U[j,:]/den        
#  classify
    labels = np.byte(np.argmax(U,axis=0)+1)
    class_image[0:rows,0:cols] = np.reshape(labels,(rows,cols))
    rows1,cols1 = class_image.shape
#  write to disk
    driver = inDataset.GetDriver()    
    outDataset = driver.Create(outfile,cols1,rows1,1,GDT_Byte)
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        gt = list(geotransform)
        gt[0] = gt[0] + x0*gt[1]
        gt[3] = gt[3] + y0*gt[5]
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection)               
    outBand = outDataset.GetRasterBand(1)
    outBand.WriteArray(class_image,0,0) 
    outBand.FlushCache() 
    outDataset = None   
#  write class membership probability file if desired  
    if probs:   
        outDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            outDataset.SetGeoTransform(tuple(gt)) 
        if projection is not None:
            outDataset.SetProjection(projection)  
        for k in range(K):
            probs = np.reshape(U[k,:],(rows,cols))
            probs = np.byte(probs*255)
            outBand = outDataset.GetRasterBand(k+1)
            outBand.WriteArray(probs,0,0)
            outBand.FlushCache()    
        outDataset = None    
        print 'class probabilities written to: %s'%probfile                                  
    inDataset = None
    if (ext == '') and (K<19):
#  try to make an ENVI classification header file            
        hdr = header.Header() 
        headerfile = outfile+'.hdr'
        f = open(headerfile)
        line = f.readline()
        envihdr = ''
        while line:
            envihdr += line
            line = f.readline()
        f.close()         
        hdr.read(envihdr)
        hdr['file type'] ='ENVI Classification'
        hdr['classes'] = str(K+1)
        classlookup = '{0'
        for i in range(1,3*(K+1)):
            classlookup += ', '+str(ctable[i])
        classlookup +='}'    
        hdr['class lookup'] = classlookup
        hdr['class names'] = ['class %i'%i for i in range(K+1)]
        f = open(headerfile,'w')
        f.write(str(hdr))
        f.close()                 
    print 'classified image written to: '+outfile       
    print 'elapsed time: '+str(time.time()-start)                        
    print '--done------------------------'  
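
At each scale the membership matrix U is zoomed up, clipped at zero, and renormalized so every pixel's memberships sum to one. That per-scale step as a standalone helper (a sketch of the logic repeated in the loop above):

import numpy as np
from scipy import ndimage as ndi

def upsample_memberships(U, K, rows, cols, f=2):
    # Zoom a (K, rows*cols) membership matrix by factor f, then renormalize.
    U = ndi.zoom(np.reshape(U, (K, rows, cols)), (1, f, f))
    U = np.reshape(U, (K, rows*f * cols*f))
    U[U < 0.0] = 0.0             # spline zoom can overshoot below zero
    return U / np.sum(U, axis=0)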
Example #50
def main():
    gdal.AllRegister()
    path = auxil.select_directory('Choose working directory')
    if path:
        os.chdir(path) 
    infile = auxil.select_infile(title='Select an image') 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)     
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
    else:
        return
    pos =  auxil.select_pos(bands) 
    if not pos:
        return   
    bands = len(pos)
    dims = auxil.select_dims([0,0,cols,rows])
    if dims:
        x0,y0,cols,rows = dims
    else:
        return   
    class_image = np.zeros((rows,cols),dtype=np.byte)
    K = auxil.select_integer(6,'Number of clusters')
    max_scale = auxil.select_integer(2,'Maximum scaling factor')
    max_scale = min((max_scale,3))
    min_scale = auxil.select_integer(0,'Minimum scaling factor')
    min_scale = min((max_scale,min_scale))
    T0 = auxil.select_float(0.5,'Initial annealing temperature')
    beta = auxil.select_float(0.5,'Spatial mixing parameter')            
    outfile, outfmt = auxil.select_outfilefmt('Select output classification file')  
    if not outfile:
        return
    probfile, probfmt = auxil.select_outfilefmt('Select output probability file (optional)')  
    print '========================='
    print '     EM clustering'
    print '========================='
    print 'infile:   %s'%infile
    print 'clusters: %i'%K
    print 'T0:       %f'%T0
    print 'beta:     %f'%beta         

    start = time.time()                                     
#  read in image and compress 
    DWTbands = []               
    for b in pos:
        band = inDataset.GetRasterBand(b)
        DWTband = auxil.DWTArray(band.ReadAsArray(x0,y0,cols,rows).astype(float),cols,rows)
        for i in range(max_scale):
            DWTband.filter()
        DWTbands.append(DWTband)
    rows,cols = DWTbands[0].get_quadrant(0).shape    
    G = np.transpose(np.array([DWTbands[i].get_quadrant(0,float=True).ravel() for i in range(bands)]))
#  initialize membership matrix    
    n = G.shape[0]
    U = np.random.random((K,n))
    den = np.sum(U,axis=0)
    for j in range(K):
        U[j,:] = U[j,:]/den
#  cluster at minimum scale
    try:
        U,Ms,Cs,Ps,pdens = em(G,U,T0,beta,rows,cols)
    except:
        print 'em failed' 
        return     
#  sort clusters wrt partition density
    idx = np.argsort(pdens)  
    idx = idx[::-1]
    U = U[idx,:]
#  clustering at increasing scales
    for i in range(max_scale-min_scale):
#      expand U and renormalize         
        U = np.reshape(U,(K,rows,cols))  
        rows = rows*2
        cols = cols*2
        U = ndi.zoom(U,(1,2,2))
        U = np.reshape(U,(K,rows*cols)) 
        idx = np.where(U<0.0)
        U[idx] = 0.0
        den = np.sum(U,axis=0)        
        for j in range(K):
            U[j,:] = U[j,:]/den
#      expand the image
        for i in range(bands):
            DWTbands[i].invert()
        G = np.transpose(np.array([DWTbands[i].get_quadrant(0,float=True).ravel() for i in range(bands)]))  
#      cluster
        unfrozen = np.where(np.max(U,axis=0) < 0.90)
        try:
            U,Ms,Cs,Ps,pdens = em(G,U,0.0,beta,rows,cols,unfrozen=unfrozen)
        except:
            print 'em failed' 
            return                         
    print 'Cluster mean vectors'
    print Ms
    print 'Cluster covariance matrices'
    for k in range(K):
        print 'cluster: %i'%k
        print Cs[k]
#  up-sample class memberships if necessary
    if min_scale>0:
        U = np.reshape(U,(K,rows,cols))
        f = 2**min_scale  
        rows = rows*f
        cols = cols*f
        U = ndi.zoom(U,(1,f,f))
        U = np.reshape(U,(K,rows*cols)) 
        idx = np.where(U<0.0)
        U[idx] = 0.0
        den = np.sum(U,axis=0)        
        for j in range(K):
            U[j,:] = U[j,:]/den        
#  classify
    labels = np.byte(np.argmax(U,axis=0)+1)
    class_image[0:rows,0:cols] = np.reshape(labels,(rows,cols))
    rows1,cols1 = class_image.shape
#  write to disk
    driver = gdal.GetDriverByName(outfmt)    
    outDataset = driver.Create(outfile,cols1,rows1,1,GDT_Byte)
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        gt = list(geotransform)
        gt[0] = gt[0] + x0*gt[1]
        gt[3] = gt[3] + y0*gt[5]
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection)               
    outBand = outDataset.GetRasterBand(1)
    outBand.WriteArray(class_image,0,0) 
    outBand.FlushCache() 
    outDataset = None   
#  write class membership probability file if desired  
    if probfile:
        driver = gdal.GetDriverByName(probfmt)    
        outDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            outDataset.SetGeoTransform(tuple(gt)) 
        if projection is not None:
            outDataset.SetProjection(projection)  
        for k in range(K):
            probs = np.reshape(U[k,:],(rows,cols))
            probs = np.byte(probs*255)
            outBand = outDataset.GetRasterBand(k+1)
            outBand.WriteArray(probs,0,0)
            outBand.FlushCache()    
        outDataset = None    
        print 'class probabilities written to: %s'%probfile                                  
    inDataset = None
    if (outfmt == 'ENVI') and (K<19):
#  try to make an ENVI classification header file            
        hdr = header.Header() 
        headerfile = outfile+'.hdr'
        f = open(headerfile)
        line = f.readline()
        envihdr = ''
        while line:
            envihdr += line
            line = f.readline()
        f.close()         
        hdr.read(envihdr)
        hdr['file type'] ='ENVI Classification'
        hdr['classes'] = str(K+1)
        classlookup = '{0'
        for i in range(1,3*(K+1)):
            classlookup += ', '+str(ctable[i])
        classlookup +='}'    
        hdr['class lookup'] = classlookup
        hdr['class names'] = ['class %i'%i for i in range(K+1)]
        f = open(headerfile,'w')
        f.write(str(hdr))
        f.close()                 
    print 'classification written to: '+outfile       
    print 'elapsed time: '+str(time.time()-start)                        
    print '--done------------------------'  
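
ctable is referenced but never defined in these excerpts. ENVI's 'class lookup' header entry expects a flat list of R,G,B triples, so a stand-in might look like this (hypothetical, not the module's actual color table):

# Hypothetical flat [R0,G0,B0, R1,G1,B1, ...] list; needs 3*(K+1) entries.
ctable = [0,   0,   0,       # class 0: unclassified, black
          255, 0,   0,       # class 1: red
          0,   255, 0,       # class 2: green
          0,   0,   255]     # class 3: blue (extend as needed)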
Example #51
    log("CUDA NV12 Buffer=%s, pitch=%s", hex(int(cudaNV12Buffer)), NV12Pitch)

    #host buffers:
    inputBuffer = driver.pagelocked_zeros(inputPitch*h*3/2, dtype=numpy.byte)
    log("inputBuffer=%s", inputBuffer)

    outputBuffer = driver.pagelocked_zeros(inputPitch*h*3/2, dtype=numpy.byte)
    log("outputBuffer=%s", outputBuffer)

    #populate host buffer with random data:
    buf = inputBuffer.data
    for y in range(h*3/2):
        dst = y * inputPitch
        #debug("%s: %s:%s (size=%s) <- %s:%s (size=%s)", y, dst, dst+w, len(buffer), src, src+w, len(Yplane))
        for x in range(w):
            buf[dst+x] = numpy.byte((x+y) % 256)

    #copy input buffer to CUDA buffer:
    driver.memcpy_htod(cudaInputBuffer, inputBuffer)
    log("input buffer copied to device")

    #FIXME: just clear the NV12 buffer:
    driver.memcpy_htod(cudaNV12Buffer, outputBuffer)
    #FIXME: just for testing fill the buffer with our input already:
    #driver.memcpy_htod(cudaNV12Buffer, inputBuffer)

    if True:
        log("calling %s", BGRA2NV12)
        BGRA2NV12(cudaInputBuffer, numpy.int32(inputPitch),
               cudaNV12Buffer, numpy.int32(NV12Pitch),
               numpy.int32(w), numpy.int32(h),

Example #52
def on_key(event):
    global fileList, targetList, fig, imgNum, brushSize
    global maskDir, maskImg
    global prevImg,   thisImg,   nextImg
    global prevAxImg, thisAxImg, nextAxImg
    global prevTarget, thisTarget, nextTarget
    global prevMin,   thisMin,   nextMin
    global prevMax,   thisMax,   nextMax
    global prevLabel, thisLabel, nextLabel

    # Handle brush sizing
    if event.key == '1':
        brushSize = 1
    elif event.key == '2':
        brushSize = 2
    elif event.key == '3':
        brushSize = 3
    elif event.key == '4':
        brushSize = 4
    elif event.key == '5':
        brushSize = 5
    elif event.key == '6':
        brushSize = 6

    # Increment the image number
    if event.key == 'right' or event.key == 'left':
        if event.key == 'right':
            #Advance to the next image
            imgNum += 1

            # Read in the new files
            prevImg = thisImg
            thisImg = nextImg
            nextImg = ai.reduced.ReducedScience.read(fileList[(imgNum + 1) % len(fileList)])

            # Update target info
            prevTarget = thisTarget
            thisTarget = nextTarget
            nextTarget = targetList[(imgNum + 1) % len(fileList)]

            # Build the image scaling intervals
            zScaleGetter = ZScaleInterval()

            # Compute new image display minima
            prevMin = thisMin
            thisMin = nextMin
            nextMin, _ = zScaleGetter.get_limits(nextImg.data)

            # Compute new image display maxima
            prevMax = thisMax
            thisMax = nextMax
            _, nextMax = zScaleGetter.get_limits(nextImg.data)

        if event.key == 'left':
            #Move back to the previous image
            imgNum -= 1

            # Read in the new files
            nextImg = thisImg
            thisImg = prevImg
            prevImg = ai.reduced.ReducedScience.read(fileList[(imgNum - 1) % len(fileList)])

            # Update target info
            nextTarget = thisTarget
            thisTarget = prevTarget
            prevTarget = targetList[(imgNum - 1) % len(fileList)]

            # Build the image scaling intervals
            zScaleGetter = ZScaleInterval()

            # Compute new image display minima
            nextMin = thisMin
            thisMin = prevMin
            prevMin, _ = zScaleGetter.get_limits(prevImg.data)

            # Compute new image display maxima
            nextMax = thisMax
            thisMax = prevMax
            _, prevMax = zScaleGetter.get_limits(prevImg.data)

        #*******************************
        # Update the displayed mask
        #*******************************

        # Check which mask files might be usable...
        prevMaskFile = os.path.join(maskDir,
            os.path.basename(prevImg.filename))
        thisMaskFile = os.path.join(maskDir,
            os.path.basename(thisImg.filename))
        nextMaskFile = os.path.join(maskDir,
            os.path.basename(nextImg.filename))
        if os.path.isfile(thisMaskFile):
            # If the mask for this file exists, use it
            print('using this mask: ',os.path.basename(thisMaskFile))
            maskImg = ai.reduced.ReducedScience.read(thisMaskFile)
        elif os.path.isfile(prevMaskFile) and (prevTarget == thisTarget):
            # Otherwise check for the mask for the previous file
            print('using previous mask: ',os.path.basename(prevMaskFile))
            maskImg = ai.reduced.ReducedScience.read(prevMaskFile)
        elif os.path.isfile(nextMaskFile) and (nextTarget == thisTarget):
            # Then check for the mask of the next file
            print('using next mask: ',os.path.basename(nextMaskFile))
            maskImg = ai.reduced.ReducedScience.read(nextMaskFile)
        else:
            # If none of those files exist, build a blank slate
            # Build a mask template (0 = not masked, 1 = masked)
            maskImg       = thisImg.copy()
            maskImg.filename = thisMaskFile
            maskImg = maskImg.astype(np.int16)
            # Make sure the uncertainty array is removed from the image
            try:
                del maskImg.uncertainty
            except:
                pass

        # Update contour plot (clear old lines, redo contouring)
        axarr[1].collections = []
        axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)

        # Reassign image display limits
        prevAxImg.set_clim(vmin = prevMin, vmax = prevMax)
        thisAxImg.set_clim(vmin = thisMin, vmax = thisMax)
        nextAxImg.set_clim(vmin = nextMin, vmax = nextMax)

        # Display the new images
        prevAxImg.set_data(prevImg.data)
        thisAxImg.set_data(thisImg.data)
        nextAxImg.set_data(nextImg.data)

        # Update the annotation
        axList = fig.get_axes()
        axList[1].set_title(os.path.basename(thisImg.filename))

        prevStr   = (str(prevImg.header['OBJECT']) + '\n' +
                     str(prevImg.header['FILTNME2']) + '\n' +
                     str(prevImg.header['HWP']))
        thisStr   = (str(thisImg.header['OBJECT']) + '\n' +
                     str(thisImg.header['FILTNME2']) + '\n' +
                     str(thisImg.header['HWP']))
        nextStr   = (str(nextImg.header['OBJECT']) + '\n' +
                     str(nextImg.header['FILTNME2']) + '\n' +
                     str(nextImg.header['HWP']))
        prevLabel.set_text(prevStr)
        thisLabel.set_text(thisStr)
        nextLabel.set_text(nextStr)

        # Update the display
        fig.canvas.draw()

    # Save the generated mask
    if event.key == 'enter':
        # Make sure the header has the right values
        maskImg.header = thisImg.header

        # TODO: make sure the mask ONLY has what it needs
        # i.e., remove uncertainty and convert to np.ubyte type.

        # Write the mask to disk
        maskBasename = os.path.basename(thisImg.filename)
        maskFullname = os.path.join(maskDir, maskBasename)
        print('Writing mask for file {}'.format(maskBasename))
        maskImg.write(maskFullname, clobber=True)

    # Clear out the mask values
    if event.key == 'backspace':
        # Clear out the mask array
        maskImg.data = maskImg.data * np.byte(0)

        # Update contour plot (clear old lines, redo contouring)
        axarr[1].collections = []
        axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)

        # Update the display
        fig.canvas.draw()
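
The handler above only fires once it is registered with the figure's canvas. A minimal hookup sketch using matplotlib's key-event API (the module-level globals it reads must already be initialized):

cid = fig.canvas.mpl_connect('key_press_event', on_key)
# ... interactive masking session ...
fig.canvas.mpl_disconnect(cid)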
Example #53
def main():    
    usage = '''
Usage: 
---------------------------------------------------------
python %s  [-p bandPositions] [- a algorithm] [-L number of hidden neurons]   
[-P generate class probabilities image] filename trainShapefile

bandPositions is a list, e.g., -p [1,2,4]  

algorithm  1=MaxLike
           2=NNet(backprop)
           3=NNet(congrad)
           4=SVM

If the input file is named 

         path/filebasename.ext then

The output classification file is named 

         path/filebasename_class.ext

the class probabilities output file is named

         path/filebasename_classprobs.ext
         
and the test results file is named

         path/filebasename_<classifier>.tst
--------------------------------------------------------''' %sys.argv[0]
    options, args = getopt.getopt(sys.argv[1:],'hnPp:a:L:')
    pos = None
    probs = False   
    L = 8
    graphics = True
    trainalg = 1
    for option, value in options:
        if option == '-h':
            print usage
            return
        elif option == '-p':
            pos = eval(value)
        elif option == '-n':
            graphics = False            
        elif option == '-a':
            trainalg = eval(value)
        elif option == '-L':
            L = eval(value)    
        elif option == '-P':
            probs = True                              
    if len(args) != 2: 
        print 'Incorrect number of arguments'
        print usage
        sys.exit(1)      
    if trainalg == 1:
        algorithm = 'MaxLike'
    elif trainalg == 2:
        algorithm = 'NNet(Backprop)'
    elif trainalg == 3:
        algorithm =  'NNet(Congrad)'
    elif trainalg == 4:
        algorithm = 'SVM'              
    infile = args[0]  
    trnfile = args[1]      
    gdal.AllRegister() 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)    
    else:
        return  
    if pos is None: 
        pos = range(1,bands+1)
    N = len(pos)    
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))     
#  output files
    path = os.path.dirname(infile)
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    outfile = '%s/%s_class%s'%(path,root,ext)  
    tstfile = '%s/%s_%s.tst'%(path,root,algorithm)            
    if (trainalg in (2,3,4)) and probs:
#      class probabilities file
        probfile = '%s/%s_classprobs%s'%(path,root,ext) 
    else:
        probfile = None        
#  training data        
    trnDriver = ogr.GetDriverByName('ESRI Shapefile')
    trnDatasource = trnDriver.Open(trnfile,0)
    trnLayer = trnDatasource.GetLayer() 
    trnsr = trnLayer.GetSpatialRef()             
#  coordinate transformation from training to image projection   
    ct = osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1       
#  here we go
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:     '+infile
    print 'training:  '+trnfile  
    print 'algorithm: '+algorithm             
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
        classids |= {classid}
#      label for this ROI           
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] (for ffn)
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0   
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]             
#  setup output datasets 
    driver = inDataset.GetDriver() 
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:   
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
#  initialize classifier  
    if   trainalg == 1:
        classifier = sc.Maxlike(Gs,ls)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gs,ls,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gs,ls,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gs,ls)         
#  train it            
    print 'training on %i pixel vectors...' % np.shape(Gs)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if (trainalg in [2,3]) and graphics:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')   
            plt.show()
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N),dtype=np.float32)    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  ls.shape[1]+1                     
        print 'thematic map written to: %s'%outfile
    else:
        print 'an error occurred'
        return 
#  cross-validation
    start = time.time()
    rc = Client()   
    print 'submitting cross-validation to %i IPython engines'%len(rc)  
    m = np.shape(Gs)[0]
    traintest = []
    for i in range(10):
        sl = slice(i*m//10,(i+1)*m//10)
        traintest.append( (np.delete(Gs,sl,0),np.delete(ls,sl,0), \
                                     Gs[sl,:],ls[sl,:],L,trainalg) )
    v = rc[:]   
    v.execute('import auxil.supervisedclass as sc') 
    result = v.map(crossvalidate,traintest).get()   
    print 'parallel execution time: %s' %str(time.time()-start)      
    print 'misclassification rate: %f' %np.mean(result)
    print 'standard deviation:     %f' %np.std(result)         
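
crossvalidate itself is imported elsewhere. A plausible serial version matching the (Gstrn, lstrn, Gstst, lstst, L, trainalg) tuples built above (hypothetical, mirroring the classifier dispatch earlier in this example):

def crossvalidate(args):
    # Hypothetical stand-in for the function mapped over the IPython engines:
    # train on one fold's training split, return its misclassification rate.
    import numpy as np
    import auxil.supervisedclass as sc
    Gstrn, lstrn, Gstst, lstst, L, trainalg = args
    if trainalg == 1:
        clf = sc.Maxlike(Gstrn, lstrn)
    elif trainalg == 2:
        clf = sc.Ffnbp(Gstrn, lstrn, L)
    elif trainalg == 3:
        clf = sc.Ffncg(Gstrn, lstrn, L)
    else:
        clf = sc.Svm(Gstrn, lstrn)
    if not clf.train():
        return None
    classes, _ = clf.classify(Gstst)
    labels = np.argmax(lstst, axis=1) + 1
    return np.mean(np.ravel(classes) != labels)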
Example #54
def make_image(redband,greenband,blueband,rows,cols,enhance):
    X = np.ones((rows*cols,3),dtype=np.uint8) 
    if enhance == 'linear255':
        i = 0
        for tmp in [redband,greenband,blueband]:
            tmp = tmp.ravel()
            tmp = np.where(tmp<0,0,tmp)  
            tmp = np.where(tmp>255,255,tmp)
            X[:,i] = np.byte(tmp)
            i += 1
    elif enhance == 'linear':
        i = 0
        for tmp in [redband,greenband,blueband]:             
            tmp = tmp.ravel()  
            mx = np.max(tmp)
            mn = np.min(tmp)  
            if mx-mn > 0:
                tmp = (tmp-mn)*255.0/(mx-mn)    
            tmp = np.where(tmp<0,0,tmp)  
            tmp = np.where(tmp>255,255,tmp)
            X[:,i] = np.byte(tmp)
            i += 1
    elif enhance == 'linear2pc':
        i = 0
        for tmp in [redband,greenband,blueband]:     
            tmp = tmp.ravel()        
            mx = np.max(tmp)
            mn = np.min(tmp)  
            if mx-mn > 0:
                tmp = (tmp-mn)*255.0/(mx-mn)  
            tmp = np.where(tmp<0,0,tmp)  
            tmp = np.where(tmp>255,255,tmp)
            hist,bin_edges = np.histogram(tmp,256,(0,256))
            cdf = hist.cumsum()
            lower = 0
            j = 0
            while cdf[j] < 0.02*cdf[-1]:
                lower += 1
                j += 1
            upper = 255    
            j = 255
            while cdf[j] > 0.98*cdf[-1]:
                upper -= 1
                j -= 1
            if upper==0:
                upper = 255
                print 'Saturated stretch failed'
            fp = (bin_edges-lower)*255/(upper-lower) 
            fp = np.where(bin_edges<=lower,0,fp)
            fp = np.where(bin_edges>=upper,255,fp)
            X[:,i] = np.byte(np.interp(tmp,bin_edges,fp))
            i += 1       
    elif enhance == 'equalization':   
        i = 0
        for tmp in [redband,greenband,blueband]:     
            tmp = tmp.ravel()    
            mx = np.max(tmp)
            mn = np.min(tmp)  
            if mx-mn > 0:
                tmp = (tmp-mn)*255.0/(mx-mn)  
            tmp = np.where(tmp<0,0,tmp)  
            tmp = np.where(tmp>255,255,tmp)  
            hist,bin_edges = np.histogram(tmp,256,(0,256)) 
            cdf = hist.cumsum()
            lut = 255*cdf/float(cdf[-1]) 
            X[:,i] = np.byte(np.interp(tmp,bin_edges[:-1],lut))
            i += 1
    elif enhance == 'logarithmic':   
        i = 0
        for tmp in [redband,greenband,blueband]:     
            tmp = tmp.ravel() 
            mn = np.min(tmp)
            if mn < 0:
                tmp = tmp - mn
            idx = np.where(tmp == 0)
            tmp[idx] = np.mean(tmp)  # get rid of black edges
            idx = np.where(tmp > 0)
            tmp[idx] = np.log(tmp[idx])            
            mn =np.min(tmp)
            mx = np.max(tmp)
            if mx-mn > 0:
                tmp = (tmp-mn)*255.0/(mx-mn)    
            tmp = np.where(tmp<0,0,tmp)  
            tmp = np.where(tmp>255,255,tmp)
#          2% linear stretch
            hist,bin_edges = np.histogram(tmp,256,(0,256))
            cdf = hist.cumsum()
            lower = 0
            j = 0
            while cdf[j] < 0.02*cdf[-1]:
                lower += 1
                j += 1
            upper = 255    
            j = 255
            while cdf[j] > 0.98*cdf[-1]:
                upper -= 1
                j -= 1
            if upper==0:
                upper = 255
                print 'Saturated stretch failed'
            fp = (bin_edges-lower)*255/(upper-lower) 
            fp = np.where(bin_edges<=lower,0,fp)
            fp = np.where(bin_edges>=upper,255,fp)
            X[:,i] = np.byte(np.interp(tmp,bin_edges,fp))
            i += 1                           
    return np.reshape(X,(rows,cols,3))/255.
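
A quick usage sketch, pushing three random out-of-range bands through the 2% linear stretch and displaying the result (assumes matplotlib is available):

import numpy as np
import matplotlib.pyplot as plt

rows, cols = 100, 100
r, g, b = (np.random.rand(rows, cols)*300 for _ in range(3))   # deliberately exceeds 255
rgb = make_image(r, g, b, rows, cols, 'linear2pc')             # scaled into [0, 1]
plt.imshow(rgb)
plt.show()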