Example #1
def run_clustering_for_rank(rank,
                            distances_input=None,
                            distance_ranks_input=None,
                            isolates=None,
                            previous_seeds=None):
    """ Clusters isolates into lineages based on their
    relative distances using a single R to enable
    parallelisation.

    Args:
        rank (int)
            Integer specifying the maximum rank of neighbour used
            for clustering. Should be changed to int list for hierarchical
            clustering.
        qlist (list)
            List of query sequences being added to an existing clustering.
            Should be included within rlist.
        use_existing (bool)
            Whether to extend a previously generated analysis or not.

    Returns:
        lineage_assignation (dict)
            Assignment of each isolate to a cluster.
        lineage_seed (dict)
            Seed isolate used to initiate each cluster.
        connections (set of tuples)
            Edges to add to network describing lineages.
    """

    # load shared memory objects
    distances_shm = shared_memory.SharedMemory(name=distances_input.name)
    distances = np.ndarray(distances_input.shape,
                           dtype=distances_input.dtype,
                           buffer=distances_shm.buf)
    distance_ranks_shm = shared_memory.SharedMemory(
        name=distance_ranks_input.name)
    distance_ranks = np.ndarray(distance_ranks_input.shape,
                                dtype=distance_ranks_input.dtype,
                                buffer=distance_ranks_shm.buf)
    isolate_list = isolates
    isolate_indices = range(0, len(isolate_list))

    # load previous scheme
    seeds = {}
    if previous_seeds is not None:
        seeds = previous_seeds[rank]

    # identify nearest neighbours
    nn = get_nearest_neighbours(rank,
                                ranks=distance_ranks_input,
                                isolates=isolate_list)

    # iteratively identify lineages
    lineage_index = 1
    connections = set()
    lineage_assignation = {isolate: None for isolate in isolate_list}

    while None in lineage_assignation.values():
        if lineage_index in seeds.keys():
            seed_isolate = seeds[lineage_index]
        else:
            seed_isolate = pick_seed_isolate(lineage_assignation,
                                             distances=distances_input)
        # skip a previously-defined seed if it has since been amalgamated into a different lineage
        if lineage_assignation[seed_isolate] is None:
            seeds[lineage_index] = seed_isolate
            lineage_assignation, added_connections = get_lineage(
                lineage_assignation, nn, seed_isolate, lineage_index)
            connections.update(added_connections)
        lineage_index = lineage_index + 1

    # return clustering
    return lineage_assignation, seeds, nn, connections
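A minimal sketch of preparing the shared-memory descriptors this function expects: only .name, .shape and .dtype are read above, so the caller passes a lightweight metadata object rather than the array itself. ShmDescriptor is a hypothetical stand-in for that object.

from collections import namedtuple
from multiprocessing import shared_memory
import numpy as np

ShmDescriptor = namedtuple("ShmDescriptor", ["name", "shape", "dtype"])

distances = np.random.rand(100, 100)
shm = shared_memory.SharedMemory(create=True, size=distances.nbytes)
view = np.ndarray(distances.shape, dtype=distances.dtype, buffer=shm.buf)
view[:] = distances[:]  # copy once; workers attach by name instead of pickling

# pass the descriptor (not the array) to run_clustering_for_rank
distances_input = ShmDescriptor(shm.name, distances.shape, distances.dtype)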
Example #2
#x = 0
frames = 0

text_to_read = "Which object do you want to find?"
reading_from_string(text_to_read, object_filename)
original_text = input(text_to_read)
valid_object, object_to_find = find_word.find_word(original_text)
object_found = False
coords = []
if valid_object:
    while not object_found:
        t = None
        frame = pepper.getCameraFrame(handle)
        if frames == 0:
            print('Sending initial information')
            shm = shared_memory.SharedMemory(
                create=True, size=frame.nbytes)  # name='image_random'
            data = json.dumps({
                "shape": frame.shape,
                "name": shm.name,
                "type": frame.dtype.name,
                "object": object_to_find
            })
            s.send(data.encode())

            #data = s.recv(1024)
            #s.close()
            #print('Received', data)
        # Now create a NumPy array backed by shared memory
        b = np.ndarray(frame.shape, dtype=frame.dtype, buffer=shm.buf)
        b[:] = frame[:]  # Copy the original data into shared memory
        #print('Sending')
Example #3
def destroy(self):
    shm = shared_memory.SharedMemory(self.shmname)
    shm.unlink()
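unlink() only removes the named block itself; each process that attached should also close() its own handle. A minimal lifecycle sketch around a destroy() like the one above:

from multiprocessing import shared_memory

shm = shared_memory.SharedMemory(create=True, size=1024)
try:
    shm.buf[0] = 1   # ... use the block ...
finally:
    shm.close()      # every process releases its own mapping
    shm.unlink()     # exactly one process frees the block itself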
Example #4
import time
import sys
from multiprocessing import shared_memory

SHM_NAME = "xxx"
SHM_SIZE = 1024
DATA = b"x" * SHM_SIZE
N = 100000

argv = sys.argv
is_server = False
if len(argv) > 1:
    if argv[1] == "-s": is_server = True

if is_server:
    shm = shared_memory.SharedMemory(create=True, size=SHM_SIZE, name=SHM_NAME)
else:
    shm = shared_memory.SharedMemory(size=SHM_SIZE, name=SHM_NAME)

try:
    shm.buf[0] = 0
    for i in range(int(N) + 1):
        if is_server:
            while shm.buf[0] == 0:
                time.sleep(0)
            shm.buf[0] = 0
            for x in range(1, len(shm.buf)):
                shm.buf[x] = DATA[x]
            # print(shm.buf)
        else:
            while shm.buf[0] == 1:
                time.sleep(0)
            # (truncated in the original; the client presumably consumes the
            # payload here and hands the turn back with shm.buf[0] = 1)
Example #5
    red_rects.append(rect)
# keep rotating the rectangle until running is set to False

joy_steer = 0
joy_throttle = 0
joy_strafe = 0
vel_cmd = 0
steer_cmd = 0
last_vel_cmd = 0
tick_time = time.time()

strafe = 0

myFont = py.font.SysFont("Times New Roman", int(40 * WINDOW_SCALING))

existing_shm = shared_memory.SharedMemory(name='acorn_steering_debug')
# Untrack the resource so it does not get destroyed. This allows the
# steering debug window to stay open.
resource_tracker.unregister(existing_shm._name, 'shared_memory')
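# NOTE: resource_tracker.unregister and existing_shm._name are CPython
# internals rather than public API; this workaround may change between versions.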
calc = np.ndarray((8, ), dtype=np.float64, buffer=existing_shm.buf)

running = True
while running:
    # set FPS
    clock.tick(FPS)
    # clear the screen every time before drawing new objects
    screen.fill(BLACK)
    # check for the exit
    for event in py.event.get():
        if event.type == py.QUIT:
            running = False
Example #6
def perform_fft(input_info, plot=False):

    print("\nspawn FFT process nr : ", input_info["process_nr"])

    existing_shm = shared_memory.SharedMemory(name=input_info["shm"].name)

    # the "poly" key signals the polynomial branch; the data view is built
    # the same way in either case
    poly = "poly" in input_info
    reference_to_data_block = numpy.ndarray(
        input_info["dim"], dtype=numpy.float64,
        buffer=existing_shm.buf)[:, input_info["from"]:input_info["to"], :]

    # get data to process out of buffer

    print("ref data dtype: ", reference_to_data_block.dtype)
    print("\n")

    # reshape the buffer view into a 2-D matrix: time along axis 0, pixels along axis 1
    data_mat = reference_to_data_block.reshape(
        reference_to_data_block.shape[0],
        reference_to_data_block.shape[1] * reference_to_data_block.shape[2])

    # store original time, row and column sizes - needed for reshaping
    orig_time = reference_to_data_block.shape[0]
    orig_rows = reference_to_data_block.shape[1]
    orig_cols = reference_to_data_block.shape[2]

    # if plots are wanted
    if plot:
        existing_shm_qual = shared_memory.SharedMemory(
            name=input_info["shm_qual"].name)
        # the qual view is built the same way whether or not poly is active
        reference_to_qual_block = numpy.ndarray(
            input_info["dim"],
            dtype=numpy.float64,
            buffer=existing_shm_qual.buf
        )[:, input_info["from"]:input_info["to"], :]

        qual_weights = input_info["weights"]
        qual_factor = 1
        qual_mat = reference_to_qual_block.reshape(
            reference_to_qual_block.shape[0],
            reference_to_qual_block.shape[1] *
            reference_to_qual_block.shape[2])
        qual_mat[qual_mat == 255] = numpy.nan

    print("Data Mat Shape", data_mat.shape)

    # replace the integer NaN sentinel (32767) with numpy.nan --> converts the dtype to float64!
    data_mat = numpy.where(data_mat == 32767, numpy.nan, data_mat)
    n = data_mat.shape[0]
    t = numpy.arange(0, n, 1)

    # iter through
    for i in range(0, data_mat.shape[1], 1):

        data_mat_v_nan = numpy.isfinite(data_mat[:, i])
        data_mat_v_t = numpy.arange(0, len(data_mat_v_nan), 1)

        if False in data_mat_v_nan:
            try:

                # interpolate at those spots
                data_mat_v_interp = numpy.round(
                    numpy.interp(data_mat_v_t, data_mat_v_t[data_mat_v_nan],
                                 data_mat[:, i][data_mat_v_nan]))

                # calculate the fft
                f_hat = numpy.fft.fft(data_mat_v_interp, n)
                # and the power spectrum - which frequencies are dominant
                power_spectrum = f_hat * numpy.conj(f_hat) / n

                # get the max power value
                max_fft_spectr_value = numpy.max(power_spectrum)
                # zero it out so the lower but still meaningful (non-noise) frequencies can be found
                power_spec_no_max = numpy.where(
                    power_spectrum == max_fft_spectr_value, 0, power_spectrum)

                threshold_remaining_values = numpy.nanmax(
                    power_spec_no_max) / 2

                indices = power_spectrum > threshold_remaining_values
                f_hat = indices * f_hat
                ffilt = numpy.fft.ifft(f_hat)

                if plot:
                    if i <= 3:
                        print("proces nr %d i == %d" %
                              (input_info["process_nr"], i))
                        print("data mat: ", data_mat[:, i])
                        print("data_mat_v_interp", data_mat_v_interp)

                        # print("data mat:", data_mat[:, i])
                        # print("data_mat.dtype: ", data_mat.dtype)
                        # print("data_mat_interp.dtype: ", data_mat_v_interp.dtype)
                        ffilt = numpy.round(ffilt).astype(numpy.int16)
                        # print("\ntransfrom to int16: ", data_mat_v_interp)
                        # print("\ndata_mat_interp.dtype: ", data_mat_v_interp.dtype)
                        fig, axs = plt.subplots(3, 1)

                        good_qual = numpy.where(
                            qual_mat[:, i] == qual_weights[0], qual_weights[0],
                            numpy.nan) * qual_factor
                        okay_qual = numpy.where(
                            qual_mat[:, i] == qual_weights[1], qual_weights[1],
                            numpy.nan) * qual_factor
                        bad_qual = numpy.where(
                            qual_mat[:, i] == qual_weights[2], qual_weights[2],
                            numpy.nan) * qual_factor
                        really_bad_qual = numpy.where(
                            qual_mat[:, i] == qual_weights[3], qual_weights[3],
                            numpy.nan) * qual_factor
                        nan_values = numpy.where(qual_mat[:, i] == 255,
                                                 qual_weights[3], numpy.nan)

                        plt.sca(axs[0])
                        plt.plot(t,
                                 data_mat[:, i],
                                 color='c',
                                 linewidth=3,
                                 label="raw data")
                        plt.plot(t,
                                 data_mat_v_interp,
                                 color='k',
                                 linewidth=1,
                                 linestyle='--',
                                 label='lin interp')
                        plt.plot(t,
                                 ffilt,
                                 color="k",
                                 linewidth=2,
                                 label='FFT Filtered')

                        plt.plot(t, good_qual, 'go', label="Good Quality")
                        plt.plot(t, okay_qual, 'yo', label="Okay Quality")
                        plt.plot(t,
                                 bad_qual,
                                 'o',
                                 color='orange',
                                 label="Bad Quality")
                        plt.plot(t,
                                 really_bad_qual,
                                 'ro',
                                 label="Really Bad Quality")
                        plt.plot(t, nan_values, 'ko', label="NaN Values")

                        plt.xlim(t[0], t[-1])
                        plt.ylabel("Intensity [%]")
                        plt.xlabel("Time [days]")
                        plt.legend()

                        plt.sca(axs[1])
                        plt.plot(t,
                                 power_spectrum,
                                 color="c",
                                 linewidth=2,
                                 label="Noisy")
                        plt.plot(t,
                                 power_spectrum,
                                 'b*',
                                 linewidth=2,
                                 label="Noisy")
                        plt.xlim(t[0], t[-1])
                        plt.xlabel("Power Spectrum [Hz]")
                        plt.ylabel("Power")
                        plt.title(
                            "Power Spectrum Analysis - Max: {} - Mean: {}"
                            .format(max_fft_spectr_value,
                                    numpy.nanmean(power_spectrum)))

                        plt.sca(axs[2])
                        plt.plot(t,
                                 power_spec_no_max,
                                 color="c",
                                 linewidth=2,
                                 label="Noisy")
                        plt.plot(t,
                                 power_spec_no_max,
                                 'b*',
                                 linewidth=2,
                                 label="Noisy")
                        plt.xlim(t[0], t[-1])
                        plt.xlabel("Power Spectrum no max [Hz]")
                        plt.ylabel("Power")
                        plt.title(
                            "Power Spectrum Analysis - removed big max {} - Max: {} - Threshold: {}"
                            .format(max_fft_spectr_value,
                                    numpy.nanmax(power_spec_no_max),
                                    threshold_remaining_values))

                        # plot data
                        plt.show()

                data_mat[:, i] = ffilt

            except Exception:
                # triggered most often when the column contains only NaNs
                continue

        else:
            # calculate the fft
            f_hat = numpy.fft.fft(data_mat[:, i], n)
            # and the power spectrum - which frequencies are dominant
            power_spectrum = f_hat * numpy.conj(f_hat) / n

            # get the max power value
            max_fft_spectr_value = numpy.max(power_spectrum)
            # zero it out so the lower but still meaningful (non-noise) frequencies can be found
            power_spec_no_max = numpy.where(
                power_spectrum == max_fft_spectr_value, 0, power_spectrum)

            threshold_remaining_values = numpy.nanmax(power_spec_no_max) / 2

            indices = power_spectrum > threshold_remaining_values
            f_hat = indices * f_hat
            ffilt = numpy.fft.ifft(f_hat)
            data_mat[:, i] = ffilt

    # transform float64 back to INT16!
    # save the interpolation results on the shared memory object
    # (the poly and non-poly branches write identically)
    reference_to_data_block[:] = numpy.round(
        data_mat.reshape(orig_time, orig_rows, orig_cols))
Example #7
def perform_dft(input_info, plot=False):

    print("\nspawn DFT process nr : ", input_info["process_nr"])

    existing_shm = shared_memory.SharedMemory(name=input_info["shm"].name)
    existing_shm_qual = shared_memory.SharedMemory(
        name=input_info["shm_qual"].name)
    # the poly branch is not used in the DFT path, so the flag stays False
    poly = False
    reference_to_data_block = numpy.ndarray(
        input_info["dim"], dtype=numpy.float64,
        buffer=existing_shm.buf)[:, input_info["from"]:input_info["to"], :]
    reference_to_qual_block = numpy.ndarray(
        input_info["dim"], dtype=numpy.float64,
        buffer=existing_shm_qual.buf)[:,
                                      input_info["from"]:input_info["to"], :]
    # get data to process out of buffer

    A = input_info["A"]
    print("ref data dtype: ", reference_to_data_block.dtype)
    print("Shape A: ", A.shape)
    print("\n")

    # reshape the buffer view into a 2-D matrix: time along axis 0, pixels along axis 1
    data_mat = reference_to_data_block.reshape(
        reference_to_data_block.shape[0],
        reference_to_data_block.shape[1] * reference_to_data_block.shape[2])

    qual_mat = reference_to_qual_block.reshape(
        reference_to_qual_block.shape[0],
        reference_to_qual_block.shape[1] * reference_to_qual_block.shape[2])
    print("Start DFT in process nr: ", input_info["process_nr"])
    sing_cou = 0
    for i in range(0, data_mat.shape[1], 1):
        # check whether the vector contains NaNs - if so, interpolate linearly at those spots
        # in the qual vector, the indices where the data is NaN should already be set to zero (see the main file)
        data_mat_v_nan = numpy.isfinite(data_mat[:, i])
        data_mat_v_t = numpy.arange(0, len(data_mat_v_nan), 1)

        if False in data_mat_v_nan:
            # interpolate at those spots
            try:
                data_mat_v = numpy.round(
                    numpy.interp(data_mat_v_t, data_mat_v_t[data_mat_v_nan],
                                 data_mat[:, i][data_mat_v_nan]))
            except Exception:
                #print("crash ")
                #print(data_mat[:, i])
                continue

        else:
            data_mat_v = data_mat[:, i]

        qual_mat_v = qual_mat[:, i].reshape(len(data_mat_v), 1)
        data_mat_v = data_mat_v.reshape(len(data_mat_v), 1)

        # ATPA

        try:
            P = numpy.eye(len(data_mat_v)) * qual_mat_v
            ATPA = numpy.linalg.inv(numpy.dot(numpy.dot(A.T, P), A))
            ATPL = numpy.dot(numpy.dot(A.T, P), data_mat_v)
            x_hat = numpy.dot(ATPA, ATPL)

            l_hat = numpy.dot(A, x_hat)

            data_mat[:, i] = l_hat.reshape(len(l_hat))
        except Exception:
            # print("singular matrix")
            sing_cou += 1
            # for e in range(0, len(data_mat_v), 1):
            #     print("data: ", data_mat_v[e], " - qual:", qual_mat_v[e])
            continue

        # ------------------------------------------
        # optimise using vectors rather than full matrices
        # ------------------------------------------

        # APv = numpy.multiply(A,qual_mat_v)
        # # check singular matrix --> maybe try/except implement
        # ATPA = numpy.linalg.inv(numpy.dot(APv.T, A))
        # # ATPL
        # ATPL = numpy.dot(APv.T, data_mat_v)
        # x_hat = numpy.dot(ATPA, ATPL)
        # l_hat = numpy.dot(A, x_hat)
        # qual_mat[:, i] = l_hat.reshape(len(l_hat))

        # print("APv: ", APv)
        # print("ATPA.shape", ATPA.shape)
        # print("data shape: ", data_mat_v.shape)
        # print("qual shape: ", qual_mat_v.shape)
        # #print("data: ", data_mat_v)
        # #print("qual: ", qual_mat_v)
        # print("A.shape   : ", A.shape)
        # print("A.T shape : ", A.T.shape)
        # print("DFT x_HAT: ", x_hat)
        # print("lhat:", l_hat)

    # store original time, row and column sizes - needed for reshaping
    orig_time = reference_to_data_block.shape[0]
    orig_rows = reference_to_data_block.shape[1]
    orig_cols = reference_to_data_block.shape[2]

    print(
        "-  finished process %d - singular matrix count " %
        input_info["process_nr"], sing_cou)

    # save interpolation results on the shared memory object
    reference_to_data_block[:] = numpy.round(
        data_mat.reshape(orig_time, orig_rows, orig_cols))
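The fit above is an ordinary weighted least-squares adjustment, x_hat = (A^T P A)^-1 A^T P l, with the quality weights on the diagonal of P. A standalone sketch with synthetic data (numpy.linalg.solve would be numerically preferable to the explicit inverse, but the inverse matches the code above):

import numpy as np

t = np.linspace(0, 1, 50)
A = np.column_stack([np.ones_like(t),
                     np.sin(2 * np.pi * t),
                     np.cos(2 * np.pi * t)])          # design matrix
l = A @ np.array([2.0, 1.0, 0.5]) + np.random.normal(0, 0.1, len(t))
P = np.eye(len(t))                                    # observation weights

x_hat = np.linalg.inv(A.T @ P @ A) @ (A.T @ P @ l)    # estimated parameters
l_hat = A @ x_hat                                     # smoothed observations
print(x_hat)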
Example #8
# In the first Python interactive shell
import numpy as np
a = np.array([1, 1, 2, 3, 5, 8])  # Start with an existing NumPy array
from multiprocessing import shared_memory
shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
# Now create a NumPy array backed by shared memory
b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
b[:] = a[:]  # Copy the original data into shared memory
print(b)
print(shm.name)

# In either the same shell or a new Python shell on the same machine
import numpy as np
from multiprocessing import shared_memory
# Attach to the existing shared memory block
existing_shm = shared_memory.SharedMemory(name=shm.name)
# Note that a.shape is (6,) and a.dtype is np.int64 in this example
c = np.ndarray((6, ), dtype=np.int64, buffer=existing_shm.buf)
print(c)
c[-1] = 888
print(c)

# Back in the first Python interactive shell, b reflects this change
print(b)

# Clean up from within the second Python shell
del c  # Unnecessary; merely emphasizing the array is no longer used
existing_shm.close()

# Clean up from within the first Python shell
del b  # Unnecessary; merely emphasizing the array is no longer used
shm.close()
shm.unlink()  # Free and release the shared memory block at the very end
Example #9
def gen_time_results(mat_size, core_list):
    mat_shape = (mat_size, mat_size)
    data_A = np.random.rand(*mat_shape).astype(np.float32)
    data_B = np.random.rand(*mat_shape).astype(np.float32)
    data_C = np.empty((mat_size, mat_size), dtype=np.float32)
    shm_A = shared_memory.SharedMemory(create=True, size=data_A.nbytes)
    shm_B = shared_memory.SharedMemory(create=True, size=data_B.nbytes)
    shm_C = shared_memory.SharedMemory(create=True, size=data_C.nbytes)
    mat_A = np.ndarray(data_A.shape, dtype=data_A.dtype, buffer=shm_A.buf)
    mat_A[:] = data_A[:]
    mat_B = np.ndarray(data_B.shape, dtype=data_B.dtype, buffer=shm_B.buf)
    mat_B[:] = data_B[:]
    mat_C = np.ndarray(data_C.shape, dtype=data_C.dtype, buffer=shm_C.buf)
    mat_C[:] = data_C[:]
    name_A = shm_A.name
    name_B = shm_B.name
    name_C = shm_C.name
    total_times = []
    send_times = []
    calc_times = []
    recv_times = []
    for no_cores in core_list:
        print(no_cores)
        # Assuming the core count is 2^n for integer n, take log2 to split it across the two axes
        power = np.log2(no_cores) / 2
        # Represents the number of partitions that must be calculated in the result matrix C
        pars_i = int(2**(np.ceil(power)))
        pars_j = int(2**(np.floor(power)))
        # Represents the size of each partition along the i and j axes
        len_i = int(mat_size / pars_i)
        len_j = int(mat_size / pars_j)
        total_start = time.time()
        send_list = [[i, len_i, j, len_j, mat_size, name_A, name_B, name_C]
                     for j in range(pars_j) for i in range(pars_i)]
        p = Pool(processes=no_cores)
        res_list = p.starmap(matrix_mult, send_list)
        p.close()
        total_finish = time.time()
        calc_start_list = []
        calc_finish_list = []
        res_list = list(res_list)
        for i in range(len(res_list)):
            time_difference = res_list[0][0] - res_list[i][0]
            calc_start_list.append(res_list[i][1] + time_difference)
            calc_finish_list.append(res_list[i][2] + time_difference)
        calc_start = min(calc_start_list)
        calc_finish = max(calc_finish_list)
        send_time = calc_start - total_start
        calc_time = calc_finish - calc_start
        gather_time = total_finish - calc_finish
        total_time = total_finish - total_start
        # the three phases should partition the total; allow float rounding slack
        assert abs((send_time + calc_time + gather_time) - total_time) < 1e-6
        send_times.append(round(send_time, 10))
        calc_times.append(round(calc_time, 10))
        recv_times.append(round(gather_time, 10))
        total_times.append(round(total_time, 10))
    shm_A.close()
    shm_B.close()
    shm_C.close()
    shm_A.unlink()
    shm_B.unlink()
    shm_C.unlink()
    return tuple(send_times), tuple(calc_times), tuple(recv_times), tuple(
        total_times)
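The matrix_mult worker itself is not shown. A plausible sketch, assuming each worker attaches to the three blocks by name, fills its (i, j) partition of C, and returns (reference time, calc start, calc finish) as the timing code above expects:

import time
import numpy as np
from multiprocessing import shared_memory

def matrix_mult(i, len_i, j, len_j, mat_size, name_A, name_B, name_C):
    calc_start = time.time()
    shm_A = shared_memory.SharedMemory(name=name_A)
    shm_B = shared_memory.SharedMemory(name=name_B)
    shm_C = shared_memory.SharedMemory(name=name_C)
    shape = (mat_size, mat_size)
    A = np.ndarray(shape, dtype=np.float32, buffer=shm_A.buf)
    B = np.ndarray(shape, dtype=np.float32, buffer=shm_B.buf)
    C = np.ndarray(shape, dtype=np.float32, buffer=shm_C.buf)
    rows = slice(i * len_i, (i + 1) * len_i)
    cols = slice(j * len_j, (j + 1) * len_j)
    C[rows, cols] = A[rows, :] @ B[:, cols]   # this worker's partition of C
    calc_finish = time.time()
    del A, B, C                               # drop the views before closing
    shm_A.close()
    shm_B.close()
    shm_C.close()
    return time.time(), calc_start, calc_finish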
Example #10
import numpy as np
a = np.array([1, 1, 2, 3, 5, 8])  # Start with an existing NumPy array
from multiprocessing import shared_memory
shm = shared_memory.SharedMemory(create=True,
                                 size=a.nbytes,
                                 name='psm_957212c1')
# Now create a NumPy array backed by shared memory
b = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
b[:] = a[:]  # Copy the original data into shared memory
print(b)

type(b)

type(a)

print(
    shm.name
)  # If we did not specify a name one would be chosen for us and we would have to paste it in the client
wait = input('Launch client then press enter')
print(b)

# Clean up from within the first Python shell
del b  # Unnecessary; merely emphasizing the array is no longer used
shm.close()
shm.unlink()  # Free and release the shared memory block at the very end
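The client this example waits for might look like the following sketch; the shape and dtype must match the server's array, as in the documentation example above.

# client.py - run while the server above waits at input()
import numpy as np
from multiprocessing import shared_memory

existing_shm = shared_memory.SharedMemory(name='psm_957212c1')
c = np.ndarray((6, ), dtype=np.int64, buffer=existing_shm.buf)
c[-1] = 888          # visible in the server's b after pressing enter
del c                # drop the view before closing the mapping
existing_shm.close()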
Example #11
import psutil
import numpy as np
from multiprocessing import shared_memory  # used below; missing in the original imports
from humanize import naturalsize

#
# Make a big array
# E.g., analogously, this could be a large ML model we want to serve

array_n = 10**4
big_matrix = np.random.rand(array_n, array_n).astype(np.float64)

print("array size: ", naturalsize(big_matrix.nbytes))

#
# Create dedicated shared memory object
shared_block = shared_memory.SharedMemory(create=True, size=big_matrix.nbytes)
shared_matrix = np.ndarray(big_matrix.shape,
                           dtype=np.float64,
                           buffer=shared_block.buf)
shared_matrix[:] = big_matrix[:]

shared_block_id = shared_block.name


#
# Worker func. Run on forked process
def worker_func(worker_number, matrix_block_id, matrix_shape):
    # attach via the parameter (the original read the global shared_block_id)
    block = shared_memory.SharedMemory(name=matrix_block_id)
    matrix = np.ndarray(matrix_shape, dtype=np.float64, buffer=block.buf)
    for _ in range(100):
        # (truncated in the original; plausibly it reports per-process memory
        # to show the mapping adds no extra copy of the matrix)
        print(worker_number, naturalsize(psutil.Process().memory_info().rss))
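A sketch of how such workers might be launched and the block released afterwards, assuming the completed worker_func above:

from multiprocessing import Process

procs = [Process(target=worker_func, args=(n, shared_block_id, big_matrix.shape))
         for n in range(4)]
for p in procs:
    p.start()
for p in procs:
    p.join()

del shared_matrix        # drop the exported view before closing
shared_block.close()
shared_block.unlink()    # free the block once all workers have exited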
Example #12
# Test of how the virtual environment process might pass data to and from
# the simbox parent process. **Don't run this file as-is**; run it in two
# separate Python processes (simplest: two shells), entering code in each
# process in order, going down the page.
# This seems to work great!

# -=-=- Process 1 -=-=-

import numpy as np
import multiprocessing.shared_memory as sm

# Set up and initialize shared array
# Note: mem size and array shape and dtype should be determined for our
# specific purposes. Here it's an array of 6 bools (1 byte each)
mem = sm.SharedMemory(create=True, name="valve_states", size=6)
valves = np.ndarray((6, ), dtype=np.bool_, buffer=mem.buf)
valves[:] = [False, False, True, False, False, True]
# valves is now useable as an array in Process 1.

# -=-=- Process 2 -=-=-

import numpy as np
import multiprocessing.shared_memory as sm

# Create array using the same shared memory
# Note that the shape and dtype should be the same as defined above
mem = sm.SharedMemory(name="valve_states")
valves = np.ndarray((6, ), dtype=np.bool_, buffer=mem.buf)
# valves is now useable as an array in Process 2. It shares the same data
# as the array in Process 1
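Neither process above releases the block. A sketch of the missing cleanup, following the convention that the creating process unlinks:

# -=-=- Process 2, when done -=-=-
del valves    # drop the exported view first, or close() raises BufferError
mem.close()

# -=-=- Process 1, after Process 2 has detached -=-=-
del valves
mem.close()
mem.unlink()  # frees "valve_states" for good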
Example #13
	if t_end-window_start_time<0:
		xgrid   =  int(gridnumber)
	else: 
		xgrid   =  int(gridnumber + c*(t_end-window_start_time)/delta_x)

####################
	x_interval=const.x_interval      #10
	t_total=1e15*x_end/c         #fs
	t_size=int(t_total/dt)+1+1   

###### array definitions
	SHAPE = (int(xgrid/x_interval)+1, t_size)
	#xt = Array('f',SHAPE)
	#global a
	a = np.zeros(SHAPE)
	shm = shared_memory.SharedMemory(create=True, size=a.nbytes)
	#xt = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
	#xt[:,:]=a[:,:]
	#import multiprocessing 
	#import time
	#import numpy as np 


	#global_arr_shared = None

	#def init_pool(arr_shared):
	#	global global_arr_shared
	#	global_arr_shared = arr_shared

	#def worker(i):
	#	arr = np.frombuffer(global_arr_shared, np.double).reshape(SHAPE)
Example #14
    def midi_worker_main(mem1, mem2, cfg, q):
        # noinspection PyBroadException
        try:
            shm1 = shared_memory.SharedMemory(mem1)
            shm2 = shared_memory.SharedMemory(mem2)

            def update_status(txt, val):
                write_shm_text(shm1, txt)
                write_shm_long(shm2, val)

            update_status("<b>准备:</b>加载插件。", 0)

            plugins = []

            def _load_module(type_):
                _names = cfg['midi'][f'{type_}s']

                for name in _names:
                    _pkg_name, klass_name = name.split(".")
                    _pkg = importlib.import_module(f"mid.{type_}s.{_pkg_name}")
                    klass = getattr(_pkg, klass_name)
                    args = cfg['midi'][f'{type_}s'][name]
                    real_args = {}

                    for arg, value in args.items():
                        arg = arg.replace(
                            f"#{type_}-cfg-{_pkg_name}-{klass_name}-", "")
                        if not (isinstance(value, str) and value.strip()):
                            continue
                        real_args[arg] = j if (j := eval_or_null(value, vars(_pkg))) \
                                              is not NotImplemented else value
                    try:
                        instance = klass(**real_args)
                    except TypeError:
                        q.put(
                            f"Guess: could not initialise '{name}' because an argument has the wrong type or is missing.\n{traceback.format_exc()}"
                        )
                        return None
                    except AttributeError:
                        q.put(
                            f"Guess: could not initialise '{name}' because the specified class or module could not be found.\n{traceback.format_exc()}"
                        )
                        return None
                    yield instance

            plugins.extend(_load_module("plugin"))

            fp = cfg["midi"]["path"]
            ig = InGameGenerator

            if cfg["midi"]["generate_type"] == "rt":
                update_status("<b>准备:</b>创建生成器实例。", 0)
                try:
                    gen = RealTimeGenerator(fp=fp,
                                            plugins=plugins,
                                            namespace=cfg["func_namespace"])
                except FileNotFoundError:
                    q.put(
                        f"Guess: could not create the generator instance because the MIDI file could not be found.\n{traceback.format_exc()}"
                    )
                    return None
                except PermissionError:
                    q.put(
                        f"Guess: could not create the generator instance because the MIDI file could not be read.\n{traceback.format_exc()}"
                    )
                    return None
                except (OSError, IOError):
                    q.put(
                        f"Guess: could not create the generator instance because the MIDI file is invalid.\n{traceback.format_exc()}")
                    return None

            elif cfg["midi"]["generate_type"] == "ig":
                update_status("<b>准备:</b>加载前端。", 0)

                names = cfg['midi']['frontend']
                pkg_name = names.split(".")[0]
                frn_name = names.split(".")[1]
                pkg = importlib.import_module(f"mid.frontends.{pkg_name}")
                try:
                    frontend = getattr(pkg, frn_name)
                except AttributeError:
                    q.put(
                        f"Guess: could not initialise frontend '{frn_name}' because the specified frontend could not be found.\n{traceback.format_exc()}"
                    )
                    return None

                update_status("<b>准备:</b>加载中间件。", 0)
                middles = []
                middles.extend(_load_module("middle"))

                update_status("<b>准备:</b>创建生成器实例。", 0)
                try:
                    gen = InGameGenerator(fp=fp,
                                          frontend=frontend,
                                          plugins=plugins,
                                          middles=middles,
                                          namespace=cfg["func_namespace"])
                except FileNotFoundError:
                    q.put(
                        f"Guess: could not create the generator instance because the MIDI file could not be found.\n{traceback.format_exc()}"
                    )
                    return None
                except PermissionError:
                    q.put(
                        f"Guess: could not create the generator instance because the MIDI file could not be read.\n{traceback.format_exc()}"
                    )
                    return None
                except (OSError, IOError):
                    q.put(
                        f"Guess: could not create the generator instance because the MIDI file is invalid.\n{traceback.format_exc()}")
                    return None

                try:
                    gen.gvol_enabled = not cfg["midi"]["overrides"]["gvol"]
                    gen.prog_enabled = not cfg["midi"]["overrides"]["program"]
                    gen.phase_enabled = not cfg["midi"]["overrides"]["phase"]
                    gen.pitch_enabled = not cfg["midi"]["overrides"]["pitch"]
                    gen.pitch_factor = float(cfg["midi"]["pitch_factor"])
                    gen.volume_factor = float(cfg["midi"]["volume_factor"])
                except ValueError:
                    q.put(f"猜测:无法配置生成器实例,因为无法解析浮点数。\n{traceback.format_exc()}")
                    return None

            else:
                raise RuntimeError("不合法的生成类型。")

            try:
                gen.wrap_length = round(float(cfg["midi"]["wrap_length"]))
                gen._use_function_array = cfg["midi"]["use_func"]
                gen._auto_function_array = cfg["midi"]["auto_func"]
                gen.blank_ticks = round(float(cfg["midi"]["blank_ticks"]))
                gen.tick_rate = float(cfg["midi"]["tick_rate"])
                gen.tick_scale = float(cfg["midi"]["tick_scale"])
            except ValueError:
                q.put(f"猜测:无法配置生成器实例,因为无法解析浮点数。\n{traceback.format_exc()}")
                return None

            if (i := cfg["midi"]["auto_adjust"])["is_enabled"]:
                update_status("<b>生成:</b>自动调整参数。", 20)
                try:
                    gen.auto_tick_rate(base=float(i["base"]),
                                       step=float(i["step"]),
                                       tolerance=float(i["tolerance"]))
                except ValueError:
                    q.put(f"猜测:无法自动调整参数,因为无法解析浮点数。\n{traceback.format_exc()}")
                    return None
Example #15
def cluster_proc(q_lett, digs, acc, path_data, shared_list, bordes, std_ini,
                 cluster_report_dicc, report_dicc_circula):

    q_db = collections.deque()

    extendidos = {}

    existing_shm = shared_memory.SharedMemory(name='memo04_wf1460')
    existing_shm_bin = shared_memory.SharedMemory(name='memo04_wf1460_bin')

    c = np.ndarray((1000, end_bin), dtype=np.float64, buffer=existing_shm.buf)
    c_bin = np.ndarray((1000, end_bin),
                       dtype=np.float64,
                       buffer=existing_shm_bin.buf)

    #    q.put('a dta es')

    print('leaving process 2 to sleep:', c[-20:, -1])
    #    du=collections.deque()
    #    da=collections.deque()
    #    std_ini=0
    bins = bins_constant
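    # try the known bin counts until one matches the data layout on disk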
    try:
        bins = 22201
        big_wf = load_data(std_ini, bins, path_data)
    except:
        pass

    try:
        bins = 22500
        big_wf = load_data(std_ini, bins, path_data)
    except:
        pass

    try:
        bins = 14261
        big_wf = load_data(std_ini, bins, path_data)
    except:
        pass

    bin_filt_1 = 25
    bin_filt_2 = 15
    bin_filt_3 = 8
    posicion1 = 5500
    posicion2 = 8700

    desplazo = 10
    Niter = int(big_wf.shape[0] / desplazo)

    #    pi=len(du)

    print('finished loading the data')
    os.chdir(desvios_path)
    mad_data = np.load("desvios.npy")
    median_data = np.load("medias.npy")
    base_fin = median_data.size

    c[:, :] = np.eye(N=1000, M=end_bin) * 0.2

    wf_actual = c.copy()
    wf_actual_bin = c_bin.copy()

    print('before entering the loop', wf_actual.shape)
    tiempo = 0

    medianM = np.zeros((desplazo, end_bin))
    madM = np.zeros((desplazo, end_bin))
    for jj in range(desplazo):
        medianM[jj, :] = median_data[:end_bin]
        madM[jj, :] = mad_data[:end_bin]

    flag_forward = True
    flag_backward = False
    flag_next = False
    flag_previous = False

    jkl = -1
    while jkl < Niter:
        #        jkl+=1
        #        time.sleep(0.4)

        if q_lett.qsize() > 0:
            print('reading the key queue')
            letter = q_lett.get_nowait()
            if letter == 'f':
                flag_forward = True
                flag_backward = False
                flag_next = False
                flag_previous = False
            if letter == 'b':
                flag_forward = False
                flag_backward = True
                flag_next = False
                flag_previous = False
#                advance the data
            if letter == 'n':
                flag_forward = False
                flag_backward = False
                flag_next = True
                flag_previous = False
#                advance the data
            if letter == 'a':
                flag_forward = True
                flag_backward = False
                flag_next = False
                flag_previous = True
#                advance the data
            if letter == 'p':
                flag_forward = False
                flag_backward = False
                flag_next = False
                flag_previous = False
#                advance the data

        if not flag_forward and not flag_backward and not flag_next and not flag_previous:
            time.sleep(0.5)

        if flag_forward:
            jkl += 1
            tiempo = jkl
            wf_actual = np.roll(wf_actual, -desplazo, axis=0)
            wf_actual_bin = np.roll(wf_actual_bin, -desplazo, axis=0)
            #                st3=time.time()
            wf_actual[-desplazo:, :] = big_wf[tiempo * desplazo:(tiempo + 1) *
                                              desplazo, :]
            MAD = np.abs(big_wf[tiempo * desplazo:(tiempo + 1) * desplazo, :] -
                         medianM) / madM
            wf_actual_bin[
                -desplazo:, :posicion1] = MAD[:, :posicion1] > bin_filt_1
            wf_actual_bin[
                -desplazo:,
                posicion1:posicion2] = MAD[:, posicion1:posicion2] > bin_filt_2
            wf_actual_bin[-desplazo:,
                          posicion2:] = MAD[:, posicion2:] > bin_filt_3

        if flag_backward:
            jkl -= 1
            tiempo = jkl
            wf_actual = np.roll(wf_actual, desplazo, axis=0)
            wf_actual_bin = np.roll(wf_actual_bin, desplazo, axis=0)
            #                st3=time.time()
            wf_actual[:desplazo, :] = big_wf[tiempo * desplazo:(tiempo + 1) *
                                             desplazo, :]
            MAD = np.abs(big_wf[tiempo * desplazo:(tiempo + 1) * desplazo, :] -
                         medianM) / madM
            wf_actual_bin[:desplazo, :
                          posicion1] = MAD[:, :posicion1] > bin_filt_1
            wf_actual_bin[:desplazo, posicion1:
                          posicion2] = MAD[:, posicion1:posicion2] > bin_filt_2
            wf_actual_bin[:desplazo,
                          posicion2:] = MAD[:, posicion2:] > bin_filt_3

        if flag_next:
            jkl += 1
            tiempo = jkl
            flag_next = False

        if flag_previous:
            jkl -= 1
            tiempo = jkl
            flag_previous = False

        ### advance in time, whichever evolution flag is set ...
        ### clusters and their properties are recomputed here because the time changed
        if flag_forward or flag_backward or flag_next or flag_previous:
            keyboard.press_and_release(
                'k'
            )  ## with k letter, the other process executes plot_binary to update the mpl figure
            #            k

            for key, group in groupby(
                    sorted(list(extendidos.keys()), key=lambda x: x[0]),
                    lambda x: x[0]):
                for thing in group:
                    try:
                        clave = max([item for item in group],
                                    key=itemgetter(1))
                        print("grouped: %s : %s " % (key, clave))

                        print(clave)
                        if min(extendidos[clave][0][:, 1]) < desplazo:
                            cluster,cs,not_cs,fit,ll,w,h,good_fit, edgecolor, \
                            txt_c, fontsize,velocidad, txt_data,\
                            delta_posicion, intensidad_posicion, delta_tiempo, intensidad_tiempo= extendidos[clave]

                            cluster[:, 1] -= desplazo

                            extendidos[clave]= cluster,cs,not_cs,fit,ll,w,h,good_fit, edgecolor, \
                            txt_c, fontsize,velocidad, txt_data,\
                            delta_posicion, intensidad_posicion, delta_tiempo, intensidad_tiempo
                    except:
                        pass

            time_da = tiempo

            ctr = 0
            threshold = 0.03
            # cluster parameters for the first filtering pass
            eps = 4  #15
            min_samples = 21  #250
            filas_imagen_binaria = 1000
            size_wf = (filas_imagen_binaria, median_data.size)

            startu = time.time()
            pts_cluster = np.transpose(
                np.array(((np.nonzero(wf_actual_bin)[1],
                           np.nonzero(wf_actual_bin)[0]))))

            if len(pts_cluster.shape) > 1:
                db = DBSCAN(eps=eps, min_samples=min_samples).fit(pts_cluster)
            else:
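                # NOTE: if no points are found on the very first pass, `db`
                # is still undefined when calculo_cluster_properties runs below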
                time.sleep(0.01)

            finu = time.time()

            rr, noise_pts = calculo_cluster_properties(db, pts_cluster,
                                                       wf_actual)

            gnu = time.time()

            #            shared_list=[]
            ### extend the clusters

            flag_extend = True
            while flag_extend:
                rr, noise_pts, flag_extend = extiendo_clusters(
                    rr, noise_pts, db, pts_cluster, wf_actual,
                    cluster_report_dicc)
                print(flag_extend)

            ### check the reported clusters that are extended and not cut off
            ### tiempo is the time at the bottom
            ### tiempo - 1000 is the time at the top
            lista_posiciones = [
                int(item[4][0]) if item[11] > 0 else int(item[4][0] + item[5])
                for item in rr
            ]

            for p, n in list(cluster_report_dicc.keys())[::]:

                #                extendidos[p,n,]

                if p in lista_posiciones:  # print (p)
                    print(p)
                    idx_cluster_to_modify = np.where(
                        np.array(lista_posiciones) == p)[0][0]
                    tup = rr[idx_cluster_to_modify]
                    cluster,cs,not_cs,fit,ll,w,h,good_fit, edgecolor, \
                    txt_c, fontsize,velocidad, txt_data,\
                    delta_posicion, intensidad_posicion, delta_tiempo, intensidad_tiempo=tup
                    #                    print (np.min(cluster[:,1]) , 'jjkl el cluster es;,', cluster[:,1])
                    #                    min_tiem=
                    keys = (p, n, (tiempo + 1) * desplazo - 1000 +
                            np.min(cluster[:, 1]))

                    if good_fit: extendidos[keys] = tup
#                        extendidos[keys]= cluster,cs,not_cs,fit,ll,w,h,good_fit, edgecolor, \
#                        txt_c, fontsize,velocidad, txt_data,\
#                        delta_posicion, intensidad_posicion, delta_tiempo, intensidad_tiempo=tup

#                        for tup in rr:
#                        shared_list.append(tup)

            print('')
            print('')
            print('tiempo:', tiempo * desplazo)
            #            for things in extendidos.keys():
            print('extended cluster keys:', extendidos.keys())
            for key, group in groupby(
                    sorted(list(extendidos.keys()), key=lambda x: x[0]),
                    lambda x: x[0]):

                for thing in group:
                    try:
                        clave = max([item for item in group],
                                    key=itemgetter(1))
                        print("grouped: %s : %s " % (key, clave))

                        print(clave)
                        if min(extendidos[clave][0][:, 1]) < desplazo:
                            cluster,cs,not_cs,fit,ll,w,h,good_fit, edgecolor, \
                            txt_c, fontsize,velocidad, txt_data,\
                            delta_posicion, intensidad_posicion, delta_tiempo, intensidad_tiempo= extendidos[clave]

                        print('cluster time limits',
                              min(extendidos[clave][0][:, 1]),
                              max(extendidos[clave][0][:, 1]))

                        shared_list.append(extendidos[clave])

                    except:
                        pass
            print('')
            print('')
            #            time.sleep(1)

            print('updating shm c, ___ iteration n:',
                  jkl)  # (jkl * desplazo is the time executed)
            c[:, :] = wf_actual.copy()
            c_bin[:, :] = wf_actual_bin.copy()

            print('DBSCAN time is:', finu - startu)
            print('cluster_ properties time is:', gnu - finu)

            ##            rr, noise_pts= extiendo_clusters(rr, noise_pts, db, pts_cluster,wf_actual, cluster_report_dicc,ax)
            #            ### iterate over all the clusters here; this is where they have to be extended
            #            ### in the cluster fit....
            ##            for cluster,cs,not_cs,fit,ll,w,h,good_fit,edgecolor,txt_c,fontsize,velocidad, txt_data in rr:
            ##%%
            #            bordea=[]
            ### here we only classify according to the characteristics
            for tup in rr:
                #                ### unpacking tup
                cluster,cs,not_cs,fit,ll,w,h,good_fit, edgecolor, \
                txt_c, fontsize,velocidad, txt_data,\
                delta_posicion, intensidad_posicion, delta_tiempo, intensidad_tiempo=tup
                #                jom+=1
                #                rect_clust=ptc.Rectangle(ll,w,h,facecolor='none',edgecolor=edgecolor)
                #                ax.add_patch(rect_clust)
                #                rect_clust_bin=ptc.Rectangle(ll,w,h,facecolor='none',edgecolor=edgecolor)
                ##                ax_bin.add_patch(rect_clust_bin)
                #                bordes.append(rect_clust)
                ##                ax.plot(cluster[:,0],cluster[:,1],'o')
                #                tr=(ll[0]+w,ll[1] + h)
                #                ax.annotate(delta_tiempo,tr)
                #
                if h > 49 and 2000 > w > 10:
                    #                    rect_clust=ptc.Rectangle(ll,w,h,facecolor='none',edgecolor=edgecolor)
                    #                    ax.add_patch(rect_clust)
                    #                    rect_clust_bin=ptc.Rectangle(ll,w,h,facecolor='none',edgecolor=edgecolor)
                    #                    ax_bin.add_patch(rect_clust_bin)
                    #                    bordes.append(rect_clust)

                    tr = (ll[0] + w, ll[1] + h)
                    #                    ax.annotate(delta_posicion,tr)
                    pts_cant = cluster.size

                    #                    ax.plot()
                    ### for now, extend only the large clusters
                    ### maybe they should instead be extended in calculo_cluster_properties
                    #                    yoffset=[0, 10,20,30,40]

                    #                    if good_fit:
                    #                        for off in yoffset:
                    #                            w_clus= max(cluster[:,0]) - min(cluster[:,0])
                    #                            if velocidad<0: xfit= np.arange(min(cluster[:,0])- w ,max(cluster[:,0]))
                    #                            if velocidad>0: xfit= np.arange(min(cluster[:,0]), max(cluster[:,0]) + w)
                    #                            yfit= fit(xfit)
                    #                            ax.plot(xfit,yfit+off)
                    #                            ax_bin.plot(xfit,yfit+off)
                    #                            if velocidad<0:
                    #                                ax.add_patch(ptc.Rectangle(ll-(5,-h),10,off,facecolor='none',
                    #                                                           edgecolor=edgecolor))
                    ##                                ax_bin.add_patch(ptc.Rectangle(ll-(5,-h),10,off,facecolor='none',edgecolor=edgecolor))
                    #                            if velocidad>0:
                    #                                ax.add_patch(ptc.Rectangle(tr+(5,0)),10,off,facecolor='none',
                    #                                             edgecolor=edgecolor)
                    #                                ax_bin.add_patch(ptc.Rectangle(tr+(5,0)),10,off,facecolor='none',edgecolor=edgecolor)

                    ## reporte_db yields the vehicle
                    ## cluster_report checks whether it is an update or a finalisation
                    ## st is the update type, st_cant the update count
                    if velocidad > 0:
                        st, st_cant = cluster_report(velocidad, pts_cant, ll,
                                                     h, cluster_report_dicc)
                        vh = reporte_db(st, st_cant, velocidad, good_fit, ll,
                                        h, w, q_db, report_dicc_circula,
                                        time_da)
                    if velocidad < 0:
                        st, st_cant = cluster_report(velocidad, pts_cant,
                                                     (ll[0] + w, ll[1]), h,
                                                     cluster_report_dicc)
                        vh = reporte_db(st, st_cant, velocidad, good_fit,
                                        (ll[0] + w, ll[1]), h, w, q_db,
                                        report_dicc_circula, time_da)
                    if velocidad == 0:
                        vh = 'nny'
                        st = 'none'
                        st_cant = '-3'

                    if (edgecolor == 'r'
                            or edgecolor == 'g') and velocidad != 0:

                        ann = (vh, st, st_cant, txt_data, tr, txt_c, fontsize)
                        bordes.append(ann)

                        texto_dic = str(vh + '\n' + st + ':' + str(st_cant) +
                                        '\n' + str(txt_data))
                        texto_dic = {
                            'vehiculo': str(vh),
                            'st': st,
                            'st_cant': st_cant,
                            'w': str(int(w)),
                            'h': int(h),
                            'lower': ll[0],
                            'left': ll[1],
                            'velocidad': str(round(velocidad, 3)),
                            'ajuste': str(round((h / w) / fit[1], 3))
                        }
Example #16
def _open(self):
    if self.shm_name and not self.memory:
        self.memory = shm.SharedMemory(name=self.shm_name)
    return self.memory
Example #17
def init_data_block_sg_fft(sg_window,
                           band,
                           in_dir_qs,
                           in_dir_tf,
                           tile,
                           list_qual,
                           list_data,
                           num_ob_buf_bytes,
                           fit_nr,
                           name_weights_addition,
                           master_raster_info,
                           poly=False):
    """

    Parameters
    ----------
    sg_window
    band
    in_dir_qs
    in_dir_tf
    tile
    list_qual
    list_data
    num_ob_buf_bytes
    fit_nr
    name_weights_addition
    master_raster_info

    Returns
    -------

    """

    #data_block = numpy.zeros([sg_window, master_raster_info[2], master_raster_info[3]])
    shm = shared_memory.SharedMemory(create=True, size=num_ob_buf_bytes)
    shm_qual = shared_memory.SharedMemory(create=True, size=num_ob_buf_bytes)
    if poly:
        data_block = numpy.ndarray(
            (sg_window, master_raster_info[2], master_raster_info[3]),
            dtype=numpy.float64,
            buffer=shm.buf)
        qual_block = numpy.ndarray(
            (sg_window, master_raster_info[2], master_raster_info[3]),
            dtype=numpy.float64,
            buffer=shm_qual.buf)
    else:

        data_block = numpy.ndarray(
            (sg_window, master_raster_info[2], master_raster_info[3]),
            dtype=numpy.int16,
            buffer=shm.buf)
        qual_block = numpy.ndarray(
            (sg_window, master_raster_info[2], master_raster_info[3]),
            dtype=numpy.int16,
            buffer=shm_qual.buf)

    print("\n# START READING SATDATA for BAND {}".format(band))
    for i in range(0, sg_window, 1):

        # load qual file
        try:
            qual_ras = gdal.Open(os.path.join(in_dir_qs, tile, list_qual[i]),
                                 gdal.GA_ReadOnly)

            print("# load qual data for band %d: %s" % (band, list_qual[i]))
            qual_block[i, :, :] = qual_ras.ReadAsArray()

            del qual_ras
        except Exception as ErrorQualRasReading:
            print("### ERROR while reading quality raster:\n {}".format(
                ErrorQualRasReading))
        # load satellite data
        try:
            data_ras = gdal.Open(os.path.join(in_dir_tf, tile, list_data[i]),
                                 gdal.GA_ReadOnly)

            print("# load sat data for band %s: %s" %
                  (str(band), list_data[i]))
            #data_band = data_ras.GetRasterBand(1)
            data_block[i, :, :] = data_ras.ReadAsArray()

            # collect epochs raster name
            if fit_nr == i:
                print("\n# Name of fitted tile will be:  {} \n".format(
                    os.path.join(tile, list_data[i])))

                fitted_raster_band_name = list_data[
                    i][:-4] + name_weights_addition % str(sg_window)

            del data_ras

        except Exception as ErrorRasterDataReading:
            print("### ERROR while reading satellite raster:\n {}".format(
                ErrorRasterDataReading))

    print("data_block from readout: ", data_block[:, 2000, 100])
    return data_block, qual_block, shm, shm_qual, fitted_raster_band_name
Example #18
def create_shm(self):
    self.shm = shared_memory.SharedMemory(name=self.name,
                                          create=True,
                                          size=self.capacity)
    self.holder = True
Example #19
def init_data_block_dft(sg_window, band, in_dir_qs, in_dir_tf, tile, list_qual,
                        list_data, num_ob_buf_bytes, master_raster_info):
    """
    Creates a initial datablock for the modis data and returns a numpy ndim array
    Parameters
    ----------
    sg_window
    band
    in_dir_qs
    in_dir_tf
    tile
    list_qual
    list_data
    device
    master_raster_info
    fit_nr
    name_weights_addition

    Returns
    -------

    """

    #data_block = numpy.zeros([sg_window, master_raster_info[2], master_raster_info[3]])

    shm = shared_memory.SharedMemory(create=True, size=num_ob_buf_bytes)
    data_block = numpy.ndarray(
        (sg_window, master_raster_info[2], master_raster_info[3]),
        dtype=numpy.float64,
        buffer=shm.buf)

    shm_qual = shared_memory.SharedMemory(create=True, size=num_ob_buf_bytes)
    qual_block = numpy.ndarray(
        (sg_window, master_raster_info[2], master_raster_info[3]),
        dtype=numpy.float64,
        buffer=shm_qual.buf)

    print("\n# START READING SATDATA for BAND {}".format(band))
    for i in range(0, len(list_data), 1):

        # load qual file
        try:
            qual_ras = gdal.Open(os.path.join(in_dir_qs, tile, list_qual[i]),
                                 gdal.GA_ReadOnly)

            #print("load qual data for band %d: %s" % (band, list_qual[i]))
            #qual_band = qual_ras.GetRasterBand(1)
            qual_block[i, :, :] = qual_ras.ReadAsArray()

            del qual_ras
        except Exception as ErrorQualRasReading:
            print("### ERROR while reading quality raster:\n {}".format(
                ErrorQualRasReading))
        # load satellite data
        try:
            data_ras = gdal.Open(os.path.join(in_dir_tf, tile, list_data[i]),
                                 gdal.GA_ReadOnly)

            print("# load sat data for band %s: %s" %
                  (str(band), list_data[i]))
            data_block[i, :, :] = data_ras.ReadAsArray()

            del data_ras

        except Exception as ErrorRasterDataReading:
            print("### ERROR while reading satellite raster:\n {}".format(
                ErrorRasterDataReading))

    print("data_block from readout: ", data_block[:, 2000, 100])
    return data_block, qual_block, shm, shm_qual
Exemple #20
0
def load_shm(self):
    # attach to an existing shared block created by another process
    self.shm = shared_memory.SharedMemory(name=self.name, create=False)
    self.holder = False
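
create_shm and load_shm form an owner/attacher pair, with the holder flag recording which side may unlink. A matching teardown might look like this (a sketch; the close_shm name is hypothetical):

def close_shm(self):
    # both sides close their own handle; only the creating side
    # (the holder) removes the segment system-wide
    self.shm.close()
    if self.holder:
        self.shm.unlink()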
Exemple #21
0
from multiprocessing import shared_memory
import time

shm_a = shared_memory.SharedMemory(name="memtest", create=True, size=1)
print(shm_a)

# the buffer is zero-initialised on creation
print(shm_a.buf[0])

print(shm_a.name)

# write a value into the shared byte; another process can attach to
# "memtest" by name and change it while the loop below runs
shm_a.buf[0] = 10

try:
    for i in range(40):
        print("i = " + str(i) + ", a = " + str(shm_a.buf[0]))
        time.sleep(1)
except KeyboardInterrupt:
    pass
finally:
    shm_a.close()
    shm_a.unlink()
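
A second process can attach to the same block by its fixed name and change the byte while the loop above is printing. A minimal companion sketch, assuming the writer above is already running:

from multiprocessing import shared_memory

# attach to the existing "memtest" block created by the script above
shm_b = shared_memory.SharedMemory(name="memtest", create=False)
shm_b.buf[0] = 42   # the other process's loop now prints a = 42
shm_b.close()       # close this handle; the creator unlinks the segment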
Exemple #22
0
    async def check_for_file_transfer_requests(self):
        # 2. `Y -> S`: every one second, Y asks server for any requests
        await self.write(bytes(FileTransferRequestResponsePackets()))

        # 3. `S -> X/F -> Y`: server responds with active requests
        file_transfer_requests = FileTransferCheckRequestsPackets(
            data=(await self.read())[4:]).requests
        if not file_transfer_requests:
            return

        print("Incoming file transfer request(s):")
        index_to_email = dict()
        index_to_file_info = dict()
        i = 1
        for email, file_info in file_transfer_requests.items():
            print("\t{}. {}".format(i, email))
            print("\t\tname: ", file_info["name"])
            print("\t\tsize: ", sizeof_fmt(int(file_info["size"])))
            print("\t\tSHA256: ", file_info["SHA256"])
            index_to_email[i] = email
            index_to_file_info[i] = file_info
            i += 1

        try:
            selection = input(
                "\nEnter the number for which request you'd like to accept, or 0 to deny all: "
            )
            accept = True
            selection_num = int(selection)
            if selection_num <= 0 or selection_num >= i:
                raise ValueError

            packets = FileTransferAcceptRequestPackets(
                index_to_email[selection_num])
        except (ValueError, KeyboardInterrupt):
            packets = FileTransferAcceptRequestPackets("")
            accept = False

        if accept:
            while True:
                out_directory = input("Enter the output directory: ")
                file_path = os.path.join(
                    out_directory, index_to_file_info[selection_num]["name"])
                if not os.path.isdir(out_directory):
                    print("The path {} is not a directory".format(
                        os.path.abspath(out_directory)))
                elif os.path.exists(file_path):
                    print("The file {} already exists".format(file_path))
                elif not os.access(out_directory, os.X_OK | os.W_OK):
                    print(
                        "Cannot write to file path {}: permission denied.".format(
                            file_path))
                else:
                    break

        # 4. `Y -> Yes/No -> S`: Y accepts or denies transfer request
        await self.write(bytes(packets))
        if not accept:
            return False

        # 5. `S -> Token -> Y`: if Y accepted, the server sends a unique token to Y
        token = FileTransferSendTokenPackets(
            data=(await self.read())[4:]).token

        lock = Lock()
        progress = shared_memory.SharedMemory(create=True, size=8)
        server_sentinel = shared_memory.SharedMemory(create=True, size=1)
        status_sentinel = shared_memory.SharedMemory(create=True, size=1)
        listen_port = shared_memory.SharedMemory(create=True, size=4)

        # 6. `Y -> Port -> S`: Y binds to 0 (OS chooses) and sends the port it's listening on to S
        p2p_server = P2PServer(token, os.path.abspath(out_directory),
                               progress.name, lock, listen_port.name,
                               status_sentinel.name)
        p2p_server_process = Process(target=p2p_server.run,
                                     args=(0, server_sentinel.name))
        p2p_server_process.start()

        print("Started P2P server. Waiting for listen...")

        # spin until the P2P server publishes the port it is listening on
        port = 0
        while port == 0:
            with lock:
                port = int.from_bytes(listen_port.buf, byteorder='little')

        await self.write(bytes(FileTransferSendPortPackets(port)))

        # Wait until file received

        time_start = time.time()

        chunk_size = FILE_TRANSFER_P2P_CHUNK_SIZE

        def unguarded_print_received_progress(final=False):
            utils.print_status(
                *utils.get_progress(
                    int.from_bytes(progress.buf[0:4], byteorder='little'),
                    int.from_bytes(progress.buf[4:8], byteorder='little'),
                    chunk_size), "received", final)

        def print_received_progress():
            while True:
                with lock:
                    if status_sentinel.buf[0] == 1:
                        break
                    unguarded_print_received_progress()
                time.sleep(0.03)

        status_thread = Thread(target=print_received_progress)
        status_thread.start()
        try:
            p2p_server_process.join()
        except KeyboardInterrupt:
            raise RuntimeError("User requested abort")
        finally:
            if p2p_server_process.is_alive():
                p2p_server_process.terminate()
            with lock:
                status_sentinel.buf[0] = 1
            status_thread.join()
            unguarded_print_received_progress(final=True)
            progress.close()
            progress.unlink()
            server_sentinel.close()
            server_sentinel.unlink()
            status_sentinel.close()
            status_sentinel.unlink()
            listen_port.close()
            listen_port.unlink()

            time_end = time.time()

        print("File transfer completed successfully in {} seconds.".format(
            time_end - time_start))
        return True
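Both sides of the transfer treat the 8-byte progress block as two little-endian uint32 counters (an assumption read off the from_bytes slices above). A self-contained sketch of that encoding:

from multiprocessing import shared_memory

progress = shared_memory.SharedMemory(create=True, size=8)

def write_progress(done, total):
    # pack two little-endian 32-bit counters into the 8-byte block
    progress.buf[0:4] = done.to_bytes(4, byteorder='little')
    progress.buf[4:8] = total.to_bytes(4, byteorder='little')

def read_progress():
    return (int.from_bytes(progress.buf[0:4], byteorder='little'),
            int.from_bytes(progress.buf[4:8], byteorder='little'))

write_progress(3, 10)
print(read_progress())   # (3, 10)
progress.close()
progress.unlink()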
Exemple #23
0
import sys
import logging

import numpy as np

sys.path.insert(1, '/home/solcanma/darknet')
import darknet
from multiprocessing import Process, Manager
from multiprocessing import shared_memory

manager = Manager()
manager_detections = manager.list()
from pyimagesearch.centroidtracker import *
from distance import *
from navigate import *
from map import *

shm = shared_memory.SharedMemory(create=True,
                                 size=6520800,
                                 name='psm_c013ddb5')
shm_image = np.ndarray((Yresolution, Xresolution, 3),
                       dtype=np.uint8,
                       buffer=shm.buf)
logging.basicConfig(
    level=logging.DEBUG,
    format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)

def convertBack(x, y, w, h):
    # convert a centre-based box (x, y, w, h) to corner coordinates
    xmin = int(round(x - (w / 2)))
    xmax = int(round(x + (w / 2)))
    ymin = int(round(y - (h / 2)))
    ymax = int(round(y + (h / 2)))
    return xmin, ymin, xmax, ymax
Exemple #24
0
    async def send_file(self):
        msg = None
        try:
            # 1. `X -> Y/F -> S`: X wants to send F to Y

            recipient_email = input("Enter the recipient's email address: ")
            file_path = os.path.abspath(input("Enter the file path: "))
            valid_email = validate_and_normalize_email(recipient_email)
            if valid_email is None:
                raise RuntimeError("Invalid Email Address.")
            if not file_path:
                raise RuntimeError("Empty file path.")
            if not os.path.exists(file_path):
                raise RuntimeError("Cannot find file: {}".format(file_path))
            if not os.path.isfile(file_path):
                raise RuntimeError("Not a file: {}".format(file_path))

            file_base = os.path.basename(file_path)
            file_size = os.path.getsize(file_path)
            file_sha256 = sha256_file(file_path)
            file_info = {
                "name": file_base,
                "size": file_size,
                "SHA256": file_sha256,
            }

            # send request
            await self.write(
                bytes(FileTransferRequestPackets(valid_email, file_info)))

            # this only checks if the request is valid
            # this does not check if the recipient accepted or denied the request
            msg = StatusPackets(data=(await self.read())[4:]).message
            if msg != "":
                raise RuntimeError(msg)

            # 7. `S -> Token/Port -> X`: S sends the same token and port to X

            # denied request is indicated by empty token and port
            port_and_token = FileTransferSendPortTokenPackets(
                data=(await self.read())[4:])
            port, token = port_and_token.port, port_and_token.token
            if token and port:
                print(
                    "User {} accepted the file transfer. Connecting to recipient on port {}."
                    .format(valid_email, port))
            else:
                raise RuntimeError(
                    "User {} declined the file transfer request".format(
                        valid_email))

            progress = shared_memory.SharedMemory(create=True, size=8)
            progress_lock = Lock()
            p2p_client = P2PClient(port, token, file_path, file_size,
                                   file_sha256, progress.name, progress_lock)

            time_start = time.time()
            sentinel = False

            chunk_size = FILE_TRANSFER_P2P_CHUNK_SIZE

            def unguarded_print_sent_progress(final=False):
                utils.print_status(
                    *utils.get_progress(
                        int.from_bytes(progress.buf[0:4], byteorder='little'),
                        int.from_bytes(progress.buf[4:8], byteorder='little'),
                        chunk_size), "sent", final)

            def print_sent_progress():
                while not sentinel:
                    with progress_lock:
                        unguarded_print_sent_progress()
                    time.sleep(0.03)

            # i was having trouble with asyncio.gather, so just run status printer in a new thread
            status_thread = Thread(target=print_sent_progress)
            status_thread.start()

            # wait until p2p transfer completes, unless keyboard interrupt
            try:
                await p2p_client.main()
            except KeyboardInterrupt:
                raise RuntimeError("User requested abort")
            finally:
                sentinel = True
                status_thread.join()
                unguarded_print_sent_progress(final=True)
                progress.close()
                progress.unlink()
                time_end = time.time()

            print(
                "\nFile transfer completed in {} seconds.".format(time_end -
                                                                  time_start))

        except RuntimeError as e:
            msg = str(e)
        if msg != "":
            print("Failed to send file: ", msg)
Exemple #25
0
    def __init__(self):
        self.shm = shared_memory.SharedMemory(create=True, size=4)

        self.server = Server(21234)
        self.server.config()

def __init__(self):
    # attach to an existing shared block by its fixed name
    self._Shm = shared_memory.SharedMemory("_SharedMem_Modbus")

import random
from multiprocessing import shared_memory
from array import array
from util import wait, signal

mem = shared_memory.SharedMemory(name='prod_con_buffer')
buff = mem.buf.cast('i')
print(mem.name)
try:
    while True:
        print("{0:-^50}".format("PRODUCER"))
        seed = int(input("Enter a seed: "))
        wait(buff, 1)
        random.seed(seed)
        buff[0] = random.randrange(-2**31, 2**31 - 1)
        print("shared variable write: %d" % buff[0])
        signal(buff, 2)
        signal(buff, 1)
except KeyboardInterrupt:
    pass
del buff
mem.close()
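
The matching consumer attaches to the same 'prod_con_buffer' block, waits for the producer's signal, and reads the integer back. A hedged sketch: the slot numbers mirror the producer above, and the exact wait/signal semantics live in the project's util module:

from multiprocessing import shared_memory
from util import wait, signal

mem = shared_memory.SharedMemory(name='prod_con_buffer')
buff = mem.buf.cast('i')
try:
    while True:
        print("{0:-^50}".format("CONSUMER"))
        wait(buff, 2)    # block until the producer signals slot 2
        print("shared variable read: %d" % buff[0])
        signal(buff, 1)  # hand the buffer back to the producer
except KeyboardInterrupt:
    pass
del buff
mem.close()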
Exemple #28
0
def create_shared_memory(self):
    # create a 2-byte shared block and keep its name and buffer handy
    self.shared_memory_instance = shared_memory.SharedMemory(create=True,
                                                             size=2)
    self.shared_memory_name = self.shared_memory_instance.name
    self.shared_memory_buffer = self.shared_memory_instance.buf


def get_shared_memory_find_matches(processname, shared_memory_tag, ac_size):
    # attach to the block that holds the serialised Aho-Corasick automaton
    shm = shared_memory.SharedMemory(shared_memory_tag)
    AC_in_bytes = shm.buf[0:ac_size]
    ac_in_process = AC.from_buff(AC_in_bytes, copy=False)
    string_to_search = "asdpythonasdasdruby"
    print("Executing search in {}".format(processname))
    for id, start, end in ac_in_process.match(string_to_search):
        print(id, string_to_search[start:end])
    ac_in_process = None
    AC_in_bytes.release()  # must release the memoryview before closing the shm instance, otherwise an error is raised
    shm.close()
    
if __name__ == "__main__":
    shared_memory_tag = "shared_ac"
    # Put mm in shared memory of py3.8
    shm = shared_memory.SharedMemory(name=shared_memory_tag, create=True, size=ac_size)
    shm.buf[0:ac_size] = mm
    mm.close()
    processes_list = list()
    for x in range(0, 6):
        p = Process(
            target=get_shared_memory_find_matches,
            args=(
                "process_" + str(x),
                shared_memory_tag,
                ac_size
            )
        )
        p.start()
        processes_list.append(p)
    for p in processes_list:
        p.join()
Exemple #30
0
def create(self, name, size) -> memoryview:
    # create and register a new shared block; return its buffer view
    shm = shared_memory.SharedMemory(name=name, create=True, size=size)
    self.shm_store[name] = shm
    return shm.buf
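
A store like this usually needs attach and teardown counterparts. A possible sketch (the attach and destroy names are hypothetical, not from the original example):

def attach(self, name) -> memoryview:
    # attach to a block created elsewhere and keep the handle alive
    shm = shared_memory.SharedMemory(name=name, create=False)
    self.shm_store[name] = shm
    return shm.buf

def destroy(self, name):
    # drop our handle and, as the owning store, remove the block
    shm = self.shm_store.pop(name)
    shm.close()
    shm.unlink()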