Example no. 1
    def _sync(self):
        """
        Send output data and receive input data.

        Notes
        -----
        Assumes that the attributes used for input and output already
        exist.

        Each message is a tuple containing a module ID and data; for
        outbound messages, the ID is that of the destination module;
        for inbound messages, the ID is that of the source module.
        Data is serialized before being sent and unserialized when
        received.
        """

        if self.net in ['none', 'ctrl']:
            self.logger.info('not synchronizing with network')
        else:
            self.logger.info('synchronizing with network')

            # Send outbound data:
            if self.net in ['out', 'full']:

                # Send all data in outbound buffer:
                send_ids = list(self._out_ids)
                for out_id, data in self._out_data:
                    self.sock_data.send(msgpack.packb((out_id, data)))
                    send_ids.remove(out_id)
                    self.logger.info('sent to   %s: %s' % (out_id, str(data)))

                # Send data tuples containing None to those modules for which no
                # actual data was generated to satisfy the barrier condition:
                for out_id in send_ids:
                    self.sock_data.send(msgpack.packb((out_id, None)))
                    self.logger.info('sent to   %s: %s' % (out_id, None))

                # All output IDs should be sent data by this point:
                self.logger.info('sent data to all output IDs')

            # Receive inbound data:
            if self.net in ['in', 'full']:

                # Wait until inbound data is received from all source modules:
                while not all(self._in_data.values()):
                    # Use poller to avoid blocking:
                    if is_poll_in(self.sock_data, self.data_poller):
                        in_id, data = msgpack.unpackb(self.sock_data.recv())
                        self.logger.info('recv from %s: %s ' %
                                         (in_id, str(data)))

                        # Ignore incoming data containing None:
                        if data is not None:
                            self._in_data[in_id].append(data)

                    # Stop the synchronization if a quit message has been received:
                    if not self.running:
                        self.logger.info('run loop stopped - stopping sync')
                        break
                self.logger.info('recv data from all input IDs')
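For reference, a minimal standalone sketch (not part of the original class) of the (module ID, data) wire format described in the docstring above; the module IDs and payload are made up for illustration:

import msgpack

# Outbound: serialize a (destination module ID, data) tuple before sending.
packed = msgpack.packb(('module_B', [1.0, 2.0, 3.0]))

# Inbound: the receiver recovers a (source module ID, data) tuple;
# msgpack returns the packed tuple as a list by default.
in_id, data = msgpack.unpackb(packed)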
Example no. 2
    def _sync(self):
        """
        Send output data and receive input data.
            
        Notes
        -----
        Assumes that the attributes used for input and output already
        exist.

        Each message is a tuple containing a module ID and data; for
        outbound messages, the ID is that of the destination module;
        for inbound messages, the ID is that of the source module.
        Data is serialized before being sent and unserialized when
        received.

        """
        
        if self.net in ['none', 'ctrl']:
            self.logger.info('not synchronizing with network')
        else:
            self.logger.info('synchronizing with network')

            # Send outbound data:
            if self.net in ['out', 'full']:

                # Send all data in outbound buffer:
                send_ids = list(self.out_ids)
                for out_id, data in self._out_data:
                    self.sock_data.send(msgpack.packb((out_id, data)))
                    send_ids.remove(out_id)
                    self.logger.info('sent to   %s: %s' % (out_id, str(data)))
                
                # Send data tuples containing None to those modules for which no
                # actual data was generated to satisfy the barrier condition:
                for out_id in send_ids:
                    self.sock_data.send(msgpack.packb((out_id, None)))
                    self.logger.info('sent to   %s: %s' % (out_id, None))

                # All output IDs should be sent data by this point:
                self.logger.info('sent data to all output IDs')

            # Receive inbound data:
            if self.net in ['in', 'full']:
                # Wait until inbound data is received from all source modules:  
                while not all(self._in_data.values()):
                    # Use poller to avoid blocking:
                    if is_poll_in(self.sock_data, self.data_poller):
                        in_id, data = msgpack.unpackb(self.sock_data.recv())
                        self.logger.info('recv from %s: %s ' % (in_id, str(data)))

                        # Ignore incoming data containing None:
                        if data is not None:
                            self._in_data[in_id].append(data)

                    # Stop the synchronization if a quit message has been received:
                    if not self.running:
                        self.logger.info('run loop stopped - stopping sync')
                        break
                self.logger.info('recv data from all input IDs')
Example no. 3
def process_get_response():
    glog.info('get_response is called from ' + str(request.environ['REMOTE_ADDR']) + ":" + str(request.environ['REMOTE_PORT']))
    query = urlparse(request.url).query
    if not query:
        # this might be an attacker
        return "hello world"
    query_components = urllib.parse.parse_qs(query)
    # https://stackoverflow.com/questions/8928730/processing-http-get-input-parameter-on-server-side-in-python
    if not check_key(query_components):
        return "no key", HTTP_ERROR_CODE_NOKEY
    decode_content = msg_np.unpackb(request.data)
    session_id = decode_content['session_id']
    vec_in = decode_content['vec_in']
    glog.info('predict vec_in shape ' + str(vec_in.shape))
    vec_out = model_wrapper.predict(vec_in)
    ret_data = {
        "session_id" : session_id,
        "vec_out" : vec_out
    }
    response = app.response_class(
        response=msg_np.packb(ret_data),
        status=200,
        mimetype='application/msgpack_numpy'
    )
    return response
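A minimal client-side sketch for exercising this handler (not from the original code): the URL, port, and api_key value are hypothetical, and it assumes the route accepts POST bodies packed with the same msgpack_numpy alias msg_np used by the server:

import numpy as np
import requests
import msgpack_numpy as msg_np

payload = msg_np.packb({'session_id': 'abc123', 'vec_in': np.zeros((1, 10))})
resp = requests.post('http://localhost:5000/get_response?api_key=XXXX',  # hypothetical endpoint and key
                     data=payload,
                     headers={'Content-Type': 'application/msgpack_numpy'})
result = msg_np.unpackb(resp.content)
print(result['session_id'], result['vec_out'].shape)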
Example no. 4
def producer(verbose=False):

    pusher_addr = const.ZMQ_PROTOCOL + "://" + ZMQ_IP_PUSH + ":" + const.ZMQ_PORT

    context = zmq.Context()
    zmq_socket = context.socket(zmq.PUSH)
    zmq_socket.connect(pusher_addr)

    # redis_client = redis.Redis(host=const.REDIS_REMOTE_IP, port=const.REDIS_PORT, db=0, password=const.REDIS_REMOTE_PASSWORD, username=const.REDIS_REMOTE_USER)

    with h5py.File(SOURCE_H5, 'r') as h5f:
        if verbose: print(f"Using {SOURCE_H5} as file source")

        modlen = len(h5f['times'])

        i = 0
        while True:
            if verbose: print(f"Trying to send iteration {i} to redis store")
            spec = np.array(h5f['spec'][i % modlen]).tolist()
            timestamp = h5f['times'][i % modlen]
            spec.append(timestamp)
            msg = m.packb(spec)
            try:
                # redis_client.set('latest',msg)
                zmq_socket.send(msg, zmq.NOBLOCK)
                if verbose: print("-- Succeeded")
            except Exception as e:
                if verbose: print(f"-- Failed: {e}")
            time.sleep(const.INTEGRATION_RATE / 1000)
            i += 1
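A minimal sketch of a matching consumer (an assumed counterpart, not in the original source): it reuses the const.* settings from above, assumes m is the same msgpack alias used by the producer, and assumes the PULL side binds while the producer's PUSH side connects:

import zmq
import msgpack as m  # assumption: same serializer alias as the producer

context = zmq.Context()
pull_socket = context.socket(zmq.PULL)
pull_socket.bind(const.ZMQ_PROTOCOL + "://*:" + const.ZMQ_PORT)

while True:
    spec = m.unpackb(pull_socket.recv())  # list of spectrum values + timestamp
    timestamp = spec.pop()                # the producer appends the timestamp last
    # ... process spec here ...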
Example no. 5
    def _data_handler(self, msg):
        """
        Data port handler.

        Notes
        -----
        Assumes that each message contains a source module ID
        (provided by zmq) and a serialized tuple; the tuple contains
        the destination module ID and the data to be transmitted.
        """

        if len(msg) != 2:
            self.logger.info('skipping malformed message: %s' % str(msg))
        else:

            # When a message arrives, increase the corresponding received_count
            in_id = msg[0]
            out_id, data = msgpack.unpackb(msg[1])
            self.logger.info('recv from %s: %s' % (in_id, data))

            # Increase the appropriate count in recv_counts by 1
            self._recv_counts[(in_id, out_id)] += 1
            self._data_to_route.append((in_id, out_id, data))

            # When data with source/destination IDs corresponding to
            # every entry in the routing table has been received up to
            # the current time step, deliver the data in the buffer:
            if all(self._recv_counts.values()):
                self.logger.info('recv from all modules')
                for in_id, out_id, data in self._data_to_route:
                    self.logger.info('sent to   %s: %s' % (out_id, data))

                    # Route to the destination ID and send the source ID
                    # along with the data:
                    self.sock_data.send_multipart(
                        [out_id, msgpack.packb((in_id, data))])

                # Reset the incoming data buffer
                self._data_to_route = []

                # Decrease all values in recv_counts to indicate that an
                # execution time_step has been successfully completed
                for k in self._recv_counts:
                    self._recv_counts[k] -= 1
                self.logger.info('----------------------')
Example no. 6
    def _data_handler(self, msg):
        """
        Data port handler.

        Notes
        -----
        Assumes that each message contains a source module ID
        (provided by zmq) and a serialized tuple; the tuple contains
        the destination module ID and the data to be transmitted.
        """

        if len(msg) != 2:
            self.logger.info('skipping malformed message: %s' % str(msg))
        else:

            # When a message arrives, increase the corresponding received_count
            in_id = msg[0]
            out_id, data = msgpack.unpackb(msg[1])
            self.logger.info('recv from %s: %s' % (in_id, data))
            # Increase the appropriate count in recv_counts by 1
            self.recv_counts[(in_id, out_id)] += 1
            self.data_to_route.append((in_id, out_id, data))
            # When data with source/destination IDs corresponding to
            # every entry in the routing table has been received up to
            # the current time step, deliver the data in the buffer:
            if all(self.recv_counts.values()):
                self.logger.info('recv from all modules')
                for in_id, out_id, data in self.data_to_route:
                    self.logger.info('sent to   %s: %s' % (out_id, data))

                    # Route to the destination ID and send the source ID
                    # along with the data:
                    self.sock_data.send_multipart([out_id,
                                                   msgpack.packb((in_id, data))])

                # Reset the incoming data buffer
                self.data_to_route = []
                # Decrease all values in recv_counts to indicate that an
                # execution time_step has been successfully completed
                for k in self.recv_counts:
                    self.recv_counts[k] -= 1
                self.logger.info('----------------------')
Example no. 7
    def _data_handler(self, msg):
        """
        Data port handler.

        Notes
        -----
        Assumes that each message contains a source module ID
        (provided by zmq) and a serialized tuple; the tuple contains
        the destination module ID and the data to be transmitted.
        """

        if len(msg) != 2:
            self.log_info('skipping malformed message: %s' % str(msg))
        else:

            # Queue arriving messages:
            in_id = msg[0]
            out_id, data = msgpack.unpackb(msg[1])
            self.log_info('recv from %s: %s' % (in_id, data))
            self._recv_queues[(in_id, out_id)].appendleft(data)

            # When data with source/destination IDs corresponding to
            # every entry in the routing table has been received up to
            # the current time step (i.e., all queues for each 
            # source/destination pair contain something), deliver the data:
            if all(self._recv_queues.values()):
                self.log_info('recv from all modules')
                for t in self._recv_queues:
                    in_id, out_id = t
                    data = self._recv_queues[t].pop()
                    self.log_info('sent to   %s: %s' % (out_id, data))

                    # Route to the destination ID and send the source ID
                    # along with the data:
                    self.sock_data.send_multipart([out_id,
                                                   msgpack.packb((in_id, data))])

                self.log_info('----------------------')
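For context, a minimal sketch of how the _recv_queues attribute used above might be initialized (an assumption, not taken from the original code): one FIFO deque per (source ID, destination ID) pair in the routing table, so that _data_handler can append on the left and pop from the right per connection:

import collections

def make_recv_queues(routing_table):
    """Build one FIFO deque per (source ID, destination ID) connection pair."""
    # routing_table is assumed to yield (in_id, out_id) pairs.
    return {(in_id, out_id): collections.deque() for in_id, out_id in routing_table}

# e.g. self._recv_queues = make_recv_queues([(b'A', b'B'), (b'B', b'A')])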
Example no. 8
    def _update_dataset(self, data_it):
        if torch.cuda.is_available():
            with torch.cuda.device(self.device):
                torch.cuda.empty_cache()

        if self.envs is None:
            self.envs = construct_envs(self.config,
                                       get_env_class(self.config.ENV_NAME))

        recurrent_hidden_states = torch.zeros(
            self.actor_critic.net.num_recurrent_layers,
            self.config.NUM_PROCESSES,
            self.config.MODEL.STATE_ENCODER.hidden_size,
            device=self.device,
        )
        prev_actions = torch.zeros(self.config.NUM_PROCESSES,
                                   1,
                                   device=self.device,
                                   dtype=torch.long)
        not_done_masks = torch.zeros(self.config.NUM_PROCESSES,
                                     1,
                                     device=self.device)

        observations = self.envs.reset()
        observations = transform_obs(
            observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID)
        batch = batch_obs(observations, self.device)

        episodes = [[] for _ in range(self.envs.num_envs)]
        skips = [False for _ in range(self.envs.num_envs)]
        # Populate dones with False initially
        dones = [False for _ in range(self.envs.num_envs)]

        # https://arxiv.org/pdf/1011.0686.pdf
        # Theoretically, any beta function is fine so long as it converges to
        # zero as data_it -> inf. The paper suggests starting with beta = 1 and
        # exponential decay.
        if self.config.DAGGER.P == 0.0:
            # in Python 0.0 ** 0.0 == 1.0, but we want 0.0
            beta = 0.0
        else:
            beta = self.config.DAGGER.P**data_it

        ensure_unique_episodes = beta == 1.0

        def hook_builder(tgt_tensor):
            def hook(m, i, o):
                tgt_tensor.set_(o.cpu())

            return hook

        rgb_features = None
        rgb_hook = None
        if self.config.MODEL.RGB_ENCODER.cnn_type == "TorchVisionResNet50":
            rgb_features = torch.zeros((1, ), device="cpu")
            rgb_hook = self.actor_critic.net.rgb_encoder.layer_extract.register_forward_hook(
                hook_builder(rgb_features))

        depth_features = None
        depth_hook = None
        if self.config.MODEL.DEPTH_ENCODER.cnn_type == "VlnResnetDepthEncoder":
            depth_features = torch.zeros((1, ), device="cpu")
            depth_hook = self.actor_critic.net.depth_encoder.visual_encoder.register_forward_hook(
                hook_builder(depth_features))

        collected_eps = 0
        if ensure_unique_episodes:
            ep_ids_collected = set(
                [ep.episode_id for ep in self.envs.current_episodes()])

        with tqdm.tqdm(
                total=self.config.DAGGER.UPDATE_SIZE) as pbar, lmdb.open(
                    self.lmdb_features_dir,
                    map_size=int(self.config.DAGGER.LMDB_MAP_SIZE
                                 )) as lmdb_env, torch.no_grad():
            start_id = lmdb_env.stat()["entries"]
            txn = lmdb_env.begin(write=True)

            while collected_eps < self.config.DAGGER.UPDATE_SIZE:
                if ensure_unique_episodes:
                    envs_to_pause = []
                    current_episodes = self.envs.current_episodes()

                for i in range(self.envs.num_envs):
                    if dones[i] and not skips[i]:
                        ep = episodes[i]
                        traj_obs = batch_obs([step[0] for step in ep],
                                             device=torch.device("cpu"))
                        del traj_obs["vln_oracle_action_sensor"]
                        for k, v in traj_obs.items():
                            traj_obs[k] = v.numpy()

                        transposed_ep = [
                            traj_obs,
                            np.array([step[1] for step in ep], dtype=np.int64),
                            np.array([step[2] for step in ep], dtype=np.int64),
                        ]
                        txn.put(
                            str(start_id + collected_eps).encode(),
                            msgpack_numpy.packb(transposed_ep,
                                                use_bin_type=True),
                        )

                        pbar.update()
                        collected_eps += 1

                        if (collected_eps %
                                self.config.DAGGER.LMDB_COMMIT_FREQUENCY) == 0:
                            txn.commit()
                            txn = lmdb_env.begin(write=True)

                        if ensure_unique_episodes:
                            if current_episodes[
                                    i].episode_id in ep_ids_collected:
                                envs_to_pause.append(i)
                            else:
                                ep_ids_collected.add(
                                    current_episodes[i].episode_id)

                    if dones[i]:
                        episodes[i] = []

                if ensure_unique_episodes:
                    (
                        self.envs,
                        recurrent_hidden_states,
                        not_done_masks,
                        prev_actions,
                        batch,
                    ) = self._pause_envs(
                        envs_to_pause,
                        self.envs,
                        recurrent_hidden_states,
                        not_done_masks,
                        prev_actions,
                        batch,
                    )
                    if self.envs.num_envs == 0:
                        break

                (_, actions, _,
                 recurrent_hidden_states) = self.actor_critic.act(
                     batch,
                     recurrent_hidden_states,
                     prev_actions,
                     not_done_masks,
                     deterministic=False,
                 )
                # print("action: ", actions)
                # print("batch[vln_oracle_action_sensor]: ", batch["vln_oracle_action_sensor"])
                # print("torch.rand_like(actions, dtype=torch.float) < beta: ", torch.rand_like(actions, dtype=torch.float) < beta)
                actions = torch.where(
                    torch.rand_like(actions, dtype=torch.float) < beta,
                    batch["vln_oracle_action_sensor"].long(),
                    actions,
                )

                for i in range(self.envs.num_envs):
                    if rgb_features is not None:
                        observations[i]["rgb_features"] = rgb_features[i]
                        del observations[i]["rgb"]

                    if depth_features is not None:
                        observations[i]["depth_features"] = depth_features[i]
                        del observations[i]["depth"]

                    episodes[i].append((
                        observations[i],
                        prev_actions[i].item(),
                        batch["vln_oracle_action_sensor"][i].item(),
                    ))

                skips = batch["vln_oracle_action_sensor"].long() == -1
                actions = torch.where(skips, torch.zeros_like(actions),
                                      actions)
                skips = skips.squeeze(-1).to(device="cpu", non_blocking=True)

                prev_actions.copy_(actions)

                outputs = self.envs.step([a[0].item() for a in actions])
                observations, rewards, dones, _ = [
                    list(x) for x in zip(*outputs)
                ]

                not_done_masks = torch.tensor(
                    [[0.0] if done else [1.0] for done in dones],
                    dtype=torch.float,
                    device=self.device,
                )

                observations = transform_obs(
                    observations,
                    self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID)
                batch = batch_obs(observations, self.device)

            txn.commit()

        self.envs.close()
        self.envs = None

        if rgb_hook is not None:
            rgb_hook.remove()
        if depth_hook is not None:
            depth_hook.remove()
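For reference, a minimal sketch (not part of the trainer) of reading one stored episode back from the LMDB features database written above; lmdb_features_dir stands in for the trainer's self.lmdb_features_dir:

import lmdb
import msgpack_numpy

with lmdb.open(lmdb_features_dir, readonly=True, lock=False) as lmdb_env, \
        lmdb_env.begin() as txn:
    # Entries were packed with use_bin_type=True, so unpack with raw=False
    # to recover the string keys of the observation dict.
    traj_obs, actions, oracle_actions = msgpack_numpy.unpackb(
        txn.get(str(0).encode()), raw=False)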
Example no. 9
amqp_url='amqp://*****:*****@lark.rmq.cloudamqp.com/tijjoigp'
url = os.environ.get('CLOUDAMQP_URL',amqp_url)
params = pika.URLParameters(url)
params.socket_timeout = 5
#initiate the connexion
connection = pika.BlockingConnection(params)

channel = connection.channel()
result = channel.queue_declare(queue='', exclusive=True)
callback_queue = result.method.queue

corr_id = str(uuid.uuid4())
messageBody = numpy.random.random((20,30))
#messageJson = {'type': 0, 'value': 'Test'}
encoded_message = m.packb(messageBody, default=m.encode)
##
# Publish message in queue
channel.basic_publish(exchange='',
                      routing_key='rpc_queue',
                      properties=pika.BasicProperties(
                          reply_to=callback_queue,
                          correlation_id=corr_id
                      ),
                      body=encoded_message)


#print(" [x] Sent "+encoded_message)


##
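A minimal sketch of the consumer side of this RPC pattern (an assumed counterpart, not in the original source): it reuses url and the msgpack-numpy alias m from above, and uses the pika 1.x basic_consume signature:

def on_request(ch, method, props, body):
    array = m.unpackb(body)                                 # recover the numpy array
    reply = m.packb(array.mean(axis=0), default=m.encode)   # placeholder computation
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id),
                     body=reply)
    ch.basic_ack(delivery_tag=method.delivery_tag)

consumer_connection = pika.BlockingConnection(pika.URLParameters(url))
consumer_channel = consumer_connection.channel()
consumer_channel.queue_declare(queue='rpc_queue')
consumer_channel.basic_consume(queue='rpc_queue', on_message_callback=on_request)
consumer_channel.start_consuming()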
Example no. 10
# Loop through creating nice profile names
profiles = []
for profile in img_names:
    filename = profile.rsplit('/', maxsplit=1)[1]
    lastname, firstname = name_regex.findall(filename)[-1].rsplit('_', maxsplit=1)
    lastname = lastname.title()
    firstname = firstname.title()
    profiles.append(firstname + lastname)

# Load all images into memory
images = [face_recognition.load_image_file(name) for name in img_names]

# Find all our faces using the GPU-accelerated CNN model
logger.debug("Finding faces....")
start_time = time.time()
face_locations = face_recognition.batch_face_locations(images, number_of_times_to_upsample=0)  # Load all face locations
end_time = time.time()
logger.debug(f"Found {len(face_locations)} faces in {end_time - start_time}")

# Sanity check: batch_face_locations returns one list of face locations per image
assert len(face_locations) == len(images)

# Grab face encodings for each location
known_face_encodings = [face_recognition.face_encodings(image, face_location)[0] for image, face_location in zip(images, face_locations)]

# Throw our encodings into Redis
packed_known_encodings = m.packb(known_face_encodings)

r.set('known_face_encodings', packed_known_encodings)
r.set('profiles', json.dumps(profiles))
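A minimal read-back sketch (not in the original code): it assumes the same Redis client r, json module, and msgpack-numpy alias m used above:

packed = r.get('known_face_encodings')
known_face_encodings = m.unpackb(packed)       # list of numpy face-encoding vectors
profiles = json.loads(r.get('profiles'))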
Example no. 11
    def run(self):
        """
        Body of process.
        """

        # Don't allow keyboard interruption of process:
        self.log_info('starting')
        with IgnoreKeyboardInterrupt():

            # Initialize environment:
            self._init_net()

            # Initialize _out_port_dict and _in_port_dict attributes:
            self._init_port_dicts()

            # Initialize Buffer for incoming data.  Dict used to store the
            # incoming data keyed by the source module id.  Each value is a
            # queue buffering the received data:
            self._in_data = {k: collections.deque() for k in self.in_ids}

            # Perform any pre-emulation operations:
            self.pre_run()

            self.running = True
            self.steps = 0
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'start',
                                                   time.time())))
                self.log_info('sent start time to master')
            while self.steps < self.max_steps:
                self.log_info('execution step: %s/%s' % (self.steps, self.max_steps))

                # If the debug flag is set, don't catch exceptions so that
                # errors will lead to visible failures:
                if self.debug:

                    # Run the processing step:
                    self.run_step()

                    # Do post-processing:
                    self.post_run_step()

                    # Synchronize:
                    self._sync()
                else:
                    # Run the processing step:
                    catch_exception(self.run_step, self.log_info)

                    # Do post processing:
                    catch_exception(self.post_run_step, self.log_info)

                    # Synchronize:
                    catch_exception(self._sync, self.log_info)

                # Exit run loop when a quit message has been received:
                if not self.running:
                    self.log_info('run loop stopped')
                    break

                self.steps += 1
            if self.time_sync:
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'stop',
                                                   time.time())))
                self.log_info('sent stop time to master')
            self.log_info('maximum number of steps reached')

            # Perform any post-emulation operations:
            self.post_run()

            # Shut down the control handler and inform the manager that the
            # module has shut down:
            self._ctrl_stream_shutdown()
            ack = 'shutdown'
            self.sock_ctrl.send(ack)
            self.log_info('sent to manager: %s' % ack)

        self.log_info('exiting')
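For context, a minimal sketch of the catch_exception helper used in the run loop above (an assumption about its behavior, not the original implementation): it runs the callable and routes any traceback to the supplied logging function instead of letting the exception propagate:

import traceback

def catch_exception(func, disp, *args, **kwargs):
    # Run func and report any exception via disp (e.g. self.log_info)
    # so that a failing step does not kill the emulation process.
    try:
        return func(*args, **kwargs)
    except Exception:
        disp(traceback.format_exc())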
Example no. 12
min(app_counts)
np.mean(app_counts)
mode(app_counts)
np.median(app_counts)

devices_enough_apps = app_counts[app_counts >= 100].index
device_100app_groups = device_app_groups[device_app_groups['device_type_id'].isin(devices_enough_apps)]

schema = device_100app_groups.columns

device_appBase = {}
for device_type in devices_enough_apps:
	device_apps = device_100app_groups[device_100app_groups['device_type_id'] == device_type]
	X = device_apps.as_matrix(columns=schema[2:])
	app_ids = device_apps.as_matrix(columns = [schema[1]])
	device_appBase[device_type] = (X, app_ids)

handle = open('devAppBase_first_september', 'wb')
handle.write(msgnp.packb(device_appBase, default=msgnp.encode))
handle.close()


apps = db.read_table('applications')
appDict = {}
for app in all_apps:
	appDict[app] = apps[apps['application_version_id'] == app]['name'].values[0]


handle = open('apps', 'wb')
handle.write(msgnp.packb(appDict, default=msgnp.encode))
handle.close()
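A minimal read-back sketch (not in the original script): it assumes the same msgpack-numpy alias msgnp used above; strict_map_key=False is passed because the dictionary keys are numeric IDs rather than strings, which msgpack >= 1.0 rejects by default:

with open('devAppBase_first_september', 'rb') as handle:
    device_appBase = msgnp.unpackb(handle.read(), strict_map_key=False)

with open('apps', 'rb') as handle:
    appDict = msgnp.unpackb(handle.read(), strict_map_key=False)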
Example no. 13
HTTP_ERROR_CODE_UNKNOWN_ACTION = 340
HTTP_ERROR_CODE_BIGAMY = 360
HTTP_ERROR_CODE_NOKEY = 401
HTTP_ERROR_CODE_NO_ACTION = 405
# flask
app = Flask(__name__)
# set flask log level
logging.getLogger('werkzeug').setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.WARNING)

# test
tmp = {
    'a' : 'abc',
    'vec' : np.zeros((3, 3))
}
content = msg_np.packb(tmp)
content2 = msg_np.unpackb(content)
glog.info(content2['a'])
glog.info(str(content2['vec']))
############
import lstm_model
model_wrapper = lstm_model.LstmModel()
last_model_file = '1_final.h5'
glog.info('loading ' + last_model_file)
model_wrapper.load_model_from_file(file=last_model_file)

def get_timestamp_for_dingding():
    return time.strftime("[%m/%d-%H:%M:%S]", time.localtime())

def check_key(query_components):
    if "api_key" not in query_components:
Example no. 14
    def __init__(self, num_points, transforms=None, train=True, download=True):
        super().__init__()

        self.transforms = transforms

        self.set_num_points(num_points)
        self._cache = os.path.join(BASE_DIR,
                                   "modelnet40_normal_resampled_cache")
        self._uu_cache = os.path.join(BASE_DIR, "_uu_cache")

        if not osp.exists(self._cache):
            self.folder = "modelnet40_normal_resampled"
            self.data_dir = os.path.join(BASE_DIR, self.folder)
            self.url = (
                "https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip"
            )

            if download and not os.path.exists(self.data_dir):
                www = 'https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip'
                zipfile = os.path.basename(www)
                os.system('wget %s --no-check-certificate; unzip %s' %
                          (www, zipfile))
                os.system('mv %s %s' % (zipfile[:-4], BASE_DIR))
                os.system('rm %s' % (zipfile))


#                zipfile = os.path.join(BASE_DIR, os.path.basename(self.url))
#                subprocess.check_call(
#                    shlex.split("curl {} -o {}".format(self.url, zipfile))
#                )
#
#                subprocess.check_call(
#                    shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR))
#                )
#
#                subprocess.check_call(shlex.split("rm {}".format(zipfile)))

            self.train = train
            self.set_num_points(num_points)

            self.catfile = os.path.join(self.data_dir,
                                        "modelnet40_shape_names.txt")
            self.cat = [line.rstrip() for line in open(self.catfile)]
            self.classes = dict(zip(self.cat, range(len(self.cat))))

            os.makedirs(self._cache)

            print("Converted to LMDB for faster dataloading while training")
            for split in ["train", "test"]:
                if split == "train":
                    shape_ids = [
                        line.rstrip() for line in open(
                            os.path.join(self.data_dir,
                                         "modelnet40_train.txt"))
                    ]
                else:
                    shape_ids = [
                        line.rstrip() for line in open(
                            os.path.join(self.data_dir, "modelnet40_test.txt"))
                    ]

                shape_names = ["_".join(x.split("_")[0:-1]) for x in shape_ids]
                # list of (shape_name, shape_txt_file_path) tuple
                self.datapath = [(
                    shape_names[i],
                    os.path.join(self.data_dir, shape_names[i], shape_ids[i]) +
                    ".txt",
                ) for i in range(len(shape_ids))]

                with lmdb.open(osp.join(self._cache, split),
                               map_size=1 << 36) as lmdb_env, lmdb_env.begin(
                                   write=True) as txn:
                    for i in tqdm.trange(len(self.datapath)):
                        fn = self.datapath[i]
                        point_set = np.loadtxt(fn[1], delimiter=",").astype(
                            np.float32)
                        cls = self.classes[self.datapath[i][0]]
                        cls = int(cls)

                        txn.put(
                            str(i).encode(),
                            msgpack_numpy.packb(dict(pc=point_set, lbl=cls),
                                                use_bin_type=True),
                        )

            shutil.rmtree(self.data_dir)

        if not osp.exists(self._uu_cache):
            self.folder = "uu_cache"
            self.data_dir = os.path.join(BASE_DIR, self.folder)

            self.train = train
            self.set_num_points(num_points)

            self.catfile = os.path.join(self.data_dir,
                                        "modelnet41_shape_names.txt")
            self.cat = [line.rstrip() for line in open(self.catfile)]
            self.classes = dict(zip(self.cat, range(len(self.cat))))

            os.makedirs(self._uu_cache)

            print("Converted to LMDB for faster dataloading while training")
            for split in ["train", "test"]:
                if split == "train":
                    shape_ids = [
                        line.rstrip() for line in open(
                            os.path.join(self.data_dir,
                                         "modelnet41_train.txt"))
                    ]
                else:
                    shape_ids = [
                        line.rstrip() for line in open(
                            os.path.join(self.data_dir, "modelnet41_test.txt"))
                    ]

                shape_names = ["_".join(x.split("_")[0:-1]) for x in shape_ids]
                # list of (shape_name, shape_txt_file_path) tuple
                self.datapath = [(
                    shape_names[i],
                    os.path.join(self.data_dir, shape_names[i], shape_ids[i]) +
                    ".txt",
                ) for i in range(len(shape_ids))]

                with lmdb.open(osp.join(self._uu_cache, split),
                               map_size=1 << 36) as lmdb_env, lmdb_env.begin(
                                   write=True) as txn:
                    for i in tqdm.trange(len(self.datapath)):
                        fn = self.datapath[i]
                        point_set = np.loadtxt(fn[1], delimiter=",").astype(
                            np.float32)
                        cls = self.classes[self.datapath[i][0]]
                        cls = int(cls)

                        txn.put(
                            str(i).encode(),
                            msgpack_numpy.packb(dict(pc=point_set, lbl=cls),
                                                use_bin_type=True),
                        )

            #shutil.rmtree(self.data_dir)

        self._lmdb_file = osp.join(self._cache, "train" if train else "test")
        with lmdb.open(self._lmdb_file, map_size=1 << 36) as lmdb_env:
            self._len = lmdb_env.stat()["entries"]

        self._lmdb_env = None
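For reference, a sketch of how entries might be read back in __getitem__ (an assumption following the same LMDB/msgpack-numpy conventions as the writer above, not necessarily the original implementation; self.num_points is assumed to be set by set_num_points):

    def __getitem__(self, idx):
        # Open the LMDB environment lazily so the dataset can be pickled
        # by DataLoader workers before any file handle exists.
        if self._lmdb_env is None:
            self._lmdb_env = lmdb.open(self._lmdb_file, map_size=1 << 36,
                                       readonly=True, lock=False)

        with self._lmdb_env.begin(buffers=True) as txn:
            ele = msgpack_numpy.unpackb(txn.get(str(idx).encode()), raw=False)

        point_set = ele["pc"][:self.num_points, :]
        label = ele["lbl"]
        return point_set, label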
Example no. 15
    def _sync(self):
        """
        Send output data and receive input data.

        Notes
        -----
        Assumes that the attributes used for input and output already
        exist.

        Each message is a tuple containing a module ID and data; for
        outbound messages, the ID is that of the destination module;
        for inbound messages, the ID is that of the source module.
        Data is serialized before being sent and unserialized when
        received.
        """

        if self.net in ['none', 'ctrl']:
            self.log_info('not synchronizing with network')
        else:
            self.log_info('synchronizing with network')

            # Send outbound data:
            start = time.time()
            self._put_out_data()
            if self.net in ['out', 'full']:

                # Send all data in outbound buffer:
                send_ids = list(self._out_ids)
                for out_id, data in self._out_data:
                    self.sock_data.send(msgpack.packb((out_id, data)))
                    send_ids.remove(out_id)
                    if not self.time_sync:
                        self.log_info('sent to   %s: %s' % (out_id, str(data)))

                # Send data tuples containing None to those modules for which no
                # actual data was generated to satisfy the barrier condition:
                for out_id in send_ids:
                    self.sock_data.send(msgpack.packb((out_id, None)))
                    if not self.time_sync:
                        self.log_info('sent to   %s: %s' % (out_id, None))

                # All output IDs should be sent data by this point:
                if not self.time_sync:
                    self.log_info('sent data to all output IDs')

            # Receive inbound data:
            if self.net in ['in', 'full']:

                # Wait until inbound data is received from all source modules:
                recv_ids = set(self._in_ids)
                nbytes = 0
                while recv_ids:

                    # Poll to avoid blocking:
                    if self.sock_data.poll(POLL_TIMEOUT):
                        data_packed = self.sock_data.recv()
                        in_id, data = msgpack.unpackb(data_packed)
                        if not self.time_sync:
                            self.log_info('recv from %s: %s' % (in_id, str(data)))

                        # Ignore incoming data containing None:
                        if data is not None:
                            self._in_data[in_id].append(data)

                            # Record number of bytes of transmitted serialized data:
                            nbytes += len(data_packed)

                        # Remove source module ID from set of IDs from which to
                        # expect data:
                        recv_ids.discard(in_id)

                    # Stop the synchronization if a quit message has been received:
                    if not self.running:
                        if not self.time_sync:
                            self.log_info('run loop stopped - stopping sync')
                        break

                if not self.time_sync:
                    self.log_info('recv data from all input IDs')
            self._get_in_data()

            # Transmit synchronization time:
            stop = time.time()
            if self.time_sync:
                self.log_info('sent timing data to master')
                self.sock_time.send(msgpack.packb((self.id, self.steps, 'sync',
                                                   (start, stop, nbytes))))
Example no. 16
    def __init__(self, n_points: int, train: bool, batch_size=1, shuffle=False):
        super().__init__()
        self.n_points = n_points
        self.batch_size = batch_size
        self.train = train
        self.shuffle = shuffle

        self.path = BASE_DIR / 'data' / 'modelnet40_normal_resampled'
        self.cache = BASE_DIR / 'data' / 'modelnet40_normal_resampled_cache'
        self.cache.mkdir(exist_ok=True)

        if not self.path.exists():
            self.path.mkdir(parents=True)
            self.url = (
                "https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip"
            )
            zipfile = self.path / '..' / 'modelnet40_normal_resampled.zip'

            if not zipfile.exists():
                subprocess.check_call([
                    'curl', self.url, '-o', str(zipfile)
                ])

            subprocess.check_call([
                'unzip', str(zipfile), '-d', str(self.path / '..')
            ])

        cats_file = self.path / 'modelnet40_shape_names.txt'
        with cats_file.open() as f:
            cats = [line.rstrip() for line in f.readlines()]
            self.classes = dict(zip(cats, range(len(cats))))

        train = 'train' if self.train else 'test'
        shapes_file = self.path / f'modelnet40_{train}.txt'
        with shapes_file.open() as f:
            self.shapes = []
            for line in f.readlines():
                shape_id = line.rstrip()
                shape_name = '_'.join(shape_id.split('_')[0:-1])
                self.shapes.append((
                    shape_name,
                    shape_id + '.txt',
                ))

        self.lmdb_file = self.cache / train
        self.lmdb_env = None
        if not self.lmdb_file.exists():
            # create lmdb file
            with lmdb.open(str(self.lmdb_file), map_size=1 << 36) as lmdb_env:
                with lmdb_env.begin(write=True) as txn:
                    for i, (shape_name, shape_file) in enumerate(tqdm(self.shapes)):
                        shape_path = self.path / shape_name / shape_file
                        pts = np.loadtxt(shape_path, delimiter=',', dtype=np.float32)
                        cls = self.classes[shape_name]

                        txn.put(
                            str(i).encode(),
                            msgpack_numpy.packb(pts, use_bin_type=True),
                        )

        self.set_attrs(
            batch_size=self.batch_size,
            total_len=len(self.shapes),
            shuffle=self.shuffle
        )
Example no. 17
def numpy_to_Redis(redis_client, array, key):
    """Store the given NumPy array 'array' in Redis under key 'key'."""
    packed_arr = m.packb(array)
    redis_client.set(key, packed_arr)
    return
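A matching retrieval helper sketch (not in the original code): it assumes the same msgpack-numpy alias m:

def Redis_to_numpy(redis_client, key):
    """Retrieve the NumPy array stored in Redis under key 'key', or None if missing."""
    packed_arr = redis_client.get(key)
    if packed_arr is None:
        return None
    return m.unpackb(packed_arr)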