Example #1
 def create_plugin(self, ctx, plugin_obj):
     new_plugin = serializer.deserialize_entity(ctx, plugin_obj)
     LOG.debug('Creating plugin %s',
               new_plugin.name)
     new_plugin.code = cpickle.dumps(new_plugin.code, 0).decode()
     new_plugin.create()
     return serializer.serialize_entity(ctx, new_plugin)
Example #2
    def _serialize_dict(cls, data):
        '''
        serializes a dictionary

        :param data: data to serialize
        '''
        return b64encode(zlib.compress(cPickle.dumps(data, protocol=2))).decode()
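For reference, the matching read path would invert the same layering (base64 -> zlib -> pickle). A minimal sketch, assuming b64decode is imported alongside b64encode and that this hypothetical method lives on the same class:

    def _deserialize_dict(cls, data):
        '''
        inverts _serialize_dict: base64-decode, zlib-decompress, then unpickle

        :param data: string produced by _serialize_dict
        '''
        return cPickle.loads(zlib.decompress(b64decode(data)))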
Example #3
def train(ai, config):
    loaded_files = []
    x = config.iterations
    i = len(glob.glob(config.data.model_location+"*.h5"))
    loaded_files, _ = load_games(ai, loaded_files, config)
    while(x != 0):
        if i > config.iter3:
            ai.update_lr(config.learning_rate3)
        elif i > config.iter2:
            ai.update_lr(config.learning_rate2)
        else:
            ai.update_lr(config.learning_rate1)
        loaded_files, diff = load_games(ai, loaded_files, config)
        total_diff = diff
        start = time()
        print("Iteration %04d"%i)
        end = config.min_new_game_files if i > 0 else config.min_game_file
        util.print_progress_bar(0, end, start=start)
        while(total_diff < end):
            if diff > 0:
                total_diff += diff
                util.print_progress_bar(total_diff, end, start=start)
            sleep(5)
            loaded_files, diff = load_games(ai, loaded_files, config)
        util.print_progress_bar(end, end, start=start)
        print("Training for %d batches on %d samples" % (config.batches_per_iter, len(ai.buffer.buffer)))
        start = time()
        history = ai.train_batches(config.batch_size, config.batches_per_iter, config.verbose)
        for val in history.history.keys():
            print("%s: %0.4f" % (val, history.history[val][-1]))
        if i % config.save_model_cycles == 0:
            ai.save("%smodel_%04d.h5" % (config.data.model_location, i))

        with open("%shist_%04d.pickle" % (config.data.history_location, i), 'wb') as file:
            pickle.dump(pickle.dumps(history.history), file)
        print("Iteration Time: %0.2f" % (time()-start))
        x -= 1
        i += 1
Example #4
    def populate(cls):
        if cls.fonts is not None:
            return

        start = time.time()
        from cube.constants.application import config_directory
        fonts_dir = os.path.join(config_directory(), "fonts")
        if not os.path.exists(fonts_dir):
            os.makedirs(fonts_dir)
        fonts_file = os.path.join(fonts_dir, "fonts.lst")
        if os.path.exists(fonts_file):
            cube.debug("Loading fonts from cache file '%s'" % fonts_file)
            try:
                with open(fonts_file, 'rb') as f:
                    cls.fonts = pickle.loads(f.read())
            except Exception as e:
                cube.warn("font cache file '%s' is not valid, it will be removed:" % fonts_file, e)
                os.unlink(fonts_file)
        else:
            cube.info("Finding fonts on your system, this may take a while...")
            cls.fonts = {}
            for font_dir in cls.font_directories():
                for root, dirs, files in os.walk(font_dir):
                    for f in files:
                        path = os.path.join(root, f)
                        if font.is_valid(path):
                            try:
                                cls.fonts[path] = font.get_infos(path)
                            except:
                                cube.error("ignoring font file", path)

        if not cls.fonts:
            raise Exception("Couldn't find any font on your system !")
        cube.info(len(cls.fonts), "font infos fetched in %f seconds" % (time.time() - start))

        cube.debug("Saving fonts into cache file '%s'" % fonts_file)
        with open(fonts_file, 'wb') as f:
            f.write(pickle.dumps(cls.fonts))
Example #5
                for i in range(len(grads)):
                    grads[i] += per_grads[i]

            for i in range(len(grads)):
                grads[i] /= BATCH_SIZE
            loss /= BATCH_SIZE

            train_loss += loss

            for i in range(len(Clip_bound)):
                Clip_bound[i] /= BATCH_SIZE
            grads_noise = Add_noise(grads, Clip_bound)

            client.publish("mapa_grads/" + CLIENT_ID,
                           cPickle.dumps(grads_noise), 2)

            if step % TEST_NUM == 0:
                print(step)
                total = 0
                correct = 0
                correct_pad = 0
                test_loss = 0
                man_file1 = open(
                    RESULT_ROOT + '[' + str(EDGE_NAME) + ']' +
                    '[Mapa-Accuracy]', 'w')
                for batch_idx, (test_x, test_y) in enumerate(test_loader):
                    if batch_idx < test_idx:

                        output = model(test_x)
                        pred_y = torch.max(output, 1)[1].data.numpy()
Example #6
 def serialize(self, generic_object):
     byte_str = pickle.dumps(generic_object)
     return byte_str.hex()
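The hex string can be turned back into an object with the inverse pair of calls; a minimal sketch (the deserialize name is hypothetical, not part of the original snippet):

 def deserialize(self, hex_str):
     # hypothetical counterpart: decode the hex text back to bytes, then unpickle
     return pickle.loads(bytes.fromhex(hex_str))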
Example #7
 def __conform__(self, protocol):
     if protocol is sqlite3.PrepareProtocol:
         # return cpickle.dumps(self.data, 1)
         return sqlite3.Binary(cpickle.dumps(self.data, -1))
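__conform__ only covers the write path; reading the value back typically goes through a registered converter. A sketch under the assumption that cpickle is the same pickle alias used above, with a hypothetical "pickled" column type (the connection would need detect_types=sqlite3.PARSE_DECLTYPES):

sqlite3.register_converter("pickled", lambda blob: cpickle.loads(blob))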
Example #8
def pickleIT(list):
    with open("dictionaries_list.pkl", "wb") as f:
        f.write(cPickle.dumps(list))
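A matching reader (hypothetical, not part of the original) simply reverses the call:

def unpickleIT():
    # read the file written by pickleIT() and reconstruct the list
    with open("dictionaries_list.pkl", "rb") as f:
        return cPickle.loads(f.read())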
Example #9
 def __call__(self, *args, **kwds):
     str = cPickle.dumps(args, 1) + cPickle.dumps(kwds, 1)
     if not str in self.memo:
         self.memo[str] = self.fn(*args, **kwds)
     return self.memo[str]
Example #10
 def __conform__(self, protocol):
     if protocol is sqlite3.PrepareProtocol:
         # return cpickle.dumps(self.data, 1)
         return sqlite3.Binary(cpickle.dumps(self.data, -1))
Example #11
#client.on_connect = on_connect
#client.on_disconnect=on_disconnect
#client.on_log = on_log
#client.on_message = on_message
client.on_publish = on_publish

face_cascade = cv.CascadeClassifier(
    '/usr/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
# 1 should correspond to /dev/video1 , your USB camera. The 0 is reserved for the TX2 onboard camera
cap = cv.VideoCapture(1)

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # We don't use the color information, so might as well save space
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # face detection and other logic goes here
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        crop_img = gray[y:y + h, x:x + w]
        client.connect(broker)
        client.loop_start()
        #client.subscribe("pictures/faces",1)
        client.publish("pictures/faces", pk.dumps(crop_img))
        time.sleep(5)
        client.loop_stop()
        client.disconnect()
    time.sleep(5)
Example #12
def compress_object(data, level=9):
    if data is None:
        return 'null'
    return base64.b64encode(zlib.compress(cPickle.dumps(data),
                                          level)).decode('utf-8')
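The corresponding decompressor is the same pipeline run backwards; a sketch with a hypothetical name, assuming the 'null' sentinel is honored:

def decompress_object(data):
    if data == 'null':
        return None
    # reverse the base64 -> zlib -> pickle layering used by compress_object
    return cPickle.loads(zlib.decompress(base64.b64decode(data)))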
Example #13
 def save(self, filename):
     file = open(filename, 'wb') 
     pickle.dump(pickle.dumps(self.buffer), file)
     file.close() 
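Because the buffer is pickled twice (dumps, then dump), a hypothetical load counterpart has to unpickle twice as well; a minimal sketch:

 def load(self, filename):
     with open(filename, 'rb') as file:
         # first unpickle the file object, then the byte string it contained
         self.buffer = pickle.loads(pickle.load(file))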
Example #14
    def get(self, exp_id):
        """ Simulate your experiment on a simple model
        The model that is drawn from is:

        y = -(x - c)**2 + c2 + rnorm(mu,var)

        Currently there is no context. Make sure that the action of your
        experiment results in:

        {"x" : x}

        This is how the model currently expects your action to be formulated.
        This might become more flexible later on.

        +--------------------------------------------------------------------+
        | Example                                                            |
        +====================================================================+
        |http://example.com/eval/5/simulate?key=XXX&N=10&c=5&c2=10&mu=0&var=1|
        +--------------------------------------------------------------------+

        :param int exp_id: Experiment ID as specified in the url
        :param string key: The key corresponding to the experiment
        :param int N: The number of simulation draws
        :param int c: The size of the parabola
        :param int c2: The height of the parabola
        :param int mu: The mean of the noise on the model
        :param int var: The variance of the noise on the model
        :param string log_stats: Flag for logging the results in the database
        
        :returns: A JSON of the form: {"simulate":"success"}
        :raises AuthError: 401 Invalid Key

        """

        key = self.get_argument("key", default = False)
        
        # Number of draws
        N = int(self.get_argument("N", default = 1000))

        log_stats = self.get_argument("log_stats", default = True)

        # Parameterset for the simulator
        c = float(self.get_argument("c", default = 5))
        c2 = float(self.get_argument("c2", default = 10))
        mu = float(self.get_argument("mu", default = 0))
        var = float(self.get_argument("var", default = .1))

        if not key:
            self.set_status(401)
            self.write("Key not given")
            return

        __EXP__ = Experiment(exp_id, key)

        rewards = np.array([0])
        reward_over_time = np.array([])
        regret = np.array([0])

        if __EXP__.is_valid():
            for i in range(N):
                # Generate context
                context = {}

                # Get action

                action = __EXP__.run_action_code(context)

                # Generate reward

                y = -(action["x"] - c)**2 + c2 + np.random.normal(mu, var)
                #y = 15 + 8*action["x"] + 10*action["x"]**2 + np.random.normal(mu, var)

                reward = {"y" : y}

                # Set reward
                __EXP__.run_reward_code(context, action, reward)
                
                # Save stats
                rewards = np.append(rewards, y)
                tmp_rot = (rewards[-1] + y) / (i+1)
                reward_over_time = np.append(reward_over_time, tmp_rot)
                regret = np.append(regret, (regret[-1] + (c2 - y)))

                #self.write("n = {}, Regret is: {}, reward = {} <br>".format(i,regret[-1], rewards[-1]))


            # Now save the data together with a timestamp in the logs
            # To read out the Numpy array data out again, use array =
            # pickle.loads(record['feature'])

            # FOR FUTURE, the json_tricks package might be interesting
            if log_stats == True:
                print("Logging data")
                __EXP__.log_data({
                    "type" : "evaluation",
                    "time" : int(time.time()),
                    "experiment" : exp_id,
                    "N" : N,
                    "c" : c,
                    "c2" : c2,
                    "rewards" : Binary(_pickle.dumps(rewards, protocol = 2), subtype = 128),
                    "reward_over_time" : Binary(_pickle.dumps(reward_over_time, protocol = 2), subtype = 128),
                    "regret" : Binary(_pickle.dumps(regret, protocol = 2), subtype = 128)
                    })

                self.write(json.dumps({'simulate':'success','experiment':exp_id}))
        else:
            self.set_status(401)
            self.write("Key is not valid for this experiment")
            return
Example #15
            % COUNTER)
        #print (tabulate(filter(lambda k,v : np.asarray(v).size==1, stats.items()))) #pylint: disable=W0110
        print(
            tabulate(
                filter(lambda x: np.asarray(x[1]).size == 1, stats.items())))  #pylint: disable=W0110
        # Store to hdf5
        if args.use_hdf:
            for (stat, val) in stats.items():
                if np.asarray(val).ndim == 0:
                    diagnostics[stat].append(val)
                else:
                    assert val.ndim == 1
                    diagnostics[stat].extend(val)
            if args.snapshot_every and ((COUNTER % args.snapshot_every == 0) or
                                        (COUNTER == args.n_iter)):
                hdf['/agent_snapshots/%0.4i' % COUNTER] = np.array(
                    cPickle.dumps(agent, -1))
        # Plot
        if args.plot:
            animate_rollout(env, agent, min(500, args.timestep_limit))

    run_policy_gradient_algorithm(env, agent, callback=callback, usercfg=cfg)

    if args.use_hdf:
        hdf['env_id'] = env_spec.id
        try:
            hdf['env'] = np.array(cPickle.dumps(env, -1))
        except Exception:
            print("failed to pickle env")  #pylint: disable=W0703
    env.close()
Example #16
def dumps(data):
    if _USE_COMPRESS:
        return lz4.frame.compress(cPickle.dumps(data))
    else:
        return cPickle.dumps(data)
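A matching loads helper (hypothetical, mirroring the _USE_COMPRESS switch) would decompress before unpickling:

def loads(data):
    if _USE_COMPRESS:
        return cPickle.loads(lz4.frame.decompress(data))
    else:
        return cPickle.loads(data)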
Example #17
	def dumps(obj):
		return pkl.dumps(obj)
Example #18
 def _writeAttributesToFile(self, attribute_ids):
     with open('attributes_dict.txt', 'wb') as file:
         file.write(pickle.dumps(attribute_ids))
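The read side (hypothetical, not part of the original) is the mirror image:

 def _readAttributesFromFile(self):
     # load the attribute ids written by _writeAttributesToFile
     with open('attributes_dict.txt', 'rb') as file:
         return pickle.loads(file.read())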
Example #19
def quick_deepcopy(dictionary):
    return cPickle.loads(cPickle.dumps(dictionary, -1))
Example #20
                    per_grads.append(params.grad.data)

                per_grads = Clip(per_grads, Clip_bound)

                for i in range(len(grads)):
                    grads[i] += per_grads[i]

            for i in range(len(grads)):
                grads[i] /= BATCH_SIZE
            loss /= BATCH_SIZE

            train_loss += loss

            grads_noise = Add_noise(grads, Clip_bound / BATCH_SIZE)

            client.publish("fixdp_grads/" + CLIENT_ID, cPickle.dumps(grads_noise), 2)

            if step % TEST_NUM == 0:
                print(step)
                total = 0
                correct = 0
                correct_pad = 0
                test_loss = 0
                man_file1 = open(RESULT_ROOT + '[' + str(EDGE_NAME) + ']' + '[FixedDP-Accuracy]', 'w')
                for batch_idx, (test_x, test_y) in enumerate(test_loader):
                    if batch_idx < test_idx:

                        output = model(test_x)
                        pred_y = torch.max(output, 1)[1].data.numpy()
                        test_y = test_y.view(-1)
                        test_loss += nn.CrossEntropyLoss()(output, test_y).item()
Example #21
 def deepcopy(self, model):
     return cPickle.loads(cPickle.dumps(model, -1))
Example #22
 def __deepcopy__(self, memo={}):
     obj = self.__class__(doc=pickle.loads(pickle.dumps(self.copy())), gen_skel=self.gen_skel, mongokat_collection=self.mongokat_collection, fetched_fields=self._fetched_fields)
     obj.__dict__ = self.__dict__.copy()
     return obj
Example #23
def deepcopy(obj):
  return cPickle.loads(cPickle.dumps(obj))
Example #24
    def server_thread(self, clientSocket):

        try:

            print('\nHandling client connection. . .')

            # get the request from browser
            data = b''

            while True:
                part = clientSocket.recv(128)
                data += part

                if len(part) < 128:
                    # a short read marks the end of the request; appending part
                    # a second time here would corrupt the pickled payload
                    break

            info = pickle.loads(data)

            # str_data = data.decode()

            # the connected client's IP addr
            h, p = clientSocket.getpeername()

            if info['type'] not in ['IMAGE', 'INTERNET_MSG']:
                print('\nMessage from client: ')
                print(info)

            else:
                print("Message from client: IMAGE FILE")

            req_type = info['type']

            if req_type == 'TEST_CONNECT':
                return_data = {'THIS CONTENT DOES NOT MATTER': -1}
                clientSocket.sendall(pickle.dumps(return_data))

            if req_type == 'KEEP_ALIVE':
                # update the time which we have last seen this client
                client_info = {
                    'IP': h,
                    'PORT_NO': info['port'],
                    'nickname': info['nickname'],
                    'local_ip': info['local_ip'],
                    'mode': info['mode']
                }
                self.update_peer(client_info)

            if req_type == 'REQUEST_PEER_DICT':
                # respond with directory of all active clients
                self.send_list_of_all_peers_to_peer(clientSocket)

            if req_type == 'QUIT':

                for key in self.clients:
                    print(key)

                # remove client from peers dict
                index = h + ':' + str(info['port'])

                print(self.clients)

                # try to delete the user's mailbox - should succeed if on INTERNET mode
                try:
                    del self.mailboxes[index]
                    print('Removed ' + index +
                          '\'s mailbox due to QUIT command. . .')
                except:
                    pass

                try:
                    # this should succeed for INTERNET or LAN mode. . .
                    del self.clients[index]
                    print('Removed ' + index + ' due to QUIT command. . .')

                except Exception as e:
                    # user must be hosting server in separate window - try deleting peer at index of their local IP
                    local_ip_index = info['local_ip'] + ':' + str(info['port'])
                    del self.clients[local_ip_index]
                    print('Removed ' + local_ip_index +
                          ' due to QUIT command. . .')

                finally:
                    pass

            if req_type == "MSG_CHECK":
                # check if the peer has any messages waiting
                # assume that the peer is not on the same LAN as the server
                local_ip = info['local_ip']
                ip = h
                port = info['port']

                if ip == local_ip:
                    # operating in internet mode on LAN for some reason - refer to peer using its local IP
                    index = local_ip + ':' + str(port)

                else:
                    # truly operating over the internet - refer to peer using IP not local IP
                    index = ip + ':' + str(port)

                if self.mailboxes.get(index) is not None:
                    return_data = {'data': []}
                    for tup in self.mailboxes[index]:
                        print("\nRETURNING IMAGE\n")
                        return_data['data'].append((tup[0], tup[1]))

                    self.mailboxes[index] = [
                    ]  # messages will be sent to user, so remove them from central server

                    data = pickle.dumps(return_data)
                    clientSocket.sendall(data)

                else:
                    return  # take no further action

            if req_type == "INTERNET_MSG":
                # message is to be sent to clients over network
                ip = h
                local_ip = info['local_ip']
                port = info['port']
                sender = info['sender']
                png = info['data']

                if ip == local_ip:
                    # operating in internet mode on LAN for some reason but continue
                    index = local_ip + ':' + str(port)

                else:
                    # truly operating over the internet - use IP not Local IP
                    index = ip + ':' + str(port)

                for key in self.mailboxes:
                    if key != index:  # don't send to yourself
                        print(key, index)
                        self.mailboxes[key].append((sender, png))

        except Exception as e:
            print("EXCEPTION IN SERVER THREAD. . .")
            print(e)
        finally:
            clientSocket.close()
Example #25
def generator():
    inputs = imgs_file_list
    targets = list(zip(objs_info_list, mask_list))
    assert len(inputs) == len(targets)
    for _input, _target in zip(inputs, targets):
        yield _input.encode('utf-8'), cPickle.dumps(_target)
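Each yielded target is an opaque pickle; a consumer recovers the (objs_info, mask) pair with the matching loads call. A minimal sketch of decoding one item:

for img_path_bytes, target_bytes in generator():
    objs_info, mask = cPickle.loads(target_bytes)
    break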
Example #26
    list_of_number_of_shared_molecules = []

    #set of the ingredient from the "rows"
    set1 = row["set_molecules"]
    #name of the ingredient from the "rows"
    ingredient_1 = row["ingredient"]

    #starting a dictionary entry with a value of an empty dict
    flavor_matrix_df[ingredient_1] = {}

    #iterate through the "columns" of ingredients
    for index, row in flavorDB_pandas.iterrows():

        #set of the ingredient from the "columns" of ingredients
        set2 = row["set_molecules"]
        #name of the ingredient from the "columns" of ingredients
        ingredient_2 = row["ingredient"]

        #The molecules that are shared between the two sets
        shared_molecules = set1.intersection(set2)

        #access the dictionary of a dictionary from 1st ingredient
        #set the value as the number of shared molecules
        flavor_matrix_df[ingredient_1][ingredient_2] = len(shared_molecules)

with open('./data/ingredients/flavor_matrix_dict.pickle', 'wb') as file:
    file.write(pickle.dumps(flavor_matrix_df))
    file.close()

#Celebratory print statement
print("we did it!")
Example #27
    print(len(data1['images']))
    print(len(data1['labels']))

    data = split_data(data1, ratio=ratio)

    print('train', data['train']['size'])
    print('valid', data['valid']['size'])
    print('test', data['test']['size'])

    return data


if __name__ == '__main__':

    data = get_data(shape=(540, 540, 1), ratio=(6, 1, 3))

    # add_pickle

    dump = pickle.dumps(data)
    print('dump.pickle')

    GZIP = True
    if GZIP:
        with gzip.open('dump.gz', 'wb') as f:
            f.write(dump)
            print('gzip dump was written')
    else:
        with open('dump.pickle', 'wb') as f:
            pickle.dump(dump, f, protocol=4)
            print('dump was written')
Example #28
        spectrum=spectrum[spectrum[:,1] > 10]

    for i in scan_to_lines[scan_number]:
        mz_low = skyinfo['obs_mz'].values[i] - (10.0/skyinfo['charge'].values[i])
        mz_high = skyinfo['obs_mz'].values[i] + (60.0/skyinfo['charge'].values[i])
        try:
            output_scans[i].append(spectrum[(mz_low < spectrum[:,0]) & (spectrum[:,0] < mz_high)])
        except:
            print (i, output_scans[i], mz_low, mz_high)
            print (spectrum)
            print (spectrum[(mz_low < spectrum[:,0]) & (spectrum[:,0] < mz_high)])
            sys.exit(0)
        if len(output_scans[i]) == scans_per_line[i]:
            hdx_time = mzml[23:-5]
            name = skyinfo.iloc[i]['name']
            #RT_start = skyinfo.iloc[i]['RT_'+str(sys.argv[1])]
            RT_lo = int(RT_matches[name][i][0])
            RT_hi = int(RT_matches[name][i][1])
            keep_drift_times = drift_times[(drift_times >= dt_lbounds[i]) & (drift_times <= dt_ubounds[i]) & (scan_times <= ret_ubounds[i]) & (scan_times >= ret_lbounds[i])]
            keep_scan_times = scan_times[(drift_times >= dt_lbounds[i]) & (drift_times <= dt_ubounds[i]) & (scan_times <= ret_ubounds[RT_hi]) & (scan_times >= ret_lbounds[RT_lo])]
            output = [sorted(set(keep_scan_times)), sorted(set(keep_drift_times)), output_scans[i]]
            with open(str(i)+"_"+hdx_time+"_"+name+".cpickle.zlib", 'wb') as file:
                file.write(zlib.compress(cpickle.dumps(output)))
            print (scan_number, process.memory_info().rss / (1024*1024*1024), 'presave')
            output_scans[i] = []
            print (scan_number, process.memory_info().rss / (1024*1024*1024), 'savedisk')
            
    if len(scan_to_lines[scan_number]) > 0:
        cur_lengths = np.array([len(output_scans[i]) for i in scan_to_lines[scan_number]])
        target_lengths = np.array([scans_per_line[i] for i in scan_to_lines[scan_number]])
Example #29
            province_map['香港'] = sheng
        elif sheng == '澳门特别行政区':
            province_map['澳门'] = sheng


r = redis.Redis(host='192.168.0.148', port=6379, password='******', db=0)
area_map = r.get("area_map")
city_map = r.get("city_map")
province_area_map = r.get("province_area_map")
province_map = r.get("province_map")
# defaultdict deserialization and serialization: https://mlog.club/article/1456963
# custom defaultdict serialization and deserialization: https://www.coder.work/article/356672
if not area_map or not city_map or not province_area_map or not province_map:
    area_map, city_map, province_area_map, province_map, latlng = _data_from_csv(
    )
    r.set("area_map", cPickle.dumps(area_map))
    r.set("city_map", cPickle.dumps(city_map))
    r.set("province_area_map", cPickle.dumps(province_area_map))
    r.set("province_map", cPickle.dumps(province_map))
else:
    area_map = cPickle.loads(area_map)
    city_map = cPickle.loads(city_map)
    province_area_map = cPickle.loads(province_area_map)
    province_map = cPickle.loads(province_map)

# print(area_map)
# print(city_map)
# print(province_area_map)
# print(province_map)

# municipalities directly administered by the central government
Example #30
 def generator():
     """TF Dataset generator."""
     assert len(train_img_paths_list) == len(train_targets_list)
     for _input, _target in zip(train_img_paths_list,
                                train_targets_list):
         yield _input.encode('utf-8'), cPickle.dumps(_target)
Example #31
from socket import *
import _pickle as pickle
serverName = "localhost"
serverPort = 12001
selectHero = ""
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverPort))
waiting = 0
while 1:
    # before hero selection
    while not selectHero:
        hero = input('choose your hero :')
        clientSocket.send(
            pickle.dumps({
                'user': selectHero,
                'command': ['select-hero', hero]
            }))
        response = pickle.loads(clientSocket.recv(2048))
        selectHero = response['user']
        if not selectHero:
            print("{} is unavailable".format(hero))
        else:
            print("You are selecting '{}'".format(hero))
            data = response['data']
            print(data)
    # waiting for server response
    if waiting:
        clientSocket.settimeout(5)
        try:
            response = pickle.loads(clientSocket.recv(2048))
            if response['status'] == 'death':
Example #32
def threaded_client(conn, _id):
    """
    runs in a new thread for each player connected to the server
    :param conn: socket connection to the client
    :param _id: int
    :return: None
    """
    global connections, players

    current_id = _id

    # receive a name from the client
    data = conn.recv(16)
    name = data.decode("utf-8")
    print("[LOG]", name, "connected to the server.")

    # Setup properties for each new player
    color = colors[current_id % len(colors)]
    x, y = get_start_location(players)
    players[current_id] = {
        "x": x,
        "y": y,
        "color": color,
        "name": name,
        "direction": 0.0
    }  # direction in Radians

    # pickle data and send initial info to clients
    conn.send(str.encode(str(current_id)))
    print("send", current_id)

    # server will receive basic commands from client
    # it will send back all of the other clients info
    '''
    commands start with:
    move
    jump
    get
    id - returns id of client
    '''
    while True:
        try:
            # Receive data from client
            data = conn.recv(64)

            if not data:
                break

            data = data.decode("utf-8")
            #print("[DATA] Received", data, "from client id:", current_id)

            # look for specific commands from recieved data
            if data.split(" ")[0] == "move":
                split_data = data.split(" ")
                x = int(split_data[1])
                y = int(split_data[2])
                direction = float(split_data[3])
                players[current_id]["x"] = x
                players[current_id]["y"] = y
                players[current_id]["direction"] = direction

                player_collision(players)

                send_data = pickle.dumps((players))

            if data.split(" ")[0] == "shoot":
                print(data)
                split_data = data.split(" ")
                x = int(split_data[1])
                y = int(split_data[2])
                dir_x = float(split_data[3])
                dir_y = float(split_data[4])
                owner_id = int(split_data[5])
                shots.append({
                    "x": x,
                    "y": y,
                    "dir_x": dir_x,
                    "dir_y": dir_y,
                    "owner_id": owner_id
                })

                shots_collision(shots, players, owner_id)

                send_data = pickle.dumps((shots))

            elif data.split(" ")[0] == "id":
                send_data = str.encode(
                    str(current_id))  # if user requests id then send it

            elif data.split(" ")[0] == "jump":
                send_data = pickle.dumps((players))
            else:
                # any other command just send back list of players
                send_data = pickle.dumps((players))

            # send data back to clients
            conn.send(send_data)

        except Exception as e:
            print(e)
            break  # if an exception has been reached disconnect client

        time.sleep(0.001)

    # When user disconnects
    print("[DISCONNECT] Name:", name, ", Client Id:", current_id,
          "disconnected")

    connections -= 1
    try:
        del players[current_id]  # remove client information from players list
    except:
        pass  # make this nicer when a player gets hit
    conn.close()  # close connection
Example #33
def process_cursor(cursor):
    with open('agg-d-std.json') as f:
        std = json.load(f)

    client = MongoClient("localhost", 27017)
    db = client.dwh

    with tqdm(total=cursor[0],
              desc="Progress{}".format(cursor[2]),
              position=cursor[2],
              leave=True) as pbar:
        for doc in db.agg_d.find(
            {
                "date": {
                    "$gte": datetime.datetime(year=2012, month=1, day=1)
                },
                "target": {
                    "$lte": 250000
                }
            },
                no_cursor_timeout=True).skip(cursor[1]).limit(cursor[0]):

            time_filter = {
                "$and": [{
                    "date": {
                        "$gte": doc["date"] - relativedelta(days=365)
                    }
                }, {
                    "date": {
                        "$lt": doc["date"]
                    }
                }]
            }

            if db.agg_d.find({"id": doc["id"], **time_filter}).count() < 50:
                pbar.update(1)
                continue

            record = {
                "x": np.empty((365, 5)),
                "y": (doc["target"] - std["mean"]) / std["std"]
            }

            for i in range(365):
                date = doc["date"] - relativedelta(days=365 - i)
                wday = int(date.strftime("%w")) + 1
                record["x"][i][0] = -std["mean"] / std["std"]
                record["x"][i][1] = np.sin(2 * np.pi * wday / 7)
                record["x"][i][2] = np.cos(2 * np.pi * wday / 7)
                record["x"][i][3] = np.sin(2 * np.pi * date.month / 12)
                record["x"][i][4] = np.cos(2 * np.pi * date.month / 12)

            for day in db.agg_d.find({"id": doc["id"], **time_filter}):
                index = 365 - (doc["date"] - day["date"]).days
                record["x"][index][0] = (day["target"] -
                                         std["mean"]) / std["std"]
                wday = int(day["date"].strftime("%w")) + 1
                record["x"][index][1] = np.sin(2 * np.pi * wday / 7)
                record["x"][index][2] = np.cos(2 * np.pi * wday / 7)
                record["x"][index][3] = np.sin(2 * np.pi * doc["month"] / 12)
                record["x"][index][4] = np.cos(2 * np.pi * doc["month"] / 12)

            record["x"] = Binary(_pickle.dumps(record["x"].tolist()))

            if np.random.rand() < TRAIN_RATIO:
                db.train.insert(record)
            else:
                db.test.insert(record)

            pbar.update(1)
Example #34
def run_parallelism_analysis(nmin_reps=3, nmin = 2, FDR = 0.05, n_nonsyn_min=50):
    # pass nest list with frequency, coverage of major, coverage of minor, taxon
    #output_to_keep = ['INS', 'DEL', 'SNP', 'SUB']
    to_keep_samples = get_breseq_samples_to_keep()
    to_keep_taxa = get_breseq_taxa_to_keep()
    p_star_dict = {}
    G_score_list = []
    for taxon in to_keep_taxa:
        print(taxon)
        effective_gene_lengths, effective_gene_lengths_syn, Lsyn, Lnon, substitution_specific_synonymous_fraction = lt.calculate_synonymous_nonsynonymous_target_sizes(taxon)
        taxon_sites = []
        taxon_samples = [ x for x in to_keep_samples if x.startswith(taxon) ]
        sites_to_remove = get_sites_to_remove(taxon)
        # keep insertion, deletions, and nonsynonymous SNPs
        # get size_dict
        gene_count_dict = {}
        gene_count_syn_dict = {}
        #print(sites_to_remove)
        for taxon_sample in taxon_samples:
            for i, line in enumerate(open(lt.get_path() + '/data/breseq/annotated/' + taxon_sample + '.gd', 'r')):
                line_split = line.strip().split('\t')
                if line_split[0] == '#=GENOME_DIFF':
                    continue
                if (line_split[3] + '_' + line_split[4] in sites_to_remove):
                    continue
                if (line_split[0] not in output_to_keep): #or ('frequency' in line_split[6]) or (line_split[3] + '_' + line_split[4] in sites_to_remove):
                    continue
                if line_split[0] == 'SNP':
                    if [s for s in line_split if 'snp_type=' in s][0].split('=')[1] == 'nonsynonymous':
                        locus_tag = [s for s in line_split if 'locus_tag=' in s][0].split('=')[1]
                        frequency = float([s for s in line_split if 'frequency=' in s][0].split('=')[1])
                        if ';' in locus_tag:
                            for locus_tag_j in locus_tag.split(';'):
                                if locus_tag_j not in gene_count_dict:
                                    gene_count_dict[locus_tag_j] = {}
                                    gene_count_dict[locus_tag_j]['freqs'] = []
                                    gene_count_dict[locus_tag_j]['n_mut'] = 0

                                gene_count_dict[locus_tag_j]['n_mut'] += 1
                                gene_count_dict[locus_tag_j]['freqs'].append(frequency)

                        else:
                            if locus_tag not in gene_count_dict:
                                #gene_count_dict[locus_tag] = 1
                                gene_count_dict[locus_tag] = {}
                                gene_count_dict[locus_tag]['freqs'] = []
                                gene_count_dict[locus_tag]['n_mut'] = 0

                            gene_count_dict[locus_tag]['n_mut'] += 1
                            gene_count_dict[locus_tag]['freqs'].append(frequency)


                    elif [s for s in line_split if 'snp_type=' in s][0].split('=')[1] == 'synonymous':
                        locus_tag = [s for s in line_split if 'locus_tag=' in s][0].split('=')[1]
                        frequency = float([s for s in line_split if 'frequency=' in s][0].split('=')[1])
                        if ';' in locus_tag:
                            for locus_tag_j in locus_tag.split(';'):
                                if locus_tag_j not in gene_count_syn_dict:
                                    gene_count_syn_dict[locus_tag_j] = {}
                                    gene_count_syn_dict[locus_tag_j]['freqs'] = []
                                    gene_count_syn_dict[locus_tag_j]['n_mut'] = 0

                                gene_count_syn_dict[locus_tag_j]['n_mut'] += 1
                                gene_count_syn_dict[locus_tag_j]['freqs'].append(frequency)

                        else:
                            if locus_tag not in gene_count_syn_dict:
                                gene_count_syn_dict[locus_tag] = {}
                                gene_count_syn_dict[locus_tag]['freqs'] = []
                                gene_count_syn_dict[locus_tag]['n_mut'] = 0

                            gene_count_syn_dict[locus_tag]['n_mut'] += 1
                            gene_count_syn_dict[locus_tag]['freqs'].append(frequency)
                    else:
                        continue
                else:
                    if len([s for s in line_split if 'gene_position=coding' in s]) >= 1:
                        locus_tag = [s for s in line_split if 'locus_tag=' in s][0].split('=')[1]
                        frequency = float([s for s in line_split if 'frequency=' in s][0].split('=')[1])
                        if ';' in locus_tag:
                            for locus_tag_j in locus_tag.split(';'):

                                if locus_tag_j not in gene_count_dict:
                                    gene_count_dict[locus_tag_j] = {}
                                    gene_count_dict[locus_tag_j]['freqs'] = []
                                    gene_count_dict[locus_tag_j]['n_mut'] = 0

                                gene_count_dict[locus_tag_j]['freqs'].append(frequency)
                                gene_count_dict[locus_tag_j]['n_mut'] += 1

                        else:
                            if locus_tag not in gene_count_dict:
                                #gene_count_dict[locus_tag] = 1
                                gene_count_dict[locus_tag] = {}
                                gene_count_dict[locus_tag]['freqs'] = []
                                gene_count_dict[locus_tag]['n_mut'] = 0

                            gene_count_dict[locus_tag]['freqs'].append(frequency)
                            gene_count_dict[locus_tag]['n_mut'] += 1

        gene_parallelism_statistics = {}
        for gene_i, length_i in effective_gene_lengths.items():
            gene_parallelism_statistics[gene_i] = {}
            gene_parallelism_statistics[gene_i]['length'] = length_i
            gene_parallelism_statistics[gene_i]['observed'] = 0
            gene_parallelism_statistics[gene_i]['multiplicity'] = 0

        gene_parallelism_statistics_syn = {}
        for gene_i, length_i in effective_gene_lengths_syn.items():
            gene_parallelism_statistics_syn[gene_i] = {}
            gene_parallelism_statistics_syn[gene_i]['length'] = length_i
            gene_parallelism_statistics_syn[gene_i]['observed'] = 0
            gene_parallelism_statistics_syn[gene_i]['multiplicity'] = 0

        # save number of mutations for multiplicity
        for locus_tag_i, locus_tag_i_dict in gene_count_dict.items():
            gene_parallelism_statistics[locus_tag_i]['observed'] = locus_tag_i_dict['n_mut']
            gene_parallelism_statistics[locus_tag_i]['mean_freq'] = np.mean(locus_tag_i_dict['freqs'])

        # same thing for synonymous
        for locus_tag_i, locus_tag_i_dict in gene_count_syn_dict.items():
            gene_parallelism_statistics_syn[locus_tag_i]['observed'] = locus_tag_i_dict['n_mut']
            gene_parallelism_statistics_syn[locus_tag_i]['mean_freq'] = np.mean(locus_tag_i_dict['freqs'])

        L_mean = np.mean(list(effective_gene_lengths.values()))
        L_tot = sum(list(effective_gene_lengths.values()))
        n_tot = sum([ x['n_mut'] for x in gene_count_dict.values() ])
        # don't include taxa with fewer than n_nonsyn_min nonsynonymous mutations
        print("N_total = " + str(n_tot))
        if n_tot < n_nonsyn_min:
            continue
        # go back over and calculate multiplicity
        for locus_tag_i in gene_parallelism_statistics.keys():
            # double check the measurements from this
            gene_parallelism_statistics[locus_tag_i]['multiplicity'] = gene_parallelism_statistics[locus_tag_i]['observed'] *1.0/ effective_gene_lengths[locus_tag_i] * L_mean
            gene_parallelism_statistics[locus_tag_i]['expected'] = n_tot*gene_parallelism_statistics[locus_tag_i]['length']/L_tot

        # get multiplicity for synonymous mutations
        L_mean_syn = np.mean(list(effective_gene_lengths_syn.values()))
        L_tot_syn = sum(list(effective_gene_lengths_syn.values()))
        n_tot_syn = sum([ x['n_mut'] for x in gene_count_syn_dict.values() ])

        # go back over and calculate multiplicity
        for locus_tag_i in gene_parallelism_statistics_syn.keys():
            # double check the measurements from this
            gene_parallelism_statistics_syn[locus_tag_i]['multiplicity'] = gene_parallelism_statistics_syn[locus_tag_i]['observed'] *1.0/ effective_gene_lengths_syn[locus_tag_i] * L_mean_syn
            gene_parallelism_statistics_syn[locus_tag_i]['expected'] = n_tot_syn*gene_parallelism_statistics_syn[locus_tag_i]['length']/L_tot_syn

        pooled_multiplicities = np.array([gene_parallelism_statistics[gene_name]['multiplicity'] for gene_name in gene_parallelism_statistics.keys() if gene_parallelism_statistics[gene_name]['multiplicity'] >=1])
        pooled_multiplicities.sort()

        pooled_tupe_multiplicities = np.array([(gene_parallelism_statistics[gene_name]['multiplicity'], gene_parallelism_statistics[gene_name]['observed']) for gene_name in gene_parallelism_statistics.keys() if gene_parallelism_statistics[gene_name]['multiplicity'] >=1])
        pooled_tupe_multiplicities = sorted(pooled_tupe_multiplicities, key=lambda x: x[0])
        pooled_tupe_multiplicities_x = [i[0] for i in pooled_tupe_multiplicities]
        pooled_tupe_multiplicities_y = [i[1] for i in pooled_tupe_multiplicities]
        pooled_tupe_multiplicities_y = [sum(pooled_tupe_multiplicities_y[i:]) / sum(pooled_tupe_multiplicities_y) for i in range(len(pooled_tupe_multiplicities_y))]

        null_multiplicity_survival = lt.NullGeneMultiplicitySurvivalFunction.from_parallelism_statistics( gene_parallelism_statistics )
        #observed_ms_test, observed_multiplicity_survival_test = lt.calculate_unnormalized_survival_from_vector(pooled_multiplicities)
        null_multiplicity_survival_copy = null_multiplicity_survival(pooled_multiplicities)
        null_multiplicity_survival_copy = [sum(null_multiplicity_survival_copy[i:]) / sum(null_multiplicity_survival_copy) for i in range(len(null_multiplicity_survival_copy)) ]
        #threshold_idx = numpy.nonzero((null_multiplicity_survival(observed_ms)*1.0/observed_multiplicity_survival)<FDR)[0][0]
        mult_survival_dict = {'Mult': pooled_multiplicities, 'Obs_fract': pooled_tupe_multiplicities_y, 'Null_fract': null_multiplicity_survival_copy}
        mult_survival_df = pd.DataFrame(mult_survival_dict)
        mult_survival_df_out = lt.get_path() + '/data/breseq/mult_survival_curves/' + taxon + '.txt'
        mult_survival_df.to_csv(mult_survival_df_out, sep = '\t', index = True)

        # get likelihood score and null test
        observed_G, pvalue = lt.calculate_total_parallelism(gene_parallelism_statistics)
        G_score_list.append((taxon, observed_G, pvalue))
        print(observed_G, pvalue)
        if pvalue >= 0.05:
            continue
        # Give each gene a p-value, get distribution
        gene_logpvalues = lt.calculate_parallelism_logpvalues(gene_parallelism_statistics)
        pooled_pvalues = []
        for gene_name in gene_logpvalues.keys():
            if (gene_parallelism_statistics[gene_name]['observed']>= nmin) and (float(gene_logpvalues[gene_name]) >= 0):
                pooled_pvalues.append( gene_logpvalues[gene_name] )

        pooled_pvalues = np.array(pooled_pvalues)
        pooled_pvalues.sort()
        if len(pooled_pvalues) == 0:
            continue

        null_pvalue_survival = lt.NullGeneLogpSurvivalFunction.from_parallelism_statistics( gene_parallelism_statistics, nmin=nmin)
        observed_ps, observed_pvalue_survival = lt.calculate_unnormalized_survival_from_vector(pooled_pvalues, min_x=-4)
        # Pvalue version
        # remove negative minus log p values.
        neg_p_idx = np.where(observed_ps>=0)
        observed_ps_copy = observed_ps[neg_p_idx]
        observed_pvalue_survival_copy = observed_pvalue_survival[neg_p_idx]
        pvalue_pass_threshold = np.nonzero(null_pvalue_survival(observed_ps_copy)*1.0/observed_pvalue_survival_copy<FDR)[0]
        if len(pvalue_pass_threshold) == 0:
            continue
        threshold_idx = pvalue_pass_threshold[0]
        pstar = observed_ps_copy[threshold_idx] # lowest value where this is true
        num_significant = observed_pvalue_survival[threshold_idx]
        # make it log base 10
        logpvalues_dict = {'P_value': observed_ps/math.log(10), 'Obs_num': observed_pvalue_survival, 'Null_num': null_pvalue_survival(observed_ps)}
        logpvalues_df = pd.DataFrame(logpvalues_dict)
        logpvalues_df_out = lt.get_path() + '/data/breseq/logpvalues/' + taxon + '.txt'
        logpvalues_df.to_csv(logpvalues_df_out, sep = '\t', index = True)

        p_star_dict[taxon] = (num_significant, pstar/math.log(10))

        output_mult_gene_filename = lt.get_path() + '/data/breseq/mult_genes_nonsyn_sig/' + taxon + '.txt'
        output_mult_gene = open(output_mult_gene_filename,"w")
        output_mult_gene.write(",".join(["Gene", "Length", "Observed", "Expected", "Multiplicity", "-log10(P)"]))
        for gene_name in sorted(gene_parallelism_statistics, key=lambda x: gene_parallelism_statistics.get(x)['observed'],reverse=True):
            if gene_logpvalues[gene_name] >= pstar and gene_parallelism_statistics[gene_name]['observed']>=nmin:
                output_mult_gene.write("\n")
                # log base 10 transform the p-values here as well
                output_mult_gene.write("%s, %0.1f, %d, %0.2f, %0.2f, %g" % (gene_name, gene_parallelism_statistics[gene_name]['length'],  gene_parallelism_statistics[gene_name]['observed'], gene_parallelism_statistics[gene_name]['expected'], gene_parallelism_statistics[gene_name]['multiplicity'], abs(gene_logpvalues[gene_name])/math.log(10) ))
        output_mult_gene.close()

        output_mult_syn_filename = lt.get_path() + '/data/breseq/mult_genes_all/' + taxon + '.txt'
        output_mult_syn = open(output_mult_syn_filename,"w")
        output_mult_syn.write(",".join(["Gene", "mult", "mult_syn", "mean_freq", "mean_freq_syn"]))
        for locus_tag_i in gene_parallelism_statistics.keys():
            mult_i = gene_parallelism_statistics[locus_tag_i]['multiplicity']
            mult_i_syn = gene_parallelism_statistics_syn[locus_tag_i]['multiplicity']
            if (mult_i > 0) and (mult_i_syn > 0):
                freq_i = gene_parallelism_statistics[locus_tag_i]['mean_freq']
                freq_i_syn = gene_parallelism_statistics_syn[locus_tag_i]['mean_freq']
                output_mult_syn.write("\n")
                output_mult_syn.write("%s, %f, %f, %f, %f" % (locus_tag_i, mult_i,  mult_i_syn, freq_i, freq_i_syn))
        output_mult_syn.close()

    G_score_list_p_vales = [i[2] for i in G_score_list]
    reject, pvals_corrected, alphacSidak, alphacBonf = mt.multipletests(G_score_list_p_vales, alpha=0.05, method='fdr_bh')
    total_parallelism_path = lt.get_path() + '/data/breseq/total_parallelism.txt'
    total_parallelism = open(total_parallelism_path,"w")
    total_parallelism.write("\t".join(["Taxon", "G_score", "p_value", "p_value_BH"]))
    for i in range(len(pvals_corrected)):
        taxon_i = G_score_list[i][0]
        G_score_i = G_score_list[i][1]
        p_value_i = G_score_list[i][2]
        pvals_corrected_i = pvals_corrected[i]

        total_parallelism.write("\n")
        total_parallelism.write("\t".join([taxon_i, str(G_score_i), str(p_value_i), str(pvals_corrected_i)]))

    total_parallelism.close()
    with open(lt.get_path() + '/data/breseq/p_star.txt', 'wb') as file:
        file.write(pickle.dumps(p_star_dict)) # use `pickle.loads` to do the reverse
Example #35
def save(data, path):
    serialized = cPickle.dumps(data)
    with gzip.open(path, 'wb', compresslevel=1) as file_object:
        file_object.write(serialized)
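A matching load function (hypothetical) reverses the gzip and pickle steps:

def load(path):
    with gzip.open(path, 'rb') as file_object:
        return cPickle.loads(file_object.read())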
Example #36
def threaded_client(conn, _id):
    global connections, players, dots, nxt, start, started  # 'start' is assigned below, so it must be declared global

    current_id = _id
    game_time = 0

    # receive a name from the client
    data = conn.recv(16)
    name = data.decode("utf-8")
    print("[LOG]", name, "connected to the server")

    # properties for new players
    color = colors[current_id]
    x, y = get_start_location(players)
    players[current_id] = {
        "x": x,
        "y": y,
        "color": color,
        "score": 0,
        "name": name
    }

    #pickle data and send initial info to clients
    conn.send(str.encode(str(current_id)))

    while True:
        if start:
            game_time = round(time.time() - start_time)
            # if the game time passes the round time, the game will stop
            if game_time >= ROUND_TIME:
                start = False
            else:
                if game_time // MASS_LOSS_TIME == nxt:
                    nxt += 1
                    release_mass(players)
                    print(f"[GAME] {name}'s Mass depleting")

        try:
            # receive data from client
            data = conn.recv(32)
            if not data:
                break

            data = data.decode("utf-8")
            print("[DATA] Received", data, "from client id", current_id)

            #look for specific commands from received data
            if data.split(" ")[0] == "move":
                split_data = data.split(" ")
                x = int(split_data[1])
                y = int(split_data[2])
                players[current_id]["x"] = x
                players[current_id]["y"] = y

                if start:
                    check_collision(players, dots)
                    player_collision(players)

                if len(dots) < 150:
                    create_dots(dots, random.randrange(100, 150))
                    print("[GAME] Generating more Dots")

                send_data = pickle.dumps((dots, players, game_time))

            else:
                send_data = pickle.dumps((dots, players, game_time))

            # send data back to clients
            conn.send(send_data)

        except Exception as e:
            print(e)
            break  # if exception, disconnect client

        time.sleep(0.001)

    # when user disconnects
    print("[DISCONNECT] Name:", name, " Client Id:", current_id,
          " disconnected")
    connections -= 1
    del players[current_id]
    conn.close()
Example #37
 def init(self):
     df = pd.read_csv(self.fpath)
     return self.redis.set(ct.CALENDAR_INFO, _pickle.dumps(df, 2))
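Reading the cached DataFrame back is the mirror operation; a sketch with a hypothetical method name:

 def get_calendar(self):
     raw = self.redis.get(ct.CALENDAR_INFO)
     return _pickle.loads(raw) if raw is not None else None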
Example #38
if __name__ == "__main__":
    loads = marshal.loads
    dumps = marshal.dumps
    #test cases
    ht = HashTable('/dev/shm/test.HashTable', 1024, 1)

    #set
    ht['a'] = b'1'
    ht.set('b', b'2')
#    c = {'hello': 'world'}
    c = [0,1]
    ht.setobj('c', c)

    #get
    print(ht['b'] == b'2')
    print(ht['c'] == marshal.dumps(c))
    print(ht.getobj('c') == c)
    print(ht.get('d') == None)
    try:
        ht['d']
        print(False)
    except:
        print(True)

    #contains
    print(('c' in ht) == True)
    print(('d' in ht) == False)

    #del
    del ht['c']
    print(ht.get('c') == None)
Example #39
if DEBUG_MIDI:
    debug_state.enable_midi()
    ic.enable()
else:
    ic.disable()
    ic.configureOutput(includeContext=True)

## MAIN

try:

    # NB: this is slow AF

    resp = mpx1_control_tree.make_flat(inport, outport, DEVICE_ID)
    with open("control_tree_flat.pickle", "wb") as f:
        f.write(pickle.dumps(resp))

    # control_tree = mpx1_control_tree.make(inport, outport, DEVICE_ID)
    # pprint(control_tree['desc']['label'])
    # pprint(control_tree['children'][0]['desc']['label'])
    # pprint(control_tree['children'][0]['children'][2]['desc']['label'])
    # pprint(control_tree['children'][0]['children'][2]['children'][1]['desc']['label'])
    # pprint(control_tree['children'][0]['children'][2]['children'][1]['children'][2]['desc']['label'])

except KeyboardInterrupt as e:
    cleanup_mido()

## CLEANUP

cleanup_mido()