Example #1
File: test.py Project: 530824679/YOLOv2
def predict_image():
    image_path = "/home/chenwei/HDD/Project/datasets/object_detection/FDDB2016/convert/images/2002_07_19_big_img_130.jpg"

    image = cv2.imread(image_path)
    image_size = image.shape[:2]
    input_shape = [model_params['input_height'], model_params['input_width']]
    image_data = pre_process(image, input_shape)
    image_data = image_data[np.newaxis, ...]

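    # placeholder for a single NHWC image; height/width left dynamic so any input size is accepted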
    input = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)

    network = Network(is_train=False)
    logits = network.build_network(input)
    output = network.reorg_layer(logits, model_params['anchors'])

    checkpoints = "./checkpoints/model.ckpt-128"
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoints)
        bboxes, obj_probs, class_probs = sess.run(
            output, feed_dict={input: image_data})

    bboxes, scores, class_id = postprocess(bboxes,
                                           obj_probs,
                                           class_probs,
                                           image_shape=image_size,
                                           input_shape=input_shape)

    img_detection = visualization(image, bboxes, scores, class_id,
                                  model_params["classes"])
    cv2.imshow("result", img_detection)
    cv2.waitKey(0)
Example #2
class CompareAlgoNetworkBowdenDataSet(unittest.TestCase):
    """
    Compares the algorithm and the network performance for the Bowden et al.
    (2003) dataset.
    Maximal number of searched items is 8.
    """
    def setUp(self):
        self.net = Network()
        path = '../../ratdata/bowden/rat_items'
        self.items = np.loadtxt(path, dtype=np.character)
        self.nr_words = 10
        self.net.max_visited = self.nr_words

    def testEqual(self):
        equal = 0
        not_equal = []

        for i, rat_item in enumerate(self.items):
            # words
            cues, target = rat_item[:3], rat_item[3]

            # word ids
            cue_ids = [self.net.voc[c] for c in cues]
            target_id = self.net.voc[target]

            # run the algorithm simulation
            _, visited_alg = spread_activity(init_nodes=cue_ids,
                                             target=target_id,
                                             W=self.net.W,
                                             max_visited=self.nr_words)

            # ...and the network simulation
            self.net.setup_problem(cues, target)
            print('\n', i, cues, target, target_id)

            # WTA can fail if noise added to two equal numbers is not enough
            # to pick a winner. If this happens, the simulation is repeated
            ok = True
            while ok:
                try:
                    self.net.run()
                    ok = False
                except BaseException:
                    print('WTA failed, retrying the run!')
                    continue

            l1, l2 = visited_alg, list(self.net.visited())

            if target_id in set(l1) and target_id not in set(l2) or\
                    target_id not in set(l1) and target_id in set(l2):
                not_equal.append(i)
            else:
                print('ok', equal)
                equal += 1

            print('A:', l1)
            print('N:', l2)

        print(not_equal)
Example #3
    def __init__(self,
                 num_samples,
                 burn_in,
                 population_size,
                 topology,
                 train_data,
                 test_data,
                 directory,
                 temperature,
                 swap_sample,
                 parameter_queue,
                 problem_type,
                 main_process,
                 event,
                 active_chains,
                 num_accepted,
                 swap_interval,
                 max_limit=5,
                 min_limit=-5):
        # Multiprocessing attributes
        multiprocessing.Process.__init__(self)
        self.process_id = temperature
        self.parameter_queue = parameter_queue
        self.signal_main = main_process
        self.event = event
        self.active_chains = active_chains
        self.num_accepted = num_accepted
        self.event.clear()
        self.signal_main.clear()
        # Parallel Tempering attributes
        self.temperature = temperature
        self.swap_sample = swap_sample
        self.swap_interval = swap_interval
        self.burn_in = burn_in
        # MCMC attributes
        self.num_samples = num_samples

        self.topology = topology
        self.pop_size = population_size
        self.train_data = train_data
        self.test_data = test_data
        self.problem_type = problem_type
        self.directory = directory
        self.w_size = (topology[0] * topology[1]) + (
            topology[1] * topology[2]) + topology[1] + topology[2]
        self.neural_network = Network(topology, train_data, test_data)
        self.min_limits = np.repeat(min_limit, self.w_size)
        self.max_limits = np.repeat(max_limit, self.w_size)
        self.initialize_sampling_parameters()
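        # PSO velocity bounds, scaled from the squared weights step size set in initialize_sampling_parameters()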
        max_limit_vel = self.weights_stepsize * self.weights_stepsize * 10
        min_limit_vel = self.weights_stepsize * self.weights_stepsize * -10
        self.min_limits_vel = np.repeat(min_limit_vel, self.w_size)
        self.max_limits_vel = np.repeat(max_limit_vel, self.w_size)
        self.create_directory(directory)
        PSO.__init__(self, self.pop_size, self.w_size, self.max_limits,
                     self.min_limits, self.neural_network.evaluate_fitness,
                     self.problem_type, self.max_limits_vel,
                     self.min_limits_vel)
Example #4
class CompareAlgoNetworkBowdenDataSet(unittest.TestCase):
    """
    Compares the algorithm and the network performance for the Bowden et al.
    (2003) dataset.
    Maximal number of searched items is 8.
    """

    def setUp(self):
        self.net = Network()
        path = "../../ratdata/bowden/rat_items"
        self.items = np.loadtxt(path, dtype=np.character)
        self.nr_words = 10
        self.net.max_visited = self.nr_words

    def testEqual(self):
        equal = 0
        not_equal = []

        for i, rat_item in enumerate(self.items):
            # words
            cues, target = rat_item[:3], rat_item[3]

            # word ids
            cue_ids = [self.net.voc[c] for c in cues]
            target_id = self.net.voc[target]

            # run the algorithm simulation
            _, visited_alg = spread_activity(
                init_nodes=cue_ids, target=target_id, W=self.net.W, max_visited=self.nr_words
            )

            # ...and the network simulation
            self.net.setup_problem(cues, target)
            print "\n", i, cues, target, target_id

            # WTA can fail if noise added to two equal numbers is not enough
            # to pick a winner. If this happens, the simulation is repeated
            ok = True
            while ok:
                try:
                    self.net.run()
                    ok = False
                except BaseException:
                    print "WTA failed, retrying the run!"
                    continue

            l1, l2 = visited_alg, list(self.net.visited())

            if target_id in set(l1) and target_id not in set(l2) or target_id not in set(l1) and target_id in set(l2):
                not_equal.append(i)
            else:
                print "ok", equal
                equal += 1

            print "A:", l1
            print "N:", l2

        print(not_equal)
Example #5
def predict():
    fasterRCNN = Network()
    fasterRCNN.build(is_training=False)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, os.path.join(CHECKPOINTS_PATH, "model_final.ckpt"))
        print("Model restored.")
        base_extractor = VGG16(include_top=False)
        extractor = Model(inputs=base_extractor.input, outputs=base_extractor.get_layer('block5_conv3').output)
        predict_img_names = os.listdir(PREDICT_IMG_DATA_PATH)

        for predict_img_name in predict_img_names:
            img_data, img_info = get_predict_data(predict_img_name)
            features = extractor.predict(img_data, steps=1)
            rois, scores, regression_parameter = sess.run(
                [fasterRCNN._predictions["rois"], fasterRCNN._predictions["cls_prob"],
                 fasterRCNN._predictions["bbox_pred"]],
                feed_dict={fasterRCNN.feature_map: features,
                           fasterRCNN.image_info: img_info})

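            # img_info[2] is presumably the resize scale; map RoIs back to original-image coordinates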
            boxes = rois[:, 1:5] / img_info[2]
            scores = np.reshape(scores, [scores.shape[0], -1])
            regression_parameter = np.reshape(regression_parameter, [regression_parameter.shape[0], -1])
            pred_boxes = bbox_transform_inv(boxes, regression_parameter)
            pred_boxes = clip_boxes(pred_boxes, [img_info[0] / img_info[2], img_info[1] / img_info[2]])

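            # per-class NMS, then keep only detections scoring above CONF_THRESH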
            result_list = []
            for class_index, class_name in enumerate(CLASSES[1:]):
                class_index += 1  # offset by 1 because the background class was skipped
                cls_boxes = pred_boxes[:, 4 * class_index:4 * (class_index + 1)]  # TODO:
                cls_scores = scores[:, class_index]
                detections = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
                keep = nms(detections, NMS_THRESH)
                detections = detections[keep, :]

                inds = np.where(detections[:, -1] >= CONF_THRESH)[0]  # filter by confidence threshold
                for i in inds:
                    result_for_a_class = []
                    bbox = detections[i, :4]
                    score = detections[i, -1]
                    result_for_a_class.append(predict_img_name)
                    result_for_a_class.append(class_name)
                    result_for_a_class.append(score)
                    for coordinate in bbox:
                        result_for_a_class.append(coordinate)
                    result_list.append(result_for_a_class)
                    # result_for_a_class = [fileName,class_name,score,x1,y1,x2,y2]
            if len(result_list) == 0:
                continue

            if TXT_RESULT_WANTED:
                write_txt_result(result_list)

            if IS_VISIBLE:
                visualization(result_list)
Example #6
def train():
    fasterRCNN = Network()
    fasterRCNN.build(is_training=True)
    train_op = tf.train.MomentumOptimizer(learning_rate=0.001,
                                          momentum=0.9).minimize(
                                              fasterRCNN._losses['total_loss'])
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init_op)

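        # VGG16 conv5_3 activations act as a fixed, external feature extractor (not updated by train_op)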
        base_extractor = VGG16(include_top=False)
        extractor = Model(
            inputs=base_extractor.input,
            outputs=base_extractor.get_layer('block5_conv3').output)
        train_img_names = os.listdir(TRAIN_IMG_DATA_PATH)
        trained_times = 0

        for epoch in range(1, MAX_EPOCH + 1):
            random.shuffle(train_img_names)
            for train_img_name in train_img_names:
                img_data, boxes, img_info = get_train_data(train_img_name)
                features = extractor.predict(img_data, steps=1)
                sess.run(train_op,
                         feed_dict={
                             fasterRCNN.feature_map: features,
                             fasterRCNN.gt_boxes: boxes,
                             fasterRCNN.image_info: img_info
                         })

                trained_times += 1
                if trained_times % 10 == 0:
                    total_loss = sess.run(fasterRCNN._losses['total_loss'],
                                          feed_dict={
                                              fasterRCNN.feature_map: features,
                                              fasterRCNN.gt_boxes: boxes,
                                              fasterRCNN.image_info: img_info
                                          })
                    print('epoch:{}, trained_times:{}, loss:{}'.format(
                        epoch, trained_times, total_loss))

            if epoch % 10 == 0:
                save_path = saver.save(
                    sess,
                    os.path.join(CHECKPOINTS_PATH,
                                 "model_" + str(epoch) + ".ckpt"))
                print("Model saved in path: %s" % save_path)
        save_path = saver.save(
            sess, os.path.join(CHECKPOINTS_PATH, "model_final.ckpt"))
        print("Model saved in path: %s" % save_path)
Example #7
def train(**kwargs):
    conf.parse(kwargs)

    # train_set = DataSet(cfg, train=True, test=False)
    train_set = ImageFolder(conf.TRAIN_DATA_ROOT, transform)
    train_loader = DataLoader(train_set, conf.BATCH_SIZE,
                              shuffle=True,
                              num_workers=conf.NUM_WORKERS)

    model = Network()

    if conf.LOAD_MODEL_PATH:
        print(conf.LOAD_MODEL_PATH)
        model.load_state_dict(torch.load(conf.CHECKPOINTS_ROOT + conf.LOAD_MODEL_PATH))

    device = torch.device('cuda:0' if conf.USE_GPU else 'cpu')
    criterion = nn.CrossEntropyLoss().to(device)
    lr = conf.LEARNING_RATE
    optim = torch.optim.Adam(params=model.parameters(),
                             lr=lr,
                             weight_decay=conf.WEIGHT_DECAY)
    model.to(device)

    for epoch in range(conf.MAX_EPOCH):

        model.train()
        running_loss = 0
        for step, (inputs, targets) in tqdm(enumerate(train_loader)):

            inputs, targets = inputs.to(device), targets.to(device)
            optim.zero_grad()
            outs = model(inputs)
            loss = criterion(outs, targets)
            loss.backward()
            optim.step()

            running_loss += loss.item()
            if step % conf.PRINT_FREQ == conf.PRINT_FREQ - 1:
                running_loss = running_loss / conf.PRINT_FREQ
                print('[%d, %5d] loss: %.3f' % (epoch + 1, step + 1, running_loss))
                # vis.plot('loss', running_loss)
                running_loss = 0

        torch.save(model.state_dict(), conf.CHECKPOINTS_ROOT + time.strftime('%Y-%m-%d-%H-%M-%S.pth'))

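        # note: lr is multiplied by the decay once per param group, so multiple groups would compound the decay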
        for param_group in optim.param_groups:
            lr *= conf.LEARNING_RATE_DECAY
            param_group['lr'] = lr
Example #8
def test_classify_digits():
    retina = Retina(32)
    layer_level1 = Layer(8, 'layer_1')
    layer_level2 = Layer(4, 'layer_2')
    layer_level3 = Layer(1, 'layer_3')
    layers = [layer_level1, layer_level2, layer_level3]
    ConnectTypes.rectangle_connect(retina.vision_cells, layer_level1, 0, 0)
    ConnectTypes.rectangle_connect(layer_level1.nodes, layer_level2, 0, 0)
    ConnectTypes.rectangle_connect(layer_level2.nodes, layer_level3, 0, 0)

    network = Network(layers, retina)
    cca_v1 = CommonCorticalAlgorithmV1(network)

    number_training_timesteps = 1
    t = 0
    print_to_console = True
    # train network on digit dataset to form memory and temporal groups
    with ZipFile('model/datasets/digit_0.zip') as archive:
        for entry in archive.infolist():
            with archive.open(entry) as file:
                binary_image = Image.open(file)
                if print_to_console:
                    print('timestep = ' + str(t))
                input_layer = retina.see_binary_image(binary_image, print_to_console)

                # run 1 time step for all levels in hierarchy?
                cca_v1.learn_one_time_step(input_layer)
                _save_model_at_current_timestep(t, network)
                t += 1
                # now we have trained the network using cca_v1 on data set
                if t >= number_training_timesteps:
                    break
Example #9
def predict():
    def map_char(char):
        return labels[char]

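    # presumably a Flask route: the POSTed JSON payload is decoded into an image by preprocess()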
    if request.method == "POST":
        img = request.get_json()
        img = preprocess(img)
        # plt.imshow(img[0])
        # plt.savefig('image_debug.jpg')
        # plt.close()
        net = Network(network_config)
        character, confidence = net.predict_with_pretrained_weights(img)
        print('received', character, confidence)
        data = {
            "character": map_char(character),
            "confidence": float(int(confidence * 100)) / 100.
        }
        return jsonify(data)
Example #10
    def __init__(self, train, valid, test, learningRate=0.1, epochs=50):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test
        self.network = Network(learningRate)
Example #11
File: test.py Project: 530824679/YOLOv2
def predict_video():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    capture = cv2.VideoCapture(0)

    input = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)

    network = Network(is_train=False)
    logits = network.build_network(input)
    output = network.reorg_layer(logits, model_params['anchors'])

    checkpoints = "./checkpoints/model.ckpt-128"
    saver = tf.train.Saver()

    with tf.Session(config=config) as sess:
        saver.restore(sess, checkpoints)

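        # per-frame detect-and-display loop over the webcam stream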
        while True:
            ret, image = capture.read()

            image_size = image.shape[:2]
            input_shape = [
                model_params['input_height'], model_params['input_width']
            ]
            image_data = pre_process(image, input_shape)
            image_data = image_data[np.newaxis, ...]

            bboxes, obj_probs, class_probs = sess.run(
                output, feed_dict={input: image_data})

            bboxes, scores, class_id = postprocess(bboxes,
                                                   obj_probs,
                                                   class_probs,
                                                   image_shape=image_size,
                                                   input_shape=input_shape)

            img_detection = visualization(image, bboxes, scores, class_id,
                                          model_params["classes"])
            cv2.imshow("result", img_detection)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    cv2.destroyAllWindows()
Example #12
File: test.py Project: 530824679/YOLOv3
def predict_image():
    image_path = "/home/chenwei/HDD/Project/datasets/object_detection/VOCdevkit/VOC2007/JPEGImages/000066.jpg"
    image = cv2.imread(image_path)
    image_size = image.shape[:2]
    input_shape = [model_params['input_height'], model_params['input_width']]
    image_data = preporcess(image, input_shape)
    image_data = image_data[np.newaxis, ...]

    input = tf.placeholder(shape=[1, input_shape[0], input_shape[1], 3],
                           dtype=tf.float32)

    model = Network(len(model_params['classes']),
                    model_params['anchors'],
                    is_train=False)
    with tf.variable_scope('yolov3'):
        logits = model.build_network(input)
        output = model.inference(logits)

    checkpoints = "/home/chenwei/HDD/Project/YOLOv3/weights/yolov3.ckpt"
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoints)
        bboxes, obj_probs, class_probs = sess.run(
            output, feed_dict={input: image_data})

    bboxes, scores, class_max_index = postprocess(bboxes,
                                                  obj_probs,
                                                  class_probs,
                                                  image_shape=image_size,
                                                  input_shape=input_shape)

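    # undo the letterbox transform: subtract the padding offsets, then divide by the resize ratio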
    resize_ratio = min(input_shape[1] / image_size[1],
                       input_shape[0] / image_size[0])
    dw = (input_shape[1] - resize_ratio * image_size[1]) / 2
    dh = (input_shape[0] - resize_ratio * image_size[0]) / 2
    bboxes[:, [0, 2]] = (bboxes[:, [0, 2]] - dw) / resize_ratio
    bboxes[:, [1, 3]] = (bboxes[:, [1, 3]] - dh) / resize_ratio

    img_detection = visualization(image, bboxes, scores, class_max_index,
                                  model_params["classes"])
    cv2.imshow("result", img_detection)
    cv2.waitKey(0)
Example #13
def obtain_cidr():
    cidr = None
    while True:
        cidr = string_input('CIDR')
        if cidr == 'none':
            return None
        elif not Network.check_cidr(cidr):
            error('This is not a valid (IPv4) CIDR. Please enter again, or give \'none\'')
        else:
            break
    return cidr
Example #14
    def __init__(self,
                 t: int,
                 graph: Network,
                 source: TrafficNode = None,
                 destination: TrafficNode = None,
                 speed_multiplier=1):
        if source is None:
            source = choice(graph.get_nodes())

            destination = choice(graph.get_nodes())
            while graph.get_path(source, destination) is None:
                source = choice(graph.get_nodes())
                destination = choice(graph.get_nodes())

            # Uncomment when method is implemented to replace above brute-force
            # destination = choice(graph.get_reachable_nodes(source))

        # Define object attributes
        self.source = source
        self.destination = destination
        self.path = graph.get_path(source, destination)

        self.speed_multiplier = speed_multiplier

        # Define mutable object state
        self.remaining_path = [n for n in self.path]
        self.time = t  # self.time is the last simulation time at which this vehicle was updated
        self.location = source  # self.location is the Traversable object that the vehicle is currently at
        self.last_node = source  # self.last_node is the last node the vehicle visited
        self.traversable = Component.NODE
        self.timeout = 0

        # Stats to track
        self.time_traveled = 0
        self.time_waiting = 0
Example #15
def test(**kwargs):
    conf.parse(kwargs)

    model = Network().eval()

    if conf.LOAD_MODEL_PATH:
        print(conf.LOAD_MODEL_PATH)
        model.load_state_dict(torch.load(conf.CHECKPOINTS_ROOT + conf.LOAD_MODEL_PATH))

    device = torch.device('cuda:0' if conf.USE_GPU else 'cpu')
    model.to(device)

    test_set = ImageFolder(conf.TEST_DATA_ROOT, transform)
    test_loader = DataLoader(test_set, conf.BATCH_SIZE,
                             shuffle=False,
                             num_workers=conf.NUM_WORKERS)

    results = list()

    with torch.no_grad():
        for step, (inputs, targets) in enumerate(test_loader):
            inputs, targets = inputs.to(device), targets.to(device)
            outs = model(inputs)
            pred = torch.max(outs, 1)[1]
            # print((targets == pred).float())
            # (prob_top_k, idxs_top_k) = probability.topk(3, dim=1)

            acc = (pred == targets).float().sum() / len(targets)
            results += ((pred == targets).float().to('cpu').numpy().tolist())

            print('[%5d] acc: %.3f' % (step + 1, acc))

        results = np.array(results)
        print('Top 1 acc: %.3f' % (np.sum(results) / len(results)))
Example #16
File: main.py Project: Carl-Rabbit/IMP
def run(network_lines, k, model, network_input=None):
    global rrset_func, heap, top_idx

    if model == 'IC':
        rrset_func = get_ic_rrset
    elif model == 'LT':
        rrset_func = get_lt_rrset
    else:
        raise ValueError('unknown model type: ' + str(model))

    network: Network

    if network_input:
        network = network_input
    else:
        network = Network(network_lines)

    # step 1
    # t0 = time.time()
    # rrsets: List[Set] = sampling(network, k, e, l)
    rrsets: List[List] = sampling(network, k, model)

    del network

    # step 2
    # print(f'len(rrsets) = {len(rrsets)}')

    # t1 = time.time()
    # init vtx2sid_lst
    vtx2sid_list = get_vtx2sid_lst(rrsets)
    del rrsets

    # init heap
    init_heap(vtx2sid_list)
    del vtx2sid_list

    # t2 = time.time()

    # node selection
    seeds = node_selection(k)
    # t3 = time.time()

    # print(f'sampling: {t1 - t0}\ninit heap: {t2 - t1}\nselection: {t3 - t2}')

    # remember to add 1: indices are stored 0-based, but the output format is 1-based
    output_lst = [str(i + 1) for i in seeds]
    print('\n'.join(output_lst))

    print('write to test folder')
    write_lines(
        '../DatasetOnTestPlatform/my_' + str(model).lower() + '_seeds.txt',
        output_lst)
Example #17
def remove_optimizers_params():
    ckpt_path = ''
    class_num = 2
    save_dir = 'shrinked_ckpt'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    anchors = [[676, 197], [763, 250], [684, 283], [868, 231], [745, 273],
               [544, 391], [829, 258], [678, 316], [713, 355]]

    image = tf.placeholder(tf.float32, [1, 416, 416, 3])
    model = Network(class_num, anchors, False)
    with tf.variable_scope('yolov3'):
        feature_maps = model.build_network(image)

    saver_to_restore = tf.train.Saver()
    saver_to_save = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver_to_restore.restore(sess, ckpt_path)
        saver_to_save.save(sess, save_dir + '/shrinked')
Example #18
def weights_to_ckpt():
    num_class = 80
    image_size = 416
    anchors = [[676, 197], [763, 250], [684, 283], [868, 231], [745, 273],
               [544, 391], [829, 258], [678, 316], [713, 355]]
    weight_path = '../weights/yolov3.weights'
    save_path = '../weights/yolov3.ckpt'

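    # assign the Darknet .weights to the yolov3-scope variables, then save them as a TensorFlow checkpoint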
    model = Network(num_class, anchors, False)
    with tf.Session() as sess:
        inputs = tf.placeholder(tf.float32, [1, image_size, image_size, 3])

        with tf.variable_scope('yolov3'):
            feature_maps = model.build_network(inputs)

        saver = tf.train.Saver(var_list=tf.global_variables(scope='yolov3'))

        load_ops = load_weights(tf.global_variables(scope='yolov3'),
                                weight_path)
        sess.run(load_ops)
        saver.save(sess, save_path=save_path)
        print('TensorFlow model checkpoint has been saved to {}'.format(
            save_path))
Example #19
def get_drugs_related_info(disease_pairs):
    networks = []
    for i in disease_pairs:
        networks.append(Network())
    networks = get_common_drugs(disease_pairs, networks, True)
    drugs = []
    for network in networks:
        pair_drugs = network.get_nodes_by_label('Drug')
        pair_drugs_ids = []
        for d in pair_drugs:
            pair_drugs_ids.append(d.id)
        drugs.append(pair_drugs_ids)
    networks = get_given_drugs_related_info(disease_pairs, drugs)
    return networks
Example #20
def get_disease_pairs_info(disease_pairs, writing_files):
    networks = []
    for disease_pair in disease_pairs:
        networks.append(Network())
    networks = get_common_genes(disease_pairs, networks, writing_files)
    networks = get_common_drugs(disease_pairs, networks, writing_files)
    networks = get_common_rnas(disease_pairs, networks, writing_files)
    networks = get_common_variants(disease_pairs, networks, writing_files)
    if writing_files:
        for index, disease_pair in enumerate(disease_pairs):
            temp_id1 = disease_pair[0].replace(':', '-')
            temp_id2 = disease_pair[1].replace(':', '-')
            path = '../analysis/disease_pairs/' + temp_id1 + '_' + temp_id2
            try:
                os.mkdir(path)
            except FileExistsError:
                pass
            network = networks[index]
            network.save(path + '/' + temp_id1 + '_' + temp_id2 + '_full_graph.json')
    return networks
Example #21
File: main.py Project: Carl-Rabbit/IMP
def imm_main():
    global start_time, time_limit

    file_name, seed_cnt, model, time_limit = param_parse()

    # time_limit *= 10      # for test

    start_time = time.time()

    # print_tmp('Params:', file_name, seed_cnt, model, time_limit)

    # network_lines = read_lines(file_name)
    # run(network_lines, seed_cnt, model)

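    # the first line presumably encodes the graph size; network.m edges follow, one per line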
    network = Network()
    with open(file_name, 'r') as fp:
        network.parse_first_line(fp.readline())
        for _ in range(network.m):
            network.parse_data_line(fp.readline())
    run(None, seed_cnt, model, network)

    sys.stdout.flush()
    print('time cost: ', time.time() - start_time)
Example #22
def setUp(self):
    self.net = Network()
    path = '../../ratdata/bowden/rat_items'
    self.items = np.loadtxt(path, dtype=np.character)
    self.nr_words = 10
    self.net.max_visited = self.nr_words
Example #23
# -*- coding: utf-8 -*-

import os

from config import Config

from model.data import Data
from model.network import Network

if __name__ == '__main__':
    dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            Config.CURRENT_MODEL_BASE_PATH)
    data = Data()
    data.load_data_from_file(os.path.join(dir_path,
                                          'data.nosync/all_data.npy'))
    model = Network(data, os.path.join(dir_path, 'log.nosync/network/run1'))
    model.train()
Example #24
#!/usr/bin/env python3

import os.path
import urllib.request
import io
import csv
import re
from model.network import Network
from model.gene import Gene
from model.mirna import MiRNA
from model.edge import Edge
from model.go_class import GOClass

network = Network()

file = '../data/EBI-GOA-miRNA/query.txt'
url = 'http://www.ebi.ac.uk/Tools/webservices/psicquic/view/binaryDownload?&serviceURL=https://www.ebi.ac.uk/QuickGO/psicquic-rna/webservices/current/search/&query=*&format=tab27&conversationContext=2'
if not os.path.exists(file):
    print('Database does not exist. Trying to download...')
    with urllib.request.urlopen(url) as response, open(file, 'wb') as f:
        f.write(response.read())
gene_mapping_file = '../data/EBI-GOA-miRNA/ensembl.txt'
gene_mapping_url = 'https://www.genenames.org/cgi-bin/download/custom?col=gd_hgnc_id&col=gd_app_sym&col=md_ensembl_id&status=Approved&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit'
if not os.path.exists(gene_mapping_file):
    print('Gene mapping table does not exist. Trying to download...')
    with urllib.request.urlopen(gene_mapping_url) as response, open(
            gene_mapping_file, 'wb') as f:
        f.write(response.read())
mirna_mapping_file = '../data/EBI-GOA-miRNA/hgnc.tsv'
mirna_mapping_url = 'ftp://ftp.ebi.ac.uk/pub/databases/RNAcentral/current_release/id_mapping/database_mappings/hgnc.tsv'
if not os.path.exists(mirna_mapping_file):
Example #25
    mirna_mapping_url = 'ftp://ftp.ebi.ac.uk/pub/databases/RNAcentral/current_release/id_mapping/database_mappings/tarbase.tsv'
    if not os.path.exists(mirna_mapping_file):
        print('MiRNA mapping table does not exist. Trying to download...')
        with urllib.request.urlopen(mirna_mapping_url) as response, open(
                mirna_mapping_file, 'wb') as f:
            f.write(response.read())

    hgnc_ids_file = '../data/RNAInter/all_hgnc_ids.txt'
    hgnc_ids_url = 'https://www.genenames.org/cgi-bin/download/custom?col=gd_app_sym&status=Approved&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit'
    if not os.path.exists(hgnc_ids_file):
        print('HGNC mapping table does not exist. Trying to download...')
        with urllib.request.urlopen(hgnc_ids_url) as response, open(
                hgnc_ids_file, 'wb') as f:
            f.write(response.read())

    network = Network()
    node_lookup = {}
    with io.open(file, 'r', encoding='utf-8', newline='') as f:
        reader = csv.reader(f, delimiter='\t', quotechar='"')
        next(reader, None)
        for row in reader:
            if row[3] == 'Homo sapiens' and row[6] == 'Homo sapiens' and float(
                    row[7]) > 0.9:
                interactor_a_name = row[1]
                interactor_a_type = row[2]
                interactor_b_name = row[4]
                interactor_b_type = row[5]
                interactor_a = add_rna(interactor_a_name, interactor_a_type,
                                       node_lookup)
                interactor_b = add_rna(interactor_b_name, interactor_b_type,
                                       node_lookup)
Example #26
def setUp(self):
    self.net = Network()
Example #27
calculationStart = time.clock()

startTs = datetime.datetime(2019, 1, 1, 7, 0, 0)
totalSteps = 20000  #2500
timeStep = 1
jamDensity = 124
medianValueTime = 50
random.seed(10)

vehicleId = 0
GEN_VEH_DIST = 'normal_whole'  # ["uniform", "random", "random_whole", "normal_whole"]
STRATEGY = 'vol_sim'  # ['vol_sim', 'vol_dist', 'random', 'fix']
MULTIVEH = 1  #[default=1, 2, 3,...]
NO_CHARGE = False

network = Network(startTs)

#fNode = open("C:/Users/lyy90/OneDrive/Documents/GitHub/meso_v2.0/Sioux Falls network/nodes-SiouxFalls_gong.csv")
fNode = open("F:/meso_v2.0/Sioux Falls network/nodes-SiouxFalls_gong.csv")
fNode.readline()
#fLane = open("C:/Users/lyy90/OneDrive/Documents/GitHub/meso_v2.0/Sioux Falls network/lanes-SiouxFalls_gong.csv")
fLane = open("F:/meso_v2.0/Sioux Falls network/lanes-SiouxFalls_gong.csv")
fLane.readline()
#pOd = 'C:/Users/lyy90/OneDrive/Documents/GitHub/meso_v2.0/OD_data'
pOd = "F:/meso_v2.0/OD_data"

readNodes(fNode, network)
readLanes(fLane, network)
tsPairNodePairTypeMap = readOd(pOd)
#print(tsPairNodePairTypeMap)
genVehicle(tsPairNodePairTypeMap, GEN_VEH_DIST, vehicleId, medianValueTime,
           network, MULTIVEH)
Example #28
if not os.path.exists(file) or not os.path.exists(drug_file):
    print('Database does not exist. Trying to download and extract...')
    if not os.path.exists(zip_file):
        print('Downloading latest archive...')
        with urllib.request.urlopen(url) as response, open(zip_file, 'wb') as f:
            f.write(response.read())
    if not os.path.exists(drug_file):
        with urllib.request.urlopen(drug_url) as response, open(drug_file, 'wb') as f:
            f.write(response.read())
    print('Extracting database file...')
    with gzip.open(zip_file, 'rb') as z:
        with open(file, 'wb') as f:
            shutil.copyfileobj(z, f)

network = Network()

drug_lookup = {}
with io.open(drug_file, 'r', encoding='utf-8', newline='') as f:
    reader = csv.reader(f, delimiter='\t', quotechar='"')
    for row in reader:
        drug_lookup[row[0].strip()] = row[1].strip()

# 1: STITCH compound id (flat, see above)
# 2: UMLS concept id as it was found on the label
# 3: method of detection: NLP_indication / NLP_precondition / text_mention
# 4: concept name
# 5: MedDRA concept type (LLT = lowest level term, PT = preferred term; in a few cases the term is neither LLT nor PT)
# 6: UMLS concept id for MedDRA term
# 7: MedDRA concept name
Example #29
class LogisticRegression(Classifier):
    """
    A digit-7 recognizer based on logistic regression algorithm
    Parameters
    ----------
    train : list
    valid : list
    test : list
    learningRate : float
    epochs : positive int
    Attributes
    ----------
    trainingSet : list
    validationSet : list
    testSet : list
    weight : list
    learningRate : float
    epochs : positive int
    """

    def __init__(self, train, valid, test, learningRate=0.1, epochs=50):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test
        self.network = Network(learningRate)
        
    def train(self, verbose=True):
        """Train the Logistic Regression.
        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        # Here you have to implement training method "epochs" times
        # Please use the LogisticLayer class
        self.epochs = 1
        for i in range(self.epochs):
            print(i)
            self.network.train(self.trainingSet.input, self.trainingSet.label)

    def classify(self, testInstance):
        """Classify a single instance.
        Parameters
        ----------
        testInstance : list of floats
        Returns
        -------
        bool :
            True if the testInstance is recognized as a 7, False otherwise.
        """

        # Here you have to implement classification method given an
        # instance
        res = self.network.classify(testInstance)
        return res > 0.5

    def evaluate(self, test=None):
        """Evaluate a whole dataset.
        Parameters
        ----------
        test : the dataset to be classified
        if no test data, the test set associated to the classifier will be used
        Returns
        -------
        List:
            List of classified decisions for the dataset's entries.
        """
        if test is None:
            test = self.testSet.input
        # Once you can classify an instance, just use map for all of the test
        # set.
        return list(map(self.classify, test))
Example #30
    # Creating CNN model
    det = RFDet(
        cfg.TRAIN.score_com_strength,
        cfg.TRAIN.scale_com_strength,
        cfg.TRAIN.NMS_THRESH,
        cfg.TRAIN.NMS_KSIZE,
        args.k,
        cfg.MODEL.GAUSSIAN_KSIZE,
        cfg.MODEL.GAUSSIAN_SIGMA,
        cfg.MODEL.KSIZE,
        cfg.MODEL.padding,
        cfg.MODEL.dilation,
        cfg.MODEL.scale_list,
    )
    des = HardNetNeiMask(cfg.HARDNET.MARGIN, cfg.MODEL.COO_THRSH)
    model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE,
                    args.k)
    model = model.to(device=device)
    checkpoint = torch.load(model_file)
    model.load_state_dict(checkpoint["state_dict"])

    random.seed(cfg.PROJ.SEED)
    torch.manual_seed(cfg.PROJ.SEED)
    np.random.seed(cfg.PROJ.SEED)

    root_dir = '/home/wang/workspace/RFSLAM_offline/RFNET/data/'
    csv_file = None
    seq = None
    a = None
    if args.data == 'v':
        csv_file = 'hpatch_view.csv'
        root_dir += 'hpatch_v_sequence'
Example #31
        f.write(os.path.join(config['Neo4j']['bin-path'], 'neo4j-admin'))
        f.write(' import ' +
                '--database %s ' % config['Neo4j']['database-name'] +
                ' '.join(['--nodes %s' % x
                          for x in node_import_files]) + ' ' + ' '.join([
                              '--relationships rel_%s.csv' % x
                              for x in network.edge_labels()
                          ]) + ' > import.log\n')


if __name__ == '__main__':
    with io.open('../data/config.json', 'r', encoding='utf-8',
                 newline='') as f:
        config = json.load(f)

    network = Network()
    # Import
    graphs = [
        '../data/EBI-GOA-miRNA/graph.json',
        '../data/miRTarBase/graph.json',
        '../data/RNAInter/graph.json',
        '../data/DisGeNet/graph.json',
        '../data/DrugBank/graph.json',
        '../data/DrugCentral/graph.json',
        '../data/GWAS-Catalog/graph.json',
        '../data/HGNC/graph.json',
        '../data/HPO/graph.json',
        '../data/MED-RT/graph.json',
        '../data/NDF-RT/graph.json',
        '../data/OMIM/graph.json',
        '../data/HuGE-Navigator/graph.json',
Example #32
    print(f"{gct()} : model init")
    det = RFDet(
        cfg.TRAIN.score_com_strength,
        cfg.TRAIN.scale_com_strength,
        cfg.TRAIN.NMS_THRESH,
        cfg.TRAIN.NMS_KSIZE,
        cfg.TRAIN.TOPK,
        cfg.MODEL.GAUSSIAN_KSIZE,
        cfg.MODEL.GAUSSIAN_SIGMA,
        cfg.MODEL.KSIZE,
        cfg.MODEL.padding,
        cfg.MODEL.dilation,
        cfg.MODEL.scale_list,
    )
    des = HardNetNeiMask(cfg.HARDNET.MARGIN, cfg.MODEL.COO_THRSH)
    model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE,
                    cfg.TRAIN.TOPK)

    print(f"{gct()} : to device")
    device = torch.device("cuda")
    model = model.to(device)
    resume = args.resume
    print(f"{gct()} : in {resume}")
    checkpoint = torch.load(resume)
    model.load_state_dict(checkpoint["state_dict"])

    ###############################################################################
    # detect and compute
    ###############################################################################
    img1_path, img2_path = args.imgpath.split("@")
    kp1, des1, img1, _, _ = model.detectAndCompute(img1_path, device,
                                                   (600, 460))
Example #33
def setUp(self):
    self.net = Network()
    path = "../../ratdata/bowden/rat_items"
    self.items = np.loadtxt(path, dtype=np.character)
    self.nr_words = 10
    self.net.max_visited = self.nr_words
Example #34
def save_network(network: Network, config: Dict):
    output_path = config['output-path']
    # Save nodes
    node_import_files = []
    for label in network.node_labels():
        file_name = 'nodes_%s.csv' % label.replace(';', '_')
        nodes = set(network.get_nodes_by_label(label))
        if len(nodes) > 0:
            node_import_files.append(file_name)
            with io.open(os.path.join(output_path, file_name),
                         'w',
                         encoding='utf-8',
                         newline='') as f:
                writer = csv.writer(f, delimiter=',', quotechar='"')
                all_attribute_keys = set()
                for n in nodes:
                    all_attribute_keys.update(n.attributes.keys())
                all_attribute_keys = sorted(all_attribute_keys)
                writer.writerow([
                    'label_id:ID(Node-ID)', '_id:string', 'ids:string[]',
                    'names:string[]'
                ] + ['%s:string' % x for x in all_attribute_keys] + [':LABEL'])
                for n in nodes:
                    row = [
                        n.label_id, n.id, ';'.join(n.ids), ';'.join(n.names)
                    ]
                    for key in all_attribute_keys:
                        row.append(n.attributes.get(key))
                    row.append(n.label)
                    writer.writerow(row)

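    # per-edge-label schema: [CSV header columns, attribute extractors (attribute key or callable)]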
    edge_metadata = {
        'HAS_MOLECULAR_FUNCTION': [['source:string', 'pmid:string'],
                                   ['source', 'pmid']],  # pmid is now an int, not a string
        'BELONGS_TO_BIOLOGICAL_PROCESS': [['source:string', 'pmid:string'],
                                          ['source', 'pmid']],
        'IN_CELLULAR_COMPONENT': [['source:string', 'pmid:string'],
                                  ['source', 'pmid']],
        'INDICATES': [['source:string'], ['source']],
        'REGULATES': [['source:string', 'pmid:string'], ['source', 'pmid']],
        'TRANSCRIBES': [['source:string'], ['source']],
        'CONTRAINDICATES': [['source:string'], ['source']],
        'INDUCES': [['source:string'], ['source']],
        'CODES': [['source:string', 'pmid:int'], ['source', 'pmid']],
        'EQTL': [[
            'source:string', 'pvalue:string', 'snp_chr:string',
            'cis_trans:string'
        ], ['source', 'pvalue', 'snp_chr', 'cis_trans']],
        'INTERACTS': [['source:string', 'description:string'],
                      ['source', 'description']],
        'TARGETS': [[
            'source:string', 'known_action:boolean', 'actions:string[]',
            'simplified_action:string'
        ],
                    [
                        'source', lambda attr:
                        ('true' if attr['known_action'] else 'false')
                        if 'known_action' in attr else None,
                        lambda attr: ';'.join(attr['actions']),
                        'simplified_action'
                    ]],
        'ASSOCIATES_WITH':
        [['source:string', 'num_pmids:int', 'num_snps:int', 'score:string'],
         ['source', 'num_pmids', 'num_snps', 'score']],
        'HAS_ADR': [['source:string'], ['source']],
        'ASSOCIATED_WITH_ADR': [['source:string'], ['source']]
    }

    # Save relationships
    for x in edge_metadata:
        with io.open(os.path.join(output_path, 'rel_%s.csv' % x),
                     'w',
                     encoding='utf-8',
                     newline='') as f:
            writer = csv.writer(f, delimiter=',', quotechar='"')
            writer.writerow([':START_ID(Node-ID)'] + edge_metadata[x][0] +
                            [':END_ID(Node-ID)', ':TYPE'])
            for e in network.get_edges_by_label(x):
                values = []
                for l in edge_metadata[x][1]:
                    if callable(l):
                        values.append(l(e.attributes))
                    else:
                        values.append(e.attributes.get(l))
                source_id = network.get_node_by_id(
                    e.source_node_id, e.source_node_label).label_id
                target_id = network.get_node_by_id(
                    e.target_node_id, e.target_node_label).label_id
                writer.writerow([source_id] + values + [target_id, e.label])

    with io.open(os.path.join(output_path, 'create_indices.cypher'),
                 'w',
                 encoding='utf-8',
                 newline='') as f:
        unique_labels = set()
        for node_label in network.node_labels():
            unique_labels.update(set(node_label.split(';')))
        for node_label in unique_labels:
            f.write('create constraint on (p:%s) assert p._id is unique;\n' %
                    node_label)
    with io.open(os.path.join(output_path, 'import_admin.bat'),
                 'w',
                 encoding='utf-8',
                 newline='') as f:
        f.write('@echo off\n')
        f.write('net stop neo4j\n')
        f.write('rmdir /s "%s"\n' %
                os.path.join(config['Neo4j']['database-path'],
                             config['Neo4j']['database-name']))
        f.write('CALL ' +
                os.path.join(config['Neo4j']['bin-path'], 'neo4j-admin'))
        f.write(' import ' +
                '--database %s ' % config['Neo4j']['database-name'] +
                ' '.join(['--nodes %s' % x
                          for x in node_import_files]) + ' ' + ' '.join([
                              '--relationships rel_%s.csv' % x
                              for x in network.edge_labels()
                          ]) + ' > import.log\n')
        f.write('net start neo4j\n')
        f.write(os.path.join(config['Neo4j']['bin-path'], 'cypher-shell'))
        f.write(
            ' -u %s -p %s --non-interactive < create_indices.cypher 1>> import.log 2>&1\n'
            % (config['Neo4j']['user'], config['Neo4j']['password']))
    with io.open(os.path.join(output_path, 'import_admin.sh'),
                 'w',
                 encoding='utf-8',
                 newline='') as f:
        f.write(os.path.join(config['Neo4j']['bin-path'], 'neo4j-admin'))
        f.write(' import ' +
                '--database %s ' % config['Neo4j']['database-name'] +
                ' '.join(['--nodes %s' % x
                          for x in node_import_files]) + ' ' + ' '.join([
                              '--relationships rel_%s.csv' % x
                              for x in network.edge_labels()
                          ]) + ' > import.log\n')
Example #35
calculationStart = time.clock()

startTs = datetime.datetime(2019, 1, 1, 7, 0, 0)
totalSteps = 20000 #2500
timeStep = 1
jamDensity = 124
medianValueTime = 50
random.seed(10)

vehicleId = 0
GEN_VEH_DIST = 'normal_whole' # ["uniform", "random", "random_whole", "normal_whole", "uniform_whole"]
STRATEGY = 'vol_sim' # ['vol_sim', 'vol_dist', 'random', 'fix']
MULTIVEH = 1 #[default=1, 2, 3,...]
NO_CHARGE = False

network = Network(startTs)

fNode = open("C:/Users/lyy90/OneDrive/Documents/GitHub/meso_v2.0/Sioux Falls network/nodes-SiouxFalls_gong.csv")
#fNode = open("F:/meso_v2.0/Sioux Falls network/nodes-SiouxFalls_gong.csv")
fNode.readline()
fLane = open("C:/Users/lyy90/OneDrive/Documents/GitHub/meso_v2.0/Sioux Falls network/lanes-SiouxFalls_gong.csv")
#fLane = open("F:/meso_v2.0/Sioux Falls network/lanes-SiouxFalls_gong.csv")
fLane.readline()
pOd = 'C:/Users/lyy90/OneDrive/Documents/GitHub/meso_v2.0/OD_data'
#pOd = "F:/meso_v2.0/OD_data"

readNodes(fNode, network)
readLanes(fLane, network)
tsPairNodePairTypeMap = readOd(pOd)
#print(tsPairNodePairTypeMap)
genVehicle(tsPairNodePairTypeMap, GEN_VEH_DIST, vehicleId, medianValueTime, network, MULTIVEH)
Example #36
    print(f"{gct()} : model init")
    det = RFDet(
        cfg.TRAIN.score_com_strength,
        cfg.TRAIN.scale_com_strength,
        cfg.TRAIN.NMS_THRESH,
        cfg.TRAIN.NMS_KSIZE,
        cfg.TRAIN.TOPK,
        cfg.MODEL.GAUSSIAN_KSIZE,
        cfg.MODEL.GAUSSIAN_SIGMA,
        cfg.MODEL.KSIZE,
        cfg.MODEL.padding,
        cfg.MODEL.dilation,
        cfg.MODEL.scale_list,
    )
    des = HardNetNeiMask(cfg.HARDNET.MARGIN, cfg.MODEL.COO_THRSH)
    model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE,
                    cfg.TRAIN.TOPK)

    print(f"{gct()} : to device")
    device = torch.device("cuda")
    model = model.to(device)
    resume = args.resume
    print(f"{gct()} : in {resume}")
    checkpoint = torch.load(resume)
    model.load_state_dict(checkpoint["state_dict"])

    print(f"{gct()} : start eval")
    model.eval()
    PreNN, PreNNT, PreNNDR = 0, 0, 0
    with torch.no_grad():
        for i_batch, sample_batched in enumerate(data_loader, 1):
            batch = parse_batch(sample_batched, device)
Example #37
class TestActivityLevels(unittest.TestCase):
    """
    This test-suite compares the performance of the search algorithm and the
    neural network model:
        a) testActivityEqualSingleCue
            compares the activity of all nodes in
            the algorithm and the network after presenting a single cue.

        b) testActivityEqualThreeCues
            compares the activity of all nodes in
            the algorithm and the network after presenting all three cues.

        c) testOrderEightWords
            compares the order of the explored nodes, and the activity levels
            up to a certain tolerance level

    """

    def setUp(self):
        self.net = Network()

    def testActivityEqualSingleCue(self):
        cue = "match"
        target = "fire"

        target_id = self.net.voc[target]

        max_visited = 1
        self.net.max_visited = max_visited

        act_alg, _ = spread_activity(
            init_nodes=[self.net.voc[cue]], target=target_id, W=self.net.W, max_visited=max_visited
        )

        self.net.setup_problem([cue], target)
        self.net.run()

        np.testing.assert_almost_equal(act_alg, self.net.a[self.net.t_max], decimal=3)

    def testActivityEqualThreeCues(self):
        cues = ["match", "game", "stick"]
        target = "fire"

        target_id = self.net.voc[target]
        max_visited = 3
        self.net.max_visited = max_visited

        self.net.setup_problem(cues, target)

        act_alg, _ = spread_activity(
            init_nodes=self.net.cue_ids, target=target_id, W=self.net.W, max_visited=max_visited
        )

        self.net.run()
        np.testing.assert_almost_equal(act_alg, self.net.a[self.net.t_max], decimal=2)

    def testOrderEightWords(self):
        # number of words along the search path
        nr_words = 8

        cues = ["cottage", "swiss", "cake"]
        target = "cheese"

        target_id = self.net.voc[target]

        # get word ids
        cue_ids = [self.net.voc[c] for c in cues]

        act_alg, visited_alg = spread_activity(init_nodes=cue_ids, target=target_id, W=self.net.W, max_visited=nr_words)
        self.net.setup_problem(cues, target)
        self.net.max_visited = nr_words
        self.net.run()

        visited_net = self.net.visited()

        np.testing.assert_equal(visited_alg, visited_net)

        a1 = self.net.a[self.net.t_max, visited_net]
        a2 = act_alg[visited_alg]

        # tolerate relative differences in activities up to 0.1
        np.testing.assert_allclose(a1, a2, rtol=1e-1)