Example #1
    def test_read_ascii(self):
        """Test reading ASCII tags and compare to known baseline values."""
        self.assertEqual(self.image.datetime, Baseline("""2018:03:12 10:12:07"""))
        self.assertEqual(self.image.make, Baseline("""Apple"""))
        self.assertEqual(self.image.model, Baseline("""iPhone 7"""))
        self.assertEqual(self.image.gps_latitude_ref, Baseline("""N"""))
        self.assertEqual(self.image.gps_longitude_ref, Baseline("""W"""))
Example #2
    def test_trainer(self):
        batch_size = 3
        length = 5
        descriptors = torch.FloatTensor(self.config.nclasses,
                                        self.config.descriptor_dim).normal_()
        sender = Sender(self.config)
        sender.eval()
        receiver = Receiver(self.config)
        receiver.eval()
        exchange_model = ExchangeModel(self.config)
        baseline_sender = Baseline(self.config, 'sender')
        baseline_receiver = Baseline(self.config, 'receiver')
        exchange = Exchange(exchange_model, sender, receiver, baseline_sender,
                            baseline_receiver, descriptors)
        trainer = Trainer(exchange)

        image = torch.FloatTensor(batch_size, self.config.image_in).normal_()
        target_dist = F.softmax(torch.FloatTensor(
            batch_size, self.config.nclasses).normal_(),
                                dim=1)
        target = target_dist.argmax(dim=1)
        trainer_loss = trainer.run_step(image, target)

        self.assertEqual(trainer_loss.sender_message_loss.numel(), 1)
        self.assertEqual(trainer_loss.receiver_message_loss.numel(), 1)
        self.assertEqual(trainer_loss.stop_loss.numel(), 1)
        self.assertEqual(trainer_loss.baseline_loss_sender.numel(), 1)
        self.assertEqual(trainer_loss.baseline_loss_receiver.numel(), 1)
        self.assertEqual(trainer_loss.xent_loss.numel(), 1)
Example #3
    def __init__(self, cfg, inference=False, threshold=0.5):
        self.device = torch.device(
            "cuda") if cfg.MODEL.DEVICE == 'cuda' else torch.device("cpu")

        if not inference:
            print('load training data')
            self.dataloader, class_num = get_train_loader(cfg)

            print('load testing data')
            if cfg.TEST.MODE == 'face':
                self.agedb_30, self.cfp_fp, self.lfw, self.agedb_30_issame, self.cfp_fp_issame, self.lfw_issame = get_val_data(
                    self.dataloader.dataset.root.parent)
            else:
                pairs = read_pairs(
                    os.path.join(cfg.DATASETS.FOLDER, 'pairs.txt'))

                self.data, self.data_issame = get_paths(
                    os.path.join(cfg.DATASETS.FOLDER, 'test'), pairs)

            print('load model')
            self.model = Baseline(cfg)
            self.model = self.model.to(self.device)
            self.load_state(cfg)
            if cfg.SOLVER.OPT == 'SGD':
                self.optimizer = optim.SGD(
                    [{
                        'params': self.model.parameters()
                    }],
                    lr=cfg.SOLVER.BASE_LR,
                    momentum=cfg.SOLVER.MOMENTUM,
                    weight_decay=cfg.SOLVER.WEIGHT_DECAY)
            else:
                self.optimizer = optim.Adam(
                    [{
                        'params': self.model.parameters()
                    }],
                    lr=cfg.SOLVER.BASE_LR,
                    weight_decay=cfg.SOLVER.WEIGHT_DECAY)

            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=cfg.SOLVER.MAX_EPOCH,
                eta_min=cfg.SOLVER.ETA_MIN_LR)
            checkpoints = cfg.CHECKPOINT.SAVE_DIR
            os.makedirs(checkpoints, exist_ok=True)

            self.best_score = 0.
            self.best_threshold = 0.
        else:
            self.device = torch.device(
                "cuda") if cfg.TEST.DEVICE == 'cuda' else torch.device("cpu")
            print('load model')
            self.model = Baseline(cfg)
            self.model = self.model.to(self.device)
            self.load_state(cfg)
            self.threshold = threshold
            self.test_transform = trans.Compose([
                trans.ToTensor(),
                trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
            ])
Example #4
def baseline_model(utility_matrix, similarity_matrix):
    if os.path.exists("baseline_matrix.pickle"):
        baseline = Baseline(utility_matrix)
        baseline_matrix = pd.read_pickle("baseline_matrix.pickle")
        print("Using already created baseline_matrix.pickle file")
    else:
        print("Baseline Method Started")
        baseline = Baseline(utility_matrix)
        baseline_matrix = baseline.get_rating_deviation()
    # print("Take Input of user ID and Movie ID to predict the Rating")
    # query_user = int(input("Query User ID : "))
    # query_movie = int(input("Query Movie ID : "))
    # n = 20
    # predicted_rating = baseline.get_baseline_rating_prediction(query_user,query_movie,similarity_matrix,baseline_matrix,n)
    # print("Predicted Rating using baseline approach : ",predicted_rating)

    if os.path.exists("baseline_error_matrix.pickle"):
        error_matrix = pd.read_pickle("baseline_error_matrix.pickle")
        print("Using already created baseline_error_matrix.pickle file")
        size = 20000
        utility_matrix = utility_matrix.fillna(0)
        for i in range(100):
            for j in range(100):
                error_matrix.iloc[
                    i, j] = utility_matrix.iloc[i, j] - error_matrix.iloc[i, j]
        # RMSE from the mean squared error; MAE from the mean absolute error
        squared_error_sum = error_matrix.pow(2).sum().sum()
        absolute_error_sum = error_matrix.abs().sum().sum()
        print("\nFor Baseline Model")
        print("Calculated RMSE is : ", sqrt(squared_error_sum / size))
        print("Calculated MAE is ", absolute_error_sum / size)
    else:
        baseline.calculate_error(similarity_matrix, baseline_matrix)
Example #5
    def test_read_rational(self):
        """Test reading RATIONAL tags and compare to known baseline values."""
        self.assertEqual(str(self.image.gps_altitude)[:13], Baseline("""2189.98969072"""))
        self.assertEqual(str(self.image.gps_latitude), Baseline("""(36.0, 3.0, 11.08)"""))
        self.assertEqual(str(self.image.gps_longitude), Baseline("""(112.0, 5.0, 4.18)"""))
        self.assertEqual(str(self.image.x_resolution), Baseline("""72.0"""))
        self.assertEqual(str(self.image.y_resolution), Baseline("""72.0"""))
Example #6
    def test_same_value(self):
        """Test same baselined text for every call on the same line.

         Check baseline instantiations on the same line produce the
         same instance.

        """
        baseline1, baseline2 = Baseline("Hello!"), Baseline("Hello!")

        self.assertIs(baseline1, baseline2)
Example #7
    def test_modify_rational(self):
        """Verify that modifying RATIONAL tags updates the tag values as expected."""
        self.image.gps_altitude = 123.456789
        self.assertEqual(str(self.image.gps_altitude),
                         Baseline("""123.456789"""))
        self.image.gps_latitude = (41.0, 36.0, 33.786)
        self.assertEqual(str(self.image.gps_latitude),
                         Baseline("""(41.0, 36.0, 33.786)"""))

        segment_hex = self.image._segments['APP1'].get_segment_hex()
        self.assertEqual('\n'.join(textwrap.wrap(segment_hex, 90)),
                         MODIFY_RATIONAL_HEX_BASELINE)
Example #8
    def test_differing_value(self):
        """Test differing baselined text for every call.

         Check that exception is raised, that "atexit" registration
         did not occur, and that no files were to be updated.

        """
        with self.assertRaises(RuntimeError):
            Baseline('junk1'), Baseline('junk2')

        with self.assertRaises(RuntimeError):
            for text in ['junk1', 'junk2']:
                Baseline(text)

        self.check_updated_files()
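Examples #6 and #8 exercise the contract of the baseline package itself: identical Baseline literals on one source line share a single instance, while differing text at the same source location raises RuntimeError and schedules no file updates. A minimal standalone sketch of that contract, assuming the baseline package is installed and exposes Baseline as these tests show:

from baseline import Baseline

# Identical baselined text on the same source line yields one shared instance.
first, second = Baseline("Hello!"), Baseline("Hello!")
assert first is second

# Differing baselined text at the same source location is rejected.
try:
    Baseline("junk1"), Baseline("junk2")
except RuntimeError:
    print("conflicting baseline text raises RuntimeError")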
Example #9
 def test_list_attributes_photo(self):
     """Verify that calling dir() on a camera photo lists the expected EXIF attributes."""
     with open(
         os.path.join(os.path.dirname(__file__), "grand_canyon.jpg"), "rb"
     ) as image_file:
         image = Image(image_file)
     dunder_dir_text = "\n".join(textwrap.wrap(repr(sorted(dir(image))), 90))
     self.assertEqual(
         dunder_dir_text,
         Baseline(
             """
         ['<unknown EXIF tag 59932>', '<unknown EXIF tag 59933>', '_exif_ifd_pointer',
         '_gps_ifd_pointer', '_segments', 'aperture_value', 'brightness_value', 'color_space',
         'components_configuration', 'compression', 'datetime', 'datetime_digitized',
         'datetime_original', 'delete', 'delete_all', 'exif_version', 'exposure_bias_value',
         'exposure_mode', 'exposure_program', 'exposure_time', 'f_number', 'flash',
         'flashpix_version', 'focal_length', 'focal_length_in_35mm_film', 'get', 'get_all',
         'get_file', 'get_thumbnail', 'gps_altitude', 'gps_altitude_ref', 'gps_datestamp',
         'gps_dest_bearing', 'gps_dest_bearing_ref', 'gps_horizontal_positioning_error',
         'gps_img_direction', 'gps_img_direction_ref', 'gps_latitude', 'gps_latitude_ref',
         'gps_longitude', 'gps_longitude_ref', 'gps_speed', 'gps_speed_ref', 'gps_timestamp',
         'has_exif', 'jpeg_interchange_format', 'jpeg_interchange_format_length', 'lens_make',
         'lens_model', 'lens_specification', 'list_all', 'make', 'maker_note', 'metering_mode',
         'model', 'orientation', 'photographic_sensitivity', 'pixel_x_dimension',
         'pixel_y_dimension', 'resolution_unit', 'scene_capture_type', 'scene_type',
         'sensing_method', 'shutter_speed_value', 'software', 'subject_area',
         'subsec_time_digitized', 'subsec_time_original', 'white_balance', 'x_resolution',
         'y_and_c_positioning', 'y_resolution']
         """
         ),
     )
Example #10
    def __init__(self):
        config = Baseline()

        config_changes = {
            'description': 'baseline travel model fast (no build)',
        }
        config.replace(config_changes)

        from psrc.configs.create_travel_model_configuration import create_travel_model_configuration
        travel_model_configuration = create_travel_model_configuration(
            'baseline_travel_model_psrc_fast_no_build',
            emme2_batch_file='MODELUSim.BAT ..\\triptabs',
            mode='full')
        config['travel_model_configuration'] = travel_model_configuration
        config['travel_model_configuration']['locations_to_disaggregate'] = [
            'parcel', 'building'
        ]
        del config['travel_model_configuration'][2000]

        ##fast model doesn't have bank2 and bank3; disable macros using them
        del config['travel_model_configuration']['export_macros'][
            'tazvmt2.mac']
        del config['travel_model_configuration']['export_macros'][
            'tazvmt3.mac']

        del config['travel_model_configuration']['matrix_variable_map'][
            'bank2']
        del config['travel_model_configuration']['matrix_variable_map'][
            'bank3']

        self.merge(config)


#if __name__ == "__main__":
#    config = BaselineTravelModelFast()
Example #11
    def test_no_app1_segment(self):
        """Verify behavior of an image without an APP1 segment marker.

        Assert the ``has_exif`` attribute is false. Verify non-EXIF ``dir()`` list contents. Then,
        check the ``get_file()`` hexadecimal.

        """
        image_path = os.path.join(os.path.dirname(__file__), "no_app1.png")
        with open(image_path, "rb") as image_file:
            my_image = Image(image_file)

        self.assertFalse(my_image.has_exif)

        self.assertEqual(
            str(dir(my_image)),
            Baseline("""
            ['_segments', 'delete', 'delete_all', 'get', 'get_all', 'get_file', 'get_thumbnail', 'has_exif', 'list_all']
            """),
        )

        with pytest.raises(RuntimeError,
                           match="image does not contain thumbnail"):
            my_image.get_thumbnail()

        self.assertEqual(
            "\n".join(textwrap.wrap(str(my_image.get_file()), 90)),
            NO_APP1_PNG)
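Several of the examples above guard tag access with has_exif before reading attributes, as Example #11 does for an image with no APP1 segment. A minimal sketch of that pattern, assuming the exif package these tests exercise (from exif import Image) and an illustrative file path:

from exif import Image

# Illustrative path; substitute any JPEG or PNG on disk.
with open("no_app1.png", "rb") as image_file:
    image = Image(image_file)

if image.has_exif:
    # Tags shown elsewhere in these examples, e.g. make and model.
    print(image.get("make"), image.get("model"))
else:
    print("image carries no APP1/EXIF segment")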
Example #12
    def compute_pose(self, view1, view2=None, is_baseline=False):
        """Computes the pose of the new view"""

        # procedure for baseline pose estimation
        if is_baseline and view2:

            match_object = self.matches[(view1.name, view2.name)]
            baseline_pose = Baseline(view1, view2, match_object)
            view2.R, view2.t = baseline_pose.get_pose(self.K)

            rpe1, rpe2 = self.triangulate(view1, view2)
            self.errors.append(np.mean(rpe1))
            self.errors.append(np.mean(rpe2))

            self.done.append(view1)
            self.done.append(view2)

        # procedure for estimating the pose of all other views
        else:

            view1.R, view1.t = self.compute_pose_PNP(view1)
            errors = []

            # reconstruct unreconstructed points from all of the previous views
            for i, old_view in enumerate(self.done):

                match_object = self.matches[(old_view.name, view1.name)]
                _ = remove_outliers_using_F(old_view, view1, match_object)
                self.remove_mapped_points(match_object, i)
                _, rpe = self.triangulate(old_view, view1)
                errors += rpe

            self.done.append(view1)
            self.errors.append(np.mean(errors))
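The baseline branch of Example #12 recovers the relative pose (R, t) of the second view from keypoint matches before triangulating. A minimal sketch of that step using OpenCV rather than the Baseline/View classes above, assuming matched pixel coordinate arrays pts1 and pts2 (Nx2 float) and an intrinsic matrix K:

import cv2

def relative_pose(pts1, pts2, K):
    # Essential matrix from matched points, robust to outliers via RANSAC.
    E, inlier_mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC, threshold=1.0)
    # Decompose E and keep the (R, t) that places points in front of both cameras.
    _, R, t, _ = cv2.recoverPose(E, pts1, pts2, K, mask=inlier_mask)
    return R, t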
Example #13
    def test_delete_all_tags(self):
        """Verify deleting all EXIF tags from the Image object."""
        self.image.delete_all()

        segment_hex = (binascii.hexlify(
            self.image._segments["APP1"].get_segment_bytes()).decode(
                "utf-8").upper())
        self.assertEqual("\n".join(textwrap.wrap(segment_hex, 90)),
                         DELETE_ALL_HEX_BASELINE)

        with TemporaryFile("w+b") as temporary_file_stream:
            temporary_file_stream.write(self.image.get_file())
            temporary_file_stream.seek(0)
            reloaded_image = Image(temporary_file_stream)

        dunder_dir_text = "\n".join(
            textwrap.wrap(repr(sorted(dir(reloaded_image))), 90))
        self.assertEqual(
            dunder_dir_text,
            Baseline("""
            ['<unknown EXIF tag 59932>', '<unknown EXIF tag 59933>', '_exif_ifd_pointer',
            '_gps_ifd_pointer', '_segments', 'delete', 'delete_all', 'exif_version', 'get', 'get_all',
            'get_file', 'get_thumbnail', 'has_exif', 'list_all']
            """),
        )
Example #14
def test_index_accessor():
    """Test accessing attributes using index syntax."""
    with open(os.path.join(os.path.dirname(__file__), "grand_canyon.jpg"),
              "rb") as image_file:
        image = Image(image_file)

    assert image["datetime"] == Baseline("""2018:03:12 10:12:07""")
Example #15
def execute_demo(language):
    if language == 'english':
        word_emb = load_word_embeddings('english')
    elif language == 'spanish':
        word_emb = load_word_embeddings('spanish')
    else:
        raise ValueError("unsupported language: {}".format(language))

    data = Dataset(language)

    print("{}: {} training - {} dev".format(language, len(data.trainset),
                                            len(data.devset)))

    #for sent in data.trainset:
    # Gold label -> 0 if the word is not complex, 1 if the word is complex.
    #print(sent['sentence'], sent['target_word'], sent['gold_label'])

    baseline = Baseline(language)

    model = Model(language)

    model.train(data.trainset, word_emb)

    predictions = model.test(data.devset, word_emb)

    gold_labels = [sent['gold_label'] for sent in data.devset]

    report_score(gold_labels, predictions)
Example #16
    def __init__(self):
        config = Baseline()

        config_changes = {
            'description':
            'run travel model for baseyear and then extract travel data',
            'models': [],
            'models_in_year': {
                2000: [],
            },
            'years': (2000, 2001),
        }

        from psrc.configs.create_travel_model_configuration import create_travel_model_configuration
        travel_model_configuration = create_travel_model_configuration(
            'baseline_travel_model_psrc_2008_lmwang',
            emme2_batch_file='MODEL1-0.BAT',
            mode='full',
            years_to_run={
                2000: '2000_v1.0aTG',
                2005: '2006_v1.0aTG',
                2010: '2010_v1.0aTG',
                2015: '2010_v1.0aTG_2015',
                2020: '2020_v1.0aTG'
            })
        config['travel_model_configuration'] = travel_model_configuration
        config.replace(config_changes)

        self.merge(config)
Example #17
def run(dataset):
    n_features = len(meta[dataset]['val_name'])

    result_online = Result('%s-%s' %(dataset, 'aws-online'), aws=True)
    result_baseline = Result('%s-%s' %(dataset, 'aws-baseline'), aws=True)
    result_active = Result('%s-%s' %(dataset, 'aws-active'), aws=True)

    for repeat in range(0, n_repeat):
        print('Round %d of %d' % (repeat, n_repeat - 1))

        ex = AWSOnline(meta[dataset]['model_id'], 1, 0, n_features, meta[dataset]['val_name'], ftype='uniform', error=.1)

        test_x, test_y = load_svmlight_file('/Users/Fan/dev/ML/code/binary-classifiers/targets/%s/test.scale' % dataset, n_features)
        test_x = test_x.todense()
        test_y = [a if a == 1 else 0 for a in test_y]
        train_x, train_y = [], []

        for i in result_active.index:
            q_by_u = result_active.Q_by_U[i]
            print('Active learning with budget %d / %d' % (q_by_u, q_by_u * (n_features + 1)))
            main = ActiveLearning(ex, (None, None), (test_x, test_y), n_features,
                                  q_by_u * (n_features + 1), 5)

            L_unif, L_test = main.do()

            result_active.L_unif[i].append(L_unif)
            result_active.L_test[i].append(L_test)
            result_active.nquery[i].append(ex.get_n_query())

        ex = AWSOnline(meta[dataset]['model_id'], 1, 0, n_features, meta[dataset]['val_name'], ftype='uniform', error=.1)

        for i in result_online.index:
            q_by_u = result_online.Q_by_U[i]
            print('collecting up to budget %d / %d' % (q_by_u, q_by_u * (n_features + 1)))

            ex.collect_up_to_budget(q_by_u * (n_features + 1))
            train_x.extend(ex.pts_near_b)
            train_y.extend(ex.pts_near_b_labels)

            print('retraining with %d points' % len(train_y))

            # online
            e = RBFKernelRetraining(ex.batch_predict, (train_x, train_y), (test_x, test_y), n_features)
            L_unif, L_test = e.grid_retrain_in_x()

            result_online.L_unif[i].append(L_unif)
            result_online.L_test[i].append(L_test)
            result_online.nquery[i].append(ex.get_n_query())

            # baseline
            e = Baseline(ex.batch_predict, (train_x, train_y), (test_x, test_y), n_features)
            L_unif, L_test = e.do()

            result_baseline.L_unif[i].append(L_unif)
            result_baseline.L_test[i].append(L_test)
            result_baseline.nquery[i].append(ex.get_n_query())

    print(result_online)
    print(result_baseline)
    print(result_active)
Example #18
def execute(language):
    data = Dataset(language)
    print("{}: {} training - {} dev".format(language, len(data.trainset),
                                            len(data.devset)))

    baseline = Baseline(language)
    estimator = SVC(gamma=300)
    title = 'Spanish Learning Curves (SVM, γ=300)'
    X, y = baseline.train(data.trainset)
    plot_learning_curve(estimator,
                        title,
                        X,
                        y,
                        ylim=None,
                        n_jobs=1,
                        train_sizes=np.linspace(.1, 1.0, 5))

    predictions = baseline.test(data.devset)

    gold_labels = [sent['gold_label'] for sent in data.devset]

    target_words = [sent['target_word'] for sent in data.devset]
    prediction = list(predictions)
    df = pd.DataFrame(columns=['target_word', 'prediction'])
    df["target_word"] = target_words
    df['gold_label'] = gold_labels
    df['prediction'] = prediction
    df.to_csv('out_s2.csv')
    report_score(gold_labels, predictions)
Example #19
    def test_nic_name_map(self, mocked_check_output):
        """Test nic_name_map attribute of NetworkAdapters."""
        self.assertEqual(
            str(
                sorted(self.test_adapters.nic_name_map,
                       key=self.test_adapters.nic_name_map.get)),
            Baseline("""['Ethernet Adapter', 'Wi-Fi Adapter']"""))
Example #20
def test_gitlab_issue_23():
    """Regression test for GitLab issue 23.

    Verify reading ASCII tags containing a smaller length value than specified by the size field.

    """
    image_under_test = Image(
        os.path.join(os.path.dirname(__file__), "excess_ascii_null_bytes.jpg"))

    with pytest.warns(RuntimeWarning,
                      match="ASCII tag contains 2 fewer bytes than specified"):
        assert image_under_test.model == Baseline("""iPhone""")

    with pytest.warns(
            RuntimeWarning,
            match="ASCII tag contains 13 fewer bytes than specified"):
        assert image_under_test.software == Baseline("""Photoshop Express""")
Example #21
    def test_product_name(self, mocked_check_output):
        """Test product_name property of the Nic class."""
        self.assertEqual(self.test_nic.product_name,
                         Baseline("""Dummy Adapter"""))
        with self.assertRaisesRegex(
                AttributeError,
                "'Nic' attribute 'product_name' is not settable"):
            self.test_nic.product_name = "New Name"
Example #22
    def test_set_static_address(self, mocked_check_output, mocked_call_output):
        """Test set_static_address method of the Nic class."""
        self.assertEqual(
            str(
                self.test_nic.set_static_address('192.168.0.2',
                                                 '255.255.255.0',
                                                 '192.168.0.1')),
            Baseline("""0"""))
Example #23
def test_get_method():
    """Test behavior when accessing tags using the ``get()`` method."""
    with open(os.path.join(os.path.dirname(__file__), 'grand_canyon.jpg'), 'rb') as image_file:
        image = Image(image_file)

    assert image.get('fake_attribute') is None
    assert image.get('light_source', default=-1) == -1  # tag not in image
    assert image.get('make') == Baseline("""Apple""")
Example #24
    def test_modify_rational(self):
        """Verify that modifying RATIONAL tags updates the tag values as expected."""
        self.image.gps_altitude = 123.456789
        self.assertEqual(str(self.image.gps_altitude), Baseline("""123.456789"""))
        self.image.gps_latitude = (41.0, 36.0, 33.786)
        self.assertEqual(
            str(self.image.gps_latitude), Baseline("""(41.0, 36.0, 33.786)""")
        )

        segment_hex = (
            binascii.hexlify(self.image._segments["APP1"].get_segment_bytes())
            .decode("utf-8")
            .upper()
        )
        self.assertEqual(
            "\n".join(textwrap.wrap(segment_hex, 90)), MODIFY_RATIONAL_HEX_BASELINE
        )
Example #25
    def test_nic_connection_id_map(self, mocked_check_output):
        """Test nic_connection_id_map attribute of NetworkAdapters."""
        self.assertEqual(
            str(
                sorted(self.test_adapters.nic_connection_id_map,
                       key=self.test_adapters.nic_connection_id_map.get)),
            Baseline(
                """['Local Area Connection', 'Wireless Area Connection']"""))
Example #26
    def test_modify_ascii_same_len(self):
        """Verify that writing a same length string to an ASCII tag updates the tag."""
        self.image.model = "MyCamera"
        self.assertEqual(self.image.model, Baseline("""MyCamera"""))

        segment_hex = self.image._segments['APP1'].get_segment_hex()
        self.assertEqual('\n'.join(textwrap.wrap(segment_hex, 90)),
                         MODIFY_ASCII_SAME_LEN_HEX_BASELINE)
Example #27
    def test_modify_ascii_shorter(self):
        """Verify that writing a shorter string to an ASCII tag updates the tag."""
        self.image.model = "MyCam"
        self.assertEqual(self.image.model, Baseline("""MyCam"""))

        segment_hex = self.image._segments['APP1'].get_segment_hex()
        self.assertEqual('\n'.join(textwrap.wrap(segment_hex, 90)),
                         MODIFY_ASCII_SHORTER_HEX_BASELINE)
Example #28
    def __init__(self):
        config = Baseline()
        if self.multiple_runs:
            config.sample_inputs()
        config['number_of_runs'] = 99
        config['seed'] = 1
        config_changes = {
            'description': 'baseline travel model fast',
        }
        config.replace(config_changes)

        from psrc.configs.create_travel_model_configuration import create_travel_model_configuration
        travel_model_configuration = create_travel_model_configuration(
            'baseline_travel_model_psrc_fast_hana',
            emme2_batch_file='MODELUSim.BAT ..\\triptabs',
            mode='skims',
            years_to_run={
                2005: '2005_06',
                2010: '2010_06',
                2015: '2010_06'
            })
        config['travel_model_configuration'] = travel_model_configuration
        config['travel_model_configuration']['locations_to_disaggregate'] = [
            'parcel', 'building'
        ]

        ##fast model doesn't have bank2 and bank3; disable macros using them
        del config['travel_model_configuration']['export_macros'][
            'tazvmt2.mac']
        del config['travel_model_configuration']['export_macros'][
            'tazvmt3.mac']

        del config['travel_model_configuration']['matrix_variable_map'][
            'bank2']
        del config['travel_model_configuration']['matrix_variable_map'][
            'bank3']

        config['travel_model_configuration']['export_macros'][
            'get_link_attributes.mac'] = {
                'bank': 'bank1',
                'scenario': -1,
                'path': 'export_macros'
            }
        config['travel_model_configuration']['node_matrix_variable_map'] = {
            "bank1": {
                "attr_on_links.rpt": {
                    "timau": "am_pk_travel_time",
                    "len": "distance"
                },
                "tveham.rpt": {
                    "@tveh": "vehicle_volume"
                }
            }
        }
        #config['travel_model_configuration'][2015]['models'] = list(config['travel_model_configuration'][2015].get('models'))
        #config['travel_model_configuration'][2015]['models'].append('opus_emme2.models.restore_trip_tables')

        self.merge(config)
Example #29
    def test_read(self):
        """Test reading tags and compare to known baseline values."""
        self.assertEqual(repr(self.image.color_space), "<ColorSpace.SRGB: 1>")
        self.assertEqual(str(self.image.datetime_original),
                         Baseline("""2019:02:08 21:44:35"""))
        self.assertEqual(str(self.image.gps_latitude),
                         Baseline("""(79.0, 36.0, 54.804590935844615)"""))
        self.assertEqual(str(self.image.gps_longitude),
                         Baseline("""(47.0, 25.0, 34.489798675854615)"""))
        self.assertEqual(self.image.make, Baseline("""EXIF Package"""))
        self.assertEqual(self.image.model, Baseline("""Little Endian"""))
        self.assertEqual(str(self.image.resolution_unit), Baseline("""2"""))
        self.assertEqual(repr(self.image.saturation),
                         Baseline("""<Saturation.LOW: 1>"""))
        self.assertEqual(repr(self.image.sharpness),
                         Baseline("""<Sharpness.SOFT: 1>"""))
        self.assertEqual(str(self.image.x_resolution), Baseline("""200.0"""))
        self.assertEqual(str(self.image.y_resolution), Baseline("""200.0"""))
Example #30
    def __init__(self):
        config = Baseline()
        if self.multiple_runs:
            config.sample_inputs()
        config['years'] = (2001, 2005)
        config['number_of_runs'] = 50
        config['seed'] = 1
        config['description'] = 'baseline multiple runs'
        self.merge(config)