Example #1
import logging
from pathlib import Path
from typing import List

import tensorflow as tf


def restore_weights(graph: tf.Graph, checkpoint_dir: str,
                    network_name: str, output_node_names: List[str],
                    dtype: str):
    # Download pre-trained weights if no checkpoint is present.
    if tf.train.latest_checkpoint(checkpoint_dir) is None:
        logging.info(
            'Checkpoint dir %s not found, attempting to download pre-trained weights.',
            Path(checkpoint_dir).as_posix())
        get_weights(Path(checkpoint_dir), network_name, dtype)

    if tf.train.latest_checkpoint(checkpoint_dir) is None:
        raise ValueError(
            'Weight download failed. Please re-try downloading the weights '
            'using the `get_weights.py` script.')

    # Restore the latest checkpoint and freeze the variables to constants.
    with tf.Session(graph=graph) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
        logging.info('Successfully restored ImageNet weights for %s model.',
                     network_name)
        graph_def = tf.graph_util.convert_variables_to_constants(
            sess, tf.get_default_graph().as_graph_def(), output_node_names)
    return graph_def
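
A minimal usage sketch for restore_weights; the graph-building helper, checkpoint directory, and output node name below are illustrative assumptions, not part of the source:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    build_network()  # hypothetical helper that defines the model ops

graph_def = restore_weights(graph,
                            checkpoint_dir='/tmp/resnet50_ckpt',
                            network_name='resnet50',
                            output_node_names=['output/Softmax'],
                            dtype='float16')
tf.train.write_graph(graph_def, '/tmp/resnet50_ckpt', 'frozen.pb',
                     as_text=False)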
Example #2
import tempfile
from pathlib import Path


def test_supported_model(model_name):
    save_dir = Path(tempfile.gettempdir()) / model_name
    save_dir.mkdir(parents=True, exist_ok=True)
    ckpt_glob = model_name.lower() + ".ckpt*"
    weight_path = Path(
        get_weights(save_dir, model_name.lower(), 'float16') +
        ".data-00000-of-00001")
    assert weight_path.exists()
    # Remove all generated checkpoint files
    for ckpt_file in save_dir.glob(ckpt_glob):
        ckpt_file.unlink()
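
This test presumably runs under a pytest parametrize decorator along these lines (the model names here are assumptions):

import pytest

@pytest.mark.parametrize('model_name', ['ResNet50', 'DenseNet121'])
def test_supported_model(model_name):
    ...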
Example #3

d_all = df["distance"].values
pld_all = df["pl_db"].values

df_uncensored = df.loc[uncensored_packets_mask]

d_uncensored = df_uncensored["distance"].values
pld_uncensored = df_uncensored["pl_db"].values

# Target a bin width of roughly 20 m across the observed distance range.
num_bins = int((d_uncensored.max() - d_uncensored.min()) / 20)

print(f"Using {num_bins} bins with a width of "
      f"{(d_uncensored.max() - d_uncensored.min()) / num_bins} m")

w_lin = get_weights(d_uncensored, weight_type='linear', num_bins=num_bins)
w_log = get_weights(d_uncensored, weight_type='log10', num_bins=num_bins)
w_sq = get_weights(d_uncensored, weight_type='square', num_bins=num_bins)

# Ordinary least squares fit of the log-distance path-loss model.
(pld0_ols, n_ols, sigma_ols) = model.ols(d0=1, d=d_uncensored,
                                         pld=pld_uncensored)

fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True)
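
The get_weights used here is not shown in this example; a plausible sketch, assuming it up-weights samples from sparsely populated distance bins by inverse bin occupancy:

import numpy as np

def get_weights(d, weight_type='linear', num_bins=30):
    # Hypothetical reconstruction, not the project's actual implementation.
    if weight_type == 'log10':
        d = np.log10(d)          # bin in log-distance space
    elif weight_type == 'square':
        d = np.square(d)         # bin in squared-distance space
    counts, edges = np.histogram(d, bins=num_bins)
    idx = np.clip(np.digitize(d, edges) - 1, 0, num_bins - 1)
    weights = 1.0 / counts[idx]  # rarer bins get larger weights
    return weights / weights.sum()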
Example #4
import tempfile
from pathlib import Path


def test_supported_model(model_name):
    assert Path(
        get_weights(Path(tempfile.gettempdir()), model_name) +
        ".data-00000-of-00001").exists()
Example #5
import tempfile

import pytest


def test_unsupported_model(model_name):
    with pytest.raises(ValueError):
        get_weights(tempfile.gettempdir(), model_name)
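
For context, the error path this test exercises presumably looks something like the following inside get_weights (a sketch; the model registry is an assumption):

SUPPORTED_MODELS = {'resnet50', 'densenet121'}  # illustrative only

def get_weights(save_dir, model_name, dtype='float16'):
    if model_name.lower() not in SUPPORTED_MODELS:
        raise ValueError(f'Unsupported model: {model_name}')
    ...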
Example #6
import sys

full_build_flag = int(sys.argv[1])
rng_flag = int(sys.argv[2])

if full_build_flag not in (0, 1):
    raise ValueError("full_build_flag must be either '1' or '0'")
if rng_flag not in (0, 1):
    raise ValueError("rng_flag must be either '1' or '0'")

if full_build_flag == 1:
    # Format raw data into datapoints that can be used
    print('Formatting historical data...')
    get_data(1993, 2017, 'data/MMSeeds_with_Team_IDs.csv', 'data')

    # Format raw 2019 tournament data into points that can be manipulated
    print('Formatting 2018-2019 season data...')
    get_data(2019, 2020, 'data/PredictionData/MMSeeds_with_Team_IDs.csv',
             'data/PredictionData')

    # Create vectors that will be used as datapoints for logistic regression
    print('Creating vectors...')
    get_vectors()

    # Create the weights using logistic regression
    print('Determining weights...')
    print(get_weights())

# Predict the 2019 tournament bracket
print('Predicting...')
predict(rng_flag)

print('Done')
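
A plausible invocation, assuming the entry point is a script named main.py that takes the two flags positionally (the script name and the rng_flag semantics are assumptions; the flag presumably toggles randomness inside predict):

python main.py 1 0    # full rebuild of data, vectors, and weights
python main.py 0 1    # reuse formatted data; randomized predictions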
Example #7
from keras.layers import (Input, Conv2D, BatchNormalization, LeakyReLU,
                          MaxPooling2D)
from keras.models import Model
from keras.regularizers import l2


def model():

    # architecture (a Tiny-YOLO-style stack of conv + maxpool blocks)
    parameters = {
        0: {
            'filters': 16,
            'size': 3,
            'stride': 1,
            'maxpool_size': 2,
            'maxpool_stride': 2,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        1: {
            'filters': 32,
            'size': 3,
            'stride': 1,
            'maxpool_size': 2,
            'maxpool_stride': 2,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        2: {
            'filters': 64,
            'size': 3,
            'stride': 1,
            'maxpool_size': 2,
            'maxpool_stride': 2,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        3: {
            'filters': 128,
            'size': 3,
            'stride': 1,
            'maxpool_size': 2,
            'maxpool_stride': 2,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        4: {
            'filters': 256,
            'size': 3,
            'stride': 1,
            'maxpool_size': 2,
            'maxpool_stride': 2,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        5: {
            'filters': 512,
            'size': 3,
            'stride': 1,
            'maxpool_size': 2,
            'maxpool_stride': 1,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        6: {
            'filters': 1024,
            'size': 3,
            'stride': 1,
            'maxpool_size': False,
            'maxpool_stride': False,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        7: {
            'filters': 512,
            'size': 3,
            'stride': 1,
            'maxpool_size': False,
            'maxpool_stride': False,
            'batch_normalize': True,
            'activation': 'leakyRelu'
        },
        8: {
            'filters': 425,
            'size': 1,
            'stride': 1,
            'pad': 'same',
            'maxpool_size': False,
            'maxpool_stride': False,
            'batch_normalize': False,
            'activation': 'linear'
        }
    }

    # init model variables
    weights, bias_weights, bn_weights = get_weights()
    layer_values = Input(shape=(416, 416, 3))

    # model
    stacked_layers = [layer_values]
    for layer, params in parameters.items():

        # conv
        layer_values = Conv2D(
            filters=params['filters'],
            kernel_size=params['size'],
            strides=params['stride'],
            padding='same',
            weights=weights[layer],
            use_bias=not params['batch_normalize'],  # do not use if batch norm
            kernel_regularizer=l2(0.0005))(
                layer_values)  # decay specified in cfg

        # normalization
        if params['batch_normalize']:
            # Note: Keras BatchNormalization expects its weights as
            # [gamma, beta, moving_mean, moving_variance].
            layer_values = BatchNormalization(weights=[
                bn_weights[layer][0],  # scale gamma
                bias_weights[layer],  # shift beta
                bn_weights[layer][1],  # running var
                bn_weights[layer][2]  # running mean
            ])(layer_values)

        # activation (both branches appended, so append once after the check)
        if params['activation'] == 'leakyRelu':
            layer_values = LeakyReLU(alpha=0.1)(layer_values)
        stacked_layers.append(layer_values)

        # pooling
        if params['maxpool_size']:
            layer_values = MaxPooling2D(pool_size=params['maxpool_size'],
                                        strides=params['maxpool_stride'],
                                        padding='same')(layer_values)
            stacked_layers.append(layer_values)

    return Model(inputs=stacked_layers[0], outputs=stacked_layers[-1])
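
A quick smoke test of the assembled network (assuming get_weights() can supply the weight arrays); the output size follows from the five stride-2 max-pools:

import numpy as np

yolo = model()
yolo.summary()
preds = yolo.predict(np.zeros((1, 416, 416, 3), dtype=np.float32))
print(preds.shape)  # (1, 13, 13, 425): 416 halved five times is 13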