Code example #1
0
def main() -> None:
    """Execute the program.

    Prompts the user for a mileage, predicts a price with the stored
    weights, and warns when the model looks untrained (both weights 0).
    """
    import sys  # local import keeps this fix self-contained in the block

    kilometrage = input("Kilometrage: ")
    try:
        kilometrage = int(kilometrage)
    except ValueError:  # only the int() cast can fail here; don't mask other errors
        # Message now matches the actual cast (int, not float).
        print("Cannot cast '{}' to int.".format(kilometrage))
        sys.exit(1)  # sys.exit is the script-safe form; bare exit() is meant for the REPL
    theta0, theta1 = get_weights()
    prediction = predict(theta0, theta1, kilometrage)
    print("Estimated price for {}kms: {:.4f}$".format(kilometrage, prediction))
    # (0, 0) is the untrained sentinel — presumably what get_weights() returns
    # before train.py has run; TODO confirm against get_weights.
    if theta0 == 0 and theta1 == 0:
        print(
            "Note: it seems that the model is not trained yet. Run train.py to set weights."
        )
Code example #2
0
def main() -> None:
    """Plot the raw and the normalized dataset side by side.

    When stored weights are available (both non-zero), the regression
    line is drawn in red on top of the raw scatter plot.
    """
    dataset = pd.read_csv("./data.csv")
    _, axes = plt.subplots(1, 2)
    axes[0].set_title("Raw dataset")
    axes[0].scatter(data=dataset, x="km", y="price")
    theta0, theta1 = get_weights()
    # Only overlay the model when both weights have been set by training.
    if theta0 != 0 and theta1 != 0:
        mileage = dataset["km"]
        axes[0].plot(mileage, theta0 + theta1 * mileage, 'r')
    # prepare_data mutates the frame in place — presumably normalization;
    # the second panel shows the result.
    prepare_data(dataset)
    axes[1].set_title("Normalized dataset")
    axes[1].scatter(data=dataset, x="km", y="price")
    plt.show()
Code example #3
0
File: train.py  Project: sirebellum/IoEqualizers
# Optional pretrained checkpoint for the feature extractor.
# NOTE(review): `parser` is created earlier in the file (outside this view).
parser.add_argument("--weights",
                    default=None,
                    help="Model checkpoint to get pretrained weights from")
args = parser.parse_args()
# Number of training steps; assumes a --steps argument was registered
# earlier on the parser — TODO confirm.
num_steps = int(args.steps)

# Directory setup
abs_path = os.path.abspath(__file__)  # Absolute path of this file
directory = os.path.dirname(abs_path)
# Model output lives under ./models/<output_name>, relative to this file.
model_dir = directory + "/models/" + args.output_name

# Get pretrained weights for feature extractor
weights = None
if args.weights is not None:
    # Resolve the checkpoint path relative to this file's directory,
    # so the script works regardless of the caller's cwd.
    weights = os.path.join(os.path.dirname(__file__), args.weights)
    weights = get_weights(weights)  # numpy weights


# Define the input function for training
def tfrecord_input():

    # Keep list of filenames, so you can input directory of tfrecords easily
    train_filenames = glob.glob("../data_processing/tfrecords/*tfrecords")
    valid_filenames = glob.glob("../data_processing/tfrecords/val*tfrecords")
    batch_size = 1024

    # Import data
    dataset = tf.data.TFRecordDataset(train_filenames,
                                      num_parallel_reads=6,
                                      buffer_size=1000 * 1000 *
                                      128)  # 128mb of io cache