Example #1
def TrainImages():
    # Extract embedding
    extract_embeddings.run()

    # Train model
    train_model.run()
    mainMenu()  # Return to the main menu once training finishes
Example #2
    def onTrainModel(self):
        # Extract embedding
        extract_embeddings.run()

        # Train model
        train_model.run()
        mb.showinfo("Train model", "Model trained successfully")
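This handler appears to come from a Tkinter GUI. Below is a minimal sketch of how it might be wired to a button; the App class, window title, and layout are assumptions for illustration, not the original program:

# Hypothetical Tkinter wiring for the handler above (assumed layout).
import tkinter as tk
from tkinter import messagebox as mb

import extract_embeddings
import train_model


class App(tk.Tk):
    def __init__(self):
        super().__init__()
        self.title("Trainer")
        # The button invokes the same handler shown in Example #2.
        tk.Button(self, text="Train model", command=self.onTrainModel).pack()

    def onTrainModel(self):
        # Extract embeddings, then fit the classifier, as above.
        extract_embeddings.run()
        train_model.run()
        mb.showinfo("Train model", "Model trained successfully")


if __name__ == "__main__":
    App().mainloop()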
Example #3
def run_train_model() -> dict:
    import train_model

    try:
        args = flask.request.get_json() or {}
        params = {
            "project": args.get("project", PROJECT),
            "region": args.get("region", REGION),
            "container_image": args.get("container_image", CONTAINER_IMAGE),
            "train_data_dir": args.get("train_data_dir", TRAIN_DATA_DIR),
            "eval_data_dir": args.get("eval_data_dir", EVAL_DATA_DIR),
            "training_dir": args.get("training_dir", TRAINING_DIR),
            "train_epochs": args.get("train_epochs", DEFAULT_TRAIN_EPOCHS),
            "batch_size": args.get("batch_size", DEFAULT_BATCH_SIZE),
            "machine_type": args.get("machine_type", DEFAULT_MACHINE_TYPE),
            "gpu_type": args.get("gpu_type", DEFAULT_GPU_TYPE),
            "gpu_count": args.get("gpu_count", DEFAULT_GPU_COUNT),
        }
        job_id = train_model.run(**params)

        return {
            "method": "train-model",
            "job_id": job_id,
            "job_url":
            f"https://console.cloud.google.com/vertex-ai/locations/{REGION}/training/{job_id}/cpu?project={PROJECT}",
            "params": params,
        }
    except Exception as e:
        return {"error": f"{type(e).__name__}: {e}"}
Example #4
def main():
    global NAME, SIZE, EPOCHS, BATCH_SIZE, data_path

    save_name = input("Please enter a save name:\n")
    model = read_config(save_name)

    if not model:
        name = save_name
        data_path = pathlib.Path(input("Enter the path of the dataset:\n"))
        size = get_meta.get_img_meta(data_path)
        epochs = int(input("Enter the number of EPOCHS:\n"))
        batch_size = int(input("Enter the BATCH SIZE:\n"))
        model = Model(name, size, epochs, batch_size, data_path)

    train_model.run(model.NAME, model.SIZE, model.EPOCHS, model.BATCH_SIZE,
                    model.data_path)

    save_config(save_name, model)
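read_config and save_config are not shown in this example. One plausible JSON-backed implementation, offered purely as an assumption about the missing helpers (it reuses the snippet's Model class), might look like:

# Hypothetical helpers; the <save_name>.json layout is an assumption.
import json
import pathlib


def read_config(save_name):
    path = pathlib.Path(save_name + ".json")
    if not path.exists():
        return None  # signals main() to prompt for a new configuration
    cfg = json.loads(path.read_text())
    return Model(cfg["NAME"], cfg["SIZE"], cfg["EPOCHS"],
                 cfg["BATCH_SIZE"], pathlib.Path(cfg["data_path"]))


def save_config(save_name, model):
    cfg = {"NAME": model.NAME, "SIZE": model.SIZE, "EPOCHS": model.EPOCHS,
           "BATCH_SIZE": model.BATCH_SIZE, "data_path": str(model.data_path)}
    pathlib.Path(save_name + ".json").write_text(json.dumps(cfg))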
Example #5
"""Driver code for language detection model."""
import train_model
import argparse
import sys

def parse_user_options(args):
    parser = argparse.ArgumentParser(description="Parses command.")
    parser.add_argument("-t", "--train", help="Path of training file if you want to train from scratch", required=False)
    parser.add_argument("-d", "--val", help="Path of validation file if you want to do early stopping", required=False)
    parser.add_argument("-v", "--test", help="Path of test file if you want to test", required=False)
    parser.add_argument("-p", "--predict", help="Path of a file where each line is a document to classify", required=False)
    parser.add_argument("-m", "--sample", help="A single sentence to classify", required=False)
    parser.add_argument("-w", "--topwords", help="Number of words in vocab.", type=int, default=200000)
    parser.add_argument("-l", "--maxlen", help="Max len of input sequence in words.", type=int, default=20)
    parser.add_argument("-e", "--embedding", help="Embedding dimension.", type=int, default=8)
    parser.add_argument("-s", "--size", help="Model dimension.", type=int, default=100)
    parser.add_argument("-o", "--epochs", help="Number of epochs.", type=int, default=30)
    parser.add_argument("-b", "--batch", help="Mini-batch size.", type=int, default=32)
    parser.add_argument("-r", "--randomseed", help="Random seed to reproduce numbers.", type=int, default=7)
    parser.add_argument("-c", "--cached", help="Use cached models for train, test.", action="store_true")
    parser.add_argument("-u", "--vocab", help="Repopulate vocab.", action="store_true")
    parser.add_argument("-a", "--early", help="Use early stopping for model training.", action="store_true")
    options = parser.parse_args(args)
    return options

if __name__ == "__main__":
    options = parse_user_options(sys.argv[1:])
    train_model.run(options)
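For reference, typical invocations of this driver (the script and file names are placeholders, not from the source):

#   python driver.py --train train.txt --val val.txt --early
#   python driver.py --sample "Bonjour tout le monde" --cached
# The parser can also be exercised programmatically:
options = parse_user_options(["--train", "train.txt", "--epochs", "10"])
assert options.train == "train.txt" and options.epochs == 10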


Example #6
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

__author__ = "Miquel Ferrarons, David Vazquez"
__copyright__ = "Copyright 2015, CVC-UAB"
__credits__ = ["Miquel Ferrarons", "David Vazquez"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Miquel Ferrarons"

import extract_features
import train_model
import test_image
import test_folder
import show_results
import evaluate_results

extract_features.run()  # Extracts the features for all the images
train_model.run()  # Train the classifier

# Tests a single image and shows the results
# test_image.run()

# Test a whole folder
test_folder.run()
show_results.run()  # Saves the resulting images
evaluate_results.run()  # Runs the evaluation
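Each stage in this pipeline exposes the same parameterless run() entry point and hands data to the next stage through files on disk. A minimal skeleton of one such stage, assuming pickled features and a scikit-learn classifier (neither is confirmed by the source):

# Hypothetical shape of a pipeline stage; the file names and LinearSVC
# classifier are assumptions for illustration.
import pickle

from sklearn.svm import LinearSVC


def run():
    # Load the features produced by the previous stage.
    with open("features.pkl", "rb") as f:
        features, labels = pickle.load(f)

    # Fit the classifier and persist it for the testing stages.
    classifier = LinearSVC()
    classifier.fit(features, labels)
    with open("model.pkl", "wb") as f:
        pickle.dump(classifier, f)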
Example #7
# local dependencies
import train_model

print("-->>> Start training a neural net")
train_model.run()
Example #8
# external dependencies
import tensorflow_io.kafka as kafka_io
# local dependencies
import train_model
# config values
from config import EVENT_STREAMS_API_KEY, CERT
from config import KAFKA_BOOTSTRAP, TEST_STREAM_TOPIC_NAME

print("-->>> Train a machine learning model using training data from Kafka")
model = train_model.run()

print("-->>> Prepare a streaming dataset based on a Kafka topic")
dataset = kafka_io.KafkaDataset(
    [TEST_STREAM_TOPIC_NAME + ":0"],
    servers=KAFKA_BOOTSTRAP,
    group="dalelane-tensorflow-test",
    eof=False,
    config_global=[
        "api.version.request=true", "sasl.mechanisms=PLAIN",
        "security.protocol=sasl_ssl", "sasl.username=token",
        "sasl.password="******"ssl.ca.location=" + CERT
    ])
dataset = dataset.map(train_model.deserialize).batch(1)

print("-->>> Start classifying events received on the topic %s" %
      TEST_STREAM_TOPIC_NAME)
for image, label in dataset:
    prediction = model.predict(image)

    if prediction.argmax() == label[0]:
        print("---->>>> ✓ Event classified correctly")
Example #9
    log.info("user_recommend insert...")
    list_recommend = mod.recommendProductsForUsers(30).collect()
    sql_values = ""
    rows_num = 0
    for i in range(len(list_recommend)):
        for j in range(len(list_recommend[i][1])):
            row = list_recommend[i][1][j]
            sql_values += "(%s,%s,%s)," % (row.user,row.product,row.rating)
            rows_num += 1
        if (i != 0 and i % 100 == 0) or i == len(list_recommend)-1:
            sql_insert = "insert into user_recommend (uid,pid,rating) values %s" % sql_values.rstrip(',')
            sql_values = ""
            cur.execute(sql_insert)
            conn.commit()
    log.info("user_recommend(%s users and %s rows) insert complete!" % (len(list_recommend),rows_num))

    log.info("user_recommend write redis...")
    redis_prefix = "rec.user_recommend.uid"
    for i in range(len(list_recommend)):
        row = list_recommend[i]
        json_value = json.dumps(row[1])
        pipe.set("%s:%s" % (redis_prefix,row[0]),json_value)
    pipe.execute()

if __name__ == "__main__":
    import train_model
    mod = train_model.run()
    run(mod)
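The INSERT above is built by string formatting, which works for numeric ratings but breaks as soon as a value needs escaping. A safer equivalent of the same batching logic, assuming a DB-API 2.0 cursor such as pymysql (the snippet does not say which driver it uses):

# Hypothetical parameterized rewrite of the batched insert above.
def insert_recommendations(conn, cur, list_recommend, batch_size=100):
    sql = "insert into user_recommend (uid,pid,rating) values (%s,%s,%s)"
    batch = []
    for uid, recs in list_recommend:
        batch.extend((row.user, row.product, row.rating) for row in recs)
        if len(batch) >= batch_size:
            cur.executemany(sql, batch)  # the driver escapes every value
            conn.commit()
            batch = []
    if batch:
        cur.executemany(sql, batch)
        conn.commit()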

