Example #1
0
    def compile_bot(self):
        """Compile this bot's source file and attach the resulting binary.

        Runs the project compiler on the uploaded source, and on success
        saves the produced binary onto ``self.bot_bin_file`` and persists
        the model. On failure, returns the parsed compiler log instead.

        Returns:
            tuple: ``(exit_status, logs)`` — ``logs`` is the parsed
            compiler output when ``exit_status != 0``, otherwise ``""``.
        """
        import os

        from ai_arena.contests.compilation import compile
        from django.core.files import File

        src = settings.MEDIA_ROOT + self.bot_source_file.name
        log_target = settings.COMPILATION_TEMP_PATH + self.name + '.log'
        target = settings.COMPILATION_TEMP_PATH + self.name + '.bin'
        lang = self.bot_lang
        exit_status = compile(src, target, lang, log_target)
        if exit_status != 0:
            # Compilation failed: surface the parsed compiler log to the caller.
            # `with` guarantees the log handle is closed (the original leaked it).
            with open(log_target, 'r') as log_file:
                logs = parse_logs(log_file.read())
            return (exit_status, logs)
        else:
            # Use compiled file in object bot; close the handle once saved.
            with open(target) as compiled_file:
                self.bot_bin_file.save(self.name, File(compiled_file))

            # Save changes made to bot object
            self.save()

            # Remove compiled file from directory with source.
            # os.remove avoids spawning a shell for a single unlink (the
            # original `system('rm ' + target)` was also injection-prone).
            os.remove(target)
            return (exit_status, "")
def create():
    """Build and send an email digest of this week's logs, grouped by tag.

    Reads the configured logfile, keeps only entries from the current week,
    groups them by tag, and hands the assembled message lines to
    ``send_mail.sendMail``.
    """
    # Read in the logfile; `with` closes the handle (the original leaked it).
    # NOTE(review): assumes utils.parse_logs fully consumes the file before
    # returning (i.e. does not return a lazy reader) — verify against utils.
    with open(config.path_to_logfile, "r") as f:
        logs = utils.parse_logs(f)

    # Filter to this week only.
    filter_this_week = utils.filter_by_date(config.beginning_of_week)
    logs = filter_this_week(logs)

    # Get the sorted set of tags present in the logs.
    tags = utils.extract_sorted_tags(logs)

    # For each tag, grab the matching logs.
    tags_to_logs = {tag: utils.filter_by_tags(tag)(logs) for tag in tags}

    # Write the groups into the email body, one header line per tag.
    msg = []
    for tag, tag_logs in tags_to_logs.items():
        msg.append('Tag: ' + tag)
        for log in tag_logs:
            msg.append(log['log'])
    send_mail.sendMail(msg)
Example #3
0
def train(caffe_home, log_path, output_log_file, solver, input_weight_file, output_weight_file, note):
    """Train Caffe, fine-tuning the given model.

    Runs the trainer, parses the produced training logs, and moves the
    resulting weight file into place.

    Args:
        caffe_home: Path to the Caffe installation root.
        log_path: Directory where training logs are written.
        output_log_file: Log file produced by this training run.
        solver: Path to the Caffe solver prototxt.
        input_weight_file: Pretrained weights to fine-tune from.
        output_weight_file: Destination for the trained weights.
        note: Free-form note attached to the run.
    """
    print("Training using data")

    _run_trainer(caffe_home, log_path, output_log_file, solver, input_weight_file, note)

    _generate_parsed_logs(caffe_home, log_path, output_log_file)
    (training_details, validation_details) = utils.parse_logs(log_path, output_log_file)
    _move_trained_weight_file(log_path, output_log_file, output_weight_file)

    # Call form (not the Python 2 print statement) keeps this consistent
    # with the print() above and valid under both Python 2 and 3.
    print("Finished training!")
Example #4
0
def parse_command_line():
    """Parse CLI options and run the trained-model quality test.

    Builds the argparse configuration for testing a trained Caffe model,
    parses the training logs, plots result graphs, and runs validation
    predictions against the configured leveldb dataset.
    """
    parser = argparse.ArgumentParser(description="""Tests a trained Caffe model to see how well
        it does, generating quality graphs and statistics""")
    parser.add_argument("--log_path", help="The path to where to place log files and graphs",
        type=str, default="logs")
    parser.add_argument("--log_num", help="""Number that will be appended to log files; this will
        be automatically padded and added with zeros, such as output00001.log""", type=int,
        default=1)
    parser.add_argument("--input_weight_file", help="""The trained and fine-tuned Caffe model that
        we will be testing; defaults to the last trained model from train.py""", type=str,
        default="logs/latest_bvlc_alexnet_finetuned.caffemodel")
    parser.add_argument("--note", help="Adds extra note onto generated quality graphs.", type=str,
        default="")
    parser.add_argument("--solver", help="The path to our Caffe solver prototxt file",
        type=str, default="src/caffe_model/bvlc_alexnet/solver.prototxt")
    parser.add_argument("--deploy", help="""Path to our Caffe deploy/inference time prototxt file""",
        type=str, default="src/caffe_model/bvlc_alexnet/deploy.prototxt")
    parser.add_argument("--threshold", help="""The percentage threshold over which we assume
        something is a cloud. Note that this value is from 0.0 to 100.0""", type=float, default=0.1)
    parser.add_argument("--validation_leveldb", help="""Path to where the validation leveldb file is""",
        type=str, default="data/leveldb/validation_leveldb")
    parser.add_argument("--width", help="Width of image during training", type=int, default=256)
    parser.add_argument("--height", help="Height of image during training", type=int, default=256)
    parser.add_argument("--inference_width", help="Width of image during training", type=int,
        default=227)
    parser.add_argument("--inference_height", help="Height of image during training", type=int,
        default=227)
    parser.add_argument("--training_mean_pickle", help="Path to pickled mean values", type=str,
        default="data/imagenet/imagenet_mean.npy")

    args = vars(parser.parse_args())

    # Call form (not the Python 2 print statement) — consistent and 2/3-safe.
    print("Testing trained model...")

    caffe_home = utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_path = os.path.abspath(args["log_path"])
    log_num = args["log_num"]
    (output_ending, output_log_prefix, output_log_file) = utils.get_log_path_details(log_path, log_num)
    output_graph_path = output_log_prefix

    (training_details, validation_details) = utils.parse_logs(log_path, output_log_file)

    plot_results(training_details, validation_details, args["note"], output_graph_path, args["solver"])
    validation_leveldb = os.path.abspath(args["validation_leveldb"])
    deploy = os.path.abspath(args["deploy"])
    input_weight_file = os.path.abspath(args["input_weight_file"])
    training_mean_pickle = os.path.abspath(args["training_mean_pickle"])
    predict.test_validation(args["threshold"], output_log_prefix, validation_leveldb,
        deploy, args["width"], args["height"], args["inference_width"],
        args["inference_height"], input_weight_file, training_mean_pickle)
Example #5
0
File: train.py  Project: yemenr/cloudless
def train(caffe_home, log_path, output_log_file, solver, input_weight_file,
          output_weight_file, note):
    """Train Caffe, fine-tuning the given model.

    Runs the trainer, parses the produced training logs, and moves the
    resulting weight file into place.

    Args:
        caffe_home: Path to the Caffe installation root.
        log_path: Directory where training logs are written.
        output_log_file: Log file produced by this training run.
        solver: Path to the Caffe solver prototxt.
        input_weight_file: Pretrained weights to fine-tune from.
        output_weight_file: Destination for the trained weights.
        note: Free-form note attached to the run.
    """
    print("Training using data")

    _run_trainer(caffe_home, log_path, output_log_file, solver,
                 input_weight_file, note)

    _generate_parsed_logs(caffe_home, log_path, output_log_file)
    (training_details,
     validation_details) = utils.parse_logs(log_path, output_log_file)
    _move_trained_weight_file(log_path, output_log_file, output_weight_file)

    # Call form (not the Python 2 print statement) keeps this consistent
    # with the print() above and valid under both Python 2 and 3.
    print("Finished training!")
Example #6
0
def get_best_model(out_dir):
    """Return the weights path of the best-scoring trained run under ``out_dir``.

    Scans two directory levels deep for run directories containing a ``log``
    file, extracts each run's score via ``parse_logs``, and returns the path
    to that run's weight file with the highest score.

    Args:
        out_dir: Root directory containing per-run subdirectories.

    Returns:
        str: Path to the best run's weight file (``WEIGHTS_NAME``).

    Raises:
        FileNotFoundError: If no run with a parsable score is found.
    """
    scores = []
    model_paths = []
    for path in Path(out_dir).glob("*/*"):
        try:
            # parse_logs returns a 2-tuple; the second element is the run's
            # score, or None when the run produced no usable result.
            _, run_score = parse_logs(os.path.join(path, "log"))
            if run_score is not None:
                scores.append(run_score)
                model_paths.append(os.path.join(path, WEIGHTS_NAME))
        except (AttributeError, FileNotFoundError, NotADirectoryError):
            # Not a run directory / incomplete run — skip it.
            continue

    if len(model_paths) == 0:
        raise FileNotFoundError(
            f"Could not find any trained models in {out_dir}")

    # First index of the maximum score; no need to round-trip the Python list
    # through numpy (np.argmax) — this matches its first-max tie-breaking.
    best_index = max(range(len(scores)), key=scores.__getitem__)
    return model_paths[best_index]
Example #7
0
File: test.py  Project: yemenr/cloudless
def parse_command_line():
    """Parse CLI options and run the trained-model quality test.

    Builds the argparse configuration for testing a trained Caffe model,
    parses the training logs, plots result graphs, and runs validation
    predictions against the configured leveldb dataset.
    """
    parser = argparse.ArgumentParser(
        description="""Tests a trained Caffe model to see how well
        it does, generating quality graphs and statistics""")
    parser.add_argument("--log_path",
                        help="The path to where to place log files and graphs",
                        type=str,
                        default="logs")
    parser.add_argument(
        "--log_num",
        help="""Number that will be appended to log files; this will
        be automatically padded and added with zeros, such as output00001.log""",
        type=int,
        default=1)
    parser.add_argument(
        "--input_weight_file",
        help="""The trained and fine-tuned Caffe model that
        we will be testing; defaults to the last trained model from train.py""",
        type=str,
        default="logs/latest_bvlc_alexnet_finetuned.caffemodel")
    parser.add_argument("--note",
                        help="Adds extra note onto generated quality graphs.",
                        type=str,
                        default="")
    parser.add_argument("--solver",
                        help="The path to our Caffe solver prototxt file",
                        type=str,
                        default="src/caffe_model/bvlc_alexnet/solver.prototxt")
    parser.add_argument(
        "--deploy",
        help="""Path to our Caffe deploy/inference time prototxt file""",
        type=str,
        default="src/caffe_model/bvlc_alexnet/deploy.prototxt")
    parser.add_argument("--threshold",
                        help="""The percentage threshold over which we assume
        something is a cloud. Note that this value is from 0.0 to 100.0""",
                        type=float,
                        default=0.1)
    parser.add_argument(
        "--validation_leveldb",
        help="""Path to where the validation leveldb file is""",
        type=str,
        default="data/leveldb/validation_leveldb")
    parser.add_argument("--width",
                        help="Width of image during training",
                        type=int,
                        default=256)
    parser.add_argument("--height",
                        help="Height of image during training",
                        type=int,
                        default=256)
    parser.add_argument("--inference_width",
                        help="Width of image during training",
                        type=int,
                        default=227)
    parser.add_argument("--inference_height",
                        help="Height of image during training",
                        type=int,
                        default=227)
    parser.add_argument("--training_mean_pickle",
                        help="Path to pickled mean values",
                        type=str,
                        default="data/imagenet/imagenet_mean.npy")

    args = vars(parser.parse_args())

    # Call form (not the Python 2 print statement) — consistent and 2/3-safe.
    print("Testing trained model...")

    caffe_home = utils.assert_caffe_setup()

    # Ensure the random number generator always starts from the same place for consistent tests.
    random.seed(0)

    log_path = os.path.abspath(args["log_path"])
    log_num = args["log_num"]
    (output_ending, output_log_prefix,
     output_log_file) = utils.get_log_path_details(log_path, log_num)
    output_graph_path = output_log_prefix

    (training_details,
     validation_details) = utils.parse_logs(log_path, output_log_file)

    plot_results(training_details, validation_details, args["note"],
                 output_graph_path, args["solver"])
    validation_leveldb = os.path.abspath(args["validation_leveldb"])
    deploy = os.path.abspath(args["deploy"])
    input_weight_file = os.path.abspath(args["input_weight_file"])
    training_mean_pickle = os.path.abspath(args["training_mean_pickle"])
    predict.test_validation(args["threshold"], output_log_prefix,
                            validation_leveldb, deploy, args["width"],
                            args["height"], args["inference_width"],
                            args["inference_height"], input_weight_file,
                            training_mean_pickle)