Code Example #1
    # Requires at module level: import subprocess, logging; logger = logging.getLogger(__name__)
    def process_task(self, body, message):
        import glob
        print(body, message)
        # Opening with 'wb' truncates or creates the file, so no shell cleanup is needed.
        with open('uploaded_custom.py', 'wb') as script_file:
            script_file.write(body)  # body holds the raw bytes of the uploaded script
        try:
            from metrics import run_metrics
            from code_exec import execute_user_script
            val_ret = {'score': 0, 'duration': 0}
            # Sanity-check that the uploaded script runs at all.
            subprocess.check_output('python uploaded_custom.py', shell=True)
            # Denoised outputs contain an underscore (e.g. denoise_1.jpg); the rest are raw inputs.
            denoise_list = glob.glob('./kaggle/*_*.jpg')
            total_list = glob.glob('./kaggle/*.jpg')
            raw_list = list(set(total_list) - set(denoise_list))
            run_duration = execute_user_script(raw_list)
            # Score each raw/denoised image pair and accumulate the total.
            for i in range(1, 40):
                tmp = run_metrics('./kaggle/' + str(i) + '.jpg',
                                  './kaggle/denoise_' + str(i) + '.jpg')
                print(i, tmp['score'])
                val_ret['score'] += tmp['score']
            val_ret['duration'] = run_duration
            return val_ret
        except Exception as exc:
            logger.error('task raised exception: %r', exc)
        finally:
            message.ack()  # acknowledge the message whether or not scoring succeeded
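
These snippets only show call sites; the metrics module itself is not part of the listing. From the calls above, run_metrics(raw_path, denoised_path) returns a dict with at least a 'score' key. Below is a minimal sketch of such a function, assuming a PSNR-based score computed with Pillow and NumPy; the actual kit-soft implementation may differ.

# Hypothetical sketch of the run_metrics contract seen above; the real
# metrics module may compute a different score.
import numpy as np
from PIL import Image

def run_metrics(raw_path, denoised_path):
    # Load both images as grayscale float arrays (shapes must match).
    raw = np.asarray(Image.open(raw_path).convert('L'), dtype=np.float64)
    denoised = np.asarray(Image.open(denoised_path).convert('L'), dtype=np.float64)
    # Peak signal-to-noise ratio as a stand-in quality score.
    mse = np.mean((raw - denoised) ** 2)
    score = float('inf') if mse == 0 else 10 * np.log10(255.0 ** 2 / mse)
    return {'score': score}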
Code Example #2
File: views.py Project: thomasrob/kit-soft
    # Requires at module level: import subprocess
    def handle_uploaded_file(self, f):
        from metrics import run_metrics
        from code_exec import execute_user_script
        # Write the uploaded Django file to disk chunk by chunk; the
        # with-block closes it automatically, so no explicit close() is needed.
        with open('uploaded_custom.py', 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)

        # Sanity-check that the uploaded script runs, then time its execution.
        subprocess.check_output('python uploaded_custom.py', shell=True)
        run_duration = execute_user_script()
        val_ret = run_metrics('manu.jpg', 'denoise_image.jpg')
        val_ret['duration'] = run_duration
        return val_ret
Code Example #3
File: views.py Project: echopen/kit-soft
    # Requires at module level: import subprocess
    def handle_uploaded_file(self, f):
        from metrics import run_metrics
        from code_exec import execute_user_script
        # Write the uploaded Django file to disk chunk by chunk; the
        # with-block closes it automatically, so no explicit close() is needed.
        with open('uploaded_custom.py', 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)

        # Sanity-check that the uploaded script runs, then time its execution.
        subprocess.check_output('python uploaded_custom.py', shell=True)
        run_duration = execute_user_script()
        val_ret = run_metrics('manu.jpg', 'denoise_image.jpg')
        val_ret['duration'] = run_duration
        return val_ret
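
A side note on a pattern these handlers share: subprocess.check_output('python uploaded_custom.py', shell=True) runs arbitrary uploaded code through an unrestricted shell. A slightly safer variant (an assumption on my part, not something these projects ship) passes an argument list and bounds the runtime; truly untrusted code still needs real sandboxing.

import subprocess
import sys

# Sketch only: avoids shell=True and caps the runtime of the uploaded
# script. This is not a sandbox; untrusted code needs real isolation.
def check_uploaded_script(path='uploaded_custom.py', timeout=60):
    return subprocess.check_output([sys.executable, path], timeout=timeout)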
Code Example #4
    # Requires at module level: import subprocess
    def handle_uploaded_file(self, f):
        from metrics import run_metrics
        from code_exec import execute_user_script
        import glob
        # Write the uploaded Django file to disk; the with-block closes it.
        with open('uploaded_custom.py', 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        val_ret = {'score': 0, 'duration': 0}
        # Sanity-check that the uploaded script runs at all.
        subprocess.check_output('python uploaded_custom.py', shell=True)
        # Denoised outputs contain an underscore; the rest are raw inputs.
        denoise_list = glob.glob('./kaggle/*_*.jpg')
        total_list = glob.glob('./kaggle/*.jpg')
        raw_list = list(set(total_list) - set(denoise_list))
        run_duration = execute_user_script(raw_list)
        # Score each raw/denoised image pair and accumulate the total.
        for i in range(1, 40):
            tmp = run_metrics('./kaggle/' + str(i) + '.jpg',
                              './kaggle/denoise_' + str(i) + '.jpg')
            val_ret['score'] += tmp['score']
        val_ret['duration'] = run_duration
        return val_ret
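
code_exec.execute_user_script is likewise not shown. From the call sites it optionally takes a list of input images and returns the elapsed run time that the callers store under 'duration'. A hypothetical implementation consistent with that signature:

# Hypothetical code_exec.execute_user_script, inferred from the call
# sites above; the real module may work differently.
import subprocess
import sys
import time

def execute_user_script(input_files=None):
    args = [sys.executable, 'uploaded_custom.py'] + list(input_files or [])
    start = time.time()
    subprocess.check_call(args)  # run the uploaded script to completion
    return time.time() - start   # elapsed seconds, stored as 'duration'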
Code Example #5
def calculate_metrics_values(predictions_array, metrics_conf, Y_test):
    """Average each configured metric over a list of prediction arrays."""
    # run_metrics is assumed to be imported or defined elsewhere in this module.
    metric_values = None

    # No predictions: return a zero value for every configured metric.
    if predictions_array is None:
        return {k: 0 for k in metrics_conf}

    # Sum each metric across all prediction arrays.
    for p in predictions_array:
        tmp_metric_values = run_metrics(metrics_conf, Y_test, p)
        if metric_values is None:
            metric_values = tmp_metric_values
        else:
            for k in metric_values:
                metric_values[k] += tmp_metric_values[k]

    # Convert the sums into per-prediction averages.
    for k in metric_values:
        metric_values[k] = metric_values[k] / len(predictions_array)

    return metric_values
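
A quick usage sketch, with a stubbed run_metrics (hypothetical, since this project's real metric functions are not shown), illustrates the averaging behavior:

# Stub run_metrics for illustration: one accuracy metric per prediction.
def run_metrics(metrics_conf, Y_test, p):
    return {'accuracy': sum(a == b for a, b in zip(Y_test, p)) / len(Y_test)}

Y_test = [1, 0, 1, 1]
predictions = [[1, 0, 1, 0], [1, 1, 1, 1]]
print(calculate_metrics_values(predictions, {'accuracy': None}, Y_test))
# -> {'accuracy': 0.75}, i.e. (0.75 + 0.75) / 2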
Code Example #6
File: master.py Project: JustinEvans-github/fasttext
import pickle

# Optional: hyperparameter tuning from the previous step.
#run_tunemodel(tune_variables, "accuracy")

## Train the model ##

# note: skip defining model parameters here if you hyperparameter-tuned in the previous step
update_arguements({"model_quantize": "yes",
                   "epochs": "10", "learning_rate": "0.7", "dimensions": "60", "minimum_word_count": "1",
                   "word_ngrams": "6", "min_char_grams": "0", "max_char_grams": "5"})

run_trainmodel()

## Make model predictions ##
run_testmodel()

## Generate metrics ##
update_arguements({"metrics_directory": "C:/Users/Justin Evans/Documents/Python/fasttext_project/metrics/"})
update_arguements({"n_iterations": "100", "n_size": "0.5"})
with open("args.txt", "rb") as file:
    args = pickle.load(file)

# generate metrics: F1, precision, recall, bootstrapped accuracy
run_metrics(args['data_directory'], 'validdata_preprocessed_predicted')
run_metrics(args['data_directory'], 'testdata_preprocessed_predicted') # run test second to log results

# given the validation-data error rate & threshold, what is the error rate when applied to the test dataset?
setthreshold_testdata(args['data_directory'], 'validdata_preprocessed_predicted', 'testdata_preprocessed_predicted')


## Track ML iterations through MLflow ##
ml_finish_run()
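
update_arguements itself is not included in the listing. Since the script later unpickles args.txt, a plausible implementation (an assumption, not confirmed from the project) merges new keys into that pickled dict:

import os
import pickle

# Assumed implementation of update_arguements, inferred from master.py
# reading a pickled dict back from args.txt; the real function in
# JustinEvans-github/fasttext may differ.
def update_arguements(new_args, path='args.txt'):
    args = {}
    if os.path.exists(path):
        with open(path, 'rb') as f:
            args = pickle.load(f)
    args.update(new_args)      # merge/override the supplied keys
    with open(path, 'wb') as f:
        pickle.dump(args, f)   # persist for later pipeline steps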