Example #1
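A training entry point that hands off to a data processor, mirrors three working directories to S3, and then trains a regression model and a k-means model on the prepared feature files.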
def startTrain(self):
    # Prepare the features, then mirror the working directories to S3.
    self.data_processor.start_train(self.bucket_name)
    self.upload_dir_s3(self.feature_path)
    self.upload_dir_s3(self.plot_path)
    self.upload_dir_s3(self.output_path)
    # Train both models on the prepared feature files.
    regression.train(self.bucket_name, self.feature_path, "regression.csv",
                     self.output_path + "regression/", self.plot_path)
    k_means.train(self.bucket_name, self.feature_path, "pca.csv",
                  self.output_path + "k-means/", self.plot_path)
Example #2
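A Flask admin view: on POST it collects all stored sensor readings, reshapes them into feature dicts, retrains the regression model, and stamps the retrain time on a status row; on GET it renders the last training time.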
from flask import request, redirect, render_template


def admin():
    if request.method == 'POST':
        sensors = Sensor.query.all()
        sensorData = []
        for i in sensors:
            sensorData.append({
                'rpiId': i.rpiId,
                'temp': i.temp,
                'date': int(i.date.strftime("%m")),   # month number
                'hour': int(i.time.strftime("%H")),
                'minute': int(i.time.strftime("%M"))
            })
        # Retrain on the full sensor history; on success, record the
        # training timestamp on the status row before redirecting.
        if regression.train(sensorData):
            status = RpiStatus.query.get(5)
            status.updateStatus()
            db.session.add(status)
            db.session.commit()
        return redirect('/admin')
    else:
        lastTrain = RpiStatus.query.get(5).lastActive
        lastTrain = lastTrain.strftime("%d %B %Y - %H:%M")
        return render_template('admin.html', lastTrain=lastTrain)
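Example #3
A caching helper: it loads a pickled linear regressor from disk if one exists; otherwise it holds out the last 1000 rows for evaluation, trains a new model, reports its metrics, and pickles it for next time.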
import os
import pickle


def get_regressor(X, y):
    # Reuse a previously trained model if one has been pickled.
    if os.path.isfile('data/linear_regressor.pkl'):
        with open('data/linear_regressor.pkl', 'rb') as file:
            regressor = pickle.load(file)
    else:
        # Hold out the last 1000 samples for evaluation.
        X_train, X_test, y_train, y_test = X[:-1000], X[-1000:], y[:-1000], y[-1000:]
        regressor, r2_score, rmse = train(X_train, y_train, X_test, y_test)
        with open('data/linear_regressor.pkl', 'wb') as file:
            pickle.dump(regressor, file)
        print('Finished training regressor')
        print('R2 Score: {}'.format(r2_score))
        print('RMSE: {}'.format(rmse))
    return regressor
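A minimal usage sketch for the helper above. The random arrays and the call shapes are assumptions for illustration; it also assumes the snippet's own train() helper is importable.

import numpy as np

# Hypothetical caller: X and y would normally come from the project's
# data pipeline; random arrays stand in for them here.
X = np.random.rand(5000, 3)
y = np.random.rand(5000)
model = get_regressor(X, y)        # trains and caches on the first call
preds = model.predict(X[-1000:])   # scores the held-out tail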
Example #4
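A YAML-driven pipeline runner: it loads a config file, honours a top-level cache flag, and then dispatches each remaining config key to the matching parsing, manipulation, customization, plotting, or model-training routine.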
def main():

    if len(sys.argv) > 1 and sys.argv[1] == "--debug":
        debug()

    configfile = ""
    if path.exists(os.getcwd() + "/config/config.yml"):
        configfile = os.getcwd() + "/config/config.yml"
    elif path.exists(os.getcwd() + "/config/config.yaml"):
        configfile = os.getcwd() + "/config/config.yaml"
    else:
        print("No config.yml file")
        sys.exit(1)  # nothing to run without a config file

    with open(configfile, 'r') as stream:
        try:
            # safe_load avoids executing arbitrary YAML tags, and `config`
            # avoids shadowing the built-in name `dict`.
            config = yaml.safe_load(stream)
            print("config file is parsed successfully.")
        except yaml.YAMLError as exc:
            print(exc)
            sys.exit(1)

    # A single top-level `cache` flag controls caching; when it is
    # explicitly off, remove any stale cache directory.
    caching = bool(config.get('cache', False))
    if 'cache' in config and not caching and path.exists(parsing.cache_path):
        shutil.rmtree(parsing.cache_path)

    for key, value in config.items():
        if caching:
            # With caching enabled: either build the cache or read from
            # it, then fall back to normal dispatching.
            if not path.exists(parsing.cache_path) and key == 'cache':
                parsing.cache_data()
                caching = False
            elif path.exists(parsing.cache_path):
                if key == 'cache':
                    caching = False
                    parsing.read_from_cache()
                continue

        if key.startswith('cache'):
            parsing.cache_data()

        if key == "data":
            # Choose between a full read and a limited read of the input.
            if 'read' in value:
                data = parsing.read(value["read"])
            else:
                data = parsing.read_limited(value["read-limited"])
        if re.search("concat", key, re.IGNORECASE):
            manipulation.concat(value)
        if re.search("copy-data", key, re.IGNORECASE):
            parsing.copy(value)
        if re.search("csv", key, re.IGNORECASE):
            parsing.to_csv(value)
        if re.search("customize-cells", key, re.IGNORECASE):
            user_customization.customize(value)
        if re.search("customize-column", key, re.IGNORECASE):
            user_customization.customize_column(value)
        if re.search("customize-row", key, re.IGNORECASE):
            user_customization.customize_row(value)
        if re.search("delete-columns", key, re.IGNORECASE):
            manipulation.delete(value)
        if re.search("delete-df", key, re.IGNORECASE):
            parsing.delete_df(value)
        if re.search("delete-rows", key, re.IGNORECASE):
            manipulation.delete_row(value)
        if key.lower().startswith(("de-normalize")):
            alterations.denormalize(value)
        if re.search("display", key, re.IGNORECASE):
            util.display(value)
        if re.search("fillna-by-search", key, re.IGNORECASE):
            manipulation.fillna_by_search(value)
        elif re.search("fillna-by-mean", key, re.IGNORECASE):
            manipulation.fillna_by_mean(value)
        elif re.search("fillna", key, re.IGNORECASE):
            manipulation.fillna(value)
        if re.search("generate-column", key, re.IGNORECASE):
            user_customization.customize_column(value)
        if re.search("group-by", key, re.IGNORECASE):
            manipulation.group_by(value)
        if re.search("lightgbm", key, re.IGNORECASE):
            decision_tree.train(value)
        if key.lower().startswith("lstm"):
            lstm.train(value)
        if re.search("merge", key, re.IGNORECASE):
            manipulation.merge(value)
        if key.lower().startswith("normalize-scaled"):
            alterations.normalize_scaled(value)
        elif key.lower().startswith("normalize"):
            alterations.normalize(value)
        if re.search("ohe", key, re.IGNORECASE):
            manipulation.ohe(value)
        if re.search("partition", key, re.IGNORECASE):
            for l in value:
                for k1, v1 in l.items():
                    split_merge.input_partition(v1, k1)
        if key.lower().startswith(("matplot")):
            mat_plot_lib.plot(value)
        if re.search("dfs", key, re.IGNORECASE):
            for l in value:
                for k, v in l.items():
                    dfs.run_dfs(k, v)
        if re.search("keras", key, re.IGNORECASE):
            regression.train(value)
        if key.lower().startswith("script"):
            manipulation.script_run(value)
        if re.search("transfer", key, re.IGNORECASE):
            manipulation.transfer(value)
        if re.search('xgboost', key, re.IGNORECASE):
            xgboost_impl.train(value)
Example #5
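The smallest example in the set: a wrapper that simply forwards a list of logins to train(), presumably so the call can be scheduled on a background worker.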
def train_async(logins):
    train(logins)
Example #6
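A learning-rate sweep: the same data file is reloaded and the model retrained once per rate, and each run's results series is kept for plotting.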
# Candidate learning rates for the sweep.
rates = [2, 0.5, 0.7, 0.9, 0.5, 0.1, 0.7, 0.8, 0.9]

results = []
for r in rates:
    # Fresh model per rate: reset, reload the data split, retrain.
    regression.reset()
    regression.load(data_file, 75)
    results.append(regression.train(lim, thr, r))

# One x-axis per results series, for plotting.
axes = [range(len(res)) for res in results]
Example #7
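A Flask-style JSON endpoint: it validates that the request body carries an email and a data payload, then kicks off training for that user.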
def train_():
    js = request.get_json()
    # Reject requests that lack a body or either required field.
    if js is None or 'email' not in js or 'data' not in js:
        return 'missing email or data', 400
    train(js['email'], js['data'])
    return 'success'
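A client-side sketch of calling this endpoint. The host, route, and payload values are assumptions for illustration; only the two JSON keys come from the handler itself.

import requests

# Hypothetical client call: the URL is an assumption; 'email' and 'data'
# are the keys the handler above checks for.
resp = requests.post('http://localhost:5000/train_',
                     json={'email': 'user@example.com', 'data': [1, 2, 3]})
print(resp.status_code, resp.text)  # 200 'success' on a well-formed request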
Example #8
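An auto-tuning driver: the tail of an argument parser selects which kernel templates (conv, gemm, pool) to tune, a CUDA context factory is defined, and the main block benchmarks each operation, fits a regression model to the timings, and prunes and exports the resulting kernels.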
    # Keep only the templates whose command-line flag was set.
    ops, wraps = ['conv', 'gemm', 'pool'], [sc.templates.Conv, sc.templates.GEMM, sc.templates.Pool]
    ops = [wrap for operation, wrap in zip(ops, wraps) if getattr(args, operation)]

    # Done
    return (args.database, args.device, ops, args.nsamples)

def cuda_environment(device):
    # Flatten all devices across platforms, then build a context/stream
    # pair for the requested one.
    platforms = sc.driver.platforms()
    devices = [d for platform in platforms for d in platform.devices]
    device = devices[device]
    context = sc.driver.Context(device)
    stream = sc.driver.Stream(context)
    return device, context, stream
    
if __name__ == "__main__":
    # Get arguments
    database, device, operations, nsamples = parse_arguments()
    
    # Initialize CUDA environment
    init_cuda = lambda: cuda_environment(device)
    
    # Run the auto-tuning
    for OpType in operations:
        print("----------------")
        print('Now tuning {}:'.format(OpType.id))
        print("----------------")
        X, Y = dataset.benchmarks(OpType, nsamples, init_cuda)
        model = regression.train(OpType, X, Y)
        kernels = regression.prune(OpType, model, init_cuda)
        export(database, kernels, model, OpType.id, init_cuda)
Example #9
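A fragment from a Gaussian-regression segmentation loop: each iteration empties the previous sample buffer, draws a fresh sample of 3D test points, and refits the model; the data is then segmented, and the results-display section is truncated here.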
        #globals.Sp.sanityCheck(globals.printArrayInformation,globals.saveArrayInformation,'Sp-' + str(countSnew))

        globals.Snew.empty()

        #/////////////////////////////////////////////////////////
        #//GET NEW SAMPLE OF TEST 3D Data POINTS//////////////////
        #/////////////////////////////////////////////////////////

        globals.Test.extractTest()
        #globals.Test.sanityCheck(globals.printArrayInformation,globals.saveArrayInformation,'Test-' + str(countSnew))

        #/////////////////////////////////////////////////////////
        #//APPLY GAUSSIAN REGRESSION MODEL ///////////////////////
        #/////////////////////////////////////////////////////////

        globals.Model = regression.train()

        # Compile a (Theano-style) function that reports the number of
        # remaining sample points.
        getCount = function([], globals.Snew.X.shape[0])
        count = np.uint16(getCount())

#/////////////////////////////////////////////////////////
#//SEGMENT////////////////////////////////////////////////
#/////////////////////////////////////////////////////////

    for j in range(1, globals.N):

        segment.do(j)

#/////////////////////////////////////////////////////////////////
#//SHOW RESULTS
#/////////////////////////////////////////////////////////////////