示例#1
0
文件: bot.py 项目: j-tai/Appalache
async def _do_command(message: discord.message.Message):
    """Parse and dispatch the command contained in *message*.

    A bare prefix (no command text) repeats the author's last remembered
    command; otherwise the parsed command is stored so it can be repeated
    later, looked up in ``commands`` and executed.  Unknown commands get an
    insult reply.
    """
    # Lazy %-args: the logging module formats only if INFO is enabled.
    log.info("%s issued command in #%s: '%s'",
             message.author, message.channel.name, message.content)
    # Drop the two-character prefix, then split into (command, rest).
    parts = message.content[2:].strip().split(maxsplit=1)
    if not parts:
        # No command given: replay the author's last remembered command.
        with data.data_mutex:
            cmds = _last_commands()
            parts = cmds.get(message.author.id)
        if not parts:
            await send(
                message.channel,
                "Typing `a.` repeats your last command. But I don't remember what your last command was!"
            )
            return
    else:
        # Remember this command so a bare prefix can replay it later.
        with data.data_mutex:
            cmds = _last_commands()
            cmds[message.author.id] = parts
            data.save_data()
    cmd = parts[0].lower()
    args = parts[1] if len(parts) > 1 else ''
    if cmd in commands:  # membership test on the dict itself, not .keys()
        # Run command.
        try:
            await commands[cmd](message, cmd, args)
        except command.Done:
            # Commands raise Done to end cleanly; nothing more to do.
            pass
    else:
        # No matching command.
        await send(message.channel,
                   "%s *(Unknown command.)*" % esc(_get_insult()))
示例#2
0
def select_scenario(set_vec):
    """Run one scenario: gather options, simulate or reload, then plot."""
    # Initial options and (possibly) previously saved data.
    inc1, inc2, dat_arr, set_vec = s.main_options(set_vec)

    fresh_run = set_vec[5] == 'n'
    if fresh_run:
        # Fresh simulation: ask for a duration and build the objects.
        sec = s.simulation_time()
        s1 = spring.Spring(inc1)
        s2 = spring.Spring(inc2)
        t = spring.Time(sec, set_vec[0])
        # Record the trial number, run the calculation, persist results.
        set_vec.append(data.get_num())
        s1, s2, ft, dat_arr = calc.run_calc(s1, s2, t)
        data.save_data(set_vec[6], dat_arr)
    else:
        # Replay: unpack a previously saved run instead of simulating.
        sst, ft = dat_arr[0], dat_arr[1]
        s1, s2, t = sst[0], sst[1], sst[2]
        set_vec.append('null')

    # Plot either way; report where fresh data went, then loop back.
    p.run_plot(s1, s2, t, ft, set_vec)
    if fresh_run:
        print(f'Data saved in:  trials/trial_{set_vec[6]}\n')
    run_prog(set_vec)
def fit_country(country, save_to_json=False):
    """Fit the epidemic-model parameters to *country*'s reported data.

    Minimizes (Nelder-Mead) the mean normalized absolute error between
    simulated and reported cases/deaths, prints the fitted rates,
    re-simulates 60 days with the optimum and optionally saves to JSON.

    Returns the final (time, cases, healthy, recovered, deaths) arrays.
    """
    def cost_function(x):
        """Mean normalized |error| over cases and deaths for parameters *x*."""
        (a, b, c, d, e, f) = x
        # Parameters are searched as mantissa/exponent pairs: m * 10**-e.
        v = a * 10**(-b)
        K_r_0 = c * 10**(-d)
        K_d_0 = e * 10**(-f)
        time_sim, cases_sim, healthy_sim, recovered_sim, deaths_sim \
            = calculate_epidemic(
                C=0, v=v, x_n=x_n, y_n=y_n, t_final=max(time_number_days),
                K_r_0=K_r_0, K_r_minus=0,
                K_d_0=K_d_0, K_d_plus=0)
        # Simulation times need not coincide with reporting days; interpolate.
        interp_cases = interpolate.interp1d(time_sim,
                                            cases_sim,
                                            fill_value='extrapolate')
        interp_deaths = interpolate.interp1d(time_sim,
                                             deaths_sim,
                                             fill_value='extrapolate')
        # Normalizers are loop-invariant: compute them once, not per day.
        max_deaths = max(deaths_ref)
        max_cases = max(cases_ref)
        fitness = 0
        for day, cases_i, deaths_i in zip(time_number_days, cases_ref,
                                          deaths_ref):
            fitness += abs(deaths_i - interp_deaths(day)) / max_deaths
            fitness += abs(cases_i - interp_cases(day)) / max_cases
        # Two residuals (cases + deaths) contribute per reporting day.
        fitness /= 2 * len(time_number_days)
        print("Fit mean difference: " + str(fitness), end='\r')

        return fitness

    x_n = 1e5  # initial healthy population arbitrary

    time, time_number_days, cases_ref, deaths_ref = get_data(country)

    y_n = cases_ref[0]  # seed the model with the first reported case count
    x0 = (2.78, 6.08, 25, 1.9, 1, 2)
    print('Fitting...')
    res = scipy.optimize.minimize(cost_function, x0, method="Nelder-Mead")
    x_opt = res.x
    print('-' * 50)
    (a, b, c, d, e, f) = x_opt
    v = a * 10**(-b)
    K_r_0 = c * 10**(-d)
    K_d_0 = e * 10**(-f)
    print("Fit mean difference: " + "{:.2%}".format(res.fun))
    print("K_c = %.2e" % v)
    print("K_r = %.2e" % K_r_0)
    print("K_d = %.2e" % K_d_0)
    # Re-simulate 60 days with the fitted parameters for the caller.
    time_sim, cases_sim, healthy_sim, recovered_sim, deaths_sim \
        = calculate_epidemic(
            C=0, v=v, x_n=x_n, y_n=y_n, t_final=60, K_r_0=K_r_0, K_r_minus=0,
            K_d_0=K_d_0, K_d_plus=0)

    if save_to_json:
        print("Saving...")
        save_data(country, time, time_sim, cases_sim, deaths_sim)
    return time_sim, cases_sim, healthy_sim, recovered_sim, deaths_sim
    def win(self):
        """Handle winning a map: persist record/unlock state, draw the banner.

        Stops the game clock, records the completion (time + next-level
        unlock) when it is a new record or a first completion, paints the
        victory banner and messages, then returns to the menu.
        """
        self.stop()

        record = False
        # NOTE(review): the membership test keys on self.map.name but every
        # write keys on self.map.level — confirm which key is intended.
        if self.map.name in data.playerData.times:
            # NOTE(review): '>' treats a larger self.time as the new record;
            # presumably self.time counts down (remaining time) — confirm.
            if self.time > data.playerData.times[self.map.name]:
                record = True
                self._record_progress()
        else:
            # First completion of this map always counts as a record.
            record = True
            self._record_progress()

        banner = [
            " ",
            " #     #                       #     #                                  #",
            " #     #  ####   ####  ####    #     #  ####  #   #  ####  ####  #   #  #",
            " #     # #    # #     #    #   #     # #    # ##  # #     #    # #   #  #",
            "  #   #  #    # #     ######    #   #  ###### # # # #     ###### #   #  #",
            "   # #   #    # #     #          # #   #      #  ## #     #      #   #   ",
            "    #     ####   ####  ####       #     ####  #   #  ####  ####  #####  #",
            " "
        ]
        # Banner origin: roughly one third down, one fifth across the screen.
        lines = int(SCREEN_LINES / 3)
        colums = int(SCREEN_COLUMNS / 5)

        console.setColorXY(
            colums, lines + len(banner) + 2,
            "                        Terminou em " +
            str(self.map.timeCounter - self.time) + " segundos!",
            Colors.FOREGROUND_GREEN)
        if record:
            console.setColorXY(
                colums, lines + len(banner) + 3,
                "                    Você bateu seu record de tempo!",
                Colors.FOREGROUND_YELLOW)
        # Paint each '#' cell of the ASCII banner as a green block.
        for yBanner in range(lines, lines + len(banner)):
            for xBanner in range(colums,
                                 colums + len(banner[yBanner - lines])):
                text = banner[yBanner - lines][xBanner - colums]

                if text == '#':
                    console.setColorXY(
                        xBanner, yBanner, ' ',
                        Colors.BACKGROUND_GREEN | Colors.BACKGROUND_BOLD)

        sleep(3)
        from menu import play
        play()

    def _record_progress(self):
        """Store the completion time, unlock the next level, and persist."""
        data.playerData.times[self.map.level] = self.time
        unlocked = map_editor.get_map_by_level(self.map.level + 1)
        if unlocked is not None:  # identity check instead of `not ... == None`
            data.playerData.unlock(unlocked)
        data.save_data()
示例#5
0
def main(targets):
    """Run the pipeline stages requested in *targets*.

    Only the 'test' target is handled: generate and save the dataset, run
    the two baselines and the main model, and print each RMSE.
    """
    # Context managers close the config file handles promptly
    # (json.load(open(...)) leaked them).
    with open('config/data-params.json') as f:
        data_config = json.load(f)
    with open('config/main-model-params.json') as f:
        main_model_config = json.load(f)

    if 'test' in targets:
        dataset = generate_data(**data_config)
        save_data(dataset, **data_config)
        first_baseline_rmse = first_base()
        knn_baseline_rmse = knn_base()
        main_rmse = build_model(dataset, **main_model_config)
        print('Main RMSE: ', main_rmse, 'First baseline RMSE: ',
              first_baseline_rmse, 'KNN baseline RMSE: ', knn_baseline_rmse)
示例#6
0
文件: main.py 项目: gitwikc/pybrary
def main():
    """Entry point: run the library menu loop, then save on exit."""
    library = Library()
    # Map menu labels to *callables*; menu_flow invokes the chosen one.
    # Bug fix: 'Graphs' and 'Update' were previously *called* while building
    # this dict (note the ()), running both actions immediately and storing
    # their return values instead of the bound methods.
    menu_flow({
        'New': library.create_new_book,
        'View': library.view_all_books,
        'Graphs': library.view_graphs,
        'Update': library.update_library,
        'Delete': library.delete_book,
        'Save': lambda: save_data(library.df, verbose=True)
    })
    save_data(library.df)
    print(
        "\N{winking face} Don't forget to leave a star on GitHub https://github.com/gitwikc/pybrary"
    )
示例#7
0
    async def run(self, message: discord.message.Message, label: str,
                  args: str):
        """Report a user's active warnings ('check' subcommand)."""
        # await self.apply_permission(message)
        await self.apply_cooldown(message)
        tokens = args.split()
        target = tokens[0] if tokens else message.author.id
        target_self = target == message.author.id
        subcmd = tokens[1] if len(tokens) > 1 else 'check'

        if subcmd != 'check':
            await bot.send(message.channel, 'Incorrect arguments for command.')
            return

        if target_self:
            user = '******'
        else:
            user = '******'
        with data.data_mutex:
            warn = get_warnings(target, create=False)
            data.save_data()
        if not warn:
            await bot.send(message.channel,
                           '%s committed no offenses.' % user)
            return

        # Time until the oldest warning expires, rescaled to a readable unit.
        oldest = warn[0]
        delta = oldest - time.time() + data.config['botguard-warning-expire']
        suffix = 's'
        for limit, unit in ((60, 'min'), (60, 'h'), (24, 'd'),
                            (30, 'mo'), (12.16666666666667, 'y')):
            if delta <= limit:
                break
            delta /= limit
            suffix = unit
        delta = int(delta) + 1
        await bot.send(
            message.channel,
            '%s committed **%d** offense(s), with the oldest one '
            'expiring in **%d %s**.' %
            (user, len(warn), delta, suffix))
示例#8
0
    def _get_tag_list(self, msg):
        """Return (rating, character, copyright, general) tags for a photo.

        Results are cached per (chat, file); on a cache miss the photo is
        downloaded and each handler is tried in order until one succeeds.

        Returns None for non-photo messages; raises InferenceError when no
        handler can tag the image.
        """
        content_type, chat_type, chat_id = telepot.glance(msg)
        if content_type != "photo":
            return None

        # Telegram supplies several sizes; the last entry is the largest.
        file_id = msg["photo"][-1]["file_id"]
        (rating, character, copyright, general) = ("", set(), set(), set())

        db_data = data.load_data(chat_id, file_id)
        if db_data:
            rating = db_data["rating"]
            character = db_data["character"]
            copyright = db_data["copyright"]
            general = db_data["general"]

            return (rating, character, copyright, general)

        handler_name = ""
        # Bug fix: initialize res so an empty handlers_list raises
        # InferenceError below instead of NameError at `if not res`.
        res = None
        with tempfile.NamedTemporaryFile() as f:
            self.download_file(file_id, f.name)

            for h in handlers_list:
                res = h.run(f)
                if res:
                    (rating, character, copyright, general) = res
                    handler_name = h.HANDLER_NAME
                    break
                else:
                    # Rewind file for reading with other handler.
                    f.seek(0)

            if not res:
                raise InferenceError("Cannot do inference on this image")

        data.save_data(
            chat_id, file_id, {
                "rating": rating,
                "character": character,
                "copyright": copyright,
                "general": general,
                "time": msg["date"],
                "handler": handler_name
            })

        return (rating, character, copyright, general)
示例#9
0
文件: bot.py 项目: e2gal/antisarubot
    def _get_tag_list(self, msg):
        """Return (rating, character, copyright, general) tags for a photo.

        Cached per (chat, file); on a cache miss the photo is downloaded and
        each handler is tried in order until one succeeds.

        Returns None for non-photo messages; raises InferenceError when no
        handler can tag the image.
        """
        content_type, chat_type, chat_id = telepot.glance(msg)
        if content_type != "photo":
            return None

        # Telegram supplies several sizes; the last entry is the largest.
        file_id = msg["photo"][-1]["file_id"]
        (rating, character, copyright, general) = ("", set(), set(), set())

        db_data = data.load_data(chat_id, file_id)
        if db_data:
            rating    = db_data["rating"]
            character = db_data["character"]
            copyright = db_data["copyright"]
            general   = db_data["general"]

            return (rating, character, copyright, general)

        handler_name = ""
        # Bug fix: initialize res so an empty handlers_list raises
        # InferenceError below instead of NameError at `if not res`.
        res = None
        with tempfile.NamedTemporaryFile() as f:
            self.download_file(file_id, f.name)

            for h in handlers_list:
                res = h.run(f)
                if res:
                    (rating, character, copyright, general) = res
                    handler_name = h.HANDLER_NAME
                    break
                else:
                    # Rewind file for reading with other handler.
                    f.seek(0)

            if not res:
                raise InferenceError("Cannot do inference on this image")

        data.save_data(chat_id, file_id, {
            "rating":    rating,
            "character": character,
            "copyright": copyright,
            "general":   general,
            "time":      msg["date"],
            "handler":   handler_name
        })

        return (rating, character, copyright, general)
示例#10
0
def choose_meme(memes):
    """Pick the first meme in *memes* not sent within the last 100000 s.

    Prunes expired entries from the persisted 'sent-memes' list, records the
    chosen meme and saves the data store.  Returns None when every candidate
    was sent recently.
    """
    cur_time = time.time()
    with data.data_mutex:
        if 'sent-memes' not in data.data:
            data.data['sent-memes'] = []
        sent = data.data['sent-memes']
        # Drop entries older than the repeat window (list is time-ordered).
        while sent and sent[0]['time'] + 100000 < cur_time:
            sent.pop(0)
        # O(1) membership per candidate instead of rescanning `sent`.
        sent_ids = {s['id'] for s in sent}
        for meme in memes:
            if meme['id'] not in sent_ids:
                sent.append({'id': meme['id'], 'time': cur_time})
                data.save_data()
                return meme
    return None
示例#11
0
文件: main.py 项目: aoviedo92/easyip
 def on_btn_only_save_clicked(self):
     """Validate the form and, when valid, persist a new network-config tile.

     Returns the created NetConfigTile, or None when validation fails.
     """
     self.red_code = True
     # Guard clause: bail out unless both checks pass (De Morgan of the
     # original `not A and not B`).
     if self.check_full_list() or self.validator():
         return None
     # Collect the form fields.
     adapter = self.combo.currentText()
     id_ = find_id_by_ad(adapter, self.IPCONFIG)
     nm = self.line_nm.text()
     ip = self.line_ip.text()
     ms = self.line_ms.text()
     gw = self.line_gw.text()
     dns = self.line_dns.text()
     save_data(str(id_), str(adapter), str(nm), str(ip), str(ms), str(gw), str(dns))
     # Show the new tile, replacing the placeholder tile if present.
     tile = NetConfigTile(id_, adapter, nm, ip, ms, gw, dns)
     self.layout_wrapper_tile.addWidget(tile)
     if self.default_tile:
         self.default_tile.remove_tile()
     tile.add_tile()
     self.refresh_data()
     self.red_code = False
     self.restart_form()
     return tile
示例#12
0
async def confirm(ctx):
    """Finalize the meeting queued for this channel, after validation."""
    status = data.check_temp_meeting(ctx.channel.id)
    if status is False:
        await ctx.send("No meeting in queue. Add a meeting first")
        return
    check = data.check_allvalues_temp(ctx)
    if check == 0:
        await ctx.send("Fill all the fields first. Status of the meeting: ")
        await ctx.send(data.temp_values_remaining(ctx.channel.id))
        return
    # All fields present; make sure the scheduled time is in the future.
    isValidDateTime = data.check_valid_datetime(ctx.channel.id)
    if isValidDateTime is not True:
        await ctx.send("The date time is in past. Please update it.")
        return
    # Call the final save function.
    await ctx.send(data.temp_values_remaining(ctx.channel.id))
    data.save_data(ctx.channel.id, client)
    await ctx.send("Meeting has been added to the database")
示例#13
0
def _run():
    """Parse CLI options, predict with each model, then aggregate test output."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus',
                        help='GPU数。',
                        type=int,
                        default=tk.get_gpu_count())
    parser.add_argument('--tta-size',
                        help='TTAで何回predictするか。',
                        type=int,
                        default=256)
    parser.add_argument('--target',
                        help='対象のデータ',
                        choices=('val', 'test'),
                        default='test')
    parser.add_argument('--no-predict',
                        help='予測を実行しない。',
                        action='store_true',
                        default=False)
    args = parser.parse_args()

    # Predict with each configured model (spawning a child process per model).
    for models_dir, model_name, cv_index, cv_size, split_seed in tqdm.tqdm(
            _MODELS, desc='models', ncols=100, ascii=True):
        _predict_model(args, models_dir, model_name, cv_index, cv_size,
                       split_seed)

    if args.target == 'test':
        # Aggregate: load every model's cached per-TTA predictions...
        pred_target_list = [[
            joblib.load(model_dir / _CACHE_NAME_FORMAT.format(
                target=args.target, model=model_name, tta=tta_index))
            for tta_index in range(args.tta_size)
        ] for model_dir, model_name, cv_index, _, _ in _MODELS]
        # ...average over models and TTA rounds, then take the argmax class.
        pred_target_proba = np.mean(pred_target_list, axis=(0, 1))
        pred_target = pred_target_proba.argmax(axis=-1)
        # Save pseudo labels and the submission file.
        joblib.dump(pred_target_proba, _MODELS_DIR / 'pseudo_label.pkl')
        data.save_data(_MODELS_DIR / 'submit.tsv', pred_target)
示例#14
0
async def handler(message: discord.message.Message):
    """Watch for bot-command-like messages outside the allowed channels.

    A matching message earns its author a warning; enough warnings trigger
    a kick and eventually a ban, with a configured response sent each time.
    """
    global regex
    # Messages in the designated bot channels are always allowed.
    if message.channel.name in data.config['botguard-correct-channels']:
        return
    if regex is None:
        # Compile lazily so the pattern reflects the loaded config.
        regex = re.compile(data.config['botguard-regex'])

    if not regex.fullmatch(message.content):
        return

    log.info("[botguard] message in #%s from @%s matched: '%s'" %
             (message.channel.name, message.author, message.content))
    author_id = message.author.id

    if author_id in data.config['botguard-exempt']:
        log.info('user is exempt; displaying exempt message')
        msg = data.config['botguard-offense-exempt']
        await bot.send(message.channel, msg % ('<@%s>' % author_id))
        return

    # Record the offense under the data lock, then act on the new count.
    cur_time = time.time()
    with data.data_mutex:
        warn = get_warnings(author_id)
        warn.append(cur_time)
        data.save_data()
    warns = len(warn)

    if warns >= data.config['botguard-ban']:
        # Ban
        log.info('banning %s' % message.author)
        await bot.client.ban(message.author, delete_message_days=0)
        warns = data.config['botguard-ban']
    elif warns >= data.config['botguard-kick']:
        # Kick
        log.info('kicking %s' % message.author)
        await bot.client.kick(message.author)

    msg = data.config['botguard-offense-%d' % warns]
    await bot.send(message.channel, msg % ('<@%s>' % author_id))
    log.info('warned %s (now %d offenses)' % (message.author, warns))
示例#15
0
            j += 1
        i += 1
        #print array

    result = []
    i = len(array) -1
    count = 0
    while count < 10:
        result.append(array[i])
        i -= 1
        count +=1

    return result


# NOTE(review): this snippet is Python 2 (print statements below).
if __name__ == '__main__':
    if not is_exist:
        # First run: build an array via create_array and persist it.
        array = create_array(10 ** 6, 5)
        save_data(array)
    else:
        # Subsequent runs: load the saved array and time the top-10 query.
        array = get_data()

        import time
        t1 = time.time()
        result = get_top10(array)
        t2 = time.time()
        t = float(t2 - t1)
        # NOTE(review): gmtime() on a duration renders it as a clock time;
        # only meaningful for spans under 24 hours.
        print 'time : ', time.gmtime(t)
        print result

    os.system('pause')
示例#16
0
def main(model=None):
    """Train *model* (loading or creating one if not given) on the dataset.

    Loads or builds the preprocessed data, splits train/dev, runs the
    configured number of epochs (parallel / combined / plain variants),
    tracks losses, checkpoints, plots the loss curves, and optionally
    saves the model.

    Returns (model, [train_losses, dev_losses]).
    """

    print(f'readying model & data @ {now()}')

    # Preprocess on first run; afterwards the cached data loads directly.
    data = load_data()
    if not data:
        save_data(preprocess())
        data = load_data()

    if not model:
        if not config.fresh_model:
            model = load_model()
        if not model:
            model = make_model()
            save_model(model)
            # Reload so training starts from the serialized state.
            model = load_model()
            print('created ',end='')
        else: print('loaded ',end='')
        print(f'model: {describe_model(model)}')

    print(f'total files: {len(data)}, ',end='')

    data, data_dev = split_dataset(data)

    # Resolve the batch size: clamp to the train-set size; -1 means
    # "use the dev-set size".
    if config.batch_size > len(data):
        config.batch_size = len(data)
    elif config.batch_size == -1:
        config.batch_size = len(data_dev)

    print(f'train: {len(data)}, dev: {len(data_dev)}, batch size: {config.batch_size}')

    print(f'hm train: {sum(len(datapoint) for datapoint in data)}, '
          f'hm dev: {sum(len(datapoint) for datapoint in data_dev)}, '
          f'learning rate: {config.learning_rate}, '
          f'optimizer: {config.optimizer}, '
          f'\ntraining for {config.hm_epochs} epochs.. ',end='\n')

    # With a single batch (or fully parallel+combined training) there is
    # nothing to shuffle between epochs.
    one_batch = (config.batch_size == len(data)) or (config.train_combined and config.train_parallel)
    config.shuffle_epoch &= not one_batch
    # presumably windows overlap by hm_bars_slide bars, so each input yields
    # this many slid windows — confirm against the data pipeline.
    window_slide_multiplier = config.hm_bars_grouped//config.hm_bars_slide
    if config.ckp_save_epochs == -1: config.ckp_save_epochs = range(config.hm_epochs)

    data_losss, dev_losss = [], []

    if config.initialize_loss:

        print(f'initializing losses @ {now()}', flush=True)
        if not one_batch:
            data_losss.append(dev_loss(model,data))
        dev_losss.append(dev_loss(model,data_dev))
        print(f'initial losses: {data_losss, dev_losss}')

    print(f'training started @ {now()}', flush=True)

    for ep in range(config.hm_epochs):

        loss = 0

        if config.train_parallel and config.train_combined:
            # Whole dataset as one parallel batch: accumulate the gradients,
            # then take a single optimizer step.
            l, g = process_data_onebatch(model, data)
            loss += l
            give_grads(model, g)
            batch_size = sum(sum(len(inp) * window_slide_multiplier for inp, lbl in datapoint) for datapoint in data)
            sgd(model, batch_size=batch_size) if config.optimizer == 'sgd' else adaptive_sgd(model, ep, batch_size=batch_size)

        else:
            for i,batch in enumerate(batchify(data)):

                if config.disp_batches:
                    print(f'\tbatch {i}, {sum(len(datapoint) for datapoint in batch)}', end='', flush=True)

                batch_size = sum(sum(len(inp)*window_slide_multiplier for inp,lbl in datapoint) for datapoint in batch)

                if config.train_parallel:
                    l,g = process_batch_parallel(model,batch)
                    loss += l
                    give_grads(model,g)

                elif config.train_combined:
                    loss += process_batch_combined(model, batch)

                else:
                    # Plain training: carry model states across the (inp, lbl)
                    # windows of each datapoint, detaching so the autograd
                    # graph does not grow across windows.
                    for j,datapoint in enumerate(batch):
                        states = None
                        for k,(inp,lbl) in enumerate(datapoint):
                            out, states = respond_to(model, inp, states)
                            states = [state.detach() for state in states]
                            loss += sequence_loss(lbl,out)

                # One optimizer step per batch.
                sgd(model,batch_size=batch_size) if config.optimizer == 'sgd' else adaptive_sgd(model,ep,batch_size=batch_size)

                if config.disp_batches:
                    print(f', completed @ {now()}' ,flush=True)

        # Normalize by the total number of slid windows in the train set.
        loss /= sum(sum(len(inp)*window_slide_multiplier for inp,lbl in datapoint) for datapoint in data)

        data_losss.append(loss)
        dev_losss.append(dev_loss(model,data_dev))

        print(f'epoch {ep}, loss {loss}, dev loss {dev_losss[-1]}, completed @ {now()}', flush=True)

        if ep in config.ckp_save_epochs:
            save_model(model,f'{config.model_save_path}_ckp{ep}')

    # Final evaluation on both splits after the last epoch.
    data_losss.append(dev_loss(model,data))
    dev_losss.append(dev_loss(model,data_dev))

    print(f'final losses: {[data_losss[-1],dev_losss[-1]]}')

    print(f'training ended @ {now()}', flush=True)

    plot(data_losss)
    show()
    plot(dev_losss)
    show()

    # Keep a backup of the previously saved model before overwriting it.
    if config.overwrite_model or input(f'Save model as {config.model_save_path}? (y/n): ').lower() == 'y':
        save_model(load_model(),config.model_save_path+'_prev')
        save_model(model)

    return model, [data_losss, dev_losss]
示例#17
0
 def save_data(self, fname):
     """Persist this object's internal data to *fname* via the data module."""
     payload = self._data
     return data.save_data(fname, payload)
示例#18
0
                        city_freq_threshold=500,
                        nsample=[30, 5],
                        steps=2_000,
                        test_every=100)
        config.part_dirs = [base_dir]
    else:
        raise NotImplemented

    config.resume = args.resume

    if args.labels or args.data:
        if args.labels:
            typecode = 'I' if args.socialnet == 'vk' else 'L'
            save_labels(config.base_dir, typecode)
        if args.data:
            save_data(config, anew=True)
    else:
        # nbr_sampler = next(load_data(config))
        importlib.reload(sage_conv)
        from sage_conv import SAGEConv, SAGEConvWithEdges

        for nbr_sampler in load_data(config):
            if args.train:
                transductive_sage(nbr_sampler, config)
            if args.test:
                test(nbr_sampler, config)
            if args.predict:
                predict(nbr_sampler, config)

    if args.email:
        tools.send_email(body="%s completed" % __file__)
示例#19
0
    type=str,
    default='data_to_transfer/data',
    help=
    'folder containing input and target pickle files that need to be combined')

args = parser.parse_args()

all_data_x = []
all_data_y = []

# Glob the per-chunk input/target pickle files to be concatenated.
input_path = os.path.join(args.data, "*_input.pl")
target_path = os.path.join(args.data, "*_tar.pl")

input_data_list = glob.glob(input_path)
target_data_list = glob.glob(target_path)

# Sort so chunks are concatenated in a deterministic order.
for infile in sorted(input_data_list):
    print(infile)
    # Pickle files must be opened in binary mode; text mode breaks
    # pickle.load on Python 3.
    with open(infile, 'rb') as f:
        data_x = pickle.load(f)
    all_data_x += data_x

for tarfile in sorted(target_data_list):
    print(tarfile)
    with open(tarfile, 'rb') as f:
        data_y = pickle.load(f)
    all_data_y += data_y

# Persist the combined dataset next to the inputs.
data.save_data(all_data_x, all_data_y, os.path.join(args.data, 'data_x.pl'),
               os.path.join(args.data, 'data_y.pl'))
示例#20
0
import data
import menu
import helpers

while True:
    employees = data.load_data('employee_db.txt').readlines()
    option = int(menu.show_menu())
    if option == menu.SHOW_ALL_EMPLOYEES:
        menu.show_all(employees)
    elif option == menu.SHOW_EMPLOYEE:
        menu.show(employees)
    elif option == menu.CHANGE_SALARY:
        employees = menu.change_salary(employees)
        data.save_data('employee_db.txt', employees)
    elif option == menu.ADD_EMPLOYEE:
        employeeId, fName, lName, email, salary = menu.add_employee(employees)
        employees = data.add_employee(employees, employeeId, fName, lName,
                                      email, salary)
        data.save_data('employee_db.txt', employees)
    elif option == menu.DELETE_EMPLOYEE:
        employee_position, employee = menu.remove_employee(employees)
        employees = data.remove_employee(employees, employee_position,
                                         employee)
        data.save_data('employee_db.txt', employees)
    elif option == menu.GIVE_BONUS:
        bonus = menu.save_bonus_info(employees)

        bonus_db = data.save_bonus_info(employees, bonus)

        data.save_data('bonus_db.txt', bonus_db)
    elif option == menu.REPORT:
示例#21
0
    Removes 'chr' from first column in input files, and removes any
    invalid rows
    '''
    print("Cleaning {}".format(input_path))
    x = pd.DataFrame()
    with open(input_path) as f:
        for line in f:
            line = line.strip().split('\t')
            if(len(line) == 4):
                line[0] = line[0].lstrip('chr')
                x = pd.concat([x, pd.DataFrame([line])], ignore_index=True)

    x.to_csv(input_path, sep='\t', index=False, header=False)

all_data_x = []
all_data_y = []
num = (10**6)*5  # passed to data.load_data as the record count — confirm semantics
window_size = 10000
mini_window_size = 100

print("Processing data into inputs and targets")

all_data_x, all_data_y = data.load_data(args.input_data, args.target_data, num, window_size, mini_window_size)

# Saves processed data
input_path = os.path.join(args.output_dir, args.input_data.replace("out", "pl"))
target_path = os.path.join(args.output_dir, args.target_data.replace("out", "pl"))
# makedirs with exist_ok avoids the check-then-create race and also
# handles nested output directories.
os.makedirs(args.output_dir, exist_ok=True)
data.save_data(all_data_x, all_data_y, input_path, target_path)
    data = data.Data()
    #Read Data and load Train data
    data.read(config.FILE_PATH)
    print("data read successfully")
    #Cross Validation Data using Gradient Descent     
    #Model object
    model = model.Model()        
    for i in range(config.K_FOLDS):
        data.get_data(i)
        #Reinitialize Parameters
        model.reinit(data.size[1])
        #Train data keeping ith fold as validation set
        model.train(data)        
        #print("Training Completed")
        model.test(data)
        #Using Normal Equation
        model.normalEqn(data)
        #RMSE on Validation Set Using Normal Equation
        model.test_normalEqn(data)
        
    #Save RMSE to File
    model.save_tofile()
    #Plot RMSE for Training Data
    model.plot_rmse("Train")      
    #Plot RMSE for Validation Data
    model.plot_rmse("Validation")
    index = utils.find_min_rmse_val()
    data.get_data(i)
    data.save_data()