Example #1
    def solve_sequence(self):
        logging.info(
            '--------------------Solving one sequence---------------------------------'
        )
        self.set_sequence = SetSequence(self.data)
        self.set_sequence.exec_()
        sequence = self.set_sequence.set_sequence()

        sequence.print_sequence()

        self.one_time_model = Solver(self.data)
        self.one_time_model.current_data_set = self.current_data_set_number
        self.one_time_model.load_data_set()

        self.one_time_model.set_sequence(sequence)

        while not self.one_time_model.finish:
            self.one_time_model.next_step()

        total_error = 0
        for truck in itertools.chain(
                self.one_time_model.outbound_trucks.values(),
                self.one_time_model.compound_trucks.values()):
            truck.calculate_error()
            logging.info("Truck {0}, error {1}\n".format(
                truck.truck_name, truck.error))
            total_error += abs(truck.error)
        logging.info("Error: {0}\n".format(total_error))
Example #2
def run_calculator_in_command_line():
    print(
        '--------- ** Run A Scientific Calculator Using Command Line ** ---------'
    )
    print(
        '** Enter the command "-h" or "help" to show available calculator functions...\n'
    )
    problem = Solver()
    global valid_answer_previous
    while True:
        infix = input('---> ')
        if infix in ['exit', 'quit']:
            break
        elif infix == 'ans':
            print(valid_answer_previous)
        elif infix in ["help", "-h"]:
            helper()
        else:
            try:
                temp_ans = problem.solve(infix)
                if temp_ans in errors:
                    print(temp_ans)
                else:
                    if isFloat(temp_ans):
                        temp_ans = float(temp_ans)
                    else:
                        temp_ans = integer_modification(temp_ans)
                    valid_answer_previous = temp_ans
                    print(temp_ans)
            except Exception:
                print('Wrong Input')
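Examples #2 and #10 lean on helpers defined elsewhere in the project (errors, helper, isFloat, integer_modification). As a rough sketch of what the two conversion helpers might look like, inferred only from how they are called here (the real implementations may differ):

def isFloat(value):
    # Hypothetical: True when the answer has a fractional part.
    try:
        return float(value) != int(float(value))
    except (TypeError, ValueError):
        return False

def integer_modification(value):
    # Hypothetical: coerce a whole-numbered answer to a plain int.
    return int(float(value))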
Example #3
def main():
    # Initialise the solver
    solver = Solver(n=3, NUM_TERMS=5, epsilon=1e-4)
    env = simpy.Environment()
    env.process(example1(env, solver))
    # Run the simulation until all events in the queue are processed.
    # Pass `until=<time>` to halt the simulation after some amount of simulated time.
    env.run()
Example #4
def main(_):
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_index

    solver = Solver(FLAGS)
    if FLAGS.is_train:
        solver.train()
    else:
        solver.test()
Example #5
    def generate_data_set(self):
        # ask if sure

        self.data.arrival_times = []
        self.data.boundaries = []
        self.model = Solver(self.data)

        for i in range(len(self.data.data_set_list)):
            self.model.current_data_set = i
            self.model.set_data()
Example #6
def main():
    # make a string that describes the current running setup
    num = 0
    run_setup_str = f"{args.source}2{args.target}_k_{args.num_k}_kq_{args.num_kq}_lamb_{args.lamb_marg_loss}"

    while os.path.exists(f"record/{run_setup_str}_run_{num}.txt"):
        num += 1
    run_setup_str = f"{run_setup_str}_run_{num}"
    # e.g., svhn2mnist_k_4_kq_4_lamb_10.0_run_5

    # set file names for records (storing training stats)
    record_train = f"record/{run_setup_str}.txt"
    record_test = f"record/{run_setup_str}_test.txt"
    if not os.path.exists('record'):
        os.mkdir('record')  # create a folder for records if it doesn't exist

    # set the checkpoint dir name (storing model params)
    checkpoint_dir = f'checkpoint/{run_setup_str}'
    if not os.path.exists('checkpoint'):
        os.mkdir('checkpoint')  # create the folder if it doesn't exist
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)  # create the folder if it doesn't exist

    ####

    # create a solver: load data, create models (or load existing models),
    #   and create optimizers
    solver = Solver(args,
                    source=args.source,
                    target=args.target,
                    nsamps_q=args.nsamps_q,
                    lamb_marg_loss=args.lamb_marg_loss,
                    learning_rate=args.lr,
                    batch_size=args.batch_size,
                    optimizer=args.optimizer,
                    num_k=args.num_k,
                    num_kq=args.num_kq,
                    all_use=args.all_use,
                    checkpoint_dir=checkpoint_dir,
                    save_epoch=args.save_epoch)

    # run it (test or training)
    if args.eval_only:
        solver.test(0)
    else:  # training
        count = 0
        for t in range(args.max_epoch):
            num = solver.train(t, record_file=record_train)
            count += num
            if t % 1 == 0:  # run it on test data every epoch (and save models)
                solver.test(t,
                            record_file=record_test,
                            save_model=args.save_model)
            if count >= 20000 * 10:
                break
Example #7
def main():
    # Initialise the solver
    solver = Solver(epsilon=1e-6)
    # For higher precision, default is 5

    env = simpy.Environment()
    env.process(example1(env, solver))
    # Run the simulation until t = 10.0; omitting `until` would process
    # all events in the queue instead.
    env.run(until=10.0)
    print('total steps: ', step)
Example #8
def main():
    # Initialise the solver
    solver = Solver(n=5, epsilon=1e-6)

    env = simpy.Environment()
    env.process(lorenz(env, solver))
    # Run the simulation until STOP_TIME; omitting `until` would process
    # all events in the queue instead.
    env.run(until=STOP_TIME)

    # Plot the output
    plt.plot(data['t'], data['x'])
    plt.show()
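Examples #3, #7 and #8 all drive the Solver from a simpy process (example1, lorenz), but the process functions themselves are not shown. Here is a minimal runnable sketch of the shape such a process might take; the solver's step API and the data dict layout are assumptions, with a stub standing in for the real Solver:

import simpy

data = {'t': [], 'x': []}

class StubSolver:
    # Stand-in for the real Solver: one explicit Euler step of dx/dt = -x.
    def step(self, x, dt=0.01):
        return x - x * dt

def lorenz(env, solver):
    x = 1.0
    while True:
        data['t'].append(env.now)   # record for the plot above
        data['x'].append(x)
        x = solver.step(x)          # hypothetical integration API
        yield env.timeout(0.01)     # advance simulated time by dt

env = simpy.Environment()
env.process(lorenz(env, StubSolver()))
env.run(until=1.0)                  # halts the otherwise endless process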
Example #9
def main(config):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config["gpu_no"])

    save_path = Path(config["training"]["save_path"])
    save_path.mkdir(parents=True, exist_ok=True)

    mode = config["mode"]
    solver = Solver(config)

    if mode == "train":
        solver.train()
    if mode == "test":
        solver.test()
Example #10
def get_answer(expression):
    problem = Solver()
    try:
        temp_ans = problem.solve(expression)
        if temp_ans in errors:
            return str(temp_ans)
        else:
            global valid_answer_previous
            if isFloat(temp_ans):
                temp_ans = float(temp_ans)
            else:
                temp_ans = integer_modification(temp_ans)
            valid_answer_previous = temp_ans
            return str(temp_ans)
    except Exception:
        return 'Wrong Input'
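A quick usage sketch for get_answer, assuming the module-level helpers from Example #2 are in scope:

answer = get_answer('2 + 3 * 4')
print(answer)  # the evaluated result as a string, or 'Wrong Input' on bad input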
Example #11
def main(config):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config["gpu_no"])

    mode = config["mode"]

    save_path = Path(config["training"]["save_path"]) / config["version"]
    save_path.mkdir(parents=True, exist_ok=True)
    config["save_path"] = save_path

    datasets = DataLoader(mode, **config["dataset"])
    solver = Solver(config, datasets)

    if mode == "train":
        solver.train()
    if mode == "test":
        solver.test()
Example #12
    def evaluate(self, prob_id, tune=False, plot=True):
        assert prob_id in [1, 2, 3, 4], \
            "Evaluation is only supported for problems 1~4"

        self.prob_id = prob_id
        self.base_dir = osp.join(cfg.EVALUATION.ROOT_DIR,
                                 "prob" + str(prob_id))
        if not osp.exists(self.base_dir):
            os.makedirs(self.base_dir)
        self.strategy_path = osp.join(cfg.PROB.PROBLEM_LIST[prob_id - 1],
                                      "strategies.pkl")
        self.csv_path = self.strategy_path[:-3] + "csv"
        self.hub_path = osp.join(osp.dirname(self.strategy_path), "hubs.pkl")
        if not tune and not osp.exists(self.strategy_path):
            key = input(
                "It seems that you have not run the solver for this problem yet. Run now? (y or n) "
            )
            if key == 'Y' or key == 'y':
                solver = Solver()
                solver.solve(problem_id=prob_id)
            else:
                print("exit")
                return

        if osp.exists(self.strategy_path):
            self._pkl2csv()
            avg_amount_cost, avg_time_cost = self.get_avg_cost()
            print(
                "For problem {}:\nAverage amount cost: {:.2f} | Average time cost: {:.2f} min"
                .format(prob_id, avg_amount_cost, avg_time_cost))

        # whether we are tuning the parameters
        if tune:
            if prob_id == 1:
                # for problem1 we tune the weight_amount and weight_cost
                # self.weight_tune()
                self.weight_plot()
            elif prob_id == 2:
                # self.hub_cost_test()
                self.hub_cost_plot()
            elif prob_id == 3:
                # self.hub_cap_test()
                self.hub_cap_plot()

        # plot the distribution or not
        if plot:
            self.plot_distribution()
Example #13
    def hub_cap_test(self):
        hub_cap_path = osp.join(self.base_dir, "cap.csv")

        index = np.where(
            cfg.EVALUATION.HUB_CAPACITY_FIELD == cfg.PARAM.HUB_CAPACITY)[0]
        hub_capacity_field = np.insert(cfg.EVALUATION.HUB_CAPACITY_FIELD,
                                       index, 0.95 * cfg.PARAM.HUB_CAPACITY)
        hub_capacity_field = np.insert(hub_capacity_field, index + 2,
                                       1.05 * cfg.PARAM.HUB_CAPACITY)

        new_dict = edict()
        num_hubs_list, amount_cost_list, time_cost_list = [], [], []
        mask = np.ones_like(hub_capacity_field)
        for idx, cap in enumerate(hub_capacity_field):
            new_dict["HUB_CAPACITY"] = cap
            merge_a_into_b(new_dict, cfg)
            solver = Solver()
            solver.solve(problem_id=3, tune_mode=True)
            with open(self.hub_path, "rb") as f:
                data = pkl.load(f)
                num_hubs = len(data["hubs"])
            if num_hubs == 0:
                print("When capacity is :{}. No hubs were built...Continue...".
                      format(cap))
                mask[idx] = 0
                continue
            self._pkl2csv()
            avg_amount_cost, avg_time_cost = self.get_avg_cost()
            amount_cost_list.append(avg_amount_cost)
            time_cost_list.append(avg_time_cost)
            num_hubs_list.append(num_hubs)
            print(
                "Hub capacity: {:.0f} | Hubs: {:.0f} | Average amount cost: {:.3f}$ | Average time cost: {:.2f}m."
                .format(cap, num_hubs, avg_amount_cost, avg_time_cost))
        df = pd.DataFrame(
            data=list(
                zip(hub_capacity_field[np.nonzero(mask)], num_hubs_list,
                    amount_cost_list, time_cost_list)),
            columns=["HubCapacity", "NumHubs", "AmountCost", "TimeCost"])
        df["Products"] = df["AmountCost"] * df["TimeCost"]
        df["Values"] = cfg.PARAM.WEIGHT_AMOUNT * df[
            "AmountCost"] + cfg.PARAM.WEIGHT_TIME * df["TimeCost"]
        df.to_csv(hub_cap_path, index=False, float_format="%.2f")
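The two np.insert calls above bracket the configured capacity with 0.95x and 1.05x neighbours before sweeping. A self-contained illustration of that indexing, with toy numbers in place of the project's config:

import numpy as np

field = np.array([300, 500, 700])
base = 500
index = np.where(field == base)[0]                # -> array([1])
field = np.insert(field, index, 0.95 * base)      # [300, 475, 500, 700]
field = np.insert(field, index + 2, 1.05 * base)  # [300, 475, 500, 525, 700]
print(field)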
Example #14
    def setup_data_set(self):
        # setup for one data set
        self.model = Solver(self.data)
        self.model.current_data_set = self.current_data_set_number
        self.model.load_data_set()
        self.algorithm = self.algorithm_list[self.algorithm_name](
            self.number_of_iterations, self.current_data_set_number,
            self.model, self.data)

        logging.info("Solving using {0} data set {1}".format(
            self.algorithm_name, self.current_data_set_number + 1))
        logging.info("Nuber of iterations {0}".format(
            self.number_of_iterations))

        self.solve_step_button.setEnabled(True)
        self.solve_iteration_button.setEnabled(True)
        self.solve_data_set_button.setEnabled(True)
        self.solve_sequence_button.setEnabled(True)
        self.show_solution_button.setEnabled(True)
        self.show_sequences_button.setEnabled(True)
Example #15
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.data = DataStore()
        self.solver_data = SolverData()
        self.solver = Solver(self.data, self.solver_data)
        self.update_data_table()
        self.setup_data()
        self.value_connections()
        self.connections()
        self.combobox_coming_sequence = []
        self.combobox_going_sequence = []
        self.statusBar().showMessage('Ready')
        self.load_generated_data()
        self.results = OrderedDict()
        self.shoved_solution_name = ""
        self.shoved_iteration = IterationResults()
        self.shoved_solution = ModelResult()
        self.shoved_iteration_number = 0

        self.continue_solution = True
        self.showing_result = []
        self.result_times = {}
        self.function_type = "normal"
        self.solution_name = ""
        self.solution_number = 0
        self.sequence_solver = SequenceSolver()
        self.solution_results = dict()
        self.enter_sequence_widget = EnterSequenceWidget(self.data)
        self.current_sequence = Sequence()
        self.load_data()

        self.simulationStartButton.setEnabled(False)
        self.simulationStepForwardButton.setEnabled(False)

        self.result_show_best_solution_button.setEnabled(False)
        self.result_show_errors_button.setEnabled(False)
        self.result_show_sequences_button.setEnabled(False)
        self.result_show_trucktimes_button.setEnabled(False)
        self.result_show_truckgoods_button.setEnabled(False)
        self.run_simulation_button.setEnabled(False)
Example #16
    def weight_tune(self):
        """ tuning the weight of time and weight of cost
        """
        new_dict = edict()
        weight_data_path = osp.join(self.base_dir, "weight.csv")
        weight_time_list, weight_amount_list = [], []
        amount_cost_list, time_cost_list = [], []
        for weight_amount in cfg.EVALUATION.WEIGHT_AMOUNT_FIELD:
            for weight_time in cfg.EVALUATION.WEIGHT_TIME_FIELD:
                new_dict["WEIGHT_AMOUNT"] = round(weight_amount, 2)
                new_dict["WEIGHT_TIME"] = round(weight_time, 2)
                # merge the config
                merge_a_into_b(new_dict, cfg)
                # solve the problem and get the output
                solver = Solver()
                solver.solve(problem_id=1, tune_mode=True)
                # don't forget to transform the output into a csv file
                self._pkl2csv()
                avg_amount_cost, avg_time_cost = self.get_avg_cost()
                weight_amount_list.append(round(weight_amount, 2))
                weight_time_list.append(round(weight_time, 2))
                amount_cost_list.append(avg_amount_cost)
                time_cost_list.append(avg_time_cost)
                print(
                    "When weight of amount is {:.2f} and weight of time is {:.2f}"
                    .format(weight_amount, weight_time))
                print(
                    "Average amount cost is {:.1f}; average time cost is {:.1f}"
                    .format(avg_amount_cost, avg_time_cost))

        df = pd.DataFrame(data=list(
            zip(weight_amount_list, weight_time_list, amount_cost_list,
                time_cost_list)),
                          columns=[
                              "WeightOfAmount", "WeightOfTime", "AmountCost",
                              "TimeCost"
                          ])
        # multiply the two columns to get the products
        df["Products"] = df["AmountCost"] * df["TimeCost"]
        df.to_csv(weight_data_path, index=False, float_format="%.2f")
Example #17
                    help="the number of orders to parse", type=int, default=2400)
parser.add_argument("--process_num", dest="NUM_PROCESSES",
                    help="the number of processes", type=int, default=16)
parser.add_argument("--weight_amount", dest="WEIGHT_AMOUNT",
                    help="the weight of the amountCost in the objective function", type=float, default=15)
parser.add_argument("--weight_time", dest="WEIGHT_TIME",
                    help="the weight of the timeCost in the objective function", type=float, default=1)
parser.add_argument("--hub_cost_const", dest="HUB_BUILT_COST_CONST",
                    help="the constant part of cost of building a hub", type=int, default=3000)
parser.add_argument("--hub_cost_vary", dest="HUB_BUILT_COST_VARY",
                    help="the variable part cost of building a hub", type=float, default=2.0)
parser.add_argument("--hub_ratio", dest="HUB_UNIT_COST_RATIO",
                    help="the cutoff of unit price of a hub", type=float, default=0.7)
parser.add_argument("--hub_capacity", dest="HUB_CAPACITY",
                    help="the capacity of the hub(in prob3)", type=int, default=500)

args = parser.parse_args()

arg_dict = edict(vars(args))
merge_a_into_b(arg_dict, cfg)
print("Using the config:")
print(cfg)

solver = Solver()
evaluator = Evaluator()

solver.solve(problem_id=args.PROBLEM_ID)
# disable the tune mode but plot the distribution, see the outputs for more detail
evaluator.evaluate(prob_id=args.PROBLEM_ID, tune=False, plot=True)

Example #18
import sys
from src.solver import Solver
from src.parsing import parse

lines = []
for line in sys.stdin:
    lines.append(line.rstrip('\n'))

matrix = parse()
solver = Solver(matrix)
# print(matrix)
# print(solver.is_solved(matrix))
# print(solver.manhattan_distance(matrix))
print(solver.solve(matrix))
Example #19
# The head of this snippet is truncated; the train-loader call below is an
# assumed reconstruction mirroring val_data_loader.
train_data_loader = torch.utils.data.DataLoader(
    dataset=data_set,
    batch_size=config['batch_size'],
    num_workers=config['num_workers'],
    sampler=train_data_sampler)
val_data_loader = torch.utils.data.DataLoader(
    dataset=data_set,
    batch_size=config['batch_size'],
    num_workers=config['num_workers'],
    sampler=val_data_sampler)

if config['continue_training']:
    model = torch.load(config['model_path'])
    solver = pickle.load(open(config['solver_path'], 'rb'))
    start_epoch = config['start_epoch']
else:
    model = EncoderDecoder()
    solver = Solver(optim_args={
        "lr": config['learning_rate'],
        "betas": config['betas']
    })
    start_epoch = 0

solver.train(lr_decay=config['lr_decay'],
             start_epoch=start_epoch,
             model=model,
             train_loader=train_data_loader,
             val_loader=val_data_loader,
             num_epochs=config['num_epochs'],
             log_after_iters=config['log_interval'],
             save_after_epochs=config['save_interval'],
             lr_decay_interval=config['lr_decay_interval'],
             save_path=config['save_path'],
             num_subtasks=config['num_subtasks'])
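The continue_training branch above unpickles a previously saved Solver. The matching save side is not part of the snippet; a hypothetical helper for it might look like this:

import pickle

def save_solver(solver, path):
    # Persist the solver so the continue_training branch can reload it.
    with open(path, 'wb') as f:
        pickle.dump(solver, f)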
Example #20
for net in range(1000):
    # counter printer
    print "iteration: ", net

    # train net1
    model1 = FullyConnectedNet([100, 100],
                               weight_scale=0.003,
                               use_batchnorm=False,
                               reg=0.6)
    solver1 = Solver(
        model1,
        data1,
        print_every=data1['X_train'].shape[0],
        num_epochs=50,
        batch_size=100,
        update_rule='sgd',
        optim_config={
            'learning_rate': 0.03,
        },
        verbose=False,
        lr_decay=0.9,
    )
    solver1.train()
    # train net2
    model2 = FullyConnectedNet([100, 100],
                               weight_scale=0.003,
                               use_batchnorm=False,
                               reg=0.6)
    solver2 = Solver(
        model2,
        data2,  # assumed: a second data dict analogous to data1 (the snippet is truncated here)
        print_every=data2['X_train'].shape[0],
        num_epochs=50,
        batch_size=100,
        update_rule='sgd',
        optim_config={
            'learning_rate': 0.03,
        },
        verbose=False,
        lr_decay=0.9,
    )
    solver2.train()
Example #21
from src.solver import Solver

for i in range(2, 42):
    runner = Solver()
    print("========================================================")
    print(f"{i} begin :: ")
    runner.run(f"../dataset/raw_datas/g{i}")
Example #22
def main(args):
    # load dictionary and generate char_list, sos_id, eos_id
    char_list, sos_id, eos_id = process_dict(args.dict)
    vocab_size = len(char_list)
    tr_dataset = AudioDataset('train', args.batch_size)
    cv_dataset = AudioDataset('dev', args.batch_size)

    tr_loader = AudioDataLoader(tr_dataset,
                                batch_size=1,
                                num_workers=args.num_workers,
                                shuffle=args.shuffle,
                                feature_dim=args.feature_dim,
                                char_list=char_list,
                                path_list=tr_dataset.path_lst,
                                label_list=tr_dataset.han_lst,
                                LFR_m=args.LFR_m,
                                LFR_n=args.LFR_n)
    cv_loader = AudioDataLoader(cv_dataset,
                                batch_size=1,
                                num_workers=args.num_workers,
                                feature_dim=args.feature_dim,
                                char_list=char_list,
                                path_list=cv_dataset.path_lst,
                                label_list=cv_dataset.han_lst,
                                LFR_m=args.LFR_m,
                                LFR_n=args.LFR_n)

    data = {'tr_loader': tr_loader, 'cv_loader': cv_loader}

    encoder = Encoder(args.d_input * args.LFR_m,
                      args.d_low_dim,
                      args.n_layers_enc,
                      args.n_head,
                      args.d_k,
                      args.d_v,
                      args.d_model,
                      args.d_inner,
                      dropout=args.dropout,
                      pe_maxlen=args.pe_maxlen)
    decoder = Decoder(
        sos_id,
        eos_id,
        vocab_size,
        args.d_word_vec,
        args.n_layers_dec,
        args.n_head,
        args.d_k,
        args.d_v,
        args.d_model,
        args.d_inner,
        dropout=args.dropout,
        tgt_emb_prj_weight_sharing=args.tgt_emb_prj_weight_sharing,
        pe_maxlen=args.pe_maxlen)
    model = Transformer(encoder, decoder)
    print(model)
    model.cuda()
    # optimizer
    optimizer = TransformerOptimizer(
        torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09),
        args.init_lr, args.d_model, args.warmup_steps)

    # solver
    solver = Solver(data, model, optimizer, args)
    solver.train()
Example #23
def main():
    # torch.set_default_tensor_type('torch.FloatTensor')
    # set up default cuda device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Load the (preprocessed) CIFAR10 data. The preprocessing includes
    # channel swapping, normalization and train-val-test splitting.
    # Loading the datasets might take a while.

    datasetGen = DatasetGen()
    datasetGen.BinaryShinyPokemonDataset()
    data_dict = datasetGen.Subsample([0.6, 0.2, 0.2])
    print("Train size: %i" % len(data_dict["X_train"]))
    print("Val size: %i" % len(data_dict["X_val"]))
    print("Test size: %i" % len(data_dict["X_test"]))

    train_data, val_data, test_data = ConvertDatasetDictToTorch(data_dict)

    from src.solver import Solver
    from torch.utils.data.sampler import SequentialSampler

    num_train = len(train_data)
    OverfitSampler = SequentialSampler(range(num_train))

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=4)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=64,
                                             shuffle=False,
                                             num_workers=4)

    ############################################################################
    # Hyper parameter Grid search : Set grids below                            #
    ############################################################################

    print(train_data)

    lr = 1e-3
    kernelsize = 3
    hidden_dims = [200]
    convArray = [64, 128]

    model = ClassificationCNN(input_dim=[3, 96, 96],
                              num_classes=2,
                              convolutionalDims=convArray,
                              kernel_size=kernelsize,
                              stride_conv=1,
                              weight_scale=0.02,
                              pool=2,
                              stride_pool=2,
                              hiddenDims=hidden_dims,
                              dropout=0.0)
    model.to(device)
    solver = Solver(optim_args={"lr": lr, "weight_decay": 1e-3})
    print(
        "training now with values: lr=%s, hidden_dim=%s, filtersize=%s, convArray=%s"
        % (lr, str(hidden_dims), kernelsize, str(convArray)))
    solver.train(model,
                 train_loader,
                 val_loader,
                 log_nth=6,
                 num_epochs=10,
                 L1=False,
                 reg=0.1)

    from src.vis_utils import visualize_grid

    # first (next) parameter should be convolutional
    conv_params = next(model.parameters()).cpu().data.numpy()
    grid = visualize_grid(conv_params.transpose(0, 2, 3, 1))
    plt.imshow(grid.astype('uint8'))
    plt.axis('off')
    plt.gcf().set_size_inches(6, 6)
    plt.show()
Example #24
final_state = []
cnt = 0
print("{:<20} {:<10}".format('PHM positions',
                             'Failure of critical function'))
print("-------------        ----------------------------")
for state in states:
    cnt += 1
    if select and cnt < number:
        continue
    if select and cnt > number:
        break
    model_description = model.load_phm(model_description, state)
    # Build the model
    bn = BayesianNetwork(model_description)
    bayesian_model = bn.build()
    solver = Solver(bayesian_model, display, evidence)
    e = solver.run(mapquery=False, printed=False)
    if e < failure:
        final_state = state
        failure = e
        print("{:<20} {:.4E} *".format(cnt, e))
    else:
        print("{:<20} {:.4E}".format(cnt, e))
if details:
    print('\n=====================\n')
    model_description = model.load_phm(model_description, final_state)
    # Build the model
    bn = BayesianNetwork(model_description)
    bayesian_model = bn.build()
    solver = Solver(bayesian_model, display_final, evidence)
    e = solver.run(mapquery=False, printed=True)
Example #25
    def test_ctor(self):
        solver = Solver("", "", False)

        self.assertEqual(solver.name, "")
        self.assertEqual(solver.quiet_mode, False)
Example #26
def train(data_dir, epochs, batch_size, model_path, max_hours=None, continue_from=""):
    # General config
    # Task related
    json_dir = data_dir
    train_dir = data_dir + "tr"
    valid_dir = data_dir + "cv"
    sample_rate = 8000
    segment_len = 4
    cv_maxlen = 6

    # Network architecture
    N = 256  # Number of filters in autoencoder
    L = 20  # Length of filters in conv autoencoder
    B = 256  # number of channels in conv blocks - after bottleneck 1x1 conv
    H = 512  # number of channels in inner conv1d block
    P = 3  # length of filter in inner conv1d blocks
    X = 8  # number of conv1d blocks (also the number of dilations) in each repeat
    R = 4  # number of repeats
    C = 2  # Number of speakers

    norm_type = 'gLN'  # choices=['gLN', 'cLN', 'BN']
    causal = 0
    mask_nonlinear = 'relu'

    use_cuda = 1

    half_lr = 1  # halve the learning rate when improvement is small
    early_stop = 1  # stop training if no improvement after 10 epochs
    max_grad_norm = 5  # gradient clipping

    shuffle = 1  # Shuffle every epoch
    # batch_size = 3
    num_workers = 4
    # optimizer
    optimizer_type = "adam"
    lr = 1e-3
    momentum = 0
    l2 = 0  # Weight decay - l2 norm

    # save and visualize
    save_folder = "../egs/models"
    enable_checkpoint = 0  # enables saving checkpoints
    # continue_from = save_folder + "/speech_seperation_first_try.pth"  # model to continue from
    # model_path = "speech_separation_first_try_more_epochs.pth"  # TODO: Fix this
    print_freq = 20000
    visdom_enabled = 1
    visdom_epoch = 1
    visdom_id = "Conv-TasNet Training"  # TODO: Check what this does

    arg_solver = (use_cuda, epochs, half_lr, early_stop, max_grad_norm, save_folder, enable_checkpoint, continue_from,
                  model_path, print_freq, visdom_enabled, visdom_epoch, visdom_id)

    # Datasets and Dataloaders
    tr_dataset = AudioDataset(train_dir, batch_size,
                              sample_rate=sample_rate, segment=segment_len, max_hours=max_hours)
    cv_dataset = AudioDataset(valid_dir, batch_size=1,  # 1 -> use less GPU memory to do cv
                              sample_rate=sample_rate,
                              segment=-1, cv_maxlen=cv_maxlen, max_hours=max_hours)  # -1 -> use full audio
    tr_loader = AudioDataLoader(tr_dataset, batch_size=1,
                                shuffle=shuffle,
                                num_workers=num_workers)
    cv_loader = AudioDataLoader(cv_dataset, batch_size=1,
                                num_workers=0)
    data = {'tr_loader': tr_loader, 'cv_loader': cv_loader}
    # model
    model = ConvTasNet(N, L, B, H, P, X, R,
                       C, norm_type=norm_type, causal=causal,
                       mask_nonlinear=mask_nonlinear)
    # print(model)
    if use_cuda:
        model = torch.nn.DataParallel(model)
        model.cuda()
    # optimizer
    if optimizer_type == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=lr,
                                    momentum=momentum,
                                    weight_decay=l2)
    elif optimizer_type == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=l2)
    else:
        print("Not support optimizer")
        return

    # solver
    solver = Solver(data, model, optimizer, arg_solver)  # TODO: Fix solver thing
    solver.train()
Example #27
def main():
    parser = setup_parser()
    args = parser.parse_args()
    solver = Solver(args.text_file)
    solver.run()
Example #28
def train():
    # path of this file
    ABS_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'

    # Year-month-day_Hour-Minute-Second
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    print("Loading data...")

    # currently only using AN Dataset
    train_data, val_data = get_Dataset()

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=25,
                                               shuffle=True,
                                               num_workers=0)
    val_loader = torch.utils.data.DataLoader(val_data,
                                             batch_size=25,
                                             shuffle=False,
                                             num_workers=0)
    #train_loader = torch.utils.data.DataLoader(train_data, batch_size=25, shuffle=False, num_workers=4, sampler=OverfitSampler(3000))
    #val_loader = torch.utils.data.DataLoader(val_data, batch_size=25, shuffle=False,num_workers=2, sampler=OverfitSampler(100))

    log_n = 50  # Train acc every x iterations
    epochs = 20  # number of epochs; the model is saved after each completed one
    val_n = 0  # Run validation every x iterations (default: off/0)

    print("Training for %d epochs." % epochs)
    model = CNNEmoClassifier(weight_scale=0.0005)
    solver = Solver(optim_args={'lr': 5e-5})
    tic = time.time()
    solver.train(model,
                 train_loader,
                 val_loader,
                 num_epochs=epochs,
                 log_nth=log_n,
                 val_nth=val_n)

    temp_time = time.time() - tic
    m, s = divmod(temp_time, 60)
    h, m = divmod(m, 60)
    print('Done after %dh%02dmin%02ds' % (h, m, s))

    # Save model
    model.save("models/model_{}.model".format(timestamp))

    plt.subplot(2, 1, 1)
    plt.plot(solver.train_loss_history, '-', label='train_loss')
    x = np.linspace(0, len(solver.train_loss_history),
                    len(solver.val_loss_history))
    plt.plot(x, solver.val_loss_history, '-o', label='val_loss')
    plt.legend(loc='upper right')
    plt.title('Training vs Validation loss | %d Epochs' % epochs)
    plt.xlabel('iteration')
    plt.ylabel('loss')

    plt.subplot(2, 1, 2)
    plt.plot(solver.train_acc_history,
             '-o',
             label='train_acc=%.4f' % (solver.train_acc_history[-1]))
    plt.plot(solver.val_acc_history,
             '-o',
             label='val_acc=%.4f' % (solver.val_acc_history[-1]))
    plt.legend(loc='upper left')
    plt.title('Training vs Validation accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.gca().yaxis.grid(True)

    plt.gcf().set_size_inches(15, 15)
    plt.tight_layout()
    plt.savefig(ABS_PATH + 'output/performance_{}.png'.format(timestamp))
    plt.gcf().clear()

    # plot examples:
    model.eval()

    # get_pics might not work! If it doesn't, uncomment the old code.
    test_pics, example_labels, filenames, amount_example_pics = get_pics(
        train_data, val_data)
    output = model.forward(Variable(torch.Tensor(test_pics).float()).cuda())
    emotions = {
        0: 'neutral',
        1: 'happy',
        2: 'sad',
        3: 'surprise',
        4: 'fear',
        5: 'disgust',
        6: 'anger',
        7: 'contempt'
    }
    print(
        '0=neutral, 1=happy, 2=sad, 3=surprise, 4=fear, 5=disgust, 6=anger, 7=contempt'
    )
    print(
        np.argmax(output.data.cpu().numpy(),
                  axis=1,
                  out=np.empty(amount_example_pics, dtype='int64')))
    print(example_labels)
    output = torch.nn.functional.softmax(output).cpu().data.numpy()

    # plot images and write output under them, very unsure!! Better check on this one!
    for i in range(amount_example_pics):
        plt.subplot(amount_example_pics, 1, i + 1)
        #plt.legend(loc='upper left')
        plt.title(
            '%s: Truth=%s, N=%.2e, H=%.2e, Sad=%.2e, Sur=%.2e, F=%.2e, D=%.2e, A=%.2e, C=%.2e'
            % (filenames[i], emotions[example_labels[i]], list(output[i])[0],
               list(output[i])[1], list(output[i])[2], list(
                   output[i])[3], list(output[i])[4], list(
                       output[i])[5], list(output[i])[6], list(output[i])[7]))
        plt.imshow(test_pics[i][0])

    plt.tight_layout()
    plt.savefig(ABS_PATH + 'output/examples_{}.png'.format(timestamp))
    plt.gcf().clear()
Example #29
parser.add_argument('--raw_data_path', type=str, default='', help="rewrite the data path in config file")
paras = parser.parse_args()
setattr(paras, 'gpu', not paras.cpu)
setattr(paras, 'verbose', not paras.no_msg)
config = yaml.load(open(paras.config, 'r'), Loader=yaml.FullLoader)
if paras.data_path != '':
    config['solver']['data_path'] = paras.data_path
if paras.raw_data_path != '':
    config['solver']['raw_data_path'] = paras.raw_data_path
    
random.seed(paras.seed)
np.random.seed(paras.seed)
torch.manual_seed(paras.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(paras.seed)

if not paras.rnnlm:
    if not paras.test:
        # Train ASR
        from src.solver import Trainer as Solver
    else:
        # Test ASR
        from src.solver import Tester as Solver
else:
    # Train RNNLM
    from src.solver import RNNLM_Trainer as Solver

solver = Solver(config,paras)
solver.load_data()
solver.set_model()
solver.exec()
Example #30
                        default=0,
                        type=int,
                        help='Random seed for reproducible results.',
                        required=False)
    parser.add_argument('--cpu',
                        action='store_true',
                        help='Disable GPU training')
    parser.add_argument('--no-msg',
                        action='store_true',
                        help='Hide all messages')
    args = parser.parse_args()

    args.gpu = not args.cpu
    args.verbose = not args.no_msg

    config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)

    # Set seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

    # Train
    from src.solver import Trainer as Solver
    solver = Solver(config, args)
    solver.load_data()
    solver.build_model()
    solver.exec()