Example no. 1
def play_matches(test_agents, cpu_agents, num_matches, time_limit=150):
    """Play a series of matches between each test agent and each CPU agent.
    """
    win_records = create_win_records(test_agents, cpu_agents)

    for ta in pbar(test_agents):
        for ca in pbar(cpu_agents):
            for _ in pbar(range(num_matches)):
                play_game_set(ta, ca, win_records, time_limit)
    return pd.DataFrame(win_records).T
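Note: throughout these examples, pbar is an alias for the tqdm progress bar (several later snippets import it exactly as shown below), and pd is the usual pandas alias. A minimal preamble for this snippet, assuming create_win_records and play_game_set are defined elsewhere:

import pandas as pd
from tqdm import tqdm as pbar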
Example no. 2
    def _epoch(self,
               X,
               loss_fns,
               optimizers=None,
               n_iter=(1, 1, 1),
               n_batches=None):
        """Evaluate/optimize for one epoch.
        X: torch.nn.DataLoader
        loss_fns: each takes an input batch and returns dict of loss component Variables
        optimizers: sequence of torch.nn.Optimizer for each loss or None if not training
        n_iter: sequence of optimization steps per batch for each loss
        n_batches: number of batches to draw or None for all data
        """
        optimizers = optimizers or [None] * 3
        iter_losses = defaultdict(list)

        it = _take_batches(X, n_batches) if n_batches else X
        desc = 'training batch' if optimizers else 'validating batch'
        for x in pbar(it, desc=desc, leave=False):
            x = self._wrap(x)
            for opt, iters, loss_fn in zip(optimizers, n_iter, loss_fns):
                for _ in range(iters):
                    loss_components = loss_fn(x)
                    if opt:
                        loss = sum(loss_components.values())
                        self.zero_grad()
                        loss.backward()
                        opt.step()
                        del loss
                    for k, v in loss_components.items():
                        iter_losses[k].append(v.data.cpu().numpy())
                    del loss_components
        return {k: np.mean(v) for k, v in iter_losses.items()}
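The _take_batches helper used here (and in Examples no. 3, 13, 16, and 19) is not shown in any of these snippets. A minimal sketch, assuming it simply draws a fixed number of batches from a loader:

from itertools import islice

def _take_batches(loader, n):
    # hypothetical helper: yield at most n batches from a DataLoader
    return islice(iter(loader), n)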
Example no. 3
    def _epoch(self, X, D_opt=None, EG_opt=None, n_batches=None):
        """Evaluate/optimize for one epoch.
        X: torch.utils.data.DataLoader
        D_opt, EG_opt: torch.optim.Optimizer or None if not training
        n_batches: number of batches to draw or None for all data
        """
        iter_objs = defaultdict(list)

        training = bool(D_opt and EG_opt)
        it = _take_batches(X, n_batches) if n_batches else X
        desc = 'training batch' if training else 'validating batch'
        for x in pbar(it, desc=desc, leave=False):
            x = self._wrap(x)
            obj = self.objective(x)
            if training:
                self.zero_grad()
                obj['D_loss'].backward(retain_graph=True)
                D_opt.step()
                self.zero_grad()
                obj['EG_loss'].backward()
                EG_opt.step()
            for k, v in obj.items():
                iter_objs[k].append(v.data.cpu().numpy())
            del obj
        return {k: np.mean(v) for k, v in iter_objs.items()}
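self._wrap is another helper that never appears in these listings; given the Variable-era .data.cpu() calls, it most plausibly moves a batch onto the model's device. A sketch under that assumption (only the method name comes from the source):

def _wrap(self, x):
    # hypothetical: move a batch (a tensor or a tuple of tensors) onto the model's device
    device = next(self.parameters()).device
    if isinstance(x, (tuple, list)):
        return tuple(t.to(device) for t in x)
    return x.to(device)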
Example no. 4
def train_net(MODEL_NAME, BATCH_SIZE, EPOCHS, train_datax, train_datay,
              test_datax, test_datay, optimizer, net, loss_function):
    with open("model.log", "a") as f:
        f.write(
            "MODEL_NAME,MODEL_TIME,EPOCH,IN_SAMPLE_ACC,IN_SAMPLE_LOSS,OUT_SAMPLE_ACC,OUT_SAMPLE_LOSS\n"
        )
        for epoch in range(EPOCHS):
            to_do = range(0, len(train_datax), BATCH_SIZE)  # batch start indices
            for batch in pbar(to_do):  # loop over batches with a progress bar
                batch_X = train_datax[batch:batch + BATCH_SIZE].view(
                    -1, 1, 50, 50)
                batch_y = train_datay[batch:batch + BATCH_SIZE]

                acc, loss, net, optimizer = fwd_pass(batch_X,
                                                     batch_y,
                                                     net,
                                                     loss_function,
                                                     optimizer,
                                                     train=True)

                if batch % 50 == 0:  # batch is a slice start index (a multiple of BATCH_SIZE)
                    val_acc, val_loss = test_model(test_datax,
                                                   test_datay,
                                                   net,
                                                   loss_function,
                                                   optimizer,
                                                   size=100)
                    f.write(
                        f"{MODEL_NAME},{round(time.time(),3)},{epoch},{round(float(acc),2)},{round(float(loss),4)},{round(float(val_acc),2)},{round(float(val_loss),4)}\n"
                    )
        return optimizer, net
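fwd_pass and test_model are not included in this listing. A plausible fwd_pass, reconstructed from the call site above (names and logic assumed, torch imported; not the original helper):

def fwd_pass(batch_X, batch_y, net, loss_function, optimizer, train=False):
    # hypothetical reconstruction from the call site above
    if train:
        net.zero_grad()
    outputs = net(batch_X)
    # labels are one-hot vectors (cf. Example no. 21), so compare argmaxes
    acc = (torch.argmax(outputs, dim=1) ==
           torch.argmax(batch_y, dim=1)).float().mean().item()
    loss = loss_function(outputs, batch_y)
    if train:
        loss.backward()
        optimizer.step()
    return acc, loss, net, optimizer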
Example no. 5
def main(pidiv_buffer: dict = None):
    # avoid a mutable default argument
    pidiv_buffer = pidiv_buffer if pidiv_buffer is not None else {}
    from tqdm import tqdm as pbar
    # make sure it's not python3.8
    check_pyversion()

    # get args
    args = get_pars()

    # get ploidy dict
    #     global ploidy  # for debugging send_windows()
    ploidy = get_ploidy(args)

    # iterate through pops
    for pop in ploidy:
        # read in VariantsToTable.txt file, filter chroms based on args
        global snps
        snps, chromcol = get_datatable(args, pop)

        # get ipcluster engines
        lview, dview = launch_engines(args.engines, args.profile)

        # attach data, functions, and dict to engines (used in and downstream of send_to_calculate())
        attach_data(
            snps=snps,
            ploidy=ploidy,
            write_tmp_file=write_tmp_file,
            send_windows=send_windows,
            send_chrom_to_calculator=send_chrom_to_calculator,
            get_windows=get_windows,
            #                     pidiv_buffer=pidiv_buffer,
            dview=dview)

        # calculate measure
        file = send_chrom_to_calculator(lview,
                                        chromcol=chromcol,
                                        args=args,
                                        pop=pop,
                                        pidiv_buffer=pidiv_buffer,
                                        ploidy=ploidy)
        print(
            str(ColorText("\nWrote stats to ").green()) +
            str(ColorText(file).bold().green()))
        print(
            str(ColorText("\nWrote pypoolation arguments used to ").green()) +
            str(ColorText(file.replace(".txt", "_ARGS.pkl")).green().bold()))

        # kill ipcluster to avoid mem problems (restart next loop)
        print(ColorText("\nStopping ipcluster ...").bold())
        subprocess.call([shutil.which('ipcluster'), 'stop'])

        # remove temporary files
        print(ColorText("\nRemoving temporary files ...").bold())
        tmpdir = os.path.join(args.outdir, 'tmp')
        for f in pbar(os.listdir(tmpdir)):
            os.remove(os.path.join(tmpdir, f))

        print(ColorText("\nDONE!!!").bold().green())
Example no. 6
    def fit(self,
            X_train,
            X_valid=None,
            disc_opt_fn=None,
            ae_opt_fn=None,
            n_iter=1,
            n_batches=None,
            n_epochs=100,
            log_fn=None,
            report_every=1):
        """
        X_train: torch.utils.data.DataLoader
        X_valid: torch.utils.data.DataLoader or None
        disc_opt_fn: takes parameter set, returns torch.optim.Optimizer
            if None, a default optimizer will be used
        ae_opt_fn: takes parameter set, returns torch.optim.Optimizer
            if None, a default optimizer will be used
        n_iter: int/tuple # of discriminator, autoencoder optimizer steps/batch
        n_batches: # of batches per epoch or None for all data
        log_fn: takes diagnostic dict, called after every nth epoch
        n_epochs: number of training epochs
        report_every: call log_fn every nth epoch
        """
        _unfreeze(self)

        try:
            assert len(n_iter) == 2
        except TypeError:
            n_iter = (n_iter, ) * 2

        # default optimizers
        disc_opt_fn = disc_opt_fn or (
            lambda p: torch.optim.Adam(p, lr=2e-4, betas=(.5, .9)))
        ae_opt_fn = ae_opt_fn or (
            lambda p: torch.optim.Adam(p, lr=2e-4, betas=(.5, .9)))
        disc_opt = disc_opt_fn(chain(self.D.parameters(), self.C.parameters()))
        ae_opt = ae_opt_fn(chain(self.E.parameters(), self.G.parameters()))

        for i in pbar(range(n_epochs), desc='epoch'):
            diagnostic = defaultdict(dict)
            report = log_fn and (i % report_every == 0 or i == n_epochs - 1)

            # train for one epoch
            self.train()
            diagnostic['train'].update(
                self._epoch(X_train,
                            disc_opt,
                            ae_opt,
                            n_batches=n_batches,
                            n_iter=n_iter))

            # validate for one epoch
            self.eval()
            diagnostic['valid'].update(self._epoch(X_valid))

            # log the dict of losses
            if report:
                log_fn(diagnostic)
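A hypothetical usage of this fit method (the dataset objects and the model instance are assumptions, not part of the original):

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=64)
model.fit(train_loader, valid_loader, n_epochs=50, n_iter=(1, 2), log_fn=print)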
Example no. 7
    def fit(self,
            X_train,
            X_valid=None,
            opt_fn=torch.optim.Adam,
            opt_params={
                'lr': 2e-4,
                'betas': (.5, .999)
            },
            n_batches=None,
            n_epochs=10,
            log_fn=None,
            log_every=1,
            checkpoint_fn=None,
            checkpoint_every=2):
        """
        X_train: torch.utils.data.DataLoader
        X_valid: torch.utils.data.DataLoader or None
        opt_fn: nn.Optimizer constructor or pair for D, E/G
        opt_params: dict of keyword args for optimizer or pair for D, E/G
        n_batches: int or pair # of train, valid batches per epoch (None for all data)
        n_epochs: number of training iterations
        log_fn: takes diagnostic dict, called after every nth epoch
        log_every: call log function every nth epoch
        checkpoint_fn: takes model, epoch; called after every nth epoch
        checkpoint_every: call checkpoint function every nth epoch
        """
        _unfreeze(self)

        train_batches, valid_batches = _as_tuple(n_batches, 2)
        D_opt_fn, EG_opt_fn = (
            # bind fn/hyperparams as defaults: lambdas from one genexp share its loop variables
            lambda p, fn=fn, hyperparams=hyperparams: fn(p, **hyperparams)
            for fn, hyperparams in zip(
                _as_tuple(opt_fn, 2), _as_tuple(opt_params, 2)))

        EG_opt = EG_opt_fn(chain(self.E.parameters(), self.G.parameters()))
        D_opt = D_opt_fn(self.D.parameters())
        #LambdaLR(D_opt, lambda _:-1) # negate learning rate for D_opt

        for i in pbar(range(n_epochs), desc='epoch'):
            diagnostic = defaultdict(dict)
            report = log_fn and (i % log_every == 0 or i == n_epochs - 1)
            checkpoint = checkpoint_every and checkpoint_fn and (
                (i + 1) % checkpoint_every == 0 or i == n_epochs - 1)
            # train for one epoch
            self.train()
            _unfreeze(self)
            diagnostic['train'].update(
                self._epoch(X_train, D_opt, EG_opt, n_batches=train_batches))
            # validate for one epoch
            self.eval()
            _freeze(self)
            diagnostic['valid'].update(
                self._epoch(X_valid, n_batches=valid_batches))
            # log the dict of loss components
            if report:
                log_fn(diagnostic)
            if checkpoint:
                checkpoint_fn(self, i + 1)
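_as_tuple, used here and in Example no. 22, is not listed either. A sketch consistent with how Examples no. 6 and 18 coerce n_iter by hand:

def _as_tuple(x, n):
    # hypothetical helper: pass an n-sequence through, broadcast a scalar to an n-tuple
    try:
        assert len(x) == n
        return tuple(x)
    except TypeError:
        return (x,) * n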
Example no. 8
    def train(self,
              n_batches=200000,
              n_burn_in_batches=2000,
              summary_interval=100,
              ckpt_interval=10000,
              progress_bar=True):

        self.sess.run(self.data.get_dataset('train'))
        self.g.training = True
        self.d.training = True

        burn_in_iter = range(n_burn_in_batches)
        standard_iter = range(n_batches)

        if progress_bar:
            burn_in_iter = pbar(burn_in_iter, unit='batch')
            standard_iter = pbar(standard_iter, unit='batch')

        try:
            for batch in burn_in_iter:
                self.sess.run(self.vgg_adam)

            for batch in standard_iter:
                self.sess.run(self.d_adam)
                self.sess.run(self.g_adam)

                if batch % summary_interval == 0:
                    self.summarize()

                if batch == n_batches // 2:
                    self.lr_drop()

                if batch % ckpt_interval == 0 or batch + 1 == n_batches:
                    self.save()

        except KeyboardInterrupt:
            print("Saving model before quitting...")
            self.save()
            print("Save complete. Training stopped.")

        # compute final losses outside a finally block, so unexpected
        # exceptions still propagate instead of being swallowed by a return
        losses = self.sess.run([self.g_loss, self.d_loss])
        return losses
Example no. 9
async def singleRun():
    logger.info("Start for csv input:%s" % (results.input))
    inputoutput = csvIO()  # initialize the I/O helper class
    pdict = inputoutput.areadCSVLinks()  # read the csv file
    tasks = []
    for k, v in pdict.items():
        task = loop.create_task(inputoutput._safe_download(k, v))  # start the retrieval
        tasks.append(task)
    responses = []
    for task in pbar(asyncio.as_completed(tasks), desc='retrieve', total=len(tasks)):
        responses.append(await task)
    await inputoutput.awriteCSV(responses)
    logger.info("Finish for csv output:%s" % (results.output))
Example no. 10
def train_model(model, params):
    model = model.to(params['device'])
    optimizer = optim.Adam(model.parameters())
    total_updates = params['num_epochs'] * len(params['train_loader'])

    criterion = nn.CrossEntropyLoss()
    # best_accuracy = test_model(model, params)
    best_model = copy.deepcopy(model.state_dict())

    for epoch in pbar(range(params['num_epochs'])):
        # Each epoch has a training and validation phase
        for phase in ['train', 'validation']:

            # Loss accumulator for each epoch
            logs = {'Loss': 0.0, 'Accuracy': 0.0}

            # Set the model to the correct phase
            model.train() if phase == 'train' else model.eval()

            # Iterate over data
            for image, label in params[phase + '_loader']:
                image = image.to(params['device'])
                label = label.to(params['device'])

                # Zero gradient
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):

                    # Forward pass
                    prediction = model(image)
                    loss = criterion(prediction, label)
                    accuracy = torch.sum(
                        torch.max(prediction, 1)[1] == label.data).item()

                    # Update log
                    logs['Loss'] += image.shape[0] * loss.detach().item()
                    logs['Accuracy'] += accuracy

                    # Backward pass
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

            # Normalize and write the data to TensorBoard
            logs['Loss'] /= len(params[phase + '_loader'].dataset)
            logs['Accuracy'] /= len(params[phase + '_loader'].dataset)
Example no. 11
def test_model(model, params):
    model = model.to(params['device']).eval()
    phase = 'validation'
    logs = {'Accuracy': 0.0}

    # Iterate over data
    for image, label in pbar(params[phase + '_loader']):
        image = image.to(params['device'])
        label = label.to(params['device'])

        with torch.no_grad():  # inference only: skip autograd bookkeeping
            prediction = model(image)
            accuracy = torch.sum(
                torch.max(prediction, 1)[1] == label.data).item()
        logs['Accuracy'] += accuracy

    logs['Accuracy'] /= len(params[phase + '_loader'].dataset)

    return logs['Accuracy']
Example no. 12
def create_json():
    text = ""
    for root, dirs, files in os.walk('messages/inbox'):
        for folder in pbar(dirs, desc='Converting messages:'):  # avoid shadowing builtin 'dir'
            curr_dir = os.path.join(os.getcwd(), 'messages/inbox', folder)
            for file in os.listdir(curr_dir):
                if file.endswith(".json"):
                    with open(curr_dir + '/' + file, "r") as message_file:
                        conversation = json.load(message_file)

                        if os.path.exists(os.getcwd() + '/me.txt'):
                            with open('me.txt', "r") as my_name:
                                name = my_name.read()
                        else:
                            participants = conversation['participants']
                            counter = 0
                            print("- Setting Parameters -")
                            for participant in participants:
                                print("[%s] %s" %
                                      (counter, participant['name']))
                                counter += 1
                            index = input(
                                "Which of the participants are you?:\n")

                            # isdigit() already rules out negatives; 0 is a valid choice
                            if index.isdigit() and int(index) < len(participants):
                                name = participants[int(index)]['name']
                                with open('me.txt', "w") as my_name:
                                    my_name.write(name)
                            else:
                                print("Invalid input")
                                break

                        participants = conversation['participants']
                        messages = conversation['messages']
                        for message in messages:
                            if message[
                                    'sender_name'] == name and 'content' in message:
                                text += (message['content'])

        all_messages = " ".join(sorted(text.split()))
        with open('output.txt', 'w') as output:
            output.write(all_messages)
        return  # only process the first level of directories under messages/inbox
Example no. 13
 def _epoch(self,
            X,
            disc_opt=None,
            ae_opt=None,
            n_batches=None,
            n_iter=(1, 1)):
     """Evaluate/optimize for one epoch.
     X: torch.utils.data.DataLoader
     disc_opt: torch.optim.Optimizer for discriminators or None if not training
     ae_opt: torch.optim.Optimizer for autoencoder or None if not training
     n_batches: number of batches to draw or None for all data
     n_iter: tuple of steps per batch for discriminators, autoencoder
     """
     iter_losses = defaultdict(list)
     it = _take_batches(X, n_batches) if n_batches else X
     desc = 'training batch' if disc_opt else 'validating batch'
     for x in pbar(it, desc=desc, leave=False):
         x = self._wrap(x)
         for _ in range(n_iter[0]):
             self.zero_grad()
             _freeze(self.E, self.G)
             _unfreeze(self.C, self.D)
             loss_components = self.discriminator_loss(x)
             if disc_opt:
                 loss = sum(loss_components.values())
                 loss.backward()
                 disc_opt.step()
                 del loss
             for k, v in loss_components.items():
                 iter_losses[k].append(v.data.cpu().numpy())
         for _ in range(n_iter[1]):
             self.zero_grad()
             _freeze(self.C, self.D)
             _unfreeze(self.E, self.G)
             loss_components = self.autoencoder_loss(x)
             if ae_opt:
                 loss = sum(loss_components.values())
                 loss.backward()
                 ae_opt.step()
                 del loss
             for k, v in loss_components.items():
                 iter_losses[k].append(v.data.cpu().numpy())
     return {k: np.mean(v) for k, v in iter_losses.items()}
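_freeze and _unfreeze, called throughout Examples no. 13, 19, and 22, are likewise undefined in these listings. A minimal sketch, assuming they just toggle requires_grad on every parameter of the given modules:

def _freeze(*modules):
    # hypothetical helper: exclude these modules' parameters from gradient computation
    for module in modules:
        for p in module.parameters():
            p.requires_grad = False


def _unfreeze(*modules):
    for module in modules:
        for p in module.parameters():
            p.requires_grad = True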
Example no. 14
def get_accuracies(model, model_vars, data):
    accuracies_for_errors = []
    accuracies_for_errors_t = []
    for e in pbar(possible_errors):
        for i in (0, 1, 2, 6, 7, 8):
            current_var = model_vars[i]
            model.variables[i].assign(current_var +
                                      e * np.random.randn(*current_var.shape))

        accuracies_for_errors.append(
            np.mean(
                tf.keras.metrics.categorical_accuracy(
                    data.y_train, model.predict(data.x_train))))

        accuracies_for_errors_t.append(
            np.mean(
                tf.keras.metrics.categorical_accuracy(
                    data.y_test, model.predict(data.x_test))))
    return accuracies_for_errors, accuracies_for_errors_t
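possible_errors and the model/model_vars objects are defined outside this snippet. A hypothetical setup consistent with how they are used (names and values assumed):

possible_errors = np.linspace(0.0, 0.1, 21)  # perturbation std values to sweep
model_vars = [v.numpy() for v in model.variables]  # snapshot of the clean weights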
Example no. 15
def test_model(model, params, print_log=False):
    model = model.eval()
    phase = 'test'
    logs = {'Accuracy': 0.0}

    for images, labels in pbar(getattr(params, phase + '_loader')):
        images = images.reshape(-1, 28 * 28).to(device)
        labels = labels.to(device)

        with torch.no_grad():
            outputs = model(images)
            accuracy = torch.sum(torch.max(outputs, 1)[1] == labels.data).item()
            logs['Accuracy'] += accuracy

    logs['Accuracy'] /= len(getattr(params, phase + '_loader').dataset)

    if print_log:
        print('Accuracy of the network on the 10000 '
              'test images: {:.2f}%'.format(logs['Accuracy'] * 100))

    return logs['Accuracy']
Example no. 16
 def fit_step(self, X, loss_fn, optimizer=None, n_iters=None):
     """Optimize for one epoch.
     X: torch.utils.data.DataLoader
     loss_fn: return dict of loss component Variables
     optimizer: if falsy, just compute loss components (e.g. for validation)
     n_iters: number of batches. If falsy, use all data.
     """
     batch_losses = defaultdict(list)
     loss_name = loss_fn.__name__
     it = _take_batches(X, n_iters) if n_iters else X
     # iterate over batches, stepping the optimizer if one was given
     for x in pbar(it, desc=loss_name, leave=False):
         if optimizer:
             self.zero_grad()
         loss_components = loss_fn(_wrap(x))
         if optimizer:
             loss = sum(loss_components.values())
             loss.backward()
             optimizer.step()
         for k, v in loss_components.items():
             batch_losses[k].append(v.data.cpu().numpy())
     return {k + '_' + loss_name: np.mean(v) for k, v in batch_losses.items()}
Example no. 17
plt.plot(rm[0])
plt.plot(rm[1])
plt.plot(rm[2])
plt.xlim([400,800])

# %%
# timeit
# pbar = ProgressBar()
from tqdm import tqdm as pbar

num_pulses = 1000
streakspeed = 95  # meV/fs
X = [""]*num_pulses
y = [""]*num_pulses

for i in pbar(range(num_pulses), colour='red', ncols=100):
    x1 = Pulse.from_GS(dT=np.random.uniform(10/2.355, 120/2.355),
                       dE=np.random.uniform(0.2/2.355, 1.8/2.355),
                       num_electrons1=np.random.randint(15, 85),
                       num_electrons2=np.random.randint(15, 85),
                       centralE=np.random.uniform(70, 76))
    x1.get_spectra(streakspeed, discretized=False)
    X[i] = x1
# %%
x = X[17]
(xuv, str1, str2) = x.get_augmented_spectra(0, discretized=False)
b2 = Raw_data(np.asarray((xuv, str1, str2)), tof_ens, x.get_temp(),
              num_electrons1=x.num_electrons1, num_electrons2=x.num_electrons2)
plt.plot(b2.get_raw_matrix().T)
Example no. 18
    def fit(self,
            X_train,
            X_valid=None,
            EG_opt_fn=None,
            D_opt_fn=None,
            C_opt_fn=None,
            n_iter=1,
            n_batches=None,
            n_epochs=100,
            log_fn=None,
            log_every=1,
            checkpoint_fn=None,
            checkpoint_every=10):
        """
        X_train: torch.utils.data.DataLoader
        X_valid: torch.utils.data.DataLoader or None
        EG_opt_fn, D_opt_fn, C_opt_fn: takes parameter set, returns torch.optim.Optimizer
            if None, a default optimizer will be used
        n_iter: int/tuple # of E/G, D, C optimizer steps/batch
        n_batches: # of batches per epoch or None for all data
        n_epochs: number of training epochs
        log_fn: takes diagnostic dict, called after every nth epoch
        log_every: call log function every nth epoch
        checkpoint_fn: takes model, epoch; called after every nth epoch
        checkpoint_every: call checkpoint function every nth epoch
        """
        _unfreeze(self)

        try:
            assert len(n_iter) == 3
        except TypeError:
            n_iter = (n_iter, ) * 3

        # default optimizers
        EG_opt_fn = EG_opt_fn or (
            lambda p: torch.optim.Adam(p, lr=8e-4, betas=(.5, .9)))
        D_opt_fn = D_opt_fn or (
            lambda p: torch.optim.Adam(p, lr=8e-4, betas=(.5, .9)))
        C_opt_fn = C_opt_fn or (
            lambda p: torch.optim.Adam(p, lr=8e-4, betas=(.5, .9)))

        # define optimization order/separation of networks
        optimizers, loss_fns = [], []
        # encoder/generator together
        optimizers.append(
            EG_opt_fn(chain(self.E.parameters(), self.G.parameters())))
        loss_fns.append(self.autoencoder_loss)
        # discriminator
        if self.adversarial_weight != 0:
            optimizers.append(D_opt_fn(self.D.parameters()))
            loss_fns.append(self.discriminator_loss)
        # code discriminator
        if self.code_weight != 0:
            optimizers.append(C_opt_fn(self.C.parameters()))
            loss_fns.append(self.code_discriminator_loss)
# #         discriminators together
#         optimizers.append(D_opt_fn(chain(
#             self.D.parameters(), self.C.parameters()
#         )))
#         loss_fns.append(lambda x: self.code_discriminator_loss(x)+self.discriminator_loss(x))

        for i in pbar(range(n_epochs), desc='epoch'):
            diagnostic = defaultdict(dict)
            report = log_fn and (i % log_every == 0 or i == n_epochs - 1)
            checkpoint = checkpoint_every and checkpoint_fn and (
                (i + 1) % checkpoint_every == 0 or i == n_epochs - 1)
            # train for one epoch
            self.train()
            diagnostic['train'].update(
                self._epoch(X_train, loss_fns, optimizers, n_iter, n_batches))
            # validate for one epoch
            self.eval()
            diagnostic['valid'].update(self._epoch(X_valid, loss_fns))
            # log the dict of loss components
            if report:
                log_fn(diagnostic)
            if checkpoint:
                checkpoint_fn(self, i + 1)
Example no. 19
    def fit(self,
            X_train, X_valid=None,
            disc_opt_fn=None, ae_opt_fn=None,
            disc_iters=8, ae_iters=16,
            log_fn=None,
            n_epochs=100):
        """
        X_train: torch.utils.data.DataLoader
        X_valid: torch.utils.data.DataLoader or None
        disc_opt_fn: takes parameter set, returns torch.optim.Optimizer
            if None, a default Optimizer will be used
        ae_opt_fn: takes parameter set, returns torch.optim.Optimizer
            if None, a default Optimizer will be used
        disc_iters: number of discriminator batches (optimizer steps) per epoch
        ae_iters: number of autoencoder batches (optimizer steps) per epoch
        log_fn: takes diagnostic dict, called after every epoch
        n_epochs: number of discriminator, autoencoder training iterations
        """
        _unfreeze(self)

        # default optimizers
        disc_opt_fn = disc_opt_fn or (lambda p: torch.optim.Adam(p, lr=3e-4))
        ae_opt_fn = ae_opt_fn or (lambda p: torch.optim.Adam(p, lr=3e-4))

        disc_opt = disc_opt_fn(chain(
            self.D.parameters(), self.C.parameters()))
        ae_opt = ae_opt_fn(chain(
            self.E.parameters(), self.G.parameters()))

        for i in pbar(range(n_epochs), desc='epoch'):
            diagnostic = defaultdict(dict)

            # train discriminators
            self.train() # e.g. for BatchNorm
            _freeze(self.E, self.G)
            _unfreeze(self.C, self.D)
            diagnostic['train'].update( self.fit_step(
                X_train, self.discriminator_loss, disc_opt, disc_iters ))

            # validate discriminators
            if X_valid is not None:
                self.eval()
                _freeze(self)
                diagnostic['valid'].update( self.fit_step(
                    X_valid, self.discriminator_loss ))

            # train autoencoder
            self.train()
            _freeze(self.C, self.D)
            _unfreeze(self.E, self.G)
            diagnostic['train'].update( self.fit_step(
                X_train, self.autoencoder_loss, ae_opt, ae_iters ))

            # validate autoencoder
            if X_valid is not None:
                self.eval()
                _freeze(self)
                diagnostic['valid'].update( self.fit_step(
                    X_valid, self.autoencoder_loss ))

            # log the dict of losses
            if log_fn:
                log_fn(diagnostic)
Example no. 20
def train(model, params, print_log=False):
    model_name = "{}_lr{}".format(model.__name__, params.learning_rate)
    writer = SummaryWriter('{}/{}/'.format(mkSavePath('runs', params), model_name))
    optimizer = model.opt

    if params.step_lr:
        scheduler = lr_scheduler.StepLR(optimizer, step_size=params.step_lr, gamma=0.1)

    criterion = nn.CrossEntropyLoss()
    best_model = copy.deepcopy(model.state_dict())
    best_ep = 0
    max_acc = test_model(model, params)
    save_H = False

    for epoch in pbar(range(params.num_epochs)):
        for phase in ['train', 'test']:
            logs = {'Loss': 0.0, 'Accuracy': 0.0}
            # Set the model to the correct phase
            model.train() if phase == 'train' else model.eval()

            for images, labels in getattr(params, phase + '_loader'):
                # Move tensors to the configured device
                images = images.reshape(-1, 28 * 28).to(device)
                labels = labels.to(device)

                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):

                    # Forward pass
                    outputs = model(images)
                    loss = criterion(outputs, labels)
                    accuracy = torch.sum(torch.max(outputs, 1)[1] == labels.data).item()

                    # Update log
                    logs['Loss'] += images.shape[0] * loss.detach().item()
                    logs['Accuracy'] += accuracy

                    # Backward pass
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                    if not save_H:
                        init_H = model.H.detach().cpu().numpy().diagonal()
                        max_H = None
                        save_H = True

            logs['Loss'] /= len(getattr(params, phase + '_loader').dataset)
            logs['Accuracy'] /= len(getattr(params, phase + '_loader').dataset)
            writer.add_scalars('Loss', {phase: logs['Loss']}, epoch+1)
            writer.add_scalars('Accuracy', {phase: logs['Accuracy']}, epoch+1)

            if print_log:
                print('\n Epoch [{}]: ({}) Loss = {:.6f}, Accuracy = {:.4f}%'
                      .format(epoch+1, phase, logs['Loss'], logs['Accuracy']*100))

            if phase == 'test' and logs['Accuracy'] > max_acc:
                max_acc = logs['Accuracy']
                best_ep = epoch + 1
                best_model = copy.deepcopy(model.state_dict())
                max_H = model.H.detach().cpu().numpy().diagonal()

        if params.step_lr:
            scheduler.step()

    # write to TensorBoard
    writer.add_text('Best_Accuracy', str(max_acc), best_ep)
    writer.add_histogram('init_H', init_H)
    if max_H is not None:  # set only if test accuracy improved at least once
        writer.add_histogram('max_H', max_H, best_ep)

    # save model
    PATH = '{}/{}.pt'.format(mkSavePath('model', params), model_name)
    torch.save(best_model, PATH)

    writer.close()
Example no. 21
import os
import cv2
from tqdm import tqdm as pbar
import numpy as np

IMG_SIZE = 50  # images must be uniform; resize everything to 50x50
CATS = "PetImages/Cat"
DOGS = "PetImages/Dog"
LABELS = {CATS: 0, DOGS: 1}

training_data = []
catcount = 0
dogcount = 0

for label in LABELS:
    for f in pbar(os.listdir(label)):
        try:
            path = os.path.join(label, f)
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            training_data.append([np.array(img), np.eye(2)[LABELS[label]]])

            if label == CATS:
                catcount += 1
            elif label == DOGS:
                dogcount += 1
        except Exception as e:
            # a handful of images in the dataset are corrupt; skip them
            pass  # print(str(e)) to inspect the errors if needed

np.random.shuffle(training_data)
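A hypothetical continuation, matching the view(-1, 1, 50, 50) reshape used in Example no. 4 (torch import and tensor names assumed, not part of the original):

np.save("training_data.npy", np.array(training_data, dtype=object), allow_pickle=True)
X = torch.tensor(np.array([row[0] for row in training_data]),
                 dtype=torch.float32).view(-1, 1, 50, 50) / 255.0
y = torch.tensor(np.array([row[1] for row in training_data]), dtype=torch.float32)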
Example no. 22
    def fit(self,
            X_train,
            X_valid=None,
            opt_fn=torch.optim.Adam,
            opt_params={
                'lr': 8e-4,
                'betas': (.5, .9)
            },
            n_iter=(2, 1, 1),
            n_batches=None,
            n_epochs=10,
            log_fn=None,
            log_every=1,
            checkpoint_fn=None,
            checkpoint_every=2):
        """
        X_train: torch.utils.data.DataLoader
        X_valid: torch.utils.data.DataLoader or None
        opt_fn: nn.Optimizer constructor or triple for E/G, D, C
        opt_params: dict of keyword args for optimizer or triple for E/G, D, C
        n_iter: int or triple # of E/G, D, C optimizer steps/batch
        n_batches: int or pair # of train, valid batches per epoch (None for all data)
        n_epochs: number of training epochs
        log_fn: takes diagnostic dict, called after every nth epoch
        log_every: call log function every nth epoch
        checkpoint_fn: takes model, epoch; called after every nth epoch
        checkpoint_every: call checkpoint function every nth epoch
        """
        _unfreeze(self)

        n_iter = _as_tuple(n_iter, 3)
        train_batches, valid_batches = _as_tuple(n_batches, 2)
        EG_opt_fn, D_opt_fn, C_opt_fn = (
            # bind fn/hyperparams as defaults: lambdas from one genexp share its loop variables
            lambda p, fn=fn, hyperparams=hyperparams: fn(p, **hyperparams)
            for fn, hyperparams in zip(
                _as_tuple(opt_fn, 3), _as_tuple(opt_params, 3)))

        # define optimization order/separation of networks
        optimizers, loss_fns = [], []
        # if neither autoencoder loss is present, skip the encoder and just train a GAN
        if self.use_E():
            optimizers.append(
                EG_opt_fn(chain(self.E.parameters(), self.G.parameters())))
        else:
            optimizers.append(EG_opt_fn(self.G.parameters()))
        loss_fns.append(self.autoencoder_loss)
        # discriminator
        if self.use_D():
            optimizers.append(D_opt_fn(self.D.parameters()))
            loss_fns.append(self.discriminator_loss)
        # code discriminator
        if self.use_C():
            optimizers.append(C_opt_fn(self.C.parameters()))
            loss_fns.append(self.code_discriminator_loss)
# #         discriminators together
#         optimizers.append(D_opt_fn(chain(
#             self.D.parameters(), self.C.parameters()
#         )))
#         loss_fns.append(lambda x: self.code_discriminator_loss(x)+self.discriminator_loss(x))

        for i in pbar(range(n_epochs), desc='epoch'):
            diagnostic = defaultdict(dict)
            report = log_fn and (i % log_every == 0 or i == n_epochs - 1)
            checkpoint = checkpoint_every and checkpoint_fn and (
                (i + 1) % checkpoint_every == 0 or i == n_epochs - 1)
            # train for one epoch
            self.train()
            diagnostic['train'].update(
                self._epoch(X_train, loss_fns, optimizers, n_iter,
                            train_batches))
            # validate for one epoch
            self.eval()
            diagnostic['valid'].update(
                self._epoch(X_valid, loss_fns, n_batches=valid_batches))
            # log the dict of loss components
            if report:
                log_fn(diagnostic)
            if checkpoint:
                checkpoint_fn(self, i + 1)
Example no. 23
def train_model(model, params, summary_path, output):
    writer = SummaryWriter(summary_path)
    model = model.to(params['device'])
    optimizer = optim.AdamW(model.parameters())
    total_updates = params['num_epochs'] * len(params['train_loader'])

    criterion = nn.CrossEntropyLoss()
    best_accuracy = test_model(model, params)
    best_model = copy.deepcopy(model.state_dict())

    for epoch in pbar(range(params['num_epochs'])):
        # Each epoch has a training and validation phase
        for phase in ['train', 'validation']:

            # Loss accumulator for each epoch
            logs = {'Loss': 0.0, 'Accuracy': 0.0}

            # Set the model to the correct phase
            model.train() if phase == 'train' else model.eval()

            # Iterate over data
            for image, label in params[phase + '_loader']:
                image = image.to(params['device'])
                label = label.to(params['device'])

                # Zero gradient
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):

                    # Forward pass
                    prediction = model(image)
                    loss = criterion(prediction, label)
                    accuracy = torch.sum(
                        torch.max(prediction, 1)[1] == label.data).item()

                    # Update log
                    logs['Loss'] += image.shape[0] * loss.detach().item()
                    logs['Accuracy'] += accuracy

                    # Backward pass
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

            # Normalize and write the data to TensorBoard
            logs['Loss'] /= len(params[phase + '_loader'].dataset)
            logs['Accuracy'] /= len(params[phase + '_loader'].dataset)
            writer.add_scalars('Loss', {phase: logs['Loss']}, epoch)
            writer.add_scalars('Accuracy', {phase: logs['Accuracy']}, epoch)

            # Save the best weights
            if phase == 'validation' and logs['Accuracy'] > best_accuracy:
                best_accuracy = logs['Accuracy']
                best_model = copy.deepcopy(model.state_dict())

        # Write best weights to disk
        if epoch % params['check_point'] == 0 or epoch == params[
                'num_epochs'] - 1:
            torch.save(best_model, output)

    final_accuracy = test_model(model, params)
    writer.add_text('Final_Accuracy', str(final_accuracy), 0)
    writer.close()
Example no. 24
def calculate_dose(
    normalized_data: pd.DataFrame,
    settings: PyskindoseSettings,
    table: Phantom,
    pad: Phantom,
) -> Tuple[Phantom, Optional[Dict[str, Any]]]:
    """Calculate skin dose.

    This function initializes the skin dose calculations.

    Parameters
    ----------
    normalized_data : pd.DataFrame
        RDSR data, normalized for compliance with PySkinDose.
    settings : PyskindoseSettings
        Settings class for PySkinDose
    table : Phantom
        Patient support table phantom
    pad : Phantom
        Patient support pad phantom

    Returns
    -------
    Tuple[Phantom, Optional[Dict[str, Any]]]
        The patient phantom used in the calculation together with a dict
        holding the dose calculation results, or (None, None) if the mode
        is not set to calculate dose.

    """
    if settings.mode != c.MODE_CALCULATE_DOSE:
        logger.debug(
            "Mode not set to calculate dose. Returning without doing anything")
        return None, None

    logger.info("Start performing dose calculations")
    patient = Phantom(
        phantom_model=settings.phantom.model,
        phantom_dim=settings.phantom.dimension,
        human_mesh=settings.phantom.human_mesh,
    )

    # position objects in starting position
    position_geometry(patient=patient,
                      table=table,
                      pad=pad,
                      pad_thickness=settings.phantom.dimension.pad_thickness,
                      patient_offset=[
                          settings.phantom.patient_offset.d_lon,
                          settings.phantom.patient_offset.d_ver,
                          settings.phantom.patient_offset.d_lat
                      ],
                      patient_orientation=settings.phantom.patient_orientation)

    normalized_data = fetch_and_append_hvl(data_norm=normalized_data)

    # Check which irradiation events contain updated geometry
    # parameters relative to the previous irradiation event
    new_geometry = check_new_geometry(normalized_data)

    # fetch the k_bs interpolation object (k_bs = f(field_size)) for all events
    back_scatter_interpolation = calculate_k_bs(data_norm=normalized_data)

    k_tab = calculate_k_tab(
        data_norm=normalized_data,
        estimate_k_tab=settings.estimate_k_tab,
        k_tab_val=settings.k_tab_val,
    )

    total_number_of_events = len(normalized_data)

    if settings.plot.notebook_mode:
        from tqdm import tqdm_notebook as pbar
    else:
        from tqdm import tqdm as pbar

    output_template = {
        c.OUTPUT_KEY_HITS: [[]] * total_number_of_events,
        c.OUTPUT_KEY_KERMA: [np.array] * total_number_of_events,
        c.OUTPUT_KEY_CORRECTION_INVERSE_SQUARE_LAW:
        [[]] * total_number_of_events,
        c.OUTPUT_KEY_CORRECTION_BACK_SCATTER: [[]] * total_number_of_events,
        c.OUTPUT_KEY_CORRECTION_MEDIUM: [[]] * total_number_of_events,
        c.OUTPUT_KEY_CORRECTION_TABLE: [[]] * total_number_of_events,
        c.OUTPUT_KEY_DOSE_MAP: np.zeros(len(patient.r)),
    }

    output = calculate_irradiation_event_result(
        normalized_data=normalized_data,
        event=0,
        total_events=len(normalized_data),
        new_geometry=new_geometry,
        k_tab=k_tab,
        hits=[],
        patient=patient,
        table=table,
        pad=pad,
        back_scatter_interpolation=back_scatter_interpolation,
        output=output_template,
        pbar=pbar(total=total_number_of_events,
                  leave=False,
                  desc='calculating skindose'))

    return patient, output
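A side note on the conditional tqdm import above (repeated in Example no. 26): recent tqdm releases ship tqdm.auto, which picks the notebook or console frontend automatically, so the if/else can be collapsed to:

from tqdm.auto import tqdm as pbar  # tqdm_notebook in notebooks, plain tqdm otherwise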
Example no. 25
plt.savefig('nnanalysis_10.png', bbox_inches='tight')

##Plot2
std = np.sqrt(0.0025)
errors = []

inds_1 = [0, 1]
inds_2 = [6, 7]

for i in (0, 1, 2, 6, 7, 8):
    current_var = model_eo_vars_DCT_N36[i]
    errors.append(std * np.random.randn(*current_var.shape))

accuracies = []

for n in pbar(range(N * 2 + 1)):
    if n <= 64:
        for i in (0, 1):
            error_mask = np.zeros_like(model_eo_vars_DCT_N36[i])
            error_mask[n:] = 1
            model_eo_DCT_N36.variables[i].assign(model_eo_vars_DCT_N36[i] +
                                                 errors[i] * error_mask)
    for i in (6, 7):
        if n >= 64:
            error_mask = np.zeros_like(model_eo_vars_DCT_N36[i])
            error_mask[n - N:] = 1
        else:
            error_mask = np.ones_like(model_eo_vars_DCT_N36[i])
        model_eo_DCT_N36.variables[i].assign(model_eo_vars_DCT_N36[i] +
                                             errors[i - 3] * error_mask)
    accuracies.append(
Example no. 26
def create_dose_map_plot(patient: Phantom, settings: PyskindoseSettings,
                         dose_map: np.ndarray) -> None:
    """Plot a map of the absorbed skindose upon the patient phantom.

    This function creates and plots an offline plotly graph of the
    skin dose distribution on the phantom. The colorscale is mapped to the
    absorbed skin dose value. Only available for the phantom types "plane",
    "cylinder", and "human".

    Parameters
    ----------
    patient : Phantom
        patient phantom
    settings : PyskindoseSettings
        Settings class for PySkinDose
    dose_map : np.ndarray
        array with dose matrix, where each element is to be mapped to the
        corresponding skin cell of the patient.

    """
    if not settings.plot.plot_dosemap:
        logger.debug(
            "Mode not set to plot dosemap. Returning without doing anything")
        return

    if dose_map is None:
        logger.debug("Cannot plot dosemap since the dose calculation "
                     "has not been conducted.")
        raise ValueError("Dosemap is None. Mode must be set to calculate "
                         "dose in order to plot the dosemap.")

    # Fix error with plotly layout for 2D plane patient.
    if patient.phantom_model == PHANTOM_MODEL_PLANE:
        patient = Phantom(phantom_model=settings.phantom.model,
                          phantom_dim=settings.phantom.dimension)

    # append dosemap to patient
    patient.dose = dose_map

    COLOR_CANVAS, COLOR_PLOT_TEXT, COLOR_GRID, COLOR_ZERO_LINE = \
        fetch_plot_colors(dark_mode=settings.plot.dark_mode)

    PLOT_HEIGHT, PLOT_WIDTH = fetch_plot_size(
        notebook_mode=settings.plot.notebook_mode)

    PLOT_MARGINS = fetch_plot_margin(notebook_mode=settings.plot.notebook_mode)

    lat_text = [
        f"<b>lat:</b> {np.around(patient.r[ind, 2],2)} cm<br>"
        for ind in range(len(patient.r))
    ]

    lon_text = [
        f"<b>lon:</b> {np.around(patient.r[ind, 0],2)} cm<br>"
        for ind in range(len(patient.r))
    ]

    ver_text = [
        f"<b>ver:</b> {np.around(patient.r[ind, 1],2)} cm<br>"
        for ind in range(len(patient.r))
    ]

    dose_text = [
        f"<b>skin dose:</b> {round(patient.dose[ind],2)} mGy"
        for ind in range(len(patient.r))
    ]

    hover_text = [
        lat_text[cell] + lon_text[cell] + ver_text[cell] + dose_text[cell]
        for cell in range(len(patient.r))
    ]

    # create mesh object for the phantom
    phantom_mesh = [
        go.Mesh3d(x=patient.r[:, 0],
                  y=patient.r[:, 1],
                  z=patient.r[:, 2],
                  i=patient.ijk[:, 0],
                  j=patient.ijk[:, 1],
                  k=patient.ijk[:, 2],
                  intensity=patient.dose,
                  colorscale=DOSEMAP_COLORSCALE,
                  showscale=True,
                  hoverinfo='text',
                  text=hover_text,
                  name="Human",
                  colorbar=dict(tickfont=dict(color=COLOR_PLOT_TEXT),
                                title="Skin dose [mGy]",
                                titlefont=dict(family=PLOT_FONT_FAMILY,
                                               color=COLOR_PLOT_TEXT)))
    ]

    layout = create_layout_for_dose_map_plots(PLOT_MARGINS=PLOT_MARGINS,
                                              PLOT_HEIGHT=PLOT_HEIGHT,
                                              PLOT_WIDTH=PLOT_WIDTH,
                                              COLOR_PLOT_TEXT=COLOR_PLOT_TEXT,
                                              COLOR_CANVAS=COLOR_CANVAS)

    # create figure
    fig = go.Figure(data=phantom_mesh, layout=layout)

    if settings.plot.interactivity:
        fig.show()
        return

    # proceed with creating static dose map

    eyes = [PLOT_EYE_RIGHT, PLOT_EYE_BACK, PLOT_EYE_LEFT, PLOT_EYE_FRONT]

    names = PLOT_ORDER_STATIC
    file_type_static = PLOT_FILE_TYPE_STATIC

    if settings.plot.notebook_mode:
        from tqdm import tqdm_notebook as pbar

    else:
        from tqdm import tqdm as pbar

    # create static dose map plots
    for i in pbar(range(len(eyes)), desc='saving static dosemaps'):
        fig['layout']['scene']['camera'] = eyes[i]
        fig.write_image(names[i] + file_type_static)

    # show dose map plot with PIL if not in notebook mode
    if not settings.plot.notebook_mode:
        for image_file_name in [name + file_type_static for name in names]:
            im = Image.open(image_file_name)
            im.show()
        return

    # proceed with showing the dose map plot in notebook mode
    create_notebook_dose_map_plot(
        names=names,
        file_type_static=file_type_static,
    )