Code example #1
File: btrfs.py Project: jisqyv/buttersink
    def __init__(self, fileSystem, rootid, generation, info):
        """ Initialize. """
        logger.debug("Volume %d/%d: %s", rootid, generation, pretty(info))
        self.fileSystem = fileSystem
        self.id = rootid  # id in BTRFS_ROOT_TREE_OBJECTID, also FS treeid for this volume
        self.original_gen = info.otransid
        self.current_gen = info.ctransid
        # self.size = info.bytes_used
        self.readOnly = bool(info.flags & BTRFS_ROOT_SUBVOL_RDONLY)
        self.level = info.level
        self.uuid = info.uuid
        self.parent_uuid = info.parent_uuid
        self.received_uuid = info.received_uuid
        self.sent_gen = info.stransid

        self.totalSize = None
        self.exclusiveSize = None

        self.info = info

        self.links = {}

        assert rootid not in self.fileSystem.volumes, rootid
        self.fileSystem.volumes[rootid] = self

        logger.debug("%s", self)
Code example #2
File: btrfs.py Project: AmesCornish/buttersink
    def _getRoots(self):
        for (header, buf) in self._walkTree(BTRFS_ROOT_TREE_OBJECTID):
            if header.type == objectTypeKeys['BTRFS_ROOT_BACKREF_KEY']:
                info = buf.read(btrfs_root_ref)
                name = buf.readView(info.name_len).tobytes()

                directory = self.INO_LOOKUP(treeid=header.offset, objectid=info.dirid)

                logger.debug("%s: %s %s", name, pretty(info), pretty(directory))

                self.volumes[header.objectid]._addLink(
                    header.offset,
                    info.dirid,
                    info.sequence,
                    directory.name,
                    name,
                )
            elif header.type == objectTypeKeys['BTRFS_ROOT_ITEM_KEY']:
                if header.len == btrfs_root_item.size:
                    info = buf.read(btrfs_root_item)
                elif header.len == btrfs_root_item_v0.size:
                    info = buf.read(btrfs_root_item_v0)
                else:
                    assert False, header.len

                if (
                    (header.objectid >= BTRFS_FIRST_FREE_OBJECTID
                     and header.objectid <= BTRFS_LAST_FREE_OBJECTID)
                        or header.objectid == BTRFS_FS_TREE_OBJECTID
                ):
                    assert header.objectid not in self.volumes, header.objectid
                    self.volumes[header.objectid] = _Volume(
                        self,
                        header.objectid,
                        header.offset,
                        info,
                    )
            elif header.type == objectTypeKeys['BTRFS_DIR_ITEM_KEY']:
                info = buf.read(btrfs_dir_item)
                name = buf.readView(info.name_len).tobytes()
                if name == "default":
                    self.defaultID = info.location.objectid
                logger.debug("Found dir '%s' is %d", name, self.defaultID)
Code example #3
File: btrfs.py Project: jisqyv/buttersink
    def _getRoots(self):
        for (header, buf) in self._walkTree(BTRFS_ROOT_TREE_OBJECTID):
            if header.type == objectTypeKeys['BTRFS_ROOT_BACKREF_KEY']:
                data = Structure(
                    (btrfs_root_ref, 'ref'),
                    (t.char, 'name', header.len - btrfs_root_ref.size),
                ).read(buf)

                info = data.ref
                name = data.name

                directory = self.INO_LOOKUP(treeid=header.offset,
                                            objectid=info.dirid)

                logger.debug("%s: %s %s", name, pretty(info),
                             pretty(directory))

                self.volumes[header.objectid]._addLink(
                    header.offset,
                    info.dirid,
                    info.sequence,
                    directory.name,
                    name,
                )
            elif header.type == objectTypeKeys['BTRFS_ROOT_ITEM_KEY']:
                if header.len == btrfs_root_item.size:
                    info = btrfs_root_item.read(buf)
                elif header.len == btrfs_root_item_v0.size:
                    info = btrfs_root_item_v0.read(buf)
                else:
                    assert False, header.len

                if ((header.objectid >= BTRFS_FIRST_FREE_OBJECTID
                     and header.objectid <= BTRFS_LAST_FREE_OBJECTID)
                        or header.objectid == BTRFS_FS_TREE_OBJECTID):
                    assert header.objectid not in self.volumes, header.objectid
                    self.volumes[header.objectid] = _Volume(
                        self,
                        header.objectid,
                        header.offset,
                        info,
                    )
Code example #4
    def get(self):
        # user = User.getForSession(self.request)
        # userSpotify = user.getSpotifyAPI()

        userSpotify = User.getSpotifyForSession(self.request)

        if not userSpotify:
            self.response.write("<h1>Not logged in or session expired</h1>")
            self.response.write('<a href="/auth/login/spotify">login</a>')
            return
        self.response.write("<h1>Your information:</h1>")
        data = userSpotify.getUserInformation()
        self.response.write("<pre>")
        self.response.write(cgi.escape(pretty(data)))
        self.response.write('</pre>')
        self.response.write(
            '<a href="/">home</a> <a href="/logout">logout</a> ')
Code example #5
    def get(self):
        if 'error' in self.request.GET:
            self.response.status = 401
            self.response.write("<h1>Authorization unsuccessful</h1>")
            self.response.write("<p>An error occured: <code>")
            self.response.write(
                cgi.escape(self.request.GET['error']) + "</code>.</p>")
            return
        code = self.request.GET['code']
        scope = self.request.GET['scope']

        if scope != STRAVA_SCOPE:
            self.response.status = 401
            self.response.write("<h1>Authorization not granted</h1>")
            self.response.write("<p>Cannot read private activities. ")
            self.response.write(
                '<a href="/auth/login/strava">(try again)</a></p>')
            self.response.write(
                '<script>history.replaceState({}, "", location.pathname)</script>'
            )
            return

        def errorHandler(e):
            self.response.status = 500
            self.response.write(
                "An error occurred when getting the authorization token.")

        resp = safeGet(STRAVA_TOKEN_URL, {
            "grant_type": "authorization_code",
            "code": code,
            "client_id": getenv("STRAVA_ID"),
            "client_secret": getenv("STRAVA_SECRET")
        },
                       error=errorHandler)
        if not resp:
            return

        data = json.load(resp)

        # self.response.headers.add("Content-Type", "application/json")
        self.response.write("<h1>Authorization successful</h1>")
        self.response.write(
            "<p>The following information was received:</p><pre>")
        self.response.write(cgi.escape(pretty(data)) + "</pre>")
        self.response.write(
            '<script>history.replaceState({}, "", location.pathname)</script>')
Code example #6
    def get(self):
        # user = User.getForSession(self.request)
        # userSpotify = user.getSpotifyAPI()

        userSpotify = User.getSpotifyForSession(self.request)

        if not userSpotify:
            self.response.write("<h1>Not logged in or session expired</h1>")
            self.response.write('<a href="/auth/login/spotify">login</a>')
            return
        self.response.write("<h1>Your playlists:</h1>")
        data = userSpotify.getPlaylists()

        for playlist in data["items"]:
            self.response.write("<hr>")
            self.response.write('<img src="%s" width="64" height="64">' %
                                playlist["images"][0]["url"])
            self.response.write(cgi.escape(playlist["name"]))

        self.response.write("<pre>")
        self.response.write(cgi.escape(pretty(data)))
        self.response.write('</pre>')
        self.response.write(
            '<a href="/">home</a> <a href="/logout">logout</a> ')
Code example #7
rttcsvprefix = "./log/rtt-" + args.sname + "/rtt"

y_real_file = tfcsvprefix + "-real-Y.csv"
y_pred_file_tf = tfcsvprefix + "-pred-Y.csv"
y_pred_file_rtt = rttcsvprefix + "-pred-Y.csv"

Y = pd.read_csv(y_real_file, sep=',', header=None, names=['L'])
predYtf = pd.read_csv(y_pred_file_tf, sep=',', header=None, names=['L'])
predYrtt = pd.read_csv(y_pred_file_rtt, sep=',', header=None, names=['L'])

print(args.model)
if args.model == 'logistic':
    emetrixs = score_logistic_regression(predYtf.to_numpy(),
                                         Y.to_numpy(),
                                         tag='tensorflow')
    print(pretty(emetrixs))

    emetrixs = score_logistic_regression(predYrtt.to_numpy(),
                                         Y.to_numpy(),
                                         tag='rosetta')
    print(pretty(emetrixs))

elif args.model == 'linear':
    emetrixs = score_linear_regression(predYtf.to_numpy(),
                                       Y.to_numpy(),
                                       tag='tensorflow')
    print(pretty(emetrixs))

    emetrixs = score_linear_regression(predYrtt.to_numpy(),
                                       Y.to_numpy(),
                                       tag='rosetta')
    print(pretty(emetrixs))
Code example #8
File: trainer.py Project: prem2017/face-verification
def train_network(dataloader,
                  model,
                  loss_function,
                  optimizer,
                  start_lr,
                  end_lr,
                  num_epochs=90,
                  sanity_check=False):
    """Trains the network and saves for different checkpoints such as minimum train/val loss, f1-score, AUC etc. different performance metrics

		Parameters:
		-----------
			dataloader (dict): {key (str):  Value(torch.utils.data.DataLoader)} training and validation dataloader to respective purposes
			model (nn.Module): models to traine the face-recognition
			loss_function (torch.nn.Module): Module to mesure loss between target and model-output
			optimizer (Optimizer): Non vanilla gradient descent method to optimize learning and descent direction
			start_lr (float): For one cycle training the start learning rate
			end_lr (float): the end learning must be greater than start learning rate
			num_epochs (int): number of epochs the one cycle is 
			sanity_check (bool): if the training is perfomed to check the sanity of the model. i.e. to anaswer 'is model is able to overfit for small amount of data?'

		Returns:
		--------
			None: perfoms the required task of training

	"""

    model = model.train()
    logger_msg = '\nDataLoader = %s' \
        '\nModel = %s' \
        '\nLossFunction = %s' \
        '\nOptimizer = %s' \
        '\nStartLR = %s, EndLR = %s' \
        '\nNumEpochs = %s' % (dataloader, model, loss_function, optimizer, start_lr, end_lr, num_epochs)

    logger.info(logger_msg)
    print(logger_msg)

    # [https://arxiv.org/abs/1803.09820]
    # This is used to find optimal learning-rate which can be used in one-cycle training policy
    # [LR]TODO: for finding optimal learning rate
    if util.get_search_lr_flag():
        lr_scheduler = MultiStepLR(optimizer=optimizer,
                                   milestones=list(np.arange(2, 24, 2)),
                                   gamma=10,
                                   last_epoch=-1)

    def get_lr():
        lr = []

        for param_group in optimizer.param_groups:
            lr.append(np.round(param_group['lr'], 11))
        return lr

    def set_lr(lr):

        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    # Loss storage
    current_epoch_batchwise_loss = []
    avg_epoch_loss_container = []  # Stores loss for each epoch, averaged over batches
    all_epoch_batchwise_loss = []
    avg_val_loss_container = []
    val_report_container = []
    f1_checker_container = []
    val_auc_container = []
    test_auc_container = {}
    test_f1_container = {}

    if util.get_search_lr_flag():
        extra_epochs = 4
    else:
        extra_epochs = 20
    total_epochs = num_epochs + extra_epochs

    # One cycle setting of Learning Rate
    num_steps_upndown = 10
    further_lowering_factor = 10
    further_lowering_factor_steps = 4

    def one_cycle_lr_setter(current_epoch):
        if current_epoch <= num_epochs:
            assert end_lr > start_lr, '[EndLR] should be greater than [StartLR]'
            lr_inc_rate = np.round((end_lr - start_lr) / (num_steps_upndown),
                                   9)
            lr_inc_epoch_step_len = max(num_epochs / (2 * num_steps_upndown),
                                        1)

            steps_completed = current_epoch / lr_inc_epoch_step_len
            print('[Steps Completed] = ', steps_completed)
            if steps_completed <= num_steps_upndown:
                current_lr = start_lr + (steps_completed * lr_inc_rate)
            else:
                current_lr = end_lr - (
                    (steps_completed - num_steps_upndown) * lr_inc_rate)
            set_lr(current_lr)
        else:
            current_lr = start_lr / (further_lowering_factor**(
                (current_epoch - num_epochs) // further_lowering_factor_steps))
            set_lr(current_lr)

    if sanity_check:
        train_dataloader = next(iter(dataloader['train']))
        train_dataloader = [train_dataloader] * 32
    else:
        train_dataloader = dataloader['train']

    for epoch in range(total_epochs):
        msg = '\n\n\n[Epoch] = %s' % (epoch + 1)
        print(msg)
        start_time = time.time()
        start_datetime = datetime.now()

        for i, (x, y) in enumerate(train_dataloader):  #
            loss = 0
            # pdb.set_trace()

            x = x.to(device=device, dtype=torch.float)
            y = y.to(device=device, dtype=torch.float)

            # TODO: early breaker
            # if i == 2:
            # 	print('[Break] by force for validation check')
            # 	break

            optimizer.zero_grad()
            output = model(x)  #
            loss = loss_function(output, y)
            loss.backward()
            optimizer.step()

            current_epoch_batchwise_loss.append(loss.item())
            all_epoch_batchwise_loss.append(loss.item())

            batch_run_msg = '\nEpoch: [%s/%s], Step: [%s/%s], InitialLR: %s, CurrentLR: %s, Loss: %s' \
                % (epoch + 1, total_epochs, i + 1, len(train_dataloader), start_lr, get_lr(), loss.item())
            print(batch_run_msg)
        #------------------ End of an Epoch ------------------

        # store average loss
        avg_epoch_loss = np.round(
            sum(current_epoch_batchwise_loss) / (i + 1.0), 6)
        current_epoch_batchwise_loss = []
        avg_epoch_loss_container.append(avg_epoch_loss)

        if not (util.get_search_lr_flag() or sanity_check):
            val_loss, val_report, f1_checker, auc = cal_loss_and_metric(
                model, dataloader['val'], loss_function, epoch + 1)
            val_report['roc'] = 'Removed'
        test_test_data = False
        if not (util.get_search_lr_flag() or sanity_check):
            avg_val_loss_container.append(val_loss)
            val_report_container.append(
                val_report)  # ['epoch_' + str(epoch)] = val_report
            f1_checker_container.append(f1_checker)
            val_auc_container.append(auc)

            if np.round(val_loss, 4) <= np.round(min(avg_val_loss_container),
                                                 4):
                model = save_model(
                    model,
                    extra_extension='_minval')  # + '_epoch_' + str(epoch))

            if np.round(auc, 4) >= np.round(max(val_auc_container), 4):
                model = save_model(
                    model,
                    extra_extension='_maxauc')  # + '_epoch_' + str(epoch))
                test_test_data = True

            if np.round(f1_checker, 4) >= np.round(max(f1_checker_container),
                                                   4):
                model = save_model(
                    model,
                    extra_extension='_maxf1')  # + '_epoch_' + str(epoch))
                test_test_data = True

        if avg_epoch_loss <= min(avg_epoch_loss_container):
            model = save_model(model, extra_extension='_mintrain')

        # Logger msg
        msg = '\n\n\n\n\nEpoch: [%s/%s], InitialLR: %s, CurrentLR= %s \n' \
           '\n\n[Train] Average Epoch-wise Loss = %s \n' \
           '\n\n********************************************************** [Validation]' \
           '\n\n[Validation] Average Epoch-wise loss = %s \n' \
           '\n\n[Validation] Report = %s \n'\
           '\n\n[Validation] F1 scores = %s\n'\
           %(epoch+1, total_epochs, start_lr, get_lr(), avg_epoch_loss_container, avg_val_loss_container, None if not val_report_container else util.pretty(val_report_container[-1]), f1_checker_container)
        logger.info(msg)
        print(msg)

        if not (util.get_search_lr_flag() or sanity_check) or test_test_data:
            test_loss, test_report, test_f1_checker, test_auc = cal_loss_and_metric(
                model,
                dataloader['test'],
                loss_function,
                epoch + 1,
                model_type='test_set')
            test_report['roc'] = 'Removed'
            test_auc_container[epoch + 1] = "{0:.3f}".format(round(
                test_auc, 4))
            test_f1_container[epoch + 1] = "{0:.3f}".format(
                round(test_f1_checker, 4))
            msg = '\n\n\n\n**********************************************************[Test]\n '\
               '[Test] Report = {}' \
               '\n\n[Test] fscore = {}' \
               '\n\n[Test] AUC dict = {}' \
               '\n\n[Test] F1-dict= {}'.format(util.pretty(test_report), test_f1_checker, test_auc_container, test_f1_container)
            logger.info(msg)
            print(msg)

        if avg_epoch_loss < 1e-6 or get_lr()[0] < 1e-11 or get_lr()[0] >= 10:
            msg = '\n\nAvg. Loss = {} or Current LR = {} thus stopping training'.format(
                avg_epoch_loss, get_lr())
            logger.info(msg)
            print(msg)
            break

        # [LR]TODO:
        if util.get_search_lr_flag():
            lr_scheduler.step(
                epoch + 1)  # TODO: Only for estimating good learning rate
        else:
            one_cycle_lr_setter(epoch + 1)

        end_time = time.time()
        end_datetime = datetime.now()
        msg = '\n\n[Time] taken for epoch({}) time = {}, datetime = {} \n\n'.format(
            epoch + 1, end_time - start_time, end_datetime - start_datetime)
        logger.info(msg)
        print(msg)

    # ----------------- End of training process -----------------

    msg = '\n\n[Epoch Loss] = {}'.format(avg_epoch_loss_container)
    logger.info(msg)
    print(msg)

    # [LR]TODO: change for lr finder
    if util.get_search_lr_flag():
        losses = avg_epoch_loss_container
        plot_file_name = 'training_epoch_loss_for_lr_finder.png'
        title = 'Training Epoch Loss'
    else:
        losses = {
            'train': avg_epoch_loss_container,
            'val': avg_val_loss_container
        }
        plot_file_name = 'training_vs_val_avg_epoch_loss.png'
        title = 'Training vs Validation Epoch Loss'
    plot_loss(losses=losses, plot_file_name=plot_file_name, title=title)
    plot_loss(losses=all_epoch_batchwise_loss,
              plot_file_name='training_batchwise.png',
              title='Training Batchwise Loss',
              xlabel='#Batchwise')

    # Save the model
    model = save_model(model)
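
The one-cycle schedule implemented by one_cycle_lr_setter above can be exercised in isolation. A minimal sketch, assuming the same triangular shape with ten steps up and ten steps down (a hypothetical helper, not part of the project):

def one_cycle_lr(epoch, start_lr, end_lr, num_epochs, num_steps_upndown=10):
    # Ramp the LR linearly from start_lr up to end_lr over the first half
    # of the cycle, then back down to start_lr over the second half.
    lr_inc_rate = (end_lr - start_lr) / num_steps_upndown
    step_len = max(num_epochs / (2.0 * num_steps_upndown), 1)
    steps_completed = epoch / step_len
    if steps_completed <= num_steps_upndown:
        return start_lr + steps_completed * lr_inc_rate
    return end_lr - (steps_completed - num_steps_upndown) * lr_inc_rate

# Example: the LR rises from 1e-4 to 1e-2 over the first 20 epochs of a
# 40-epoch cycle, then falls back to 1e-4.
for epoch in (0, 10, 20, 30, 40):
    print(epoch, round(one_cycle_lr(epoch, 1e-4, 1e-2, 40), 6))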