Example #1
    def __load(self, filename):
        try:
            with open(filename, 'rb') as file:
                pgctl_data = file.read(PG_CONTROL_SIZE)

                self.systemidentifier, = struct.unpack_from('Q', pgctl_data, memoffset['ControlFileData.system_identifier'])
                self.catalog_version_no, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.catalog_version_no'])
                self.dbstate, = struct.unpack_from(pgstruct.get_type_format('DBState'), pgctl_data, memoffset['ControlFileData.state'])
                self.checkPoint, = struct.unpack_from('Q', pgctl_data, memoffset['ControlFileData.checkPoint'])
                self.minRecoveryPoint, = struct.unpack_from('Q', pgctl_data, memoffset['ControlFileData.minRecoveryPoint'])
                self.minRecoveryPointTLI, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.minRecoveryPointTLI'])
                self.checkPointCopy = CheckPoint(pgctl_data, memoffset['ControlFileData.checkPointCopy'])
                self.xlog_blcksz, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.xlog_blcksz'])
                self.xlog_seg_size, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.xlog_seg_size'])
                self.blcksz, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.blcksz'])
                self.relseg_size, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.relseg_size'])
                
                self.nameDataLen, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.nameDataLen'])
                
                self.float8ByVal, = struct.unpack_from(pgstruct.get_type_format('bool'), pgctl_data, memoffset['ControlFileData.float8ByVal'])
                self.float4ByVal, = struct.unpack_from(pgstruct.get_type_format('bool'), pgctl_data, memoffset['ControlFileData.float4ByVal'])
                
                self.crc, = struct.unpack_from('I', pgctl_data, memoffset['ControlFileData.crc'])
                
                crcdatas = []
                crcdatas.append((pgctl_data, memoffset['ControlFileData.crc']))

                if not upgcrc.crceq(crcdatas, self.crc):
                    logger.error('pg_control has invalid CRC')
                    raise UPgException('pg_control has invalid CRC')
        except IOError:
            logger.error("Error in reading control file!")
            raise
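A self-contained illustration of the fixed-offset parsing pattern used above: pack a buffer, then read each field back with a (format, offset) pair, exactly as __load does with the memoffset table. The buffer layout here is invented for the demo, not the real pg_control layout:

    import struct

    # Two fields packed little-endian: an 8-byte id at offset 0 and a
    # 4-byte version at offset 8 -- mirroring how pg_control fields are read.
    buf = struct.pack('<QI', 0x1122334455667788, 42)
    system_identifier, = struct.unpack_from('<Q', buf, 0)
    catalog_version_no, = struct.unpack_from('<I', buf, 8)
    assert (system_identifier, catalog_version_no) == (0x1122334455667788, 42)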
Example #2
    def create_mtcnn_net(self, p_model_path=None, r_model_path=None, o_model_path=None, use_cuda=True):
        # p_model_path is effectively required: the checkpoint directory is derived from it.
        assert p_model_path is not None, "p_model_path is required"
        dirname, _ = os.path.split(p_model_path)
        checkpoint = CheckPoint(dirname)

        pnet, rnet, onet = None, None, None
        self.device = torch.device(
            "cuda:0" if use_cuda and torch.cuda.is_available() else "cpu")

        if p_model_path is not None:
            pnet = PNet()
            pnet_model_state = checkpoint.load_model(p_model_path)
            pnet = checkpoint.load_state(pnet, pnet_model_state)
            if use_cuda:
                pnet.to(self.device)
            pnet.eval()

        if r_model_path is not None:
            rnet = RNet()
            rnet_model_state = checkpoint.load_model(r_model_path)
            rnet = checkpoint.load_state(rnet, rnet_model_state)
            if use_cuda:
                rnet.to(self.device)
            rnet.eval()

        if o_model_path is not None:
            onet = ONet()
            onet_model_state = checkpoint.load_model(o_model_path)
            onet = checkpoint.load_state(onet, onet_model_state)
            if use_cuda:
                onet.to(self.device)
            onet.eval()

        return pnet, rnet, onet
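A hedged usage sketch for create_mtcnn_net above; the owning class name (MTCNNDetector) and the weight paths are assumptions for illustration, not part of the original snippet:

    import torch

    # Hypothetical caller; MTCNNDetector stands in for whatever class
    # defines create_mtcnn_net in the original project.
    detector = MTCNNDetector()
    pnet, rnet, onet = detector.create_mtcnn_net(
        p_model_path='weights/pnet.pt',  # assumed paths
        r_model_path='weights/rnet.pt',
        o_model_path='weights/onet.pt',
        use_cuda=torch.cuda.is_available())

Example #3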
    def _set_checkpoint(self):
        """
        load pre-trained model or resume checkpoint
        """

        assert self.pruned_model is not None, "please create model first"

        self.checkpoint = CheckPoint(self.settings.save_path, self.logger)
        self._load_pretrained()
        self._load_resume()
Example #4
    def __init__(self, index, conf):
        self._nodes = conf['nodes']
        self._node_cnt = len(self._nodes)
        self._index = index
        # Number of faulty nodes tolerated (PBFT requires n >= 3f + 1).
        self._f = (self._node_cnt - 1) // 3
        # Node 0 starts as the leader.
        self._is_leader = (self._index == 0)
        self._leader = 0

        self._view = View(0, self._node_cnt)
        # The largest view either promised or accepted
        self._follow_view = View(0, self._node_cnt)

        self._next_propose_slot = 0

        # Tracks whether commit decisions have been committed to the blockchain.
        self.committed_to_blockchain = False
        # Checkpoint

        # Network simulation
        self._loss_rate = conf['loss%'] / 100

        # Time configuration
        self._network_timeout = conf['misc']['network_timeout']
        # After committing self._checkpoint_interval slots,
        # propose a new checkpoint.
        self._checkpoint_interval = conf['ckpt_interval']
        self._ckpt = CheckPoint(self._checkpoint_interval, self._nodes,
                                self._f, self._index, self._loss_rate,
                                self._network_timeout)
        # Commit
        self._last_commit_slot = -1

        self._dump_interval = conf['dump_interval']

        # Record the vote count and vote information for each view number.
        self._view_change_votes_by_view_number = {}

        # Record the status of each slot.
        # Slots are keyed by stringified integers for JSON compatibility.
        self._status_by_slot = {}

        self._sync_interval = conf['sync_interval']

        self._blockchain = Blockchain()

        self._session = None
        self._log = logging.getLogger(__name__)
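A quick check of the fault-tolerance formula above: PBFT needs n >= 3f + 1 nodes to tolerate f faulty ones, hence f = (n - 1) // 3:

    for n in (4, 7, 10):
        print(n, (n - 1) // 3)  # 4 nodes tolerate 1 fault, 7 tolerate 2, 10 tolerate 3

Example #5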
    def _set_checkpoint(self):
        assert self.model is not None, "please create model first"

        self.checkpoint = CheckPoint(self.settings.save_path, self.logger)
        if self.settings.retrain is not None:
            model_state = self.checkpoint.load_model(self.settings.retrain)
            self.model = self.checkpoint.load_state(self.model, model_state)

        if self.settings.resume is not None:
            model_state, optimizer_state, epoch = self.checkpoint.load_checkpoint(
                self.settings.resume)
            self.model = self.checkpoint.load_state(self.model, model_state)
            self.start_epoch = epoch
            self.optimizer_state = optimizer_state
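These snippets only call into CheckPoint; here is a minimal sketch of the interface they imply, inferred from usage in this example and Example #6 below, not the actual implementation from any of these repos (the key names in load_checkpoint are assumptions):

    import os
    import torch

    class CheckPoint:
        def __init__(self, save_path, logger=None):
            self.save_path = save_path
            self.logger = logger
            os.makedirs(save_path, exist_ok=True)

        def load_model(self, path):
            # A state_dict saved via torch.save(model.state_dict(), path).
            return torch.load(path, map_location='cpu')

        def load_state(self, model, model_state):
            model.load_state_dict(model_state)
            return model

        def load_checkpoint(self, path):
            # A combined checkpoint; the dictionary keys are assumptions.
            params = torch.load(path, map_location='cpu')
            return params['model'], params['optimizer'], params['epoch']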
Example #6
    def _set_checkpoint(self):
        assert self.model is not None, "please create model first"

        self.checkpoint = CheckPoint(self.settings.save_path)
        if self.settings.retrain is not None:
            model_state = self.checkpoint.load_model(self.settings.retrain)
            self.model = self.checkpoint.load_state(self.model, model_state)

        if self.settings.resume is not None:
            check_point_params = torch.load(self.settings.resume)
            model_state = check_point_params["model"]
            self.seg_opt_state = check_point_params["seg_opt"]
            self.fc_opt_state = check_point_params["fc_opt"]
            self.aux_fc_state = check_point_params["aux_fc"]
            self.model = self.checkpoint.load_state(self.model, model_state)
            self.start_epoch = 90  # hard-coded resume epoch in the original snippet
Example #7
    async def register(self):
        """
        post { 'host':xx, 'port': xx }to ca to register, get index and current nodes. then broadcast join_request
        """     
        if not self._session:
            timeout = aiohttp.ClientTimeout(self._network_timeout)
            self._session = aiohttp.ClientSession(timeout=timeout)
        resp = await self._session.post(self.make_url(self._ca, MessageType.REGISTER), json=self._node)
        resp = await resp.json()
        self._index = resp['index']
        self._nodes = resp['nodes']
        self._log.info('register to ca, get index %d, current nodes: %s', self._index, self._nodes)

        self._node_cnt = len(self._nodes)
        self._f = (self._node_cnt - 1) // 3
        self._is_leader = False
        self._ckpt = CheckPoint(self._checkpoint_interval, self._nodes,
                                self._f, self._index, self._loss_rate,
                                self._network_timeout)

        await self.join_request()
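make_url is not shown in the snippet; a plausible helper matching the call above, with the URL scheme and route layout assumed:

    def make_url(self, node, msg_type):
        # node is a dict like {'host': ..., 'port': ...}; msg_type comes from
        # the MessageType enum used above (its string form is assumed here).
        return "http://{}:{}/{}".format(node['host'], node['port'], msg_type)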
Example #8
                                               batch_size=config.batchSize,
                                               shuffle=True,
                                               **kwargs)

    print(len(train_loader.dataset))
    print(len(valid_loader.dataset))

    # Set model
    model = YangNet()
    para = sum([np.prod(list(p.size())) for p in model.parameters()])
    # Parameter count x 4 bytes (float32), reported in MB.
    print('Model {} : params: {:.4f}M'.format(model._get_name(),
                                              para * 4 / 1000 / 1000))
    model = model.to(device)

    # Set checkpoint
    checkpoint = CheckPoint(config.save_path)

    # Set optimizer
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        model.parameters()),
                                 lr=config.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=config.step,
                                                     gamma=0.1)

    # Set trainer
    logger = Logger(config.save_path)
    trainer = AlexNetTrainer(config.lr, train_loader, valid_loader, model,
                             optimizer, scheduler, logger, device)

    print(model)
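The snippet ends before the training loop; a typical continuation, mirroring Example #13 below (the save_model keyword is an assumption, since the original call is truncated there):

    for epoch in range(1, config.nEpochs + 1):
        trainer.train(epoch)
        checkpoint.save_model(model, index=epoch)  # keyword assumed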
Example #9
File: main.py  Project: L-Zhe/SDISS
from SDISS.Optim import WarmUpOpt, LabelSmoothing
from checkpoint import CheckPoint, saveOutput, loadModel
from run import fit
from eval import eval_score
from generate import generator
import os

if __name__ == '__main__':
    checkpoint = CheckPoint(trainSrcFilePath=Parameter.trainSrcFilePath,
                            validSrcFilePath=Parameter.validSrcFilePath,
                            testSrcFilePath=Parameter.testSrcFilePath,
                            trainTgtFilePath=Parameter.trainTgtFilePath,
                            validTgtFilePath=Parameter.validTgtFilePath,
                            testTgtFilePath=Parameter.testTgtFilePath,
                            trainGraph=Parameter.trainGraph,
                            validGraph=Parameter.validGraph,
                            testGraph=Parameter.testGraph,
                            min_freq=Parameter.min_freq,
                            BATCH_SIZE=Parameter.BATCH_SIZE,
                            dataPath=Parameter.dataPath,
                            dataFile=Parameter.dataFile,
                            checkpointPath=Parameter.checkpointPath,
                            checkpointFile=Parameter.checkpointFile,
                            mode=Parameter.mode)

    trainDataSet, validDataSet, testDataSet, index2word, gword2index = \
        checkpoint.LoadData()
    model = createModel(len(index2word), len(gword2index)).to(device)
    if Parameter.mode == 'train':
        criterion = LabelSmoothing(smoothing=Parameter.smoothing,
                                   lamda=Parameter.lamda).to(device)
Example #10
    auc_last = 0
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer_model,
        mode="min",
        factor=0.1,
        patience=0,
        verbose=True,
        min_lr=1e-8,
        threshold=0.0001,
        threshold_mode='abs')
    path_ckpt = '{}/ckpt/{}'.format(ROOT_DIR, exp_name)
    # learning checkpointer
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer_model,
                        path=path_ckpt,
                        prefix=run_name,
                        interval=1,
                        save_num=n_save_epoch,
                        loss0=loss0)
    ckpter_lr = CheckPoint(model=logisticReg,
                           optimizer=optimizer_model,
                           path=path_ckpt,
                           prefix=run_name + '_lr',
                           interval=1,
                           save_num=n_save_epoch,
                           loss0=loss0)
    ckpter_auc = CheckPoint(model=model,
                            optimizer=optimizer_model,
                            path=path_ckpt,
                            prefix=run_name,
                            interval=1,
Example #11
    def StartProgram(self):

        # ---Get the User Input and make it globally accessible---#

        cg.SampleRate = float(
            self.sample_rate.get())  # sample rate for experiment in seconds

        if cg.method == "Continuous Scan":
            cg.numFiles = int(self.numfiles.get())  # file limit
        elif cg.method == "Frequency Map":
            cg.numFiles = 1

        cg.q = Queue()

        if cg.delimiter == 1:
            cg.delimiter = " "
        elif cg.delimiter == 2:
            cg.delimiter = "\t"
        elif cg.delimiter == 3:
            cg.delimiter = ","

        if cg.extension == 1:
            cg.extension = ".txt"
        elif cg.extension == 2:
            cg.extension = ".csv"
        elif cg.extension == 3:
            cg.extension = ".DTA"

        cg.InjectionPoint = (
            None  # None variable if user has not selected an injection point
        )
        cg.InitializedNormalization = False  # tracks if the data has been normalized
        # to the starting normalization point
        cg.RatioMetricCheck = False  # tracks changes to high and low frequencies
        cg.NormWarningExists = (
            False  # tracks if a warning label for the normalization has been created
        )

        cg.NormalizationPoint = 3
        cg.starting_file = 1

        cg.SaveVar = self.SaveVar.get()  # tracks if text file export has been activated
        cg.InjectionVar = self.InjectionVar.get()  # tracks if injection was selected
        cg.resize_interval = int(self.resize_entry.get())  # interval at which the x axis of the plots resizes
        cg.handle_variable = self.ImportFileEntry.get()  # string handle used for the input file

        # --- Y Limit Adjustment Parameters ---#
        cg.min_norm = float(self.norm_data_min.get())  # normalization y limits
        cg.max_norm = float(self.norm_data_max.get())
        cg.min_raw = float(
            self.raw_data_min.get())  # raw data y limit adjustment variables
        cg.max_raw = float(self.raw_data_max.get())
        cg.min_data = float(
            self.data_min.get())  # raw data y limit adjustment variables
        cg.max_data = float(self.data_max.get())
        cg.ratio_min = float(self.KDM_min.get())  # KDM min and max
        cg.ratio_max = float(self.KDM_max.get())

        #############################################################
        # Interval at which the program searches for files (ms) ###
        #############################################################
        cg.Interval = self.Interval.get()

        # set the resizeability of the container ##
        # frame to handle PlotContainer resize   ##
        cg.container.columnconfigure(1, weight=1)

        # --- High and Low Frequency Selection for Drift Correction (KDM) ---#
        cg.HighFrequency = max(cg.frequency_list)
        cg.LowFrequency = min(cg.frequency_list)
        cg.HighLowList["High"] = cg.HighFrequency
        cg.HighLowList["Low"] = cg.LowFrequency

        # --- Create a timevault for normalization variables if the chosen
        # normalization point has not yet been analyzed ---#
        cg.NormalizationVault = []  # timevault for Normalization Points
        cg.NormalizationVault.append(
            cg.NormalizationPoint)  # append the starting normalization point

        ################################################################
        # If all checkpoints have been met, initialize the program ###
        ################################################################
        if not self.NoSelection:
            if cg.FoundFilePath:

                _ = CheckPoint(self.parent, self.controller)
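In this example CheckPoint is evidently a GUI window rather than a model checkpointer; a minimal sketch of that pattern, with every widget, label, and behavior assumed:

    import tkinter as tk

    class CheckPoint(tk.Toplevel):
        # Sketch only: a confirmation window shown before acquisition starts.
        def __init__(self, parent, controller):
            super().__init__(parent)
            self.controller = controller
            tk.Label(self, text="Verify settings before starting").pack()
            tk.Button(self, text="Proceed", command=self.destroy).pack()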
Example #12
        last_epoch = int(reporter.select_last(run=run_name).last_epoch)
        loss0 = reporter.select_last(run=run_name).last_loss
        loss0 = float(loss0[:-4])
        model.load_state_dict(
            torch.load(last_model_filename)['model_state_dict'])
    else:
        last_epoch = -1
        loss0 = 0

    optimizer_model = torch.optim.Adam(model.parameters(), lr=lr)
    path_ckpt = '{}/ckpt/{}'.format(ROOT_DIR, triplet_method)
    # learning embedding checkpointer.
    ckpter = CheckPoint(model=model,
                        optimizer=optimizer_model,
                        path=path_ckpt,
                        prefix=run_name,
                        interval=1,
                        save_num=n_save_epoch,
                        loss0=loss0)
    ckpter_v2 = CheckPoint(model=model,
                           optimizer=optimizer_model,
                           path=path_ckpt,
                           prefix='X' + run_name,
                           interval=1,
                           save_num=n_save_epoch,
                           loss0=loss0)
    train_hist = History(name='train_hist' + run_name)
    validation_hist = History(name='validation_hist' + run_name)
    #  --------------------------------------------------------------------------------------
    # Computing metrics on validation set before starting training
    #  --------------------------------------------------------------------------------------
Example #13
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(FaceDataset(train_config.annoPath,
                                                       transform=transform,
                                                       is_train=True),
                                           batch_size=train_config.batchSize,
                                           shuffle=True,
                                           **kwargs)

# Set model
model = ONet(config.NUM_LANDMARKS)
model = model.to(device)

# Set checkpoint
checkpoint = CheckPoint(train_config.save_path)

# Set optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=train_config.lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=train_config.step,
                                                 gamma=0.1)

# Set trainer
logger = Logger(train_config.save_path)
trainer = ONetTrainer(train_config.lr, train_loader, model, optimizer,
                      scheduler, logger, device)

for epoch in range(1, train_config.nEpochs + 1):
    trainer.train(epoch)
    checkpoint.save_model(model,