def authenticate(self, **kwargs):
    # Assumes Django's django.contrib.auth.models.User and a module-level logger.
    result = None
    tenantKey = kwargs.get('tenant')
    userId = kwargs.get('userId')
    if tenantKey is not None and userId is not None:
        tenantPackages = get_subpackages(tenantKey)  # was get_subpackages(tenant); 'tenant' is undefined here
        logger.debug(str(tenantPackages))
        if tenantKey in tenantPackages:
            result = self.get_user(userId)
            logger.debug(str(tenantKey))
            if result is None and 'emailAddress' in kwargs:
                # 'Long' call has been issued: create the user on first authentication.
                emailAddress = kwargs['emailAddress']
                firstname = kwargs['firstname']
                lastname = kwargs['lastname']
                role = kwargs['role']  # currently unused
                raw_password = userId[::-1]  # placeholder password: the reversed user id
                user = User(username=userId, email=emailAddress,
                            first_name=firstname, last_name=lastname)
                user.set_password(raw_password)
                user.save()
                result = user
    return result
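A minimal usage sketch, assuming the method lives on a backend object exposing get_user() and that get_subpackages() can resolve the tenant; the backend class, tenant and user values below are hypothetical.

backend = TenantBackend()  # hypothetical class holding authenticate()

# 'Short' call: only looks up an existing user.
user = backend.authenticate(tenant='acme', userId='jdoe')

# 'Long' call: also creates the user on first authentication.
user = backend.authenticate(tenant='acme', userId='jdoe',
                            emailAddress='jdoe@example.com',
                            firstname='Jane', lastname='Doe', role='member')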
     
     
Example #2
def __define_package(self):
    logger.debug('')  # trace marker only
    for package_protocol in self.protocol['packages'].values():
        if self.__define_request(package_protocol=package_protocol):
            logger.info('GeneralProtocol package defined as {}'.format(package_protocol['name']))
            return package_protocol
    logger.warning('GeneralProtocol cannot define the request')
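For reference, a minimal sketch of the protocol structure __define_package() iterates over; only the 'packages' mapping and each entry's 'name' key are evident from the code, so the entries themselves are hypothetical placeholders.

# Hypothetical layout of self.protocol; only 'packages' and the per-entry
# 'name' key are implied by __define_package() above.
protocol = {
    'packages': {
        'handshake': {'name': 'handshake'},  # matched via __define_request()
        'ping': {'name': 'ping'},
    }
}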
Example #4
	def exec_query(self, sql_query):
		if not sql_query:
			logger.warning('empty SQL query, nothing to execute')
			return
		if not self.conn:
			self.conn = self.connect()
			self.cursor = self.conn.cursor()

		try:
			logger.info('executing SQL: "%s"', sql_query)
			self.cursor.execute(sql_query)
			result = self.cursor.fetchall()
		except Exception as e:
			logger.error(e)
			return

		if result:
			logger.info('query executed successfully')
		else:
			logger.info('query executed with no result')
		return result
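A minimal, self-contained usage sketch of the DB-API pattern exec_query() relies on, with sqlite3 as a stand-in backend; the SqliteClient class is hypothetical and only mirrors the conn/cursor/connect() contract assumed above.

import sqlite3

class SqliteClient:
	# Hypothetical client mirroring the contract of the class above:
	# lazily connect, keep a cursor, fetch all rows.
	def __init__(self):
		self.conn = None
		self.cursor = None

	def connect(self):
		return sqlite3.connect(':memory:')

	def exec_query(self, sql_query):
		if not self.conn:
			self.conn = self.connect()
			self.cursor = self.conn.cursor()
		self.cursor.execute(sql_query)
		return self.cursor.fetchall()

client = SqliteClient()
print(client.exec_query('SELECT 1'))  # [(1,)]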
Example #6
    def train(self, num_of_iters=1, data=None, hidden=None):
        self.loss = 0.0
        s = time.time()
        # zero the parameter gradients
        #self.optimizer.zero_grad()
        for i in range(num_of_iters):
            self.adjust_learning_rate(self.train_epoch, self.optimizer)
            if self.train_iter % self.num_batches_per_epoch == 0 and self.train_iter > 0:
                self.train_epoch += 1
                logger.info('train iter: %d, num_batches_per_epoch: %d', self.train_iter, self.num_batches_per_epoch)
                logger.info('Epoch %d, avg train acc: %f, lr: %f, avg loss: %f' % (self.train_iter//self.num_batches_per_epoch, np.mean(self.train_acc_top1), self.lr, self.avg_loss_per_epoch/self.num_batches_per_epoch))

                if self.rank == 0 and self.writer is not None:
                    self.writer.add_scalar('cross_entropy', self.avg_loss_per_epoch/self.num_batches_per_epoch, self.train_epoch)
                    self.writer.add_scalar('top-1_acc', np.mean(self.train_acc_top1), self.train_epoch)
                if self.rank == 0:
                    self.test(self.train_epoch)
                self.sparsities = []
                self.compression_ratios = []
                self.communication_sizes = []
                self.train_acc_top1 = []
                self.epochs_info.append(self.avg_loss_per_epoch/self.num_batches_per_epoch)
                self.avg_loss_per_epoch = 0.0

                # Save checkpoint
                if self.train_iter > 0 and self.rank == 0:
                    state = {'iter': self.train_iter, 'epoch': self.train_epoch, 'state': self.get_model_state()}
                    if self.prefix:
                        relative_path = './weights/%s/%s-n%d-bs%d-lr%.4f' % (self.prefix, self.dnn, self.nworkers, self.batch_size, self.base_lr)
                    else:
                        relative_path = './weights/%s-n%d-bs%d-lr%.4f' % (self.dnn, self.nworkers, self.batch_size, self.base_lr)
                    utils.create_path(relative_path)
                    filename = '%s-rank%d-epoch%d.pth'%(self.dnn, self.rank, self.train_epoch)
                    fn = os.path.join(relative_path, filename)
                    if self.train_epoch % 2 == 0:
                        self.save_checkpoint(state, fn)
                        self.remove_dict(state)
                if self.train_sampler and (self.nworkers > 1):
                    self.train_sampler.set_epoch(self.train_epoch)

            ss = time.time()
            if data is None:
                data = self.data_iter()

            if self.dataset == 'an4':
                inputs, labels_cpu, input_percentages, target_sizes = data
                input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            else:
                inputs, labels_cpu = data
            if self.is_cuda:
                if self.dnn == 'lstm':
                    inputs = Variable(inputs.transpose(0, 1).contiguous()).cuda()
                    labels = Variable(labels_cpu.transpose(0, 1).contiguous()).cuda()
                else:
                    inputs, labels = inputs.cuda(non_blocking=True), labels_cpu.cuda(non_blocking=True)
            else:
                labels = labels_cpu
                
            self.iotime += (time.time() - ss)
            
            sforward = time.time()
            if self.dnn == 'lstman4':
                out, output_sizes = self.net(inputs, input_sizes)
                out = out.transpose(0, 1)  # TxNxH
                loss = self.criterion(out, labels_cpu, output_sizes, target_sizes)
                #torch.cuda.synchronize()
                self.forwardtime += (time.time() - sforward)
                loss = loss / inputs.size(0)  # average the loss by minibatch
            elif self.dnn == 'lstm':
                hidden = lstmpy.repackage_hidden(hidden)
                outputs, hidden = self.net(inputs, hidden)
                tt = torch.squeeze(labels.view(-1, self.net.batch_size * self.net.num_steps))
                loss = self.criterion(outputs.view(-1, self.net.vocab_size), tt)
                #torch.cuda.synchronize()
                self.forwardtime += (time.time() - sforward)
            else:
                # forward + backward + optimize
                outputs = self.net(inputs)
                loss = self.criterion(outputs, labels)
                #torch.cuda.synchronize()
                self.forwardtime += (time.time() - sforward)
            sbackward = time.time()
            if self.amp_handle is not None:
                with apex.amp.scale_loss(loss, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
                    loss = scaled_loss
            else:
                loss.backward()
            loss_value = loss.item()
            #torch.cuda.synchronize()
            self.backwardtime += (time.time() - sbackward)

            self.loss += loss_value 

            self.avg_loss_per_epoch += loss_value

            if self.dnn not in ['lstm', 'lstman4']:
                acc1, = self.cal_accuracy(outputs, labels, topk=(1,))
                self.train_acc_top1.append(float(acc1))
                
            self.train_iter += 1
        self.num_of_updates_during_comm += 1
        self.loss /= num_of_iters 
        self.timer += time.time() - s 
        display = 40
        if self.train_iter % display == 0:
            logger.warning('[%3d][%5d/%5d][rank:%d] loss: %.3f, avg time per iter: forward %f, backward %f, total %f, io %f',
                           self.train_epoch, self.train_iter, self.num_batches_per_epoch, self.rank, self.loss,
                           self.forwardtime/display, self.backwardtime/display, self.timer/display, self.iotime/display)
            self.timer = 0.0
            self.iotime = 0.0
            self.forwardtime = 0.0
            self.backwardtime = 0.0
            
        if self.dnn == 'lstm':
            return num_of_iters, hidden
        return num_of_iters
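A minimal sketch of the calling contract, assuming an already constructed trainer (see the DLTrainer setup in Example #7 below); for the LSTM variant the hidden state returned by train() must be threaded back into the next call.

# Hypothetical driver loop; trainer construction is omitted (see Example #7).
hidden = trainer.net.init_hidden() if trainer.dnn == 'lstm' else None
for _ in range(trainer.num_batches_per_epoch):
    if hidden is not None:
        _, hidden = trainer.train(1, hidden=hidden)  # LSTM also returns the hidden state
    else:
        trainer.train(1)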
Example #7
def ssgd(dnn,
         dataset,
         data_dir,
         nworkers,
         lr,
         batch_size,
         nsteps_update,
         max_epochs,
         nwpernode,
         pretrain,
         num_steps,
         compressor,
         density,
         threshold,
         gradient_path=None):
    rank = hvd.rank()
    torch.cuda.set_device(rank % nwpernode)
    if rank != 0:
        pretrain = None
    trainer = DLTrainer(rank,
                        nworkers,
                        dist=False,
                        batch_size=batch_size,
                        is_weak_scaling=True,
                        ngpus=1,
                        data_dir=data_dir,
                        dataset=dataset,
                        dnn=dnn,
                        lr=lr,
                        nworkers=nworkers,
                        prefix='allreduce',
                        pretrain=pretrain,
                        num_steps=num_steps,
                        tb_writer=writer)

    init_epoch = torch.ones(1) * trainer.get_train_epoch()
    init_iter = torch.ones(1) * trainer.get_train_iter()
    trainer.set_train_epoch(int(hvd.broadcast(init_epoch, root_rank=0)[0]))
    trainer.set_train_iter(int(hvd.broadcast(init_iter, root_rank=0)[0]))
    is_sparse = density < 1
    #if not is_sparse:
    #    compressor = None

    if settings.ADAPTIVE_MERGE or settings.ADAPTIVE_SPARSE:
        seq_layernames, layerwise_times, layerwise_sizes = benchmark(trainer)
        layerwise_times = comm.bcast(layerwise_times, root=0)
        if rank == 0:
            logger.info('layerwise backward times: %s', list(layerwise_times))
            logger.info('layerwise backward sizes: %s', list(layerwise_sizes))
        logger.info('Benchmarked backward time: %f', np.sum(layerwise_times))
        logger.info('Model size: %d', np.sum(layerwise_sizes))
    else:
        seq_layernames, layerwise_times, layerwise_sizes = None, None, None

    norm_clip = None
    if dnn == 'lstm':
        norm_clip = 0.25
    elif dnn == 'lstman4':
        norm_clip = 400

    optimizer = hvd.DistributedOptimizer(
        trainer.optimizer,
        named_parameters=trainer.net.named_parameters(),
        compression=compressors[compressor],
        is_sparse=is_sparse,
        density=density,
        seq_layernames=seq_layernames,
        layerwise_times=layerwise_times,
        norm_clip=norm_clip,
        threshold=threshold,
        writer=writer,
        gradient_path=gradient_path)
    hvd.broadcast_parameters(trainer.net.state_dict(), root_rank=0)
    trainer.update_optimizer(optimizer)
    iters_per_epoch = trainer.get_num_of_training_samples() // (
        nworkers * batch_size * nsteps_update)

    times = []
    logger.info('max_epochs: %d', max_epochs)
    # Guard against a zero modulus when iters_per_epoch <= 1.
    display = 40 if iters_per_epoch > 40 else max(1, iters_per_epoch - 1)
    for epoch in range(max_epochs):
        hidden = None
        if dnn == 'lstm':
            hidden = trainer.net.init_hidden()
        for i in range(iters_per_epoch):
            s = time.time()
            optimizer.zero_grad()
            for j in range(nsteps_update):
                if j < nsteps_update - 1 and nsteps_update > 1:
                    optimizer.local = True
                else:
                    optimizer.local = False
                if dnn == 'lstm':
                    _, hidden = trainer.train(1, hidden=hidden)
                else:
                    trainer.train(1)
            if dnn == 'lstm':
                optimizer.synchronize()
                torch.nn.utils.clip_grad_norm_(trainer.net.parameters(), 0.25)
            elif dnn == 'lstman4':
                optimizer.synchronize()
                torch.nn.utils.clip_grad_norm_(trainer.net.parameters(), 400)
            trainer.update_model()
            times.append(time.time() - s)
            if i % display == 0 and i > 0:
                time_per_iter = np.mean(times)
                logger.warning(
                    'Time per iteration including communication: %f, Speed: %f images/s',
                    time_per_iter, batch_size * nsteps_update / time_per_iter)
                times = []
        optimizer.increase_one_epoch()
Example #8
    def _generate_groups_mgwfbp(self):
        num_of_workers = size()
        # Measured (alpha, beta) parameters of the all-reduce cost model,
        # keyed by worker count; only 2, 4, 8 and 16 workers are covered here.
        p_alpha_beta = {
            16: (0.00010632079996292579, 1.5 * 3.2713239529771973e-10),
            8: (9.75367204301171e-05, 3.0568230536676206e-10),
            4: (4.204298980348825e-05, 2.0589360830118177e-10),
            2: (2.554691138304671e-06, 9.837548167872609e-11)
        }
        alpha, beta = p_alpha_beta[num_of_workers]

        def __calculate_comm_start(tc, tb, taob, L):
            taoc = [0] * L
            taoc[L - 1] = taob[L - 1] + tb[L - 1]
            for l in reversed(range(L - 1)):
                taoc[l] = max(taoc[l + 1] + tc[l + 1], taob[l] + tb[l])
            return taoc

        def __merge(taob, tc, p, l):
            tc[l] = 0
            p[l - 1] = p[l - 1] + p[l]
            p[l] = 0
            tc[l - 1] = utils.predict_allreduce_time_with_size(
                alpha, beta, p[l - 1] * 4, num_of_workers)

        sizes = [
            self._named_parameters[k].data.numel()
            for k in self._seq_layernames
        ]
        seq_layernames = self._seq_layernames
        self._sizes = sizes
        p = sizes[:]
        L = len(sizes)
        tc = [
            utils.predict_allreduce_time_with_size(alpha, beta, s * 4,
                                                   num_of_workers)
            for s in sizes
        ]
        tb = list(self._layerwise_times)
        taob = [0] * L
        for l in reversed(range(L - 1)):
            taob[l] = taob[l + 1] + tb[l + 1]
        taoc = __calculate_comm_start(tc, tb, taob, L)
        if rank() == 0:
            logger.warning('tc sum: %f', np.sum(tc))
            logger.warning('tc: %s', tc)
            logger.warning('taoc: %s', taoc)
        groups = []
        group = []
        idx = 0
        key_groupidx_maps = {}
        l = L - 1
        key = seq_layernames[l]
        key_groupidx_maps[key] = idx
        group.append(key)
        for l in reversed(range(1, L - 1)):
            key = seq_layernames[l]
            group.append(key)
            key_groupidx_maps[key] = idx
            current_taob = taob[l - 1] + tb[l - 1]
            if current_taob < taoc[l + 1] + tc[l + 1]:
                __merge(taob, tc, p, l)
                taoc = __calculate_comm_start(tc, tb, taob, L)
            elif (current_taob > taoc[l + 1] + tc[l + 1]
                  and current_taob < taoc[l] + tc[l]
                  and taoc[l] + alpha > current_taob):
                __merge(taob, tc, p, l)
                taoc = __calculate_comm_start(tc, tb, taob, L)
            else:
                idx += 1
                groups.append(group)
                group = []
        l = 0
        key = seq_layernames[l]
        key_groupidx_maps[key] = idx
        group.append(key)
        logger.info('Predicted non-overlapped time: %f',
                    taoc[0] + tc[0] - (taob[0] + tb[0]))
        logger.info('Predicted tb+tc= %f', taoc[0] + tc[0])
        if len(group) > 0:
            groups.append(group)
        return groups, key_groupidx_maps
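For intuition, a self-contained toy run of the communication-start recurrence used by __calculate_comm_start() above; the per-layer backward times tb and all-reduce times tc are made-up numbers, not measurements.

# Toy illustration of the comm-start recurrence (illustrative numbers only).
def calculate_comm_start(tc, tb, taob, L):
    taoc = [0] * L
    taoc[L - 1] = taob[L - 1] + tb[L - 1]  # last layer: comm starts right after its backward
    for l in reversed(range(L - 1)):
        # layer l's comm starts once its backward is done AND the later layer's comm has finished
        taoc[l] = max(taoc[l + 1] + tc[l + 1], taob[l] + tb[l])
    return taoc

tb = [1.0, 2.0, 1.5]   # hypothetical backward times; layer L-1 runs first
tc = [0.5, 0.8, 0.3]   # hypothetical all-reduce times per layer
taob = [0] * 3         # backward start times, derived as in the method above
for l in reversed(range(2)):
    taob[l] = taob[l + 1] + tb[l + 1]
print(calculate_comm_start(tc, tb, taob, 3))  # [4.5, 3.5, 1.5]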
Example #9
def check_permission(func):
	# Decorator header reconstructed from the @check_permission usage below;
	# the original snippet was truncated above this point.
	def check(*args, **kargs):
		uid, gid = os.getuid(), os.getgid()

		# Require real root: both uid and gid must be 0 (the original
		# 'and' let a non-root user with gid 0 slip through).
		if uid != 0 or gid != 0:
			logger.error("root privileges are required to run this.")
			sys.exit(1)

		return func(*args, **kargs)

	return check

@check_permission
def directory(path="", mode=0755, recursive=False, action='create'):

	if not path:
		logger.warn("please sepecific the directory name.")
		sys.exit(1)


	if action == "create":

		is_exists_dir = os.path.exists(path)

		if is_exists_dir:
			try:
				shutil.rmtree(path)
			except OSError, e:
				logger.error("rm path: %s error" % (path) + str(e))
				sys.exit(1)

		if not recursive:
Example #10
    def train(self, num_of_iters=1, data=None, hidden=None):
        self.loss = 0.0
        s = time.time()

        for i in range(num_of_iters):
            self.adjust_learning_rate(self.train_epoch, self.optimizer)
            if self.train_iter % self.num_batches_per_epoch == 0 and self.train_iter > 0:
                logger.info('train iter: %d, num_batches_per_epoch: %d',
                            self.train_iter, self.num_batches_per_epoch)
                logger.info(
                    'Epoch %d, avg train acc: %f, lr: %f, avg loss: %f' %
                    (self.train_iter // self.num_batches_per_epoch,
                     np.mean(self.train_acc_top1), self.lr,
                     self.avg_loss_per_epoch / self.num_batches_per_epoch))
                mean_s = np.mean(self.sparsities)
                if self.train_iter > 0 and np.isnan(mean_s):
                    logger.warning('NaN detected! sparsities: %s' %
                                   self.sparsities)
                logger.info(
                    'Average Sparsity: %f, compression ratio: %f, communication size: %f',
                    np.mean(self.sparsities), np.mean(self.compression_ratios),
                    np.mean(self.communication_sizes))
                if self.rank == 0 and self.writer is not None:
                    self.writer.add_scalar(
                        'cross_entropy',
                        self.avg_loss_per_epoch / self.num_batches_per_epoch,
                        self.train_epoch)
                    self.writer.add_scalar('top-1 acc',
                                           np.mean(self.train_acc_top1),
                                           self.train_epoch)
                if self.rank == 0:
                    self.test(self.train_epoch)
                self.sparsities = []
                self.compression_ratios = []
                self.communication_sizes = []
                self.train_acc_top1 = []
                self.epochs_info.append(self.avg_loss_per_epoch /
                                        self.num_batches_per_epoch)
                self.avg_loss_per_epoch = 0.0
                if self.train_iter > 0 and self.rank == 0:
                    state = {
                        'iter': self.train_iter,
                        'epoch': self.train_epoch,
                        'state': self.get_model_state()
                    }
                    if self.prefix:
                        relative_path = './weights/%s/%s-n%d-bs%d-lr%.4f' % (
                            self.prefix, self.dnn, self.nworkers,
                            self.batch_size, self.base_lr)
                    else:
                        relative_path = './weights/%s-n%d-bs%d-lr%.4f' % (
                            self.dnn, self.nworkers, self.batch_size,
                            self.base_lr)
                    if settings.SPARSE:
                        relative_path += '-s%.5f' % self.sparsity
                    utils.create_path(relative_path)
                    filename = '%s-rank%d-epoch%d.pth' % (self.dnn, self.rank,
                                                          self.train_epoch)
                    fn = os.path.join(relative_path, filename)
                    #self.save_checkpoint(state, fn)
                    #self.remove_dict(state)
                self.train_epoch += 1
                if self.train_sampler and (self.nworkers > 1):
                    self.train_sampler.set_epoch(self.train_epoch)

            ss = time.time()
            if data is None:
                data = self.data_iter()

            if self.dataset == 'an4':
                inputs, labels_cpu, input_percentages, target_sizes = data
                input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
            else:
                inputs, labels_cpu = data
            if self.is_cuda:
                if self.dnn == 'lstm':
                    inputs = Variable(inputs.transpose(0,
                                                       1).contiguous()).cuda()
                    labels = Variable(labels_cpu.transpose(
                        0, 1).contiguous()).cuda()
                else:
                    inputs, labels = inputs.cuda(
                        non_blocking=True), labels_cpu.cuda(non_blocking=True)
            else:
                labels = labels_cpu

            self.iotime += (time.time() - ss)

            if self.dnn == 'lstman4':
                out, output_sizes = self.net(inputs, input_sizes)
                out = out.transpose(0, 1)  # TxNxH
                loss = self.criterion(out, labels_cpu, output_sizes,
                                      target_sizes)
                loss = loss / inputs.size(0)  # average the loss by minibatch
                loss.backward()
            elif self.dnn == 'lstm':
                hidden = lstmpy.repackage_hidden(hidden)
                outputs, hidden = self.net(inputs, hidden)
                tt = torch.squeeze(
                    labels.view(-1, self.net.batch_size * self.net.num_steps))
                loss = self.criterion(outputs.view(-1, self.net.vocab_size),
                                      tt)
                loss.backward()
            else:
                # forward + backward + optimize
                outputs = self.net(inputs)
                loss = self.criterion(outputs, labels)
                loss.backward()
            loss_value = loss.item()
            # accumulate loss statistics
            self.loss += loss_value

            self.avg_loss_per_epoch += loss_value

            if self.dnn not in ['lstm', 'lstman4']:
                acc1, = self.cal_accuracy(outputs, labels, topk=(1, ))
                self.train_acc_top1.append(acc1)

            self.train_iter += 1
        self.num_of_updates_during_comm += 1
        self.loss /= num_of_iters
        self.timer += time.time() - s
        display = 100
        if self.train_iter % display == 0:
            logger.info(
                '[%3d][%5d/%5d][rank:%d] loss: %.3f, average forward and backward time: %f, iotime: %f '
                % (self.train_epoch, self.train_iter,
                   self.num_batches_per_epoch, self.rank, self.loss,
                   self.timer / display, self.iotime / display))
            mbytes = 1024. * 1024
            logger.info(
                'GPU memory usage memory_allocated: %d MBytes, max_memory_allocated: %d MBytes, memory_cached: %d MBytes, max_memory_cached: %d MBytes, CPU memory usage: %d MBytes',
                ct.memory_allocated() / mbytes,
                ct.max_memory_allocated() / mbytes,
                ct.memory_cached() / mbytes,
                ct.max_memory_cached() / mbytes,
                process.memory_info().rss / mbytes)
            self.timer = 0.0
            self.iotime = 0.0
            if self.is_cuda:
                torch.cuda.empty_cache()

        if self.dnn == 'lstm':
            return num_of_iters, hidden
        return num_of_iters
def singleIpScanner(ip):
    logger.info("Start to scan %s" % ip.strip())
    ret = checkIP(ip)
    if ret:
        logger.warn("Found wormhole vuln in %s : %s" % (ip, dumps(ret)))