def search():
    if request.method == 'POST':
        # 'resultado' is module-level state that index() reads on the next render
        global resultado
        local = request.form['Local']
        busca = Model(tabela='pontos_turisticos')
        resultado = busca.get(lugar=local)
        return redirect(url_for('index'))
def login():
    global vLogin
    if vLogin == 0:
        if request.method == 'POST':
            username = request.form['username']
            senha = request.form['senha']

            user = Usuario(tabela='usuarios', username=username, senha=senha)
            validate = user.autenticar()

            if validate:
                global usr
                global usrList
                usr = username
                usuario = Model(tabela='usuarios')
                usrList = usuario.get(username=username)
                vLogin = 1
                return redirect(url_for('index'))
            else:
                erro = 'Login ou senha incorretos'
                return render_template('Login.html', erro=erro)
        if request.method == 'GET':
            return render_template('Login.html')
    else:
        # a second visit to /login while logged in acts as logout
        vLogin = 0
        return redirect(url_for('index'))
def index():
    pontos = Model(tabela='pontos_turisticos')
    pts = pontos.get_all()
    global resultado
    if len(resultado) != 0:
        # render a pending search result once, then reset to the full listing
        pts = resultado
        resultado = pontos.get_all()
    return render_template('index.html', len=len(pts), pontos=pts,
                           vLogin=vLogin, usr=usr, usrList=usrList)
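These views coordinate through module-level globals (resultado, vLogin, usr, usrList), which breaks under multiple workers or concurrent users. Below is a minimal sketch of the same search-then-redirect flow using Flask's per-user session instead of globals; Model is the helper class used throughout these snippets, and the route paths are assumptions:

from flask import Flask, request, session, redirect, url_for, render_template

app = Flask(__name__)
app.secret_key = 'change-me'  # sessions require a secret key

@app.route('/search', methods=['POST'])
def search():
    # keep only the search term in the per-user session cookie
    session['local'] = request.form['Local']
    return redirect(url_for('index'))

@app.route('/')
def index():
    pontos = Model(tabela='pontos_turisticos')
    local = session.pop('local', None)  # consume a pending search, if any
    pts = pontos.get(lugar=local) if local else pontos.get_all()
    return render_template('index.html', len=len(pts), pontos=pts)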
Example #4
def index():
    pontos = Model(tabela='pontos_turisticos')
    pts = pontos.get_all()
    print(pts)  # debug output
    return render_template('index.html',
                           len=len(pts),
                           pontos=pts,
                           vLogin=vLogin,
                           usr=usr,
                           usrList=usrList)
Example #5
def verificar(self):
    # True if some passeio for this ponto already falls on self.data
    v = 0
    ponto = self.get(ponto=self.ponto)
    teste0 = Model(tabela='pontos_turisticos')
    teste1 = teste0.get(name=self.ponto)
    if len(teste1) != 0:
        for i in range(len(ponto)):
            if ponto[i][3] == self.data:
                v = 1
        return v == 1
    else:
        return False
Example #6
def adicionar(self):
    # enrol the turista unless they are already on this passeio
    v = 0
    w = self.get(name_turista=self.name_turista)
    teste0 = Model(tabela='usuarios')
    teste1 = teste0.get(username=self.name_turista)
    if len(teste1) != 0:
        for i in range(len(w)):
            if w[i][1] == self.id_passeio:
                v = 1
        if v == 0:
            self.save()
            return True
        else:
            return False
    else:
        return False
Example #7
def oferecerPasseio(self):
    # offer the passeio only if none exists for this ponto on the same date
    v = 0
    ponto = self.get(ponto=self.ponto)
    teste0 = Model(tabela='pontos_turisticos')
    teste1 = teste0.get(name=self.ponto)
    if len(teste1) != 0:
        for i in range(len(ponto)):
            if ponto[i][3] == self.data:
                v = 1
        if v == 0:
            self.save()
            return True
        else:
            return False
    else:
        return False
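verificar, adicionar, and oferecerPasseio all run the same scan-for-a-matching-column loop. A hedged refactoring sketch follows; the helper name is mine, and the column indices (3 for the passeio date, 1 for the passeio id) are taken from the snippets above:

def _any_row_matches(rows, column, value):
    # True if any returned row carries `value` at the given column position
    return any(row[column] == value for row in rows)

def oferecerPasseio(self):
    # offer the passeio only if no passeio exists for this ponto on self.data
    if not Model(tabela='pontos_turisticos').get(name=self.ponto):
        return False
    if _any_row_matches(self.get(ponto=self.ponto), 3, self.data):
        return False
    self.save()
    return True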
Example #8
def model_notify(model: Model, created=True) -> None:
    """
    Email notification when a new Model created.
    """

    recipients = [
        u.email for u in User.objects.filter(
            groups__name="Style Managers").exclude(email='')
    ]

    if created:
        model_status = "created"
    else:
        model_status = "updated"

    if recipients:
        domain = Site.objects.get_current().domain
        mail_from = settings.DEFAULT_FROM_EMAIL

        send_mail_wrapper(
            _('A new Model has been %s by %s.') %
            (model_status, model.creator),
            _('\r\nModel name is: %s\r\nModel description is: %s\r\n'
              'Link: http://%s%s\r\n') %
            (model.name, model.description, domain, model.get_absolute_url()),
            mail_from,
            recipients,
            fail_silently=True)
        logging.debug('Sending email notification for %s Model, '
                      'recipients: %s' % (model.name, recipients))
    else:
        logging.warning('No recipients found for %s Model notification' %
                        model.name)
Example #9
def like():
    if request.method == 'POST':
        ponto = request.form['pontolike']
        id_ponto = int(request.form['id_ponto'])
        busca = Model(tabela='pontos_turisticos')
        resultado = busca.get(name=ponto)

        # fetch this user's existing dislike (0) and like (1) rows for the
        # point in a single connection instead of one connection per query
        connection = sqlite3.connect('database.db')
        cursor = connection.cursor()
        cursor.execute(
            'SELECT * FROM like WHERE id_ponto=? AND id_usuarios=? AND likeOrDislike=0',
            (id_ponto, usrList[0][0]))
        lista1 = cursor.fetchall()
        cursor.execute(
            'SELECT * FROM like WHERE id_ponto=? AND id_usuarios=? AND likeOrDislike=1',
            (id_ponto, usrList[0][0]))
        lista = cursor.fetchall()
        connection.close()

        if len(lista) == 0:  # the user has not liked this point yet
            like = resultado[0][5] + 1
            dislike = resultado[0][6]
            if len(lista1) != 0:  # a previous dislike flips to a like
                dislike = resultado[0][6] - 1
                connection = sqlite3.connect('database.db')
                cursor = connection.cursor()
                cursor.execute(
                    'UPDATE like SET likeOrDislike=1 WHERE id_ponto=? AND id_usuarios=?',
                    (id_ponto, usrList[0][0]))
                connection.commit()
                connection.close()
            else:
                registro = Model(tabela='like', id_usuarios=usrList[0][0],
                                 id_ponto=id_ponto, likeOrDislike=1)
                registro.save()

            connection = sqlite3.connect('database.db')
            cursor = connection.cursor()
            cursor.execute(
                'UPDATE pontos_turisticos SET like=?, dislike=? WHERE name=?',
                (like, dislike, ponto))
            connection.commit()
            connection.close()
        return redirect(url_for('index'))
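like() opens a fresh connection for every statement. A small helper sketch (run_query is my name, not from the source) keeps the queries identical while centralising the connect/commit/close boilerplate; the call values below are placeholders:

import sqlite3
from contextlib import closing

def run_query(sql, params=(), commit=False):
    # run one statement against database.db and return any fetched rows
    with closing(sqlite3.connect('database.db')) as connection:
        cursor = connection.cursor()
        cursor.execute(sql, params)
        rows = cursor.fetchall()
        if commit:
            connection.commit()
        return rows

# usage, mirroring two of the statements in like():
ja_curtiu = run_query(
    'SELECT * FROM like WHERE id_ponto=? AND id_usuarios=? AND likeOrDislike=1',
    (1, 1))
run_query('UPDATE pontos_turisticos SET like=?, dislike=? WHERE name=?',
          (10, 2, 'praia'), commit=True)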
Example #10
    def __init__(self):
        # create model and setup callbacks
        self.model = Model()
        self.model.spyeworks.addCallback(self.updatePlayerOnline)
        self.model.spyeworks.currentList.addCallback(self.updateCurrentList)
        self.model.sensorstate.addCallback(self.updateSensorState)

        # create variables for timers
        self.activeTimer = Timer(1, self.restartFunc, ())
        #self.playIdleList=False

        # update variables with data from model
        self.updatePlayerOnline(self.model.spyeworks.get())
        self.updateSensorState(self.model.sensorstate.get())
Example #11
def ponto(name):
    ponto = Model(tabela='pontos_turisticos')
    pt = ponto.get(name=name)
    passeio = Model(tabela='passeios')
    dados = passeio.get(ponto=name)
    passeio1 = Model(tabela='passeios1')
    dados1 = [None] * len(dados)
    vLen = [None] * len(dados)
    d = [None] * len(dados)
    now = datetime.now()
    n = [now.strftime("%Y"), now.strftime("%m"), now.strftime("%d")]
    for i in range(len(dados)):
        dados1[i] = passeio1.get(id_passeio=dados[i][0])
        vLen[i] = len(dados1[i])
        # compare date objects directly; the original compared strings built
        # with strftime("%x"), which do not sort chronologically
        date = datetime.strptime(dados[i][3], '%Y-%m-%d').date()
        d[i] = 1 if date > now.date() else 0
    return render_template('ponto.html', len=len(dados), dados=dados, now=now,
                           n=n, d=d, dados1=dados1, len1=len(dados1),
                           vLen=vLen, pontos=pt, vLogin=vLogin, usr=usr,
                           usrList=usrList)
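For reference, the reason the "%x" comparison above was replaced: strftime("%x") produces locale-formatted strings that do not sort chronologically, so a later date can compare as "smaller". A short demonstration:

from datetime import date

a = date(2023, 12, 31).strftime("%x")  # e.g. '12/31/2023'
b = date(2024, 9, 1).strftime("%x")    # e.g. '09/01/2024'
print(a > b)  # True: string order puts 2023-12-31 after 2024-09-01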
Example #12
def ponto(name):
    ponto = Model(tabela='pontos_turisticos')
    pt = ponto.get(name=name)
    passeio = Model(tabela='passeios')
    dados = passeio.get(ponto=name)
    return render_template('ponto.html',
                           len=len(dados),
                           dados=dados,
                           pontos=pt,
                           vLogin=vLogin,
                           usr=usr,
                           usrList=usrList)
Example #13
def model_update_notify(model: Model, creator: User, staff: User) -> None:
    """
    Email notification system when staff approved or rejected a Model
    """

    recipients = [
        u.email for u in User.objects.filter(
            groups__name="Style Managers").exclude(email='')
    ]

    if creator.email:
        recipients += [creator.email]

    if model.approved:
        approval_state = 'approved'
    else:
        approval_state = 'rejected'

    review = model.modelreview_set.last()
    comment = review.comment

    if recipients:
        domain = Site.objects.get_current().domain
        mail_from = settings.DEFAULT_FROM_EMAIL
        send_mail_wrapper(
            _('Model %s %s notification.') % (model, approval_state),
            _('\r\nModel %s %s by %s.\r\n%s\r\nLink: http://%s%s\r\n') %
            (model.name, approval_state, staff, comment, domain,
             model.get_absolute_url()),
            mail_from,
            recipients,
            fail_silently=True)
        logging.debug('Sending email %s notification for %s Model, '
                      'recipients: %s' % (approval_state, model, recipients))
    else:
        logging.warning('No recipients found for %s model %s notification' %
                        (model, approval_state))
Example #16
def build_model(self):
    self.net: nn.Module = Model(self.backbone, self.num_color,
                                self.num_style, self.num_season,
                                self.num_category).to(self.device)
    self.net.eval()
    self.load_model()
Example #17
def search():
    local = request.form['Local']
    busca = Model(tabela='pontos_turisticos')
    resultado = busca.get(lugar=local)
    # the original returned render_template('/Pesquisa local'), which is not a
    # valid template path; a results template name is assumed here
    return render_template('pesquisa.html', resultado=resultado)
Example #18
    opt.nThreads = 1  # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1 (1 image at a time)
    opt.serial_batches = True
    opt.gpu_ids = [opt.gpu_ids[0]]
    if 'vrd' in opt.dataroot:
        test_dataset = VrdDataset(opt.dataroot,
                                  split='test',
                                  net=opt.feat_net,
                                  use_gt=opt.use_gt,
                                  use_lang=opt.use_lang)
    else:
        print('Unknown dataset: {}'.format(opt.dataroot))
        sys.exit(1)
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=opt.batchSize,
                                  shuffle=False,
                                  num_workers=int(opt.nThreads))
    model = Model(opt)
    test_net(model, opt, test_data_loader, test_dataset.name)

    if not opt.no_evaluate:
        with open(
                os.path.join(opt.results_dir, opt.name, test_dataset.name,
                             'predicted_predicate.pkl'), 'rb') as f:
            rel_result = pickle.load(f)

        test_dataset.evaluate(rel_result,
                              n_rels=opt.n_rels,
                              obj_co_occur=model.relevance)
        #test_dataset.evaluate(rel_result, n_rels=opt.n_rels)
Example #19
    #         if isinstance(net, torch.nn.ReLU):
    #             net.register_forward_hook(relu_hook)
    #         if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
    #             net.register_forward_hook(pooling_hook)
    #         if isinstance(net, torch.nn.Upsample):
    #             net.register_forward_hook(upsample_hook)
    #         return
    #     for c in childrens:
    #         foo(c)

    # if model == None:
    #     model = torchvision.models.alexnet()
    # foo(model)
    # # input = Variable(torch.rand(3,input_res,input_res).unsqueeze(0), requires_grad = True)
    # input = Variable(torch.rand(3,3,input_res,input_res), requires_grad = True)
    # out = model(input)
    #

    total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) +
                   sum(list_relu) + sum(list_pooling) + sum(list_upsample))

    print('  + Number of FLOPs: %.2fG' % (total_flops / 1e9))

    return total_flops

if __name__ == '__main__':

    model = Model()
    # input=torch.rand(1,3,400,400)
    # out = Model(input)
    print_model_param_nums(model)
    count_model_param_flops(model, 400, True)
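The hook functions this script depends on (relu_hook, pooling_hook, and the list_* accumulators) are defined earlier in the original file and omitted here. A minimal sketch of the same forward-hook pattern, counting multiply-adds for Conv2d layers only; the hook body is my reconstruction, not the original's:

import torch
import torch.nn as nn

conv_flops = []

def conv_hook(module, inputs, output):
    # multiply-adds = output elements x (kernel area x in_channels / groups)
    kernel_ops = (module.kernel_size[0] * module.kernel_size[1] *
                  module.in_channels // module.groups)
    conv_flops.append(output.numel() * kernel_ops)

net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU())
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        m.register_forward_hook(conv_hook)

net(torch.rand(1, 3, 32, 32))
print('Conv multiply-adds: %d' % sum(conv_flops))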
Example #20
from models.models import Model

model_param = {'emb_size': 200, 'hidden_size': 200}
vocab_path = 'data/vocab'
source_path = 'data/cnn_articles'
target_path = 'data/cnn_abstracts'
lda_path = 'data/cnn_head_lda'

model = Model(model_param,
              vocab_path,
              head_attention=True,
              decoder_cell='dlstm')

model.train(source_path,
            target_path,
            lda_path,
            epoch_num=50,
            learning_rate=0.00001)
Example #21
class Person_processing:
    def __init__(self):

        logger = Logger()
        self.log = logger.logger
        self.error_log = logger.err_logger
        self.camera = None

        try:
            self.log.info("Reading the config...")
            self.config = ConfigReader()
            self.log.info("Config read")
        except Exception:
            self.log.error("Error reading config.ini")
            self.error_log.error("Error reading config.ini", exc_info=True)
            sys.exit()

        try:
            self.folder_path = self.config.get_folder_path_config()
            print("folder_path: {}".format(self.folder_path))
        except Exception:
            self.log.error(
                "Initialisation error: the video folder path is not defined")
            self.error_log.error(
                "Initialisation error: the video folder path is not defined",
                exc_info=True)
            sys.exit()

        self.log.info("Initialising face_model")
        try:
            face_model_address, face_model_protos = self.config.get_model_config(
            )
            print(face_model_address, face_model_protos)
            self.get_face_model = Model(face_model_address,
                                        face_model_protos,
                                        num_classes=2)
            self.get_face_model.get_session()
            self.log.info("face_model initialisation completed")
        except Exception:
            self.log.error("face_model initialisation error")
            self.error_log.error("face_model initialisation error",
                                 exc_info=True)
            sys.exit()

        self.log.info("Initialising camera")
        try:
            self.camera = Camera(self.folder_path)
            self.log.info("Camera initialised")
        except Exception:
            self.log.error("Camera initialisation error")
            self.error_log.error("Camera initialisation error", exc_info=True)
            sys.exit()

        try:
            self.max_boxes_to_draw, self.min_score_thresh = self.config.get_vis_utils_config(
            )
        except Exception:
            self.max_boxes_to_draw, self.min_score_thresh = 10, 0.3

        self.log.info("Initializing detector")
        try:
            self.detector = Detector(self.get_face_model,
                                     self.max_boxes_to_draw,
                                     self.min_score_thresh)
            self.log.info("detector initialized")
        except Exception:
            self.log.error("detector initialization failed")
            self.error_log.error("detector initialization failed",
                                 exc_info=True)
            sys.exit()

        try:
            self.log.info("Initialising embeddings models...")
            self.calculator = Calculate(self.log, self.error_log, self.config)
            self.log.info("embeddings Models initialisation completed")
        except Exception:
            self.log.error("embeddings Models initialisation error")
            self.error_log.error("embeddings Models initialisation error",
                                 exc_info=True)
            sys.exit()

        self.batch_frame_list = []

        # todo set batch_size to config.ini ?
        self.batch_size = 2
        self.flag = True

    def get_filenames_list(self):
        """
        Scan the watch folder for files.
        :return: the name of an .avi file, or None if a .txt result marker
                 is present or no video was found
        """
        video_name = None
        files = [
            f for f in os.listdir(self.folder_path)
            if os.path.isfile(os.path.join(self.folder_path, f))
        ]
        for file in files:
            if ".txt" in file:
                return None
            if ".avi" in file:
                video_name = file
        return video_name

    def get_all_frames(self, video_name):
        full_video_path = os.path.join(self.folder_path, video_name)
        # print("reading {}".format(full_video_path))
        try:
            self.camera.camera_capture.open(full_video_path)
            self.log.debug("reading {}".format(full_video_path))
        except Exception:
            self.log.error("Cannot open video file {}".format(full_video_path))
            self.error_log.error(
                "Cannot open video file {}".format(full_video_path),
                exc_info=True)
        # read frames to frame_list
        frame_list = []
        while True:
            frame, ret = self.camera.get_frame()
            if not ret:
                break
            # rgb_2_bgr
            frame = self.camera.rgb_to_bgr(frame)
            frame_list.append(frame)

            # print(frame.shape)
            # cv2.imshow('window_name', frame)
            # cv2.waitKey(300)
            # cv2.destroyAllWindows()

        # after reading video file, close reader to avoid PermissionError
        try:
            self.camera.camera_capture.release()
        except Exception:
            self.log.error(
                "Cannot close video file {}".format(full_video_path))
            self.error_log.error(
                "Cannot close video file {}".format(full_video_path),
                exc_info=True)

        return frame_list

    def get_faces(self, frame_list):
        """
        Get all faces from video
        :param frame_list: list of frames
        :return:
        """
        face_list = []
        while len(frame_list) > 0:
            if len(frame_list) < self.batch_size:  # if len < n skip
                break
            else:
                # print("detect")
                batch = frame_list[:self.batch_size]
                frame_list = frame_list[self.batch_size:]
            face_batch = self.detector.detect(batch)
            face_list = face_list + face_batch
        return face_list

    def process(self):
        print('in process')
        while True:
            # read filenames from folder
            video_name = self.get_filenames_list()
            if video_name is None:
                continue
            # for video file get all frames
            try:
                frame_list = self.get_all_frames(video_name)
            except Exception:
                self.log.error("Cannot read video file {}".format(video_name))
                self.error_log.error(
                    "Cannot read video file {}".format(video_name),
                    exc_info=True)
                continue
            # for frames_list get face_list (detect face and corp)
            try:
                video_face_list = self.get_faces(frame_list)
            except Exception:
                self.log.error("Cannot get face, detector error for {}".format(
                    video_name))
                self.error_log.error(
                    "Cannot get face, detector error for {}".format(
                        video_name),
                    exc_info=True)
                continue
            # for frame in video_face_list:
            # print(frame.shape)
            # cv2.imshow('window_name', frame)
            # cv2.waitKey(100)
            # cv2.destroyAllWindows()

            # # # embeddings
            # read scan & rfid images
            scan_face = cv2.imread(os.path.join(self.folder_path, 'scan.bmp'))
            rfid_face = cv2.imread(os.path.join(self.folder_path, 'rfid.jpg'))
            # get video, scan & rfid hash
            video_face_hash = self.calculator.calculate_faces(video_face_list)
            scan_face_hash = self.calculator.calculate_faces(
                np.expand_dims(scan_face, axis=0))
            rfid_face_hash = self.calculator.calculate_faces(
                np.expand_dims(rfid_face, axis=0))
            # distance
            scan_dist = 1 - self.count_distance(video_face_hash,
                                                scan_face_hash)
            rfid_dist = 1 - self.count_distance(video_face_hash,
                                                rfid_face_hash)
            print('----')
            print(scan_dist, rfid_dist)
            print('----')
            # TODO: check how the distance changes if the scan/rfid images are
            # cropped at the edges, or if the face crops are padded during detection
            self.save_results(scan_dist, rfid_dist)

    def count_distance(self, hash_1, hash_2):
        # Count euclidian distance matrix, between objects and faces in one image
        dist = distance.euclidean(hash_1, hash_2)
        return dist

    def save_results(self, scan_dist, rfid_dist):
        """
        save info about obj. recognitions to .txt file
        :return: None
        """
        save_path = os.path.join(self.folder_path, 'res.txt')
        try:
            text_file = open(save_path, "x")
        except FileExistsError:
            self.log.error(
                "save_results: FileExistsError {}".format(save_path))
            self.error_log.error(
                "save_results: FileExistsError {}".format(save_path),
                exc_info=True)
            return
        except FileNotFoundError:
            self.log.error(
                "save_results: FileNotFoundError {}".format(save_path))
            self.error_log.error(
                "save_results: FileNotFoundError {}".format(save_path),
                exc_info=True)
            return
        # without the returns above, text_file would be unbound here
        text_file.write(str(scan_dist) + '\n' + str(rfid_dist))
        text_file.close()
Example #22
def passeios():
    if usrList[0][4] == 'Guia':
        guia = usr
        passeio = Model(tabela='passeios')
        dados = passeio.get(guia=guia)
        passeio1 = Model(tabela='passeios1')
        dados1 = [None] * len(dados)
        vLen = [None] * len(dados)
        d = [None] * len(dados)
        now = datetime.now()
        n = [now.strftime("%Y"), now.strftime("%m"), now.strftime("%d")]
        for i in range(len(dados)):
            dados1[i] = passeio1.get(id_passeio=dados[i][0])
            vLen[i] = len(dados1[i])
            # compare date objects directly instead of locale-formatted strings
            date = datetime.strptime(dados[i][3], '%Y-%m-%d').date()
            d[i] = 1 if date > now.date() else 0
        return render_template('passeios.html', len=len(dados), dados=dados,
                               now=now, n=n, d=d, dados1=dados1,
                               len1=len(dados1), vLen=vLen, vLogin=vLogin,
                               usr=usr, usrList=usrList)

    if usrList[0][4] == 'Turista':
        a = Model(tabela='passeios1')
        b = a.get(name_turista=usr)
        a1 = Model(tabela='passeios')
        lenV = [None] * len(b)
        V = [None] * len(b)
        for i in range(len(b)):
            V[i] = a1.get(id=b[i][1])
            lenV[i] = len(V[i])
        return render_template('passeios.html', V=V, len=len(b), lenV=lenV,
                               vLogin=vLogin, usr=usr, usrList=usrList)
Example #23
from models.models import Model
from mxnet import cpu
import pickle
import os
from models.vocab import Vocab

model_param = {'emb_size': 200, 'hidden_size': 200}
vocab_path = 'data/vocab'
source_path = 'data/cnn_articles'
target_path = 'data/cnn_abstracts'
lda_path = 'data/cnn_head_lda'

model = Model(model_param,
              vocab_path,
              mode='decode',
              head_attention=True,
              decoder_cell='dlstm',
              ctx=cpu())

res = model.decode(source_path, lda_path, 'best.model')

res = [int(i.asscalar()) for i in res.tokens]

vocab = Vocab(vocab_path)

res = [vocab.id2word(i) for i in res]
print(' '.join(res))

abstract = pickle.load(
    open(os.path.join(target_path,
                      os.listdir(target_path)[0]), 'rb'))
Example #24
class ModelTrainer:
    """Class for training and testing of model"""
    def __init__(self, args):
        self.epoch_timer = utils.TimeIt(print_str="Epoch")
        self.args = args

        if self.args.training_mode == "gmm":
            self.dataloader = DataLoader(self.args)
        else:
            if self.args.eval:
                if self.args.eval_checkpoint == "":
                    raise ValueError(
                        "Eval mode is set, but no checkpoint path is provided!"
                    )
                self.loader = torch.load(self.args.eval_checkpoint)

            self.dataloader = DataLoader(self.args)

            # Load the model
            self.model = Model(self.args)

            if self.args.eval:
                self.model.load_state_dict(self.loader)

            if self.args.cuda:
                self.model.cuda()

            self.best_test_accuracy = 0.0
            self.best_test_epoch = 0

            if self.args.eval is False:

                if self.args.optimiser == "sgd":
                    self.opt = optim.SGD(
                        self.model.parameters(),
                        lr=self.args.learning_rate,
                        momentum=self.args.momentum,
                        weight_decay=self.args.weight_decay,
                    )
                elif self.args.optimiser == "adam":
                    self.opt = optim.Adam(
                        self.model.parameters(),
                        lr=self.args.learning_rate,
                        weight_decay=self.args.weight_decay,
                    )
                else:
                    raise Exception("Unknown optimiser {}".format(
                        self.args.optimiser))

                if self.args.lr_scheduler:
                    self.lr_scheduler = optim.lr_scheduler.MultiStepLR(
                        self.opt,
                        milestones=self.args.lr_schedule,
                        gamma=self.args.lr_decay_factor,
                    )
                if self.args.lr_reducer:
                    self.lr_reducer = torch.optim.lr_scheduler.ReduceLROnPlateau(
                        self.opt,
                        factor=np.sqrt(0.1),
                        cooldown=0,
                        patience=5,
                        min_lr=0.5e-6,
                    )

                # Loss function
                self.criterion = nn.CrossEntropyLoss()

                self.args.logdir = os.path.join("checkpoints",
                                                self.args.exp_name)
                utils.create_dir(self.args.logdir)

                if self.args.filelogger:
                    self.logger_path = os.path.join(
                        "checkpoints",
                        self.args.exp_name,
                        "%s_values.log" % self.args.exp_name,
                    )
                    self.logger = {
                        "train_loss_per_iter": [],
                        "train_loss_per_epoch": [],
                        "val_loss_per_iter": [],
                        "val_loss_per_epoch": [],
                        "val_accuracy_per_iter": [],
                        "val_accuracy_per_epoch": [],
                        "test_loss": [],
                        "test_accuracy": [],
                        "best_epoch": 0,
                        "best_test_accuracy": 0.0,
                        "ssl_loss": [],
                        "ssl_accuracy": [],
                        "ssl_correct": [],
                    }
                if self.args.tensorboard:
                    self.writer = SummaryWriter(log_dir=self.args.logdir,
                                                flush_secs=30)
                    self.writer.add_text("Arguments",
                                         params.print_args(self.args))

    def train_val(self, epoch):
        """Train the model for one epoch and evaluate on val split if log_intervals have passed"""

        for batch_idx, batch in enumerate(self.dataloader.train_loader):
            self.model.train()
            self.opt.zero_grad()

            self.iter += 1

            images, targets, indices = batch
            if self.args.cuda:
                images, targets = images.cuda(), targets.cuda()

            logits, unnormalised_scores = self.model(images)
            loss = self.criterion(unnormalised_scores, targets)
            loss.backward()
            self.opt.step()

            if batch_idx % self.args.log_interval == 0:
                val_loss, val_acc = self.evaluate("Val", n_batches=4)

                train_loss, val_loss, val_acc = utils.convert_for_print(
                    loss, val_loss, val_acc)

                if self.args.filelogger:
                    self.logger["train_loss_per_iter"].append(
                        [self.iter, train_loss])
                    self.logger["val_loss_per_iter"].append(
                        [self.iter, val_loss])
                    self.logger["val_accuracy_per_iter"].append(
                        [self.iter, val_acc])

                if self.args.tensorboard:
                    self.writer.add_scalar("Loss_at_Iter/Train", train_loss,
                                           self.iter)
                    self.writer.add_scalar("Loss_at_Iter/Val", val_loss,
                                           self.iter)
                    self.writer.add_scalar("Accuracy_at_Iter/Val", val_acc,
                                           self.iter)

                examples_this_epoch = batch_idx * len(images)
                epoch_progress = 100.0 * batch_idx / len(
                    self.dataloader.train_loader)
                print("Train Epoch: %3d [%5d/%5d (%5.1f%%)]\t "
                      "Train Loss: %0.6f\t Val Loss: %0.6f\t Val Acc: %0.1f" %
                      (
                          epoch,
                          examples_this_epoch,
                          len(self.dataloader.train_loader.dataset),
                          epoch_progress,
                          train_loss,
                          val_loss,
                          val_acc,
                      ))
        # evaluate at epoch end so the metrics below are always defined,
        # even when lr_reducer is disabled
        val_loss, val_acc = self.evaluate("Val", n_batches=None)
        if self.args.lr_reducer:
            self.lr_reducer.step(val_loss)

        train_loss, val_loss, val_acc = utils.convert_for_print(
            loss, val_loss, val_acc)

        if self.args.filelogger:
            self.logger["train_loss_per_epoch"].append([epoch, train_loss])
            self.logger["val_loss_per_epoch"].append([epoch, val_loss])
            self.logger["val_accuracy_per_epoch"].append([epoch, val_acc])

        if self.args.tensorboard:
            self.writer.add_scalar("Loss_at_Epoch/Train", train_loss, epoch)
            self.writer.add_scalar("Loss_at_Epoch/Val", val_loss, epoch)
            self.writer.add_scalar("Accuracy_at_Epoch/Val", val_acc, epoch)

    def evaluate(self, split, epoch=None, verbose=False, n_batches=None):
        """Evaluate model on val or test data"""

        self.model.eval()
        with torch.no_grad():
            loss = 0
            correct = 0
            n_examples = 0

            if split == "Val":
                loader = self.dataloader.val_loader
            elif split == "Test":
                loader = self.dataloader.test_loader

            for batch_idx, batch in enumerate(loader):
                images, targets, _ = batch
                if self.args.cuda:
                    images, targets = images.cuda(), targets.cuda()

                logits, unnormalised_scores = self.model(images)
                loss += F.cross_entropy(unnormalised_scores,
                                        targets,
                                        reduction="sum")
                pred = logits.max(1, keepdim=False)[1]
                correct += pred.eq(targets).sum()
                n_examples += pred.shape[0]
                if n_batches and (batch_idx >= n_batches):
                    break

            loss /= n_examples
            acc = 100.0 * correct / n_examples

            if split == "Test" and acc >= self.best_test_accuracy:
                self.best_test_accuracy = utils.convert_for_print(acc)
                self.best_test_epoch = epoch
                if self.args.filelogger:
                    self.logger["best_epoch"] = self.best_test_epoch
                    self.logger["best_test_accuracy"] = self.best_test_accuracy
            if verbose:
                if epoch is None:
                    epoch = 0
                    self.best_test_epoch = 0
                loss, acc = utils.convert_for_print(loss, acc)
                print(
                    "\n%s set Epoch: %2d \t Average loss: %0.4f, Accuracy: %d/%d (%0.1f%%)"
                    % (split, epoch, loss, correct, n_examples, acc))
                print(
                    "Best %s split Performance: Epoch %d - Accuracy: %0.1f%%" %
                    (split, self.best_test_epoch, self.best_test_accuracy))

                if self.args.filelogger:
                    self.logger["test_loss"].append([epoch, loss])
                    self.logger["test_accuracy"].append([epoch, acc])
                if self.args.tensorboard:
                    self.writer.add_scalar("Loss_at_Epoch/Test", loss, epoch)
                    self.writer.add_scalar("Accuracy_at_Epoch/Test", acc,
                                           epoch)
                    self.writer.add_scalar(
                        "Accuracy_at_Epoch/Best_Test_Accuracy",
                        self.best_test_accuracy,
                        self.best_test_epoch,
                    )

        return loss, acc

    def generate_labels_for_ssl(self, epoch, n_batches=None, verbose=False):

        self.model.eval()
        with torch.no_grad():
            loss = 0
            correct = 0
            n_examples = 0

            predictions_indices = []
            predictions_labels = []

            loader = self.dataloader.unsupervised_train_loader

            for batch_idx, batch in enumerate(loader):
                images, targets, indices = batch
                if self.args.cuda:
                    images, targets = images.cuda(), targets.cuda()

                logits, unnormalised_scores = self.model(images)
                loss += F.cross_entropy(unnormalised_scores,
                                        targets,
                                        reduction="sum")
                pred = logits.max(1, keepdim=False)[1]
                correct += pred.eq(targets).sum()
                n_examples += pred.shape[0]

                predictions_indices.extend(indices.tolist())
                predictions_labels.extend(pred.tolist())

                if n_batches and (batch_idx >= n_batches):
                    break

            loss /= n_examples
            acc = 100.0 * correct / n_examples

            if verbose:
                loss, acc, correct = utils.convert_for_print(
                    loss, acc, correct)
                print(
                    "\nLabel Generation Performance Average loss: %0.4f, Accuracy: %d/%d (%0.1f%%)"
                    % (loss, correct, n_examples, acc))

                # TODO: Add logging
                if self.args.filelogger:
                    self.logger["ssl_loss"].append([epoch, loss])
                    self.logger["ssl_accuracy"].append([epoch, acc])
                    self.logger["ssl_correct"].append([epoch, correct])
                if self.args.tensorboard:
                    self.writer.add_scalar("SSL_Loss_at_Epoch", loss, epoch)
                    self.writer.add_scalar("SSL_Accuracy_at_Epoch", acc, epoch)
                    self.writer.add_scalar("SSL_Correct_Labels_at_Epoch",
                                           correct, epoch)

        return predictions_indices, predictions_labels

    def train_val_test(self):
        """ Function to train, validate and evaluate the model"""
        self.iter = 0
        for epoch in range(1, self.args.epochs + 1):
            self.train_val(epoch)
            self.evaluate("Test", epoch, verbose=True)
            if self.args.lr_scheduler:
                self.lr_scheduler.step()
            if epoch % self.args.checkpoint_save_interval == 0:
                print("Saved %s/%s_epoch%d.pt\n" %
                      (self.args.logdir, self.args.exp_name, epoch))
                torch.save(
                    self.model.state_dict(),
                    "%s/%s_epoch%d.pt" %
                    (self.args.logdir, self.args.exp_name, epoch),
                )
            self.epoch_timer.tic(verbose=True)

        if self.args.tensorboard:
            if self.args.filelogger:
                text = "Epoch: %d Test Accuracy:%0.1f" % (
                    self.logger["best_epoch"],
                    self.logger["best_test_accuracy"],
                )
                self.writer.add_text("Best Test Performance", text)
            self.writer.close()
        if self.args.filelogger:
            utils.write_log_to_json(self.logger_path, self.logger)
        self.epoch_timer.time_since_init(print_str="Total")

    def ssl_train_val_test(self):
        """ Function to train, validate and evaluate the model"""
        self.iter = 0
        predictions_indices, predictions_labels = [], []
        self.dataloader.stop_label_generation = False

        for epoch in range(1, self.args.epochs + 1):
            if not self.dataloader.stop_label_generation:
                self.dataloader.ssl_init_epoch(predictions_indices,
                                               predictions_labels)

            self.train_val(epoch)
            self.evaluate("Test", epoch, verbose=True)

            if not self.dataloader.stop_label_generation:
                predictions_indices, predictions_labels = self.generate_labels_for_ssl(
                    epoch, n_batches=4, verbose=True)

            if self.args.lr_scheduler:
                self.lr_scheduler.step()
            if epoch % self.args.checkpoint_save_interval == 0:
                print("Saved %s/%s_epoch%d.pt\n" %
                      (self.args.logdir, self.args.exp_name, epoch))
                torch.save(
                    self.model.state_dict(),
                    "%s/%s_epoch%d.pt" %
                    (self.args.logdir, self.args.exp_name, epoch),
                )
            self.epoch_timer.tic(verbose=True)

        if self.args.tensorboard:
            if self.args.filelogger:
                text = "Epoch: %d Test Accuracy:%0.1f" % (
                    self.logger["best_epoch"],
                    self.logger["best_test_accuracy"],
                )
                self.writer.add_text("Best Test Performance", text)
            self.writer.close()
        if self.args.filelogger:
            utils.write_log_to_json(self.logger_path, self.logger)
        self.epoch_timer.time_since_init(print_str="Total")

    def gmm_train_val_test(self):
        train_data = self.dataloader.full_supervised_train_dataset.train_data
        train_labels = self.dataloader.full_supervised_train_dataset.train_labels

        train_data = train_data / 255

        mean = np.array(
            self.args.cifar10_mean_color)[np.newaxis][np.newaxis][np.newaxis]
        std = np.array(
            self.args.cifar10_std_color)[np.newaxis][np.newaxis][np.newaxis]

        train_data = (train_data - mean) / std

        train_data = train_data.reshape(train_data.shape[0], -1)

        cv_types = ["spherical", "diag", "full", "tied"]
        for cv_type in cv_types:
            gmm = mixture.GaussianMixture(n_components=10,
                                          covariance_type=cv_type)
            gmm.fit(train_data)
            clusters = gmm.predict(train_data)
            labels = np.zeros_like(clusters)
            for i in range(10):
                mask = clusters == i
                labels[mask] = mode(train_labels[mask])[0]

            correct1 = np.equal(clusters, train_labels).sum()
            correct2 = np.equal(labels, train_labels).sum()
            print("%d/49000 (%0.2f%%)" % (correct1, correct1 / 49000))
            print("%d/49000 (%0.2f%%)" % (correct2, correct2 / 49000))
Example #25
def perfil():
    a = Model(tabela='like')
    b = a.get(id_usuarios=usrList[0][0])
    pontos = Model(tabela='pontos_turisticos')
    pts = pontos.get_all()
    return render_template('likes.html', b=b, len=len(pts), len1=len(b),
                           pontos=pts, vLogin=vLogin, usr=usr, usrList=usrList)
Example #26
test_probabilitiesBkg = []
for b in bkg_test:
    b = scaler.transform([b])
    prob = classifier.predict_proba(b)[0][0]
    b = b[0].flatten().tolist()
    test_probabilitiesBkg.append(prob)
    testDataBkg.append(b)

batchSize = 4
test = testDataSig
probs_test = test_probabilitiesSig
test_bkg = testDataBkg
probs_test_bkg = test_probabilitiesBkg

# Initialise model
model = Model(len(test[0]), 1)
model.load_model('../scripts/approx1.pkl')

test = np.array(test)
probs_test = np.array(probs_test)

test_bkg = np.array(test_bkg)
probs_test_bkg = np.array(probs_test_bkg)

# Make a vector of outputs
comp_preds = []
comp_true = []
for (batchX, batchY) in next_batch(test, probs_test, batchSize):
    if batchY.shape[0] < batchSize:
        print('Batch size insufficient (%s), continuing...' % batchY.shape[0])
        continue
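next_batch is called but not defined in the snippet; a minimal generator consistent with how it is used above (a sketch, not the original implementation):

def next_batch(inputs, targets, batch_size):
    # yield successive (batchX, batchY) slices of two parallel arrays
    for start in range(0, len(inputs), batch_size):
        yield (inputs[start:start + batch_size],
               targets[start:start + batch_size])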
Example #27
num_training = int(len(dataset) * 0.8)
num_val = int(len(dataset) * 0.1)
num_test = len(dataset) - (num_training + num_val)
training_set, validation_set, test_set = random_split(
    dataset, [num_training, num_val, num_test])

train_loader = DataLoader(training_set,
                          batch_size=args.batch_size,
                          shuffle=True)
val_loader = DataLoader(validation_set,
                        batch_size=args.batch_size,
                        shuffle=False)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False)

model = Model(args).to(args.device)
optimizer = torch.optim.Adam(model.parameters(),
                             lr=args.lr,
                             weight_decay=args.weight_decay)


def train():
    min_loss = 1e10
    patience_cnt = 0
    val_loss_values = []
    best_epoch = 0

    t = time.time()
    model.train()
    for epoch in range(args.epochs):
        loss_train = 0.0
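The snippet breaks off inside the epoch loop. Given the variables it initialises (min_loss, patience_cnt, val_loss_values, best_epoch), a patience-based early-stopping continuation is the likely shape; this sketch assumes a compute_val helper, an args.patience flag, and the loss/target layout, none of which are shown in the source:

        for data in train_loader:
            optimizer.zero_grad()
            out = model(data)
            loss = F.nll_loss(out, data.y)     # assumed loss/target layout
            loss.backward()
            optimizer.step()
            loss_train += loss.item()

        val_loss = compute_val(model, val_loader)  # assumed helper
        val_loss_values.append(val_loss)
        if val_loss < min_loss:
            min_loss = val_loss
            best_epoch = epoch
            patience_cnt = 0
        else:
            patience_cnt += 1
        if patience_cnt >= args.patience:          # assumed flag
            break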
Example #28
def train():
    opt = TrainOptions().parse()
    train_dataset = VrdDataset(opt.dataroot,
                               split='train',
                               net=opt.feat_net,
                               use_lang=opt.use_lang)
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=opt.batchSize,
                                   shuffle=not opt.serial_batches,
                                   num_workers=int(opt.nThreads))
    val_dataset = VrdDataset(opt.dataroot,
                             split='val',
                             net=opt.feat_net,
                             use_lang=opt.use_lang)
    val_data_loader = DataLoader(val_dataset,
                                 batch_size=opt.batchSize,
                                 shuffle=opt.serial_batches,
                                 num_workers=int(opt.nThreads))

    model = Model(opt)

    total_steps = 0
    batch = 0
    n_train_batches = len(train_data_loader)
    for epoch in range(opt.epoch_count, opt.niter + opt.epoch_count):
        loss_temp = 0
        epoch_start_time = time.time()
        epoch_iter = 0
        for i_batch, data_dict in enumerate(train_data_loader):
            batch += 1
            if opt.loss == 'kl':
                alpha = model.update_alpha(batch * 1. / n_train_batches)

            total_steps += opt.batchSize
            epoch_iter += opt.batchSize

            model.set_input(data_dict)
            model.optimize()
            loss = model.get_loss()
            loss_temp = loss_temp + loss * opt.batchSize

            # print statistics
            if epoch_iter % opt.print_epoch_iter_freq == 0:
                if epoch_iter > 0:
                    loss_temp = loss_temp / opt.print_epoch_iter_freq
                #print('epoch: {:d}, epoch_iter: {:d}, loss: {:.3f}'.format(epoch, epoch_iter, loss.cpu().data[0]))
                print(
                    'Epoch: {:d} \t Epoch_iter: {:d} \t Training Loss: {:.4f}'.
                    format(epoch, epoch_iter, loss_temp))
                loss_temp = 0

        #if total_steps % opt.save_latest_freq == 0:
        #print('saving the latest model (epoch {:d}, total_steps {:d})'.format(epoch, total_steps))
        #model.save_model('latest')

        if epoch % opt.val_epoch_freq == 0:
            val_loss, val_true_loss = validate(model, val_data_loader, opt)
            print(
                '=============== Epoch: {:d} \t Validation Loss: {:.4f} \t True Loss : {:.4f} ==============='
                .format(epoch, val_loss, val_true_loss))

        # val_loss is defined only after the first validation epoch has run
        lr = model.update_learning_rate(val_loss)
        if opt.loss == 'kl':
            print(
                '[ End of epoch {:d} / {:d} \t Time Taken: {:f} sec \t Learning rate: {:.2e} \t alpha: {:.2e}]'
                .format(epoch, opt.niter + opt.epoch_count - 1,
                        time.time() - epoch_start_time, lr, alpha))
        else:
            print(
                '[ End of epoch {:d} / {:d} \t Time Taken: {:f} sec \t Learning rate: {:.2e}]'
                .format(epoch, opt.niter + opt.epoch_count - 1,
                        time.time() - epoch_start_time, lr))

        if epoch % opt.save_epoch_freq == 0:
            print('Saving the model at the end of epoch {:d} \t iters {:d}'.
                  format(epoch, total_steps))
            #model.save_model('latest')
            model.save_model(epoch)