def main():

    n_unique = 0

    # Check for an existing model
    print('[INFO] Searching for model... (', model_file, ')\n')
    if os.path.isfile(model_file):
        print('[INFO] Model found.\n')

    else:
        print('[INFO] Model not found!\n')
        print('[INFO] Creating model...')
        print('---------------------\n')

        # Load the training dataset
        print('Load datatrain...')
        if os.path.isfile(datatrain_file):
            print('[INFO] Datatrain found!\n')

        else:
            print('[INFO] Datatrain not found!\n')
            print('[INFO] Creating datatrain...\n')

            if face_localization:
                if not os.path.exists(dir_datatrain_new):
                    ds.preprocess(dir_datatrain, dir_datatrain_new, (width, height))

                ds.generate_data(datatrain_file, dir_datatrain_new + '/' + dir_datatrain,
                                 (width, height), normalized)

            else:
                ds.generate_data(datatrain_file, dir_datatrain, (width, height), normalized)

        print('[INFO] Start training...\n')
        if not os.path.exists(dir_logs):
            os.makedirs(dir_logs)

        if not os.path.exists(dir_logs_tb):
            os.makedirs(dir_logs_tb)
            os.makedirs(dir_logs_tb + '/training')
            os.makedirs(dir_logs_tb + '/validation')

        st.train(model_file, epoch, datatrain_file, width, height, channel, v_split)

    if run == 'recognition':
        recognition_data_test()

    elif run == 'counting':
        counting_from_video()

    gc.collect()
 def infer(raw_data, **kwargs):
     """
     :param raw_data: raw input (a string in this case)
     :param kwargs:
     :return:
     """
     # Call the preprocess function defined in dataset.py to convert the string into vectors
     (we, sl, cs, wl, ss, _) = preprocess(raw_data,
                                          config.max_sentence_length,
                                          config.max_word_length,
                                          config.max_syll_num,
                                          word_dim=200)
     we = wordvec_lookup(we, 200)
     # Feed the input into the saved model and get the prediction results back
     pred = sess.run(y_logits,
                     feed_dict={
                         word_embed: we,
                         sent_len: sl,
                         chars: cs,
                         word_len: wl,
                         sylls: ss,
                         is_training: False
                     })
     pred = np.reshape(pred, [-1])
     return list(zip(np.zeros(len(pred)), pred))
Example #3
def fpgrowth(dataset, minSupportRatio, minConfidenceRatio):
    dataset = preprocess(dataset)
    dataset = [(list(data), 1) for data in dataset]
    minSupport = int(minSupportRatio * len(dataset))
    frequent_itemset, support = build_fptree(dataset, minSupport, [])
    rules = getAssociaionRules(frequent_itemset, support, minConfidenceRatio)
    return frequent_itemset, rules
Example #4
def infer(raw_data, label, **kwargs):
    """
    :param raw_data: raw input (a string in this case)
    :param kwargs:
    :return:
    """

    preprocessed_data = preprocess('../data/processing_data', raw_data,
                                   config.strmaxlen)
    model.load_state_dict(
        torch.load("../model/model_RCNN.pt", map_location='cpu'))
    #model.load_state_dict(torch.load("../model/model_RCNN.pt"))

    model.eval()

    output_prediction = model(preprocessed_data)
    point = output_prediction.data.squeeze(dim=1).tolist()

    numpy_point = np.array(point)
    tot = len(point)
    k = 10
    for i in range(1, k + 1):
        topk_list = (-numpy_point).argsort()[:, :i].tolist()
        n_correct = 0
        for p, topk in zip(label, topk_list):
            # print('topk prediction = {}, true_label = {}'.format(topk, p))
            if int(p.replace('\n', '')) in topk: n_correct += 1
        score = n_correct / tot
        print("accuracy@top{} : {} ".format(i, score))

    point = [np.argmax(p) for p in point]

    return list(zip(np.zeros(len(point)), point))
Example #5
 def infer(raw_data, **kwargs):
     nouns = [" ".join(twt.nouns(doc)) for doc in raw_data]
     preprocessed_data = preprocess(nouns, config.strmaxlen)
     pred = model.predict(preprocessed_data)[-1]
     pred_prob = pred[:, 1]
     clipped = np.argmax(pred, axis=-1)
     return list(zip(pred_prob.flatten(), clipped.flatten()))
Example #6
    def infer(raw_data, **kwargs):

        left_preprocessed_data, right_preprocessed_data = preprocess(raw_data, config.strmaxlen)
        # Feed the input into the saved model and get the prediction results back
        pred = sess.run(output_sigmoid, feed_dict={x_1: left_preprocessed_data, x_2: right_preprocessed_data})
        clipped = np.array(pred > config.threshold, dtype=int)
        return list(zip(pred.flatten(), clipped.flatten()))
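Several of these infer snippets end with the same post-processing step: threshold the sigmoid output to get a hard 0/1 label and pair it with the raw probability. A minimal, self-contained sketch of just that step (the array values and the 0.5 cut-off are illustrative, not taken from any project above):

import numpy as np

pred = np.array([[0.12], [0.87], [0.55]])   # sigmoid outputs, shape (batch, 1)
threshold = 0.5                             # illustrative cut-off
clipped = (pred > threshold).astype(int)    # hard 0/1 labels
print(list(zip(pred.flatten(), clipped.flatten())))   # pairs of (probability, label)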
Example #7
def prepare_setting(args):
    model_path = find_latest(args.model_path)
    print(model_path)
    jsonpath = os.path.join(os.path.dirname(model_path), "args.json")
    print(jsonpath)
    with open(jsonpath, 'r') as f:
        train_args = json.load(f)

    model_cand = {"mv2": MobilenetV2, "vgg16": VGG16, "resnet50": ResNet50}
    if train_args["model_name"] == "mv2":
        model = MobilenetV2(num_classes=101, depth_multiplier=1.0)
    else:
        model = model_cand[train_args["model_name"]](num_classes=101)

    model = L.Classifier(model)
    chainer.serializers.load_npz(model_path, model)

    test_dataset = FoodDataset(args.dataset,
                               model_name=train_args["model_name"],
                               train=False)

    if args.device >= 0:
        # use GPU
        chainer.backends.cuda.get_device_from_id(args.device).use()
        model.predictor.to_gpu()
        import cupy as xp
    else:
        # use CPU
        xp = np

    preprocess_ = lambda image: preprocess(image, train_args["model_name"])

    return model, preprocess_, xp, test_dataset,
Example #8
def recognize(img_file, uid_file):
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    model_filename = os.path.join(config.OUTPUT_DIR, config.OUTPUT_MODEL_FILE)
    recognizer.read(model_filename)

    faceCascade = cv2.CascadeClassifier(config.CASCADE_PATH)

    img = cv2.imread(img_file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(100, 100),
                                         flags=cv2.CASCADE_SCALE_IMAGE)

    with open(uid_file) as f:
        uids = [line.strip() for line in f.readlines()]

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
        img_predict = gray[y:y + h, x:x + w]
        img_predict = preprocess(img_predict)
        img_predict = cv2.resize(img_predict,
                                 (config.IMAGE_SIZE, config.IMAGE_SIZE))
        Id, score = recognizer.predict(img_predict)
        if score > 0:
            cv2.putText(img, uids[Id], (x, y), cv2.FONT_HERSHEY_PLAIN, 4,
                        (0, 0, 255), 3)
            print("%s" % (uids[Id]))
        else:
            cv2.putText(img, "unknown", (x, y), cv2.FONT_HERSHEY_PLAIN, 2,
                        (0, 0, 255), 3)

    cv2.imwrite("detect.jpg", img)
Example #9
    def infer(raw_data, **kwargs):
        """

        :param raw_data: raw input (a string in this case)
        :param kwargs:
        :return:
        """
        sp_model = preprocess_infer['sp_model']
        wp2i = preprocess_infer['wp2i']

        char_text = preprocess(raw_data, config.strmaxlen)
        char_text = torch.tensor(char_text)
        char_text = Variable(char_text.long()).cuda()

        # Call the preprocess function defined in dataset.py to convert the string into vectors
        inter_x_text = infer_preprocess(raw_data, sp_model, wp2i,
                                        config.max_words_len,
                                        config.max_wp_len)
        inter_x_text = (inter_x_text[0].cuda(), inter_x_text[1].cuda())

        #preprocessed_data = preprocess(raw_data, config.strmaxlen)
        model.eval()
        # Feed the input into the saved model and get the prediction results back
        output_prediction = model(char_text, inter_x_text)
        point = output_prediction.data.squeeze(dim=1)  #.tolist()
        print(config.model)
        if config.model in [
                'classification', 'cnntext', 'bilstmwithattn', 'ImgText2Vec'
        ]:
            #point = [np.argmax(p) for p in point]
            point = torch.sum(point > 0.5, dim=1).tolist()

        # DONOTCHANGE: They are reserved for nsml
        # The return value must be in the form [(confidence interval, point)] to be posted to the leaderboard; the confidence interval values do not affect the leaderboard result
        return list(zip(np.zeros(len(point)), point))
Example #10
    def infer(raw_data, **kwargs):

        preprocessed_data = preprocess(raw_data, tknzr, config.strmaxlen)
        pred = model.predict(preprocessed_data)[-1]
        pred_prob = pred[:, 1]
        clipped = np.argmax(pred, axis=-1)
        return list(zip(pred_prob.flatten(), clipped.flatten()))
Example #11
def apriori(dataset, minSupportRatio, minConfidenceRatio):
    dataset = preprocess(dataset)
    frequent_itemset = []
    support = {}
    itemsets = set()
    for data in dataset:
        itemsets |= data
    itemsets = [{itemset} for itemset in itemsets]
    minSupport = int(minSupportRatio * len(dataset))
    while True:
        L = getSupport(dataset, itemsets, minSupport)
        # print(L)
        support.update(L)
        # print(support)
        if len(L.items()) == 0:
            break
        frequent_itemset.extend(L.keys())
        itemsets = []
        for set1 in L.keys():
            for set2 in L.keys():
                if isJoinable(set1, set2):
                    itemsets.append(set(set1) | set(set2))

    rules = getAssociaionRules(frequent_itemset, support, minConfidenceRatio)
    return frequent_itemset, rules
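Both the apriori and fpgrowth examples above turn a support ratio into an absolute count and keep only itemsets that reach it. A self-contained sketch of that counting step for single items, with illustrative data and names (this is not the getSupport used above):

from collections import Counter

transactions = [{'a', 'b'}, {'a', 'c'}, {'a', 'b'}, {'b'}]
min_support_ratio = 0.5
min_support = int(min_support_ratio * len(transactions))   # 2 with this toy data

# count how many transactions contain each single item (each transaction is a set)
counts = Counter(item for t in transactions for item in t)
frequent_items = {item for item, c in counts.items() if c >= min_support}
print(frequent_items)   # {'a', 'b'} with this toy data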
Example #12
 def infer(raw_data, **kwargs):
     """
     :param raw_data: raw input (a string in this case)
     :param kwargs:
     :return:
     """
     preprocessed_data = preprocess(raw_data, 150)
     output_prediction = model.predict(preprocessed_data)[1].flatten().tolist()
     # DONOTCHANGE: They are reserved for nsml
     # The return value must be in the form [(confidence interval, point)] to be posted to the leaderboard; the confidence interval values do not affect the leaderboard result
     return list(zip(np.zeros(len(output_prediction)), output_prediction))
Example #13
def eval(raw_data):

    preprocessed_data = preprocess('../data/processing_data', raw_data,
                                   config.strmaxlen)
    model.load_state_dict(torch.load("./model/model.pt"))
    model.eval()

    output_prediction = model(preprocessed_data)
    point = output_prediction.data.squeeze(dim=1).tolist()
    point = [np.argmax(p) for p in point]

    return list(zip(np.zeros(len(point)), point))
Example #14
    def infer(raw_data, **kwargs):
        """

        :param raw_data: raw input (a string in this case)
        :param kwargs:
        :return:
        """
        queries = preprocess(raw_data, config.strmaxlen, hidden_layer_size)
        pred = sess.run(hypothesis, feed_dict={x: queries})
        clipped = np.array(pred > config.threshold, dtype=int)
        # DONOTCHANGE: They are reserved for nsml
        return list(zip(pred.flatten(), clipped.flatten()))
Example #15
    def infer(raw_data, **kwargs):
        """

        :param raw_data: raw input (a string in this case)
        :param kwargs:
        :return:
        """
        # Call the preprocess function defined in dataset.py to convert the string into vectors
        preprocessed_data = preprocess(raw_data, config.strmaxlen)
        # Feed the input into the saved model and get the prediction results back
        pred = sess.run(output_sigmoid, feed_dict={x: preprocessed_data})
        clipped = np.array(pred > config.threshold, dtype=int)
        # DONOTCHANGE: They are reserved for nsml
        # The return value must be in the form [(probability, 0 or 1)] to be posted to the leaderboard; the probability values do not affect the leaderboard result
        return list(zip(pred.flatten(), clipped.flatten()))
Example #16
 def infer(raw_data, **kwargs):
     """
     :param raw_data: raw input (a string in this case)
     :param kwargs:
     :return:
     """
     # Call the preprocess function defined in dataset.py to convert the string into vectors
     preprocessed_data = preprocess(raw_data, config.strmaxlen)
     model.eval()
     # Feed the input into the saved model and get the prediction results back
     output_prediction = model(preprocessed_data[0])
     point = output_prediction.data.squeeze(dim=1).tolist()
     # DONOTCHANGE: They are reserved for nsml
     # The return value must be in the form [(confidence interval, point)] to be posted to the leaderboard; the confidence interval values do not affect the leaderboard result
     return list(zip(np.zeros(len(point)), point))
Example #17
 def infer(raw_data, **kwargs):
     """
     :param raw_data: raw input (a string in this case)
     :param kwargs:
     :return:
     """
     # Call the preprocess function defined in dataset.py to convert the string into vectors
     we, sl, cs, wl, _, _ = preprocess(raw_data, config.max_sentence_length,
                                       config.max_word_length, 0)
     # Feed the input into the saved model and get the prediction results back
     pred = sess.run(y_logits,
                     feed_dict={
                         word_embed: we,
                         sent_len: sl,
                         chars: cs,
                         word_len: wl,
                         is_training: False
                     })
     return list(zip(np.zeros(len(pred)), pred))
Example #18
    def infer(raw_data, **kwargs):
        """

        :param raw_data: raw input (a string in this case)
        :param kwargs:
        :return:
        """
        # Call the preprocess function defined in dataset.py to convert the string into vectors
        global save_data
        input1, input2, _ = preprocess(raw_data,
                                       config.strmaxlen,
                                       save_data=save_data or 1)

        # Feed the input into the saved model and get the prediction results back
        output_prediction = model.predict([input1, input2])
        point = output_prediction.flatten().tolist()
        # DONOTCHANGE: They are reserved for nsml
        # The return value must be in the form [(confidence interval, point)] to be posted to the leaderboard; the confidence interval values do not affect the leaderboard result
        return list(zip(np.zeros(len(point)), point))
Example #19
    def load_resources(self, bpart, num_pick):
        """
        Utility method that loads all the resources needed for training.
        Will use csv/pickle/recreated images as cache to avoid recomputation.
        Args:
            bpart: Body part to pick
            num_pick: Number of images to pick from each study in training set

        Returns: train_df, valid_df

        """
        train_table_path = os.path.join(
            self.cache_path,
            "training_table_{}_{}.csv".format(bpart, num_pick)
        )

        valid_table_path = os.path.join(
            self.cache_path,
            "valid_table_{}.csv".format(bpart)
        )

        # Load datasets from CSVs; if they do not exist, recreate them via dataset.py and save them to CSVs
        try:
            train_df = pd.read_csv(train_table_path, index_col=0)
            valid_df = pd.read_csv(valid_table_path, index_col=0)

        except FileNotFoundError:
            util.create_dir(self.cache_path)

            train_df, valid_df = dataset.preprocess()

            if bpart != "all":
                train_df = dataset.pick_bpart(train_df, bpart)
                valid_df = dataset.pick_bpart(valid_df, bpart)

            if num_pick > 0:
                train_df = dataset.pick_n_per_patient(train_df, num_pick)

            train_df.to_csv(train_table_path)
            valid_df.to_csv(valid_table_path)

        return train_df, valid_df
Example #20
 def reader():
     np.random.shuffle(file_list)
     for line in file_list:
         if mode == 'train' or mode == 'eval':
             image_path, label_path = line.split()
             image_path = os.path.join(data_dir, image_path)
             label_path = os.path.join(data_dir, label_path)
             img = Image.open(image_path)
             if img.mode != 'RGB':
                 img = img.convert('RGB')
             im_width, im_height = img.size
             # layout: label | xmin | ymin | xmax | ymax | difficult
             bbox_labels = []
             root = xml.etree.ElementTree.parse(label_path).getroot()
             for object in root.findall('object'):
                 bbox_sample = []
                 bbox_sample.append(
                     float(train_parameters['label_dict'][object.find(
                         'name').text]))
                 bbox = object.find('bndbox')
                 difficult = float(object.find('difficult').text)
                 bbox_sample.append(
                     float(bbox.find('xmin').text) / im_width)
                 bbox_sample.append(
                     float(bbox.find('ymin').text) / im_height)
                 bbox_sample.append(
                     float(bbox.find('xmax').text) / im_width)
                 bbox_sample.append(
                     float(bbox.find('ymax').text) / im_height)
                 bbox_sample.append(difficult)
                 bbox_labels.append(bbox_sample)
             img, sample_labels = preprocess(img, bbox_labels, mode)
             sample_labels = np.array(sample_labels)
             if len(sample_labels) == 0:
                 continue
             boxes = sample_labels[:, 1:5]
             lbls = sample_labels[:, 0].astype('int32')
             difficults = sample_labels[:, -1].astype('int32')
             yield img, boxes, lbls, difficults
         elif mode == 'test':
             img_path = os.path.join(data_dir, line)
             yield Image.open(img_path)
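The reader above stores each ground-truth box as coordinates divided by the image width and height. A small sketch of that normalization on made-up numbers (image size and box values are illustrative):

im_width, im_height = 640, 480              # illustrative image size
xmin, ymin, xmax, ymax = 64, 48, 320, 240   # illustrative absolute box

# normalize to [0, 1] relative coordinates, as in bbox_sample above
box = [xmin / im_width, ymin / im_height, xmax / im_width, ymax / im_height]
print(box)   # [0.1, 0.1, 0.5, 0.5]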
Example #21
 def infer(raw_data, **kwargs):
     """
     :param raw_data: raw input (a string in this case)
     :param kwargs:
     :return:
     """
     # Call the preprocess function defined in dataset.py to convert the string into vectors
     (we, sl, cs, wl, ss,
      _) = preprocess(raw_data, config.max_sentence_length,
                      config.max_word_length, config.max_syll_num)
     # Feed the input into the saved model and get the prediction results back
     pred = sess.run(pred,
                     feed_dict={
                         wx: we,
                         cx_: cs,
                         sx_: ss,
                         is_training: False
                     })
     pred = np.reshape(pred, [-1])
     return list(zip(np.zeros(len(pred)), pred))
Example #22
def read_textfile(path):
    # Extract the data to predict and play
    with open(path, 'rb') as f:
        text = f.readlines()

    text = [t.decode()[:-2] for t in text[:-1]] + [text[-1].decode()]
    text = [t.split(' ') for t in text]

    pr_text = []
    for t in text:
        pr_text.extend(t)

    print('----[INFO] Cleaning text data')
    pr_text = [dataset.preprocess(t) for t in pr_text]

    for i in range(0, (len(pr_text) - 1) * 2, 2):
        pr_text.insert(i + 1, '<l>')  # insert silent regions to add structure
    return pr_text
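The final loop in read_textfile interleaves a '<l>' marker between consecutive tokens. The same pattern on a toy list, as a quick self-contained check:

tokens = ['do', 're', 'mi']
for i in range(0, (len(tokens) - 1) * 2, 2):
    tokens.insert(i + 1, '<l>')   # silent-region marker between tokens
print(tokens)   # ['do', '<l>', 're', '<l>', 'mi']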
Example #23
    def infer(raw_data, **kwargs):
        """

        :param raw_data: raw input (a string in this case)
        :param kwargs:
        :return:
        """
        # Call the preprocess function defined in dataset.py to convert the string into vectors
        preprocessed_data = preprocess(raw_data, config.strmaxlen)
        # Feed the input into the saved model and get the prediction results back
        pred = sess.run(output_score,
                        feed_dict={
                            x: preprocessed_data,
                            output_keep_prob: 1.0,
                            phase: 0
                        })

        point = pred.squeeze(axis=1).tolist()
        # DONOTCHANGE: They are reserved for nsml
        # The return value must be in the form [(confidence interval, point)] to be posted to the leaderboard; the confidence interval values do not affect the leaderboard result
        return list(zip(np.zeros(len(point)), point))
Example #24
    def infer(raw_data, **kwargs):
        """

        :param raw_data: raw input (a string in this case)
        :param kwargs:
        :return:
        """
        # Call the preprocess function defined in dataset.py to convert the string into vectors
        data = preprocess(raw_data, dataset.dict, dataset.max_len)
        max_len = max(map(lambda x: len(x), data))
        review = np.zeros((len(data), max_len), dtype=np.int64)
        for i, row in enumerate(data):
            length = len(row)
            review[i, :length] = row
        model.eval()
        # Feed the input into the saved model and get the prediction results back
        output_prediction = model(Variable(torch.from_numpy(review)).cuda())
        point = ((output_prediction.data.squeeze(1).cpu().numpy() * 9 + 11) / 2).tolist()
        #point = np.clip(output_prediction.data.squeeze(1).cpu().numpy(), 1, 10).tolist()
        # DONOTCHANGE: They are reserved for nsml
        # The return value must be in the form [(confidence interval, point)] to be posted to the leaderboard; the confidence interval values do not affect the leaderboard result
        return list(zip(np.zeros(len(point)), point))
Example #25
    def __getitem__(self, index):
        # 1. get image
        img_path = os.path.join(self.data_dir, "img", self.ids[index] + ".jpg")
        f = Image.open(img_path)
        img = np.asarray(f, dtype=np.float32)
        # (H, W, C) -> (C, H, W)
        img = img.transpose((2, 0, 1))

        # 2. get bbox, top left and bottom right (y_min, x_min, y_max, x_max)
        xml_path = os.path.join(self.data_dir, "ano", self.ids[index] + ".xml")
        anno = ET.parse(xml_path)
        bbox = list()
        label = list()

        for obj in anno.findall("object"):
            bbox_ano = obj.find("bndbox")
            # subtract 1 to make pixel indexes 0-based
            bbox.append([
                int(bbox_ano.find(tag).text) - 1
                for tag in ('ymin', 'xmin', 'ymax', 'xmax')
            ])
            label.append(1)

        bbox = np.stack(bbox).astype(np.float32)
        label = np.stack(label).astype(np.float32)

        # resize the image and bbox according to the paper
        # scale image
        _, H, W = img.shape
        img = preprocess(img)
        _, o_H, o_W = img.shape

        # resize bbox
        bbox = resize_bbox(bbox, (H, W), (o_H, o_W))

        return img, bbox, label
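resize_bbox in the example above rescales the boxes by the same factors used to resize the image. A self-contained sketch of that scaling, assuming boxes in (y_min, x_min, y_max, x_max) order as in the example (the function name here is illustrative, not the project's own helper):

import numpy as np

def resize_bbox_sketch(bbox, in_size, out_size):
    # scale y coordinates by out_H / in_H and x coordinates by out_W / in_W
    y_scale = out_size[0] / in_size[0]
    x_scale = out_size[1] / in_size[1]
    out = bbox.copy()
    out[:, 0] = bbox[:, 0] * y_scale
    out[:, 2] = bbox[:, 2] * y_scale
    out[:, 1] = bbox[:, 1] * x_scale
    out[:, 3] = bbox[:, 3] * x_scale
    return out

bbox = np.array([[10., 20., 110., 220.]])
# boxes scaled from a 200x400 image to 100x100: [[5., 5., 55., 55.]]
print(resize_bbox_sketch(bbox, (200, 400), (100, 100)))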
Example #26
def main():
    # parse arguments
    args = parse_args()

    # fix seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True

    # set size
    seq_size = args.seq_size
    init_size = args.init_size

    # set device as gpu
    device = torch.device('cuda', 0)

    # set writer
    exp_name = set_exp_name(args)
    writer = SummaryWriter(args.log_dir + exp_name)

    # load dataset
    train_loader, test_loader = maze_dataloader(seq_size, init_size,
                                                args.batch_size)

    # init models
    hrssm_params = {
        'seq_size': args.seq_size,
        'init_size': args.init_size,
        'state_size': args.state_size,
        'belief_size': args.belief_size,
        'num_layers': args.num_layers,
        'max_seg_num': args.seg_num,
        'max_seg_len': args.seg_len
    }
    optimizer = optim.Adam
    optimizer_params = {'lr': args.learn_rate, 'amsgrad': True}
    model = HRSSM(optimizer=optimizer,
                  optimizer_params=optimizer_params,
                  clip_grad_norm=args.grad_clip,
                  hrssm_params=hrssm_params)

    # test data
    pre_test_full_data_list = next(iter(test_loader))
    pre_test_full_data_list = preprocess(pre_test_full_data_list.to(device))

    # for each iter
    b_idx = 0
    while b_idx <= args.max_iters:
        # for each batch
        for train_obs_list in train_loader:
            b_idx += 1
            # mask temp annealing
            if args.beta_anneal:
                model.mask_beta = (args.max_beta - args.min_beta) * 0.999**(
                    b_idx / args.beta_anneal) + args.min_beta
            else:
                model.mask_beta = args.max_beta

            # get input data
            train_obs_list = preprocess(train_obs_list.to(device))

            # train step and return the loss
            loss = model.train(train_obs_list)

            # log
            if b_idx % 1000 == 0:
                writer.add_scalar('train/total_loss', loss, b_idx)

            # test time
            if b_idx % 1000 == 0:
                # set data
                pre_test_init_data_list = pre_test_full_data_list[:, :init_size]
                post_test_init_data_list = post_process_maze(
                    pre_test_init_data_list)
                pre_test_input_data_list = pre_test_full_data_list[:, init_size:(init_size + seq_size)]
                post_test_input_data_list = post_process_maze(
                    pre_test_input_data_list)

                with torch.no_grad():
                    # test data elbo
                    results = model.reconstruction(pre_test_full_data_list)
                    post_test_rec_data_list = post_process_maze(
                        results['rec_data'])
                    output_img, output_mask = plot_rec(
                        post_test_init_data_list, post_test_input_data_list,
                        post_test_rec_data_list, results['mask_data'],
                        results['p_mask'], results['q_mask'])

                    # log
                    loss = model.test(pre_test_full_data_list)
                    writer.add_scalar('valid/total_loss', loss, b_idx)
                    writer.add_image('valid/rec_image',
                                     output_img.transpose([2, 0, 1]),
                                     global_step=b_idx)
                    writer.add_image('valid/mask_image',
                                     output_mask.transpose([2, 0, 1]),
                                     global_step=b_idx)

                    # full generation
                    pre_test_gen_data_list, test_mask_data_list = model.full_generation(
                        pre_test_init_data_list, seq_size)
                    post_test_gen_data_list = post_process_maze(
                        pre_test_gen_data_list)

                    # log
                    output_img = plot_gen(post_test_init_data_list,
                                          post_test_gen_data_list,
                                          test_mask_data_list)
                    writer.add_image('valid/full_gen_image',
                                     output_img.transpose([2, 0, 1]), b_idx)

                    # jumpy imagination
                    pre_test_gen_data_list = model.jumpy_generation(
                        pre_test_init_data_list, seq_size)
                    post_test_gen_data_list = post_process_maze(
                        pre_test_gen_data_list)

                    # log
                    output_img = plot_gen(post_test_init_data_list,
                                          post_test_gen_data_list)
                    writer.add_image('valid/jumpy_gen_image',
                                     output_img.transpose([2, 0, 1]), b_idx)
Example #27
def main():

    dataset.preprocess()

    # prepare the data generator

    training_positive = open(dataset.positive_training_file,
                             'r',
                             encoding='utf-8')
    training_negative = open(dataset.negative_training_file,
                             'r',
                             encoding='utf-8')

    training_data_generator = dataset.data_batch_generator(
        training_positive, training_negative, 250)

    # define lstm network graph

    tf.reset_default_graph()

    labels = tf.placeholder(tf.float32, [None, num_classes],
                            name='labels')  # auto determine batch size
    data = tf.placeholder(tf.float32, [None, None, dataset.word_vector_size],
                          name='data')  # auto sequence length

    lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_lstm_units)
    lstm_cell = tf.contrib.rnn.DropoutWrapper(cell=lstm_cell,
                                              output_keep_prob=0.75)

    value, state = tf.nn.dynamic_rnn(lstm_cell, data, dtype=tf.float32)

    weight = tf.Variable(tf.truncated_normal([num_lstm_units, num_classes]))
    bias = tf.Variable(tf.constant(0.1, shape=[num_classes]))

    last = value[:, -1, :]  # get the last hidden state in the model

    prediction = (tf.add(tf.matmul(last, weight), bias, name='prediction'))

    # visualise accuracy

    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32),
                              name='accuracy')

    # define loss function and optimizer

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction,
                                                labels=labels))
    optimizer = tf.train.AdamOptimizer().minimize(loss)

    # run training

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    # define Tensorboard hooks

    tf.summary.scalar('Loss', loss)
    tf.summary.scalar('Accuracy', accuracy)
    merged = tf.summary.merge_all()
    logdir = "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + "/"
    writer = tf.summary.FileWriter(logdir, sess.graph)

    for i in range(max_iterations):

        # get next data batch

        try:
            crt_batch, crt_batch_labels = next(training_data_generator)
        except StopIteration:
            print("end of dataset")

            save_path = saver.save(sess,
                                   "models2/pretrained_lstm.ckpt",
                                   global_step=i)
            print("saved to %s" % save_path)

            break

        sess.run(optimizer, {data: crt_batch, labels: crt_batch_labels})

        # write summary to Tensorboard logs

        if i % tensorboard_update_interval == 0:
            summary = sess.run(merged, {
                data: crt_batch,
                labels: crt_batch_labels
            })
            writer.add_summary(summary, i)

    writer.close()

    training_positive.close()
    training_negative.close()
Example #28
def visualise(img):
    execute_net(dataset.preprocess(img))
Example #29
def main(hparams):
    with open("data/text8") as f:
        text = f.read()

    words = dataset.preprocess(text)

    vocab_to_int, int_to_vocab = dataset.create_lookup_tables(words)
    int_words = [vocab_to_int[word] for word in words]

    train_words, noise_dist = dataset.subsampling(int_words)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    embedding_dim = 300
    model = SkipGramNeg(len(vocab_to_int),
                        embedding_dim,
                        noise_dist=noise_dist).to(device)

    # using the loss that we defined
    criterion = NegativeSamplingLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.003)

    print_every = 1500
    epochs = hparams.epochs
    batch_size = hparams.batch_size

    version = 0

    while True:
        save_path = os.path.join(hparams.ckpt_path, f"version-{version}")
        if not os.path.exists(save_path):
            os.makedirs(save_path)
            break
        else:
            version += 1

    summarywriter = SummaryWriter(save_path)
    global_step = 0
    for epoch in range(epochs):

        for step, (input_words, target_words) in tqdm(
                enumerate(dataset.get_batches(train_words, batch_size)),
                desc="Training On!",
                total=len(train_words) // batch_size,
                # total= len(train_words[:(len(train_words)//batch_size)])
        ):
            global_step += 1
            # steps+=1
            inputs, targets = torch.LongTensor(input_words), torch.LongTensor(
                target_words)
            inputs, targets = inputs.to(device), targets.to(device)

            # input, output, and noise vectors
            input_vectors = model.forward_input(inputs)
            output_vectors = model.forward_output(targets)
            noise_vectors = model.forward_noise(inputs.shape[0], 5)

            # negative sampling loss
            loss = criterion(input_vectors, output_vectors, noise_vectors)
            summarywriter.add_scalars("loss", {"train": loss}, global_step)
            optimizer.zero_grad()

            loss.backward()
            optimizer.step()

            if step % print_every == 0:
                tqdm.write(
                    f"Epoch: {epoch+1}/{epochs}, Loss: {loss.item():.4f}")
                # save model
                new_path = os.path.join(
                    save_path,
                    f"best_model_epoch_{epoch}_acc_{loss.item():.4f}.pt")

                for filename in glob.glob(os.path.join(save_path, "*.pt")):
                    os.remove(filename)
                torch.save(model.state_dict(), new_path)
                summarywriter.close()
Example #30
def predict(img):
    execute_net(dataset.preprocess(img))
    print(net.receptors[net.depth])
    pos = np.argmax(net.receptors[net.depth])
    print(dataset.folders[pos])