Example #1
def main():
    para = params_setup()
    logging_config_setup(para)

    logging.info('Creating graph')
    graph, model, data_generator = create_graph(para)

    with tf.Session(config=config_setup(), graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        logging.info('Loading weights')
        load_weights(para, sess, model)
        print_num_of_trainable_parameters()

        try:
            if para.mode == 'train':
                logging.info('Started training')
                train(para, sess, model, data_generator)
                if para.save_final_model_path != '':
                    save_weights(sess, model, para.save_final_model_path)
            elif para.mode == 'validation':
                logging.info('Started validation')
                test(para, sess, model, data_generator)
            elif para.mode == 'test':
                logging.info('Started testing')
                test(para, sess, model, data_generator)
            elif para.mode == 'predict':
                logging.info('Predicting')
                predict(para, sess, model, data_generator,
                        './data/solar-energy3/solar_predict.txt', para.samples)

        except KeyboardInterrupt:
            print('KeyboardInterrupt')
        finally:
            print('Stop')
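params_setup, create_graph, load_weights and the other helpers here are repo-internal and not shown. For orientation, a config_setup() in TF1-style pipelines like this one usually returns a session ConfigProto; the sketch below is an assumption about its shape, not the repo's actual helper.

import tensorflow as tf

def config_setup():
    # Hypothetical sketch of the helper main() passes to tf.Session:
    # a TF1 session config that shares the GPU instead of reserving
    # all of its memory up front.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    return config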
Example #2
def predict(config: str, meta: str, input: str, output_directory: str,
            output_filename: str) -> None:
    config = global_config.Config.from_file(config)
    model = models.create_model(config, meta)
    global_predict.predict(input, config=config, meta_path=meta).to_csv(
        os.path.join(
            output_directory or os.path.join("..", "models", model.model_id),
            output_filename or "predict.tsv"))
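The two `or` expressions implement fallback paths: an empty output_directory or output_filename falls back to a location derived from the model. A stand-alone illustration with a made-up model_id:

import os

model_id = "m-001"  # stand-in for model.model_id
out_dir = "" or os.path.join("..", "models", model_id)
out_name = "" or "predict.tsv"
print(os.path.join(out_dir, out_name))  # ../models/m-001/predict.tsv on POSIX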
Example #3
def main(_):
    args = params_setup()
    print("[args]: ", args)
    if args.mode == 'train':
        train(args)
    elif args.mode == 'test':
        predict(args)
    elif args.mode == 'chat':
        chat(args)
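The mode dispatch above is a plain if/elif ladder. A dict-based dispatch is an equivalent formulation; the handlers here are stubs so the sketch runs on its own:

from types import SimpleNamespace

def train(args): print("training with", args)
def predict(args): print("testing with", args)
def chat(args): print("chatting with", args)

def main(args):
    handlers = {'train': train, 'test': predict, 'chat': chat}
    handler = handlers.get(args.mode)
    if handler is None:
        raise ValueError("unknown mode: %r" % args.mode)
    handler(args)

main(SimpleNamespace(mode='train'))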
Example #4
def main(_):
    args = params_setup(model_num=0)
    args1 = params_setup(model_num=1)

    args = check_mion_ray(args)
    args1 = check_mion_ray(args1)

    print("[args]: ", args)
    if args.mode == 'train':
        train(args)
    elif args.mode == 'test':
        predict(args)
    elif args.mode == 'chat':
        chat(args)
    elif args.mode == 'fight':
        fight(args, args1)
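params_setup(model_num=...) and check_mion_ray are repo-internal; the example only shows that the former returns one args-like object per model, which the 'fight' mode needs two of. A hypothetical stub that satisfies those call sites:

from types import SimpleNamespace

def params_setup(model_num=0):
    # Made-up per-model defaults; the real helper presumably parses flags.
    defaults = [{'mode': 'fight', 'name': 'bot_a'},
                {'mode': 'fight', 'name': 'bot_b'}]
    return SimpleNamespace(**defaults[model_num])

args, args1 = params_setup(model_num=0), params_setup(model_num=1)
print(args.name, args1.name)  # bot_a bot_b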
Example #5
def prepare_and_predict(config: global_config.Config, meta: str,
                        input: str) -> dataframe.Dataframe:
    result = predict.predict(
        prepare_text(input, config),
        config=config,
        meta_path=meta,
    )
    result.sort(["chapter_id", "paragraph_id", "sentence_id", "word_id"])

    return result
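Dataframe.sort takes a column list and orders words by their position in the document: chapter, then paragraph, sentence, and word. With plain dicts the same ordering is a tuple key; the rows below are made up:

rows = [
    {'chapter_id': 1, 'paragraph_id': 2, 'sentence_id': 0, 'word_id': 0},
    {'chapter_id': 1, 'paragraph_id': 1, 'sentence_id': 3, 'word_id': 2},
]
rows.sort(key=lambda r: (r['chapter_id'], r['paragraph_id'],
                         r['sentence_id'], r['word_id']))
print([r['paragraph_id'] for r in rows])  # [1, 2]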
Example #6
def predict_route():
    sentence = request.args.get("sentence")
    output = predict(sentence)

    if output == 1:
        output = "Positive"
        prob = g.model.predict_proba(g.X).round(3)[0][1]

    else:
        output = "Negative"
        prob = g.model.predict_proba(g.X).round(3)[0][0]

    return render_template('index.html',
                           result=output,
                           sentence=sentence,
                           lemmed=g.lemmed,
                           prob=prob,
                           showPrediction=True)
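predict_route reads request arguments and several g attributes, so it assumes a Flask app that populates g before each request and registers the view. A minimal sketch of that wiring; DummyModel, the URL rule, and the g field values are assumptions:

import numpy as np
from flask import Flask, g

app = Flask(__name__)

class DummyModel:
    # Stand-in for whatever classifier the original app loads.
    def predict_proba(self, X):
        return np.array([[0.2, 0.8]])

@app.before_request
def load_globals():
    # The route reads g.model, g.X and g.lemmed; something must set them.
    g.model = DummyModel()
    g.X = np.zeros((1, 1))
    g.lemmed = []

app.add_url_rule('/', view_func=predict_route)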
Example #7
    def __init__(self, label):
        # Plain super() avoids the super(self.__class__, ...) pitfall, which
        # recurses forever as soon as this class is subclassed.
        super().__init__(label)
        self.predict_obj = predict(stops[0])
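Why the zero-argument super() matters here: with super(self.__class__, self), any subclass would recurse forever, because self.__class__ is always the most-derived type. A self-contained demonstration:

class Base:
    def __init__(self, label):
        self.label = label

class Child(Base):
    def __init__(self, label):
        # Written as super(self.__class__, self).__init__(label), creating a
        # Grandchild would loop: self.__class__ is Grandchild, so the call
        # resolves back to Child.__init__ again, forever.
        super().__init__(label)

class Grandchild(Child):
    pass

print(Grandchild("ok").label)  # ok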
Example #8
    parser.add_argument(
        '--out_dir',
        type=str,
        help='output directory where the model hdf5 file will be saved')
    parser.add_argument('--path_to_image',
                        type=str,
                        help='path to the image to deliver predictions for')
    parser.add_argument('--model_location',
                        type=str,
                        help='path to the model hdf5 file')

    args = parser.parse_args()

    if args.mode == 'train' and (args.img_dir is None
                                 or args.metadata_dir is None
                                 or args.out_dir is None):
        parser.error("--train mode requires --img_dir and --metadata_dir.")

    if args.mode == 'predict' and (args.path_to_image is None
                                   or args.model_location is None):
        parser.error("--predict mode requires --path_to_image.")

    # Example default paths; note the branches below read args.* directly,
    # so these locals are currently unused.
    img_dir = "/kaggle/input/images-classification/data/images"
    metadata_dir = "/kaggle/input/images-classification/data/"
    out_dir = "/data/workspace/"

    if args.mode == 'train':
        train_model(args.img_dir, args.metadata_dir, args.out_dir)

    if args.mode == 'predict':
        predict(args.path_to_image, args.model_location)
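The excerpt begins mid-way through building the parser: args.mode, args.img_dir and args.metadata_dir are read but never declared in what is shown. A head along these lines is assumed (the argument names are taken from the excerpt, the help strings are made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, choices=['train', 'predict'],
                    required=True, help='train the model or run predictions')
parser.add_argument('--img_dir', type=str,
                    help='directory containing the training images')
parser.add_argument('--metadata_dir', type=str,
                    help='directory containing the training metadata')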
Example #9
def inference(inference_loader, model, args):
    global pred_visualize, palette, d, feats_history, label_history, weight_dense, weight_sparse
    batch_time = AverageMeter()
    annotation_dir = os.path.join(args.data,
                                  'DAVIS_{}/Annotations/480p'.format(args.set))
    annotation_list = sorted(os.listdir(annotation_dir))

    last_video = 0
    frame_idx = 0
    all_ious = []
    with torch.no_grad():
        for i, (input, curr_video, img_original,
                annotation) in enumerate(inference_loader):
            if curr_video != last_video:
                # calculate sequence iou
                seq_ious = np.mean(ious, axis=0)
                all_ious.append(seq_ious)
                # save prediction
                pred_visualize = pred_visualize.cpu().numpy()

                for f in range(1, frame_idx):
                    save_path = args.save
                    save_name = str(f).zfill(5)
                    video_name = annotation_list[last_video]
                    pred = np.asarray(pred_visualize[f - 1], dtype=np.int32)
                    save_prediction(pred, palette, save_path, save_name,
                                    video_name)

                frame_idx = 0
                print("End of video {:d} {}, seq ious: {}".format(
                    last_video.item() + 1, video_name, seq_ious))
            if frame_idx == 0:
                input = input.to(device)
                with torch.no_grad():
                    feats_history = model(input)
                label_history, d, palette, weight_dense, weight_sparse = prepare_first_frame(
                    curr_video, args.save, annotation_dir, args.sigma1,
                    args.sigma2)
                label_set = np.unique(annotation)
                ious = []
                frame_idx += 1
                last_video = curr_video
                continue
            (batch_size, num_channels, H, W) = input.shape
            input = input.to(device)

            start = time.time()
            features = model(input)
            (_, feature_dim, H_d, W_d) = features.shape
            prediction = predict(feats_history, features[0], label_history,
                                 weight_dense, weight_sparse, frame_idx, args)
            # Store all frames' features
            new_label = idx2onehot(torch.argmax(prediction, 0), d).unsqueeze(1)
            label_history = torch.cat((label_history, new_label), 1)
            feats_history = torch.cat((feats_history, features), 0)

            last_video = curr_video
            frame_idx += 1

            # 1. upsample, 2. argmax
            prediction = torch.nn.functional.interpolate(prediction.view(
                1, d, H_d, W_d),
                                                         size=(H, W),
                                                         mode='bilinear',
                                                         align_corners=False)
            prediction = torch.argmax(prediction, 1)  # (1, H, W)

            if frame_idx == 2:
                pred_visualize = prediction
            else:
                pred_visualize = torch.cat((pred_visualize, prediction), 0)

            batch_time.update(time.time() - start)

            pred = np.asarray(np.squeeze(prediction.cpu().numpy()),
                              dtype=np.int32)
            annot = annotation.cpu().numpy()
            ious.append(calculate_multi_object_ious(pred, annot, label_set))

            # if i % 10 == 0:
            #     print('Validate: [{0}/{1}]\t'
            #           'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(
            #         i, len(inference_loader), batch_time=batch_time))
        # save last video's prediction
        seq_ious = np.mean(ious, axis=0)
        all_ious.append(seq_ious)
        pred_visualize = pred_visualize.cpu().numpy()
        for f in range(1, frame_idx):
            save_path = args.save
            save_name = str(f).zfill(5)
            video_name = annotation_list[last_video]
            save_prediction(np.asarray(pred_visualize[f - 1], dtype=np.int32),
                            palette, save_path, save_name, video_name)
    all_ious = np.concatenate(all_ious, axis=0)
    print('Finished inference. mIoU: {}. ave batch time {}s.'.format(
        np.mean(all_ious), batch_time.avg))
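idx2onehot, prepare_first_frame, save_prediction and calculate_multi_object_ious are repo helpers. The call idx2onehot(torch.argmax(prediction, 0), d).unsqueeze(1) implies the helper maps an (H, W) index map to a (d, H, W) one-hot tensor; a hypothetical version consistent with that shape:

import torch
import torch.nn.functional as F

def idx2onehot(idx, d):
    # (H, W) integer labels -> (d, H, W) one-hot, one channel per class.
    return F.one_hot(idx.long(), num_classes=d).permute(2, 0, 1).float()

labels = torch.tensor([[0, 1], [2, 1]])
print(idx2onehot(labels, 3).shape)  # torch.Size([3, 2, 2])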
Example #10
def inference(inference_loader, model, args):
    global pred_visualize, palette, d, feats_history, label_history, weight_dense, weight_sparse
    batch_time = AverageMeter()
    annotation_dir = os.path.join(args.data, 'DAVIS_val/Annotations/480p')
    annotation_list = sorted(os.listdir(annotation_dir))

    last_video = 0
    frame_idx = 0
    with torch.no_grad():
        for i, (input, curr_video,
                img_original) in enumerate(inference_loader):
            if curr_video != last_video:
                # save prediction
                pred_visualize = pred_visualize.cpu().numpy()
                for f in range(1, frame_idx):
                    save_path = args.save
                    save_name = str(f).zfill(5)
                    video_name = annotation_list[last_video]
                    save_prediction(
                        np.asarray(pred_visualize[f - 1], dtype=np.int32),
                        palette, save_path, save_name, video_name)

                frame_idx = 0
                print("End of video %d. Processing a new annotation..." %
                      (last_video + 1))
            if frame_idx == 0:
                input = input.to(device)
                with torch.no_grad():
                    feats_history = model(input)
                label_history, d, palette, weight_dense, weight_sparse = prepare_first_frame(
                    curr_video, args.save, annotation_dir, args.sigma1,
                    args.sigma2)
                frame_idx += 1
                last_video = curr_video
                continue
            (batch_size, num_channels, H, W) = input.shape
            input = input.to(device)

            start = time.time()
            features = model(input)
            (_, feature_dim, H_d, W_d) = features.shape
            prediction = predict(feats_history, features[0], label_history,
                                 weight_dense, weight_sparse, frame_idx, args)
            # Store all frames' features
            new_label = idx2onehot(torch.argmax(prediction, 0), d).unsqueeze(1)
            label_history = torch.cat((label_history, new_label), 1)
            feats_history = torch.cat((feats_history, features), 0)

            last_video = curr_video
            frame_idx += 1

            # 1. upsample, 2. argmax
            prediction = torch.nn.functional.interpolate(prediction.view(
                1, d, H_d, W_d),
                                                         size=(H, W),
                                                         mode='bilinear',
                                                         align_corners=False)
            prediction = torch.argmax(prediction, 1)  # (1, H, W)

            if frame_idx == 2:
                pred_visualize = prediction
            else:
                pred_visualize = torch.cat((pred_visualize, prediction), 0)

            batch_time.update(time.time() - start)

            if i % 10 == 0:
                print('Validate: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.
                      format(i, len(inference_loader), batch_time=batch_time))
        # save last video's prediction
        pred_visualize = pred_visualize.cpu().numpy()
        for f in range(1, frame_idx):
            save_path = args.save
            save_name = str(f).zfill(5)
            video_name = annotation_list[last_video]
            save_prediction(np.asarray(pred_visualize[f - 1], dtype=np.int32),
                            palette, save_path, save_name, video_name)
    print('Finished inference.')
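Both inference examples time their batches with an AverageMeter, which neither shows. The conventional implementation from PyTorch example code tracks the latest value alongside a running average; a sketch:

class AverageMeter:
    # Tracks the most recent value and a running average, as used by
    # batch_time.update(...) / batch_time.val / batch_time.avg above.
    def __init__(self):
        self.val = self.sum = self.count = self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count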
Example #11
s.headers.update({'cookie': c})
img = s.get("https://jaccount.sjtu.edu.cn/jaccount/captcha?" +
            str(int(round(time.time() * 1000))))
with open("./tmp.jpg", 'wb') as tmp_img:
    for chunk in img.iter_content(80):
        tmp_img.write(chunk)

usernamebox = chrome.find_element_by_id("user")
usernamebox.clear()
usernamebox.send_keys(username)

passwordbox = chrome.find_element_by_id("pass")
passwordbox.clear()
passwordbox.send_keys(password)

captchabox = chrome.find_element_by_id("captcha")
captcha = predict(Image.open(BytesIO(img.content)).convert("L"))
captchabox.clear()
captchabox.send_keys(captcha)
print(captcha)

captchabox.send_keys(Keys.RETURN)

# electionNumber = input("Which course selection round is this? (enter 1/2/3 for the open, grab, or third round): ")
electionNumber = '3'
while electionNumber not in ["1", "2", "3"]:
    electionNumber = input("Invalid input, please re-enter (enter 1/2/3 for the open, grab, or third round): ")
electionUrl = "http://electsys.sjtu.edu.cn/edu/student/elect/warning.aspx?&xklc="+electionNumber+"&lb=1"
chrome.get(electionUrl)
temp = chrome.find_element_by_id("CheckBox1")