import torch
import torch.nn as nn
from torch.utils.data import DataLoader

# SegnetConvLSTM, TUSimpleDataset, Configs, config, tu (checkpoint utilities) and
# `device` come from the project's own modules and are assumed to be in scope here.


def pixel_accuracy(prediction, target):  # header assumed: the snippet begins mid-function
    # binarize the lane channel and compare it pixel-wise against the target mask
    out = (prediction[:, 1, :, :] > 0.).float()
    return (out == target).float().mean().item()


def f1_score(output, target, epsilon=1e-7):
    # turn output into 0-1 map
    probas = (output[:, 1, :, :] > 0.).float()
    TP = (probas * target).sum(dim=1)
    precision = TP / (probas.sum(dim=1) + epsilon)
    recall = TP / (target.sum(dim=1) + epsilon)
    f1 = 2 * precision * recall / (precision + recall + epsilon)
    f1 = f1.clamp(min=epsilon, max=1 - epsilon)
    return f1.mean().item(), (precision.mean().item(), recall.mean().item())


cc = Configs()

print("Loading stored model")
model = SegnetConvLSTM(cc.hidden_dims, decoder_out_channels=2,
                       lstm_nlayers=len(cc.hidden_dims),
                       vgg_decoder_config=cc.decoder_config)
tu.load_model_checkpoint(model, '../train-results/model-fixed.torch',
                         inference=False, map_location=device)
model.to(device)
print("Model loaded")

tu_test_dataset = TUSimpleDataset(config.ts_root, config.ts_subdirs, config.ts_flabels,
                                  shuffle=False)  # , shuffle_seed=9)

# build data loader
tu_test_dataloader = DataLoader(tu_test_dataset, batch_size=cc.test_batch,
                                shuffle=True, num_workers=2)

# using crossentropy for weighted loss
criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor([0.02, 1.02])).to(device)
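# Minimal evaluation-loop sketch tying the pieces above together. It assumes each
# batch from tu_test_dataloader yields (frames, targets), where `frames` is the
# input the model expects and `targets` is the binary lane mask of shape N x H x W;
# the batch layout and the metric-function names are assumptions, not taken from
# the original file.
def evaluate(model, dataloader, criterion):
    model.eval()
    total_loss, total_acc, total_f1 = 0., 0., 0.
    with torch.no_grad():
        for frames, targets in dataloader:
            # frame sequences may arrive as a list of tensors (one per timestep)
            if isinstance(frames, (list, tuple)):
                frames = [f.to(device) for f in frames]
            else:
                frames = frames.to(device)
            targets = targets.to(device)

            output = model(frames)  # expected shape: N x 2 x H x W
            total_loss += criterion(output, targets.long()).item()
            total_acc += pixel_accuracy(output, targets.float())
            f1, _ = f1_score(output, targets.float())
            total_f1 += f1

    n = len(dataloader)
    return total_loss / n, total_acc / n, total_f1 / n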
import json

from flask import Flask, request, send_file, render_template, abort, Response, redirect
from rq import Queue
from rq.job import Job
from flask_cors import CORS
from rq_scheduler import Scheduler
from redis import Redis

from utils.config import Configs
from packages.github.github import Github
from worker import conn
from actions.actions import Actions
from mongo.db import *

configs = Configs()
actions = Actions()
github = Github()
DB()  # initialize the database layer (DB comes from the mongo.db star import)

app = Flask(__name__, static_folder="./dist/static", template_folder="./dist")
CORS(app, resources={r"/*": {"origins": "*"}})

q = Queue(connection=conn)
scheduler = Scheduler(connection=Redis())


@app.after_request
def set_response_headers(response):
    response.headers["Cache-Control"] = "no-cache"
    return response  # after_request handlers must return the response object
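# Hedged sketch of how the `q` queue and `scheduler` declared above are typically
# used with rq / rq-scheduler. `sync_repositories` is a hypothetical job function
# introduced only for illustration; it is not defined in the original file.
from datetime import datetime


def sync_repositories():
    # placeholder job body; a real job would call into actions/github
    pass


# enqueue a one-off background job; its status can later be polled with
# Job.fetch(job.id, connection=conn)
job = q.enqueue(sync_repositories)

# schedule the same job to run every 15 minutes via rq-scheduler
scheduler.schedule(
    scheduled_time=datetime.utcnow(),  # first run
    func=sync_repositories,
    interval=15 * 60,  # seconds between runs
    repeat=None,       # repeat indefinitely
)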