Example #1
def login():
    try:
        if request.method == "POST":
            db = Database()
            username = request.form['username'].lower()
            users = db.read('SELECT Username,password,AuthKey,GUID,B3ID,Permissions FROM users WHERE username = (%s)',
                            username)
            if not users:
                users = db.read('''SELECT Username,password,AuthKey,GUID,B3ID,Permissions FROM users WHERE
                                Email = (%s)''', username)
            db.close()
            # users = [['supiri', 'pass', 'auth', '123', '3', [True, True, True, True, True]]]

            if users and sha256_crypt.verify(request.form['password'], users[0][1]):
                # if request.form['username'].lower() == users[0][0] and request.form['password'] == users[0][1]:
                session['logged_in'] = True
                session['username'] = users[0][0].title()
                session['authkey'] = users[0][2]
                session['guid'] = users[0][3]
                session['b3id'] = users[0][4]
                session['permission'] = users[0][5]
                flash('Welcome {}!'.format(users[0][0].title()))
                return redirect(request.referrer)
            else:
                flash('Invalid credentials. Try again.')
                return redirect(request.referrer)
        # plain GET requests fall through here; always return a response
        return redirect(request.referrer)
    except Exception as e:
        LOG.exception(e)
        return redirect(request.referrer)
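Note: the Database helper used throughout these examples is not shown in the source. What follows is a minimal hypothetical sketch, assuming MySQL via mysql-connector-python, matching the read()/write()/close() call sites above:

import mysql.connector

class Database:
    # Hypothetical reconstruction: a thin wrapper over a MySQL connection
    # with parameterized read/write helpers. Host/user/password are placeholders.
    def __init__(self):
        self.conn = mysql.connector.connect(host='localhost', user='app',
                                            password='secret', database='app')

    def read(self, query, *params):
        cur = self.conn.cursor()
        cur.execute(query, params)  # parameterized: values are never interpolated into the SQL string
        rows = cur.fetchall()
        cur.close()
        return rows

    def write(self, query, *params):
        cur = self.conn.cursor()
        cur.execute(query, params)
        self.conn.commit()
        cur.close()

    def close(self):
        self.conn.close()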
Example #2
def ban_player(ssid):
    try:
        db = Database()
        target_b3id = str(db.read('''SELECT B3ID from ScreenShots where ID = (%s)''', ssid)[0][0])

        m = MultipartEncoder(
            fields={"b3id": str(session['b3id']), "cmd": 'ban', "args": '@' + id,
                    "args2": '^3WallHacker^0|^1Proof- https://mrdetective.supiritech.com/ss/{}'.format(ssid),
                    "secretkey": db.read('SELECT secretkey from server_info')[0][0]}
        )
        ip, port, url = db.read('SELECT b3_ip,b3_port,b3_url from server_info')[0]
        LOG.error('http://{}:{}/{}'.format(ip, port, url))
        r = requests.post('http://{}:{}/{}'.format(ip, port, url), data=m,
                          headers={'Content-Type': m.content_type}).text
        if 'true' in r:
            db.write('''UPDATE ScreenShots SET banned = (%s) WHERE ID = (%s)''', session['username'], str(ssid))
            flash('Player Was Successfully Banned')
        else:
            flash('There was an error while banning the player. Try again in a bit!')
            flash('Make sure you are connected to the server before banning again.')
        db.close()
        return redirect(request.referrer)
    except Exception as e:
        LOG.exception(e)
        flash('Something Went Wrong')
        return redirect(request.referrer)
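For reference, MultipartEncoder comes from the requests_toolbelt package; the key detail is passing the encoder's content_type as the Content-Type header. A standalone sketch with placeholder URL and fields:

from requests_toolbelt import MultipartEncoder
import requests

# Placeholder endpoint and fields, for illustration only.
m = MultipartEncoder(fields={'cmd': 'ban', 'args': '@123'})
r = requests.post('http://example.com/endpoint', data=m,
                  headers={'Content-Type': m.content_type})
print(r.text)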
Example #3
def register():
    try:
        if request.method == "POST":
            db = Database()
            displayname = request.form['name']
            username = str(request.form['regusername']).lower()
            password = sha256_crypt.hash(str(request.form['regpassword']))  # hash() supersedes the deprecated encrypt()
            email = request.form['email']
            AuthKey = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
            x = db.read("SELECT * FROM users WHERE Username = (%s)", username)
            y = db.read("SELECT * FROM users WHERE Email = (%s)", email)
            if x:
                flash('Username Already in Use')
                return redirect(request.referrer)
            if y:
                flash('Your Email is already Registered')
                return redirect(request.referrer)
            db.write('''INSERT INTO users(Name,Username,Password,Email,AuthKey) VALUES(%s,%s,%s,%s,%s)''', displayname,
                     username, password, email, AuthKey)
            db.close()
            flash('You Have Successfully Registered!')
            return redirect(request.referrer)
        # plain GET requests fall through here; always return a response
        return redirect(request.referrer)
    except Exception as e:
        LOG.exception(e)
        return redirect(url_for('home'))
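The password handling in these examples relies on passlib's sha256_crypt. A minimal round trip (hash() is the modern name; encrypt() is the deprecated alias):

from passlib.hash import sha256_crypt

hashed = sha256_crypt.hash('hunter2')          # salted sha256-crypt hash string
assert sha256_crypt.verify('hunter2', hashed)  # True for the matching password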
Example #4
def home():
    try:
        if 'authkey' in session and session['authkey'] is not None:
            flash("Welcome ! It's look like you are new to here when you visit the Black Assassins Server type"
                  " !webauth {} to complete the registration".format(session['authkey']))
        db = Database()
        session['last_ss'] = int(db.read('SELECT ID FROM screenshots ORDER BY ID DESC LIMIT 1')[0][0])
        ss = db.read('SELECT * FROM screenshots WHERE ID <= (%s) ORDER BY ID DESC LIMIT 27', session['last_ss'])
        ss_new = []
        db.close()
        for s in ss:
            s = list(s)
            s[-1] = datetime.fromtimestamp(s[-1])
            s[1] = escape(s[1])
            s[4] = escape(str(s[4])[:90].strip(',').strip(' '))
            ss_new.append(s)
        ss = ss_new
        if request.method == "POST":
            print(request.form)
            return render_template('home.html', ss=ss)
        else:
            return render_template('home.html', ss=ss)
    except Exception as e:
        LOG.exception(e)
        return render_template('home.html', ss=[])
Example #5
def logout():
    try:
        session.clear()
        flash("You have been logged out!")
        return redirect(request.referrer)
    except Exception as e:
        LOG.exception(e)
        return render_template("home.html")
Example #6
def Elastic_MobileNet(args, logfile):
    """
    Based on MobileNet v1 with ImageNet pretrained weights, https://github.com/marvis/pytorch-mobilenet
    Note: the alpha and width multipliers are not implemented here.
    """
    num_categories = args.num_classes
    add_intermediate_layers = args.add_intermediate_layers
    pretrained_weight = args.pretrained_weight

    model = MobileNet(num_categories, add_intermediate_layers)

    if pretrained_weight == 1:
        tar = torch.load(model_urls['mobilenetV1'])
        state_dict = tar['state_dict']

        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] # remove `module.`
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)

        # model.load_state_dict(model_zoo.load_url(model_urls['mobilenetV1']))
        LOG("loaded ImageNet pretrained weights", logfile)
        
    elif pretrained_weight == 0:
        LOG("not loading ImageNet pretrained weights", logfile)

    else:
        LOG("parameter--pretrained_weight, should be 0 or 1", logfile)
        raise NotImplementedError

    fc_features = model.fc.in_features
    model.fc = nn.Linear(fc_features, num_categories)

    for param in model.parameters():
        param.requires_grad = False
    
    if add_intermediate_layers == 2:
        LOG("set all intermediate classifiers and final classifiers parameter as trainable.", logfile)

        # get all extra classifiers params and final classifier params
        for inter_clf in model.intermediate_CLF:
            for param in inter_clf.parameters():
                param.requires_grad = True

        for param in model.fc.parameters():
            param.requires_grad = True     

    elif add_intermediate_layers == 0:
        LOG("only set final classifiers parameter as trainable.", logfile)

        for param in model.fc.parameters():
            param.requires_grad = True         
    else:
        raise NotImplementedError

    return model
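The key-renaming loop above is the standard fix for checkpoints saved from an nn.DataParallel-wrapped model, whose parameter names all start with "module.". As a standalone helper:

from collections import OrderedDict

def strip_dataparallel_prefix(state_dict):
    # Remove the "module." prefix that nn.DataParallel adds to every key,
    # so the weights load into a bare (unwrapped) model.
    return OrderedDict((k[len('module.'):] if k.startswith('module.') else k, v)
                       for k, v in state_dict.items())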
Example #7
def imageview(ssid):
    db = Database()
    ss = db.read('SELECT * FROM screenshots WHERE ID = (%s) ORDER BY ID DESC LIMIT 1', ssid)
    db.close()
    try:
        return render_template("imageview.html", data=ss)
    except Exception as e:
        LOG.exception(e)
        flash('ScreenShot Not Found')
        return redirect(url_for('home'))
Example #8
def unban_player(ssid):
    try:
        db = Database()
        db.write('''UPDATE screenshots SET Banned = (%s) WHERE ID = (%s)''', None, ssid)
        flash("B3 Didn't Response to your Ban Request")
        flash("You have to Manually unban the Player via b3")
        db.close()
        return redirect(request.referrer)
    except Exception as e:
        LOG.exception(e)
        flash('Something Went Wrong')
        return redirect(request.referrer)
Example #9
def Elastic_VGG16_bn(args, logfile):
    num_categories = args.num_classes
    add_intermediate_layers = args.add_intermediate_layers
    pretrained_weight = args.pretrained_weight

    model = VGG(cfg['D'], add_intermediate_layers, num_categories, batch_norm=True)
    

    if pretrained_weight == 1:
        model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))    
        LOG("loaded ImageNet pretrained weights", logfile)
        
    elif pretrained_weight == 0:
        LOG("not loading ImageNet pretrained weights", logfile)

    else:
        LOG("parameter--pretrained_weight, should be 0 or 1", logfile)
        raise NotImplementedError

    fc_features = model.classifier[6].in_features
    model.classifier[6] = nn.Linear(fc_features, num_categories)
    # print("number of outputs: ", num_categories)

    for param in model.parameters():
        param.requires_grad = False
    
    if add_intermediate_layers == 2:
        LOG("add intermediate layer classifiers", logfile)

        # get all extra classifiers params and final classifier params
        for inter_clf in model.intermediate_CLF:
            for param in inter_clf.parameters():
                param.requires_grad = True

        for param in model.classifier.parameters():
            param.requires_grad = True     

    elif add_intermediate_layers == 0:
        LOG("not adding any intermediate layer classifiers", logfile)

        for param in model.classifier.parameters():
            param.requires_grad = True         
    else:
        raise NotImplementedError
    
    return model    
Example #10
def Elastic_SqueezeNet(args, logfile):
    num_categories = args.num_classes
    add_intermediate_layers = args.add_intermediate_layers
    pretrained_weight = args.pretrained_weight

    model = SqueezeNet(num_categories=num_categories,
                       add_intermediate_layers=add_intermediate_layers,
                       version=1.0)

    if pretrained_weight == 1:
        model.load_state_dict(model_zoo.load_url(model_urls['squeezenet1_0']))
        LOG("loaded ImageNet pretrained weights", logfile)

    elif pretrained_weight == 0:
        LOG("not loading ImageNet pretrained weights", logfile)

    else:
        LOG("parameter--pretrained_weight, should be 0 or 1", logfile)
        raise NotImplementedError

    model.classifier._modules["1"] = nn.Conv2d(512,
                                               num_categories,
                                               kernel_size=(1, 1))

    for param in model.parameters():
        param.requires_grad = False

    if add_intermediate_layers == 2:
        LOG(
            "set all intermediate classifiers and final classifiers parameter as trainable.",
            logfile)
        # get all extra classifiers params and final classifier params
        for inter_clf in model.intermediate_CLF:
            for param in inter_clf.parameters():
                param.requires_grad = True

    elif add_intermediate_layers == 0:
        LOG("only set final classifiers parameter as trainable.", logfile)

    else:
        raise NotImplementedError

    for param in model.classifier.parameters():
        param.requires_grad = True

    return model
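The freeze-then-unfreeze pattern repeats across all of the Elastic_* constructors: freeze everything, then re-enable gradients on the classifier heads. A generic helper capturing that idea (a sketch, not part of the source):

import torch.nn as nn

def freeze_all_but(model: nn.Module, *heads: nn.Module):
    # Freeze every parameter, then re-enable gradients only on the given
    # head modules (e.g. intermediate classifiers and the final classifier).
    for p in model.parameters():
        p.requires_grad = False
    for head in heads:
        for p in head.parameters():
            p.requires_grad = True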
Example #11
def auth():
    try:
        db = Database()
        if request.method == "POST" and request.form['secretkey'] == db.read('SELECT secretkey from server_info')[0][0]:
            b3id = int(request.form['b3id'])
            guid = request.form['guid']
            level = int(request.form['level'])
            if level >= 100:
                power = 'ban, train, getss, delss, manage'
            elif level >= 16:
                power = 'ban, train, getss'
            else:
                power = 'getss'
            authkey = request.form['authkey']
            db.write(
                '''UPDATE users SET B3ID = (%s), GUID = (%s) ,Permissions = (%s), authkey = (%s) WHERE authkey = (%s)''',
                b3id, guid, power, None, authkey)
            db.close()
        return 'ok'
    except Exception as e:
        LOG.exception(e)
        return 'error'
Example #12
def submit_ss():
    try:
        db = Database()
        if request.method == "POST" and request.form['secretkey'] == db.read('SELECT secretkey from server_info')[0][0]:
            # noinspection PyBroadException
            try:
                ss_id = int(db.read('''SELECT ID FROM ScreenShots''')[-1][0]) + 1
            except:
                ss_id = 1
            name = request.form['name'][:-2]
            b3id = int(request.form['b3id'])
            connections = int(request.form['connections'])
            aliases = request.form['aliases']
            guid = request.form['guid']
            penalties = int(request.form['penalties'])
            ip = request.form['ip']
            score = request.form['score']
            # noinspection PyBroadException
            try:
                with urllib.request.urlopen("https://ipinfo.io/{}/json".format(ip)) as url:
                    data = json.loads(url.read().decode())
                    address = '{}, {}'.format(data['city'], data['country']).strip(', ')
            except:
                address = 'Not Found'
            f = request.files['ss']

            s3_client.upload_fileobj(f, os.getenv("BUCKET_NAME").strip(), 'MrDetective/{}.jpg'.format(ss_id),
                                     ExtraArgs={'ACL': 'public-read'})

            db.write('''INSERT INTO ScreenShots (Name,B3ID,Connections,Aliases,GUID,Address,IP,Penalties,Score,
                        Timestamp) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''', name, b3id, connections,
                     aliases, guid, address, ip, penalties, score, int(time.time()))
            db.close()
            return jsonify('Got IT')
    except Exception as e:
        LOG.exception(e)
        return jsonify('Something Went Wrong')
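The s3_client used above is assumed to be created elsewhere in the app; a typical boto3 setup might look like this (credentials and environment variable names are placeholders):

import os
import boto3

# Hypothetical setup; the source does not show how s3_client is constructed.
s3_client = boto3.client(
    's3',
    aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
    aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'))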
Example #13
def clf_SVM(X_train, y_train, X_test):
    '''
    Train the model with a statistical machine learning method, here an SVC
    (RBF kernel) over TF-IDF features.
    :param X_train: training texts
    :param y_train: training labels
    :param X_test: test texts
    :return: predictions for X_test and the elapsed time in seconds
    '''
    global logFile
    start = time.time()

    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', SVC(decision_function_shape='ovo'))
    ])

    # TASK: Build a grid search to find out whether unigrams or bigrams are more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(X_train, y_train)

    # TASK: print the mean and std for each candidate along with the parameter
    # settings for all the candidates explored by grid search.
    n_candidates = len(grid_search.cv_results_['params'])
    for i in range(n_candidates):
        temp_result = str(i) + "params - " + str(
            grid_search.cv_results_['params'][i]) + "; mean - " + str(
                grid_search.cv_results_['mean_test_score'][i]
            ) + "; std - " + str(grid_search.cv_results_['std_test_score'][i])
        LOG(temp_result, logFile)

    y_pred = grid_search.predict(X_test)
    elapse = time.time() - start
    return y_pred, elapse
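A self-contained toy run of the same vectorizer/classifier grid search (the real data comes from prepare_data_svm; min_df is lowered and cv=2 set so the tiny corpus fits):

from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV

X = ['great food', 'terrible service', 'loved it', 'awful', 'nice place', 'bad place']
y = [1, 0, 1, 0, 1, 0]
pipe = Pipeline([('vect', TfidfVectorizer(min_df=1)),
                 ('clf', SVC(decision_function_shape='ovo'))])
grid = GridSearchCV(pipe, {'vect__ngram_range': [(1, 1), (1, 2)]}, cv=2)
grid.fit(X, y)
print(grid.predict(['really great', 'so bad']))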
Example #14
def Elastic_InceptionV3(args, logfile):
    num_classes = args.num_classes
    add_intermediate_layers = args.add_intermediate_layers
    pretrained_weight = args.pretrained_weight

    model = Inception3(num_classes, add_intermediate_layers, aux_logits=True)

    if pretrained_weight == 1:
        model.load_state_dict(
            model_zoo.load_url(model_urls['inception_v3_google']))
        LOG("loaded ImageNet pretrained weights", logfile)

    elif pretrained_weight == 0:
        LOG("not loading ImageNet pretrained weights", logfile)

    else:
        LOG("parameter--pretrained_weight, should be 0 or 1", logfile)
        raise NotImplementedError

    fc_features = model.fc.in_features
    model.fc = nn.Linear(fc_features, num_classes)

    for param in model.parameters():
        param.requires_grad = True
    LOG(
        "*********************set all parameters as trainable, all params are requires_grad == True*********************",
        logfile)

    if add_intermediate_layers == 2:
        LOG("add intermediate layer classifiers", logfile)

        # get all extra classifiers params and final classifier params
        for inter_clf in model.intermediate_CLF:
            for param in inter_clf.parameters():
                param.requires_grad = True

    elif add_intermediate_layers == 0:
        LOG("not adding any intermediate layer classifiers", logfile)
    else:
        raise NotImplementedError

    for param in model.fc.parameters():
        param.requires_grad = True

    return model
Example #15
def Elastic_ResNet(args, logfile):

    # num_outputs = 1 # initially only one classifier output

    num_classes = args.num_classes
    add_intermediate_layers = args.add_intermediate_layers
    pretrained_weight = args.pretrained_weight

    model_weight_url = None
    if args.model == "Elastic_ResNet18":
        # residual block type, 2 is BasicBlock, which means 2 conv-bn-relu in one block, 3 is BottleneckBlock, which means 3 conv-bn-relu blocks
        residual_block_type = 2
        model = ResNet(BasicBlock, [2, 2, 2, 2], residual_block_type,
                       num_classes, add_intermediate_layers)
        model_weight_url = model_urls['resnet18']
        LOG("successfully create model: (Elastic-)ResNet18", logfile)

    elif args.model == "Elastic_ResNet34":
        # residual block type, 2 is BasicBlock, which means 2 conv-bn-relu in one block, 3 is BottleneckBlock, which means 3 conv-bn-relu blocks
        residual_block_type = 2
        model = ResNet(BasicBlock, [3, 4, 6, 3], residual_block_type,
                       num_classes, add_intermediate_layers)
        model_weight_url = model_urls['resnet34']
        LOG("successfully create model: (Elastic-)ResNet34", logfile)

    elif args.model == "Elastic_ResNet50":
        residual_block_type = 3
        model = ResNet(Bottleneck, [3, 4, 6, 3], residual_block_type,
                       num_classes, add_intermediate_layers)
        model_weight_url = model_urls['resnet50']
        LOG("successfully create model: (Elastic-)ResNet50", logfile)

    elif args.model == "Elastic_ResNet101":
        residual_block_type = 3
        model = ResNet(Bottleneck, [3, 4, 23, 3], residual_block_type,
                       num_classes, add_intermediate_layers)
        model_weight_url = model_urls['resnet101']
        LOG("successfully create model: (Elastic-)ResNet101", logfile)

    elif args.model == "Elastic_ResNet152":
        residual_block_type = 3
        model = ResNet(Bottleneck, [3, 8, 36, 3], residual_block_type,
                       num_classes, add_intermediate_layers)
        model_weight_url = model_urls['resnet152']
        LOG("successfully create model: (Elastic-)ResNet152", logfile)

    if pretrained_weight == 1:
        model.load_state_dict(model_zoo.load_url(model_weight_url))
        LOG("loaded ImageNet pretrained weights", logfile)

    elif pretrained_weight == 0:
        LOG("not loading ImageNet pretrained weights", logfile)

    else:
        LOG("parameter--pretrained_weight, should be 0 or 1", logfile)
        raise NotImplementedError

    # if add_intermediate_layers == 0: # not adding any intermediate layer classifiers
    #     print("not adding any intermediate layer classifiers")
    #     LOG("not adding any intermediate layer classifiers", logfile)
    # elif add_intermediate_layers == 2:
    #     print("add any intermediate layer classifiers")
    #     LOG("add intermediate layer classifiers", logfile)

    # print("=====> successfully load pretrained imagenet weight")
    fc_features = model.fc.in_features
    model.fc = nn.Linear(fc_features, num_classes)

    for param in model.parameters():
        param.requires_grad = False

    if add_intermediate_layers == 2:
        LOG("add intermediate layer classifiers", logfile)

        # get all extra classifiers params and final classifier params
        for inter_clf in model.intermediate_CLF:
            for param in inter_clf.parameters():
                param.requires_grad = True

        for param in model.fc.parameters():
            param.requires_grad = True

    elif add_intermediate_layers == 0:
        LOG("not adding any intermediate layer classifiers", logfile)

        for param in model.fc.parameters():
            param.requires_grad = True
    else:
        raise NotImplementedError

    return model
Example #16
def Elastic_DenseNet(args, logfile):

    num_classes = args.num_classes
    add_intermediate_layers = args.add_intermediate_layers
    pretrained_weight = args.pretrained_weight

    model_weight_url = None

    if args.model == "Elastic_DenseNet121":
        model = DenseNet(add_intermediate_layers=add_intermediate_layers,
                         num_categories=num_classes,
                         num_init_features=64,
                         growth_rate=32,
                         block_config=(6, 12, 24, 16))
        model_weight_url = model_urls['densenet121']
        LOG("successfully create model: (Elastic-)Dense121", logfile)

    elif args.model == "Elastic_DenseNet169":
        model = DenseNet(add_intermediate_layers=add_intermediate_layers,
                         num_categories=num_classes,
                         num_init_features=64,
                         growth_rate=32,
                         block_config=(6, 12, 32, 32))
        model_weight_url = model_urls['densenet169']
        LOG("successfully create model: (Elastic-)Dense169", logfile)

    else:
        LOG("DenseNet model should be DenseNet121, DenseNet169, DenseNet201",
            logfile)
        raise NotImplementedError

    if pretrained_weight == 1:
        # '.'s are no longer allowed in module names, but pervious _DenseLayer
        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
        # They are also in the checkpoints in model_urls. This pattern is used
        # to find such keys.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        state_dict = model_zoo.load_url(model_weight_url)
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
        LOG("loaded ImageNet pretrained weights", logfile)
    elif pretrained_weight == 0:
        LOG("not loading ImageNet pretrained weights", logfile)

    else:
        LOG("parameter--pretrained_weight, should be 0 or 1", logfile)
        raise NotImplementedError

    in_features = model.classifier.in_features
    model.classifier = nn.Linear(in_features, num_classes)

    for param in model.parameters():
        param.requires_grad = False

    if add_intermediate_layers == 2:
        LOG("add intermediate layer classifiers", logfile)

        # get all extra classifiers params and final classifier params
        for inter_clf in model.intermediate_CLF:
            for param in inter_clf.parameters():
                param.requires_grad = True

        for param in model.classifier.parameters():
            param.requires_grad = True

    elif add_intermediate_layers == 0:
        LOG("not adding any intermediate layer classifiers", logfile)

        for param in model.classifier.parameters():
            param.requires_grad = True
    else:
        raise NotImplementedError

    return model
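A quick standalone check of the key remapping above: older DenseNet checkpoints use names like norm.1 inside each dense layer, which newer PyTorch module naming forbids, so the inner dot is dropped:

import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
key = 'features.denseblock1.denselayer1.norm.1.weight'
res = pattern.match(key)
print(res.group(1) + res.group(2))  # features.denseblock1.denselayer1.norm1.weight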
Example #17
def main(**kwargs):
    global args

    for arg, v in kwargs.items():
        args.__setattr__(arg, v)

    # program_start_time = time.time()
    instanceName = "classification_Accuracy"
    folder_path = os.path.dirname(os.path.abspath(__file__))

    timestamp = datetime.datetime.now()
    ts_str = timestamp.strftime('%Y-%m-%d-%H-%M-%S')
    path = folder_path + os.sep + instanceName + os.sep + args.model + os.sep + ts_str + "_" + args.dataset

    os.makedirs(path)

    args.savedir = path

    global logFile
    logFile = path + os.sep + "log.txt"

    LOG(str(args), logFile)

    X_train, y_train, X_test, y_test = prepare_data_svm(args)

    LOG(
        "train data size : " + str(len(y_train)) + " test data size : " +
        str(len(y_test)), logFile)

    if args.model == "liblinear_svm":
        # # liblinear_svm prediction
        # y_pred_SVM, time_SVM = liblinear_svm(X_train, y_train, X_test, y_test)
        # LOG("time elapse: " + str(time_SVM), logFile)
        # LOG("multiclass liblinear SVM: " + str(accuracy_score(y_test, y_pred_SVM)), logFile)
        # SVM_predictions_csv = np.column_stack((X_test, y_pred_SVM))

        # SVM_predictions_csv.to_csv(path + os.sep + "test_classification_result.csv", sep=',', index=True)
        pass

    elif args.model == "svm" or args.model == "SVM":
        # SVM prediction
        y_pred, time = clf_SVM(X_train, y_train, X_test)
        accuracy = accuracy_score(y_test, y_pred)
        LOG("time elapse: " + str(time) + " seconds", logFile)
        LOG("[SVM] accuracy: " + str(accuracy), logFile)

        df = pd.DataFrame(data={
            "test review": X_test,
            "test_label": y_pred,
            "ground truth": y_test
        })
        df.to_csv(path + os.sep + "test_classification_result.csv",
                  sep=',',
                  index=True)

    else:
        raise NotImplementedError

    LOG("============Finish============", logFile)

    # svm_out_path ="liblinear_SVM_prediction_4rd_run.csv"
    # with open(svm_out_path, 'w') as f:
    #     csv.writer(f).writerows(SVM_predictions_csv)
    # f.close()

    # # first test on VADER system
    # y_pred_VADER, time_VADER = clf_VADER(X_test)
    # print("VADER elapsed time: ", round(time_VADER, 2), " s")
    #
    # # os.makedirs(prefix_path)
    #
    # # Save the evaluation to a csv
    # VADER_predictions_csv= np.column_stack((X_test, y_pred_VADER))
    #
    # vader_out_path = "VADER_prediction.csv"
    # with open(vader_out_path, 'w') as f:
    #     csv.writer(f).writerows(VADER_predictions_csv)
    # f.close()

    # # find these reviews which are wrongly classified
    # X_test = list(X_test)
    # y_test = list(y_test)
    # # wrong_clf_reviews_VADER = dict()
    #
    # wrong_clf_reviews_list = list()
    # print("test size length: ", len(y_test))
    #
    # assert len(y_test) == len(y_pred_VADER)
    #
    # for i in range(len(y_pred_VADER)):
    #     if y_pred_VADER[i] != y_test[i]:
    #         wrong_clf_reviews_list.append([y_pred_VADER[i], y_test[i], i, X_test[i], "VADER"])
    #     else:
    #         pass
    #

    # calculate confusion matrix
    logFile = confusion_matrix(y_pred, y_test, logFile)

    # save misclassified reviews
    wrong_clf_reviews = save_misclassified_reviews(X_test, y_pred, y_test,
                                                   args.model)

    wrong_clf_reviews.to_csv(path + os.sep + "wrong_clf_reviews.csv",
                             sep=',',
                             index=True)
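Note: confusion_matrix here is a project helper (it logs the matrix and returns the log file path), not sklearn's function of the same name. A hedged sketch of what such a wrapper might look like:

from sklearn.metrics import confusion_matrix as sk_confusion_matrix

def confusion_matrix(y_pred, y_true, log_path):
    # Hypothetical reconstruction: compute the matrix with sklearn,
    # append it to the log file, and hand the path back to the caller.
    cm = sk_confusion_matrix(y_true, y_pred)
    with open(log_path, 'a') as f:
        f.write(str(cm) + '\n')
    return log_path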
Example #18
def main(**kwargs):
    global args
    lowest_error1 = 100

    for arg, v in kwargs.items():
        args.__setattr__(arg, v)

    program_start_time = time.time()
    instanceName = "Classification_Accuracy"
    folder_path = os.path.dirname(
        os.path.abspath(__file__)) + os.sep + args.model

    timestamp = datetime.datetime.now()
    ts_str = timestamp.strftime('%Y-%m-%d-%H-%M-%S')
    path = folder_path + os.sep + instanceName + os.sep + args.model_name + os.sep + ts_str

    tensorboard_folder = path + os.sep + "Graph"
    os.makedirs(path)
    args.savedir = path

    writer = SummaryWriter(tensorboard_folder)

    global logFile
    logFile = path + os.sep + "log.txt"
    args.filename = logFile
    global num_outputs

    print(args)
    global device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    if args.data == "cifar100" or args.data == "CIFAR100":
        fig_title_str = " on CIFAR-100"

    elif args.data == "cifar10" or args.data == "CIFAR10":
        fig_title_str = " on CIFAR-10"
    elif args.data == "tiny_imagenet":
        fig_title_str = " on tiny_imagenet"
    else:
        LOG(
            "ERROR =============================dataset should be CIFAR10 or CIFAR100",
            logFile)
        raise NotImplementedError

    captionStrDict = {
        "fig_title": fig_title_str,
        "x_label": "epoch",
        'elastic_final_layer_label': "Final_Layer_Output_Classifier",
        "elastic_intermediate_layer_label": "Intermediate_Layer_Classifier_"
    }

    # save input parameters into log file

    LOG("program start time: " + ts_str + "\n", logFile)

    # if args.layers_weight_change == 1:
    #     LOG("weights for intermediate layers: 1/(34-Depth), giving different weights for different intermediate layers output, using the formula weigh = 1/(34-Depth)", logFile)
    # elif args.layers_weight_change == 0:
    #     LOG("weights for intermediate layers: 1, giving same weights for different intermediate layers output as  1", logFile)
    # else:
    #     print("Parameter --layers_weight_change, Error")
    #     sys.exit()

    if args.model == "Elastic_ResNet18" or args.model == "Elastic_ResNet34" or args.model == "Elastic_ResNet50" or args.model == "Elastic_ResNet101" or args.model == "Elastic_ResNet152":
        model = Elastic_ResNet(args, logFile)

    elif args.model == "Elastic_InceptionV3":
        args.target_size = (
            299, 299, 3
        )  # since pytorch inceptionv3 pretrained accepts image size (299, 299, 3) instead of (224, 224, 3)
        model = Elastic_InceptionV3(args, logFile)

    elif args.model == "Elastic_MobileNet":
        model = Elastic_MobileNet(args, logFile)

    elif args.model == "Elastic_VGG16":
        model = Elastic_VGG16_bn(args, logFile)

    elif args.model == "Elastic_SqueezeNet":
        model = Elastic_SqueezeNet(args, logFile)

    elif args.model == "Elastic_DenseNet121" or args.model == "Elastic_DenseNet169" or args.model == "Elastic_DenseNet201":
        model = Elastic_DenseNet(args, logFile)

    else:
        LOG(
            "--model parameter should be in ResNet, InceptionV3, MobileNet, VGG16, SqueezeNet, DenseNet",
            logFile)
        exit()

    num_outputs = model.num_outputs
    # num_outputs = 1

    LOG("num_outputs: " + str(num_outputs), logFile)
    LOG("successfully create model: " + args.model, logFile)

    args_str = str(args)
    LOG(args_str, logFile)

    model = model.to(device)
    if device == 'cuda':
        model = torch.nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    # TUT thinkstation data folder path
    data_folder = "/media/yi/e7036176-287c-4b18-9609-9811b8e33769/tiny_imagenet/tiny-imagenet-200"

    # narvi data folder path
    # data_folder = "/home/zhouy/data/tiny-imagenet-200"

    # XPS 15 laptop data folder path
    # data_folder = "D:\Elastic\data"
    # args.batch_size = 1

    summary(model, (3, 224, 224))

    if args.data == "tiny_imagenet":
        train_loader, test_loader = tiny_image_data_loader(data_folder, args)
    else:
        train_loader = get_train_loader(args.data,
                                        data_dir=data_folder,
                                        batch_size=args.batch_size,
                                        augment=False,
                                        target_size=args.target_size,
                                        random_seed=20180614,
                                        valid_size=0.2,
                                        shuffle=True,
                                        show_sample=False,
                                        num_workers=4,
                                        pin_memory=True,
                                        debug=args.debug)

        test_loader = get_test_loader(args.data,
                                      data_dir=data_folder,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      target_size=args.target_size,
                                      num_workers=4,
                                      pin_memory=True,
                                      debug=args.debug)

    criterion = nn.CrossEntropyLoss().cuda()

    if args.data != "tiny_imagenet":
        pretrain_optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                                    model.parameters()),
                                             args.pretrain_learning_rate,
                                             momentum=args.momentum,
                                             weight_decay=args.weight_decay)

        LOG("==> Pretraining for **1** epoches    \n", logFile)
        for pretrain_epoch in range(0, 1):
            accs, losses, lr = train(train_loader, model, criterion,
                                     pretrain_optimizer, pretrain_epoch)
            epoch_result = "    pretrain epoch: " + str(
                pretrain_epoch) + ", pretrain error: " + str(
                    accs) + ", pretrain loss: " + str(
                        losses) + ", pretrain learning rate: " + str(
                            lr) + ", pretrain total train sum loss: " + str(
                                sum(losses))
            LOG(epoch_result, logFile)

        summary(model, (3, 224, 224))

    LOG("==> Full training    \n", logFile)
    for param in model.parameters():
        param.requires_grad = True

    optimizers = []
    childs = []
    k = 0
    for child in model.parameters():
        childs.append(child)
        k += 1

    # childs_params = [childs[:9], childs[:15], childs[:21], childs[:27],
    #                     childs[:33], childs[:39], childs[:45], childs[:51],
    #                     childs[:57], childs[:63], childs[:69], childs[:75], childs]
    childs_params = [childs[:25], childs[:43], childs[:61], childs]

    for i in range(num_outputs):
        optimizer = torch.optim.SGD(childs_params[i],
                                    args.learning_rate,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        optimizers.append(optimizer)
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    # summary(model, (3,224,224))

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='min',
                                                           threshold=1e-4,
                                                           patience=10)

    # implement early stop by own
    EarlyStopping_epoch_count = 0

    epochs_train_accs = []
    epochs_train_top5_accs = []
    epochs_train_losses = []
    epochs_test_accs = []
    epochs_test_losses = []
    epochs_lr = []
    epochs_test_top5_accs = []

    for epoch in range(0, args.epochs):

        epoch_str = "==================================== epoch %d ==============================" % epoch
        LOG(epoch_str, logFile)
        # Train for one epoch
        accs, losses, lr, accs_top5 = train(train_loader, model, criterion,
                                            optimizers, epoch)
        epochs_train_accs.append(accs)
        epochs_train_losses.append(losses)
        epochs_lr.append(lr)
        epochs_train_top5_accs.append(accs_top5)

        writer.add_scalar(tensorboard_folder + os.sep + "data" + os.sep + 'lr',
                          lr, epoch)
        for i, a, l, k in zip(range(len(accs)), accs, losses, accs_top5):
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'train_error_' + str(i), a, epoch)
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'train_losses_' + str(i), l, epoch)
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'train_top5_error_' + str(i), k, epoch)

        epoch_result = "\ntrain error: " + str(accs) + "top 5 error: " + str(
            accs_top5) + ", \nloss: " + str(
                losses) + ", \nlearning rate " + str(
                    lr) + ", \ntotal train sum loss " + str(sum(losses))
        LOG(epoch_result, logFile)

        if num_outputs > 1:
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'train_total_sum_losses', sum(losses), epoch)
            losses.append(sum(losses))  # add the total sum loss
            LOG("train_total_sum_losses: " + str(sum(losses)), logFile)

        # run on test dataset
        LOG("==> test \n", logFile)
        test_accs, test_losses, test_top5_accs = validate(
            test_loader, model, criterion)

        epochs_test_accs.append(test_accs)
        epochs_test_losses.append(test_losses)
        epochs_test_top5_accs.append(test_top5_accs)

        for i, a, l, k in zip(range(len(test_accs)), test_accs, test_losses,
                              test_top5_accs):
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep + 'test_error_' +
                str(i), a, epoch)
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'test_losses_' + str(i), l, epoch)
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'test_top5_losses_' + str(i), k, epoch)

        test_result_str = "==> Test epoch: \nfinal output classifier error: " + str(
            test_accs
        ) + "test top 5 error: " + str(test_top5_accs) + ", \ntest_loss" + str(
            test_losses) + ", \ntotal test sum loss " + str(sum(test_losses))
        LOG(test_result_str, logFile)

        total_loss = sum(test_losses)

        if num_outputs > 1:
            writer.add_scalar(
                tensorboard_folder + os.sep + "data" + os.sep +
                'test_total_sum_losses', total_loss, epoch)
            test_losses.append(total_loss)  # add the total sum loss
            LOG("test_total_sum_losses: " + str(total_loss), logFile)

        log_stats(path, accs, losses, lr, test_accs, test_losses, accs_top5,
                  test_top5_accs)

        # Remember best prec@1 and save checkpoint
        is_best = test_accs[-1] < lowest_error1  # this tracks error, not accuracy; variable names kept as-is

        if is_best:

            lowest_error1 = test_accs[-1]  # note: sometimes the second-to-last classifier achieves the better result

            save_checkpoint(
                {
                    'epoch': epoch,
                    'model': args.model_name,
                    'state_dict': model.state_dict(),
                    'best_prec1': lowest_error1,
                    'optimizer': optimizer.state_dict(),
                }, args)

        # apply early_stop with monitoring val_loss
        # EarlyStopping(patience=15, score_function=score_function(val_loss), trainer=model)

        scheduler.step(total_loss)  # adjust learning rate with test_loss

        if epoch == 0:
            prev_epoch_loss = total_loss  # use the summed loss over all intermediate classifiers instead of a single classifier's loss
        else:
            if total_loss >= prev_epoch_loss:  # means this current epoch doesn't reduce test losses
                EarlyStopping_epoch_count += 1
        if EarlyStopping_epoch_count > 20:
            LOG(
                "No improvement in test_loss for more than 20 epochs, stopping training",
                logFile)
            break

    # n_flops, n_params = measure_model(model, IMAGE_SIZE, IMAGE_SIZE)
    # FLOPS_result = 'Finished training! FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6)
    # LOG(FLOPS_result, logFile)
    # print(FLOPS_result)
    writer.close()

    end_timestamp = datetime.datetime.now()
    end_ts_str = end_timestamp.strftime('%Y-%m-%d-%H-%M-%S')
    LOG("program end time: " + end_ts_str + "\n", logFile)

    # here plot figures
    plot_figs(epochs_train_accs, epochs_train_losses, epochs_test_accs,
              epochs_test_losses, args, captionStrDict)
    LOG("============Finish============", logFile)
Example #19
def train(train_loader, model, criterion, optimizers, epoch):

    model.train()

    lr = None
    all_acc = []
    all_acc_top5 = []
    all_loss = []

    for ix in range(num_outputs):
        all_loss.append(AverageMeter())
        all_acc.append(AverageMeter())
        all_acc_top5.append(AverageMeter())

    LOG("==> train ", logFile)
    # print("num_outputs: ", num_outputs)

    for i, (input, target) in enumerate(train_loader):
        # print("input: ", input, input.shape)
        # print("target: ", target, target.shape)

        # bp_1
        if args.backpropagation == 1:
            # LOG("enter backpropagation method : " + str(args.backpropagation) +"\n", logFile)

            target = target.cuda(non_blocking=True)  # async is a reserved word in Python 3.7+; non_blocking replaces it
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)

            for ix in range(num_outputs):
                outputs = model(input_var)
                #
                optimizers[ix].zero_grad()

                loss = criterion(outputs[ix], target_var)

                loss.backward()

                optimizers[ix].step()

                # optimizer.zero_grad()
                # if ix == (num_outputs - 1):
                #     loss.backward()
                # else:
                #     loss.backward(retain_graph=True)

                # optimizer.step()
                all_loss[ix].update(loss.item(), input.size(0))

                # top 1 accuracy
                prec1 = accuracy(outputs[ix].data, target)
                all_acc[ix].update(prec1[0].data[0].item(), input.size(0))

                # # top 5 accuracy
                prec5 = accuracy(outputs[ix].data, target, topk=(5, ))
                # print("prec top 5-1: ", prec5)
                # print("prec top 5-2: ", prec5[0])
                # print("prec top 5-3: ", prec5[0].data[0].item())
                all_acc_top5[ix].update(prec5[0].data[0].item(), input.size(0))

        # elif args.backpropagation == 2:
        #     # LOG("enter backpropagation method : " + str(args.backpropagation) +"\n", logFile)
        #     # bp_2
        #     for ix in range(num_outputs):

        #         target = target.cuda(async=True)
        #         input_var = torch.autograd.Variable(input)
        #         target_var = torch.autograd.Variable(target)
        #         optimizer.zero_grad()
        #         outputs = model(input_var)
        #         loss = criterion(outputs[ix], target_var)
        #         loss.backward()
        #         optimizer.step()

        #         all_loss[ix].update(loss.item(), input.size(0))

        #         # top 1 accuracy
        #         prec1 = accuracy(outputs[ix].data, target)
        #         all_acc[ix].update(prec1[0].data[0].item(), input.size(0))

        #         # top 5 accuracy
        #         prec5 = accuracy(outputs[ix].data, target, topk=(5,))
        #         all_acc_top5[ix].update(prec5[0].data[0].item(), input.size(0))

        # elif args.backpropagation == 3:
        #     # LOG("enter backpropagation method : " + str(args.backpropagation) +"\n", logFile)
        #     # bp_3
        #     target = target.cuda(async=True)
        #     input_var = torch.autograd.Variable(input)
        #     target_var = torch.autograd.Variable(target)

        #     optimizer.zero_grad()
        #     outputs = model(input_var)
        #     losses = 0
        #     for ix in range(len(outputs)):
        #         # print("outputs[ix]: ", outputs[ix])
        #         loss = criterion(outputs[ix], target_var)
        #         losses += loss

        #         all_loss[ix].update(loss.item(), input.size(0))

        #         # top 1 accuracy
        #         prec1 = accuracy(outputs[ix].data, target)
        #         all_acc[ix].update(prec1[0].data[0].item(), input.size(0))

        #         # top 5 accuracy
        #         prec5 = accuracy(outputs[ix].data, target, topk=(5,))
        #         all_acc_top5[ix].update(prec5[0].data[0].item(), input.size(0))

        #     # losses = losses/len(outputs)
        #     losses.backward()
        #     optimizer.step()
        else:
            raise NotImplementedError

    accs = []
    accs_top5 = []
    ls = []
    for i, j, k in zip(all_acc, all_loss, all_acc_top5):
        accs.append(float(100 - i.avg))
        ls.append(j.avg)
        accs_top5.append(float(100 - k.avg))

    # read the current learning rate directly from the optimizer
    lr = optimizers[-1].param_groups[0]['lr']

    print("train epoch top 5 error: ", accs_top5)
    return accs, ls, lr, accs_top5
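The AverageMeter objects used above follow the common helper from the PyTorch ImageNet example; a minimal sketch in case it is not imported elsewhere:

class AverageMeter:
    # Tracks a running sum/count and exposes the average, weighted by
    # batch size via update(value, n).
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count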
Example #20
def main(**kwargs):
    global args

    for arg, v in kwargs.items():
        args.__setattr__(arg, v)

    print(args)

    program_start_time = time.time()
    instanceName = "classification_Accuracy"
    folder_path = os.path.dirname(os.path.abspath(__file__))

    timestamp = datetime.datetime.now()
    ts_str = timestamp.strftime('%Y-%m-%d-%H-%M-%S')
    path = folder_path + os.sep + instanceName + os.sep + args.model + os.sep + ts_str + "_" + args.dataset + "_" + args.wordembedding

    if args.debug:
        print("[Debug mode]")
        path = folder_path + os.sep + instanceName + os.sep + "Debug-" + args.model + os.sep + ts_str + "_" + args.dataset + "_" + args.wordembedding
    else:
        path = folder_path + os.sep + instanceName + os.sep + args.model + os.sep + ts_str + "_" + args.dataset + "_" + args.wordembedding

    os.makedirs(path)

    args.savedir = path

    global logFile
    logFile = path + os.sep + "log.txt"

    if args.model == "BiLSTMConv":
        Model = models.BiLSTMConv

    # elif args.model == "BiGRU":
    #     Model = models.BiGRU

    # elif args.model == "WordCNN":
    #     Model = models.WordCNN

    # elif args.model == "BiGRUWithTimeDropout":
    #     Model = models.BiGRUWithTimeDropout

    elif args.model == "CNN_Text_Model":
        Model = CNN_Text_Model.CNN_Text
    elif args.model == "VDCNN":
        Model = vcdnn.VDCNN
    else:
        raise NotImplementedError

    # process the input data.

    captionStrDict = {"fig_title": args.dataset, "x_label": "epoch"}

    train_iter, test_iter, net = data_preprocess.prepare_data_and_model(
        Model=Model, args=args, using_gpu=True)
    print("args: ", args)

    LOG(str(args), logFile)

    global device
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    net = net.to(device)

    if device == 'cuda':
        net = torch.nn.DataParallel(net).cuda()
        cudnn.benchmark = True

    optimizer = optim.Adam(params=net.parameters(), lr=1e-3, weight_decay=1e-4)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=1000,
                                             gamma=.99)
    if device == "cuda":
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss()

    # criterion = nn.CrossEntropyLoss().cuda()

    best_test_acc = 0
    best_test_results = []
    ground_truth = []

    epoch_train_accs = []
    epoch_train_losses = []
    epoch_test_accs = []
    epoch_test_losses = []
    epoch_lrs = []

    for epoch in range(args.epochs):

        epoch_start_time = time.time()

        train_accs = []
        train_losses = []

        for batch in tqdm(train_iter):
            net.train()
            xs = batch.text
            ys = batch.label
            # # ys = ys.squeeze(1)
            # print("ys_train data type: ", type(ys))
            # print("ys_train: ", ys)
            if device == 'cuda':
                ys = ys.cuda(non_blocking=True)
            # ys = torch.autograd.Variable(ys)
            xs = torch.autograd.Variable(xs)
            ys_var = torch.autograd.Variable(ys)
            # print(ys_var)

            logits = net(xs)
            loss = criterion(logits, ys_var)
            # print("loss: ", loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()  # step the scheduler after the optimizer (PyTorch >= 1.1 ordering)
            train_losses.append(loss.item() / int(args.batch_size))
            train_accs.append(accuracy(logits.data, ys))

        train_accs_normal = [i[0].item() for i in train_accs]

        # print("epoch ", epoch, " :  training accumulated accuracy ", np.mean(train_accs_normal))
        LOG("epoch: " + str(epoch), logFile)
        LOG("[TRAIN] accumulated accuracy: " + str(np.mean(train_accs_normal)),
            logFile)

        epoch_train_accs.append(np.mean(train_accs_normal))
        epoch_train_losses.append(np.mean(train_losses))

        test_accs = []
        test_losses = []
        test_predict_results = []

        net.eval()

        pred_results = []

        print("running testing.....")
        for batch in tqdm(test_iter):
            xs_test = batch.text
            ys_test = batch.label

            logits_test = net(xs_test)
            test_loss = criterion(logits_test, ys_test)

            test_losses.append(test_loss.item() / int(args.batch_size))
            test_accs.append(accuracy(logits_test.data, ys_test))

            pred_results = pred_results + logits_test.topk(
                1, 1, True, True)[1].t().cpu().numpy().tolist()[0]

            if epoch == (args.epochs - 1):
                ground_truth = ground_truth + ys_test.cpu().numpy().tolist()

        test_accs_normal = [i[0].item() for i in test_accs]

        # print("epoch {} :  testing accumulated accuracy {} %".format(epoch, np.mean(test_accs)))
        print("epoch ", epoch, " :  testing accumulated accuracy ",
              np.mean(test_accs_normal))

        # LOG("epoch: "+ str(epoch), logFile)
        LOG("[TEST] accumulated accuracy: " + str(np.mean(test_accs_normal)),
            logFile)

        if best_test_acc < np.mean(test_accs_normal):
            best_test_acc = np.mean(test_accs_normal)
            best_test_results = pred_results
            torch.save(net.state_dict(),
                       path + os.sep + str(Model.name) + ".pkl")

        epoch_test_accs.append(np.mean(test_accs_normal))
        epoch_test_losses.append(np.mean(test_losses))

        # epoch_lrs.append(0.1)
        # read the current learning rate directly from the optimizer
        lr = optimizer.param_groups[0]['lr']
        epoch_lrs.append(lr)

        log_stats(path, [np.mean(train_accs_normal)], [np.mean(train_losses)],
                  [np.mean(test_accs_normal)], [np.mean(test_losses)], lr)

        one_epoch_last_time = time.time() - epoch_start_time

        LOG("last time: " + str(one_epoch_last_time), logFile)

    df = pd.DataFrame(data={
        "test_label": best_test_results,
        "ground truth": ground_truth
    })
    df.to_csv(path + os.sep + "test_classification_result.csv",
              sep=',',
              index=True)

    # save the metrics report
    logFile = confusion_matrix(df["test_label"], df["ground truth"], logFile)

    # #     # here plot figures
    # # algos\Classification_Accuracy\CNN_Text_Model\2019-01-23-14-58-01_tripadvisor\test_acc.txt
    #     import pandas as pd
    #     # algos\Classification_Accuracy\BiLSTMConv\\2019-01-22-10-29-54_tripadvisor\test_acc.txt
    #     epoch_test_accs = list(pd.read_csv("algos\\Classification_Accuracy\\CNN_Text_Model\\2019-01-23-14-58-01_tripadvisor\\test_acc.txt", header=None).iloc[:,0])
    #     epoch_train_accs = list(pd.read_csv("algos\\Classification_Accuracy\\CNN_Text_Model\\2019-01-23-14-58-01_tripadvisor\\train_acc.txt", header=None).iloc[:,0])
    #     epoch_train_losses = list(pd.read_csv("algos\\Classification_Accuracy\\CNN_Text_Model\\2019-01-23-14-58-01_tripadvisor\\train_losses.txt", header=None).iloc[:,0])
    #     epoch_test_losses = list(pd.read_csv("algos\\Classification_Accuracy\\CNN_Text_Model\\2019-01-23-14-58-01_tripadvisor\\test_losses.txt", header=None).iloc[:,0])

    plot_figs(epoch_train_accs, epoch_train_losses, epoch_test_accs,
              epoch_test_losses, args, captionStrDict)
    LOG("============Finish============", logFile)