Example #1
def get_model():
    """ 获取模型,并加载官方预训练的模型参数 """

    # 获取模型
    model = models.main()
    # model.summary() cache_subdir='models_dir'

    # Download the pretrained weights
    WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
    filename = 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'  # file name to save the download as
    checksum = '3e9f4e4f77bbe2c9bec13b53ee1c2319'
    weights_path = get_file(filename,
                            WEIGHTS_PATH_NO_TOP,
                            cache_subdir='models')
    # print(weights_path)

    # Load the weights
    model.load_weights(weights_path, by_name=True)

    # Compile
    model.compile(loss=customied_loss,
                  optimizer=Adam(1e-3),
                  metrics=['accuracy'])

    return model
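
The excerpt above omits its imports. A minimal sketch of what it appears to rely on, assuming the standalone Keras API; `models` and `customied_loss` are project-local names that are not shown here:

# Assumed imports for the get_model() excerpt above -- the original snippet does
# not show them, so treat the module paths as assumptions (tensorflow.keras
# exposes the same names):
from keras.utils import get_file    # downloads and caches the weights file
from keras.optimizers import Adam   # optimizer passed to model.compile
import models                       # project-local module exposing main()
# customied_loss is a project-local custom loss; its definition is not shown.
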
Example #2
def get_model():
    """ 加载模型和参数"""
    # 获取模型
    model = models.main()

    # Load the weights
    # model.load_weights("callbacks/ep044-loss0.030-val_loss0.028.h5")
    model.load_weights('.\callbackslast1.h5')
    return model
Example #3
File: main.py Project: EKELE-NNOROM/GANs
def main(args):
    #os.listdir(data_root)
    img_shape = (args.channels, args.img_size, args.img_size)

    cuda = True if torch.cuda.is_available() else False

    # Loss function
    adversarial_loss = torch.nn.BCELoss()
    auxiliary_loss = torch.nn.CrossEntropyLoss()

    # Initialize generator and discriminator
    generator, discriminator = models.main(args)

    if cuda:
        generator.cuda()
        discriminator.cuda()
        adversarial_loss.cuda()
        auxiliary_loss.cuda()

    generator.apply(weights_init_normal)
    discriminator.apply(weights_init_normal)
    print('Generator: ', generator)
    print('Discriminator: ', discriminator)

    dataset = datasets.ImageFolder(root=args.data_root,
                                   transform=transforms.Compose([
                                       transforms.Resize(args.img_size),
                                       transforms.CenterCrop(args.img_size),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, 0.5, 0.5),
                                                            (0.5, 0.5, 0.5)),
                                   ]))
    # Create the dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=2)

    train(generator, discriminator, dataloader, args, cuda, adversarial_loss,
          auxiliary_loss)
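
main(args) reads its configuration from an argparse-style namespace. A minimal sketch of a parser covering only the attributes visible in this excerpt; the defaults are assumptions, and train() will likely need further fields (epochs, learning rate, ...) that are not shown here:

import argparse

# Only the attribute names come from the excerpt above (args.channels,
# args.img_size, args.data_root, args.batch_size); defaults are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument('--channels', type=int, default=3, help='image channels')
parser.add_argument('--img_size', type=int, default=64, help='square image size')
parser.add_argument('--data_root', type=str, required=True, help='ImageFolder root')
parser.add_argument('--batch_size', type=int, default=64, help='dataloader batch size')
# main(parser.parse_args())
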
Example #4
File: start.py Project: cohux/AWD_Platform
import batch
import flagfresher
import os
import models

#os.system('/bin/bash -c "docker volume rm $(docker volume ls -qf dangling=true)"')
# docker volume rm $(docker volume ls -qf dangling=true)

teams = 10

models.main(teams)
batch.start_awd()
flagfresher.main()
Example #5
File: app.py Project: v-studios/zephyr
def sync():
    import models
    models.main()
Example #6
thrds = 8       # number of threads operating on docker concurrently


npcteams = 2    # extra NPC teams
#lock = threading.Lock()



q = Queue() 

logger = logset('start')
logger.addHandler(console)


models.main(npcteams) # initialize the database


timespan = 1 * 60 # flag refresh interval

'''
Main flow

Create the containers first, then start them, then shut them down; start them again when the competition begins.
'''
# Port rule: 3, then the two-digit team id (00), then the service port (22)
subject =   {
            #'yunnam_simple': {'sshport':30022,'serviceport':30080},
            #'pwn_simple':   {'sshport':30032,'serviceport':30090},
            #'tomcat8':  {'sshport':30042,'serviceport':30040},
            'pwn':[{'network':'172.10.%d.1','servicename':'awd_note','serviceport':44500}]
Example #7
def main():
    # Filter warnings that pollute the project stdout.
    filter_warnings()
    # Rationale: produce cleaner results.

    # Set the random seed for the entire project.
    du.set_random_seed(0)
    # Rationale: ensure reproducibility of the results.

    # Flush previous runs.
    # constants.flush_project_results(constants.TMP_PATH,
    #                                 constants.OUTPUT_PATH)
    # Rationale: provide a clean state for the project to run and enforce
    # reproducibility of the results.

    # Download, load and save data.
    data_loading.main()
    dataframe = data_loading.load_data(constants.DATASET_PATH,
                                       constants.TMP_PATH)
    data_loading.save_data(dataframe, constants.TMP_PATH,
                           constants.DATASET_PATH)
    # Rationale: *Loading*: load data in the main module and pass it as a first
    # argument to every other defined function (that relates to the data set),
    # thus saving precious time on data loading. *Saving*: for big data sets,
    # saving the dataset in a fast-to-read format (such as HDF5) saves time.

    # Load and combine data processing pipelines.
    data_processing.main(dataframe, nan_strategy='drop')
    # Rationale: prepare data to be fed into the models.
    # Different algorithms make use of different data structures. For instance,
    # XGBoost allows NaNs; data transformations usually don't.

    # Perform exploratory data analyses.
    data_exploration.main(dataframe)
    # Rationale: conduct exploratory data analyses.

    # Data split.
    # Removed.
    # Rationale: module 'models' should execute this.

    # Perform grid search.
    # Iteration over processed data sets may occur here since they are model
    # dependent.
    grid_search.main(constants.MODELS, constants.GRIDS)
    best_combination_of_datasets_and_grids = (
        grid_search.dict_of_best_datasets_and_grids(constants.MODELS,
                                                    constants.GRIDS))
    best_datasets = best_combination_of_datasets_and_grids['best_datasets']
    best_grids = best_combination_of_datasets_and_grids['best_grids']
    # Rationale: perform grid search as part of machine learning best
    # practices.

    # Summary of what was executed so far:
    # 1) Setting of the random seed for reproducibility.
    # 2) Flushing of intermediate results for a clean run.
    # 3) Data loading and data saving.
    # 4) Conduction of exploratory data analyses.
    # 5) Grid search of best model hyperparameters.
    # To conclude our project we need the grand finale: model selection and
    # evaluation/comparison.
    models.main(constants.MODELS, best_datasets, best_grids,
                constants.MODEL_FITTING_PARAMETERS)
Example #8
File: mysql.py Project: Lupino/lee
import config
import lee

lee.connect(config.mysql_path)

from models import main
main()
Example #9
import settings
import models
import responder
from handlers import WineAttributeResource, PredictionResource, HealthCheckResource

models.main()
api = responder.API(cors=True,
                    allowed_hosts=["*"],
                    cors_params={
                        "allow_origins": "*",
                        "allow_methods": "*",
                        "allow_headers": "*"
                    })

api.add_route('/api/wine_attributes', WineAttributeResource)
api.add_route('/api/predict', PredictionResource)
api.add_route('/api/healthcheck', HealthCheckResource)

if __name__ == '__main__':
    api.run(address="0.0.0.0", port=5432, debug=True)
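
The resource classes come from handlers.py and are not shown above. A hypothetical minimal handler in the class-based style responder routes to; the project's real HealthCheckResource may differ:

# Hypothetical sketch of a responder class-based view; not the project's
# actual implementation.
class HealthCheckResource:
    async def on_get(self, req, resp):
        resp.media = {"status": "ok"}
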
Example #10
File: master.py Project: mirrormere/TFNode
# This file is the master python script,
# that will run each of the parsers and populate the database.

###############################
#           Env               #

import models
import config
import gffProcessor
import sqlA_insert as insert
import vcf_parse

###############################
#           Main              #

if __name__ == "__main__":
	#parse the gff and populate the database
	print "Creating database\n"
	models.main()
	print "Load GFF into gene_model database\n"
	gffProcessor.main(config.GFF, config.CHROMOSOMES)
	print "Load the interaction network into database\n"
	insert.main()
	#print "Populate the VCF tables"
	#vcf_parse.main()