Example no. 1
class EventType:

    ARRIVAL = -1
    # Silly implementation, find a workaround
    config = Config('perfsim.config')
    NUM_SERVER = config.NUM_SERVER
    SERVICE_FINISH = list(range(NUM_SERVER))
    # NOTE: "service finish" may or may not mean departure

    @classmethod
    def type_from_num(cls, queue_number):
        # Always called with a queue number
        return cls.SERVICE_FINISH[queue_number]

    @classmethod
    def queue_from_event(cls, etype):
        '''
        Right now the enumeration is trivial: the queue associated with
        SERVICE_FINISH[i] is simply i.
        '''
        return etype

    @classmethod
    def name(cls, number):
        if number == -1:
            return "ARRIVAL"
        return "SERVICE_FINISH Q" + str(number)
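The "find a workaround" note above points at the hand-built SERVICE_FINISH list. A minimal sketch of one possible replacement, using a dynamically built IntEnum; it assumes the same Config('perfsim.config') object exposes NUM_SERVER, and the helper name is illustrative only:

from enum import IntEnum

# Hypothetical sketch: give each queue a named SERVICE_FINISH member while
# ARRIVAL keeps the sentinel value -1.
def build_event_type(num_server):
    members = {'ARRIVAL': -1}
    members.update({'SERVICE_FINISH_Q%d' % i: i for i in range(num_server)})
    return IntEnum('EventType', members)

# Usage (assumed): Events = build_event_type(Config('perfsim.config').NUM_SERVER)
# Events(2).name       -> 'SERVICE_FINISH_Q2'
# Events.ARRIVAL.value -> -1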
Example no. 2
 def test_config_update(self, _, mock: Mock):
     mock.return_value = 2
     conf = Config()
     mock.return_value = 10
     self.assertEqual(True, conf.check_config_update())
     conf.update_config()
     self.assertEqual(10, conf.last_modification)
Example no. 3
def main(image):
    # Configuration for hyper-parameters
    config = Config()

    # Image Preprocessing
    transform = config.test_transform

    # Load vocabulary
    with open(os.path.join(config.vocab_path, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)

    # Build Models
    encoder = EncoderCNN(config.embed_size)
    encoder.eval()  # evaluation mode (BN uses moving mean/variance)
    decoder = DecoderRNN(config.embed_size, config.hidden_size, len(vocab),
                         config.num_layers)

    # Load the trained model parameters
    encoder.load_state_dict(
        torch.load(
            os.path.join(config.teacher_cnn_path, config.trained_encoder)))
    decoder.load_state_dict(
        torch.load(
            os.path.join(config.teacher_lstm_path, config.trained_decoder)))
    # Prepare Image
    image = Image.open(image)
    image_tensor = Variable(transform(image).unsqueeze(0))

    # Set initial states
    state = (Variable(torch.zeros(config.num_layers, 1, config.hidden_size)),
             Variable(torch.zeros(config.num_layers, 1, config.hidden_size)))

    # If use gpu
    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()
        state = [s.cuda() for s in state]
        image_tensor = image_tensor.cuda()

    # Generate caption from image
    feature = encoder(image_tensor)
    sampled_ids = decoder.sample(feature, state)
    sampled_ids = sampled_ids.cpu().data.numpy()

    # Decode word_ids to words
    sampled_caption = []
    for word_id in sampled_ids:
        word = vocab.idx2word[word_id]
        sampled_caption.append(word)
        if word_id == 96:
            sampled_caption.append('<end>')
            break
        if word == '<end>':
            break
    sentence = ' '.join(sampled_caption)

    # Print out image and generated caption.
    print(sentence)
    return sentence
Example no. 4
def test_passenger_generation():
    """Tests passenger generation"""
    config_name = get_full_class_name(Config)
    with patch(config_name + '.graph_dict',
               new_callable=PropertyMock) as mock_graph_dict:
        with patch(config_name + '.lines_dict',
                   new_callable=PropertyMock) as mock_lines_dict:
            with patch(config_name + '.traffic_data_dict',
                       new_callable=PropertyMock) as mock_traffic_dict:
                mock_graph_dict.return_value = {
                    'A': [('B', 1)],
                    'B': [('A', 1)]
                }
                mock_lines_dict.return_value = {
                    0: {
                        'id': 0,
                        'bus_capacity': 1,
                        'frequency1': 10000000,
                        'frequency2': 1000000000,
                        'route1': ['B', 'A'],
                        'route2': ['A', 'B']
                    }
                }
                config = Config(["A", "B"], {}, {}, {}, 1.0)
                mock_traffic_dict.return_value = {
                    'A': {
                        'A': 0,
                        'B': 120
                    },
                    'B': {
                        'A': 0,
                        'B': 0
                    }
                }

                generated = []
                model = []
                simulation = Simulation(config)
                print(config.lines_dict)
                simulation.refresh()
                simulation.refresh()

                for i in range(100000):
                    simulation.refresh()
                    if simulation.stops['A'].passengers:
                        generated.append(
                            simulation.stops['A'].passengers[0].count)

                for i in range(len(generated) - 1, 0, -1):
                    generated[i] -= generated[i - 1]
                    model.append(np.random.poisson(2))
                model.append(np.random.poisson(2))
                generated = np.sort(np.array(generated))
                model = np.sort(np.array(model))
                res = sum(np.abs(generated - model) != 0)

                assert res / len(generated) <= 0.02, \
                    "if it doesn't work, rerun it and don't complain; honestly it only works some of the time"
Example no. 5
def main():
    config = Config()
    splits = ['train', 'val']
    for split in splits:
        image_dir = os.path.join(config.image_path, '%s2014/' % split)
        output_dir = os.path.join(config.image_path, '%s2014resized' % split)
        resize_images(image_dir, output_dir,
                      (config.image_size, config.image_size))
Example no. 6
 def __init__(self):
     config = Config()
     self.myclient = pymongo.MongoClient(
         config.get('database', 'host'), int(config.get('database',
                                                        'port')))
     self.mydb = self.myclient[config.get('database', 'db_name')]
     self.mydb.authenticate(config.get('database', 'user'),
                            config.get('database', 'password'))
Example no. 7
def main():
    config = Config()
    vocab = build_vocab(json=os.path.join(config.caption_path,
                                          'captions_train2014.json'),
                        threshold=config.word_count_threshold)
    vocab_path = os.path.join(config.vocab_path, 'vocab.pkl')
    with open(vocab_path, 'wb') as f:
        pickle.dump(vocab, f, pickle.HIGHEST_PROTOCOL)
    print("Saved the vocabulary wrapper to ", vocab_path)
Example no. 8
def main():
    start = time.time()
    config_data_path = r'./config/config.json'
    config = Config(config_data_path)
    data_df, date_list, industry_list = data_process(config)
    date_factor = date_list.sort_values()
    industry_factor = industry_list.sort_values()
    compute_factor(data_df, date_factor, industry_factor, config)
    print('cost time:', time.time() - start)
Example no. 9
 def setup(self):
     """"preform basic configuration, create the Config object which enables configuration to preform
     configuration with usage of config.json"""
     logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     self.config = Config()
     self.config.start()
     self.client = Client(self.config)
     self.agentLog = collector.AgentLogCollector()
     logging.info("Agent initialized successfully!")
Example no. 10
    def build_default(deployment_home, cwd, CommandClasses):
        """
        Create a runtime from the command line arguments and configuration on
        disk.

        If you want something more custom, e.g. in testing, you can build
        it yourself ;)
        """

        if len(CommandClasses):
            arg_parser = CommandClasses[0].build_arg_parser()
        else:
            arg_parser = Runtime.__create_placeholder_arg_parser()
        Runtime.add_default_arguments(arg_parser)

        runtime = Runtime()

        for CommandClass in CommandClasses:
            CommandClass.register_services(runtime)

        for ServiceClass in runtime.each_service_class():
            add_default_arguments_method = getattr(ServiceClass,
                                                   'add_default_arguments',
                                                   None)
            if add_default_arguments_method and callable(
                    add_default_arguments_method):
                ServiceClass.add_default_arguments(arg_parser)

        options = arg_parser.parse_args()
        if not hasattr(options, 'deployment_home'):
            options.deployment_home = deployment_home

        config = Config()
        config.set_options(options)
        config.set_cwd(cwd)
        if hasattr(CommandClass, 'DOTFILE_NAME'):
            config.set_dotfile_name(CommandClass.DOTFILE_NAME)

        try:
            config.read()
        except Exception as e:
            if not hasattr(
                    CommandClass,
                    'is_config_required') or CommandClass.is_config_required():
                raise

        log = Log()
        log.set_options(options)

        runtime.set_options(options)
        runtime.set_config(config)
        runtime.set_log(log)
        runtime.set_cwd(cwd)

        return runtime
Example no. 11
    def __init__(self):
        LOG.debug("JellyfinClient initializing...")

        self.config = Config()
        self.http = HTTP(self)
        self.wsc = WSClient(self)
        self.auth = ConnectionManager(self)
        self.jellyfin = api.API(self.http)
        self.callback_ws = callback
        self.callback = callback
Example no. 12
def dim_division_main():
    """
    Main entry point of the dimension-division program
    :return:
    """
    # Get the configuration object
    conf = Config()
    logging.info("Dimension division started")
    dim_division(conf)
    logging.info("Dimension division finished")
Example no. 13
def v1(pretrained=False):
    config = Config()
    model, _ = caption.build_model(config)
    
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url='https://github.com/saahiluppal/catr/releases/download/0.1/weights_9348032.pth',
            map_location='cpu'
        )
        model.load_state_dict(checkpoint['model'])
    
    return model
Example no. 14
def get_length_dist():
    # Load configuration
    config = Config()

    # Get distribution of num_events
    result = []
    with open(config.data_path, 'r') as data_file:
        lines = data_file.readlines()
        for line in lines:
            user_behave = list(json.loads(line).values())[0]
            result.append(len(user_behave))

    return result
Example no. 15
	def __init__(self):
		self.config = Config()
		self.__running = True
		import os
		os.environ['SDL_VIDEO_CENTERED'] = '1'
		self.__screen = pygame.display.set_mode(
			self.config.get(self.config.RESOLUTION))
		if self.config.get(self.config.FULLSCREEN):
			self.toggle_fullscreen()
		pygame.display.set_caption("ISN Project")
		self.__clock = pygame.time.Clock()
		self.__dtime = 0
		self.__scene = Scenes.test_scene.SceneTest(self)
Example no. 16
    def __init__(self):
        cnf = Config()
        self.csv_model_path = cnf.csv_model_path
        self.new_file_dir = cnf.new_csv_dir
        self.resource_csv = cnf.resource_csv
        self.partNos_count = cnf.partNos_count
        self.asc_codes_count = cnf.asc_codes_count
        self.logger = logger
        self.csv_model_name = None
        self.start_time = time.time()

        self.load_resource_csv()
        self.load_csv_model()
Example no. 17
def main():

    conf = Config().config

    while CONTINUE_WORKING:
        to_draw = {
            'isp': internet_speed_summary.InternetSpeed(conf).get_speed(),
            'qnap': qnap_summary.QnapSummary(conf).get_stats(),
            'pihole': pi_hole_summary.PiHoleSummary(conf).get_stats()
        }

        canvas = canvas_helper.CanvasHelper()
        canvas.draw(to_draw)
        sleep(1)
Example no. 18
def run_dim_cluster_main(syscode):
    conf = Config()
    logging.info("{}系统分析开始".format(syscode))

    input_helper, output_helper = dynamic_import(conf)
    input_conn, output_conn = get_input_output_conn(conf)

    tables_schedule = output_helper.get_all_fk_tables(output_conn, conf.output_schema)
    filter_fks = output_helper.get_all_fk_id_in_detail(output_conn, conf.output_schema)

    tables = [tup for tup in tables_schedule if tup[0] == syscode]
    logging.info("分析表数量:{}".format(len(tables)))
    run_analyse(conf, input_conn, output_conn, tables, filter_fks)
    logging.info("{}系统分析结束".format(syscode))
    close_odbc_connection(input_conn)
    close_db2_connection(output_conn)
Example no. 19
def feature_main(sys_code, table_code, etl_date, date_offset, alg):
    """
    字段特征分析程序入口
    :param sys_code: 系统编号
    :param table_code: 表编号
    :param etl_date: 卸数日期
    :param date_offset: 日期偏移量
    :param alg: 卸数方式
    :return:
    """
    etl_dates = date_trans(etl_date, date_offset)
    conf = Config()

    # 2、开始按表分析字段特征
    logging.info("{}表特征分析开始".format(table_code))
    analyse_table_feature(conf, sys_code, table_code, alg, etl_dates)
    logging.info("{}表特征分析完成".format(table_code))
Example no. 20
def main(_):
    # Load configuration
    config = Config()

    # Build the graph and run session
    with tf.Graph().as_default(), tf.Session() as sess:
        autoEncoder = AutoEncoder(config)
        autoEncoder.build_model()
        sess.run(tf.initialize_all_variables())
        for epoch in range(config.num_epoch):
            with open(config.data_path, 'r') as data_file:
                mse = autoEncoder.run_epoch(sess, data_file)
            if epoch % 1 == 0:
                print('Epoch %d has mean square error: %g' % (epoch, mse))

        saver = tf.train.Saver(tf.all_variables())
        saver.save(sess, config.save_path)
Example no. 21
def main():
    config_path = r'./config/config.json'
    config = Config(config_path)
    mrawret_save_path = config.mrawret_save_path
    mrawret = pd.read_csv(mrawret_save_path)
    num = len(mrawret)
    func = lambda x: (pow(x, 1/num) - 1) * 52
    factor_model_return = mrawret.loc[num-1, 'factor_model_cum'] / mrawret.loc[0, 'factor_model_cum']
    hs300index_return = mrawret.loc[num-1, 'hs300index_cum'] / mrawret.loc[0, 'hs300index_cum']
    net_ret_return = mrawret.loc[num-1, 'net_ret_cum'] / mrawret.loc[0, 'net_ret_cum']
    print('factor model ', func(factor_model_return))
    print('hs300index', func(hs300index_return))
    print("net_ret ", func(net_ret_return))
    print("*" * 50)
    print('factor model ', np.mean(mrawret.loc[1:, 'factor_model']) * 52)
    print('hs300index ', np.mean(mrawret.loc[1:, 'hs300index']) * 52)
    print('net_ret ', np.mean(mrawret.loc[1:, 'net_ret']) * 52)
Example no. 22
def test_bus_generation():
    """Tests bus generation"""
    config_name = get_full_class_name(Config)
    with patch(config_name + '.graph_dict',
               new_callable=PropertyMock) as mock_graph_dict:
        with patch(config_name + '.lines_dict',
                   new_callable=PropertyMock) as mock_lines_dict:
            with patch(config_name + '.traffic_data_dict',
                       new_callable=PropertyMock) as mock_traffic_dict:
                mock_graph_dict.return_value = {
                    'A': [('B', 1)],
                    'B': [('A', 1)]
                }
                mock_lines_dict.return_value = {
                    0: {
                        'id': 0,
                        'bus_capacity': 20,
                        'frequency1': 10,
                        'frequency2': 1000,
                        'route1': ['B', 'A'],
                        'route2': ['A', 'B']
                    }
                }
                config = Config(["A", "B"], {}, {}, {}, 1.0)
                mock_traffic_dict.return_value = {
                    'A': {
                        'A': 0,
                        'B': 0
                    },
                    'B': {
                        'A': 0,
                        'B': 0
                    }
                }

                simulation = Simulation(config)
                assert Bus.BUS_COUNTER == 0
                simulation.refresh()
                simulation.refresh()
                assert Bus.BUS_COUNTER == 2

                for i in range(1, 10):
                    for _ in range(10):
                        simulation.refresh()
                    assert Bus.BUS_COUNTER == 2 + i
Example no. 23
 def test_init(self, _):
     dict_ = {
         "config_update_delay": 10,
         "server": {
             "ip": "localhost",
             "port": 8081
         },
         "send_frequency": 10,
         "send_agent_errors": False,
         "sys_logs": {
             "send": True,
             "limit": 200,
             "reverse": True,
             "priority": "ERROR"
         }
     }
     mock = mock_open(read_data=json.dumps(dict_))
     with patch("configuration.open", mock):
         conf = Config()
         self.assertEqual(10, conf.get_send_frequency())
         self.assertEqual('localhost', conf.get_server_ip())
         self.assertEqual(8081, conf.get_server_port())
         self.assertEqual(dict_['sys_logs'], conf.logs_config.__dict__)
Example no. 24
    def __init__(self,
                 input_filename,
                 parameters=None,
                 output_filenames=None,
                 extrap_filenames=None):
        # Give default values to attributes that are not specified at
        # instantiation. These values must be changed *after* instantiation.
        self.use_brune = True
        self.use_gsl = True
        self.ext_par_file = '\n'
        self.ext_capture_file = '\n'
        self.command = 'AZURE2'
        self.root_directory = ''

        self.config = Config(input_filename, parameters=parameters)
        '''
        If parameters are not specified, they are inferred from the input file.
        '''
        if parameters is None:
            self.parameters = self.config.parameters.copy()
        else:
            self.parameters = parameters.copy()
        '''
        If output files are not specified, they are inferred from the input file.
        '''
        if output_filenames is None:
            self.output_filenames = self.config.data.output_files
        else:
            self.output_filenames = output_filenames
        '''
        If extrapolation files are not specified, they are inferred from the input file.
        '''
        if extrap_filenames is None:
            self.extrap_filenames = self.config.test.output_files
        else:
            self.extrap_filenames = extrap_filenames
Example no. 25
def main():
    # Load configuration
    config = Config()

    # Parse user_list representations
    user_list = []
    user_id_list = []
    with open(config.rep_path, "r") as data_file:
        lines = data_file.readlines()
        for line in lines:
            user_ = line.split(":")[1].replace("[", "").replace(']"}',
                                                                "").split()
            user = [float(u) for u in user_[1:len(user_)]]
            user_list.append(user)
            user_id_list.append(
                line.split(":")[0].replace("{", "").replace('"', ""))
    user_list = np.array(user_list)
    user_id_list = np.array(user_id_list)

    # If tsne is already run
    path_user_tsne = os.path.join(os.path.dirname(config.save_path),
                                  "user_tsne")
    if os.path.isfile(path_user_tsne):
        user_tsne = load_object(path_user_tsne)
    else:
        # Run TSNE
        model = TSNE(n_components=2, random_state=0)
        np.set_printoptions(suppress=True)
        user_tsne = model.fit_transform(user_list)

        # Save TSNE objects
        print "Save user_tsne."
        save_object(user_tsne, "save/user_tsne")

    # Run KMeans clustering
    kmeans = KMeans(init="k-means++", n_clusters=8, n_init=10)
    km = kmeans.fit(user_list)

    # Get cluster labels
    labels = km.labels_
    unique_labels = set(labels)

    # Save clustering results
    save_object(user_id_list, "save/user_ids_km")
    save_object(labels, "save/labels_km")

    # Save the cluster_to_user dict
    cluster_to_user = dict()
    for k in unique_labels:
        class_member_mask = (labels == k)
        class_k = user_id_list[class_member_mask]
        cluster_to_user[k] = class_k
    save_object(cluster_to_user, "save/cluster_to_user")

    # Save the user_to_cluster dict
    user_to_cluster = dict()
    for user, label in zip(user_id_list, labels):
        user_to_cluster[user] = label
    save_object(user_to_cluster, "save/user_to_cluster")

    # Plot results
    colors = plt.get_cmap("Spectral")(np.linspace(0, 1, len(unique_labels)))
    for k, col in zip(unique_labels, colors):
        class_member_mask = (labels == k)
        xy = user_tsne[class_member_mask]
        plt.plot(xy[:, 0],
                 xy[:, 1],
                 "o",
                 markerfacecolor=col,
                 markeredgecolor="k",
                 markersize=3)

    plt.title("KMeans Clustering")
    plt.show()
Example no. 26
import argparse

parser = argparse.ArgumentParser(description='Image captioning')
parser.add_argument('--path', type=str, help='path to image', required=True)
parser.add_argument('--v', type=str, help='version', default='v3')
args = parser.parse_args()
image_path = args.path
version = args.v

if version == 'v1':
    model = torch.hub.load('saahiluppal/catr', 'v1', pretrained=True)
elif version == 'v2':
    model = torch.hub.load('saahiluppal/catr', 'v2', pretrained=True)
elif version == 'v3':
    model = torch.hub.load('saahiluppal/catr', 'v3', pretrained=True)
else:
    raise NotImplementedError('Version not implemented')

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
config = Config()

start_token = tokenizer.convert_tokens_to_ids(tokenizer._cls_token)
end_token = tokenizer.convert_tokens_to_ids(tokenizer._sep_token)

image = Image.open(image_path)
image = coco.val_transform(image)
image = image.unsqueeze(0)


def create_caption_and_mask(start_token, max_length):
    caption_template = torch.zeros((1, max_length), dtype=torch.long)
    mask_template = torch.ones((1, max_length), dtype=torch.bool)

    caption_template[:, 0] = start_token
    mask_template[:, 0] = False

    return caption_template, mask_template
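The rest of Example no. 26 is not shown. Purely as context, here is a hedged sketch of the kind of greedy decoding loop such a caption/mask pair usually feeds; the model call signature, the max_length default, and the helper name greedy_decode are assumptions, not taken from this repository:

import torch

@torch.no_grad()
def greedy_decode(model, image, start_token, end_token, max_length=128):
    # Fill the caption one position at a time until the end token appears
    # or max_length is reached.
    caption, cap_mask = create_caption_and_mask(start_token, max_length)
    for i in range(max_length - 1):
        logits = model(image, caption, cap_mask)   # assumed signature
        predicted_id = logits[:, i, :].argmax(dim=-1)
        if predicted_id[0].item() == end_token:
            break
        caption[:, i + 1] = predicted_id[0]
        cap_mask[:, i + 1] = False
    return caption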
Example no. 27
def main():
    # Configuration for hyper-parameters
    config = Config()
    
    # Image preprocessing
    transform = config.train_transform
    
    # Load vocabulary wrapper
    with open(os.path.join(config.vocab_path, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)

    # Build data loader
    image_path = os.path.join(config.image_path, 'train2014')
    json_path = os.path.join(config.caption_path, 'captions_train2014.json')
    train_loader = get_data_loader(image_path, json_path, vocab, 
                                   transform, config.batch_size,
                                   shuffle=True, num_workers=config.num_threads) 
    total_step = len(train_loader)

    # Build Models
    teachercnn = EncoderCNN(config.embed_size)
    teachercnn.eval()
    studentcnn = StudentCNN_Model1(config.embed_size)
    #Load the best teacher model
    teachercnn.load_state_dict(torch.load(os.path.join('../TrainedModels/TeacherCNN', config.trained_encoder))) 
    studentlstm = DecoderRNN(config.embed_size, config.hidden_size // 2,
                             len(vocab), config.num_layers // 2)

    if torch.cuda.is_available():
        teachercnn.cuda()
        studentcnn.cuda()
        studentlstm.cuda()

    # Loss and Optimizer
    criterion_lstm = nn.CrossEntropyLoss()
    criterion_cnn = nn.MSELoss()
    params = list(studentlstm.parameters()) + list(studentcnn.parameters())
    optimizer_lstm = torch.optim.Adam(params, lr=config.learning_rate)
    optimizer_cnn = torch.optim.Adam(studentcnn.parameters(), lr=config.cnn_learningrate)

    print('entering in to training loop')
    # Train the Models
    for epoch in range(config.num_epochs):
        for i, (images, captions, lengths, img_ids) in enumerate(train_loader):
            images = Variable(images)
            captions = Variable(captions)
            if torch.cuda.is_available():
                images = images.cuda()
                captions = captions.cuda()
            targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]
            # Forward, Backward and Optimize
            optimizer_lstm.zero_grad()
            optimizer_cnn.zero_grad()
            features_tr = teachercnn(images)
            features_st = studentcnn(images)
            outputs = studentlstm(features_st, captions, lengths)
            # Distillation loss (feature MSE) plus captioning loss
            loss = criterion_cnn(features_st, features_tr.detach()) + \
                criterion_lstm(outputs, targets)
            loss.backward()
            optimizer_cnn.step()
            optimizer_lstm.step()

            # Print log info
            if i % config.log_step == 0:
                print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
                      % (epoch, config.num_epochs, i, total_step,
                         loss.data[0], np.exp(loss.data[0])))

            # Save the Model
            if (i + 1) % config.save_step == 0:
                torch.save(studentlstm.state_dict(),
                           os.path.join(config.student_lstm_path,
                                        'decoder-%d-%d.pkl' % (epoch + 1, i + 1)))
                torch.save(studentcnn.state_dict(),
                           os.path.join(config.student_cnn_path,
                                        'encoder-%d-%d.pkl' % (epoch + 1, i + 1)))
Example no. 28
import torch
import cv2
import numpy as np
import random
import matplotlib.pyplot as plt
import os
import glob
import json
from skimage import exposure
import warnings

from configuration import Config
from torch.utils.data import Dataset
import multiprocessing

multiprocessing.set_start_method('spawn', True)
cfg = Config()


class ImageGenerator(Dataset):

    def __init__(self, input_path, num_images, transform=None):

        with open(input_path, 'r') as f:
            self.img_list = f.readlines()
        self.num_images = num_images
        self.transform = transform

    def __len__(self):
        return (self.num_images // cfg.batch_size) * cfg.batch_size

    def __getitem__(self, idx):
Example no. 29
def main():
    # Configuration for hyper-parameters

    torch.cuda.set_device(0)
    config = Config()
    # Image preprocessing
    transform = config.train_transform
    # Load vocabulary wrapper
    with open(os.path.join(config.vocab_path, 'vocab.pkl'), 'rb') as f:
        vocab = pickle.load(f)
    # Build data loader
    train_image_path = os.path.join(config.image_path, 'train2017')
    json_path = os.path.join(config.caption_path, 'captions_train2017.json')
    train_loader = get_data_loader(train_image_path,
                                   json_path,
                                   vocab,
                                   transform,
                                   config.batch_size,
                                   shuffle=False,
                                   num_workers=config.num_threads)

    val_image_path = os.path.join(config.image_path, 'val2017')
    json_path = os.path.join(config.caption_path, 'captions_val2017.json')
    val_loader = get_data_loader(val_image_path,
                                 json_path,
                                 vocab,
                                 transform,
                                 config.batch_size,
                                 shuffle=False,
                                 num_workers=config.num_threads)

    total_step = len(train_loader)

    # Build Models
    encoder = EncoderCNN(config.embed_size)
    encoder.eval()
    decoder = DecoderRNN(config.embed_size, config.hidden_size, len(vocab),
                         config.num_layers)

    if torch.cuda.is_available():
        encoder.cuda()
        decoder.cuda()

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    params = list(decoder.parameters()) + list(encoder.resnet.fc.parameters())
    optimizer = torch.optim.Adam(params, lr=config.learning_rate)

    print('entering in to training loop')
    # Train the Models

    with open('train1_log.txt', 'w') as logfile:
        logfile.write('Validation Error,Training Error\n')
        for epoch in range(0, 25):
            for i, (images, captions, lengths,
                    img_ids) in enumerate(train_loader):
                images = Variable(images)
                captions = Variable(captions)
                if torch.cuda.is_available():
                    images = images.cuda()
                    captions = captions.cuda()
                targets = pack_padded_sequence(captions,
                                               lengths,
                                               batch_first=True)[0]
                # Forward, Backward and Optimize
                optimizer.zero_grad()
                features = encoder(images)
                outputs = decoder(features, captions, lengths)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                # Print log info
                if i % config.log_step == 0:
                    print(
                        'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f'
                        % (epoch, config.num_epochs, i, total_step,
                           loss.data[0], np.exp(loss.data[0])))

                # Save the Model
                if (i + 1) % config.save_step == 0:
                    torch.save(
                        encoder.state_dict(),
                        os.path.join(config.teacher_cnn_path,
                                     'encoder-%d-%d.pkl' % (epoch + 1, i + 1)))
                    torch.save(
                        decoder.state_dict(),
                        os.path.join(config.teacher_lstm_path,
                                     'decoder-%d-%d.pkl' % (epoch + 1, i + 1)))

            print('Just Completed an Epoch, Initiate Validation Error Test')
            avgvalloss = 0
            for j, (images, captions, lengths,
                    img_ids) in enumerate(val_loader):
                images = Variable(images)
                captions = Variable(captions)
                if torch.cuda.is_available():
                    images = images.cuda()
                    captions = captions.cuda()
                targets = pack_padded_sequence(captions,
                                               lengths,
                                               batch_first=True)[0]
                optimizer.zero_grad()
                features = encoder(images)
                outputs = decoder(features, captions, lengths)
                valloss = criterion(outputs, targets)
                if j == 0:
                    avgvalloss = valloss.data[0]
                avgvalloss = (avgvalloss + valloss.data[0]) / 2
                if ((j + 1) % 1000 == 0):
                    print('Average Validation Loss: %.4f' % (avgvalloss))
                    logfile.write(
                        str(avgvalloss) + ',' + str(loss.data[0]) + str('\n'))
                    break
Example no. 30
#!/usr/local/bin/python3.7

import const as cs
from configuration import Config
from reaction import Reaction

# Read temperature
print("Calculation of the reaction rate")
T_K = float(input("Type temperature (in K): "))
if T_K < 0.0:
    raise Exception("Negative temperature")

# Initiate thermodynamics objects
in_state = Config("initial state")
ts_state = Config("transition state")
fi_state = Config("final state")
print()

print(in_state)
print(ts_state)
print(fi_state)

diss = Reaction("dissociation", in_state, ts_state, fi_state, T_K)
print(diss)