Example #1
    def model_evaluate_and_save(self, _actor, _critic, _class_name):
        # self.model_actor.compile(optimizer='rmsprop', loss=_loss_func, metrics=['accuracy'])
        # loss, accuracy = self.model_actor.evaluate(self.eval_x, self.eval_y)
        #
        # _, best_loss = self.get_best_loss_file(_class_name)

        # if best_loss > loss:
        today = utils.get_today()
        time_now = utils.get_time()
        path = self.get_model_weight_path(_class_name)
        file_path = path + _class_name + '_' + today + '_' + time_now + '_'
        _actor.save_weights(file_path + 'actor.h5')
        _critic.save_weights(file_path + 'critic.h5')
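These examples rely on a project-local `utils` module for timestamp strings. A minimal sketch of what `get_today()` and `get_time()` might look like, assuming they simply format the current date and time (the project's real helpers may use different format strings):

from datetime import datetime

# Hypothetical sketch of the utils helpers referenced above; the actual
# implementations in the project may differ.
def get_today():
    # e.g. '20240131'
    return datetime.now().strftime('%Y%m%d')

def get_time():
    # e.g. '153012'
    return datetime.now().strftime('%H%M%S')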
Example #2
def return_cam(feature_conv, weight_softmax, class_idx, image_size):
    # NOTE: the snippet omits the function signature; the name and parameters
    # here are inferred from the names used in the body.
    size_upsample = (image_size[1], image_size[0])
    b, nc, h, w = feature_conv.shape
    # weighted sum of the conv feature maps using the classifier weights for class_idx
    cam = weight_softmax[class_idx].dot(feature_conv.reshape((nc, h * w)))
    cam = cam.reshape(h, w)
    cam = cam - np.min(cam)
    cam_img = (255 * cam / np.max(cam)).astype(np.uint8)
    output_cam = cv2.resize(cam_img, size_upsample)
    return output_cam
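

def overlay_cam_example(img, output_cam):
    # Added sketch (not part of the original snippet): a common way to use the
    # returned CAM is to colour-map it and blend it with the source image.
    # `img` is assumed to be a BGR uint8 array with the same height and width
    # as `output_cam`.
    heatmap = cv2.applyColorMap(output_cam, cv2.COLORMAP_JET)
    return cv2.addWeighted(img, 0.5, heatmap, 0.5, 0)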


if __name__ == '__main__':
    args = parser.parse_args()

    # make folder
    today = utils.get_today() + '_cam'
    save_dir = Path(args.save_dir) / today
    utils.make_folder(save_dir)

    # transform
    transform = transforms.Compose([
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # load the CIFAR10 test set
    testset = torchvision.datasets.CIFAR10(root=args.data_dir,
                                           train=False,
                                           download=True,
                                           transform=transform)
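The fragment ends before showing where `feature_conv` and `weight_softmax` come from. In the usual CAM recipe they are the last convolutional feature map (captured with a forward hook) and the weights of the final linear layer. A minimal sketch of that wiring, assuming a torchvision ResNet-18 stands in for the model (which is not shown in the fragment) and calling the CAM helper above via its inferred signature:

import torch
import torchvision

# Hypothetical wiring for the CAM inputs; resnet18 is only a stand-in model.
model = torchvision.models.resnet18(num_classes=10).eval()

features = []
def capture(module, inputs, output):
    # keep the last conv feature map, shape (B, C, H, W)
    features.append(output.detach().numpy())

model.layer4.register_forward_hook(capture)

# classifier weights, shape (num_classes, C)
weight_softmax = model.fc.weight.detach().numpy()

image, label = testset[0]
with torch.no_grad():
    logits = model(image.unsqueeze(0))
class_idx = logits.argmax(dim=1).item()

cam = return_cam(features[0], weight_softmax, class_idx, (128, 128))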
Example #3
def get_index():
    print('CALL old TS...')
    df = ts_utils_api.call_sh_index()
    write_data(df, 'stock_index')
    return df


def get_calender(start, end):
    print('CALL pro ...')
    df = ts_pro_api.call_calender(start, end)
    write_data(df, 'calender')
    return df


if __name__ == '__main__':
    print('clear stock_index...')
    DBUtils.truncate('stock_index')

    print('set stock_index...')
    df = get_index()

    print('clear calender...')
    DBUtils.truncate('calender')

    print('set calender...')
    df = get_calender(Utils.day0, Utils.get_today())

    print('SAMPLE:')
    df = read_data('calender')
    print(df.tail())
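`write_data`, `read_data`, and `DBUtils.truncate` are project helpers that the fragment does not show. A minimal sketch of what the read/write pair could look like with pandas and SQLAlchemy, assuming a local SQLite database (the real project may target a different engine and schema):

import pandas as pd
from sqlalchemy import create_engine

# Hypothetical stand-ins for the project's write_data / read_data helpers.
engine = create_engine('sqlite:///stock.db')

def write_data(df, table):
    # append the DataFrame to the table, creating it on first use
    df.to_sql(table, engine, if_exists='append', index=False)

def read_data(table):
    return pd.read_sql(f'SELECT * FROM {table}', engine)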
Example #4
parser.add_argument('--num_workers', type=int, default=2, help='number of workers for training')
parser.add_argument('--lr', '--learning_rate', type=float, default=0.0001, help='learning rate')
parser.add_argument('--num_class', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum for sgd, alpha parameter for adam')
parser.add_argument('--beta', default=0.999, type=float, metavar='M', help='beta parameters for adam')
parser.add_argument('--model', required=True, type=str, help='name of the model architecture to train')

# classes index for CIFAR10
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

if __name__ == '__main__':
    args = parser.parse_args()

    # make folder for experiment
    today = utils.get_today()
    save_dir = Path(args.save_dir) / today
    utils.make_folder(save_dir)

    # summary writer
    writer = SummaryWriter(save_dir)

    # transform data
    transform = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    # load data
    trainset = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=True, transform=transform)
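The fragment stops after building `trainset`; wrapping it in a `DataLoader` is the standard next step. A minimal sketch (the batch size is a placeholder, since a `--batch_size` argument is not visible in this fragment, while `--num_workers` comes from the parser above):

import torch

# Hypothetical continuation: iterate the dataset in shuffled mini-batches.
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=64,
                                          shuffle=True,
                                          num_workers=args.num_workers)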
Example #5
import os

import requests
from data import DataGather
from utils import utils

mStock_num_dic = {}
mStock_name_dic = {}


def get_dic_data_path():
    paths = os.getcwd() + '/data/dic_data/'
    if not os.path.exists(paths):
        os.makedirs(paths)
    return paths


DIC_NAME_FILE = get_dic_data_path() + utils.get_today() + '_stock_name_dic.csv'
DIC_NUM_FILE = get_dic_data_path() + utils.get_today() + '_stock_num_dic.csv'

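import pandas as pd

# Added sketch (not part of the original snippet): the two per-day CSV paths
# above suggest the parsed dictionaries are cached to disk; hypothetical
# helpers that serialise them as simple key/value CSVs.
def save_stock_dics():
    pd.Series(mStock_name_dic).to_csv(DIC_NAME_FILE, header=['value'], index_label='key')
    pd.Series(mStock_num_dic).to_csv(DIC_NUM_FILE, header=['value'], index_label='key')


def load_stock_dics():
    global mStock_num_dic, mStock_name_dic
    mStock_name_dic = pd.read_csv(DIC_NAME_FILE, index_col='key')['value'].to_dict()
    mStock_num_dic = pd.read_csv(DIC_NUM_FILE, index_col='key')['value'].to_dict()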

def parse_stock_dictionary():
    global mStock_num_dic
    global mStock_name_dic

    base_url_kospi = 'https://finance.naver.com/sise/sise_market_sum.nhn?sosok=0&page='
    base_url_kosdaq = 'https://finance.naver.com/sise/sise_market_sum.nhn?sosok=1&page='

    mStock_num_dic = {}
    mStock_name_dic = {}
    for base_url in [base_url_kospi, base_url_kosdaq]:
        for i in range(1, 11):
            url = base_url + str(i)