# NOTE(review): the original chunk began mid-definition; this `return` belongs
# to a Fractal method whose `def` line is outside this view — restore it when
# the chunks are merged:
#     return set(range(0, self.sequence_size)) == set(factors)


def humanize(x):
    """Shift 0-based indices to 1-based for human-readable output."""
    return map(lambda e: e + 1, x)


def machinize(x):
    """Shift 1-based indices back to 0-based (inverse of humanize)."""
    return map(lambda e: e - 1, x)


def solution(idx):
    """Solve one test case: read K C S, print the case answer via util.

    K: sequence size, C: complexity, S: allowed tester count.
    Prints 'IMPOSSIBLE' when more probes are needed than testers available.
    """
    K, C, S = map(int, util.list_input())
    sequence_size = K
    complexity = C
    tester_count = S
    # Equivalent to enumerating every number as a C-digit base-K numeral
    # (0-padded); each probe covers C digits at once, so if C*S < K the
    # case is hopeless.
    f = Fractal(sequence_size, complexity)
    indexset = humanize(f.search_indexset())
    if len(indexset) > tester_count:
        util.print_case(idx, 'IMPOSSIBLE')
    else:
        util.print_case(idx, ' '.join(map(str, indexset)))


if __name__ == '__main__':
    count = util.int_input()  # float_input, list_input
    util.loop(count, solution)
def linear_search(digit, count): indigit = digit - 2 solutions = [] for i in range(0, 2**indigit): i_bin = bin(i)[2:] padded = ('0' * indigit) + i_bin str_coin = '1' + padded[-indigit:] + '1' assert len(str_coin) == digit coin = JamCoin.from_str(str_coin) divisors = coin.validate() if divisors: solutions.append((coin, divisors)) if len(solutions) >= count: break return solutions def solution(idx): N, J = map(int, util.list_input()) util.print_case(idx, '') answers = linear_search(N, J) assert len(answers) == J for coin, divisors in answers: print coin, ' '.join(map(str, divisors)) if __name__ == '__main__': count = util.int_input() util.loop(count, solution) Status API Training Shop Blog About
import sys
from datetime import datetime
from datasets import *
import random, pdb, sys
import numpy as np
import tqdm, util
from models import *

# NOTE(review): `tf` is not imported directly here — presumably re-exported by
# the star imports from `models` or `datasets`; confirm before refactoring.
model = CPDModel(node_features=(8, 100), edge_features=(1, 32),
                 hidden_dim=(16, 100))
optimizer = tf.keras.optimizers.Adam()
util.load_checkpoint(model, optimizer, sys.argv[1])


def _evaluate(label, **dataset_kwargs):
    """Run one evaluation pass over a cath_dataset test split.

    Loads the split (size 3000, optionally filtered via dataset_kwargs),
    reports test perplexity/accuracy, and saves the confusion matrix.
    """
    _, _, testset = cath_dataset(3000, **dataset_kwargs)
    loss, acc, confusion = util.loop(testset, model, train=False)
    print('{} PERPLEXITY {}, ACCURACY {}'.format(label, np.exp(loss), acc))
    util.save_confusion(confusion)


# fix this to only give individual amino acids
_evaluate('ALL TEST')
_evaluate('SHORT TEST', filter_file='../data/test_split_L100.json')
_evaluate('SINGLE CHAIN TEST', filter_file='../data/test_split_sc.json')