def __init__(self, num_classes, fix_points, depth=16, k=64, batch_norm=False):
    super(VGGCurve, self).__init__()
    layer_blocks, activation_blocks, poolings = make_layers(
        get_config(depth, k=k), batch_norm, fix_points=fix_points)
    self.layer_blocks = layer_blocks
    self.activation_blocks = activation_blocks
    self.poolings = poolings

    self.dropout1 = nn.Dropout()
    self.fc1 = curves.Linear(512, 512, fix_points=fix_points)
    self.relu1 = nn.ReLU(inplace=True)
    self.dropout2 = nn.Dropout()
    self.fc2 = curves.Linear(512, 512, fix_points=fix_points)
    self.relu2 = nn.ReLU(inplace=True)
    self.fc3 = curves.Linear(512, num_classes, fix_points=fix_points)

    # Initialize weights: He-normal conv weights and zero biases for each bend
    for m in self.modules():
        if isinstance(m, curves.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            for i in range(m.num_bends):
                getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
                getattr(m, 'bias_%d' % i).data.zero_()
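# A minimal construction sketch (an assumption, not from the source): the bend
# pattern [True, False, True] describes a three-point curve whose endpoints are
# fixed, and num_classes=10 is a CIFAR-style placeholder.
example_vgg = VGGCurve(num_classes=10, fix_points=[True, False, True],
                       depth=16, k=64, batch_norm=True)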
def __init__(self):
    # Timers and counters
    self.previous_time = 0
    self.death_time = 0
    self.ambulance_time = 0
    self.bottle_time = 0
    self.incident_num = 0

    self.generator = bottles.BottleGenerator()
    self.current_bottle = bottles.ghost_bottle
    self.previous_bottle = bottles.ghost_bottle

    # Easing curves for the bottle-toss animation
    self.shift = curves.SineOut(-self.SHIFT_AMOUNT, 0, self.SHIFT_LENGTH)
    self.toss_x = curves.Linear(0, -320, self.SHIFT_LENGTH)
    self.toss_y = curves.QuadraticArc(0, -100, self.SHIFT_LENGTH - 4)
    self.toss_scale = curves.Linear(1, 0.05, self.SHIFT_LENGTH)
    self.toss_rotate = curves.Linear(0, 290, self.SHIFT_LENGTH)
    # Park each toss curve at its final frame so no toss plays at startup
    self.toss_x.frame = self.toss_x.length
    self.toss_y.frame = self.toss_y.length
    self.toss_scale.frame = self.toss_scale.length
    self.toss_rotate.frame = self.toss_rotate.length

    self.homunculus_eat_delay = 0
    self.green_timer_frame = 0
    self.one_more_frame = 0

    # Game-over and cutscene state
    self.game_over = False
    self.win = False
    self.ambulance_anim_countdown = 0
    self.death_anim_countdown = 0
    self.death_anim_frame = 0
    self.death_circles = []
    self.in_ending_cutscene = False
    self.ambulance_entrance = curves.SineOut(1000, -250, 80)
    self.ambulance_exit = curves.SineOut(-250, -1000, 80)
    self.ambulance_x = 1000

    # Bottle and judgement state
    self.bottles = []
    self.allergies = []
    self.previous_brand = ""
    self.bottles_to_judge = []
    self.judgement_timers = []
    self.alternating = False
    self.has_eaten = False
    self.last_eaten_is_safe = False
    self.menu_level_num = 0
def __init__(self, num_classes, fix_points, depth=16, batch_norm=False):
    super(OneLayerCurve, self).__init__()
    self.fc1 = curves.Linear(INPUT_DIM, N_HIDDEN_NODES,
                             fix_points=fix_points, bias=False)
    self.relu1 = nn.ReLU(inplace=True)
    self.fc2 = curves.Linear(N_HIDDEN_NODES, num_classes,
                             fix_points=fix_points, bias=False)
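# Hedged usage sketch: INPUT_DIM and N_HIDDEN_NODES must already be defined at
# module level for the constructor above; the arguments below are illustrative.
example_mlp = OneLayerCurve(num_classes=10, fix_points=[True, False, True])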
def __init__(self, num_classes, fix_points, depth=110):
    super(PreResNetCurve, self).__init__()
    if depth >= 44:
        assert (depth - 2) % 9 == 0, 'depth should be 9n+2'
        n = (depth - 2) // 9
        block = BottleneckCurve
    else:
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        n = (depth - 2) // 6
        block = BasicBlockCurve

    self.inplanes = 16
    self.conv1 = curves.Conv2d(3, 16, kernel_size=3, padding=1, bias=False,
                               fix_points=fix_points)
    self.layer1 = self._make_layer(block, 16, n, fix_points=fix_points)
    self.layer2 = self._make_layer(block, 32, n, stride=2, fix_points=fix_points)
    self.layer3 = self._make_layer(block, 64, n, stride=2, fix_points=fix_points)
    self.bn = curves.BatchNorm2d(64 * block.expansion, fix_points=fix_points)
    self.relu = nn.ReLU(inplace=True)
    self.avgpool = nn.AvgPool2d(8)
    self.fc = curves.Linear(64 * block.expansion, num_classes,
                            fix_points=fix_points)

    # He-normal init for conv weights; BatchNorm scales to 1 and shifts to 0,
    # applied to every bend of each curve module
    for m in self.modules():
        if isinstance(m, curves.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            for i in range(m.num_bends):
                getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, curves.BatchNorm2d):
            for i in range(m.num_bends):
                getattr(m, 'weight_%d' % i).data.fill_(1)
                getattr(m, 'bias_%d' % i).data.zero_()
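# Worked depth arithmetic for the branches above: the default depth=110
# satisfies (110 - 2) % 9 == 0, so it builds n = 12 BottleneckCurve blocks per
# stage, while e.g. depth=20 gives n = (20 - 2) // 6 = 3 BasicBlockCurve
# blocks. The construction below is an illustrative sketch, not from the source.
example_preresnet = PreResNetCurve(num_classes=10,
                                   fix_points=[True, False, True], depth=110)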
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
             embedding_length, weights, fix_points):
    """
    Arguments
    ---------
    batch_size : Size of each batch, the same as the batch_size of the data
        returned by the TorchText BucketIterator
    num_classes : 2 = (pos, neg)
    hidden_size : Size of the hidden state of the LSTM
    vocab_size : Size of the vocabulary of unique words
    embedding_length : Embedding dimension of the GloVe word embeddings
    weights : Pre-trained GloVe word embeddings used to build the
        word-embedding look-up table
    fix_points : Boolean vector marking which bends of the curve are fixed
    """
    super(LSTMClassifierCurve, self).__init__()
    self.batch_size = batch_size
    self.num_classes = num_classes
    self.hidden_size = hidden_size
    self.vocab_size = vocab_size
    self.embedding_length = embedding_length

    # Initialize the look-up table and assign it the pre-trained GloVe
    # embeddings, frozen, for every bend of the curve
    self.word_embeddings = curves.Embedding(vocab_size, embedding_length,
                                            fix_points=fix_points)
    for i in range(len(fix_points)):
        self.word_embeddings.register_parameter(
            'weight_%d' % i, nn.Parameter(weights, requires_grad=False))

    self.lstm = curves.LSTM(embedding_length, hidden_size, fix_points=fix_points)
    self.label = curves.Linear(hidden_size, num_classes, fix_points=fix_points)
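# Hedged construction sketch: `weights` is expected to have shape
# (vocab_size, embedding_length); the zero tensor stands in for real GloVe
# vectors, and every argument below is an illustrative assumption.
glove_stub = torch.zeros(25000, 300)
example_lstm = LSTMClassifierCurve(batch_size=32, num_classes=2,
                                   hidden_size=256, vocab_size=25000,
                                   embedding_length=300, weights=glove_stub,
                                   fix_points=[True, False, True])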
def __init__(self, num_classes, fix_points, depth=28, k=10, p=0):
    super(WideResNetCurve, self).__init__()
    self.in_planes = 16

    assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
    n = (depth - 4) // 6  # number of wide-basic blocks per stage
    nstages = [16, 16 * k, 32 * k, 64 * k]

    self.conv1 = conv3x3curve(3, nstages[0], fix_points=fix_points)
    self.layer1 = self._wide_layer(WideBasicCurve, nstages[1], n, p, stride=1,
                                   fix_points=fix_points)
    self.layer2 = self._wide_layer(WideBasicCurve, nstages[2], n, p, stride=2,
                                   fix_points=fix_points)
    self.layer3 = self._wide_layer(WideBasicCurve, nstages[3], n, p, stride=2,
                                   fix_points=fix_points)
    self.bn1 = curves.BatchNorm2d(nstages[3], momentum=0.9,
                                  fix_points=fix_points)
    self.linear = curves.Linear(nstages[3], num_classes, fix_points=fix_points)
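# Hedged sketch matching the defaults (a WRN-28-10): depth=28 yields
# n = (28 - 4) // 6 = 4 WideBasicCurve blocks per stage. num_classes and the
# bend pattern are illustrative assumptions.
example_wrn = WideResNetCurve(num_classes=10, fix_points=[True, False, True],
                              depth=28, k=10)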
def __init__(self, dimensions, fix_points, input_dim=1, output_dim=1,
             dropout=None):
    super().__init__()
    self.dimensions = [input_dim, *dimensions, output_dim]
    for i in range(len(self.dimensions) - 1):
        if dropout is not None and i > 0:
            self.add_module('dropout%d' % i,
                            MDropout(self.dimensions[i], p=dropout))
        self.add_module('linear%d' % i,
                        curves.Linear(self.dimensions[i],
                                      self.dimensions[i + 1],
                                      fix_points=fix_points))
        if i < len(self.dimensions) - 2:
            # Registered under the key 'tanh%d' although the activation is a
            # ReLU; the key is kept so existing state_dict names still match
            self.add_module('tanh%d' % i, torch.nn.ReLU())
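# Hedged sketch: dimensions=[16, 16] expands to layer widths [1, 16, 16, 1],
# producing linear0..linear2 with dropout before every layer except the first
# and an activation after all but the last. Arguments are illustrative.
example_regnet = RegNetCurve(dimensions=[16, 16],
                             fix_points=[True, False, True], dropout=0.1)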
def __init__(self, num_classes, fix_points, block=BottleneckCurve,
             nblocks=[6, 12, 24, 16], growth_rate=12, reduction=0.5):
    super(DenseNetCurve, self).__init__()
    self.growth_rate = growth_rate

    num_planes = 2 * growth_rate
    self.conv1 = curves.Conv2d(3, num_planes, kernel_size=3, padding=1,
                               bias=False, fix_points=fix_points)

    self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0],
                                          fix_points=fix_points)
    num_planes += nblocks[0] * growth_rate
    out_planes = int(math.floor(num_planes * reduction))
    self.trans1 = Transition(num_planes, out_planes, fix_points)
    num_planes = out_planes

    self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1],
                                          fix_points=fix_points)
    num_planes += nblocks[1] * growth_rate
    out_planes = int(math.floor(num_planes * reduction))
    self.trans2 = Transition(num_planes, out_planes, fix_points)
    num_planes = out_planes

    self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2],
                                          fix_points=fix_points)
    num_planes += nblocks[2] * growth_rate
    out_planes = int(math.floor(num_planes * reduction))
    self.trans3 = Transition(num_planes, out_planes, fix_points)
    num_planes = out_planes

    self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3],
                                          fix_points=fix_points)
    num_planes += nblocks[3] * growth_rate

    self.bn = curves.BatchNorm2d(num_planes, fix_points=fix_points)
    self.linear = curves.Linear(num_planes, num_classes, fix_points=fix_points)
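# Worked channel arithmetic for the defaults above: the stem emits
# 2 * 12 = 24 planes; dense1 adds 6 * 12 = 72 (96), halved by trans1 to 48;
# dense2 reaches 48 + 144 = 192, halved to 96; dense3 reaches 96 + 288 = 384,
# halved to 192; dense4 reaches 192 + 192 = 384, the width of the final
# BatchNorm and classifier. The construction below is an illustrative sketch.
example_densenet = DenseNetCurve(num_classes=10,
                                 fix_points=[True, False, True])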
def __init__(self, num_classes, fix_points):
    super(ConvFCCurve, self).__init__()
    self.conv1 = curves.Conv2d(3, 32, kernel_size=5, padding=2,
                               fix_points=fix_points)
    self.relu1 = nn.ReLU(True)
    self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

    self.conv2 = curves.Conv2d(32, 64, kernel_size=5, padding=2,
                               fix_points=fix_points)
    self.relu2 = nn.ReLU(True)
    self.max_pool2 = nn.MaxPool2d(3, 2)

    self.conv3 = curves.Conv2d(64, 128, kernel_size=5, padding=2,
                               fix_points=fix_points)
    self.relu3 = nn.ReLU(True)
    self.max_pool3 = nn.MaxPool2d(3, 2)

    self.fc4 = curves.Linear(1152, 1000, fix_points=fix_points)
    self.relu4 = nn.ReLU(True)
    self.fc5 = curves.Linear(1000, 1000, fix_points=fix_points)
    self.relu5 = nn.ReLU(True)
    self.fc6 = curves.Linear(1000, num_classes, fix_points=fix_points)

    # Initialize weights: He-normal conv weights and zero biases for each bend
    for m in self.modules():
        if isinstance(m, curves.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            for i in range(m.num_bends):
                getattr(m, 'weight_%d' % i).data.normal_(0, math.sqrt(2. / n))
                getattr(m, 'bias_%d' % i).data.zero_()
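# The fc4 input width of 1152 is consistent with 32x32 inputs: each 5x5 conv
# with padding 2 preserves the spatial size and each 3x3/stride-2 max-pool
# shrinks it 32 -> 15 -> 7 -> 3, giving 128 * 3 * 3 = 1152 flattened features.
# The construction below is an illustrative sketch.
example_convfc = ConvFCCurve(num_classes=10, fix_points=[True, False, True])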
                             [2500., 5.0]]))

# Motor degradation curve
data = np.load('gp_data.npy')
x, y = data[:, 0, None], data[:, 1, None]
failurecurve = curves.WarpedGP(x, y, warping_terms=2)
initial_degradation = 1

# PDM
pdm = tools.PDM(rpmcurve, torquecurve, failurecurve, initial_degradation,
                1 / 1500 / 25)

# Drill string
drillstring = tools.DrillString(pdm, bit)

# Cost function maintenance
cost_maint = curves.Linear(np.array([[0., 4.],
                                     [4000., 20.]]))

# Geology
rock1 = geology.Rock(315, 68.6, 50, 0.93, 33, 0.65)
rock2 = geology.Rock(278, 330, 125, 0.48, 157, 0.98)
geo = geology.Geology({0: rock1, 800: rock2})

# Enumeration
algo = algorithms.enum
# Boundary heuristic
# algo = algorithms.boundary_heuristic
# No-degradation heuristic
# algo = algorithms.no_degradation_start_heuristic
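# Reading of the maintenance-cost curve above, assuming curves.Linear in this
# codebase interpolates between its (x, y) breakpoints: the cost rises
# linearly from 4 at x = 0 to 20 at x = 4000, so e.g. x = 2000 would give a
# cost of 12.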