def tearDown(self):
    """Delete the GameScore and Game objects created by the test.

    Uses ``is not None`` rather than truthiness so that falsy-but-valid
    values (a score of 0, an empty name) are still cleaned up.
    """
    game_score = getattr(self.score1, 'score', None)
    game_name = getattr(self.game, 'name', None)
    if game_score is not None:
        ParseBatcher().batch_delete(
            GameScore.Query.filter(score=game_score))
    if game_name is not None:
        ParseBatcher().batch_delete(Game.Query.filter(name=game_name))
def tearDown(self):
    """Delete the City, GameScore and CollectedItem fixtures.

    Uses ``is not None`` rather than truthiness so that falsy-but-valid
    values (a score of 0, an empty name/type string) are still cleaned up.
    """
    city_name = getattr(self.sao_paulo, 'name', None)
    game_score = getattr(self.score, 'score', None)
    collected_item_type = getattr(self.collected_item, 'type', None)
    if city_name is not None:
        ParseBatcher().batch_delete(City.Query.filter(name=city_name))
    if game_score is not None:
        ParseBatcher().batch_delete(
            GameScore.Query.filter(score=game_score))
    if collected_item_type is not None:
        ParseBatcher().batch_delete(
            CollectedItem.Query.filter(type=collected_item_type))
def setUpClass(cls):
    """save a bunch of GameScore objects with varying scores"""
    # Wipe any leftovers from earlier runs so counts start from zero.
    ParseBatcher().batch_delete(GameScore.Query.all())
    ParseBatcher().batch_delete(Game.Query.all())

    cls.game = Game(title="Candyland", creator=None)
    cls.game.save()

    # Five scores, 1 through 5, all attached to the same game.
    cls.scores = [
        GameScore(score=points, player_name='John Doe', game=cls.game)
        for points in range(1, 6)
    ]
    ParseBatcher().batch_save(cls.scores)
def testBatch(self):
    """test saving, updating and deleting objects in batches"""
    batcher = ParseBatcher()
    jane_scores = [
        GameScore(score=value, player_name='Jane', cheat_mode=False)
        for value in range(5)
    ]

    # -- creation --
    batcher.batch_save(jane_scores)
    self.assertEqual(
        GameScore.Query.filter(player_name='Jane').count(), 5,
        "batch_save didn't create objects")
    self.assertTrue(
        all(score.objectId is not None for score in jane_scores),
        "batch_save didn't record object IDs")

    # -- update: bump every score by 10 and re-save --
    for score in jane_scores:
        score.score += 10
    batcher.batch_save(jane_scores)
    fetched = GameScore.Query.filter(player_name='Jane')
    self.assertEqual(
        sorted(score.score for score in fetched),
        list(range(10, 15)),
        msg="batch_save didn't update objects")

    # -- deletion --
    batcher.batch_delete(jane_scores)
    self.assertEqual(
        GameScore.Query.filter(player_name='Jane').count(), 0,
        "batch_delete didn't delete objects")
def testCanBatchUpdate(self):
    """A logged-in user updated through a batch save should persist the
    new field and get a fresh updatedAt timestamp."""
    user = self._get_logged_user()
    phone_number = "555-0134"
    previous_updatedAt = user.updatedAt

    user.phone = phone_number
    ParseBatcher().batch_save([user])

    self.assertTrue(
        User.Query.filter(phone=phone_number).exists(),
        'Failed to batch update user data. New info not on Parse')
    self.assertNotEqual(
        user.updatedAt, previous_updatedAt,
        'Failed to batch update user data: updatedAt not changed')
def testRelations(self):
    """Make some maps, make a Game Mode that has many maps, find all
    maps given a Game Mode"""
    # Create four maps: "map a" .. "map d".
    maps = []
    for suffix in ['a', 'b', 'c', 'd']:
        maps.append(GameMap(name="map " + suffix))
    ParseBatcher().batch_save(maps)

    mode = GameMode(name='test mode')
    mode.save()
    mode.addRelation("maps", GameMap.__name__,
                     [game_map.objectId for game_map in maps])

    all_modes = GameMode.Query.all()
    self.assertEqual(len(all_modes), 1)
    found_mode = all_modes[0]

    # Query back through the relation and expect all four maps.
    maps_for_mode = GameMap.Query.filter(maps__relatedTo=found_mode)
    self.assertEqual(len(maps_for_mode), 4)

    # Clean up.
    mode.delete()
    ParseBatcher().batch_delete(maps)
def tearDown(self):
    # Remove every Review object the test created.
    all_reviews = Review.Query.all()
    ParseBatcher().batch_delete(all_reviews)
def tearDown(self):
    '''Delete additional helper objects created in particular tests.'''
    if not self.test_objects:
        return
    ParseBatcher().batch_delete(self.test_objects)
    self.test_objects = []
def tearDownClass(cls):
    '''delete all GameScore and Game objects'''
    # One batch call covering the scores plus the parent game.
    leftovers = list(cls.scores) + [cls.game]
    ParseBatcher().batch_delete(leftovers)
def delete(self, id, record=False, forced=False):
    """ Delete a cluster instance

    Clean containers, remove db entry. Only operate on active host.

    :param id: id of the cluster to delete
    :param record: Whether to record into the released collections
    :param forced: Whether to removing user-using cluster, for release
    :return: True on success, False on any failure path
    """
    logger.debug("Delete cluster: id={}, forced={}".format(id, forced))

    # Look up the cluster; bail out early if it does not exist.
    try:
        cluster = ClusterModel.Query.get(id=id)
    except Exception:
        logger.warning("Cannot find cluster {}".format(id))
        return False

    # Mark the cluster as DELETING first (after=False returns the
    # pre-update document, so `c` still carries the original fields).
    c = self.db_update_one({"id": id},
                           {"status": NETWORK_STATUS_DELETING},
                           after=False)
    # we are safe from occasional applying now
    user_id = c.user_id  # original user_id
    if not forced and user_id and user_id != "":
        # not forced, and chain is used by normal user, then no process
        # Roll the record back to the owning user and refuse deletion.
        logger.warning("Cannot delete cluster {} by "
                       "user {}".format(id, user_id))
        cluster = ClusterModel(objectId=cluster.objectId,
                               user_id=user_id)
        cluster.save()
        return False
    else:
        # Persist the DELETING status on the Parse-side object too.
        cluster = ClusterModel(objectId=cluster.objectId,
                               status=NETWORK_STATUS_DELETING)
        cluster.save()

    # Pull the fields needed for teardown, substituting the pre-v1
    # defaults when the stored record has empty values.
    host_id, worker_api, network_type, consensus_plugin, cluster_size = \
        str(c.host.id), c.worker_api, \
        c.network_type if c.network_type else NETWORK_TYPE_FABRIC_PRE_V1, \
        c.consensus_plugin if c.consensus_plugin else \
        CONSENSUS_PLUGINS_FABRIC_V1[0], \
        c.size if c.size else NETWORK_SIZE_FABRIC_PRE_V1[0]
    # port = api_url.split(":")[-1] or CLUSTER_PORT_START

    # Deletion only proceeds on an active host; otherwise restore the
    # user binding and give up.
    h = self.host_handler.get_active_host_by_id(host_id)
    if not h:
        logger.warning("Host {} inactive".format(host_id))
        cluster = ClusterModel(objectId=cluster.objectId,
                               user_id=user_id)
        cluster.save()
        return False

    # Build the network config matching the cluster's fabric version;
    # unknown network types are rejected.
    if network_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
    elif network_type == NETWORK_TYPE_FABRIC_V1_1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
        config.network_type = NETWORK_TYPE_FABRIC_V1_1
    elif network_type == NETWORK_TYPE_FABRIC_V1_2:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
        config.network_type = NETWORK_TYPE_FABRIC_V1_2
    elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(consensus_plugin=consensus_plugin,
                                        consensus_mode='',
                                        size=cluster_size)
    else:
        return False

    config.update({"env": c.env})

    # Delegate the actual container/compose teardown to the agent
    # registered for this host type.
    delete_result = self.cluster_agents[h.type].delete(
        id, worker_api, config)
    if not delete_result:
        logger.warning("Error to run compose clean work")
        cluster = ClusterModel(objectId=cluster.objectId,
                               user_id=user_id)
        cluster.save()
        return False

    # remove cluster info from host
    logger.info("remove cluster from host, cluster:{}".format(id))
    # h.update(pull__clusters=id)
    # h = HostModel(objectId=h.objectId)

    # Remove the db entry plus its dependent ServicePort and Container
    # records, batched for fewer round-trips.
    c.delete()
    service_ports = ServicePort.Query.filter(cluster=c.as_pointer)
    batcher = ParseBatcher()
    batcher.batch_delete(service_ports)
    containers = Container.Query.filter(cluster=c.as_pointer)
    batcher.batch_delete(containers)
    return True