@@ -99,7 +99,8 @@ def _execute(self):
             # wants the scheduler to pick a destination host, or a host was
             # specified but is not forcing it, so they want the scheduler
             # filters to run on the specified host, like a scheduler hint.
-            self.destination, self.dest_node, self.limits = self._find_destination()
+            self.destination, self.dest_node, self.limits = \
+                self._find_destination()
         else:
             # This is the case that the user specified the 'force' flag when
             # live migrating with a specific destination host so the scheduler
@@ -110,7 +111,7 @@ def _execute(self):
             self._check_destination_has_enough_memory()
             source_node, dest_node = (
                 self._check_compatible_with_source_hypervisor(
-                    self.destination))
+                    self.destination, self.dest_node))
             # TODO(mriedem): Call select_destinations() with a
             # skip_filters=True flag so the scheduler does the work of claiming
             # resources on the destination in Placement but still bypass the
@@ -317,7 +318,7 @@ def _check_destination_is_not_source(self):
                 instance_id=self.instance.uuid, host=self.destination)
 
     def _check_destination_has_enough_memory(self):
-        compute = self._get_compute_info(self.destination)
+        compute = self._get_compute_info(self.destination, self.dest_node)
         free_ram_mb = compute.free_ram_mb
         total_ram_mb = compute.memory_mb
         mem_inst = self.instance.memory_mb
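
The hunk above only changes how the destination ComputeNode is resolved; the rest of the memory check is untouched. For context, a minimal sketch of the RAM comparison this method goes on to perform (the allocation-ratio handling mirrors upstream Nova's RAMFilter-style math and is an assumption, not part of this diff):

```python
# Simplified sketch of the free-memory precheck, not the exact method body.
def has_enough_memory(free_ram_mb, total_ram_mb, ram_ratio, instance_mem_mb):
    # Capacity scaled by the allocation ratio, minus what is already
    # consumed on the destination node.
    avail = total_ram_mb * ram_ratio - (total_ram_mb - free_ram_mb)
    return bool(instance_mem_mb) and avail > instance_mem_mb

# Example: an 8 GiB node with 2 GiB free and ratio 1.0 cannot take a 4 GiB guest.
assert not has_enough_memory(2048, 8192, 1.0, 4096)
```
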
@@ -340,17 +341,19 @@ def _check_destination_has_enough_memory(self):
 
     def _get_compute_info(self, host, nodename=None):
         if not nodename:
-            return objects.ComputeNode.get_first_node_by_host_for_old_compat(
-                self.context, host)
+            nodes = objects.ComputeNodeList.get_all_by_host(self.context, host)
+            if len(nodes) != 1:
+                raise exception.ComputeHostNotFound(host=host)
+            return nodes[0]
 
         return objects.ComputeNode.get_by_host_and_nodename(
             self.context, host, nodename)
 
-    def _check_compatible_with_source_hypervisor(self, destination):
+    def _check_compatible_with_source_hypervisor(self, dest_host, dest_node):
         migration = self.migration
         source_info = self._get_compute_info(migration.source_compute,
                                              migration.source_node)
-        destination_info = self._get_compute_info(destination)
+        destination_info = self._get_compute_info(dest_host, dest_node)
 
         source_type = source_info.hypervisor_type
         destination_type = destination_info.hypervisor_type
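
With this change, a lookup by host alone no longer goes through the old `get_first_node_by_host_for_old_compat` shim: the host must map to exactly one compute node, otherwise `ComputeHostNotFound` is raised and callers are expected to pass an explicit nodename. A rough standalone model of that fallback rule (the names below are illustrative stand-ins, not Nova's objects API):

```python
# Illustrative model of the new lookup rule; `node_index` stands in for the
# ComputeNodeList / ComputeNode queries used in the real code.
class HostHasNoSingleNode(Exception):
    pass

def get_compute_node(node_index, host, nodename=None):
    if nodename is None:
        nodes = node_index.get(host, [])
        # Only unambiguous single-node hosts may omit the nodename.
        if len(nodes) != 1:
            raise HostHasNoSingleNode(host)
        return nodes[0]
    return next(n for n in node_index[host] if n["name"] == nodename)

nodes_by_host = {"cmp1": [{"name": "cmp1"}],
                 "ironic1": [{"name": "n1"}, {"name": "n2"}]}
assert get_compute_node(nodes_by_host, "cmp1")["name"] == "cmp1"
# get_compute_node(nodes_by_host, "ironic1") would raise HostHasNoSingleNode.
```
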
@@ -469,14 +472,12 @@ def _get_destination_cell_mapping(self):
                 reason=(_('Unable to determine in which cell '
                           'destination host %s lives.') % self.destination))
 
-    def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
+    def _get_request_spec_for_select_destinations(self):
         """Builds a RequestSpec that can be passed to select_destinations
 
         Used when calling the scheduler to pick a destination host for live
         migrating the instance.
 
-        :param attempted_hosts: List of host names to ignore in the scheduler.
-            This is generally at least seeded with the source host.
         :returns: nova.objects.RequestSpec object
         """
         # NOTE(fwiesel): In order to check the compatibility
@@ -530,14 +531,13 @@ def _get_request_spec_for_select_destinations(self, attempted_hosts=None):
 
     def _find_destination(self):
         # TODO(johngarbutt) this retry loop should be shared
-        attempted_hosts = [self.source]
-        request_spec = self._get_request_spec_for_select_destinations(
-            attempted_hosts)
+        attempted_nodes = [self.source_node]
+        request_spec = self._get_request_spec_for_select_destinations()
 
         host = None
         while host is None:
-            self._check_not_over_max_retries(attempted_hosts)
-            request_spec.ignore_hosts = attempted_hosts
+            self._check_not_over_max_retries(attempted_nodes)
+            request_spec.ignore_nodes = attempted_nodes
             try:
                 selection_lists = self.query_client.select_destinations(
                         self.context, request_spec, [self.instance.uuid],
@@ -546,6 +546,7 @@ def _find_destination(self):
                 # only one instance, and we don't care about any alternates.
                 selection = selection_lists[0][0]
                 host = selection.service_host
+                node = selection.nodename
             except messaging.RemoteError as ex:
                 # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
                 # scheduling algorithm is R-R, we can let other scheduler try.
@@ -568,17 +569,18 @@ def _find_destination(self):
                         self.context, self.report_client,
                         self.instance.pci_requests.requests, provider_mapping)
             try:
-                self._check_compatible_with_source_hypervisor(host)
+                self._check_compatible_with_source_hypervisor(host, node)
                 self._call_livem_checks_on_host(host, provider_mapping)
             except (exception.Invalid, exception.MigrationPreCheckError) as e:
-                LOG.debug("Skipping host: %(host)s because: %(e)s",
-                          {"host": host, "e": e})
-                attempted_hosts.append(host)
+                LOG.debug("Skipping node: %(host)s/%(node)s because: %(e)s",
+                          {"host": host, "node": node, "e": e})
+                attempted_nodes.append(node)
                 # The scheduler would have created allocations against the
                 # selected destination host in Placement, so we need to remove
                 # those before moving on.
                 self._remove_host_allocations(selection.compute_node_uuid)
                 host = None
+                node = None
         # TODO(artom) We should probably just return the whole selection object
         # at this point.
         return (selection.service_host, selection.nodename, selection.limits)
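
Taken together, the `_find_destination` hunks move the retry bookkeeping from hosts to nodes: the attempted list is seeded with the source node, `ignore_nodes` (rather than `ignore_hosts`) is fed back into the RequestSpec on each pass, and a rejected candidate contributes its nodename before its allocations are removed. A condensed sketch of the resulting loop shape (the helpers below are placeholders for the scheduler client and the live-migration prechecks, not the real API):

```python
# Condensed shape of the node-based retry loop; pick_candidate and
# run_prechecks stand in for select_destinations() and the prechecks.
def find_destination(source_node, pick_candidate, run_prechecks, max_retries):
    attempted_nodes = [source_node]          # seeded with the source node
    while True:
        if len(attempted_nodes) - 1 > max_retries:
            raise RuntimeError("exceeded max scheduling retries")
        host, node = pick_candidate(ignore_nodes=attempted_nodes)
        try:
            run_prechecks(host, node)
            return host, node
        except ValueError:
            # Reject this node and let the scheduler pick another one.
            attempted_nodes.append(node)
```
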
@@ -595,11 +597,11 @@ def _remove_host_allocations(self, compute_node_uuid):
         self.report_client.remove_provider_tree_from_instance_allocation(
             self.context, self.instance.uuid, compute_node_uuid)
 
-    def _check_not_over_max_retries(self, attempted_hosts):
+    def _check_not_over_max_retries(self, attempted_nodes):
         if CONF.migrate_max_retries == -1:
             return
 
-        retries = len(attempted_hosts) - 1
+        retries = len(attempted_nodes) - 1
         if retries > CONF.migrate_max_retries:
             if self.migration:
                 self.migration.status = 'failed'
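
Because the attempted list always contains the source node, `retries` is `len(attempted_nodes) - 1`, i.e. the number of scheduler-selected nodes actually tried; `-1` keeps its usual meaning of unlimited retries. A quick worked check of that accounting (the concrete values are only an example):

```python
# Worked example of the retry accounting, assuming migrate_max_retries = 2.
migrate_max_retries = 2
attempted_nodes = ["src-node", "cand-a", "cand-b", "cand-c"]
retries = len(attempted_nodes) - 1   # the source node does not count as a retry
assert retries == 3
assert retries > migrate_max_retries  # a fourth candidate is never requested
```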