OpenStack Instance Cold Migration: A Brief Code Walkthrough

  Not long ago we went through OpenStack's live migration code and gave it a simple analysis. Really, a very simple one. Now that the weather has turned cold, let's suit the season and take a quick look at the instance cold migration code.

  As usual, I will skip the Horizon front-end code and go straight to the back-end implementation. The front end's request action lands in nova/api/openstack/compute/contrib/admin_actions.py. The code is as follows:

@wsgi.action('migrate')
def _migrate(self, req, id, body):
    """Permit admins to migrate a server to a new host."""
    context = req.environ['nova.context']
    authorize(context, 'migrate')
    try:
        instance = self.compute_api.get(context, id, want_objects=True)
        self.compute_api.resize(req.environ['nova.context'], instance)
    except exception.QuotaError as error:
        raise exc.HTTPRequestEntityTooLarge(
            explanation=error.format_message(),
            headers={'Retry-After': 0})
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                'migrate')
    except exception.InstanceNotFound as e:
        raise exc.HTTPNotFound(explanation=e.format_message())
    except Exception as e:
        LOG.exception(_("Error in migrate %s"), e)
        raise exc.HTTPBadRequest()
    return webob.Response(status_int=202)
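
  Before diving deeper, here is roughly what triggers this handler from the client side. This is only a sketch with python-novaclient; the credentials, endpoint, and IDs below are placeholders of mine, not values from nova:

# The raw API request is:
#     POST /v2/{tenant_id}/servers/{server_id}/action
#     body: {"migrate": null}
# and the "migrate" key in the body is what @wsgi.action('migrate')
# dispatches on. The same call through python-novaclient:
from novaclient import client

nova = client.Client('2', 'admin', 'secret', 'admin',
                     'http://controller:5000/v2.0')  # placeholders
server_id = '11111111-2222-3333-4444-555555555555'   # placeholder
nova.servers.migrate(server_id)        # no flavor: cold-migrate, keep flavor
# nova.servers.resize(server_id, '2')  # with a flavor id: a real resize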

  First, the handler fetches the instance with compute_api.get(); the line right after it does the real work. And here comes the surprise: cold migration calls the very same method as resize!!! Let's follow it right away into nova/compute/api.py:

@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED],
                      task_state=[None])
def resize(self, context, instance, flavor_id=None,
           **extra_instance_updates):
    """If flavor_id is None, the process is treated as a migration and
    the original flavor_id is kept; if flavor_id is given, the instance
    is migrated to a new host and resized to the new flavor_id.
    """
    self._check_auto_disk_config(instance, **extra_instance_updates)

    current_instance_type = flavors.extract_flavor(instance)
    # If there is no flavor_id, only migrate the instance.
    if not flavor_id:
        LOG.debug(_("flavor_id is None. Assuming migration."),
                  instance=instance)
        new_instance_type = current_instance_type
    else:
        new_instance_type = flavors.get_flavor_by_flavor_id(
                flavor_id, read_deleted="no")

    current_instance_type_name = current_instance_type['name']
    new_instance_type_name = new_instance_type['name']
    LOG.debug(_("Old instance type %(current_instance_type_name)s, "
                " new instance type %(new_instance_type_name)s"),
              {'current_instance_type_name': current_instance_type_name,
               'new_instance_type_name': new_instance_type_name},
              instance=instance)

    if not new_instance_type:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    same_instance_type = (current_instance_type['id'] ==
                          new_instance_type['id'])

    # Check the flavor; don't force users to change it.
    if not same_instance_type and new_instance_type.get('disabled'):
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    if same_instance_type and flavor_id and self.cell_type != 'compute':
        raise exception.CannotResizeToSameFlavor()

    # Check that there is enough quota headroom for the upsize.
    deltas = self._upsize_quota_delta(context, new_instance_type,
                                      current_instance_type)
    try:
        project_id, user_id = quotas_obj.ids_from_instance(context,
                                                           instance)
        quotas = self._reserve_quota_delta(context, deltas,
                                           project_id=project_id)
    except exception.OverQuota as exc:
        quotas = exc.kwargs['quotas']
        overs = exc.kwargs['overs']
        headroom = exc.kwargs['headroom']

        resource = overs[0]
        used = quotas[resource] - headroom[resource]
        total_allowed = used + headroom[resource]
        overs = ','.join(overs)
        LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                   " tried to resize instance."),
                 {'overs': overs, 'pid': context.project_id})
        raise exception.TooManyInstances(overs=overs,
                                         req=deltas[resource],
                                         used=used, allowed=total_allowed,
                                         resource=resource)

    instance.task_state = task_states.RESIZE_PREP
    instance.progress = 0
    instance.update(extra_instance_updates)
    instance.save(expected_task_state=[None])

    filter_properties = {'ignore_hosts': []}

    if not CONF.allow_resize_to_same_host:
        filter_properties['ignore_hosts'].append(instance['host'])

    # Check whether a flavor_id was given; without one this is a plain
    # migration, so optionally keep the instance off its current host.
    if (not flavor_id and not CONF.allow_migrate_to_same_host):
        filter_properties['ignore_hosts'].append(instance['host'])

    if self.cell_type == 'api':
        # Create a migration record.
        self._resize_cells_support(context, quotas, instance,
                                   current_instance_type,
                                   new_instance_type)

    self._record_action_start(context, instance, instance_actions.RESIZE)

    scheduler_hint = {'filter_properties': filter_properties}
    self.compute_task_api.resize_instance(context, instance,
            extra_instance_updates, scheduler_hint=scheduler_hint,
            flavor=new_instance_type,
            reservations=quotas.reservations or [])
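
  Those decorators above resize() are worth a second look. Here is a simplified, self-contained sketch of how a check_instance_state-style decorator guards a method; it is close in spirit to, but not literally, nova's implementation:

import functools


class InstanceInvalidState(Exception):
    pass


def check_instance_state(vm_state=None, task_state=None):
    """Reject the call unless the instance is in an allowed state."""
    def outer(func):
        @functools.wraps(func)
        def inner(self, context, instance, *args, **kwargs):
            if vm_state is not None and instance['vm_state'] not in vm_state:
                raise InstanceInvalidState(
                    '%s: vm_state %s not in %s' %
                    (func.__name__, instance['vm_state'], vm_state))
            if task_state is not None and \
                    instance['task_state'] not in task_state:
                raise InstanceInvalidState(
                    '%s: task_state %s not in %s' %
                    (func.__name__, instance['task_state'], task_state))
            return func(self, context, instance, *args, **kwargs)
        return inner
    return outer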

  Some of the analysis is written directly as comments in the code. The decorators above resize() perform checks on things like instance state and locking; at the very end the method calls into the conductor API, executing resize_instance(). Head over to nova/conductor/api.py and find resize_instance():

def resize_instance(self, context, instance, extra_instance_updates,
                    scheduler_hint, flavor, reservations):
    # The 'extra_instance_updates' parameter is not used here, but it
    # is kept so that this method stays compatible with the various
    # versions of cells_rpcapi.
    self._manager.migrate_server(
        context, instance, scheduler_hint, False, False, flavor,
        None, None, reservations)

  There is not much to say about this method; it directly calls the migrate_server() method in nova/conductor/manager.py:

def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None):
    if instance and not isinstance(instance, instance_obj.Instance):
        # Instances in the old dict format are still supported for
        # RPC API versions prior to v2.
        attrs = ['metadata', 'system_metadata', 'info_cache',
                 'security_groups']
        instance = instance_obj.Instance._from_db_object(
            context, instance_obj.Instance(), instance,
            expected_attrs=attrs)
    if live and not rebuild and not flavor:
        self._live_migrate(context, instance, scheduler_hint,
                           block_migration, disk_over_commit)
    elif not live and not rebuild and flavor:
        instance_uuid = instance['uuid']
        with compute_utils.EventReporter(context, self.db,
                                         'cold_migrate', instance_uuid):
            self._cold_migrate(context, instance, flavor,
                               scheduler_hint['filter_properties'],
                               reservations)
    else:
        raise NotImplementedError()
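
  The dispatch in migrate_server() hinges on three flags. A toy reproduction of the branch logic, to make the bare positional False, False arguments passed by resize_instance() above readable:

def migrate_server_dispatch(live, rebuild, flavor):
    """Toy version of migrate_server()'s branching, nothing more."""
    if live and not rebuild and not flavor:
        return 'live migration'            # -> _live_migrate()
    elif not live and not rebuild and flavor:
        return 'cold migration / resize'   # -> _cold_migrate()
    raise NotImplementedError()


# resize_instance() above passed live=False, rebuild=False and a flavor,
# so a cold migration always lands in the second branch:
assert migrate_server_dispatch(False, False, {'id': 1}) == \
    'cold migration / resize'
assert migrate_server_dispatch(True, False, None) == 'live migration'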

  The first branch should look familiar: we already saw it in the earlier post "OpenStack之虚机热迁移代码解析" (the live migration walkthrough), and it is indeed the path that performs live migration. The elif branch is the cold migration we are covering this time; its body calls the cold migration function _cold_migrate(). The code is as follows:

def _cold_migrate(self, context, instance, flavor, filter_properties,
                  reservations):
    image_ref = instance.image_ref
    image = compute_utils.get_image_metadata(
        context, self.image_service, image_ref, instance)

    request_spec = scheduler_utils.build_request_spec(
        context, image, [instance], instance_type=flavor)

    quotas = quotas_obj.Quotas.from_reservations(context,
                                                 reservations,
                                                 instance=instance)
    try:
        hosts = self.scheduler_rpcapi.select_destinations(
            context, request_spec, filter_properties)
        host_state = hosts[0]
    except exception.NoValidHost as ex:
        vm_state = instance['vm_state']
        if not vm_state:
            vm_state = vm_states.ACTIVE
        updates = {'vm_state': vm_state, 'task_state': None}
        self._set_vm_state_and_notify(context, 'migrate_server',
                                      updates, ex, request_spec)
        quotas.rollback()

        LOG.warning(_("No valid host found for cold migrate"),
                    instance=instance)
        return

    try:
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        # The context is not serializable.
        filter_properties.pop('context', None)

        # The instance_type in the request_spec built by
        # compute.api.resize has no 'extra_specs', so remove it here
        # for backwards compatibility.
        request_spec['instance_type'].pop('extra_specs')

        (host, node) = (host_state['host'], host_state['nodename'])
        self.compute_rpcapi.prep_resize(
            context, image, instance,
            flavor, host,
            reservations, request_spec=request_spec,
            filter_properties=filter_properties, node=node)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            updates = {'vm_state': instance['vm_state'],
                       'task_state': None}
            self._set_vm_state_and_notify(context, 'migrate_server',
                                          updates, ex, request_spec)
            quotas.rollback()
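
  The select_destinations() call above is where the scheduler picks the target host. nova's real scheduler is far richer, but the filter-and-weigh idea behind it boils down to something like this self-contained toy:

class NoValidHost(Exception):
    pass


def select_destination(hosts, spec, filters, weighers):
    """Toy filter-and-weigh host selection (not nova's actual code)."""
    # filtering: drop any host that fails a filter, e.g. hosts listed
    # in filter_properties['ignore_hosts'] or without room for the flavor
    candidates = [h for h in hosts if all(f(h, spec) for f in filters)]
    if not candidates:
        raise NoValidHost()
    # weighing: rank the survivors and return the best one
    return max(candidates, key=lambda h: sum(w(h) for w in weighers))


hosts = [{'host': 'node1', 'free_ram_mb': 4096},
         {'host': 'node2', 'free_ram_mb': 8192}]
spec = {'ignore_hosts': ['node1'], 'memory_mb': 2048}
filters = [lambda h, s: h['host'] not in s['ignore_hosts'],
           lambda h, s: h['free_ram_mb'] >= s['memory_mb']]
weighers = [lambda h: h['free_ram_mb']]
print(select_destination(hosts, spec, filters, weighers))  # -> node2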

  The first few lines gather some parameters, then the scheduler automatically picks the host best suited for the migration. Once a host is chosen, along with the parameters from before, the cold migration proceeds via prep_resize(): first the RPC stub in nova/compute/rpcapi.py, then the handler in nova/compute/manager.py:

def prep_resize(self, ctxt, image, instance, instance_type, host,
                reservations=None, request_spec=None,
                filter_properties=None, node=None):
    version = self._get_compat_version('3.0', '2.43')
    instance_type_p = jsonutils.to_primitive(instance_type)
    image_p = jsonutils.to_primitive(image)
    cctxt = self.client.prepare(server=host, version=version)
    cctxt.cast(ctxt, 'prep_resize',
               instance=instance,
               instance_type=instance_type_p,
               image=image_p, reservations=reservations,
               request_spec=request_spec,
               filter_properties=filter_properties,
               node=node)
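
  Note the cctxt.cast(...) here: a cast is a fire-and-forget RPC with no return value, which is why the API already answered 202 while the compute node still has all the work ahead of it. Below is a minimal sketch of the same pattern with oslo.messaging; the transport URL, topic, and host are placeholders, and the nova code of this era imported the library slightly differently:

import oslo_messaging as messaging
from oslo_config import cfg

# placeholders: a real deployment reads the transport from nova.conf
transport = messaging.get_transport(
    cfg.CONF, url='rabbit://guest:guest@controller:5672/')
target = messaging.Target(topic='compute', version='3.0')
client = messaging.RPCClient(transport, target)

# prepare() pins the message to one compute host; cast() queues it and
# returns immediately, so there is nothing to wait for.
cctxt = client.prepare(server='compute-node-1', version='3.0')
cctxt.cast({}, 'prep_resize', instance={'uuid': 'placeholder'},
           node='compute-node-1')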
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
                reservations, request_spec, filter_properties, node):
    """Begin preparation for migrating the instance to another host."""
    if node is None:
        node = self.driver.get_available_nodes(refresh=True)[0]
        LOG.debug(_("No node specified, defaulting to %s"), node,
                  instance=instance)

    with self._error_out_instance_on_exception(context, instance['uuid'],
                                               reservations):
        self.conductor_api.notify_usage_exists(
            context, instance, current_period=True)
        self._notify_about_instance_usage(
            context, instance, "resize.prep.start")
        try:
            self._prep_resize(context, image, instance,
                              instance_type, reservations,
                              request_spec, filter_properties,
                              node)
        except Exception:
            exc_info = sys.exc_info()
            self._reschedule_resize_or_reraise(context, image, instance,
                    exc_info, instance_type, reservations, request_spec,
                    filter_properties)
        finally:
            extra_usage_info = dict(
                new_instance_type=instance_type['name'],
                new_instance_type_id=instance_type['id'])

            self._notify_about_instance_usage(
                context, instance, "resize.prep.end",
                extra_usage_info=extra_usage_info)
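
  The with self._error_out_instance_on_exception(...) wrapper is a pattern worth noting: anything that raises inside the block flips the instance to ERROR and rolls back the quota reservations before re-raising. A simplified sketch of the idea, where rollback_quota() and set_instance_error_state() are hypothetical stand-ins for nova's internals:

import contextlib


@contextlib.contextmanager
def error_out_instance_on_exception(context, instance_uuid,
                                    reservations=None):
    """Simplified sketch of the pattern used by prep_resize()."""
    try:
        yield
    except Exception:
        if reservations:
            rollback_quota(context, reservations)   # hypothetical helper
        set_instance_error_state(context, instance_uuid)  # hypothetical
        raise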

  It gathers the various parameters, checks available capacity and the existence of the source and destination hosts, then calls the _prep_resize() method:

def _prep_resize(self, context, image, instance, instance_type,
                 reservations, request_spec, filter_properties, node):

    if not filter_properties:
        filter_properties = {}

    if not instance['host']:
        self._set_instance_error_state(context, instance['uuid'])
        msg = _('Instance has no source host')
        raise exception.MigrationError(msg)

    same_host = instance['host'] == self.host
    if same_host and not CONF.allow_resize_to_same_host:
        self._set_instance_error_state(context, instance['uuid'])
        msg = _('destination same as source!')
        raise exception.MigrationError(msg)

    # Stash the new instance_type.
    sys_meta = instance.system_metadata
    flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')

    # Stash the current vm_state so it can be restored after the
    # migration.
    vm_state = instance['vm_state']
    LOG.debug(_('Stashing vm_state: %s'), vm_state, instance=instance)
    sys_meta['old_vm_state'] = vm_state
    instance.save()

    limits = filter_properties.get('limits', {})
    rt = self._get_resource_tracker(node)
    with rt.resize_claim(context, instance, instance_type,
                         limits=limits) as claim:
        LOG.audit(_('Migrating'), context=context, instance=instance)
        self.compute_rpcapi.resize_instance(context, instance,
                claim.migration, image, instance_type, reservations)

  Finally, inside the resize_claim block, resize_instance() is called: again the RPC stub in nova/compute/rpcapi.py, then the handler in nova/compute/manager.py:

def resize_instance(self, ctxt, instance, migration, image, instance_type,
                    reservations=None):
    version = self._get_compat_version('3.0', '2.45')
    instance_type_p = jsonutils.to_primitive(instance_type)
    cctxt = self.client.prepare(server=_compute_host(None, instance),
                                version=version)
    cctxt.cast(ctxt, 'resize_instance',
               instance=instance, migration=migration,
               image=image, reservations=reservations,
               instance_type=instance_type_p)
def resize_instance(self, context, instance, image,
                    reservations, migration, instance_type):
    """Starts the migration of the instance to another host."""
    with self._error_out_instance_on_exception(context, instance.uuid,
                                               reservations):
        if not instance_type:
            instance_type = flavor_obj.Flavor.get_by_id(
                context, migration['new_instance_type_id'])

        network_info = self._get_instance_nw_info(context, instance)

        migration.status = 'migrating'
        migration.save(context.elevated())

        instance.task_state = task_states.RESIZE_MIGRATING
        instance.save(expected_task_state=task_states.RESIZE_PREP)

        self._notify_about_instance_usage(
            context, instance, "resize.start", network_info=network_info)

        bdms = (block_device_obj.BlockDeviceMappingList.
                get_by_instance_uuid(context, instance.uuid))
        block_device_info = self._get_instance_volume_block_device_info(
            context, instance, bdms=bdms)

        disk_info = self.driver.migrate_disk_and_power_off(
            context, instance, migration.dest_host,
            instance_type, network_info,
            block_device_info)

        self._terminate_volume_connections(context, instance, bdms)

        migration_p = obj_base.obj_to_primitive(migration)
        instance_p = obj_base.obj_to_primitive(instance)
        self.conductor_api.network_migrate_instance_start(context,
                                                          instance_p,
                                                          migration_p)

        migration.status = 'post-migrating'
        migration.save(context.elevated())

        instance.host = migration.dest_compute
        instance.node = migration.dest_node
        instance.task_state = task_states.RESIZE_MIGRATED
        instance.save(expected_task_state=task_states.RESIZE_MIGRATING)

        self.compute_rpcapi.finish_resize(context, instance,
                migration, image, disk_info,
                migration.dest_compute, reservations=reservations)

        self._notify_about_instance_usage(context, instance, "resize.end",
                                          network_info=network_info)
        self.instance_events.clear_events_for_instance(instance)
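
  The heavy lifting in the middle is driver.migrate_disk_and_power_off(). For the libvirt driver this powers the guest off and ships its disk files to the destination; here is a heavily simplified sketch of the idea (the real driver also deals with shared storage, qcow2 backing files, and attached volumes):

import os
import subprocess


def migrate_disk_and_power_off(domain_name, instance_dir, dest_host):
    """Heavily simplified sketch of the libvirt driver's behaviour."""
    # 1. power the guest off (the real code goes through libvirt)
    subprocess.check_call(['virsh', 'destroy', domain_name])
    # 2. rename the instance directory so a revert can restore it
    resize_dir = instance_dir + '_resize'
    os.rename(instance_dir, resize_dir)
    # 3. copy each disk file to the same path on the destination host
    subprocess.check_call(['ssh', dest_host, 'mkdir', '-p', instance_dir])
    for fname in os.listdir(resize_dir):
        subprocess.check_call(['scp', os.path.join(resize_dir, fname),
                               '%s:%s/' % (dest_host, instance_dir)])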

  After the various parameters are gathered, the instance's disk data is copied over via driver.migrate_disk_and_power_off(), and finally the finish_resize() method is invoked on the destination host:

@wrap_exception()
@reverts_task_state
@wrap_instance_event
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
                  reservations, migration):
    """Completes the migration process.

    Powers the instance on at its new host.
    """
    try:
        self._finish_resize(context, instance, migration,
                            disk_info, image)
        self._quota_commit(context, reservations)
    except Exception as error:
        LOG.exception(_('Setting instance vm_state to ERROR'),
                      instance=instance)
        with excutils.save_and_reraise_exception():
            try:
                self._quota_rollback(context, reservations)
            except Exception as qr_error:
                LOG.exception(_("Failed to rollback quota for failed "
                                "finish_resize: %s"),
                              qr_error, instance=instance)
            self._set_instance_error_state(context, instance['uuid'])

  It calls the _finish_resize() function to complete the migration:

def _finish_resize(self, context, instance, migration, disk_info,
                   image):
    resize_instance = False
    old_instance_type_id = migration['old_instance_type_id']
    new_instance_type_id = migration['new_instance_type_id']
    old_instance_type = flavors.extract_flavor(instance)
    sys_meta = instance.system_metadata
    # Fetch the vm_state stashed before the migration; default to
    # ACTIVE for backwards compatibility if it was never set.
    old_vm_state = sys_meta.get('old_vm_state', vm_states.ACTIVE)
    flavors.save_flavor_info(sys_meta,
                             old_instance_type,
                             prefix='old_')

    if old_instance_type_id != new_instance_type_id:
        instance_type = flavors.extract_flavor(instance, prefix='new_')
        flavors.save_flavor_info(sys_meta, instance_type)
        instance.instance_type_id = instance_type['id']
        instance.memory_mb = instance_type['memory_mb']
        instance.vcpus = instance_type['vcpus']
        instance.root_gb = instance_type['root_gb']
        instance.ephemeral_gb = instance_type['ephemeral_gb']
        instance.system_metadata = sys_meta
        instance.save()
        resize_instance = True

    # Set up networking on the destination host.
    self.network_api.setup_networks_on_host(context, instance,
                                            migration['dest_compute'])

    instance_p = obj_base.obj_to_primitive(instance)
    migration_p = obj_base.obj_to_primitive(migration)
    self.conductor_api.network_migrate_instance_finish(context,
                                                       instance_p,
                                                       migration_p)

    network_info = self._get_instance_nw_info(context, instance)

    instance.task_state = task_states.RESIZE_FINISH
    instance.system_metadata = sys_meta
    instance.save(expected_task_state=task_states.RESIZE_MIGRATED)

    self._notify_about_instance_usage(
        context, instance, "finish_resize.start",
        network_info=network_info)

    block_device_info = self._get_instance_volume_block_device_info(
        context, instance, refresh_conn_info=True)

    # If the instance's original state was STOPPED, do not power it
    # on after the migration.
    power_on = old_vm_state != vm_states.STOPPED
    self.driver.finish_migration(context, migration, instance,
                                 disk_info,
                                 network_info,
                                 image, resize_instance,
                                 block_device_info, power_on)

    migration.status = 'finished'
    migration.save(context.elevated())

    instance.vm_state = vm_states.RESIZED
    instance.task_state = None
    instance.launched_at = timeutils.utcnow()
    instance.save(expected_task_state=task_states.RESIZE_FINISH)

    self._notify_about_instance_usage(
        context, instance, "finish_resize.end",
        network_info=network_info)

  Once the migration work completes, control returns to the tail of resize_instance() on the source host:

self._notify_about_instance_usage(context, instance, "resize.end",
                                  network_info=network_info)
self.instance_events.clear_events_for_instance(instance)

  These lines do the final cleanup, and with that the cold migration proper is over. Strictly speaking, the instance is left in vm_state RESIZED (shown as VERIFY_RESIZE by the API) until the move is confirmed or reverted. As for the scheduler's selection policy and how the disk information is obtained, I will not dig into them here; this is only a brief walkthrough after all, so please allow me to be a little lazy~
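
  So if a migrated instance seems stuck in VERIFY_RESIZE, that is by design. Reusing the nova client object from the first sketch, confirming (or reverting) the move looks like this:

server_id = '11111111-2222-3333-4444-555555555555'  # placeholder
nova.servers.confirm_resize(server_id)   # accept the migration
# nova.servers.revert_resize(server_id)  # or send it back to the source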

PS: You are welcome to repost this article, but please credit the blog address and the author~

  Blog address: http://www.cnblogs.com/voidy/

  <。)#)))≦
