"""
High-level KVM test utility functions.
This module is meant to reduce code size by performing common test procedures.
Generally, code here should look like test code.
More specifically:
- Functions in this module should raise exceptions if things go wrong
(unlike functions in kvm_utils.py and qemu_vm.py which report failure via
their returned values).
- Functions in this module may use logging.info(), in addition to
logging.debug() and logging.error(), to log messages the user may be
interested in (unlike kvm_utils.py and qemu_vm.py which use
logging.debug() for anything that isn't an error).
- Functions in this module typically use functions and classes from
lower-level modules (e.g. kvm_utils.py, qemu_vm.py, kvm_subprocess.py).
- Functions in this module should not be used by lower-level modules.
- Functions in this module should be used in the right context.
For example, a function should not be used where it may display
misleading or inaccurate info or debug messages.
@copyright: 2008-2009 Red Hat Inc.
"""
import time, os, logging, re, signal, imp, tempfile, commands, errno, fcntl
import threading, shelve, socket, glob
from Queue import Queue
from autotest.client.shared import error
from autotest.client import utils, os_dep
from autotest.client.tools import scan_results
from autotest.client.shared.syncdata import SyncData, SyncListenServer
import aexpect, utils_misc, virt_vm, remote, storage, env_process
import virttest
try:
from autotest.client.shared import utils_cgroup
except ImportError:
# TODO: Obsolete path used prior to autotest-0.15.2/virttest-2013.06.24
from virttest import utils_cgroup
# Handle transition from autotest global_config (0.14.x series) to
# settings (0.15.x onwards)
try:
from autotest.client.shared import global_config
section_values = global_config.global_config.get_section_values
settings_value = global_config.global_config.get_config_value
except ImportError:
from autotest.client.shared.settings import settings
section_values = settings.get_section_values
settings_value = settings.get_value
def get_living_vm(env, vm_name):
"""
Get a VM object from the environment and make sure it's alive.
@param env: Dictionary with test environment.
@param vm_name: Name of the desired VM object.
@return: A VM object.
"""
vm = env.get_vm(vm_name)
if not vm:
raise error.TestError("VM '%s' not found in environment" % vm_name)
if not vm.is_alive():
raise error.TestError("VM '%s' seems to be dead; test requires a "
"living VM" % vm_name)
return vm
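# Hedged usage sketch (not part of the original module): fetching a live VM
# at the start of a test. The "main_vm" parameter name is the usual
# convention but is an assumption here.
def _example_get_living_vm(params, env):
    vm = get_living_vm(env, params.get("main_vm"))
    logging.info("Using VM %s for the test", vm.name)
    return vm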
def wait_for_login(vm, nic_index=0, timeout=240, start=0, step=2, serial=None):
"""
Try logging into a VM repeatedly. Stop on success or when timeout expires.
@param vm: VM object.
@param nic_index: Index of NIC to access in the VM.
@param timeout: Time to wait before giving up.
@param serial: Whether to use a serial connection instead of a remote
(ssh, rss) one.
@return: A shell session object.
"""
end_time = time.time() + timeout
session = None
if serial:
mode = 'serial'
logging.info("Trying to log into guest %s using serial connection,"
" timeout %ds", vm.name, timeout)
time.sleep(start)
while time.time() < end_time:
try:
session = vm.serial_login()
break
except remote.LoginError, e:
logging.debug(e)
time.sleep(step)
else:
mode = 'remote'
logging.info("Trying to log into guest %s using remote connection,"
" timeout %ds", vm.name, timeout)
time.sleep(start)
while time.time() < end_time:
try:
session = vm.login(nic_index=nic_index)
break
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
time.sleep(step)
if not session and vm.get_params().get("try_serial_login") == "yes":
mode = "serial"
logging.info("Remote login failed, trying to login '%s' with "
"serial, timeout %ds", vm.name, timeout)
time.sleep(start)
while time.time() < end_time:
try:
session = vm.serial_login()
break
except remote.LoginError, e:
logging.debug(e)
time.sleep(step)
if not session:
raise error.TestFail("Could not log into guest %s using %s connection" %
(vm.name, mode))
logging.info("Logged into guest %s using %s connection", vm.name, mode)
return session
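# Hedged usage sketch (assumption, not original code): log into a guest over
# the network and fall back to the serial console if that fails. Timeouts
# are illustrative.
def _example_login_with_serial_fallback(vm):
    try:
        return wait_for_login(vm, nic_index=0, timeout=240)
    except error.TestFail:
        logging.info("Network login failed, retrying over serial")
        return wait_for_login(vm, timeout=60, serial=True)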
def reboot(vm, session, method="shell", sleep_before_reset=10, nic_index=0,
timeout=240):
"""
Reboot the VM and wait for it to come back up by trying to log in until
timeout expires.
@param vm: VM object.
@param session: A shell session object.
@param method: Reboot method. Can be "shell" (send a shell reboot
command) or "system_reset" (send a system_reset monitor command).
@param nic_index: Index of NIC to access in the VM, when logging in after
rebooting.
@param timeout: Time to wait before giving up (after rebooting).
@return: A new shell session object.
"""
if method == "shell":
# Send a reboot command to the guest's shell
session.sendline(vm.get_params().get("reboot_command"))
logging.info("Reboot command sent. Waiting for guest to go down")
elif method == "system_reset":
# Sleep for a while before sending the command
time.sleep(sleep_before_reset)
# Clear the event list of all QMP monitors
monitors = [m for m in vm.monitors if m.protocol == "qmp"]
for m in monitors:
m.clear_events()
# Send a system_reset monitor command
vm.monitor.cmd("system_reset")
logging.info("Monitor command system_reset sent. Waiting for guest to "
"go down")
# Look for RESET QMP events
time.sleep(1)
for m in monitors:
if not m.get_event("RESET"):
raise error.TestFail("RESET QMP event not received after "
"system_reset (monitor '%s')" % m.name)
else:
logging.info("RESET QMP event received")
else:
logging.error("Unknown reboot method: %s", method)
# Wait for the session to become unresponsive and close it
if not utils_misc.wait_for(lambda: not session.is_responsive(timeout=30),
120, 0, 1):
raise error.TestFail("Guest refuses to go down")
session.close()
# Try logging into the guest until timeout expires
logging.info("Guest is down. Waiting for it to go up again, timeout %ds",
timeout)
session = vm.wait_for_login(nic_index, timeout=timeout)
logging.info("Guest is up again")
return session
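# Hedged usage sketch (assumption, not original code): reboot a guest first
# via its shell, then via the QEMU monitor, always continuing with the new
# session returned by reboot().
def _example_reboot_both_ways(vm, session):
    session = reboot(vm, session, method="shell", timeout=240)
    session = reboot(vm, session, method="system_reset", timeout=240)
    return session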
@error.context_aware
def update_boot_option(vm, args_removed=None, args_added=None,
need_reboot=True):
"""
Update guest default kernel option.
@param vm: The VM object.
@param args_removed: Kernel options to remove.
@param args_added: Kernel options to add.
@param need_reboot: Whether to reboot the VM after updating the options.
@raise error.TestError: Raised if the guest kernel cmdline cannot be updated.
"""
if vm.params.get("os_type") == 'windows':
# This function only supports Linux guests. To change a Windows guest's
# boot options, we could use a function like
# update_win_bootloader(args_removed, args_added, reboot),
# but it is not implemented, so here we just:
return
login_timeout = int(vm.params.get("login_timeout"))
session = vm.wait_for_login(timeout=login_timeout)
msg = "Update guest kernel cmdline. "
cmd = "grubby --update-kernel=`grubby --default-kernel` "
if args_removed is not None:
msg += " remove args: %s." % args_removed
cmd += '--remove-args="%s" ' % args_removed
if args_added is not None:
msg += " add args: %s" % args_added
cmd += '--args="%s"' % args_added
error.context(msg, logging.info)
s, o = session.cmd_status_output(cmd)
if s != 0:
logging.error(o)
raise error.TestError("Fail to modify guest kernel cmdline")
if need_reboot:
error.context("Rebooting guest ...", logging.info)
vm.reboot(session=session, timeout=login_timeout)
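# Hedged usage sketch (assumption, not original code): tweak a Linux guest's
# default kernel command line; the option strings below are only examples.
def _example_update_boot_option(vm):
    update_boot_option(vm, args_removed="quiet rhgb",
                       args_added="console=ttyS0,115200",
                       need_reboot=True)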
def migrate(vm, env=None, mig_timeout=3600, mig_protocol="tcp",
mig_cancel=False, offline=False, stable_check=False,
clean=False, save_path=None, dest_host='localhost', mig_port=None):
"""
Migrate a VM locally and re-register it in the environment.
@param vm: The VM to migrate.
@param env: The environment dictionary. If omitted, the migrated VM will
not be registered.
@param mig_timeout: timeout value for migration.
@param mig_protocol: migration protocol
@param mig_cancel: Test migrate_cancel or not when protocol is tcp.
@param dest_host: Destination host (defaults to 'localhost').
@param mig_port: Port that will be used for migration.
@return: The post-migration VM, in case of same host migration, True in
case of multi-host migration.
"""
def mig_finished():
try:
o = vm.monitor.info("migrate")
if isinstance(o, str):
return "status: active" not in o
else:
return o.get("status") != "active"
except Exception:
pass
def mig_succeeded():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return "status: completed" in o
else:
return o.get("status") == "completed"
def mig_failed():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return "status: failed" in o
else:
return o.get("status") == "failed"
def mig_cancelled():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return ("Migration status: cancelled" in o or
"Migration status: canceled" in o)
else:
return (o.get("status") == "cancelled" or
o.get("status") == "canceled")
def wait_for_migration():
if not utils_misc.wait_for(mig_finished, mig_timeout, 2, 2,
"Waiting for migration to finish"):
raise error.TestFail("Timeout expired while waiting for migration "
"to finish")
if dest_host == 'localhost':
dest_vm = vm.clone()
if (dest_host == 'localhost') and stable_check:
# Pause the dest vm after creation
dest_vm.params['extra_params'] = (dest_vm.params.get('extra_params','')
+ ' -S')
if dest_host == 'localhost':
dest_vm.create(migration_mode=mig_protocol, mac_source=vm)
try:
try:
if mig_protocol in [ "tcp", "rdma", "x-rdma" ]:
if dest_host == 'localhost':
uri = mig_protocol + ":0:%d" % dest_vm.migration_port
else:
uri = mig_protocol + ':%s:%d' % (dest_host, mig_port)
elif mig_protocol == "unix":
uri = "unix:%s" % dest_vm.migration_file
elif mig_protocol == "exec":
uri = '"exec:nc localhost %s"' % dest_vm.migration_port
if offline:
vm.pause()
vm.monitor.migrate(uri)
if mig_cancel:
time.sleep(2)
vm.monitor.cmd("migrate_cancel")
if not utils_misc.wait_for(mig_cancelled, 60, 2, 2,
"Waiting for migration "
"cancellation"):
raise error.TestFail("Failed to cancel migration")
if offline:
vm.resume()
if dest_host == 'localhost':
dest_vm.destroy(gracefully=False)
return vm
else:
wait_for_migration()
if (dest_host == 'localhost') and stable_check:
save_path = save_path or "/tmp"
save1 = os.path.join(save_path, "src")
save2 = os.path.join(save_path, "dst")
vm.save_to_file(save1)
dest_vm.save_to_file(save2)
# Fail if we see deltas
md5_save1 = utils.hash_file(save1)
md5_save2 = utils.hash_file(save2)
if md5_save1 != md5_save2:
raise error.TestFail("Mismatch of VM state before "
"and after migration")
if (dest_host == 'localhost') and offline:
dest_vm.resume()
except Exception:
if dest_host == 'localhost':
dest_vm.destroy()
raise
finally:
if (dest_host == 'localhost') and stable_check and clean:
logging.debug("Cleaning the state files")
if os.path.isfile(save1):
os.remove(save1)
if os.path.isfile(save2):
os.remove(save2)
# Report migration status
if mig_succeeded():
logging.info("Migration finished successfully")
elif mig_failed():
raise error.TestFail("Migration failed")
else:
status = vm.monitor.info("migrate")
raise error.TestFail("Migration ended with unknown status: %s" %
status)
if dest_host == 'localhost':
if dest_vm.monitor.verify_status("paused"):
logging.debug("Destination VM is paused, resuming it")
dest_vm.resume()
# Kill the source VM
vm.destroy(gracefully=False)
# Replace the source VM with the new cloned VM
if (dest_host == 'localhost') and (env is not None):
env.register_vm(vm.name, dest_vm)
# Return the new cloned VM
if dest_host == 'localhost':
return dest_vm
else:
return vm
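# Hedged usage sketch (assumption, not original code): a local TCP migration
# followed by a migrate_cancel run. Since migrate() replaces the source VM
# with the migrated clone, callers should keep using the returned object.
def _example_local_migration(vm, env):
    vm = migrate(vm, env=env, mig_timeout=3600, mig_protocol="tcp")
    vm = migrate(vm, env=env, mig_protocol="tcp", mig_cancel=True)
    return vm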
def guest_active(vm):
o = vm.monitor.info("status")
if isinstance(o, str):
return "status: running" in o
else:
if "status" in o:
return o.get("status") == "running"
else:
return o.get("running")
class MigrationData(object):
def __init__(self, params, srchost, dsthost, vms_name, params_append):
"""
Class that contains data needed for one migration.
"""
self.params = params.copy()
self.params.update(params_append)
self.source = False
if params.get("hostid") == srchost:
self.source = True
self.destination = False
if params.get("hostid") == dsthost:
self.destination = True
self.src = srchost
self.dst = dsthost
self.hosts = [srchost, dsthost]
self.mig_id = {'src': srchost, 'dst': dsthost, "vms": vms_name}
self.vms_name = vms_name
self.vms = []
self.vm_ports = None
def is_src(self):
"""
@return: True if host is source.
"""
return self.source
def is_dst(self):
"""
@return: True if host is destination.
"""
return self.destination
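# Hedged usage sketch (assumption, not original code): how MigrationData
# decides the local host's role; the host ids and vm names are illustrative.
def _example_migration_data_role(params):
    data = MigrationData(params, "host1", "host2", ["vm1"], {})
    if data.is_src():
        logging.info("This host is the migration source")
    elif data.is_dst():
        logging.info("This host is the migration destination")
    return data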
class MultihostMigration(object):
"""
Class that provides a framework for multi-host migration.
Migration can be run both synchronously and asynchronously.
To specify what is going to happen during the multi-host
migration, it is necessary to reimplement the method
migration_scenario. It is possible to start multiple migrations
in separate threads, since self.migrate is thread safe.
Only one test using the multihost migration framework should be
started on a machine at a time, otherwise the listen server port
would conflict between tests.
Multihost migration starts a SyncListenServer through which
all messages are transferred, since the hosts can
be in different states.
Class SyncData is used to transfer data over network or
synchronize the migration process. Synchronization sessions
are recognized by session_id.
It is important to note that, in order to have multi-host
migration, one needs shared guest image storage. The simplest
case is when the guest images are on an NFS server.
Example:
class TestMultihostMigration(utils_misc.MultihostMigration):
def __init__(self, test, params, env):
super(TestMultihostMigration, self).__init__(test, params, env)
def migration_scenario(self):
srchost = self.params.get("hosts")[0]
dsthost = self.params.get("hosts")[1]
def worker(mig_data):
vm = env.get_vm("vm1")
session = vm.wait_for_login(timeout=self.login_timeout)
session.sendline("nohup dd if=/dev/zero of=/dev/null &")
session.cmd("killall -0 dd")
def check_worker(mig_data):
vm = env.get_vm("vm1")
session = vm.wait_for_login(timeout=self.login_timeout)
session.cmd("killall -9 dd")
# Almost synchronized migration; wait for it to end.
# Work is started only on the first VM.
self.migrate_wait(["vm1", "vm2"], srchost, dsthost,
worker, check_worker)
# Migrations started in separate threads.
# This allows starting multiple migrations simultaneously.
mig1 = self.migrate(["vm1"], srchost, dsthost,
worker, check_worker)
mig2 = self.migrate(["vm2"], srchost, dsthost)
mig2.join()
mig1.join()
mig = TestMultihostMigration(test, params, env)
mig.run()
"""
def __init__(self, test, params, env, preprocess_env=True):
self.test = test
self.params = params
self.env = env
self.hosts = params.get("hosts")
self.hostid = params.get('hostid', "")
self.comm_port = int(params.get("comm_port", 13234))
vms_count = len(params["vms"].split())
self.login_timeout = int(params.get("login_timeout", 360))
self.disk_prepare_timeout = int(params.get("disk_prepare_timeout",
160 * vms_count))
self.finish_timeout = int(params.get("finish_timeout",
120 * vms_count))
self.new_params = None
if params.get("clone_master") == "yes":
self.clone_master = True
else:
self.clone_master = False
self.mig_timeout = int(params.get("mig_timeout"))
# Port used to communicate info between source and destination
self.regain_ip_cmd = params.get("regain_ip_cmd", None)
self.vm_lock = threading.Lock()
self.sync_server = None
if self.clone_master:
self.sync_server = SyncListenServer()
if preprocess_env:
self.preprocess_env()
self._hosts_barrier(self.hosts, self.hosts, 'disk_prepared',
self.disk_prepare_timeout)
def migration_scenario(self):
"""
The multi-host migration_scenario is started from the run() method,
where exceptions are checked. It is not necessary to take care of
cleanup after the test crashes or finishes.
"""
raise NotImplementedError
def post_migration(self, vm, cancel_delay, mig_offline, dsthost, vm_ports,
not_wait_for_migration, fd, mig_data):
pass
def migrate_vms_src(self, mig_data):
"""
Migrate vms on the source host.
@param mig_data: Data for migration.
To change the way the machine migrates, it is necessary to
reimplement this method.
"""
def mig_wrapper(vm, cancel_delay, dsthost, vm_ports,
not_wait_for_migration, mig_offline, mig_data):
vm.migrate(cancel_delay=cancel_delay, offline=mig_offline,
dest_host=dsthost, remote_port=vm_ports[vm.name],
not_wait_for_migration=not_wait_for_migration)
self.post_migration(vm, cancel_delay, mig_offline, dsthost,
vm_ports, not_wait_for_migration, None,
mig_data)
logging.info("Start migrating now...")
cancel_delay = mig_data.params.get("cancel_delay")
if cancel_delay is not None:
cancel_delay = int(cancel_delay)
not_wait_for_migration = mig_data.params.get("not_wait_for_migration")
if not_wait_for_migration == "yes":
not_wait_for_migration = True
mig_offline = mig_data.params.get("mig_offline")
if mig_offline == "yes":
mig_offline = True
else:
mig_offline = False
multi_mig = []
for vm in mig_data.vms:
multi_mig.append((mig_wrapper, (vm, cancel_delay, mig_data.dst,
mig_data.vm_ports,
not_wait_for_migration,
mig_offline, mig_data)))
utils_misc.parallel(multi_mig)
def migrate_vms_dest(self, mig_data):
"""
Migrate vms on the destination host. This function is started on the
dest host during migration.
@param mig_data: Data for migration.
"""
pass
def __del__(self):
if self.sync_server:
self.sync_server.close()
def master_id(self):
return self.hosts[0]
def _hosts_barrier(self, hosts, session_id, tag, timeout):
logging.debug("Barrier timeout: %d tags: %s" % (timeout, tag))
tags = SyncData(self.master_id(), self.hostid, hosts,
"%s,%s,barrier" % (str(session_id), tag),
self.sync_server).sync(tag, timeout)
logging.debug("Barrier tag %s" % (tags))
def preprocess_env(self):
"""
Prepare env to start vms.
"""
storage.preprocess_images(self.test.bindir, self.params, self.env)
def _check_vms_source(self, mig_data):
start_mig_tout = mig_data.params.get("start_migration_timeout", None)
if start_mig_tout is None:
for vm in mig_data.vms:
vm.wait_for_login(timeout=self.login_timeout)
if mig_data.params.get("host_mig_offline") != "yes":
sync = SyncData(self.master_id(), self.hostid, mig_data.hosts,
mig_data.mig_id, self.sync_server)
mig_data.vm_ports = sync.sync(timeout=120)[mig_data.dst]
logging.info("Received from destination the migration port %s",
str(mig_data.vm_ports))
def _check_vms_dest(self, mig_data):
mig_data.vm_ports = {}
for vm in mig_data.vms:
logging.info("Communicating to source migration port %s",
vm.migration_port)
mig_data.vm_ports[vm.name] = vm.migration_port
if mig_data.params.get("host_mig_offline") != "yes":
SyncData(self.master_id(), self.hostid,
mig_data.hosts, mig_data.mig_id,
self.sync_server).sync(mig_data.vm_ports, timeout=120)
def _prepare_params(self, mig_data):
"""
Prepare separate params for vm migration.
@param mig_data: Data for migration.
"""
new_params = mig_data.params.copy()
new_params["vms"] = " ".join(mig_data.vms_name)
return new_params
def _check_vms(self, mig_data):
"""
Check if vms are started correctly.
@param mig_data: Data for migration.
"""
logging.info("Try check vms %s" % (mig_data.vms_name))
for vm in mig_data.vms_name:
if not self.env.get_vm(vm) in mig_data.vms:
mig_data.vms.append(self.env.get_vm(vm))
for vm in mig_data.vms:
logging.info("Check vm %s on host %s" % (vm.name, self.hostid))
vm.verify_alive()
if mig_data.is_src():
self._check_vms_source(mig_data)
else:
self._check_vms_dest(mig_data)
def prepare_for_migration(self, mig_data, migration_mode):
"""
Prepare the machine for migration.
@param mig_data: Data needed for migration.
@param migration_mode: Migration mode used to prepare the machine.
"""
new_params = self._prepare_params(mig_data)
new_params['migration_mode'] = migration_mode
new_params['start_vm'] = 'yes'
self.vm_lock.acquire()
env_process.process(self.test, new_params, self.env,
env_process.preprocess_image,
env_process.preprocess_vm)
self.vm_lock.release()
self._check_vms(mig_data)
def migrate_vms(self, mig_data):
"""
Migrate vms.
"""
if mig_data.is_src():
self.migrate_vms_src(mig_data)
else:
self.migrate_vms_dest(mig_data)
def check_vms_dst(self, mig_data):
"""
Check vms after migration.
@param mig_data: object with migration data.
"""
for vm in mig_data.vms:
vm.resume()
if not guest_active(vm):
raise error.TestFail("Guest not active after migration")
logging.info("Migrated guest appears to be running")
logging.info("Logging into migrated guest after migration...")
for vm in mig_data.vms:
if not self.regain_ip_cmd is None:
session_serial = vm.wait_for_serial_login(timeout=
self.login_timeout)
# Sometimes the system prints a message on the serial console and
# the IP renew command blocks the test; because of this, a "sleep"
# must be added to the IP renew command.
session_serial.cmd(self.regain_ip_cmd)
vm.wait_for_login(timeout=self.login_timeout)
def check_vms_src(self, mig_data):
"""
Check vms after migration.
@param mig_data: object with migration data.
"""
pass
def postprocess_env(self):
"""
Kill vms and delete cloned images.
"""
pass
def before_migration(self, mig_data):
"""
Do something right before migration.
@param mig_data: object with migration data.
"""
pass
def migrate(self, vms_name, srchost, dsthost, start_work=None,
check_work=None, mig_mode="tcp", params_append=None):
"""
Migrate machine from srchost to dsthost. It executes start_work on
source machine before migration and executes check_work on dsthost
after migration.
Migration execution progress:
source host | dest host
--------------------------------------------------------
prepare guest on both sides of migration
- start machine and check if machine works
- synchronize transfer data needed for migration
--------------------------------------------------------
start work on source guests | wait for migration
--------------------------------------------------------
migrate guest to dest host.
wait on finish migration synchronization
--------------------------------------------------------
| check work on vms
--------------------------------------------------------
wait for sync on finish migration
@param vms_name: List of vms.
@param srchost: src host id.
@param dsthost: dst host id.
@param start_work: Function started before migration.
@param check_work: Function started after migration.
@param mig_mode: Migration mode.
@param params_append: Append params to self.params only for migration.
"""
def migrate_wrap(vms_name, srchost, dsthost, start_work=None,
check_work=None, params_append=None):
logging.info("Starting migrate vms %s from host %s to %s" %
(vms_name, srchost, dsthost))
pause = self.params.get("paused_after_start_vm")
mig_error = None
mig_data = MigrationData(self.params, srchost, dsthost,
vms_name, params_append)
cancel_delay = self.params.get("cancel_delay", None)
host_offline_migration = self.params.get("host_mig_offline")
try:
try:
if mig_data.is_src():
self.prepare_for_migration(mig_data, None)
elif self.hostid == dsthost:
if host_offline_migration != "yes":
self.prepare_for_migration(mig_data, mig_mode)
else:
return
if mig_data.is_src():
if start_work:
if pause != "yes":
start_work(mig_data)
else:
raise error.TestNAError("Can't start work if "
"vm is paused.")
# Resume the VMs and wait for the configured timeout before migration.
if pause == "yes" and mig_data.is_src():
for vm in mig_data.vms:
vm.resume()
wait = self.params.get("start_migration_timeout", 0)
logging.debug("Wait for migraiton %s seconds." %
(wait))
time.sleep(int(wait))
self.before_migration(mig_data)
self.migrate_vms(mig_data)
timeout = 60
if cancel_delay is None:
if host_offline_migration == "yes":
self._hosts_barrier(self.hosts,
mig_data.mig_id,
'wait_for_offline_mig',
self.finish_timeout)
if mig_data.is_dst():
self.prepare_for_migration(mig_data, mig_mode)
self._hosts_barrier(self.hosts,
mig_data.mig_id,
'wait2_for_offline_mig',
self.finish_timeout)
if (not mig_data.is_src()):
timeout = self.mig_timeout
self._hosts_barrier(mig_data.hosts, mig_data.mig_id,
'mig_finished', timeout)
if mig_data.is_dst():
self.check_vms_dst(mig_data)
if check_work:
check_work(mig_data)
else:
self.check_vms_src(mig_data)
if check_work:
check_work(mig_data)
except:
mig_error = True
raise
finally:
if not mig_error and cancel_delay is None:
self._hosts_barrier(self.hosts,
mig_data.mig_id,
'test_finihed',
self.finish_timeout)
elif mig_error:
raise
def wait_wrap(vms_name, srchost, dsthost):
mig_data = MigrationData(self.params, srchost, dsthost, vms_name,
None)
timeout = (self.login_timeout + self.mig_timeout +
self.finish_timeout)
self._hosts_barrier(self.hosts, mig_data.mig_id,
'test_finihed', timeout)
if (self.hostid in [srchost, dsthost]):
mig_thread = utils.InterruptedThread(migrate_wrap, (vms_name,
srchost,
dsthost,
start_work,
check_work,
params_append))
else:
mig_thread = utils.InterruptedThread(wait_wrap, (vms_name,
srchost,
dsthost))
mig_thread.start()
return mig_thread
def migrate_wait(self, vms_name, srchost, dsthost, start_work=None,
check_work=None, mig_mode="tcp", params_append=None):
"""
Migrate machine from srchost to dsthost and wait for finish.
It executes start_work on source machine before migration and executes
check_work on dsthost after migration.
@param vms_name: List of vms.
@param srchost: src host id.
@param dsthost: dst host id.
@param start_work: Function which is started before migration.
@param check_work: Function which is started after migration is done.
"""
self.migrate(vms_name, srchost, dsthost, start_work, check_work,
mig_mode, params_append).join()
def cleanup(self):
"""
Cleanup env after test.
"""
if self.clone_master:
self.sync_server.close()
self.postprocess_env()
def run(self):
"""
Start the multihost migration scenario.
After the scenario finishes or crashes, it postprocesses the
machines and cleans up the environment.
"""
try:
self.migration_scenario()
self._hosts_barrier(self.hosts, self.hosts, 'all_test_finihed',
self.finish_timeout)
finally:
self.cleanup()
class MultihostMigrationFd(MultihostMigration):
def __init__(self, test, params, env, preprocess_env=True):
super(MultihostMigrationFd, self).__init__(test, params, env,
preprocess_env)
def migrate_vms_src(self, mig_data):
"""
Migrate vms on the source host.
@param mig_data: Data for migration.
To change the way the machine migrates, it is necessary to
reimplement this method.
"""
def mig_wrapper(vm, cancel_delay, mig_offline, dsthost, vm_ports,
not_wait_for_migration, fd):
vm.migrate(cancel_delay=cancel_delay, offline=mig_offline,
dest_host=dsthost,
not_wait_for_migration=not_wait_for_migration,
protocol="fd",
fd_src=fd)
self.post_migration(vm, cancel_delay, mig_offline, dsthost,
vm_ports, not_wait_for_migration, fd, mig_data)
logging.info("Start migrating now...")
cancel_delay = mig_data.params.get("cancel_delay")
if cancel_delay is not None:
cancel_delay = int(cancel_delay)
not_wait_for_migration = mig_data.params.get("not_wait_for_migration")
if not_wait_for_migration == "yes":
not_wait_for_migration = True
mig_offline = mig_data.params.get("mig_offline")
if mig_offline == "yes":
mig_offline = True
else:
mig_offline = False
multi_mig = []
for vm in mig_data.vms:
fd = vm.params.get("migration_fd")
multi_mig.append((mig_wrapper, (vm, cancel_delay, mig_offline,
mig_data.dst, mig_data.vm_ports,
not_wait_for_migration,
fd)))
utils_misc.parallel(multi_mig)
def _check_vms_source(self, mig_data):
start_mig_tout = mig_data.params.get("start_migration_timeout", None)
if start_mig_tout is None:
for vm in mig_data.vms:
vm.wait_for_login(timeout=self.login_timeout)
self._hosts_barrier(mig_data.hosts, mig_data.mig_id,
'prepare_VMS', 60)
def _check_vms_dest(self, mig_data):
self._hosts_barrier(mig_data.hosts, mig_data.mig_id,
'prepare_VMS', 120)
for vm in mig_data.vms:
fd = vm.params.get("migration_fd")
os.close(fd)
def _connect_to_server(self, host, port, timeout=60):